content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Grid-search optimization of the W%R-MACD-MA3 strategy (series 8) on PART1 data.
# For every admissible parameter combination it runs the backtester and records
# the aggregate PD ratio, then prints all results sorted by that fitness.
source('framework/data.R');
source('framework/backtester.R')
source('framework/processResults.R')
source('strategies/W%R-MACD-MA3-limit1.R')
numOfDays <- 1100
dataList <- getData(directory="PART1")
# truncate every series to its first numOfDays rows
dataList <- lapply(dataList, function(x) x[1:numOfDays])
sMult <- 0.2 # slippage multiplier

# Parameter grids to sweep over.
lookbackR <- seq(from=10, to=20, by=10)
lookbackS <- seq(from=5, to=10, by=5)
lookbackL <- seq(from=10, to=50, by=20)
threshold <- seq(from=20, to=40, by=10)
nFast <- seq(from=5, to=15, by=10)
nSlow <- seq(from=10, to=50, by=20)
nSig <- seq(from=5, to=20, by=10)
lossLimits <- seq(from=50, to=550, by=500)
profitTarget <- seq(from=50, to=550, by=500)
lookbackLimit <- 14

paramsList <- list(lookbackS,lookbackL,lookbackR,threshold,
                   nFast,nSlow,nSig,lookbackLimit,lossLimits,profitTarget)
# Upper bound on the number of combinations; the lbs<lbl and ns>nf guards in
# the loop skip some, so the tail of resultsMatrix would stay NA -- those rows
# are dropped before the final print below.
numberComb <- prod(sapply(paramsList,length))
resultsMatrix <- matrix(nrow=numberComb,ncol=11)
# FIX: column labels now match the fill order used in the loop body
# (lbs,lbl,lbr,th,nf,ns,nsig,lbli,ll,pt,fitAgg). Previously the last three
# parameter columns were labelled lossLimits/profitTarget/lookbackLimit while
# the values were written as lookbackLimit/lossLimits/profitTarget, so the
# printed table mislabelled those columns.
colnames(resultsMatrix) <- c("lookbackS","lookbackL","lookbackR",
                             "threshold","nFast","nSlow","nSig",
                             "lookbackLimit","lossLimits","profitTarget","PD Ratio")
pfolioPnLList <- vector(mode="list",length=numberComb)
count <- 1
for (lbs in lookbackS) {
  for (lbl in lookbackL) {
    if (lbs < lbl) { # short MA lookback must be below long MA lookback
      for (lbr in lookbackR) {
        for (th in threshold) {
          for (nf in nFast) {
            for (ns in nSlow) {
              if (ns > nf) { # MACD slow period must exceed the fast period
                for (nsig in nSig) {
                  for (lbli in lookbackLimit) {
                    for (ll in lossLimits) {
                      for (pt in profitTarget) {
                        params <- list(lookbackS=lbs,lookbackL=lbl,lookbackR=lbr,
                                       threshold=th,nFast=nf,nSlow=ns,nSig=nsig,
                                       lookbackLimit=lbli,
                                       lossLimits=ll,profitTarget=pt,
                                       series=8,posSizes=rep(1,10))
                        results <- backtest(dataList, getOrders, params, sMult)
                        pfolioPnL <- plotResults(dataList,results)
                        resultsMatrix[count,] <- c(lbs,lbl,lbr,th,nf,ns,nsig,lbli,ll,pt,pfolioPnL$fitAgg)
                        pfolioPnLList[[count]] <- pfolioPnL
                        cat("Just completed",count,"out of",numberComb,"\n")
                        print(resultsMatrix[count,])
                        count <- count + 1
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
# Drop the unused (all-NA) rows left by skipped combinations, then print the
# results ordered by PD ratio (ascending).
resultsMatrix <- resultsMatrix[seq_len(count - 1), , drop = FALSE]
print(resultsMatrix[order(resultsMatrix[,"PD Ratio"]),])
| /backtester_v4.2/optimize/oo3-8.R | no_license | whyVeronica/backtester_v4.2 | R | false | false | 2,941 | r | source('framework/data.R');
# NOTE(review): this region is a dump-duplicate of the grid-search script above
# (the dataset's "text" column repeats the "content" column); the leading
# source('framework/data.R') line was absorbed into the metadata row above.
# Same fix applied here: result-matrix column labels now match the fill order.
source('framework/backtester.R')
source('framework/processResults.R')
source('strategies/W%R-MACD-MA3-limit1.R')
numOfDays <- 1100
dataList <- getData(directory="PART1")
# truncate every series to its first numOfDays rows
dataList <- lapply(dataList, function(x) x[1:numOfDays])
sMult <- 0.2 # slippage multiplier

# Parameter grids to sweep over.
lookbackR <- seq(from=10, to=20, by=10)
lookbackS <- seq(from=5, to=10, by=5)
lookbackL <- seq(from=10, to=50, by=20)
threshold <- seq(from=20, to=40, by=10)
nFast <- seq(from=5, to=15, by=10)
nSlow <- seq(from=10, to=50, by=20)
nSig <- seq(from=5, to=20, by=10)
lossLimits <- seq(from=50, to=550, by=500)
profitTarget <- seq(from=50, to=550, by=500)
lookbackLimit <- 14

paramsList <- list(lookbackS,lookbackL,lookbackR,threshold,
                   nFast,nSlow,nSig,lookbackLimit,lossLimits,profitTarget)
# Upper bound on combinations; the lbs<lbl and ns>nf guards skip some, so the
# all-NA tail rows are dropped before the final print.
numberComb <- prod(sapply(paramsList,length))
resultsMatrix <- matrix(nrow=numberComb,ncol=11)
# FIX: labels now follow the fill order (lbs,lbl,lbr,th,nf,ns,nsig,lbli,ll,pt,fitAgg);
# previously lookbackLimit/lossLimits/profitTarget were mislabelled.
colnames(resultsMatrix) <- c("lookbackS","lookbackL","lookbackR",
                             "threshold","nFast","nSlow","nSig",
                             "lookbackLimit","lossLimits","profitTarget","PD Ratio")
pfolioPnLList <- vector(mode="list",length=numberComb)
count <- 1
for (lbs in lookbackS) {
  for (lbl in lookbackL) {
    if (lbs < lbl) { # short MA lookback must be below long MA lookback
      for (lbr in lookbackR) {
        for (th in threshold) {
          for (nf in nFast) {
            for (ns in nSlow) {
              if (ns > nf) { # MACD slow period must exceed the fast period
                for (nsig in nSig) {
                  for (lbli in lookbackLimit) {
                    for (ll in lossLimits) {
                      for (pt in profitTarget) {
                        params <- list(lookbackS=lbs,lookbackL=lbl,lookbackR=lbr,
                                       threshold=th,nFast=nf,nSlow=ns,nSig=nsig,
                                       lookbackLimit=lbli,
                                       lossLimits=ll,profitTarget=pt,
                                       series=8,posSizes=rep(1,10))
                        results <- backtest(dataList, getOrders, params, sMult)
                        pfolioPnL <- plotResults(dataList,results)
                        resultsMatrix[count,] <- c(lbs,lbl,lbr,th,nf,ns,nsig,lbli,ll,pt,pfolioPnL$fitAgg)
                        pfolioPnLList[[count]] <- pfolioPnL
                        cat("Just completed",count,"out of",numberComb,"\n")
                        print(resultsMatrix[count,])
                        count <- count + 1
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
# Drop the unused (all-NA) rows, then print the results ordered by PD ratio.
resultsMatrix <- resultsMatrix[seq_len(count - 1), , drop = FALSE]
print(resultsMatrix[order(resultsMatrix[,"PD Ratio"]),])
|
# 11-29-2016
# clean code for larger base learner library
# same code as CleanCode1, basically changing workspace names and specify new library
# 8 base learners:
# random forest, generalized linear regression, quadratic splines regression, CART,
# 10 nearest neighbors, generalized boosting, support vector machine, Bagging Classification
# note that for Kenyan data, the svm need to redefine tuning parameter nu,
# this is done in the data preparation section, new wrapper with small
# nu is called SL.svm_nu
# Controlled Random Search use initiation from SL NNLS and constrain to [0, 5]^K
# CRS setting: maxeval = 10000, pop.size = 10000*(length(x0)+1)
# 1. prepare data (and save Kenyan data to a workspace)
# 2. clean code
###############################################################
# Some IMPORTANT notes for applications:
# Sample size is very important in this method application.
# Even the theoretical result is derived under asymptotic assumptions.
# For PIMA Indians Diabetes data, the sample size is less than 400,
# different seeds on the cross validation process has quite some influence on
# the cross-validated risk curves, in addition, it is important in this case to
# re-tune the CRS parameters (e.g greatly increase maxeval) in order to
# have satisfying performance.
########################################################################################
# 1. Prepare Data
###############################################################
###############################################################
# Kenyan data
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
load("analysis_data_10_7_2015.Rdata")
load("Long Crisis Study CD4.Rdata")
load("Long Second Line Study CD4.Rdata")
load("Long TDF Study CD4.Rdata")
# Harmonize the three study data sets so they can be stacked: give the crisis
# study the same ID column name and make the second-line IDs a factor.
colnames(crisis.cd4)[2] = "Study.ID"
second.cd4[,1] = as.factor(second.cd4[,1])
# Stack (Study.ID, CD4Count) pairs from all three studies.
# NOTE(review): column positions c(1,3)/c(2,5) are assumed to be ID and CD4
# count in each workspace -- verify against the .Rdata files.
prodata = rbind(second.cd4[,c(1,3)], crisis.cd4[,c(2,5)], tdf.cd4[,c(1,3)])
# define min CD4 and max CD4 for each patient in every study
library(plyr)
minCD4 = ddply(prodata,"Study.ID",function(x) min(x$CD4Count))
maxCD4 = ddply(prodata,"Study.ID",function(x) max(x$CD4Count))
# define the full data with all complete cases
FullData = kenyadata
# CD4 slopes: (latest - earlier) / elapsed time, for counts and percentages.
# NOTE(review): numeric column positions assume the kenyadata layout -- confirm.
slope.count <- (FullData[,5]-FullData[,17])/FullData[,16]
slope.perc <- (FullData[,6]-FullData[,19])/FullData[,18]
# Binary indicators derived from categorical columns.
sndline = (FullData[,2]=="line2")
tdf = (FullData[,2]=="TDF")
pftadh = (FullData[,10]=="None")
female = (FullData[,11]=="F")
mydata <- cbind(FullData[,c(1,7,2,5,6,12,14,22)], female,pftadh,tdf,sndline,slope.count, slope.perc)
mydata <- mydata[complete.cases(mydata),]
dim(mydata) # 899 14
# merge minCD4 and maxCD4 into mydata
colnames(minCD4)[2] = "minCD4"
colnames(maxCD4)[2] = "maxCD4"
mydata = merge(minCD4, mydata, by = "Study.ID")
mydata = merge(maxCD4, mydata, by = "Study.ID")
dim(mydata)
# select covariates for model fitting
Z = (mydata$ViralLoad_E>1000)*1 #define True Status
Y = Z #response used in finding risk score
# Covariate matrix: standardize the continuous columns to mean 0 / sd 1 and
# recode the two binary columns from {0,1} to {-1,1}.
W = as.data.frame(mydata[,c(8,11,3,6,7,12,9,16)]*1)
W[,c(1,3,4,5,7,8)]=apply(W[,c(1,3,4,5,7,8)],2,function(x) (x-mean(x))/sd(x))
W$female[W$female==0] = -1
W$pftadh[W$pftadh==0] = -1
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
save(Y, W, file = "KenyanData.RData")
# SuperLearner wrapper for an e1071 nu-SVM base learner with a small default
# nu (0.01), introduced because the stock SL.svm settings were problematic on
# the Kenyan data (see file header).
# Follows the SuperLearner wrapper convention:
#   Y, X    - training outcome and covariates
#   newX    - covariates to predict on
#   family  - gaussian() -> nu-regression; binomial() -> nu-classification
#   other arguments are e1071::svm tuning parameters, forwarded as given.
# Returns list(pred, fit) with class "SL.svm" attached to fit.
SL.svm_nu <- function(Y, X, newX, family, type.reg = "nu-regression", type.class = "nu-classification", kernel =
"radial", nu = 0.01, degree = 3, cost = 1, coef0 = 0, ...) {
require('e1071')
if(family$family == "gaussian") {
fit.svm <- e1071::svm(y = Y, x = X, nu = nu, type = type.reg, fitted = FALSE, kernel = kernel, degree = degree, cost = cost, coef0 = coef0)
pred <- predict(fit.svm, newdata = newX)
fit <- list(object = fit.svm)
}
if(family$family == "binomial") {
# probability = TRUE so class-"1" probabilities can be extracted below
fit.svm <- e1071::svm(y = as.factor(Y), x = X, nu = nu, type = type.class, fitted = FALSE, probability = TRUE, kernel = kernel, degree = degree, cost = cost, coef0 = coef0)
pred <- attr(predict(fit.svm, newdata = newX, probability = TRUE), "prob")[, "1"] # assumes Y is 0/1 numeric
fit <- list(object = fit.svm)
}
out <- list(pred = pred, fit = fit)
# NOTE(review): tagging the fit as "SL.svm" means SuperLearner dispatches
# predict.SL.svm, NOT the custom predict.SL.svm_nu defined below -- confirm
# this is intended (the two predict methods look equivalent).
class(out$fit) <- c("SL.svm")
return(out)
}
# Predict method for SL.svm_nu fits.
# object  - list with component `object`, the fitted model
# newdata - covariates to predict on
# family  - gaussian() returns raw predictions; binomial() returns the
#           probability of class "1" from the probability-SVM attribute
predict.SL.svm_nu <- function(object, newdata, family,...){
require('e1071')
svm_fit <- object$object
if (family$family == "gaussian") {
pred <- predict(svm_fit, newdata = newdata)
} else if (family$family == "binomial") {
raw <- predict(svm_fit, newdata = newdata, probability = TRUE)
pred <- attr(raw, "prob")[, "1"]
}
pred
}
###############################################################
###############################################################
# Wisconsin Breast Cancer (kept for reference; commented out)
#setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
#data = read.table("wdbc.data",sep = ",")
#colnames(data) = c("ID","diag",
# "MRadius","MTexture","MPerimeter","MArea","MSmooth",
# "MCompact","MConcavity","MConcaveP","MSymmetry","MFracDim",
# "SERadius","SETexture","SEPerimeter","SEArea","SESmooth",
# "SECompact","SEConcavity","SEConcaveP","SESymmetry","SEFracDim",
# "WRadius","WTexture","WPerimeter","WArea","WSmooth",
# "WCompact","WConcavity","WConcaveP","WSymmetry","WFracDim")
#W = matrix(unlist(data[,3:32]),ncol=dim(data[,3:32])[2])
#colnames(W) = colnames(data)[3:32]
#W = apply(W,2,function(x) (x-mean(x))/sd(x))
#W = as.data.frame(W)
#Y = rep(0,dim(data)[1])
#Y[data[,2]=="B"] = 1
#Z = Y
#table(Y)
###############################################################
###############################################################
# PIMA
# NOTE(review): two consecutive setwd calls -- only the second takes effect;
# they appear to cover two different machines.
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
data = read.table("pima-indians-diabetes.data",sep = ",")
dim(data) #768 9
head(data)
summary(data)
# Last column is the diabetes outcome; the rest are covariates.
Y = data[,ncol(data)]
W = data[,-ncol(data)]
summary(W)
# we need to make all variables mean 0 sd 1
# for continuous variables, simply standardize
# for categorical varialbes, (0,1)->(-1,1), (0,1,2)->(-1,0,1)
# continuous columns: all W
W = apply(W,2,function(x) (x-mean(x))/sd(x))
W = as.data.frame(W)
table(Y)
#0 1
#500 268
mean(Y)
# prevelence 0.3489583
########################################################################################
# 2.clean code
# load in functions_CRSnes.R -- changes made to crs.fit:
# (1)initiation using NNLS
# (2)region [0, 5]^K
# (3)normalize to sum up to 1 before output and making predictions
# simply change the load data step for other runs
# data format: Y -- outcome; W -- covariates in dataframe
# other parameters for tuning:
# number of folds K, SL library, number of minimization alg
# NOTE(review): again two setwd calls; only the second is effective.
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
#load("KenyanData.RData")# load data
# load packages
library("SuperLearner")
library("randomForest")
library("gam")
library("rpart")
library("dplyr")
library("plyr")
library("ggplot2")
library("nloptr")
library("lpSolve")
library("nnls")
source("functions_CRSnew.r")# load functions
# svm_nu for Kenyan Daya
# NOTE(review): SL.library is assigned twice; the second (SL.svm, for PIMA)
# overwrites the first -- comment out whichever run is not wanted.
SL.library = c("SL.randomForest","SL.glm","SL.gam", "SL.rpart","SL.knn","SL.gbm","SL.svm_nu","SL.ipredbagg")
# regular svm for PIMA
SL.library = c("SL.randomForest","SL.glm","SL.gam", "SL.rpart","SL.knn","SL.gbm","SL.svm","SL.ipredbagg")
# random forest, generalized linear regression, quadratic splines regression, CART,
# 10 nearest neighbors, generalized boosting, support vector machine, Bagging Classification
###### cross validation
###### balance the ratio of Y=0 and Y=1's in each fold
N = length(Y)
K = 10 # number of folds for calculating cross validated risk
N0 = table(Y)[1] # number of {Y=0}
l0 = floor(N0/K) # number of {Y=0} obs in each fold
# evenly distribute the leftovers, fold label vector:
t0 = rep(1:K,rep(l0,K)+c(rep(1,N0%%l0),rep(0,K-N0%%l0)))
N1 = table(Y)[2] # number of {Y=1}
l1 = floor(N1/K) # number of {Y=1} obs in each fold
t1 = rep(1:K,rep(l1,K)+c(rep(1,N1%%l1),rep(0,K-N1%%l1)))
data = cbind(Y,W)
#as.integer(runif(1)*2e9)
#set.seed(1818715699)#Kenyan
#set.seed(100) #WDBC
set.seed(852632362)#PIMA
# also ran seed = 49836901 (riskPIMA_bigLib1.RData) and seed = 1566626083 (riskPIMA_bigLib2.RData)
ind0 = sample(1:N0,replace=FALSE)
ind1 = sample(1:N1,replace=FALSE)
# permute the fold label vector
t0 = t0[ind0]
t1 = t1[ind1]
# permute the Y=0 and Y=1's separately to make balance in each fold
# cv.fold is the validation fold index for the whole dataset
cv.fold = rep(0,length(Y))
cv.fold[Y==0] = t0
cv.fold[Y==1] = t1
# Fit SuperLearner once per CV fold. For each fold i the loop stores (via
# assign, suffixing the fold number) the training/validation predictions,
# the internal cross-validated library predictions (fit$Z), and the outcomes.
# NOTE(review): newX=W means predictions are made on ALL rows; train/val
# subsets are taken from that combined matrix afterwards.
for(i in 1:K){
print(i)
train.ind <- (cv.fold!=i)
val.ind <- (cv.fold==i)
fit.data.SLL <- SuperLearner(Y=Y[train.ind], X=W[train.ind,],newX=W, SL.library = SL.library, family = binomial(), method = "method.NNLS",verbose = FALSE)
sl.pred <- fit.data.SLL$SL.predict #prediction from super learner
lib.pred <- fit.data.SLL$library.predict #prediction from library algorithms
pred <- cbind(sl.pred,lib.pred) #all predictions
colnames(pred) <- c("SuperLearner",SL.library)
train.S <- pred[train.ind,] # trained predictions
val.S <- pred[val.ind,] # validation predictions
train.CVS <- fit.data.SLL$Z # cross-validated library predictions
trainCVS_SL = fit.data.SLL$Z %*% fit.data.SLL$coef # SL CV predictions from within SL
train.CVS.wSL = cbind(trainCVS_SL, train.CVS) # add the first col to be the SL CV predictions
train.Z <- Y[train.ind] # trained outcome
val.Z <- Y[val.ind] # validation outcome
assign(paste("train.CVS.wSL", i, sep=""),train.CVS.wSL)
assign(paste("train.CVS", i, sep=""),train.CVS)
assign(paste("train.S", i, sep=""),train.S)
assign(paste("val.S", i, sep=""),val.S)
assign(paste("train.Z", i, sep=""),train.Z)
assign(paste("val.Z", i, sep=""),val.Z)
}
###### Solving for (alpha, c) using alg number of methods
# Three approaches are compared per (lambda, fold):
#   1 = conditional thresholding on the SL score
#   2 = two-step minimization (threshold on SL cross-validated score)
#   3 = CRS minimization (crs.fit from functions_CRSnew.r)
alg=3
#as.integer(runif(1)*2e9)
# randomly generated number, for replicating results
#set.seed(1988420473)#Kenyan data
#set.seed(1920413227)#WDBC
set.seed(17187750)#PIMA
lambdas = unique(seq(0.1,0.9,0.01))
#lambdas = unique(c(seq(0,0.7,0.025),seq(0.7,1,0.01)))
# alg algorithms, length(lambdas) lambdas, 10 folds
cutoff = array(NA, dim = c(alg,length(lambdas),K))
# true status vector that follows the order of the stacked cross validated predictions
CVZ = val.Z1
for(k in 2:K){
CVZ = c(CVZ,get(paste("val.Z", k, sep="")))
}
# NOTE(review): TPR is allocated but never filled anywhere below -- dead state?
FPR = matrix(NA,ncol = alg, nrow = length(lambdas))
FNR = matrix(NA,ncol = alg, nrow = length(lambdas))
TPR = matrix(NA,ncol = alg, nrow = length(lambdas))
risk = matrix(NA,ncol = alg, nrow = length(lambdas))
deci = vector("list", length(lambdas))
for(k in 1:K){
# pull the per-fold objects stored by the CV loop above
train.CVS = get(paste("train.CVS", k, sep=""))
train.S = get(paste("train.S", k, sep=""))
val.S = get(paste("val.S", k, sep=""))
train.Z = get(paste("train.Z", k, sep=""))
val.Z = get(paste("val.Z", k, sep=""))
train.CVS.wSL = get(paste("train.CVS.wSL", k, sep=""))
XX = train.S[,-1] # training fold library predictions
for(i in 1:length(lambdas)){
print(c(i,k))
lambda = lambdas[i]
crs.seed = as.integer(runif(1)*2e9)
cbs = crs.fit(seed = crs.seed, lambda,train.CVS,train.Z, val.S[,-1], val.Z)
cutoff[3, i, k] = cbs$c #CBS cutoff for lambda i fold k
# SL Common
opt = Opt.nonpar.rule(train.Z,train.S[,1],phi=0,lambda)
cutoff[1, i, k] = as.numeric(opt)[1]
# SL Proposal Iterative
opt = Opt.nonpar.rule(train.Z,train.CVS.wSL[,1],phi=0,lambda)
cutoff[2, i, k] = as.numeric(opt)[1]
cut = matrix(rep(cutoff[, i, k],nrow(val.S)),nrow=nrow(val.S),byrow=T)
# NOTE(review): columns 1 and 2 are both val.S[,1] -- presumably intentional
# (both methods threshold the SL score, with differently derived cutoffs),
# but confirm column 2 should not be the CV-based score.
val = cbind(val.S[,1], val.S[,1], cbs$score)
dki = (val > cut)*1
# one decision matrix for each lambda value, matrix size n x alg
if(k == 1){
deci[[i]] = dki
}
if(k>1){
deci[[i]] = rbind(deci[[i]],dki) # each row is validation decision arranged by folds, col is algorithm
}
}#i
}#k
# Aggregate the stacked fold decisions into cross-validated FPR/FNR and the
# lambda-weighted risk, per algorithm.
for(i in 1:length(lambdas)){
lambda = lambdas[i]
dec = deci[[i]]
FPR[i,] = apply(dec,2,function(x) mean((x==1)*(1-CVZ))/mean(1-CVZ))
FNR[i,] = apply(dec,2,function(x) mean((x==0)*(CVZ))/mean(CVZ))
risk[i,] = lambda*mean(CVZ)*FNR[i,] + (1-lambda)*(mean(1-CVZ))*FPR[i,]
}
# Quick-look risk curve for the current run.
toplot = risk
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
dat1 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat1) <- c("lambda","risk")
dat1$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p1 <- ggplot(dat1, aes(x=lambda, y=risk)) + geom_line(data=dat1,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1)
print(p1)
ind = which(lambdas %in% seq(0.1,0.9,0.1))
round(t(cbind(lambdas,risk)[ind,]),3)
#############################################################################
# save workspace
# NOTE(review): both the PIMA and Kenya save blocks run on the same `toplot`;
# only the block matching the data actually loaded above is meaningful --
# run one of the two, not both.
riskPIMA = toplot
lambdasPIMA = lambdas
save(lambdasPIMA, riskPIMA, file = "riskPIMA_bigLib.RData")
save.image("CRSnew_PIMAData_bigLib.RData")
#setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
#load("CRSnew_KenyanData.RData")
riskKenya = toplot
lambdaKenya = lambdas
save(lambdaKenya, riskKenya, file = "riskKenya_bigLib.RData")
save.image("CRSnew_KenyanData_bigLib.RData")
## the WDBC is already done in wdbc_largerLibrary.R (11-28-2016)
# riskWDBC = toplot
# lambdasWDBC = lambdas
# save(lambdasWDBC, riskWDBC, file = "riskWDBC1.RData")
# save.image("CRSnew_WDBCData_bigLib.RData")
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
# NOTE(review): rm(list = ls()) wipes the whole session before the plotting
# section below reloads from the saved .RData files; anything unsaved is lost.
rm(list = ls())
###############################################################
# Plotting section: reload the saved per-study risk curves and rebuild the
# three comparison figures (Kenya, WDBC, PIMA).
# NOTE(review): two setwd calls; only the second is effective.
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
# load packages
library("SuperLearner")
library("randomForest")
library("gam")
library("rpart")
library("dplyr")
library("plyr")
library("ggplot2")
library("nloptr")
library("lpSolve")
library("nnls")
load("riskKenya_bigLib.RData")
load("riskPIMA_bigLib.RData")
load("riskWDBC_bigLib.RData")
# Kenya risk curves
toplot = riskKenya
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
lambdas = lambdaKenya
dat3 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat3) <- c("lambda","risk")
dat3$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p3 <- ggplot(dat3, aes(x=lambda, y=risk)) + geom_line(data=dat3,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1) + ggtitle("Kenyan Study")
p3 <- p3 + xlab(expression(paste(lambda)))
print(p3)
# WDBC risk curves
colnames(riskWDBC) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
toplot = riskWDBC
lambdas = lambdasWDBC
dat4 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat4) <- c("lambda","risk")
dat4$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p4 <- ggplot(dat4, aes(x=lambda, y=risk)) + geom_line(data=dat4,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1) + ggtitle("Breast Cancer Study")
p4 <- p4 + xlab(expression(paste(lambda)))
print(p4)
# PIMA risk curves
toplot = riskPIMA
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
lambdas = lambdasPIMA
dat5 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat5) <- c("lambda","risk")
dat5$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p5 <- ggplot(dat5, aes(x=lambda, y=risk)) + geom_line(data=dat5,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1) + ggtitle("PIMA Diabetes Study")
p5 <- p5 + xlab(expression(paste(lambda)))
print(p5)
library(ggplot2)
library(gridExtra)
library(grid)
# Arrange several ggplots in one column sharing a single bottom legend.
# The legend grob is extracted from the FIRST plot (all plots are assumed to
# have the same legend), the per-plot legends are suppressed, and the shared
# legend is placed underneath with its natural height.
# ... : any number of ggplot objects.
grid_arrange_shared_legend <- function(...) {
plots <- list(...)
# render plot 1 with a bottom legend and pull out its "guide-box" grob
g <- ggplotGrob(plots[[1]] + theme(legend.position="bottom"))$grobs
legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
lheight <- sum(legend$height)
grid.arrange(
do.call(arrangeGrob, lapply(plots, function(x)
x + theme(legend.position="none"))),
legend,
ncol = 1,
heights = unit.c(unit(1, "npc") - lheight, lheight))
}
# Write the combined three-panel figure, then print rounded risk tables at
# selected lambda values for each study.
png(filename = "RiskPlot_Application_bigLib.png", width = 550, height = 850)
grid_arrange_shared_legend(p3, p4,p5)
dev.off()
# indices of lambda = 0.1, 0.2, ..., 0.9 (hard-coded below for each grid)
which(lambdaKenya %in% seq(0.1,0.9, 0.1))
ind = c(11, 21, 31,41, 51, 61, 71, 81, 91)
round(t(cbind(lambdaKenya,riskKenya)[ind,]),3)
ind = c(5,9,13,17,21,25,29,33,37)
round(t(cbind(lambdasWDBC,riskWDBC)[ind,]),3)
ind = c(11, 21, 31,41, 51, 61, 71, 81, 91)
round(t(cbind(lambdasPIMA,riskPIMA)[ind,]),3)
# compact tables at lambda = 0.2 / 0.5 / 0.8
ind = which(lambdaKenya %in% c(0.2,0.5,0.8))
round(t(cbind(lambdaKenya,riskKenya)[ind,]),3)
ind = which(lambdasWDBC %in% c(0.2,0.5,0.8))
round(t(cbind(lambdasWDBC,riskWDBC)[ind,]),3)
| /R/CleanCode2_largerLibrary.R | no_license | yizhenxu/SL_Thresholding | R | false | false | 17,763 | r | # 11-29-2016
# NOTE(review): everything from here down is a byte-for-byte duplicate of the
# script above -- this file is a dataset dump whose "text" column repeats the
# "content" column. Deduplicate rather than maintain both copies.
# clean code for larger base learner library
# same code as CleanCode1, basically changing workspace names and specify new library
# 8 base learners:
# random forest, generalized linear regression, quadratic splines regression, CART,
# 10 nearest neighbors, generalized boosting, support vector machine, Bagging Classification
# note that for Kenyan data, the svm need to redefine tuning parameter nu,
# this is done in the data preparation section, new wrapper with small
# nu is called SL.svm_nu
# Controlled Random Search use initiation from SL NNLS and constrain to [0, 5]^K
# CRS setting: maxeval = 10000, pop.size = 10000*(length(x0)+1)
# 1. prepare data (and save Kenyan data to a workspace)
# 2. clean code
###############################################################
# Some IMPORTANT notes for applications:
# Sample size is very important in this method application.
# Even the theoretical result is derived under asymptotic assumptions.
# For PIMA Indians Diabetes data, the sample size is less than 400,
# different seeds on the cross validation process has quite some influence on
# the cross-validated risk curves, in addition, it is important in this case to
# re-tune the CRS parameters (e.g greatly increase maxeval) in order to
# have satisfying performance.
########################################################################################
# 1. Prepare Data
###############################################################
###############################################################
# Kenyan data
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
load("analysis_data_10_7_2015.Rdata")
load("Long Crisis Study CD4.Rdata")
load("Long Second Line Study CD4.Rdata")
load("Long TDF Study CD4.Rdata")
colnames(crisis.cd4)[2] = "Study.ID"
second.cd4[,1] = as.factor(second.cd4[,1])
prodata = rbind(second.cd4[,c(1,3)], crisis.cd4[,c(2,5)], tdf.cd4[,c(1,3)])
# define min CD4 and max CD4 for each patient in every study
library(plyr)
minCD4 = ddply(prodata,"Study.ID",function(x) min(x$CD4Count))
maxCD4 = ddply(prodata,"Study.ID",function(x) max(x$CD4Count))
# define the full data with all complete cases
FullData = kenyadata
slope.count <- (FullData[,5]-FullData[,17])/FullData[,16]
slope.perc <- (FullData[,6]-FullData[,19])/FullData[,18]
sndline = (FullData[,2]=="line2")
tdf = (FullData[,2]=="TDF")
pftadh = (FullData[,10]=="None")
female = (FullData[,11]=="F")
mydata <- cbind(FullData[,c(1,7,2,5,6,12,14,22)], female,pftadh,tdf,sndline,slope.count, slope.perc)
mydata <- mydata[complete.cases(mydata),]
dim(mydata) # 899 14
# merge minCD4 and maxCD4 into mydata
colnames(minCD4)[2] = "minCD4"
colnames(maxCD4)[2] = "maxCD4"
mydata = merge(minCD4, mydata, by = "Study.ID")
mydata = merge(maxCD4, mydata, by = "Study.ID")
dim(mydata)
# select covariates for model fitting
Z = (mydata$ViralLoad_E>1000)*1 #define True Status
Y = Z #response used in finding risk score
W = as.data.frame(mydata[,c(8,11,3,6,7,12,9,16)]*1)
W[,c(1,3,4,5,7,8)]=apply(W[,c(1,3,4,5,7,8)],2,function(x) (x-mean(x))/sd(x))
W$female[W$female==0] = -1
W$pftadh[W$pftadh==0] = -1
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
save(Y, W, file = "KenyanData.RData")
# NOTE(review): duplicate copy of SL.svm_nu (dataset-dump repetition).
# SuperLearner wrapper for an e1071 nu-SVM with a small default nu (0.01).
SL.svm_nu <- function(Y, X, newX, family, type.reg = "nu-regression", type.class = "nu-classification", kernel =
"radial", nu = 0.01, degree = 3, cost = 1, coef0 = 0, ...) {
require('e1071')
if(family$family == "gaussian") {
fit.svm <- e1071::svm(y = Y, x = X, nu = nu, type = type.reg, fitted = FALSE, kernel = kernel, degree = degree, cost = cost, coef0 = coef0)
pred <- predict(fit.svm, newdata = newX)
fit <- list(object = fit.svm)
}
if(family$family == "binomial") {
fit.svm <- e1071::svm(y = as.factor(Y), x = X, nu = nu, type = type.class, fitted = FALSE, probability = TRUE, kernel = kernel, degree = degree, cost = cost, coef0 = coef0)
pred <- attr(predict(fit.svm, newdata = newX, probability = TRUE), "prob")[, "1"] # assumes Y is 0/1 numeric
fit <- list(object = fit.svm)
}
out <- list(pred = pred, fit = fit)
# NOTE(review): class "SL.svm" means predict.SL.svm is dispatched, not
# predict.SL.svm_nu -- confirm intended (see note on the first copy).
class(out$fit) <- c("SL.svm")
return(out)
}
# NOTE(review): duplicate copy of predict.SL.svm_nu (dataset-dump repetition).
# Returns raw predictions (gaussian) or class-"1" probabilities (binomial).
predict.SL.svm_nu <- function(object, newdata, family,...){
require('e1071')
if(family$family == "gaussian") {
pred <- predict(object$object, newdata = newdata)
}
if(family$family == "binomial") {
pred <- attr(predict(object$object, newdata = newdata, probability = TRUE), "prob")[, "1"]
}
return(pred)
}
###############################################################
# NOTE(review): duplicate copy of the analysis body above (dataset-dump
# repetition), truncated mid-script at the end of this chunk.
###############################################################
# Wisconsin Breast Cancer
#setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
#data = read.table("wdbc.data",sep = ",")
#colnames(data) = c("ID","diag",
# "MRadius","MTexture","MPerimeter","MArea","MSmooth",
# "MCompact","MConcavity","MConcaveP","MSymmetry","MFracDim",
# "SERadius","SETexture","SEPerimeter","SEArea","SESmooth",
# "SECompact","SEConcavity","SEConcaveP","SESymmetry","SEFracDim",
# "WRadius","WTexture","WPerimeter","WArea","WSmooth",
# "WCompact","WConcavity","WConcaveP","WSymmetry","WFracDim")
#W = matrix(unlist(data[,3:32]),ncol=dim(data[,3:32])[2])
#colnames(W) = colnames(data)[3:32]
#W = apply(W,2,function(x) (x-mean(x))/sd(x))
#W = as.data.frame(W)
#Y = rep(0,dim(data)[1])
#Y[data[,2]=="B"] = 1
#Z = Y
#table(Y)
###############################################################
###############################################################
# PIMA
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\data")
data = read.table("pima-indians-diabetes.data",sep = ",")
dim(data) #768 9
head(data)
summary(data)
Y = data[,ncol(data)]
W = data[,-ncol(data)]
summary(W)
# we need to make all variables mean 0 sd 1
# for continuous variables, simply standardize
# for categorical varialbes, (0,1)->(-1,1), (0,1,2)->(-1,0,1)
# continuous columns: all W
W = apply(W,2,function(x) (x-mean(x))/sd(x))
W = as.data.frame(W)
table(Y)
#0 1
#500 268
mean(Y)
# prevelence 0.3489583
########################################################################################
# 2.clean code
# load in functions_CRSnes.R -- changes made to crs.fit:
# (1)initiation using NNLS
# (2)region [0, 5]^K
# (3)normalize to sum up to 1 before output and making predictions
# simply change the load data step for other runs
# data format: Y -- outcome; W -- covariates in dataframe
# other parameters for tuning:
# number of folds K, SL library, number of minimization alg
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
#load("KenyanData.RData")# load data
# load packages
library("SuperLearner")
library("randomForest")
library("gam")
library("rpart")
library("dplyr")
library("plyr")
library("ggplot2")
library("nloptr")
library("lpSolve")
library("nnls")
source("functions_CRSnew.r")# load functions
# svm_nu for Kenyan Daya
SL.library = c("SL.randomForest","SL.glm","SL.gam", "SL.rpart","SL.knn","SL.gbm","SL.svm_nu","SL.ipredbagg")
# regular svm for PIMA
SL.library = c("SL.randomForest","SL.glm","SL.gam", "SL.rpart","SL.knn","SL.gbm","SL.svm","SL.ipredbagg")
# random forest, generalized linear regression, quadratic splines regression, CART,
# 10 nearest neighbors, generalized boosting, support vector machine, Bagging Classification
###### cross validation
###### balance the ratio of Y=0 and Y=1's in each fold
N = length(Y)
K = 10 # number of folds for calculating cross validated risk
N0 = table(Y)[1] # number of {Y=0}
l0 = floor(N0/K) # number of {Y=0} obs in each fold
# evenly distribute the leftovers, fold label vector:
t0 = rep(1:K,rep(l0,K)+c(rep(1,N0%%l0),rep(0,K-N0%%l0)))
N1 = table(Y)[2] # number of {Y=1}
l1 = floor(N1/K) # number of {Y=1} obs in each fold
t1 = rep(1:K,rep(l1,K)+c(rep(1,N1%%l1),rep(0,K-N1%%l1)))
data = cbind(Y,W)
#as.integer(runif(1)*2e9)
#set.seed(1818715699)#Kenyan
#set.seed(100) #WDBC
set.seed(852632362)#PIMA
# also ran seed = 49836901 (riskPIMA_bigLib1.RData) and seed = 1566626083 (riskPIMA_bigLib2.RData)
ind0 = sample(1:N0,replace=FALSE)
ind1 = sample(1:N1,replace=FALSE)
# permute the fold label vector
t0 = t0[ind0]
t1 = t1[ind1]
# permute the Y=0 and Y=1's separately to make balance in each fold
# cv.fold is the validation fold index for the whole dataset
cv.fold = rep(0,length(Y))
cv.fold[Y==0] = t0
cv.fold[Y==1] = t1
for(i in 1:K){
print(i)
train.ind <- (cv.fold!=i)
val.ind <- (cv.fold==i)
fit.data.SLL <- SuperLearner(Y=Y[train.ind], X=W[train.ind,],newX=W, SL.library = SL.library, family = binomial(), method = "method.NNLS",verbose = FALSE)
sl.pred <- fit.data.SLL$SL.predict #prediction from super learner
lib.pred <- fit.data.SLL$library.predict #prediction from library algorithms
pred <- cbind(sl.pred,lib.pred) #all predictions
colnames(pred) <- c("SuperLearner",SL.library)
train.S <- pred[train.ind,] # trained predictions
val.S <- pred[val.ind,] # validation predictions
train.CVS <- fit.data.SLL$Z # cross-validated library predictions
trainCVS_SL = fit.data.SLL$Z %*% fit.data.SLL$coef # SL CV predictions from within SL
train.CVS.wSL = cbind(trainCVS_SL, train.CVS) # add the first col to be the SL CV predictions
train.Z <- Y[train.ind] # trained outcome
val.Z <- Y[val.ind] # validation outcome
assign(paste("train.CVS.wSL", i, sep=""),train.CVS.wSL)
assign(paste("train.CVS", i, sep=""),train.CVS)
assign(paste("train.S", i, sep=""),train.S)
assign(paste("val.S", i, sep=""),val.S)
assign(paste("train.Z", i, sep=""),train.Z)
assign(paste("val.Z", i, sep=""),val.Z)
}
###### Solving for (alpha, c) using alg number of methods
alg=3
#as.integer(runif(1)*2e9)
# randomly generated number, for replicating results
#set.seed(1988420473)#Kenyan data
#set.seed(1920413227)#WDBC
set.seed(17187750)#PIMA
lambdas = unique(seq(0.1,0.9,0.01))
#lambdas = unique(c(seq(0,0.7,0.025),seq(0.7,1,0.01)))
# alg algorithms, length(lambdas) lambdas, 10 folds
cutoff = array(NA, dim = c(alg,length(lambdas),K))
# true status vector that follows the order of the stacked cross validated predictions
CVZ = val.Z1
for(k in 2:K){
CVZ = c(CVZ,get(paste("val.Z", k, sep="")))
}
FPR = matrix(NA,ncol = alg, nrow = length(lambdas))
FNR = matrix(NA,ncol = alg, nrow = length(lambdas))
TPR = matrix(NA,ncol = alg, nrow = length(lambdas))
risk = matrix(NA,ncol = alg, nrow = length(lambdas))
deci = vector("list", length(lambdas))
for(k in 1:K){
train.CVS = get(paste("train.CVS", k, sep=""))
train.S = get(paste("train.S", k, sep=""))
val.S = get(paste("val.S", k, sep=""))
train.Z = get(paste("train.Z", k, sep=""))
val.Z = get(paste("val.Z", k, sep=""))
train.CVS.wSL = get(paste("train.CVS.wSL", k, sep=""))
XX = train.S[,-1] # training fold library predictions
for(i in 1:length(lambdas)){
print(c(i,k))
lambda = lambdas[i]
crs.seed = as.integer(runif(1)*2e9)
cbs = crs.fit(seed = crs.seed, lambda,train.CVS,train.Z, val.S[,-1], val.Z)
cutoff[3, i, k] = cbs$c #CBS cutoff for lambda i fold k
# SL Common
opt = Opt.nonpar.rule(train.Z,train.S[,1],phi=0,lambda)
cutoff[1, i, k] = as.numeric(opt)[1]
# SL Proposal Iterative
opt = Opt.nonpar.rule(train.Z,train.CVS.wSL[,1],phi=0,lambda)
cutoff[2, i, k] = as.numeric(opt)[1]
cut = matrix(rep(cutoff[, i, k],nrow(val.S)),nrow=nrow(val.S),byrow=T)
val = cbind(val.S[,1], val.S[,1], cbs$score)
dki = (val > cut)*1
# one decision matrix for each lambda value, matrix size n x alg
if(k == 1){
deci[[i]] = dki
}
if(k>1){
deci[[i]] = rbind(deci[[i]],dki) # each row is validation decision arranged by folds, col is algorithm
}
}#i
}#k
for(i in 1:length(lambdas)){
lambda = lambdas[i]
dec = deci[[i]]
FPR[i,] = apply(dec,2,function(x) mean((x==1)*(1-CVZ))/mean(1-CVZ))
FNR[i,] = apply(dec,2,function(x) mean((x==0)*(CVZ))/mean(CVZ))
risk[i,] = lambda*mean(CVZ)*FNR[i,] + (1-lambda)*(mean(1-CVZ))*FPR[i,]
}
toplot = risk
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
dat1 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat1) <- c("lambda","risk")
dat1$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p1 <- ggplot(dat1, aes(x=lambda, y=risk)) + geom_line(data=dat1,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1)
print(p1)
ind = which(lambdas %in% seq(0.1,0.9,0.1))
round(t(cbind(lambdas,risk)[ind,]),3)
#############################################################################
# save workspace
riskPIMA = toplot
lambdasPIMA = lambdas
save(lambdasPIMA, riskPIMA, file = "riskPIMA_bigLib.RData")
save.image("CRSnew_PIMAData_bigLib.RData")
#setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
#load("CRSnew_KenyanData.RData")
riskKenya = toplot
lambdaKenya = lambdas
save(lambdaKenya, riskKenya, file = "riskKenya_bigLib.RData")
save.image("CRSnew_KenyanData_bigLib.RData")
## the WDBC is already done in wdbc_largerLibrary.R (11-28-2016)
# riskWDBC = toplot
# lambdasWDBC = lambdas
# save(lambdasWDBC, riskWDBC, file = "riskWDBC1.RData")
# save.image("CRSnew_WDBCData_bigLib.RData")
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
rm(list = ls())
###############################################################
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
setwd("C:\\Users\\yxu\\Google Drive\\Desktop\\2015 summer\\Risk Score Wrap Up\\CRS constrained")
# load packages
library("SuperLearner")
library("randomForest")
library("gam")
library("rpart")
library("dplyr")
library("plyr")
library("ggplot2")
library("nloptr")
library("lpSolve")
library("nnls")
load("riskKenya_bigLib.RData")
load("riskPIMA_bigLib.RData")
load("riskWDBC_bigLib.RData")
toplot = riskKenya
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
lambdas = lambdaKenya
dat3 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat3) <- c("lambda","risk")
dat3$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p3 <- ggplot(dat3, aes(x=lambda, y=risk)) + geom_line(data=dat3,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1) + ggtitle("Kenyan Study")
p3 <- p3 + xlab(expression(paste(lambda)))
print(p3)
colnames(riskWDBC) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
toplot = riskWDBC
lambdas = lambdasWDBC
dat4 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat4) <- c("lambda","risk")
dat4$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p4 <- ggplot(dat4, aes(x=lambda, y=risk)) + geom_line(data=dat4,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1) + ggtitle("Breast Cancer Study")
p4 <- p4 + xlab(expression(paste(lambda)))
print(p4)
toplot = riskPIMA
colnames(toplot) = c("Conditional Thresholding", "Two-Step Minimization", "CRS Minimization")
lambdas = lambdasPIMA
dat5 = data.frame(cbind(lambdas,c(toplot)))
colnames(dat5) <- c("lambda","risk")
dat5$Approach = rep(colnames(toplot),rep(length(lambdas),ncol(toplot)))
p5 <- ggplot(dat5, aes(x=lambda, y=risk)) + geom_line(data=dat5,aes(x=lambda, y=risk,group=Approach,col = Approach),lwd=1) + ggtitle("PIMA Diabetes Study")
p5 <- p5 + xlab(expression(paste(lambda)))
print(p5)
library(ggplot2)
library(gridExtra)
library(grid)
grid_arrange_shared_legend <- function(...) {
plots <- list(...)
g <- ggplotGrob(plots[[1]] + theme(legend.position="bottom"))$grobs
legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
lheight <- sum(legend$height)
grid.arrange(
do.call(arrangeGrob, lapply(plots, function(x)
x + theme(legend.position="none"))),
legend,
ncol = 1,
heights = unit.c(unit(1, "npc") - lheight, lheight))
}
png(filename = "RiskPlot_Application_bigLib.png", width = 550, height = 850)
grid_arrange_shared_legend(p3, p4,p5)
dev.off()
which(lambdaKenya %in% seq(0.1,0.9, 0.1))
ind = c(11, 21, 31,41, 51, 61, 71, 81, 91)
round(t(cbind(lambdaKenya,riskKenya)[ind,]),3)
ind = c(5,9,13,17,21,25,29,33,37)
round(t(cbind(lambdasWDBC,riskWDBC)[ind,]),3)
ind = c(11, 21, 31,41, 51, 61, 71, 81, 91)
round(t(cbind(lambdasPIMA,riskPIMA)[ind,]),3)
ind = which(lambdaKenya %in% c(0.2,0.5,0.8))
round(t(cbind(lambdaKenya,riskKenya)[ind,]),3)
ind = which(lambdasWDBC %in% c(0.2,0.5,0.8))
round(t(cbind(lambdasWDBC,riskWDBC)[ind,]),3)
|
library(fpa)
### Name: ft2fp
### Title: Convert fixation time to fixation probability
### Aliases: ft2fp
### ** Examples
data(rawdata)
newdata <- ft2fp (rawdata, 4, 3000, 100)
newdata <- ft2fp (rawdata, 4, 3000, 100, norm=TRUE, rm.nr=TRUE, rm.1p=FALSE)
| /data/genthat_extracted_code/fpa/examples/ft2fp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 261 | r | library(fpa)
### Name: ft2fp
### Title: Convert fixation time to fixation probability
### Aliases: ft2fp
### ** Examples
data(rawdata)
newdata <- ft2fp (rawdata, 4, 3000, 100)
newdata <- ft2fp (rawdata, 4, 3000, 100, norm=TRUE, rm.nr=TRUE, rm.1p=FALSE)
|
\name{sam_MVNORM}
\alias{sam_MVNORM}
\alias{MVNORM}
\title{
Create Samples for BAMLSS by Multivariate Normal Approximation
}
\description{
This sampler function for BAMLSS uses estimated \code{\link{parameters}} and the Hessian
information to create samples from a multivariate normal distribution. Note that smoothing
variance uncertainty is not accounted for, therefore, the resulting credible intervals
are most likely too narrow.
}
\usage{
sam_MVNORM(x, y = NULL, family = NULL, start = NULL,
n.samples = 500, hessian = NULL, ...)
MVNORM(x, y = NULL, family = NULL, start = NULL,
n.samples = 500, hessian = NULL, ...)
}
\arguments{
\item{x}{The \code{x} list, as returned from
function \code{\link{bamlss.frame}}, holding all model matrices and other information that is
used for fitting the model. Or an object returned from function \code{\link{bamlss}}.}
\item{y}{The model response, as returned from function \code{\link{bamlss.frame}}.}
\item{family}{A \pkg{bamlss} family object, see \code{\link{family.bamlss}}.}
\item{start}{A named numeric vector containing possible starting values, the names are based on
function \code{\link{parameters}}.}
\item{n.samples}{Sets the number of samples that should be generated.}
\item{hessian}{The Hessian matrix that should be used. Note that the row and column names
must be the same as the names of the \code{\link{parameters}}. If \code{hessian = NULL}
the function uses \code{\link{optim}} to compute the Hessian if it is not provided
within \code{x}.}
\item{\dots}{Arguments passed to function \code{\link{optim}}.}
}
\value{
Function \code{MVNORM()} returns samples of parameters. The samples are provided as a
\code{\link[coda]{mcmc}} matrix.
}
\seealso{
\code{\link{bamlss}}, \code{\link{bamlss.frame}},
\code{\link{bamlss.engine.setup}}, \code{\link{set.starting.values}}, \code{\link{opt_bfit}},
\code{\link{sam_GMCMC}}
}
\examples{
## Simulated data example illustrating
## how to call the sampler function.
## This is done internally within
## the setup of function bamlss().
d <- GAMart()
f <- num ~ s(x1, bs = "ps")
bf <- bamlss.frame(f, data = d, family = "gaussian")
## First, find starting values with optimizer.
o <- with(bf, opt_bfit(x, y, family))
## Sample.
samps <- with(bf, sam_MVNORM(x, y, family, start = o$parameters))
plot(samps)
}
\keyword{regression}
| /man/MVNORM.Rd | no_license | cran/bamlss | R | false | false | 2,404 | rd | \name{sam_MVNORM}
\alias{sam_MVNORM}
\alias{MVNORM}
\title{
Create Samples for BAMLSS by Multivariate Normal Approximation
}
\description{
This sampler function for BAMLSS uses estimated \code{\link{parameters}} and the Hessian
information to create samples from a multivariate normal distribution. Note that smoothing
variance uncertainty is not accounted for, therefore, the resulting credible intervals
are most likely too narrow.
}
\usage{
sam_MVNORM(x, y = NULL, family = NULL, start = NULL,
n.samples = 500, hessian = NULL, ...)
MVNORM(x, y = NULL, family = NULL, start = NULL,
n.samples = 500, hessian = NULL, ...)
}
\arguments{
\item{x}{The \code{x} list, as returned from
function \code{\link{bamlss.frame}}, holding all model matrices and other information that is
used for fitting the model. Or an object returned from function \code{\link{bamlss}}.}
\item{y}{The model response, as returned from function \code{\link{bamlss.frame}}.}
\item{family}{A \pkg{bamlss} family object, see \code{\link{family.bamlss}}.}
\item{start}{A named numeric vector containing possible starting values, the names are based on
function \code{\link{parameters}}.}
\item{n.samples}{Sets the number of samples that should be generated.}
\item{hessian}{The Hessian matrix that should be used. Note that the row and column names
must be the same as the names of the \code{\link{parameters}}. If \code{hessian = NULL}
the function uses \code{\link{optim}} to compute the Hessian if it is not provided
within \code{x}.}
\item{\dots}{Arguments passed to function \code{\link{optim}}.}
}
\value{
Function \code{MVNORM()} returns samples of parameters. The samples are provided as a
\code{\link[coda]{mcmc}} matrix.
}
\seealso{
\code{\link{bamlss}}, \code{\link{bamlss.frame}},
\code{\link{bamlss.engine.setup}}, \code{\link{set.starting.values}}, \code{\link{opt_bfit}},
\code{\link{sam_GMCMC}}
}
\examples{
## Simulated data example illustrating
## how to call the sampler function.
## This is done internally within
## the setup of function bamlss().
d <- GAMart()
f <- num ~ s(x1, bs = "ps")
bf <- bamlss.frame(f, data = d, family = "gaussian")
## First, find starting values with optimizer.
o <- with(bf, opt_bfit(x, y, family))
## Sample.
samps <- with(bf, sam_MVNORM(x, y, family, start = o$parameters))
plot(samps)
}
\keyword{regression}
|
fileurl <- url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlCode <- readLines(fileurl)
close(connection)
c(nchar(htmlCode[10]), nchar(htmlCode[20]), nchar(htmlCode[30]), nchar(htmlCode[100]))
| /Course 3/Week 2/Quiz/readHTML.R | no_license | molgarc/Coursera_Data_Science | R | false | false | 199 | r | fileurl <- url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlCode <- readLines(fileurl)
close(connection)
c(nchar(htmlCode[10]), nchar(htmlCode[20]), nchar(htmlCode[30]), nchar(htmlCode[100]))
|
# Plotting Assignment 1 for Exploratory Data Analysis
# Refs
# http://archive.ics.uci.edu/ml/
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
#Tasks
#Tasks
#Goal here is simply to examine how household energy usage varies over a 2-day period (2007-02-01 and 2007-02-02)
# 1) Construct the plot and save it to a PNG file with a width of 480 pixels and a height of 480 pixels.
# 2) Name of the plot file as plot4.png
# 3) Create a separate R code file (plot4.R) that constructs the corresponding plot, i.e. code in plot4.R constructs the plot4.png plot. Your code file should include code for reading the data so that the plot can be fully reproduced. You must also include the code that creates the PNG file.
#a. cleanup
rm(list = ls())
#b. download and unzip data
baseDir <- "."
#b.1 create data sub directory if doesn't exist
dataDir <- paste(baseDir, "data", sep = "/")
if(!file.exists(dataDir)){
dir.create(dataDir)
}
#b.2 download dataset if doen't exist
zipFilePath <- paste(dataDir, "Dataset.zip", sep = "/")
if(!file.exists(zipFilePath)){
zipFileUlr <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(zipFileUlr, zipFilePath)
dateDownloaded <- date()
cat("Dataset downloaded on:", dateDownloaded,"\n")
}
#b.3 unzip and create data directory if doesn't exist
dataSetDir <- paste(dataDir, "household_power_consumption", sep = "/")
if(!file.exists(dataSetDir)){
unzip(zipFilePath, exdir = dataSetDir)
}
list.files(dataSetDir)
#c read the datasets
hpcPath <- paste(dataSetDir, "household_power_consumption.txt", sep = "/")
hpc <- read.table(hpcPath, header=T, sep=';', stringsAsFactors=F, dec = ".")
#c.1 filter data
subSetHpc <- hpc[hpc$Date %in% c("1/2/2007","2/2/2007"), ]
#D. plot
globalActivePower <- as.numeric(subSetHpc$Global_reactive_power)
datetime <- strptime(paste(subSetHpc$Date, subSetHpc$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalReactivePower <- as.numeric(subSetHpc$Global_reactive_power)
voltage <- as.numeric(subSetHpc$Voltage)
subMetering1 <- as.numeric(subSetHpc$Sub_metering_1)
subMetering2 <- as.numeric(subSetHpc$Sub_metering_2)
subMetering3 <- as.numeric(subSetHpc$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global Reactive Power")
dev.off()
| /plot4.R | no_license | prabakaranece/ExData_Plotting1 | R | false | false | 2,862 | r | # Plotting Assignment 1 for Exploratory Data Analysis
# Refs
# http://archive.ics.uci.edu/ml/
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
#Tasks
#Tasks
#Goal here is simply to examine how household energy usage varies over a 2-day period (2007-02-01 and 2007-02-02)
# 1) Construct the plot and save it to a PNG file with a width of 480 pixels and a height of 480 pixels.
# 2) Name of the plot file as plot4.png
# 3) Create a separate R code file (plot4.R) that constructs the corresponding plot, i.e. code in plot4.R constructs the plot4.png plot. Your code file should include code for reading the data so that the plot can be fully reproduced. You must also include the code that creates the PNG file.
#a. cleanup
rm(list = ls())
#b. download and unzip data
baseDir <- "."
#b.1 create data sub directory if doesn't exist
dataDir <- paste(baseDir, "data", sep = "/")
if(!file.exists(dataDir)){
dir.create(dataDir)
}
#b.2 download dataset if doen't exist
zipFilePath <- paste(dataDir, "Dataset.zip", sep = "/")
if(!file.exists(zipFilePath)){
zipFileUlr <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(zipFileUlr, zipFilePath)
dateDownloaded <- date()
cat("Dataset downloaded on:", dateDownloaded,"\n")
}
#b.3 unzip and create data directory if doesn't exist
dataSetDir <- paste(dataDir, "household_power_consumption", sep = "/")
if(!file.exists(dataSetDir)){
unzip(zipFilePath, exdir = dataSetDir)
}
list.files(dataSetDir)
#c read the datasets
hpcPath <- paste(dataSetDir, "household_power_consumption.txt", sep = "/")
hpc <- read.table(hpcPath, header=T, sep=';', stringsAsFactors=F, dec = ".")
#c.1 filter data
subSetHpc <- hpc[hpc$Date %in% c("1/2/2007","2/2/2007"), ]
#D. plot
globalActivePower <- as.numeric(subSetHpc$Global_reactive_power)
datetime <- strptime(paste(subSetHpc$Date, subSetHpc$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalReactivePower <- as.numeric(subSetHpc$Global_reactive_power)
voltage <- as.numeric(subSetHpc$Voltage)
subMetering1 <- as.numeric(subSetHpc$Sub_metering_1)
subMetering2 <- as.numeric(subSetHpc$Sub_metering_2)
subMetering3 <- as.numeric(subSetHpc$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global Reactive Power")
dev.off()
|
############################################################################
#-----We will be looking at 3 Automated Model Selection Procedures--------------
#-----1. Backward Elimination Method--------------------------------------------
#-----Works with the most general model and drops variables one by one until the
#-----best model is reached.----------------------------------------------------
#
#-----using command step()------------------------------------------------------
step(lm(Oxygen ~ Age + Weight + RunTime + RestPulse + RunPulse + MaxPulse,
data = fitness), direction = "backward")
#-----2. Forward Step Method - Intercept of the Model --------------------------
#-----Starts with the simplest model of all and adds suitable variables, one by
#-----one, until the best model is reached.-------------------------------------
#-----using command step()------------------------------------------------------
step(lm(Oxygen ~ 1, data = fitness), direction = "forward",
scope = ~ Age + Weight + RunTime + RestPulse + RunPulse + MaxPulse)
#-----3. Stepwise Method (both directions - Backwards and Forward---------------
#-----The Stepwise procedure combines the two previous methods (Forward and-----
#-----Backwards Methods), where variables can be added and dropped.-------------
#-----In all these 3 methods, the AIC is used as the criteria to select tje model,
#-----which is based in the following rule: the lower the AIC the better the model.
#-----using command step()------------------------------------------------------
#-----starting the method with all the variables--------------------------------
step(lm(Oxygen ~ Age + Weight + RunTime + RestPulse + RunPulse + MaxPulse,
data = fitness), direction = "both")
#-----data set------------------------------------------------------------------
# getwd()
# dim(fitness)
# head(fitness, 31)
# length(fitness)
# fitness[2:4, 5:7]
# fitness[ , 1]
# names(fitness)
# fitness$Age
# mean(fitness$Age)
# sd(fitness$Age)
# sum(fitness$Age)
#-------------------------------------------------------------------------------
w
summary(fitnessLM)
names(fitnessLM)
str(fitnessLM)
head(fitnessLM$x)
#-----Recalling y = XB----------------------------------------------------------
prediction <- fitnessLM$x%*%coef(fitnessLM)
head(prediction)
summary(fitnessLM)
coef(fitnessLM)
#-----Confidence Interval CI----------------------------------------------------
confint(fitnessLM)
#-----residuals-----------------------------------------------------------------
head(resid(fitnessLM))
#-----Checking the Assumptions--------------------------------------------------
#-----1. The adquacy of our model for the signal
#-----2. The adquacy of our model fro the noise
qqnorm(resid(fitnessLM))
#-----Shapiro-Wilk normality test-----------------------------------------------
#an approximate p-value < 0.1 - the test is adequate
shapiro.test(resid(fitnessLM))
fitnessResid <- resid(fitnessLM)
par(mfrow = c(2, 2))
plot(fitness$Age, fitnessResid)
plot(fitness$Weight, fitnessResid)
plot(fitness$Oxygen, fitnessResid)
plot(fitness$RunTime, fitnessResid)
plot(fitness$RestPulse, fitnessResid)
plot(fitness$RunPulse, fitnessResid)
plot(factor(fitness$Age), fitnessResid)
plot(factor(fitness$Weight), fitnessResid)
plot(factor(fitness$Oxygen), fitnessResid)
plot(factor(fitness$RunTime), fitnessResid)
plot(factor(fitness$RestPulse), fitnessResid)
plot(factor(fitness$RunPulse), fitnessResid)
plot(fitnessResid, fitted(fitnessLM))
AIC(fitnessLM)
par(mfrow = c(2, 2))
plot(fitnessLM)
#-----Backward Elimination Method-----------------------------------------------
step(fitnessLM)
#-----Predictions--------------------------------------------------------------
table(fitness$Age)
table(fitness$Weight)
table(fitness$Oxygen)
table(fitness$RunTime)
table(fitness$RestPulse)
table(fitness$RunPulse)
| /EXERCICIO PRATICO PARA FITNESS DATA.R | no_license | DazLoop/MT5763_proj2_individual | R | false | false | 3,903 | r | ############################################################################
#-----We will be looking at 3 Automated Model Selection Procedures--------------
#-----1. Backward Elimination Method--------------------------------------------
#-----Works with the most general model and drops variables one by one until the
#-----best model is reached.----------------------------------------------------
#
#-----using command step()------------------------------------------------------
step(lm(Oxygen ~ Age + Weight + RunTime + RestPulse + RunPulse + MaxPulse,
data = fitness), direction = "backward")
#-----2. Forward Step Method - Intercept of the Model --------------------------
#-----Starts with the simplest model of all and adds suitable variables, one by
#-----one, until the best model is reached.-------------------------------------
#-----using command step()------------------------------------------------------
step(lm(Oxygen ~ 1, data = fitness), direction = "forward",
scope = ~ Age + Weight + RunTime + RestPulse + RunPulse + MaxPulse)
#-----3. Stepwise Method (both directions - Backwards and Forward---------------
#-----The Stepwise procedure combines the two previous methods (Forward and-----
#-----Backwards Methods), where variables can be added and dropped.-------------
#-----In all these 3 methods, the AIC is used as the criteria to select tje model,
#-----which is based in the following rule: the lower the AIC the better the model.
#-----using command step()------------------------------------------------------
#-----starting the method with all the variables--------------------------------
step(lm(Oxygen ~ Age + Weight + RunTime + RestPulse + RunPulse + MaxPulse,
data = fitness), direction = "both")
#-----data set------------------------------------------------------------------
# getwd()
# dim(fitness)
# head(fitness, 31)
# length(fitness)
# fitness[2:4, 5:7]
# fitness[ , 1]
# names(fitness)
# fitness$Age
# mean(fitness$Age)
# sd(fitness$Age)
# sum(fitness$Age)
#-------------------------------------------------------------------------------
w
summary(fitnessLM)
names(fitnessLM)
str(fitnessLM)
head(fitnessLM$x)
#-----Recalling y = XB----------------------------------------------------------
prediction <- fitnessLM$x%*%coef(fitnessLM)
head(prediction)
summary(fitnessLM)
coef(fitnessLM)
#-----Confidence Interval CI----------------------------------------------------
confint(fitnessLM)
#-----residuals-----------------------------------------------------------------
head(resid(fitnessLM))
#-----Checking the Assumptions--------------------------------------------------
#-----1. The adquacy of our model for the signal
#-----2. The adquacy of our model fro the noise
qqnorm(resid(fitnessLM))
#-----Shapiro-Wilk normality test-----------------------------------------------
#an approximate p-value < 0.1 - the test is adequate
shapiro.test(resid(fitnessLM))
fitnessResid <- resid(fitnessLM)
par(mfrow = c(2, 2))
plot(fitness$Age, fitnessResid)
plot(fitness$Weight, fitnessResid)
plot(fitness$Oxygen, fitnessResid)
plot(fitness$RunTime, fitnessResid)
plot(fitness$RestPulse, fitnessResid)
plot(fitness$RunPulse, fitnessResid)
plot(factor(fitness$Age), fitnessResid)
plot(factor(fitness$Weight), fitnessResid)
plot(factor(fitness$Oxygen), fitnessResid)
plot(factor(fitness$RunTime), fitnessResid)
plot(factor(fitness$RestPulse), fitnessResid)
plot(factor(fitness$RunPulse), fitnessResid)
plot(fitnessResid, fitted(fitnessLM))
AIC(fitnessLM)
par(mfrow = c(2, 2))
plot(fitnessLM)
#-----Backward Elimination Method-----------------------------------------------
step(fitnessLM)
#-----Predictions--------------------------------------------------------------
table(fitness$Age)
table(fitness$Weight)
table(fitness$Oxygen)
table(fitness$RunTime)
table(fitness$RestPulse)
table(fitness$RunPulse)
|
# Bellman initiation
Bellman_init <- function(param_list,K,GH_init,cheby_init,verbose=F){
x <- cheby_init$Adjusted.base.nodes
nx <- nrow(x)
colnames(x) <- state.name
num_cheb <- cheby_init$Cheb.coefficient.num
m <- with(param_list, (tau1-beta*log(K)+(1-beta)*lambda1) / (2*tau2-2*lambda2*(1-beta) ))
n <- with(param_list, 2*tau2/(2*tau2 - 2*lambda2*(1-beta)) )
Phi.bound <- 10e-8
cat("m=",m,",n=",n,"\n")
x_prim <- array(NA,c(nx, K, dim(GH_init$nodes)))
flow_u <- array(NA,c(nx, K))
ET <- array(NA,c(nx, K, num_cheb))
n.nodes <- nrow(GH_init$nodes)
for(i in 1:nx){
for(k in 1:K){
sigma <- sigma_Q[k]
a0 <- -(m/n + mu_Q_fn(k,x[i,y.idx]))/sigma
b0 <- -(m/(n-1) + mu_Q_fn(k,x[i,y.idx]))/sigma
mu <- x[i,I.idx] + mu_Q_fn(k,x[i,y.idx])
if(pnorm(a0)<Phi.bound){
EQ_lower <- 0
EQ2_lower <- 0
}else{
rho <- dnorm(a0)/pnorm(a0)
EQ_lower <- mu - sigma*rho
EQ2_lower <- mu^2 - 2*mu*sigma*rho + sigma^2*(1+a0*rho)
}
if(pnorm(-b0)<Phi.bound){
EQ_upper <- 0
EQ2_upper <- 0
}else{
rho <- dnorm(b0)/pnorm(-b0)
EQ_upper <- mu + sigma*rho
EQ2_upper <- mu^2 + 2*mu*sigma*rho +sigma^2*(1+b0*rho)
}
p.mid <- pnorm(b0) - pnorm(a0)
if(p.mid<Phi.bound){
EQ_mid <- 0
EQ2_mid <- 0
Ec_mid <- 0
Ec2_mid <- 0
EI_mid <- 0
EI2_mid <- 0
}else{
EQ_mid <- mu - sigma*(dnorm(b0)-dnorm(a0))/p.mid
EQ2_mid <- mu^2 - 2*mu*sigma*(dnorm(b0) - dnorm(a0))/p.mid + sigma^2*(1 - (b0*dnorm(b0) - a0*dnorm(a0))/p.mid)
Ec_mid <- m*p.mid + n*EQ_mid
Ec2_mid <- m^2*p.mid + 2*m*n*EQ_mid + n^2*EQ2_mid
EI_mid <- -m*p.mid - (n-1)*EQ_mid
EI2_mid <- m^2*p.mid + 2*m*(n-1)*EQ_mid + (n-1)^2*EQ2_mid
}
# Expected consumption, inventory and flow utility
Ec <- EQ_upper + Ec_mid
Ec2 <- EQ2_upper + Ec2_mid
EI_prim <- EQ_lower + EI_mid
EI2_prim <- EQ2_lower + EI2_mid
flow_u[i,k] <- with(param_list, lambda1*Ec + lambda2*Ec2 - tau1*EI_prim - tau2*EI2_prim) + omega_fn(k,x[i,y.idx])
# The nodes at which to compute integral of value function
I_prim <- cbind(-m-(n-1)*(mu + sqrt(2)*sigma*GH_init$nodes[,I.idx]),
mu + sqrt(2)*sigma*GH_init$nodes[,I.idx] )
I_prim <- apply(I_prim,1,function(x) min(max(x[1],0),x[2]) )
y_prim <- x[i,y.idx] + sqrt(2)*param_list$sigma_y * GH_init$nodes[,y.idx]
x_prim[i,k,,I.idx] <- sapply(I_prim, function(x) min(max(x,cheby_init$lower[I.idx]),cheby_init$upper[I.idx]) )
x_prim[i,k,,y.idx] <- sapply(y_prim, function(x) min(max(x,cheby_init$lower[y.idx]),cheby_init$upper[y.idx]) )
# Re-normalize the nodes to compute the expected basis function
x_prim_adj <- 2*(x_prim[i,k,,] - rep(1,n.nodes)%*%t(cheby_init$lower))/
(rep(1,n.nodes) %*% t(cheby_init$upper - cheby_init$lower)) - 1
T <- apply(x_prim_adj,1,cheb_polynm_multD,cheby_init$Polynomial.degree)
ET[i,k,] <- apply(T,1,function(x) x %*% GH_init$weights)/pi
}
}
policy_init <- rep(1,nx)
theta_init <- rep(0,num_cheb)
value_init <- cheby_init$Basis.polynomial %*% theta_init
if(verbose){
cat("Valuation function initialization\n")
cat("\nThe state to be evaluated:\n"); print(x)
cat("\nFlow utility at the current parameter: \n"); print(flow_u)
# cat("\nThe states to be evaluated in the GH-quadrature:\n")
# for(i in 1:nx){
# for(k in 1:K){
# cat("State",i,"k=",k,"\n"); print(x_prim[i,k,,])
# }
# }
# cat("\nExpected basis function in the value function: \n")
# for(k in 1:K){
# cat("k=",k,"\n"); print(ET[,k,]);
# }
}
list(param_list = param_list,
state = x,
next.state = x_prim,
policy = policy_init,
Cheb.theta = theta_init,
value.fn = value_init,
flow.utility= flow_u,
ET = ET,
consumption = c(m=m,n=n),
status = 1
)
}
# Bellman operator
Bellman_operator <- function(param_list,DP_list){
Ew <- apply(DP_list$ET,c(1,2),function(x) x%*% DP_list$Cheb.theta)
v <- DP_list$flow.utility + param_list$beta * Ew
max_v <- apply(v,1,max)
Tw <- log(rowSums(exp(v-max_v)))
return(Tw)
} | /Basket DP/Simulation_Cheby/Value_iteration_functions.R | no_license | Superet/Expenditure | R | false | false | 4,073 | r |
# Bellman initiation
# Bellman initiation
#
# Builds every object the value-iteration loop needs: the (adjusted) Chebyshev
# state grid, the closed-form consumption-rule coefficients (m, n), per-state
# flow utilities, next-period states at the Gauss-Hermite quadrature nodes,
# and the expected Chebyshev basis functions ET used to evaluate E[w(x')].
#
# Args:
#   param_list: model parameters (tau1, tau2, lambda1, lambda2, beta, sigma_y).
#   K:          number of discrete alternatives.
#   GH_init:    Gauss-Hermite quadrature object ($nodes, $weights).
#   cheby_init: Chebyshev approximation setup ($Adjusted.base.nodes, $lower,
#               $upper, $Polynomial.degree, $Basis.polynomial,
#               $Cheb.coefficient.num).
#   verbose:    print diagnostics when TRUE.
#
# Returns: a list bundling the state grid, next-period states, initial
# policy/value objects, flow utilities, expected basis functions (ET),
# consumption coefficients and a status flag.
#
# NOTE(review): relies on globals defined elsewhere in this project:
# state.name, sigma_Q, mu_Q_fn, omega_fn, I.idx, y.idx, cheb_polynm_multD.
Bellman_init <- function(param_list,K,GH_init,cheby_init,verbose=F){
	x <- cheby_init$Adjusted.base.nodes
	nx <- nrow(x)
	colnames(x) <- state.name
	num_cheb <- cheby_init$Cheb.coefficient.num
	# Linear consumption rule c = m + n*Q implied by the model's first-order condition.
	m <- with(param_list, (tau1-beta*log(K)+(1-beta)*lambda1) / (2*tau2-2*lambda2*(1-beta) ))
	n <- with(param_list, 2*tau2/(2*tau2 - 2*lambda2*(1-beta)) )
	# Probabilities below this bound are treated as zero when computing truncated moments.
	# NOTE(review): 10e-8 equals 1e-7 -- confirm 1e-8 was not intended.
	Phi.bound <- 10e-8
	cat("m=",m,",n=",n,"\n")
	x_prim <- array(NA,c(nx, K, dim(GH_init$nodes)))
	flow_u <- array(NA,c(nx, K))
	ET <- array(NA,c(nx, K, num_cheb))
	n.nodes <- nrow(GH_init$nodes)
	for(i in 1:nx){
		for(k in 1:K){
			sigma <- sigma_Q[k]
			# Standardized truncation points splitting Q into lower/middle/upper regimes.
			a0 <- -(m/n + mu_Q_fn(k,x[i,y.idx]))/sigma
			b0 <- -(m/(n-1) + mu_Q_fn(k,x[i,y.idx]))/sigma
			mu <- x[i,I.idx] + mu_Q_fn(k,x[i,y.idx])
			# First and second truncated-normal moments on the lower tail (Q <= a0).
			if(pnorm(a0)<Phi.bound){
				EQ_lower <- 0
				EQ2_lower <- 0
			}else{
				rho <- dnorm(a0)/pnorm(a0)
				EQ_lower <- mu - sigma*rho
				EQ2_lower <- mu^2 - 2*mu*sigma*rho + sigma^2*(1+a0*rho)
			}
			# First and second truncated-normal moments on the upper tail (Q >= b0).
			if(pnorm(-b0)<Phi.bound){
				EQ_upper <- 0
				EQ2_upper <- 0
			}else{
				rho <- dnorm(b0)/pnorm(-b0)
				EQ_upper <- mu + sigma*rho
				EQ2_upper <- mu^2 + 2*mu*sigma*rho +sigma^2*(1+b0*rho)
			}
			# Middle regime (a0 < Q < b0): interior consumption; also moments of c and I'.
			p.mid <- pnorm(b0) - pnorm(a0)
			if(p.mid<Phi.bound){
				EQ_mid <- 0
				EQ2_mid <- 0
				Ec_mid <- 0
				Ec2_mid <- 0
				EI_mid <- 0
				EI2_mid <- 0
			}else{
				EQ_mid <- mu - sigma*(dnorm(b0)-dnorm(a0))/p.mid
				EQ2_mid <- mu^2 - 2*mu*sigma*(dnorm(b0) - dnorm(a0))/p.mid + sigma^2*(1 - (b0*dnorm(b0) - a0*dnorm(a0))/p.mid)
				Ec_mid <- m*p.mid + n*EQ_mid
				Ec2_mid <- m^2*p.mid + 2*m*n*EQ_mid + n^2*EQ2_mid
				EI_mid <- -m*p.mid - (n-1)*EQ_mid
				EI2_mid <- m^2*p.mid + 2*m*(n-1)*EQ_mid + (n-1)^2*EQ2_mid
			}
			# Expected consumption, inventory and flow utility
			Ec <- EQ_upper + Ec_mid
			Ec2 <- EQ2_upper + Ec2_mid
			EI_prim <- EQ_lower + EI_mid
			EI2_prim <- EQ2_lower + EI2_mid
			flow_u[i,k] <- with(param_list, lambda1*Ec + lambda2*Ec2 - tau1*EI_prim - tau2*EI2_prim) + omega_fn(k,x[i,y.idx])
			# The nodes at which to compute the integral of the value function.
			I_prim <- cbind(-m-(n-1)*(mu + sqrt(2)*sigma*GH_init$nodes[,I.idx]),
							mu + sqrt(2)*sigma*GH_init$nodes[,I.idx] )
			I_prim <- apply(I_prim,1,function(v) min(max(v[1],0),v[2]) )
			y_prim <- x[i,y.idx] + sqrt(2)*param_list$sigma_y * GH_init$nodes[,y.idx]
			# Clamp next-period states to the Chebyshev approximation domain.
			x_prim[i,k,,I.idx] <- sapply(I_prim, function(v) min(max(v,cheby_init$lower[I.idx]),cheby_init$upper[I.idx]) )
			x_prim[i,k,,y.idx] <- sapply(y_prim, function(v) min(max(v,cheby_init$lower[y.idx]),cheby_init$upper[y.idx]) )
			# Re-normalize the nodes to [-1, 1] to compute the expected basis function.
			x_prim_adj <- 2*(x_prim[i,k,,] - rep(1,n.nodes)%*%t(cheby_init$lower))/
						(rep(1,n.nodes) %*% t(cheby_init$upper - cheby_init$lower)) - 1
			# basis_T (renamed from "T", which masks the TRUE shorthand): one column
			# of Chebyshev basis values per quadrature node.
			basis_T <- apply(x_prim_adj,1,cheb_polynm_multD,cheby_init$Polynomial.degree)
			ET[i,k,] <- apply(basis_T,1,function(b) b %*% GH_init$weights)/pi
		}
	}
	# Initialize with the first alternative everywhere and a zero value function.
	policy_init <- rep(1,nx)
	theta_init <- rep(0,num_cheb)
	value_init <- cheby_init$Basis.polynomial %*% theta_init
	if(verbose){
		cat("Valuation function initialization\n")
		cat("\nThe state to be evaluated:\n"); print(x)
		cat("\nFlow utility at the current parameter: \n"); print(flow_u)
	}
	list(	param_list	= param_list,
			state		= x,
			next.state	= x_prim,
			policy		= policy_init,
			Cheb.theta	= theta_init,
			value.fn	= value_init,
			flow.utility= flow_u,
			ET			= ET,
			consumption	= c(m=m,n=n),
			status 		= 1
		)
}
# Bellman operator
#
# One application of the Bellman contraction to the current value-function
# approximation stored in DP_list.
#
# Args:
#   param_list: model parameters; only beta (the discount factor) is used.
#   DP_list:    DP state from Bellman_init()/previous iterations; uses
#               $ET (expected Chebyshev basis per state/choice),
#               $Cheb.theta (basis coefficients) and $flow.utility.
#
# Returns: Tw, the updated value at each state node, aggregated over the K
# choices with an exp-normalized log-sum.
Bellman_operator <- function(param_list,DP_list){
	# E[w(x')] for every (state, choice): expected basis times current coefficients.
	Ew <- apply(DP_list$ET,c(1,2),function(x) x%*% DP_list$Cheb.theta)
	# Choice-specific values: flow utility plus discounted continuation value.
	v <- DP_list$flow.utility + param_list$beta * Ew
	# Row max subtracted for numerical stability before exponentiating.
	# NOTE(review): max_v is not added back afterwards; the standard log-sum-exp
	# would be max_v + log(rowSums(exp(v-max_v))) -- confirm this is intentional.
	max_v <- apply(v,1,max)
	Tw <- log(rowSums(exp(v-max_v)))
	return(Tw)
} |
library(adapr)
### Name: getFileSysTime
### Title: Return time of file system
### Aliases: getFileSysTime
### ** Examples
## Not run:
##D getFileSysTime()
## End(Not run)
| /data/genthat_extracted_code/adapr/examples/getFileSysTime.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 182 | r | library(adapr)
### Name: getFileSysTime
### Title: Return time of file system
### Aliases: getFileSysTime
### ** Examples
## Not run:
##D getFileSysTime()
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svy_scale_weights.R
\name{scale_weights}
\alias{scale_weights}
\title{Rescale design weights for multilevel analysis}
\usage{
scale_weights(x, cluster.id, pweight)
}
\arguments{
\item{x}{A data frame.}
\item{cluster.id}{Variable indicating the grouping structure (strata) of
the survey data (level-2-cluster variable).}
\item{pweight}{Variable indicating the probability (design or sampling)
weights of the survey data (level-1-weight).}
}
\value{
\code{x}, with two new variables: \code{svywght_a} and \code{svywght_b},
which represent the rescaled design weights to use in multilevel models
(use these variables for the \code{weights} argument).
}
\description{
Most functions to fit multilevel and mixed effects models only
allow to specify frequency weights, but not design (i.e. sampling or probability)
weights, which should be used when analyzing complex samples and survey data.
\code{scale_weights()} implements an algorithm proposed by Asparouhov (2006)
and Carle (2009) to rescale design weights in survey data to account for
the grouping structure of multilevel models, which then can be used for
multilevel modelling.
}
\details{
Rescaling is based on two methods: For \code{svywght_a}, the sample
weights \code{pweight} are adjusted by a factor that represents the proportion
of cluster size divided by the sum of sampling weights within each cluster.
The adjustment factor for \code{svywght_b} is the sum of sample weights
within each cluster divided by the sum of squared sample weights within
each cluster (see Carle (2009), Appendix B).
\cr \cr
Regarding the choice between scaling methods A and B, Carle suggests
that "analysts who wish to discuss point estimates should report results
based on weighting method A. For analysts more interested in residual
between-cluster variance, method B may generally provide the least biased
estimates". In general, it is recommended to fit a non-weighted model
and weighted models with both scaling methods and when comparing the
models, see whether the "inferential decisions converge", to gain
confidence in the results.
\cr \cr
Though the bias of scaled weights decreases with increasing cluster size,
method A is preferred when insufficient or low cluster size is a concern.
\cr \cr
The cluster ID and probably PSU may be used as random effects (e.g.
nested design, or cluster and PSU as varying intercepts), depending
on the survey design that should be mimicked.
}
\examples{
data(nhanes_sample)
scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR)
library(lme4)
nhanes_sample <- scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR)
glmer(
total ~ factor(RIAGENDR) * (log(age) + factor(RIDRETH1)) + (1 | SDMVPSU),
family = poisson(),
data = nhanes_sample,
weights = svywght_a
)
}
\references{
Carle AC. \emph{Fitting multilevel models in complex survey data with design weights: Recommendations} BMC Medical Research Methodology 2009, 9(49): 1-13
\cr \cr
Asparouhov T. \emph{General Multi-Level Modeling with Sampling Weights} Communications in Statistics - Theory and Methods 2006, 35: 439-460
}
| /man/scale_weights.Rd | no_license | Mattkaye3/sjstats | R | false | true | 3,236 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svy_scale_weights.R
\name{scale_weights}
\alias{scale_weights}
\title{Rescale design weights for multilevel analysis}
\usage{
scale_weights(x, cluster.id, pweight)
}
\arguments{
\item{x}{A data frame.}
\item{cluster.id}{Variable indicating the grouping structure (strata) of
the survey data (level-2-cluster variable).}
\item{pweight}{Variable indicating the probability (design or sampling)
weights of the survey data (level-1-weight).}
}
\value{
\code{x}, with two new variables: \code{svywght_a} and \code{svywght_b},
which represent the rescaled design weights to use in multilevel models
(use these variables for the \code{weights} argument).
}
\description{
Most functions to fit multilevel and mixed effects models only
allow to specify frequency weights, but not design (i.e. sampling or probability)
weights, which should be used when analyzing complex samples and survey data.
\code{scale_weights()} implements an algorithm proposed by Asparouhov (2006)
and Carle (2009) to rescale design weights in survey data to account for
the grouping structure of multilevel models, which then can be used for
multilevel modelling.
}
\details{
Rescaling is based on two methods: For \code{svywght_a}, the sample
weights \code{pweight} are adjusted by a factor that represents the proportion
of cluster size divided by the sum of sampling weights within each cluster.
The adjustment factor for \code{svywght_b} is the sum of sample weights
within each cluster divided by the sum of squared sample weights within
each cluster (see Carle (2009), Appendix B).
\cr \cr
Regarding the choice between scaling methods A and B, Carle suggests
that "analysts who wish to discuss point estimates should report results
based on weighting method A. For analysts more interested in residual
between-cluster variance, method B may generally provide the least biased
estimates". In general, it is recommended to fit a non-weighted model
and weighted models with both scaling methods and when comparing the
models, see whether the "inferential decisions converge", to gain
confidence in the results.
\cr \cr
Though the bias of scaled weights decreases with increasing cluster size,
method A is preferred when insufficient or low cluster size is a concern.
\cr \cr
The cluster ID and probably PSU may be used as random effects (e.g.
nested design, or cluster and PSU as varying intercepts), depending
on the survey design that should be mimicked.
}
\examples{
data(nhanes_sample)
scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR)
library(lme4)
nhanes_sample <- scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR)
glmer(
total ~ factor(RIAGENDR) * (log(age) + factor(RIDRETH1)) + (1 | SDMVPSU),
family = poisson(),
data = nhanes_sample,
weights = svywght_a
)
}
\references{
Carle AC. \emph{Fitting multilevel models in complex survey data with design weights: Recommendations} BMC Medical Research Methodology 2009, 9(49): 1-13
\cr \cr
Asparouhov T. \emph{General Multi-Level Modeling with Sampling Weights} Communications in Statistics - Theory and Methods 2006, 35: 439-460
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDFtestingSuite.R
\name{findMaxError}
\alias{findMaxError}
\title{Locate where the maximum error occurs between two CDFs}
\usage{
findMaxError(Y, est, range, gran, ...)
}
\arguments{
\item{Y}{The vector output of a non-differentially private CDF
computation (cumulative count bins)}
\item{est}{The vector output of a differentially private CDF
computation (cumulative count bins)}
\item{range}{A vector length 2 containing user-specified min and max to
truncate the universe to}
\item{gran}{The smallest unit of measurement in the data (one [year] for
a list of ages)}
\item{...}{Optionally add additional parameters. This is primarily used to allow automated
execution of varied diagnostic functions.}
}
\value{
A single value, the value at which the largest absolute vertical
difference between
parallel observations in the private- and true-CDF vectors occurs.
}
\description{
Find the location of the maximum direct error between a
non-private CDF and a DP approximation of that CDF.
}
\examples{
findMaxError(c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1), c(.1,.2,.3,.3,.3,.3,.3,.3,.4,1), c(1,10),1)
}
| /man/findMaxError.Rd | no_license | cran/CDF.PSIdekick | R | false | true | 1,227 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDFtestingSuite.R
\name{findMaxError}
\alias{findMaxError}
\title{Locate where the maximum error occurs between two CDFs}
\usage{
findMaxError(Y, est, range, gran, ...)
}
\arguments{
\item{Y}{The vector output of a non-differentially private CDF
computation (cumulative count bins)}
\item{est}{The vector output of a differentially private CDF
computation (cumulative count bins)}
\item{range}{A vector length 2 containing user-specified min and max to
truncate the universe to}
\item{gran}{The smallest unit of measurement in the data (one [year] for
a list of ages)}
\item{...}{Optionally add additional parameters. This is primarily used to allow automated
execution of varied diagnostic functions.}
}
\value{
A single value, the value at which the largest absolute vertical
difference between
parallel observations in the private- and true-CDF vectors occurs.
}
\description{
Find the location of the maximum direct error between a
non-private CDF and a DP approximation of that CDF.
}
\examples{
findMaxError(c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1), c(.1,.2,.3,.3,.3,.3,.3,.3,.4,1), c(1,10),1)
}
|
# ---- Session setup ----
# NOTE(review): these are interactive-use conveniences; rm(list=ls()) wipes
# the global environment and is discouraged in scripts meant to be source()d.
# Clear plots
if(!is.null(dev.list())) dev.off()
# Clear console ("\014" is the form feed that clears the RStudio console)
cat("\014")
# Clean workspace
rm(list=ls())
library(tidyverse)
library(readxl)
library(edgeR)
library(RColorBrewer)
library(scales)
library(cowplot)
# ---- User configuration ----
campaignName<- "cd22cells_vs_YZ" # No extension; the campaign file must be saved in .xlsx format ONLY.
dirFiles <- "~/Google Drive/Derda Lab/Glycan Array/AllSeqFiles/" # Folder where all the .txt LiGA files are stored; file names come from the campaign sheet.
dirCampaign <- "~/Dropbox/Database/Campaign/" # Folder holding the campaign .xlsx describing the experiment (see Dropbox/Database/Campaign/CD22_vs_YZ.xlsx for an example).
dirSave<- dirCampaign # Folder where all the images will be saved.
dirMaldi<- "~/Dropbox/Database/" # Location of the MALDI names file. Default is Dropbox/Database/
dirOrder<- "~/Dropbox/Database/" # Location of the x-axis plotting-order file. Default is Dropbox/Database/
x_axis<- 4 ## Options: 1=Mod, 2=Glytoucan, 3=IUPAC, 4=CommonName
TypeOfLibrary<-"YZ" # Options: 3x3 and YZ.
#-------------------------------------------####################-----------------------------------------------------------------
###Do not change anything beyond this point--------------------------------------------------------------------------------------
setwd(dirSave)
# Load the campaign file listing the Test/Control/Naive sequencing runs.
# (paste0 has no sep argument; the stray sep="" calls were removed.)
fileC<-read_excel(paste0(dirCampaign, campaignName, ".xlsx"))
# ---- Load the test data ----
# For each test run: read its "-ppm.txt" count table, drop bookkeeping
# columns, rename replicate columns uniquely, full-join all tables on the
# glycan code "Mod", sum duplicate Mod rows, and drop the "XX" code.
testFiles<-fileC$Test
testFiles<-testFiles[!is.na(testFiles)]
test<-lapply(paste0(dirFiles, testFiles, "-ppm.txt"), function(f)
  read.table(f, header = TRUE, stringsAsFactors = FALSE, fill = TRUE))
test2<-lapply(test, function(d) d[!(names(d) %in% c("index", "mindex", "Primer","Nuc", "AA"))]) # drop the unnecessary columns
for(i in seq_along(test2)){
  # Prefix replicate columns with the (truncated) source file name so joins keep them distinct.
  colnames(test2[[i]]) <- c("Mod", paste0(substr(testFiles[i], 1, 20),
                                          1:(sapply(test2[i], NCOL)-1)[1]))
}
test2<- test2 %>%
  reduce(full_join)
test2<-aggregate(. ~Mod, test2, sum)
test2<-test2[!test2$Mod == "XX", ]
colnames(test2) <- c("Mod", paste0("test", 1:(NCOL(test2)-1)))
# ---- Load the control data ----
# Same pipeline as the test data, applied to the control sequencing runs.
controlFiles<-fileC$Control
controlFiles<-controlFiles[!is.na(controlFiles)]
control<-lapply(paste0(dirFiles, controlFiles, "-ppm.txt"), function(f)
  read.table(f, header = TRUE, stringsAsFactors = FALSE, fill = TRUE))
control2<-lapply(control, function(d) d[!(names(d) %in% c("index", "mindex", "Primer","Nuc", "AA"))]) # drop the unnecessary columns
for(i in seq_along(control2)){
  # Prefix replicate columns with the (truncated) source file name so joins keep them distinct.
  colnames(control2[[i]]) <- c("Mod", paste0(substr(controlFiles[i], 1, 20),
                                             1:(sapply(control2[i], NCOL)-1)[1]))
}
control2<- control2 %>%
  reduce(full_join)
control2<-aggregate(. ~Mod, control2, sum)
control2<-control2[!control2$Mod == "XX", ]
colnames(control2) <- c("Mod", paste0("control", 1:(NCOL(control2)-1)))
# ---- Load the naive (unselected library) data ----
# Same pipeline as the test/control data, applied to the naive runs.
naiveFiles<-fileC$Naïve
naiveFiles<-naiveFiles[!is.na(naiveFiles)]
naive<- lapply(paste0(dirFiles, naiveFiles, "-ppm.txt"), function(f)
  read.table(f, header = TRUE, stringsAsFactors = FALSE, fill = TRUE))
naive2<- lapply(naive, function(d) d[!(names(d) %in% c("index", "mindex", "Primer","Nuc", "AA"))])
for(i in seq_along(naive2)){
  # Prefix replicate columns with the (truncated) source file name so joins keep them distinct.
  colnames(naive2[[i]]) <- c("Mod", paste0(substr(naiveFiles[i], 1, 20),
                                           1:(sapply(naive2[i], NCOL)-1)[1]))
}
naive2<- naive2 %>%
  reduce(full_join)
naive2<-aggregate(. ~Mod, naive2, sum)
naive2<-naive2[!naive2$Mod == "XX", ]
colnames(naive2) <- c("Mod", paste0("naive", 1:(NCOL(naive2)-1)))
# Merge the three condition tables by glycan code.
listAll<-list(test2, control2, naive2)
mergedData <- Reduce(function(x, y) full_join(x, y, by="Mod"), listAll)
## ---- TMM normalization (edgeR) ----
# Builds a numeric count matrix, normalizes library sizes with the
# trimmed-mean-of-M-values (TMM) method, and converts to counts-per-million.
# TODO(review): add a switch to enable/disable normalization.
pepdat <- sapply(mergedData[,-c(1)], as.numeric)
rownames(pepdat) <- mergedData[,1]
pepdat[is.na(pepdat)] <- 0
# Quick visual check of the raw library sizes.
barplot(colSums(pepdat), col = c(rep("grey50", 4), rep("grey90", 4)),
        ylab = "Library sizes", main="")
# Condition labels: column names minus their trailing replicate digit
# (vectorized; replaces the original element-by-element loop).
colnam <- substr(colnames(pepdat), 1, nchar(colnames(pepdat)) - 1)
cond<-colnam
cond        # bare expressions below echo objects during interactive runs
design <- model.matrix(~0+cond)
design
DGE <- DGEList(counts=pepdat, group=cond)
DGE
DGE <- calcNormFactors(DGE)
DGE$samples
tmm <- cpm(DGE,normalized.lib.sizes = TRUE)
tmm
# Back to a data frame with the glycan code as an explicit "Mod" first column.
# (Renamed from "c", which shadowed base::c().)
tmm_df <- as.data.frame(tmm)
tmm_df$Mod <- row.names(tmm)
rownames(tmm_df) <- NULL
mergedDataNorm <- tmm_df[c("Mod", setdiff(names(tmm_df), "Mod"))]
# Drop the unassigned/ambiguous glycan code before summarizing.
mergedDataNorm<-mergedDataNorm[!mergedDataNorm$Mod == "???", ]
# Per-glycan mean/SD for each condition. Column layout of mergedDataNorm is
# Mod | test replicates | control replicates | naive replicates, so the
# index arithmetic below slices out each condition's replicate columns.
testAvg<- apply(mergedDataNorm[,2:NCOL(test2)], 1, mean) #Mean
testStd<- apply(mergedDataNorm[,2:NCOL(test2)], 1, sd) #Standard Deviation
controlAvg<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+1):(tail(2:NCOL(test2), n=1)+NCOL(control2)-1)], 1, mean) #Mean
controlStd<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+1):(tail(2:NCOL(test2), n=1)+NCOL(control2)-1)], 1, sd) #Standard Deviation
naiveAvg<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+NCOL(control2)):(NCOL(mergedDataNorm))], 1, mean) #Mean
naiveStd<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+NCOL(control2)):(NCOL(mergedDataNorm))], 1, sd) #Standard Deviation
dataT<- data.frame(glycan=mergedDataNorm[1],testAvg,controlAvg, naiveAvg)
# Long format (one row per glycan x sample) for per-replicate plotting.
# NOTE(review): gather() is superseded; pivot_longer() is the modern equivalent.
longdataT<- mergedDataNorm %>%
  gather(Sample, Freq, colnames(mergedDataNorm[2:ncol(mergedDataNorm)]))
jitter <- position_jitter(width = 0.2, height = 0.2)
### Load the order of the plotting------------------------------------------------------------------------------------------
# MALDI-names.xlsx maps the internal glycan code ("Glycan Name") to IUPAC
# name, linkage, common name and GlyTouCan ID.
ligaFile<-read_excel(paste0(dirMaldi, "MALDI-names.xlsx"), col_names=T, skip=0)
# NOTE(review): drops the first data row (presumably a subheader row) -- confirm.
ligaFile = ligaFile[-1,]
head(ligaFile)
longdataT$IUPAC<- ligaFile$IUPAC[match(longdataT$Mod,
                                       ligaFile$`Glycan Name`)]
longdataT$linker<- ligaFile$Linkage[match(longdataT$Mod,
                                          ligaFile$`Glycan Name`)]
longdataT$CommonName<-ligaFile$`Common Name`[match(longdataT$Mod,
                                                   ligaFile$`Glycan Name`)]
# Library-specific axis file supplies plotting order, density and glycan number.
axisfile<-read_excel(paste0(dirMaldi,TypeOfLibrary,"-axis.xlsx"), col_names = T, skip=0)
longdataT$Order<- axisfile$Order[match(longdataT$Mod,
                                       axisfile$Alphanum.)]
longdataT$Density<- axisfile$Density[match(longdataT$Mod,
                                           axisfile$Alphanum.)]
longdataT$GlycanNum<-axisfile$`Number Glycan`[match(longdataT$Mod,axisfile$Alphanum.)]
# Compose display labels of the form "<name>-[<glycan number>]".
longdataT$IUPAC<-paste0(longdataT$IUPAC, longdataT$linker, "-[", longdataT$GlycanNum, "]")
longdataT$Glytoucan<-ligaFile$`GlyTouCan ID`[match(longdataT$Mod,
                                                   ligaFile$`Glycan Name`)]
longdataT$Glytoucan<-paste0(longdataT$Glytoucan,"-[", longdataT$GlycanNum, "]")
longdataT$Mod2<-paste0(longdataT$Mod,"-[", longdataT$GlycanNum, "]")
longdataT$CommonName2<-paste0(longdataT$CommonName,"-[", longdataT$GlycanNum, "]")
# Unmatched lookups (and any other NA) become 0 rather than NA.
longdataT[is.na(longdataT)] <- 0
# x_axis switch: 1=Mod, 2=Glytoucan, 3=IUPAC, otherwise CommonName.
if (x_axis==1) {
  longdataT$x_label<-longdataT$Mod2
} else if (x_axis==2) {
  longdataT$x_label<-longdataT$Glytoucan
} else if (x_axis==3){
  longdataT$x_label<-longdataT$IUPAC
} else {
  longdataT$x_label<-longdataT$CommonName2
}
### Load the plotting parameters------------------------------------------------------------------------------------------
# jitter <- position_jitter(width = 0.2, height = 0.2)
#
# ColorP<-c(rep("black", NCOL(control2)-1), rep("#999999", NCOL(naive2)-1), rep("black", NCOL(test2)-1))
# names(ColorP) <- levels(factor(longdataT$Sample))
# colScale <- scale_colour_manual(name =factor(longdataT$Sample),values = ColorP)
#
# ShapeP<- c(rep(25, NCOL(control2)-1), rep(21, NCOL(naive2)-1), rep(24, NCOL(test2)-1))
# names(ShapeP) <- levels(factor(longdataT$Sample))
# shapeScale <- scale_shape_manual(name =factor(longdataT$Sample),values = ShapeP)
#
# ShapeFill<-c(rep("white", NCOL(control2)-1), rep("#999999", NCOL(naive2)-1), rep("black", NCOL(test2)-1))
# names(ShapeFill) <- levels(factor(longdataT$Sample))
# fillScale <- scale_fill_manual(name =factor(longdataT$Sample),values = ShapeFill)
# ### Load the plotting parameters------------------------------------------------------------------------------------------
# ### Generate Scatterplot1 Map------------------------------------------------------------------------------------------
# scatter1<-ggplot(longdataT)+
# theme_bw()+
# geom_point(position = jitter, aes(x=reorder(x_label, +Order), y=Freq, color = Sample, shape=Sample, fill = factor(Sample)),
# stroke=0.7, size=3)+
# scale_y_log10(limits=c(1, 1e6), labels = trans_format("log10", math_format(10^.x)))+
# colScale+
# shapeScale+
# fillScale+
# labs(y="PPM", x="Glycan")+
# ggtitle(campaignName)+
# theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=7,hjust=1,vjust=0.2),
# axis.text.y=element_text(family="Helvetica", color="black",size=7, face="bold"),
# legend.title=element_text(family="Helvetica", color="black",size=7),
# legend.text=element_text(family="Helvetica", color="black", size=7),
# title=element_text(family="Helvetica", color="black", size=7))
# scatter1
# ggsave(plot = scatter1, width = 17.71, height = 5.2, dpi = 300, units="in",
# filename = paste0(campaignName, "-scatter1.eps", sep=""))
# ggsave(plot = scatter1, width = 17.71, height = 5.2, dpi = 300, units="in",
# filename = paste0(campaignName, "-scatter1.jpg", sep=""))
### Generate Heat Map------------------------------------------------------------------------------------------
# Log10 PPM per (glycan, sample) tile, colored with a "jet"-style palette.
jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
hm<-ggplot(longdataT, aes(x=reorder(x_label, +Order), y=fct_rev(factor(Sample)))) +
  theme_light()+
  geom_tile(aes(fill = log10(Freq)), colour = "black", size=0.3) +
  scale_fill_gradientn(colours = jet.colors(7))+
  ggtitle(campaignName)+
  labs(y="PPM", x="Glycan")+
  theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=12,hjust=1,vjust=0.2),
        axis.text.y=element_text(family="Helvetica", color="black",size=12, face="bold"),
        legend.title=element_text(family="Helvetica", color="black",size=12),
        legend.text=element_text(family="Helvetica", color="black", size=12),
        title=element_text(family="Helvetica", color="black", size=12))+
  coord_equal()
hm
# Save in both vector (EPS) and raster (JPG) form.
# (paste0 has no sep argument; the stray sep="" calls were removed.)
ggsave(plot = hm, width = 17.71, height = 5.2, dpi = 300, units="in",
       filename = paste0(campaignName, "-hm.eps"))
ggsave(plot = hm, width = 17.71, height = 5.2, dpi = 300, units="in",
       filename = paste0(campaignName, "-hm.jpg"))
### Generate Enrichment Data-------------------------------------------------------------------------------------
# Fold enrichment of each condition relative to the naive library, and the
# test-vs-control ratio.
testEN <- testAvg/naiveAvg
controlEN <- controlAvg/naiveAvg
totalEN<-testEN/controlEN
# SD of the ratio via first-order (relative-error) propagation.
totalStd<-totalEN*sqrt(((testStd/testAvg)^2)+(((controlStd/controlAvg)^2)))
dataEN<- data.frame(glycan=mergedDataNorm[1],testEN,controlEN, totalEN)
# Inf from division by zero -> NA, then all NA -> 0.
is.na(dataEN) <- sapply(dataEN, is.infinite)
dataEN[is.na(dataEN)] <- 0
# Attach plotting annotations from longdataT, keyed on the glycan code.
dataEN$Glytoucan<- longdataT$Glytoucan[match(dataEN$Mod,
                                             longdataT$Mod)]
dataEN$Order<- longdataT$Order[match(dataEN$Mod,
                                     longdataT$Mod)]
dataEN$Mod2<- longdataT$Mod2[match(dataEN$Mod,
                                   longdataT$Mod)]
dataEN$IUPAC<- longdataT$IUPAC[match(dataEN$Mod,
                                     longdataT$Mod)]
dataEN$CommonName2<- longdataT$CommonName2[match(dataEN$Mod,
                                                 longdataT$Mod)]
# x_axis switch: 1=Mod, 2=Glytoucan, 3=IUPAC, otherwise CommonName.
if (x_axis==1) {
  dataEN$x_label<-dataEN$Mod2
} else if (x_axis==2) {
  dataEN$x_label<-dataEN$Glytoucan
} else if (x_axis==3){
  dataEN$x_label<-dataEN$IUPAC
} else {
  dataEN$x_label<-dataEN$CommonName2
}
###Plotting parameters for scatter 2------------------------------------------------------------------------------------------
# Dumbbell-style plot: filled diamonds = test enrichment, open diamonds =
# control enrichment, joined by segments whose width encodes total enrichment.
scatter2<-ggplot(data=dataEN)+
  theme_bw()+
  geom_point(aes(x=reorder(x_label, +Order),
                 y=testEN),
             stat='identity', size=6, fill="black",
             color="black", shape=23)+
  geom_point(aes(x=reorder(x_label, +Order),
                 y=controlEN),
             stat='identity', size=6,
             color="black", fill="white", shape=23)+
  geom_segment(aes(x=reorder(x_label, +Order),
                   xend=x_label,
                   y=testEN,
                   yend=controlEN, size=totalEN))+
  scale_y_log10(labels = trans_format("log10", function(x) 10^x))+
  scale_size_continuous(range = c(0.1, 1.5))+
  labs(y="Enrichment", x="Glycan")+
  ggtitle(campaignName)+
  scale_fill_manual(values=c("#97CAD8","#DC1452"))+
  theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=12,hjust=1,vjust=0.2),
        axis.text.y=element_text(family="Helvetica", color="black",size=12, face="bold"),
        legend.title=element_text(family="Helvetica", color="black",size=12),
        legend.text=element_text(family="Helvetica", color="black", size=12),
        title=element_text(family="Helvetica", color="black", size=12))
scatter2
# (paste0 has no sep argument; the stray sep="" calls were removed.)
ggsave(plot = scatter2, width = 17.71, height = 5.2, dpi = 300, units="in",
       filename = paste0(campaignName, "-scatter2.eps"))
ggsave(plot = scatter2, width = 17.71, height = 5.2, dpi = 300, units="in",
       filename = paste0(campaignName, "-scatter2.jpg"))
# ---- Total enrichment bar chart (experimental) ----
dataTotal<- data.frame(glycan=mergedDataNorm[1], totalEN, totalStd)
# Inf from division by zero -> NA, then all NA -> 0.
is.na(dataTotal) <- sapply(dataTotal, is.infinite)
dataTotal[is.na(dataTotal)] <- 0
# Carry the plotting annotations over from longdataT, keyed on the glycan code.
dataTotal$Glytoucan<- longdataT$Glytoucan[match(dataTotal$Mod,
                                                longdataT$Mod)]
dataTotal$Order<- longdataT$Order[match(dataTotal$Mod,
                                        longdataT$Mod)]
dataTotal$Mod2<- longdataT$Mod2[match(dataTotal$Mod,
                                      longdataT$Mod)]
dataTotal$IUPAC<- longdataT$IUPAC[match(dataTotal$Mod,
                                        longdataT$Mod)]
dataTotal$CommonName2<- longdataT$CommonName2[match(dataTotal$Mod,
                                                    longdataT$Mod)]
# x_axis switch: 1=Mod, 2=Glytoucan, 3=IUPAC, otherwise CommonName.
if (x_axis==1) {
  dataTotal$x_label<-dataTotal$Mod2
} else if (x_axis==2) {
  dataTotal$x_label<-dataTotal$Glytoucan
} else if (x_axis==3){
  dataTotal$x_label<-dataTotal$IUPAC
} else {
  dataTotal$x_label<-dataTotal$CommonName2
}
# Custom square-root y-axis transform (compresses large enrichment values).
root <- function(x) x ^ (1/2)
nonroot <- function(x) x ^ 2
trans <- trans_new(name = "root",
                   transform = root,
                   inverse = nonroot)
barchart<-ggplot(dataTotal, aes(x=reorder(x_label, +Order), y=totalEN))+
  theme_classic()+
  geom_bar(stat="identity", color="black", fill="black",
           position=position_dodge()) +
  geom_errorbar(aes(ymin=totalEN, ymax=totalEN+totalStd), width=.5, size=.4,
                position=position_dodge(.9))+
  labs(y="Enrichment", x="Glycan")+
  ggtitle(campaignName)+
  expand_limits(x = 0, y = 0)+
  theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=12,hjust=1,vjust=0.2),
        axis.text.y=element_text(family="Helvetica", color="black",size=12, face="bold"),
        legend.title=element_text(family="Helvetica", color="black",size=12),
        legend.text=element_text(family="Helvetica", color="black", size=12),
        title=element_text(family="Helvetica", color="black", size=12))+
  theme( # remove the vertical grid lines
    panel.grid.major.x = element_blank() ,
    # explicitly set the horizontal lines (or they will disappear too)
    panel.grid.major.y = element_line( size=.1, color="black" ),
    panel.grid.minor.y = element_blank()
  )+
  scale_y_continuous(trans=trans)+
  geom_hline(yintercept = c(1,5,20))
barchart
# (paste0 has no sep argument; the stray sep="" calls were removed.)
ggsave(plot = barchart, width = 17.71, height = 5.2, dpi = 300, units="in",
       filename = paste0(campaignName, "-barchart.eps"))
ggsave(plot = barchart, width = 17.71, height = 5.2, dpi = 300, units="in",
       filename = paste0(campaignName, "-barchart.jpg"))
| /LigaPlot.R | no_license | mirats/LiGA | R | false | false | 16,590 | r | # Clear plots
if(!is.null(dev.list())) dev.off()
# Clear console
cat("\014")
# Clean workspace
rm(list=ls())
library(tidyverse)
library(readxl)
library(edgeR)
library(RColorBrewer)
library(scales)
library(cowplot)
campaignName<- "cd22cells_vs_YZ" #Do not put extention. File should be saves in .xlsx format ONLY.
dirFiles <- "~/Google Drive/Derda Lab/Glycan Array/AllSeqFiles/" #Folder where all the .txt liga files are stored. No need to put name of files
dirCampaign <- "~/Dropbox/Database/Campaign/" #Folder where the xlsx file is stored with information about experiment. Look at Dropbox/Database/Campaign/CD22_vs_YZ.xlsx for examples.
dirSave<- dirCampaign #Folder where all the images will be saved.
dirMaldi<- "~/Dropbox/Database/" #Place where MALDI file is stored. Default is Dropbox/Database/
dirOrder<- "~/Dropbox/Database/" #Place where order of x-axis plotting file is stored. Default is Dropbox/Database/
x_axis<- 4 ## Options: 1=Mod, 2=Glytoucan, 3=IUPAC, 4=CommonName
TypeOfLibrary<-"YZ" #Options: 3x3 and YZ.
#-------------------------------------------####################-----------------------------------------------------------------
###Do not change anything beyond this point--------------------------------------------------------------------------------------
setwd(dirSave)
#load the campaign file.
fileC<-read_excel(paste0(dirCampaign, campaignName, ".xlsx", sep="")) #reads the Campaign file
#load the test data--------------------------------------------------------------------------------------------
testFiles<-fileC$Test
testFiles<-testFiles[!is.na(testFiles)]
test<-lapply(paste0(dirFiles, testFiles, "-ppm.txt", sep=""), function(x)
read.table(x, header = TRUE, stringsAsFactors = FALSE,fill = TRUE))
test2<-lapply(test, function(x) x[!(names(x) %in% c("index", "mindex", "Primer","Nuc", "AA"))]) #drops the uncessary
for(i in 1:length(test2)){
colnames(test2[[i]]) <- c("Mod",rep(paste0(substr(testFiles[i], 1, 20),
1:(sapply(test2[i], NCOL)-1)[1])))
}
test2<- test2 %>%
reduce(full_join)
test2<-aggregate(. ~Mod, test2, sum)
test2<-test2[!test2$Mod == "XX", ]
colnames(test2) <- c("Mod", rep(paste0("test", 1:(NCOL(test2)-1))))
#Load the control data in the environment--------------------------------------------------------------------------------------
controlFiles<-fileC$Control
controlFiles<-controlFiles[!is.na(controlFiles)]
control<-lapply(paste0(dirFiles, controlFiles, "-ppm.txt", sep=""), function(x)
read.table(x, header = TRUE, stringsAsFactors = FALSE,fill = TRUE)) #reads the Campaign file
control2<-lapply(control, function(x) x[!(names(x) %in% c("index", "mindex", "Primer","Nuc", "AA"))]) #drops the uncessary stuff
for(i in 1:length(control2)){
colnames(control2[[i]]) <- c("Mod",rep(paste0(substr(controlFiles[i], 1, 20),
1:(sapply(control2[i], NCOL)-1)[1])))
}
control2<- control2 %>%
reduce(full_join)
control2<-aggregate(. ~Mod, control2, sum)
control2<-control2[!control2$Mod == "XX", ]
colnames(control2) <- c("Mod", rep(paste0("control", 1:(NCOL(control2)-1))))
#load the naive data in the environment--------------------------------------------------------------------------------------
naiveFiles<-fileC$Naïve
naiveFiles<-naiveFiles[!is.na(naiveFiles)]
naive<- lapply(paste0(dirFiles, naiveFiles, "-ppm.txt", sep=""), function(x)
read.table(x, header=T, stringsAsFactors = FALSE,fill = TRUE))
naive2<- lapply(naive, function(x) x[!(names(x) %in% c("index", "mindex", "Primer","Nuc", "AA"))])
for(i in 1:length(naive2)){
colnames(naive2[[i]]) <- c("Mod",rep(paste0(substr(naiveFiles[i], 1, 20),
1:(sapply(naive2[i], NCOL)-1)[1])))
}
naive2<- naive2 %>%
reduce(full_join)
naive2<-aggregate(. ~Mod, naive2, sum)
naive2<-naive2[!naive2$Mod == "XX", ]
colnames(naive2) <- c("Mod", rep(paste0("naive", 1:(NCOL(naive2)-1))))
listAll<-list(test2, control2, naive2)
mergedData <- Reduce(function(x, y) full_join(x, y, by="Mod"), listAll)
##This is where TMM happens. Need to put a switch to turn it on and off. ------------------------------------------------
# Build a numeric count matrix with Mod names as rownames; entries missing
# after the full joins are treated as zero counts.
pepdat <- sapply(mergedData[,-c(1)], as.numeric)
rownames(pepdat) <- mergedData[,1]
pepdat[is.na(pepdat)] <- 0
# Quick visual check of per-sample library sizes before normalization.
barplot(colSums(pepdat), col = c(rep("grey50", 4), rep("grey90", 4)),
ylab = "Library sizes", main="")
# Derive each sample's condition by stripping the trailing replicate digit
# from its column name (e.g. "control1" -> "control").
colnam<-colnames(pepdat)
for (i in 1:NCOL(pepdat)) {
colnam[i]<-substr((colnames(pepdat)[i]), 1, (nchar(colnames(pepdat)[i])-1))
}
cond<-colnam
cond
design <- model.matrix(~0+cond)
design
# edgeR TMM normalization: build the DGEList, compute normalization factors,
# and extract normalized counts-per-million.  (Bare names below print the
# intermediate objects when the script is run interactively.)
DGE <- DGEList(counts=pepdat, group=cond)
DGE
DGE <- calcNormFactors(DGE)
DGE$samples
tmm <- cpm(DGE,normalized.lib.sizes = TRUE)
tmm
# Rebuild a data frame with "Mod" as the first column.
# NOTE(review): the variable `c` shadows base::c(); consider renaming.
c=as.data.frame(tmm)
c$Mod<-row.names(tmm)
rownames(c) <- NULL
mergedDataNorm<-c[c("Mod", setdiff(names(c), "Mod"))]
#End of TMM analysis. --------------------------------------------------------------------------------------------------
mergedDataNorm<-mergedDataNorm[!mergedDataNorm$Mod == "???", ]
# Per-glycan mean/SD within each sample group.  The column ranges are derived
# from the widths of test2 and control2 -- assumes the normalized columns are
# ordered test, then control, then naive (TODO confirm against mergedData).
testAvg<- apply(mergedDataNorm[,2:NCOL(test2)], 1, mean) #Mean
testStd<- apply(mergedDataNorm[,2:NCOL(test2)], 1, sd) #Standard Deviation
controlAvg<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+1):(tail(2:NCOL(test2), n=1)+NCOL(control2)-1)], 1, mean) #Mean
controlStd<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+1):(tail(2:NCOL(test2), n=1)+NCOL(control2)-1)], 1, sd) #Standard Deviation
naiveAvg<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+NCOL(control2)):(NCOL(mergedDataNorm))], 1, mean) #Mean
naiveStd<- apply(mergedDataNorm[,(tail(2:NCOL(test2), n=1)+NCOL(control2)):(NCOL(mergedDataNorm))], 1, sd) #Standard Deviation
dataT<- data.frame(glycan=mergedDataNorm[1],testAvg,controlAvg, naiveAvg)
# Long format (one row per glycan x sample) for ggplot.
longdataT<- mergedDataNorm %>%
gather(Sample, Freq, colnames(mergedDataNorm[2:ncol(mergedDataNorm)]))
jitter <- position_jitter(width = 0.2, height = 0.2)
### Load the order of the plotting------------------------------------------------------------------------------------------
# MALDI-names.xlsx maps glycan names to IUPAC names, linkages, common names,
# and GlyTouCan IDs; the first data row is dropped (header remnant, presumably
# -- TODO confirm against the spreadsheet).
ligaFile<-read_excel(paste0(dirMaldi, "MALDI-names.xlsx"), col_names=T, skip=0)
ligaFile = ligaFile[-1,]
head(ligaFile)
# Annotate every long-format row with its naming metadata via match() lookups.
longdataT$IUPAC<- ligaFile$IUPAC[match(longdataT$Mod,
ligaFile$`Glycan Name`)]
longdataT$linker<- ligaFile$Linkage[match(longdataT$Mod,
ligaFile$`Glycan Name`)]
longdataT$CommonName<-ligaFile$`Common Name`[match(longdataT$Mod,
ligaFile$`Glycan Name`)]
# The per-library axis file supplies the plotting order, density and number.
axisfile<-read_excel(paste0(dirMaldi,TypeOfLibrary,"-axis.xlsx"), col_names = T, skip=0)
longdataT$Order<- axisfile$Order[match(longdataT$Mod,
axisfile$Alphanum.)]
longdataT$Density<- axisfile$Density[match(longdataT$Mod,
axisfile$Alphanum.)]
longdataT$GlycanNum<-axisfile$`Number Glycan`[match(longdataT$Mod,axisfile$Alphanum.)]
# Compose candidate x-axis labels, each suffixed with "-[glycan number]".
longdataT$IUPAC<-paste0(longdataT$IUPAC, longdataT$linker, "-[", longdataT$GlycanNum, "]")
longdataT$Glytoucan<-ligaFile$`GlyTouCan ID`[match(longdataT$Mod,
ligaFile$`Glycan Name`)]
longdataT$Glytoucan<-paste0(longdataT$Glytoucan,"-[", longdataT$GlycanNum, "]")
longdataT$Mod2<-paste0(longdataT$Mod,"-[", longdataT$GlycanNum, "]")
longdataT$CommonName2<-paste0(longdataT$CommonName,"-[", longdataT$GlycanNum, "]")
longdataT[is.na(longdataT)] <- 0
# x_axis selects the labelling scheme: 1 = glycan name, 2 = GlyTouCan ID,
# 3 = IUPAC, anything else = common name.
if (x_axis==1) {
longdataT$x_label<-longdataT$Mod2
} else if (x_axis==2) {
longdataT$x_label<-longdataT$Glytoucan
} else if (x_axis==3){
longdataT$x_label<-longdataT$IUPAC
} else {
longdataT$x_label<-longdataT$CommonName2
}
### Load the plotting parameters------------------------------------------------------------------------------------------
# jitter <- position_jitter(width = 0.2, height = 0.2)
#
# ColorP<-c(rep("black", NCOL(control2)-1), rep("#999999", NCOL(naive2)-1), rep("black", NCOL(test2)-1))
# names(ColorP) <- levels(factor(longdataT$Sample))
# colScale <- scale_colour_manual(name =factor(longdataT$Sample),values = ColorP)
#
# ShapeP<- c(rep(25, NCOL(control2)-1), rep(21, NCOL(naive2)-1), rep(24, NCOL(test2)-1))
# names(ShapeP) <- levels(factor(longdataT$Sample))
# shapeScale <- scale_shape_manual(name =factor(longdataT$Sample),values = ShapeP)
#
# ShapeFill<-c(rep("white", NCOL(control2)-1), rep("#999999", NCOL(naive2)-1), rep("black", NCOL(test2)-1))
# names(ShapeFill) <- levels(factor(longdataT$Sample))
# fillScale <- scale_fill_manual(name =factor(longdataT$Sample),values = ShapeFill)
# ### Load the plotting parameters------------------------------------------------------------------------------------------
# ### Generate Scatterplot1 Map------------------------------------------------------------------------------------------
# scatter1<-ggplot(longdataT)+
# theme_bw()+
# geom_point(position = jitter, aes(x=reorder(x_label, +Order), y=Freq, color = Sample, shape=Sample, fill = factor(Sample)),
# stroke=0.7, size=3)+
# scale_y_log10(limits=c(1, 1e6), labels = trans_format("log10", math_format(10^.x)))+
# colScale+
# shapeScale+
# fillScale+
# labs(y="PPM", x="Glycan")+
# ggtitle(campaignName)+
# theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=7,hjust=1,vjust=0.2),
# axis.text.y=element_text(family="Helvetica", color="black",size=7, face="bold"),
# legend.title=element_text(family="Helvetica", color="black",size=7),
# legend.text=element_text(family="Helvetica", color="black", size=7),
# title=element_text(family="Helvetica", color="black", size=7))
# scatter1
# ggsave(plot = scatter1, width = 17.71, height = 5.2, dpi = 300, units="in",
# filename = paste0(campaignName, "-scatter1.eps", sep=""))
# ggsave(plot = scatter1, width = 17.71, height = 5.2, dpi = 300, units="in",
# filename = paste0(campaignName, "-scatter1.jpg", sep=""))
### Generate Heat Map------------------------------------------------------------------------------------------
# "Jet"-style palette (dark blue -> red) for the log10 abundance scale.
jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
# Heat map of log10 frequency: glycans on x (ordered by the axis file's
# `Order` column), samples on y (fct_rev keeps the original top-to-bottom
# sample order).
hm<-ggplot(longdataT, aes(x=reorder(x_label, +Order), y=fct_rev(factor(Sample)))) +
theme_light()+
geom_tile(aes(fill = log10(Freq)), colour = "black", size=0.3) +
scale_fill_gradientn(colours = jet.colors(7))+
ggtitle(campaignName)+
labs(y="PPM", x="Glycan")+
theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=12,hjust=1,vjust=0.2),
axis.text.y=element_text(family="Helvetica", color="black",size=12, face="bold"),
legend.title=element_text(family="Helvetica", color="black",size=12),
legend.text=element_text(family="Helvetica", color="black", size=12),
title=element_text(family="Helvetica", color="black", size=12))+
coord_equal()
hm
# Save as EPS and JPG.  paste0() already concatenates without a separator;
# the previous `sep=""` argument was not a separator (paste0 has no `sep`
# formal) -- it was silently appended as an extra empty string, so removing
# it produces an identical filename.
ggsave(plot = hm, width = 17.71, height = 5.2, dpi = 300, units="in",
filename = paste0(campaignName, "-hm.eps"))
ggsave(plot = hm, width = 17.71, height = 5.2, dpi = 300, units="in",
filename = paste0(campaignName, "-hm.jpg"))
### Generate Enrichment Data-------------------------------------------------------------------------------------
# Enrichment of each glycan relative to the naive library, for the test and
# control selections, plus their ratio ("total enrichment").
testEN <- testAvg/naiveAvg
controlEN <- controlAvg/naiveAvg
totalEN<-testEN/controlEN
# Propagate the relative errors of test and control in quadrature.
totalStd<-totalEN*sqrt(((testStd/testAvg)^2)+(((controlStd/controlAvg)^2)))
dataEN<- data.frame(glycan=mergedDataNorm[1],testEN,controlEN, totalEN)
# Division by zero yields Inf; convert Inf -> NA -> 0 so plots stay finite.
is.na(dataEN) <- sapply(dataEN, is.infinite)
dataEN[is.na(dataEN)] <- 0
# Carry the label/order metadata over from longdataT (keyed on Mod).
dataEN$Glytoucan<- longdataT$Glytoucan[match(dataEN$Mod,
longdataT$Mod)]
dataEN$Order<- longdataT$Order[match(dataEN$Mod,
longdataT$Mod)]
dataEN$Mod2<- longdataT$Mod2[match(dataEN$Mod,
longdataT$Mod)]
dataEN$IUPAC<- longdataT$IUPAC[match(dataEN$Mod,
longdataT$Mod)]
dataEN$CommonName2<- longdataT$CommonName2[match(dataEN$Mod,
longdataT$Mod)]
# Same x-axis labelling switch as used for the heat-map data:
# 1 = glycan name, 2 = GlyTouCan ID, 3 = IUPAC, else common name.
if (x_axis==1) {
dataEN$x_label<-dataEN$Mod2
} else if (x_axis==2) {
dataEN$x_label<-dataEN$Glytoucan
} else if (x_axis==3){
dataEN$x_label<-dataEN$IUPAC
} else {
dataEN$x_label<-dataEN$CommonName2
}
###Plotting parameters for scatter 2------------------------------------------------------------------------------------------
# Dumbbell-style plot: filled diamonds = test enrichment, open diamonds =
# control enrichment, joined by a segment whose thickness encodes the
# test/control ratio (totalEN).
scatter2<-ggplot(data=dataEN)+
theme_bw()+
geom_point(aes(x=reorder(x_label, +Order),
y=testEN),
stat='identity', size=6, fill="black",
color="black", shape=23)+
geom_point(aes(x=reorder(x_label, +Order),
y=controlEN),
stat='identity', size=6,
color="black", fill="white", shape=23)+
geom_segment(aes(x=reorder(x_label, +Order),
xend=x_label,
y=testEN,
yend=controlEN, size=totalEN))+
scale_y_log10(labels = trans_format("log10", function(x) 10^x))+
scale_size_continuous(range = c(0.1, 1.5))+
labs(y="Enrichment", x="Glycan")+
ggtitle(campaignName)+
scale_fill_manual(values=c("#97CAD8","#DC1452"))+
theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=12,hjust=1,vjust=0.2),
axis.text.y=element_text(family="Helvetica", color="black",size=12, face="bold"),
legend.title=element_text(family="Helvetica", color="black",size=12),
legend.text=element_text(family="Helvetica", color="black", size=12),
title=element_text(family="Helvetica", color="black", size=12))
scatter2
# paste0() has no `sep` argument; the previous `sep=""` was silently appended
# as an extra empty string, so dropping it leaves the filename unchanged.
ggsave(plot = scatter2, width = 17.71, height = 5.2, dpi = 300, units="in",
filename = paste0(campaignName, "-scatter2.eps"))
ggsave(plot = scatter2, width = 17.71, height = 5.2, dpi = 300, units="in",
filename = paste0(campaignName, "-scatter2.jpg"))
#This is experimental
# Bar chart of the total (test/control) enrichment with one-sided error bars.
dataTotal<- data.frame(glycan=mergedDataNorm[1], totalEN, totalStd)
# Convert Inf -> NA -> 0 so the chart stays finite.
is.na(dataTotal) <- sapply(dataTotal, is.infinite)
dataTotal[is.na(dataTotal)] <- 0
# Carry label/order metadata over from longdataT (keyed on Mod).
dataTotal$Glytoucan<- longdataT$Glytoucan[match(dataTotal$Mod,
longdataT$Mod)]
dataTotal$Order<- longdataT$Order[match(dataTotal$Mod,
longdataT$Mod)]
dataTotal$Mod2<- longdataT$Mod2[match(dataTotal$Mod,
longdataT$Mod)]
dataTotal$IUPAC<- longdataT$IUPAC[match(dataTotal$Mod,
longdataT$Mod)]
dataTotal$CommonName2<- longdataT$CommonName2[match(dataTotal$Mod,
longdataT$Mod)]
# Same x-axis labelling switch as the other plots.
if (x_axis==1) {
dataTotal$x_label<-dataTotal$Mod2
} else if (x_axis==2) {
dataTotal$x_label<-dataTotal$Glytoucan
} else if (x_axis==3){
dataTotal$x_label<-dataTotal$IUPAC
} else {
dataTotal$x_label<-dataTotal$CommonName2
}
# Custom square-root y transform to compress large enrichment values.
# NOTE(review): root() assumes non-negative values, which holds here after
# the Inf/NA -> 0 replacement above.
root <- function(x) x ^ (1/2)
nonroot <- function(x) x ^ 2
trans <- trans_new(name = "root",
transform = root,
inverse = nonroot)
barchart<-ggplot(dataTotal, aes(x=reorder(x_label, +Order), y=totalEN))+
theme_classic()+
geom_bar(stat="identity", color="black", fill="black",
position=position_dodge()) +
# Error bars extend upward only (ymin == bar height).
geom_errorbar(aes(ymin=totalEN, ymax=totalEN+totalStd), width=.5, size=.4,
position=position_dodge(.9))+
labs(y="Enrichment", x="Glycan")+
ggtitle(campaignName)+
expand_limits(x = 0, y = 0)+
theme(axis.text.x=element_text(family="Helvetica", color="black", angle=90, size=12,hjust=1,vjust=0.2),
axis.text.y=element_text(family="Helvetica", color="black",size=12, face="bold"),
legend.title=element_text(family="Helvetica", color="black",size=12),
legend.text=element_text(family="Helvetica", color="black", size=12),
title=element_text(family="Helvetica", color="black", size=12))+
theme( # remove the vertical grid lines
panel.grid.major.x = element_blank() ,
# explicitly set the horizontal lines (or they will disappear too)
panel.grid.major.y = element_line( size=.1, color="black" ),
panel.grid.minor.y = element_blank()
)+
scale_y_continuous(trans=trans)+
# Reference lines at enrichment = 1, 5 and 20.
geom_hline(yintercept = c(1,5,20))
barchart
# NOTE(review): paste0() has no `sep` argument; `sep=""` below is appended as
# an extra empty string (harmless but redundant).
ggsave(plot = barchart, width = 17.71, height = 5.2, dpi = 300, units="in",
filename = paste0(campaignName, "-barchart.eps", sep=""))
ggsave(plot = barchart, width = 17.71, height = 5.2, dpi = 300, units="in",
filename = paste0(campaignName, "-barchart.jpg", sep=""))
|
\encoding{utf8}
\name{cdfpdq4}
\alias{cdfpdq4}
\title{Cumulative Distribution Function of the Polynomial Density-Quantile4 Distribution}
\description{
This function computes the cumulative probability or nonexceedance probability of the Polynomial Density-Quantile4 (PDQ4) distribution given parameters (\eqn{\xi}, \eqn{\alpha}, \eqn{\kappa}) computed by \code{\link{parpdq4}}. The cumulative distribution function has no explicit form and requires numerical methods. The \R function \code{uniroot()} is used to root the quantile function \code{\link{quapdq4}} to compute the nonexceedance probability. The distribution's canonical definition is in terms of the quantile function (\code{\link{quapdq4}}).
}
\usage{
cdfpdq4(x, para, paracheck=TRUE)
}
\arguments{
\item{x}{A real value vector.}
\item{para}{The parameters from \code{\link{parpdq4}} or \code{\link{vec2par}}.}
\item{paracheck}{A logical switch as to whether the validity of the parameters should be checked. Default is \code{paracheck=TRUE}. Setting \code{paracheck=FALSE} avoids re-validating the parameters on each of the many repeated calls to \code{quapdq4} made during root solving, which substantially speeds up \code{\link{cdfpdq4}}.}
}
\value{
Nonexceedance probability (\eqn{F}) for \eqn{x}.
}
\references{
Hosking, J.R.M., 2007, Distributions with maximum entropy subject to constraints on their L-moments or expected order statistics: Journal of Statistical Planning and Inference, v. 137, no. 9, pp. 2870--2891, \doi{10.1016/j.jspi.2006.10.010}.
}
\author{W.H. Asquith}
\seealso{\code{\link{pdfpdq4}}, \code{\link{quapdq4}}, \code{\link{lmompdq4}}, \code{\link{parpdq4}}}
\examples{
\dontrun{
FF <- seq(0.001, 0.999, by=0.001)
para <- list(para=c(0, 0.4332, -0.7029), type="pdq4")
Fpdq4 <- cdfpdq4(quapdq4(FF, para), para)
plot(FF, Fpdq4, type="l", col=grey(0.8), lwd=4)
# should be a 1:1 line, it is }
\dontrun{
para <- list(para=c(0, 0.4332, -0.7029), type="pdq4")
X <- seq(-5, +12, by=(12 - -5) / 500)
plot( X, cdfpdq4(X, para), type="l", col=grey(0.8), lwd=4, ylim=c(0, 1))
lines(X, pf( exp(X), df1=5, df2=4), lty=2)
lines(X, c(NA, diff( cdfpdq4(X, para)) / ((12 - -5) / 500)))
lines(X, c(NA, diff( pf( exp(X), df1=5, df2=4)) / ((12 - -5) / 500)), lty=2) # }
}
\keyword{distribution}
\keyword{cumulative distribution function}
\keyword{Distribution: Polynomial Density-Quantile4}
| /man/cdfpdq4.Rd | no_license | wasquith/lmomco | R | false | false | 2,363 | rd | \encoding{utf8}
\name{cdfpdq4}
\alias{cdfpdq4}
\title{Cumulative Distribution Function of the Polynomial Density-Quantile4 Distribution}
\description{
This function computes the cumulative probability or nonexceedance probability of the Polynomial Density-Quantile4 (PDQ4) distribution given parameters (\eqn{\xi}, \eqn{\alpha}, \eqn{\kappa}) computed by \code{\link{parpdq4}}. The cumulative distribution function has no explicit form and requires numerical methods. The \R function \code{uniroot()} is used to root the quantile function \code{\link{quapdq4}} to compute the nonexceedance probability. The distribution's canonical definition is in terms of the quantile function (\code{\link{quapdq4}}).
}
\usage{
cdfpdq4(x, para, paracheck=TRUE)
}
\arguments{
\item{x}{A real value vector.}
\item{para}{The parameters from \code{\link{parpdq4}} or \code{\link{vec2par}}.}
\item{paracheck}{A logical switch as to whether the validity of the parameters should be checked. Default is \code{paracheck=TRUE}. This switch is made so that the root solution needed for \code{\link{cdfpdq4}} shows an extreme speed increase because of the repeated calls to \code{quapdq4}.}
}
\value{
Nonexceedance probability (\eqn{F}) for \eqn{x}.
}
\references{
Hosking, J.R.M., 2007, Distributions with maximum entropy subject to constraints on their L-moments or expected order statistics: Journal of Statistical Planning and Inference, v. 137, no. 9, pp. 2870--2891, \doi{10.1016/j.jspi.2006.10.010}.
}
\author{W.H. Asquith}
\seealso{\code{\link{pdfpdq4}}, \code{\link{quapdq4}}, \code{\link{lmompdq4}}, \code{\link{parpdq4}}}
\examples{
\dontrun{
FF <- seq(0.001, 0.999, by=0.001)
para <- list(para=c(0, 0.4332, -0.7029), type="pdq4")
Fpdq4 <- cdfpdq4(quapdq4(FF, para), para)
plot(FF, Fpdq4, type="l", col=grey(0.8), lwd=4)
# should be a 1:1 line, it is }
\dontrun{
para <- list(para=c(0, 0.4332, -0.7029), type="pdq4")
X <- seq(-5, +12, by=(12 - -5) / 500)
plot( X, cdfpdq4(X, para), type="l", col=grey(0.8), lwd=4, ylim=c(0, 1))
lines(X, pf( exp(X), df1=5, df2=4), lty=2)
lines(X, c(NA, diff( cdfpdq4(X, para)) / ((12 - -5) / 500)))
lines(X, c(NA, diff( pf( exp(X), df1=5, df2=4)) / ((12 - -5) / 500)), lty=2) # }
}
\keyword{distribution}
\keyword{cumulative distribution function}
\keyword{Distribution: Polynomial Density-Quantile4}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 19175
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19174
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19174
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt47_340_389.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 5651
c no.of clauses 19175
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 19174
c
c QBFLIB/Basler/terminator/stmt47_340_389.qdimacs 5651 19175 E1 [1] 0 269 5381 19174 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt47_340_389/stmt47_340_389.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 720 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 19175
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19174
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19174
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt47_340_389.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 5651
c no.of clauses 19175
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 19174
c
c QBFLIB/Basler/terminator/stmt47_340_389.qdimacs 5651 19175 E1 [1] 0 269 5381 19174 RED
|
library(bayess)
### Name: pbino
### Title: Posterior expectation for the binomial capture-recapture model
### Aliases: pbino
### Keywords: capture-recapture models binomial probability posterior
### expectation
### ** Examples
# eurodip: capture-recapture data; column 1 presumably corresponds to 1981
# (TODO confirm against the bayess package documentation).
data(eurodip)
year81=eurodip[,1]
# Number of individuals captured at least once that year.
nplus=sum(year81>0)
# Posterior expectation of population size: sum over N of N * pbino weight.
sum((1:400)*pbino(nplus))
| /data/genthat_extracted_code/bayess/examples/pbino.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 315 | r | library(bayess)
### Name: pbino
### Title: Posterior expectation for the binomial capture-recapture model
### Aliases: pbino
### Keywords: capture-recapture models binomial probability posterior
### expectation
### ** Examples
data(eurodip)
year81=eurodip[,1]
nplus=sum(year81>0)
sum((1:400)*pbino(nplus))
|
library(shiny)
library(ggplot2)
library(MASS)
# UI: sidebar with controls for drawing a sample (size + correlation) and for
# fitting a polynomial model of a chosen order; the main panel shows the plot.
ui <- fluidPage(
titlePanel('Ejercicio modelos polinomiales'),
sidebarLayout(
sidebarPanel(
# Sample size for the simulated data.
numericInput('tamanio',
'Tamaño muestral:',
min = 1,
step = 5,
value = 10),
# Target correlation (passed to mvrnorm as Sigma in the server).
numericInput('cor',
'Correlación:',
min = 0,
max = 1,
step = .1,
value = .5),
actionButton('muestra','Generar muestra'),
hr(),hr(),
# Polynomial order for the fitted model.
numericInput('pol',
'Orden polinómico:',
min = 0,
step = 1,
value = 1),
actionButton('modelo','Añadir modelo')
),
mainPanel(fluidRow(plotOutput('plot1')))
)
)
# Server: draws a sample on "Generar muestra" and overlays polynomial fits on
# "Añadir modelo".
server <- function(input,output){
# Regenerate the sample only when the button fires; size and correlation
# are isolate()d so editing them alone does not redraw the data.
mi.dis <- reactive({
input$muestra
t <- isolate(input$tamanio)
c <- isolate(input$cor)
set.seed(Sys.time())
# NOTE(review): with scalar mu/Sigma this draws a univariate sample with
# variance `c`; for correlated bivariate data one would expect
# mu = c(0, 0) and a 2x2 Sigma -- confirm intended behavior.
mvrnorm(n = t,mu = 0,Sigma = c)
})
# Base scatter plot of the sample; re-runs on either button.
mi.grafico <- reactive({
input$muestra
input$modelo
plot(mi.dis(), ylab = 'Correlación',
xlab = 'Nº obs.')
})
# NOTE(review): this regresses the sample on a polynomial of itself
# (y ~ poly(y, degree)), i.e. the response is also the predictor -- verify
# this is the intended exercise.
mi.modelo <- reactive({
input$modelo
lm(mi.dis()~ poly(mi.dis(),degree = input$pol))
})
output$plot1 <- renderPlot({
mi.grafico()
tt <- isolate(input$pol)
# Only overlay the model for non-linear (order != 1) fits.
if (tt != 1)
mi.grafico2()
})
mi.pred <- reactive({
input$modelo
predict(lm(mi.dis()~ poly(mi.dis(), degree = input$pol)))
})
# Overlay a lowess smoother of the predictions and the fitted values (red)
# on top of the base plot.
mi.grafico2 <- reactive({
input$modelo
mi.grafico() + lines(lowess(mi.pred())) +
lines(mi.modelo()$fitted.values,type = 'l',col = 'red')
})
}
shinyApp(ui,server)
| /PracticasShiny/Practica2Polinomios.R | no_license | OscarCamean/TecnicasVisualizacion | R | false | false | 1,803 | r | library(shiny)
library(ggplot2)
library(MASS)
ui <- fluidPage(
titlePanel('Ejercicio modelos polinomiales'),
sidebarLayout(
sidebarPanel(
numericInput('tamanio',
'Tamaño muestral:',
min = 1,
step = 5,
value = 10),
numericInput('cor',
'Correlación:',
min = 0,
max = 1,
step = .1,
value = .5),
actionButton('muestra','Generar muestra'),
hr(),hr(),
numericInput('pol',
'Orden polinómico:',
min = 0,
step = 1,
value = 1),
actionButton('modelo','Añadir modelo')
),
mainPanel(fluidRow(plotOutput('plot1')))
)
)
server <- function(input,output){
mi.dis <- reactive({
input$muestra
t <- isolate(input$tamanio)
c <- isolate(input$cor)
set.seed(Sys.time())
mvrnorm(n = t,mu = 0,Sigma = c)
})
mi.grafico <- reactive({
input$muestra
input$modelo
plot(mi.dis(), ylab = 'Correlación',
xlab = 'Nº obs.')
})
mi.modelo <- reactive({
input$modelo
lm(mi.dis()~ poly(mi.dis(),degree = input$pol))
})
output$plot1 <- renderPlot({
mi.grafico()
tt <- isolate(input$pol)
if (tt != 1)
mi.grafico2()
})
mi.pred <- reactive({
input$modelo
predict(lm(mi.dis()~ poly(mi.dis(), degree = input$pol)))
})
mi.grafico2 <- reactive({
input$modelo
mi.grafico() + lines(lowess(mi.pred())) +
lines(mi.modelo()$fitted.values,type = 'l',col = 'red')
})
}
shinyApp(ui,server)
|
#### data ####
# Build an MRIaggr object from the package's bundled NIfTI example volumes
# (one TTP map, one DWI map, and two lesion masks merged into "Lesion").
path.Pat1 <- system.file(file.path("nifti"), package = "MRIaggr")
nifti.Pat1_TTP_t0 <- readMRI(file.path(path.Pat1, "TTP_t0.nii"), format = "nifti")
nifti.Pat1_DWI_t0 <- readMRI(file.path(path.Pat1, "DWI_t0.nii"), format = "nifti")
nifti.Pat1_MASK_DWI_t0 <- readMRI(file.path(path.Pat1, "MASK_DWI_t0.nii"), format = "nifti")
nifti.Pat1_MASK_T2_FLAIR_t2 <- readMRI(file.path(path.Pat1, "MASK_T2_FLAIR_t2.nii"),
format = "nifti")
MRIaggr.Pa1 <- constMRIaggr(list(nifti.Pat1_TTP_t0, nifti.Pat1_DWI_t0,
nifti.Pat1_MASK_DWI_t0, nifti.Pat1_MASK_T2_FLAIR_t2),
format = "MRIaggr",
ls.MergeParam = list(Lesion = c("MASK_t0","MASK_t2")),
identifier= "Pat1", default_value = "first",
param=c("TTP_t0","DWI_t0","MASK_t0","MASK_t2")
)
# Attach two sparse synthetic "noise" parameters (random 0/1 and 0/5 masks)
# merged into a single region parameter "nini".
region1 <- rbinom(selectN(MRIaggr.Pa1), size = 1, prob = 0.001)
region2 <- 5*rbinom(selectN(MRIaggr.Pa1), size = 1, prob = 0.001)
allocContrast(MRIaggr.Pa1,
param = c("noise3","noise5"),
ls.MergeParam = list(nini = c("noise3","noise5")),
overwrite = TRUE) <- cbind(region1,region2)
# selectContrast(MRIaggr.Pa1, subset = list(Lesion = "MASK_t2"))[["Lesion"]]
#### multiplotMRI ####
# Exercise multiplot/orthoplot with various slice orientations, axis limits,
# lesion overlays (filled and outline-only) and breakpoints.
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"))
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), xlim = c(10,45))
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), xlim = c(10,45), asp = NULL)
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), xlim = c(10,45), ylim = c(10,45))
orthoplot(MRIaggr.Pa1, param = "TTP_t0")
orthoplot(MRIaggr.Pa1, param = "DWI_t0")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), index1 = "Lesion")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_k = 2:3, slice_var = c("i","j","k"), index1 = "Lesion")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_k = 2:3, slice_var = c("i","j","k"),
index1 = list(subset = "Lesion", outline = TRUE))
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_k = 2:3, slice_var = c("i","j","k"),
index1 = list(subset = list(Lesion = "MASK_t0"), outline = TRUE),
index2 = list(subset = list(Lesion = "MASK_t2"), outline = TRUE)
)
df.contrast <- selectContrast(MRIaggr.Pa1, param = "TTP_t0", format = "vector", slice_k = 1)
df.coords <- selectCoords(MRIaggr.Pa1, slice_k = 1)
selectContrast(MRIaggr.Pa1, param = "TTP_t0", subset = c("nini","Lesion"), slice_i = 5)
# multiplot(df.coords, data.table(df.contrast), slice_var = c("i","j","k"), index1 = "Lesion")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("j","k","i"),
breaks = seq(0,38,1))
# multiplot(MRIaggr.Pa1, param = "TTP_t0", index
#### plotMRI ####
# Low-level plotMRI call on one slice, with quantile breaks and explicit
# NA styling.
df.contrast <- selectContrast(MRIaggr.Pa1, param = "TTP_t0", format = "vector", slice_k = 1)
df.coords <- selectCoords(MRIaggr.Pa1, slice_k = 1)
plotMRI(data.table(df.contrast), df.coords, breaks = quantile(df.contrast), palette = heat.colors(4),
col = NULL, asp = 1, xlim = NULL, ylim = NULL, pch = NULL, cex = NULL, axes = TRUE,
col.NA = "blue", pch.NA = 21, xlab = NULL, ylab = NULL, main = "xxx", cex.main = 1)
| /tests/test-MethodPlot.R | no_license | bozenne/MRIaggr | R | false | false | 3,306 | r | #### data ####
path.Pat1 <- system.file(file.path("nifti"), package = "MRIaggr")
nifti.Pat1_TTP_t0 <- readMRI(file.path(path.Pat1, "TTP_t0.nii"), format = "nifti")
nifti.Pat1_DWI_t0 <- readMRI(file.path(path.Pat1, "DWI_t0.nii"), format = "nifti")
nifti.Pat1_MASK_DWI_t0 <- readMRI(file.path(path.Pat1, "MASK_DWI_t0.nii"), format = "nifti")
nifti.Pat1_MASK_T2_FLAIR_t2 <- readMRI(file.path(path.Pat1, "MASK_T2_FLAIR_t2.nii"),
format = "nifti")
MRIaggr.Pa1 <- constMRIaggr(list(nifti.Pat1_TTP_t0, nifti.Pat1_DWI_t0,
nifti.Pat1_MASK_DWI_t0, nifti.Pat1_MASK_T2_FLAIR_t2),
format = "MRIaggr",
ls.MergeParam = list(Lesion = c("MASK_t0","MASK_t2")),
identifier= "Pat1", default_value = "first",
param=c("TTP_t0","DWI_t0","MASK_t0","MASK_t2")
)
region1 <- rbinom(selectN(MRIaggr.Pa1), size = 1, prob = 0.001)
region2 <- 5*rbinom(selectN(MRIaggr.Pa1), size = 1, prob = 0.001)
allocContrast(MRIaggr.Pa1,
param = c("noise3","noise5"),
ls.MergeParam = list(nini = c("noise3","noise5")),
overwrite = TRUE) <- cbind(region1,region2)
# selectContrast(MRIaggr.Pa1, subset = list(Lesion = "MASK_t2"))[["Lesion"]]
#### multiplotMRI ####
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"))
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), xlim = c(10,45))
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), xlim = c(10,45), asp = NULL)
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), xlim = c(10,45), ylim = c(10,45))
orthoplot(MRIaggr.Pa1, param = "TTP_t0")
orthoplot(MRIaggr.Pa1, param = "DWI_t0")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("i","j","k"), index1 = "Lesion")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_k = 2:3, slice_var = c("i","j","k"), index1 = "Lesion")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_k = 2:3, slice_var = c("i","j","k"),
index1 = list(subset = "Lesion", outline = TRUE))
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_k = 2:3, slice_var = c("i","j","k"),
index1 = list(subset = list(Lesion = "MASK_t0"), outline = TRUE),
index2 = list(subset = list(Lesion = "MASK_t2"), outline = TRUE)
)
df.contrast <- selectContrast(MRIaggr.Pa1, param = "TTP_t0", format = "vector", slice_k = 1)
df.coords <- selectCoords(MRIaggr.Pa1, slice_k = 1)
selectContrast(MRIaggr.Pa1, param = "TTP_t0", subset = c("nini","Lesion"), slice_i = 5)
# multiplot(df.coords, data.table(df.contrast), slice_var = c("i","j","k"), index1 = "Lesion")
multiplot(MRIaggr.Pa1, param = "TTP_t0", slice_var = c("j","k","i"),
breaks = seq(0,38,1))
# multiplot(MRIaggr.Pa1, param = "TTP_t0", index
#### plotMRI ####
df.contrast <- selectContrast(MRIaggr.Pa1, param = "TTP_t0", format = "vector", slice_k = 1)
df.coords <- selectCoords(MRIaggr.Pa1, slice_k = 1)
plotMRI(data.table(df.contrast), df.coords, breaks = quantile(df.contrast), palette = heat.colors(4),
col = NULL, asp = 1, xlim = NULL, ylim = NULL, pch = NULL, cex = NULL, axes = TRUE,
col.NA = "blue", pch.NA = 21, xlab = NULL, ylab = NULL, main = "xxx", cex.main = 1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imports.R
\name{import_function}
\alias{import_function}
\title{Import function}
\usage{
import_function()
}
\description{
Updates namespace
}
| /man/import_function.Rd | permissive | shackett/fluxr | R | false | true | 222 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imports.R
\name{import_function}
\alias{import_function}
\title{Import function}
\usage{
import_function()
}
\description{
Updates namespace
}
|
## Evan Wasner
## Econ 753
## Assignment 2, Question 1
## Set working directory (machine-specific path; adjust when running elsewhere)
setwd("I:/Evan/Documents/Umass/Econ 753/EvanWasner_Econ753_ProblemSets/Assignment 2")
## Libraries
library(foreign)
library(tidyverse)
library(nlme)
## Clear workspace of any objects left over from a previous session
rm(list = ls())
## Close an open graphics device, if any.  A bare dev.off() errors with
## "cannot shut down device 1 (the null device)" when no device is open
## (e.g. in a fresh non-interactive session), so guard it.
if (dev.cur() > 1) dev.off()
################
## Question 3 ##
## PART a ##
################
## Import data: Chow (1967) computer rental data from the Berndt course site
chow <- read.dta("http://courses.umass.edu/econ753/berndt/stata/chow.dta")
## Add columns: logs of rent/performance measures, memory = words*binary*digits,
## and year dummies for 1961-1965
chow <- mutate(chow, lnrent=log(rent), lnmult=log(mult), lnaccess=log(access),
lnadd=log(add), mem=words*binary*digits, lnmem=log(words*binary*digits),
d61=ifelse(year==61,1,0), d62=ifelse(year==62,1,0), d63=ifelse(year==63,1,0),
d64=ifelse(year==64,1,0), d65=ifelse(year==65,1,0))
## Filter data into different years (1954-59 and 1960-65 subsamples)
chow59 <- filter(chow, year>=54 & year<=59)
chow65 <- filter(chow, year>=60 & year<=65)
## Construct correlation matrices and save data
cor59 <- cor(select(chow59, lnrent, lnmult, lnaccess, lnadd, lnmem))
cor65 <- cor(select(chow65, lnrent, lnmult, lnaccess, lnadd, lnmem))
save(cor59,file="cor59.Rdata")
save(cor65,file="cor65.Rdata")
################
## Question 3 ##
## PART b ##
################
## Run hedonic regression of lnrent on year dummies + characteristics, save it
chow.lm65 <- lm(lnrent ~ d61 + d62 + d63 + d64 + d65 + lnmult + lnmem + lnaccess,
data=chow65)
save(chow.lm65,file="chow.lm65.Rdata")
## Create table for price indices.
## NOTE(review): coefficients[1:6] pairs 1960 with the intercept and 1961-65
## with their dummies; the 1960 index is forced to 1 below, so the intercept
## is never exponentiated -- but the table's first "Estimated_Coefficient"
## entry is the intercept, not a year effect.
priceIndexTable <- data.frame(Year=c("1960", "1961", "1962", "1963", "1964", "1965"),
Estimated_Coefficient=chow.lm65$coefficients[1:6])
priceIndexTable <- mutate(priceIndexTable, Price_Index=ifelse(Year==1960,1,exp(Estimated_Coefficient)))
save(priceIndexTable,file="priceIndexTable.Rdata")
################
## Question 3 ##
## PART e ##
################
## Weighted least squares by hand: multiply every variable (including the
## constant, via generalizedvolume) by sqrt(volume)
chow65 <- mutate(chow65, generalizedvolume=sqrt(volume),
generalizedlnrent=lnrent*sqrt(volume),
generalizedlnmult=lnmult*sqrt(volume),
generalizedlnaccess=lnaccess*sqrt(volume),
generalizedlnadd=lnadd*sqrt(volume),
generalizedlnmem=lnmem*sqrt(volume),
generalizedd61=d61*sqrt(volume),
generalizedd62=d62*sqrt(volume),
generalizedd63=d63*sqrt(volume),
generalizedd64=d64*sqrt(volume),
generalizedd65=d65*sqrt(volume))
## NOTE(review): the dummies here are the untransformed d61-d65, while the
## response and the other regressors are sqrt(volume)-scaled -- confirm
## whether generalizedd61-d65 were intended instead.
chow.generalized.lm65 <- lm(generalizedlnrent ~ d61 + d62 + d63 + d64 + d65 +
generalizedlnmult + generalizedlnmem + generalizedlnaccess,
data=chow65)
## nlme::gls with a one-sided formula for weights; presumably intended as a
## fixed variance structure varFixed(~sqrt(volume)) -- TODO confirm.
chow.gls65 <- gls(lnrent ~ d61 + d62 + d63 + d64 + d65 + lnmult + lnmem + lnaccess,
data=chow65, weights=~sqrt(volume))
## This is just personal stuff testing out how to do regressions with matrices, ignore...
## OLS by hand: X = [1, regressors], beta = (X'X)^{-1} X'y
xMatrix <- cbind(numeric(82)+1,
as.matrix(select(chow65, lnmult, lnaccess, lnmem,
d61, d62, d63, d64, d65)))
yMatrix <- as.matrix(chow65$lnrent)
betaMatrix <- solve(t(xMatrix) %*% xMatrix) %*% t(xMatrix) %*% yMatrix
summary(chow.lm65)
## Residuals, residual variance (n=82, k=9), and coefficient SEs
uhat <- yMatrix - xMatrix %*% betaMatrix
varCovarMatrix <- t(uhat) %*% uhat
sigma <- t(uhat) %*% uhat / (82 - 9)
diag(sigma[1] * solve(t(xMatrix) %*% xMatrix))
## GLS with Matrix Algebra (same normal equations on the sqrt(volume)-scaled data)
xMatrixgls <- as.matrix(select(chow65, generalizedvolume, generalizedlnmult, generalizedlnaccess, generalizedlnmem,
generalizedd61, generalizedd62, generalizedd63, generalizedd64, generalizedd65))
yMatrixgls <- as.matrix(chow65$generalizedlnrent)
betaMatrixgls <- solve(t(xMatrixgls) %*% xMatrixgls) %*% t(xMatrixgls) %*% yMatrixgls
betaMatrixgls
summary(chow.gls65)
## Econ 753
## Assignment 2, Question 1
## Chow (1967) computer-price hedonics on the Berndt course data:
## correlations, OLS price indices from year dummies, and a weighted /
## generalized least squares comparison with matrix-algebra cross-checks.
## Set working directory
setwd("I:/Evan/Documents/Umass/Econ 753/EvanWasner_Econ753_ProblemSets/Assignment 2")
## Libraries
library(foreign)
library(tidyverse)
library(nlme)
## Clear workplace
rm(list = ls())
## NOTE(review): dev.off() errors when no graphics device is open, so a fresh
## session stops here -- consider try(dev.off(), silent = TRUE) or deleting.
dev.off()
################
## Question 3 ##
## PART a ##
################
## Import data
chow <- read.dta("http://courses.umass.edu/econ753/berndt/stata/chow.dta")
## Add columns
## Log transforms, memory = words*binary*digits, and year dummies d61-d65
## (1960 is the omitted base year, cf. the price-index table below).
chow <- mutate(chow, lnrent=log(rent), lnmult=log(mult), lnaccess=log(access),
lnadd=log(add), mem=words*binary*digits, lnmem=log(words*binary*digits),
d61=ifelse(year==61,1,0), d62=ifelse(year==62,1,0), d63=ifelse(year==63,1,0),
d64=ifelse(year==64,1,0), d65=ifelse(year==65,1,0))
## Filter data into different years
chow59 <- filter(chow, year>=54 & year<=59)
chow65 <- filter(chow, year>=60 & year<=65)
## Construct correlation matrices and save data
cor59 <- cor(select(chow59, lnrent, lnmult, lnaccess, lnadd, lnmem))
cor65 <- cor(select(chow65, lnrent, lnmult, lnaccess, lnadd, lnmem))
save(cor59,file="cor59.Rdata")
save(cor65,file="cor65.Rdata")
################
## Question 3 ##
## PART b ##
################
## Run regression of lnrent and save data
chow.lm65 <- lm(lnrent ~ d61 + d62 + d63 + d64 + d65 + lnmult + lnmem + lnaccess,
data=chow65)
save(chow.lm65,file="chow.lm65.Rdata")
## Create table for price indices
## Index = exp(year-dummy coefficient); base year 1960 is fixed at 1.
priceIndexTable <- data.frame(Year=c("1960", "1961", "1962", "1963", "1964", "1965"),
Estimated_Coefficient=chow.lm65$coefficients[1:6])
priceIndexTable <- mutate(priceIndexTable, Price_Index=ifelse(Year==1960,1,exp(Estimated_Coefficient)))
save(priceIndexTable,file="priceIndexTable.Rdata")
################
## Question 3 ##
## PART e ##
################
## Manual weighted least squares: every regressor (including the intercept
## column, generalizedvolume) is scaled by sqrt(volume).
chow65 <- mutate(chow65, generalizedvolume=sqrt(volume),
generalizedlnrent=lnrent*sqrt(volume),
generalizedlnmult=lnmult*sqrt(volume),
generalizedlnaccess=lnaccess*sqrt(volume),
generalizedlnadd=lnadd*sqrt(volume),
generalizedlnmem=lnmem*sqrt(volume),
generalizedd61=d61*sqrt(volume),
generalizedd62=d62*sqrt(volume),
generalizedd63=d63*sqrt(volume),
generalizedd64=d64*sqrt(volume),
generalizedd65=d65*sqrt(volume))
chow.generalized.lm65 <- lm(generalizedlnrent ~ d61 + d62 + d63 + d64 + d65 +
generalizedlnmult + generalizedlnmem + generalizedlnaccess,
data=chow65)
## NOTE(review): the manual scaling above implies Var(e) proportional to
## 1/volume, while gls() with a one-sided weights formula is interpreted as
## varFixed(~sqrt(volume)), i.e. Var(e) proportional to sqrt(volume) --
## confirm the two specifications are meant to agree.
chow.gls65 <- gls(lnrent ~ d61 + d62 + d63 + d64 + d65 + lnmult + lnmem + lnaccess,
data=chow65, weights=~sqrt(volume))
## This is just personal stuff testing out how to do regressions with matrices, ignore...
## OLS by hand: beta = (X'X)^{-1} X'y.
## NOTE(review): 82 is hard-coded as the 1960-65 sample size (and 9 as the
## parameter count) -- verify against nrow(chow65).
xMatrix <- cbind(numeric(82)+1,
as.matrix(select(chow65, lnmult, lnaccess, lnmem,
d61, d62, d63, d64, d65)))
yMatrix <- as.matrix(chow65$lnrent)
betaMatrix <- solve(t(xMatrix) %*% xMatrix) %*% t(xMatrix) %*% yMatrix
summary(chow.lm65)
## Residuals from the hand-rolled OLS fit
uhat <- yMatrix - xMatrix %*% betaMatrix
## NOTE(review): t(uhat) %*% uhat is the 1x1 residual sum of squares, not a
## variance-covariance matrix -- the name is misleading.
varCovarMatrix <- t(uhat) %*% uhat
## sigma-hat^2 = SSR / (n - k)
sigma <- t(uhat) %*% uhat / (82 - 9)
## Coefficient variances: diag(sigma-hat^2 * (X'X)^{-1})
diag(sigma[1] * solve(t(xMatrix) %*% xMatrix))
## GLS with Matrix Algebra
xMatrixgls <- as.matrix(select(chow65, generalizedvolume, generalizedlnmult, generalizedlnaccess, generalizedlnmem,
generalizedd61, generalizedd62, generalizedd63, generalizedd64, generalizedd65))
yMatrixgls <- as.matrix(chow65$generalizedlnrent)
betaMatrixgls <- solve(t(xMatrixgls) %*% xMatrixgls) %*% t(xMatrixgls) %*% yMatrixgls
betaMatrixgls
summary(chow.gls65) |
# Packages -----------------
library(downloader)
library(stringr)
library(datastorr)
library(baad.data)
library(multcomp)
library(doBy)
library(mgcv)
library(lmerTest)
library(car)
library(MuMIn)
library(hier.part)
library(gtools)
library(magicaxis)
library(RColorBrewer)
library(hexbin)
library(xtable)
library(knitr)
library(png)
library(grid)
library(gridBase)
library(gridExtra)
library(tinytex)
# source function scripts -----------------
source("R/data_processing.R")
source("R/tables_stats.R")
source("R/figures.R")
source("R/functions-figures.R")
source("R/signifletters.R")
source("R/build.R")
source("R/manuscript_functions.R")
# Helpers -----------------
# Render `plot_fn` into a PDF at `path`.  on.exit() guarantees the device is
# closed even when the plotting function errors, so a failed figure no longer
# leaves a dangling open graphics device (as bare pdf()/dev.off() pairs do).
render_pdf <- function(path, width, height, plot_fn) {
  pdf(path, width = width, height = height)
  on.exit(dev.off(), add = TRUE)
  plot_fn()
  invisible(path)
}
# Data --------------------------------------
baad_all <- baad_data("1.0.0")
baad_climate1 <- addWorldClimMAPMAT(baad_all, "data/worldclimmapmat.rds")
baad_mapmat <- prepare_baadmapmat(baad_climate1)
world_mapmat <- prepare_worldmapmat("data/worldclim_landcover_climspace_withcover.rds")
baad_climate2 <- addMImgdd0(baad_climate1, "data/MI_mGDDD_landcover_filtered.rds")
baad_climate3 <- addPET(baad_climate2, "data/zomerpet.rds")
dataset <- prepare_dataset_1(baad_climate3, plantations=TRUE)
# download_baad("downloads/baad.rds")
cfg <- extract_baad_dictionary(baad_all)
# Stats & tables --------------------------------------
basalafit <- BasalA_fit(baad_all)
table_varpart_gam_old <- make_table_gamr2MATMAP_old(dataset)
table_varpart_gam <- make_table_gamr2MATARID(dataset)
table_varpart_lmer <- mixedr2(dataset)
table_hierpart <- make_table_hierpart(dataset)
afas <- af_as_stat(dataset)
msas <- ms_as_stat(dataset)
table_samplesize <- make_samplesize_table(dataset)
# Figures --------------------------------------
# comment this out because can't download from internet within the virtual machine.
# dir.create('downloads')
# download_tree_png("downloads/ian-symbol-eucalyptus-spp-1.png")
dir.create("figures", showWarnings = FALSE)  # silent when the directory already exists
render_pdf("figures/Figure1.pdf", 8, 4, function()
  figure1(baad_mapmat, world_mapmat, "downloads/ian-symbol-eucalyptus-spp-1.png"))
render_pdf("figures/Figure2.pdf", 8, 4, function() figure2(dataset))
render_pdf("figures/Figure3.pdf", 8, 6, function() figure3(dataset))
render_pdf("figures/Figure4.pdf", 8, 4, function() figure4(dataset, nbin=75))
render_pdf("figures/Figure5.pdf", 8, 4, function() figure5(dataset))
render_pdf("figures/Figure6.pdf", 8, 4, function() figure6(dataset))
render_pdf("figures/FigureS1.pdf", 6, 6, function() figureS1(baad_mapmat, world_mapmat))
render_pdf("figures/FigureS2.pdf", 8, 4, function()
  figureS2(table_hierpart,table_varpart_gam_old,table_varpart_lmer))
render_pdf("figures/FigureS3.pdf", 8, 4, function() figureS3(dataset))
render_pdf("figures/FigureS4.pdf", 8, 4, function() figureS4(dataset))
render_pdf("figures/FigureS5.pdf", 8, 4, function() figureS5(dataset))
# Documents -------------
knitr::knit("ms/manuscript.Rnw", output = "ms/manuscript.tex")
pdflatex("ms/manuscript.tex")
knitr::knit("ms/manuscript_suppinfo.Rnw", output = "ms/manuscript_suppinfo.tex")
pdflatex("ms/manuscript_suppinfo.tex")
| /analysis.R | permissive | traitecoevo/baadanalysis | R | false | false | 3,110 | r |
# Packages -----------------
library(downloader)
library(stringr)
library(datastorr)
library(baad.data)
library(multcomp)
library(doBy)
library(mgcv)
library(lmerTest)
library(car)
library(MuMIn)
library(hier.part)
library(gtools)
library(magicaxis)
library(RColorBrewer)
library(hexbin)
library(xtable)
library(knitr)
library(png)
library(grid)
library(gridBase)
library(gridExtra)
library(tinytex)
# source function scripts -----------------
source("R/data_processing.R")
source("R/tables_stats.R")
source("R/figures.R")
source("R/functions-figures.R")
source("R/signifletters.R")
source("R/build.R")
source("R/manuscript_functions.R")
# Helpers -----------------
# Render `plot_fn` into a PDF at `path`.  on.exit() guarantees the device is
# closed even when the plotting function errors, so a failed figure no longer
# leaves a dangling open graphics device (as bare pdf()/dev.off() pairs do).
render_pdf <- function(path, width, height, plot_fn) {
  pdf(path, width = width, height = height)
  on.exit(dev.off(), add = TRUE)
  plot_fn()
  invisible(path)
}
# Data --------------------------------------
baad_all <- baad_data("1.0.0")
baad_climate1 <- addWorldClimMAPMAT(baad_all, "data/worldclimmapmat.rds")
baad_mapmat <- prepare_baadmapmat(baad_climate1)
world_mapmat <- prepare_worldmapmat("data/worldclim_landcover_climspace_withcover.rds")
baad_climate2 <- addMImgdd0(baad_climate1, "data/MI_mGDDD_landcover_filtered.rds")
baad_climate3 <- addPET(baad_climate2, "data/zomerpet.rds")
dataset <- prepare_dataset_1(baad_climate3, plantations=TRUE)
# download_baad("downloads/baad.rds")
cfg <- extract_baad_dictionary(baad_all)
# Stats & tables --------------------------------------
basalafit <- BasalA_fit(baad_all)
table_varpart_gam_old <- make_table_gamr2MATMAP_old(dataset)
table_varpart_gam <- make_table_gamr2MATARID(dataset)
table_varpart_lmer <- mixedr2(dataset)
table_hierpart <- make_table_hierpart(dataset)
afas <- af_as_stat(dataset)
msas <- ms_as_stat(dataset)
table_samplesize <- make_samplesize_table(dataset)
# Figures --------------------------------------
# comment this out because can't download from internet within the virtual machine.
# dir.create('downloads')
# download_tree_png("downloads/ian-symbol-eucalyptus-spp-1.png")
dir.create("figures", showWarnings = FALSE)  # silent when the directory already exists
render_pdf("figures/Figure1.pdf", 8, 4, function()
  figure1(baad_mapmat, world_mapmat, "downloads/ian-symbol-eucalyptus-spp-1.png"))
render_pdf("figures/Figure2.pdf", 8, 4, function() figure2(dataset))
render_pdf("figures/Figure3.pdf", 8, 6, function() figure3(dataset))
render_pdf("figures/Figure4.pdf", 8, 4, function() figure4(dataset, nbin=75))
render_pdf("figures/Figure5.pdf", 8, 4, function() figure5(dataset))
render_pdf("figures/Figure6.pdf", 8, 4, function() figure6(dataset))
render_pdf("figures/FigureS1.pdf", 6, 6, function() figureS1(baad_mapmat, world_mapmat))
render_pdf("figures/FigureS2.pdf", 8, 4, function()
  figureS2(table_hierpart,table_varpart_gam_old,table_varpart_lmer))
render_pdf("figures/FigureS3.pdf", 8, 4, function() figureS3(dataset))
render_pdf("figures/FigureS4.pdf", 8, 4, function() figureS4(dataset))
render_pdf("figures/FigureS5.pdf", 8, 4, function() figureS5(dataset))
# Documents -------------
knitr::knit("ms/manuscript.Rnw", output = "ms/manuscript.tex")
pdflatex("ms/manuscript.tex")
knitr::knit("ms/manuscript_suppinfo.Rnw", output = "ms/manuscript_suppinfo.tex")
pdflatex("ms/manuscript_suppinfo.tex")
|
# Code block (exp) is assumed to throw an error, to be compared against (expected_regexp)
# All other options are handed to grepl()
# Evaluate `code`, which is expected to raise an error, and compare the error
# message against `expected_regexp` with grepl().
#
# Returns TRUE when the message matches; otherwise a character vector
# describing the mismatch (unittest-style comparison result).  If `code`
# completes without raising, returns "No error returned".
#
# `ignore.case`, `perl` and `fixed` are passed straight through to grepl().
ut_cmp_error <- function(code, expected_regexp, ignore.case = FALSE, perl = FALSE, fixed = FALSE) {
    tryCatch({
        code
        return("No error returned")
    }, error = function(e) {
        # conditionMessage() is the generic accessor, so custom condition
        # classes that do not carry a plain $message field are handled too.
        msg <- conditionMessage(e)
        if (grepl(expected_regexp, msg, ignore.case = ignore.case, perl = perl, fixed = fixed)) {
            return(TRUE)
        }
        return(c(msg, "Did not match:-", expected_regexp))
    })
}
| /R/ut_cmp_error.R | no_license | ravingmantis/unittest | R | false | false | 538 | r | # Code block (exp) is assumed to throw an error, to be compared against (expected_regexp)
# All other options are handed to grepl()
# Evaluate `code`, which is expected to raise an error, and compare the error
# message against `expected_regexp` with grepl().
#
# Returns TRUE when the message matches; otherwise a character vector
# describing the mismatch (unittest-style comparison result).  If `code`
# completes without raising, returns "No error returned".
#
# `ignore.case`, `perl` and `fixed` are passed straight through to grepl().
ut_cmp_error <- function(code, expected_regexp, ignore.case = FALSE, perl = FALSE, fixed = FALSE) {
    tryCatch({
        code
        return("No error returned")
    }, error = function(e) {
        # conditionMessage() is the generic accessor, so custom condition
        # classes that do not carry a plain $message field are handled too.
        msg <- conditionMessage(e)
        if (grepl(expected_regexp, msg, ignore.case = ignore.case, perl = perl, fixed = fixed)) {
            return(TRUE)
        }
        return(c(msg, "Did not match:-", expected_regexp))
    })
}
|
#' Demo Stimuli
#'
#' Convenience accessor for the demo stimulus sets: the "test" set is bundled
#' with webmorphR itself, all other sets live in the optional stimsets package.
#'
#' @param dir the directory in extdata to get files from
#' @param pattern defaults to all files
#' @param ... Other arguments to pass on to `read_tem()`
#'
#' @return stimlist
#' @export
#'
#' @examples
#' demo_stim() %>% plot()
#'
demo_stim <- function(dir = c("test", "composite", "london", "smiling", "lisa", "zoom", "rainbow"),
                      pattern = NULL, ...) {
  dir <- match.arg(dir)
  # Locate the requested set: "test" ships inside webmorphR's extdata,
  # everything else requires the stimsets package to be installed.
  path <- if (dir == "test") {
    system.file(file.path("extdata", dir), package = "webmorphR")
  } else if (requireNamespace("stimsets", quietly = TRUE)) {
    system.file(file.path(dir), package = "stimsets")
  } else {
    stop("You need to install the package stimsets to access these demo images\nremotes::install_github(\"debruine/stimsets\")")
  }
  read_stim(path, pattern, ...)
}
| /R/demo_stim.R | permissive | iPsych/webmorphR | R | false | false | 918 | r | #' Demo Stimuli
#'
#' A convenience function to get demo stimuli
#'
#' @param dir the directory in extdata to get files from
#' @param pattern defaults to all files
#' @param ... Other arguments to pass on to `read_tem()`
#'
#' @return stimlist
#' @export
#'
#' @examples
#' demo_stim() %>% plot()
#'
demo_stim <- function(dir = c("test", "composite", "london", "smiling", "lisa", "zoom", "rainbow"),
                      pattern = NULL, ...) {
  # Validate the requested demo set against the known choices.
  dir <- match.arg(dir)
  # "test" ships inside webmorphR's extdata; all other sets require the
  # optional stimsets package.
  if (dir == "test") {
    path <- system.file(file.path("extdata", dir), package = "webmorphR")
  } else {
    if (!requireNamespace("stimsets", quietly = TRUE)) {
      stop("You need to install the package stimsets to access these demo images\nremotes::install_github(\"debruine/stimsets\")")
    }
    path <- system.file(file.path(dir), package = "stimsets")
  }
  read_stim(path, pattern, ...)
}
|
## Logistic-regression lab on the German credit-rating data.
## Pipeline: read the two worksheet parts -> merge on OBS -> convert columns
## to factor/numeric -> drop incomplete rows -> 70/30 split -> full GLM ->
## stepAIC feature selection -> confusion metrics -> ROC/AUC.
rm(list=ls())
## NOTE(review): choose.dir() is interactive (and Windows-only), so this
## script cannot run unattended.
setwd(choose.dir())
library(XLConnect)
# read data from part 1 and part 2 of the sheets
Part1 <- readWorksheetFromFile("German_CreditRating.xls",sheet=2)
Part2 <- readWorksheetFromFile("German_CreditRating.xls",sheet=3)
names(Part1)
names(Part2)
#merge them by observation ids
CustData <- merge(Part1,Part2,by="OBS",all.x=TRUE)
# structure of data types
names(CustData)
str(CustData)
#note there are few missing observations
summary(CustData)
#CustData = CustData[,-1]
#type conversion
# columns 2,9,14,18,19,23 stay numeric; everything else (minus OBS, col 1)
# is treated as categorical
num_vars = c(2,9,14,18,19,23)
#Catdata <- CustData[,c(3:8, 10:13, 15:17, 20:22, 24:29)]
Catdata = CustData[,-c(1,num_vars)]
Catdata <- data.frame(apply(Catdata,2,factor))
str(Catdata)
NumData<- data.frame(CustData[,c(2,9,14,18,19,23)])
NewData <- cbind(Catdata,NumData)
str(NewData)
summary(NewData)
#check for missing values
sum(is.na((NewData)))
which(apply(NewData, 1, function(x){sum(is.na(x))})!=0)
dim(NewData)
# complete-case analysis (the KNN-imputation alternative below is disabled)
Full_data = na.omit(NewData)
dim(Full_data)
# #Knn imputation
# library(DMwR)
# data_imputed<-knnImputation(NewData,k=1) #KNN Imputation
# sum(is.na(data_imputed))
table(Full_data$RESPONSE)
# #Split the data into train and test data sets
# data_imputed = Full_data
# rows=seq(1,nrow(data_imputed),1)
# set.seed(123)
# trainRows=sample(rows,(70*nrow(data_imputed))/100)
# train = data_imputed[trainRows,]
# test = data_imputed[-trainRows,]
# names(train)
library(caret)
## Stratified 70/30 split on RESPONSE.
## NOTE(review): no set.seed() before createDataPartition, so the split (and
## every downstream number) changes between runs; the disabled manual split
## above did seed the RNG.
train_rows = createDataPartition(y = Full_data$RESPONSE,
p=0.7, list = F)
train = Full_data[train_rows,]
test = Full_data[-train_rows,]
table(Full_data$RESPONSE)
table(train$RESPONSE)
table(test$RESPONSE)
#logistic regression model
# dependent variable = Churned
## NOTE(review): the comment above says "Churned" but the response modelled
## here is RESPONSE (credit rating) -- looks copied from another lab sheet.
LogReg <- glm(RESPONSE ~., data=train,family=binomial)
summary(LogReg)
residuals(LogReg)
fitted(LogReg)
#deviance(LogReg)
table(train$RESPONSE)
table(test$RESPONSE)
#To get the significance for the overall model we use the following command
# 1-pchisq(deviance(LogReg), df=df.residual(LogReg))
# now let us select features using StepAIC
library(MASS)
# bidirectional stepwise AIC search starting from the full model
step = stepAIC(LogReg, direction="both")
step$anova
BestModel <- glm(RESPONSE ~ NEW_CAR + USED_CAR + EDUCATION + GUARANTOR + OTHER_INSTALL +
OWN_RES + FOREIGN + CHK_ACCT + HISTORY + SAV_ACCT + EMPLOYMENT +
PRESENT_RESIDENT + DURATION + INSTALL_RATE + AGE + AMOUNT, data=train, family=binomial)
summary(BestModel)
coefficients(BestModel)
## NOTE(review): vif() is presumably car::vif, but library(car) is never
## attached in this script -- this line errors unless car is loaded elsewhere.
vif(BestModel)
#interpretation
#What is the equation?
#log(p/1-p) = 2.32 -.073*NewCar1+0.018*UserCar1-...+0.00269*Age-0.00000737*Amount
#For every 1 unit change in Age the log odds of response = credit rating is good
#(versus non-response) increases by 0.0267
#Having the purpose of credit Newcar1 = 1 versus not,changes the
#log odds of Credit rating (good) by -0.0735
#goodness of fit
#1-pchisq(deviance(BestModel), df=df.residual(BestModel))
#the higher the better and evidence to reject the
#hypothesis that the fitted model is correct
## NOTE(review): dev.off() errors when no graphics device is open.
dev.off()
plot(train$RESPONSE, BestModel$fitted.values, type="p")
abline(0.5,0)
## Train-set confusion metrics at a 0.7 probability cutoff.
## Rows of `metrics` are the actual class (0,1), columns the predicted class,
## so [2,2] counts true positives.
prob<-predict(BestModel, type="response")
pred_class <- factor(ifelse(prob>0.7, 1, 0))
metrics = table(train$RESPONSE,pred_class)
metrics
accuracy =(metrics[1,1]+metrics[2,2])/(length(pred_class))
Recall = metrics[2,2]/(metrics[2,2]+metrics[2,1])
Precision = metrics[2,2]/(metrics[2,2]+metrics[1,2])
accuracy
Recall
Precision
### on test data
## NOTE(review): the test-set cutoff is 0.5 while the training cutoff above
## is 0.7 -- confirm the mismatch is intentional.
probt <- predict(BestModel, newdata = test, type="response")
predt_class <- factor(ifelse(probt>0.5, 1, 0))
metricst = table(test$RESPONSE,predt_class)
metricst
accuracyt =(metricst[1,1]+metricst[2,2])/(length(predt_class))
Recallt = metricst[2,2]/(metricst[2,2]+metricst[2,1])
Precisiont = metricst[2,2]/(metricst[2,2]+metricst[1,2])
accuracyt
Recallt
Precisiont
library(ROCR)
library(ggplot2)
## ROC curve and AUC computed on the *training* probabilities
pred <- prediction(prob, train$RESPONSE)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
#perf
plot(perf, col=rainbow(10),
colorize=T,
print.cutoffs.at = seq(0,1,0.1))
auc <- performance(pred, measure = "auc")
auc <- auc@y.values[[1]]
auc
roc.data <- data.frame(fpr=unlist(perf@x.values),
tpr=unlist(perf@y.values),
model="GLM")
ggplot(roc.data, aes(x=fpr, ymin=0, ymax=tpr)) +
geom_ribbon(alpha=0.2) +
geom_line(aes(y=tpr)) +
ggtitle(paste0("ROC Curve w/ AUC=", auc))
| /Day_24_LogisticRegression_Practise_Assignment/20170129_Batch25_CSE7202c_LabDay_ActivitySheet/20170129_Batch25_CSE7202c_LabDay_LogReg.R | no_license | yarramashok/Batch-1-Beseant-Tech | R | false | false | 4,346 | r | rm(list=ls())
## Logistic-regression lab on the German credit-rating data.
## Pipeline: read the two worksheet parts -> merge on OBS -> convert columns
## to factor/numeric -> drop incomplete rows -> 70/30 split -> full GLM ->
## stepAIC feature selection -> confusion metrics -> ROC/AUC.
## NOTE(review): choose.dir() is interactive (and Windows-only), so this
## script cannot run unattended.
setwd(choose.dir())
library(XLConnect)
# read data from part 1 and part 2 of the sheets
Part1 <- readWorksheetFromFile("German_CreditRating.xls",sheet=2)
Part2 <- readWorksheetFromFile("German_CreditRating.xls",sheet=3)
names(Part1)
names(Part2)
#merge them by observation ids
CustData <- merge(Part1,Part2,by="OBS",all.x=TRUE)
# structure of data types
names(CustData)
str(CustData)
#note there are few missing observations
summary(CustData)
#CustData = CustData[,-1]
#type conversion
# columns 2,9,14,18,19,23 stay numeric; everything else (minus OBS, col 1)
# is treated as categorical
num_vars = c(2,9,14,18,19,23)
#Catdata <- CustData[,c(3:8, 10:13, 15:17, 20:22, 24:29)]
Catdata = CustData[,-c(1,num_vars)]
Catdata <- data.frame(apply(Catdata,2,factor))
str(Catdata)
NumData<- data.frame(CustData[,c(2,9,14,18,19,23)])
NewData <- cbind(Catdata,NumData)
str(NewData)
summary(NewData)
#check for missing values
sum(is.na((NewData)))
which(apply(NewData, 1, function(x){sum(is.na(x))})!=0)
dim(NewData)
# complete-case analysis (the KNN-imputation alternative below is disabled)
Full_data = na.omit(NewData)
dim(Full_data)
# #Knn imputation
# library(DMwR)
# data_imputed<-knnImputation(NewData,k=1) #KNN Imputation
# sum(is.na(data_imputed))
table(Full_data$RESPONSE)
# #Split the data into train and test data sets
# data_imputed = Full_data
# rows=seq(1,nrow(data_imputed),1)
# set.seed(123)
# trainRows=sample(rows,(70*nrow(data_imputed))/100)
# train = data_imputed[trainRows,]
# test = data_imputed[-trainRows,]
# names(train)
library(caret)
## Stratified 70/30 split on RESPONSE.
## NOTE(review): no set.seed() before createDataPartition, so the split (and
## every downstream number) changes between runs; the disabled manual split
## above did seed the RNG.
train_rows = createDataPartition(y = Full_data$RESPONSE,
p=0.7, list = F)
train = Full_data[train_rows,]
test = Full_data[-train_rows,]
table(Full_data$RESPONSE)
table(train$RESPONSE)
table(test$RESPONSE)
#logistic regression model
# dependent variable = Churned
## NOTE(review): the comment above says "Churned" but the response modelled
## here is RESPONSE (credit rating) -- looks copied from another lab sheet.
LogReg <- glm(RESPONSE ~., data=train,family=binomial)
summary(LogReg)
residuals(LogReg)
fitted(LogReg)
#deviance(LogReg)
table(train$RESPONSE)
table(test$RESPONSE)
#To get the significance for the overall model we use the following command
# 1-pchisq(deviance(LogReg), df=df.residual(LogReg))
# now let us select features using StepAIC
library(MASS)
# bidirectional stepwise AIC search starting from the full model
step = stepAIC(LogReg, direction="both")
step$anova
BestModel <- glm(RESPONSE ~ NEW_CAR + USED_CAR + EDUCATION + GUARANTOR + OTHER_INSTALL +
OWN_RES + FOREIGN + CHK_ACCT + HISTORY + SAV_ACCT + EMPLOYMENT +
PRESENT_RESIDENT + DURATION + INSTALL_RATE + AGE + AMOUNT, data=train, family=binomial)
summary(BestModel)
coefficients(BestModel)
## NOTE(review): vif() is presumably car::vif, but library(car) is never
## attached in this script -- this line errors unless car is loaded elsewhere.
vif(BestModel)
#interpretation
#What is the equation?
#log(p/1-p) = 2.32 -.073*NewCar1+0.018*UserCar1-...+0.00269*Age-0.00000737*Amount
#For every 1 unit change in Age the log odds of response = credit rating is good
#(versus non-response) increases by 0.0267
#Having the purpose of credit Newcar1 = 1 versus not,changes the
#log odds of Credit rating (good) by -0.0735
#goodness of fit
#1-pchisq(deviance(BestModel), df=df.residual(BestModel))
#the higher the better and evidence to reject the
#hypothesis that the fitted model is correct
## NOTE(review): dev.off() errors when no graphics device is open.
dev.off()
plot(train$RESPONSE, BestModel$fitted.values, type="p")
abline(0.5,0)
## Train-set confusion metrics at a 0.7 probability cutoff.
## Rows of `metrics` are the actual class (0,1), columns the predicted class,
## so [2,2] counts true positives.
prob<-predict(BestModel, type="response")
pred_class <- factor(ifelse(prob>0.7, 1, 0))
metrics = table(train$RESPONSE,pred_class)
metrics
accuracy =(metrics[1,1]+metrics[2,2])/(length(pred_class))
Recall = metrics[2,2]/(metrics[2,2]+metrics[2,1])
Precision = metrics[2,2]/(metrics[2,2]+metrics[1,2])
accuracy
Recall
Precision
### on test data
## NOTE(review): the test-set cutoff is 0.5 while the training cutoff above
## is 0.7 -- confirm the mismatch is intentional.
probt <- predict(BestModel, newdata = test, type="response")
predt_class <- factor(ifelse(probt>0.5, 1, 0))
metricst = table(test$RESPONSE,predt_class)
metricst
accuracyt =(metricst[1,1]+metricst[2,2])/(length(predt_class))
Recallt = metricst[2,2]/(metricst[2,2]+metricst[2,1])
Precisiont = metricst[2,2]/(metricst[2,2]+metricst[1,2])
accuracyt
Recallt
Precisiont
library(ROCR)
library(ggplot2)
## ROC curve and AUC computed on the *training* probabilities
pred <- prediction(prob, train$RESPONSE)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
#perf
plot(perf, col=rainbow(10),
colorize=T,
print.cutoffs.at = seq(0,1,0.1))
auc <- performance(pred, measure = "auc")
auc <- auc@y.values[[1]]
auc
roc.data <- data.frame(fpr=unlist(perf@x.values),
tpr=unlist(perf@y.values),
model="GLM")
ggplot(roc.data, aes(x=fpr, ymin=0, ymax=tpr)) +
geom_ribbon(alpha=0.2) +
geom_line(aes(y=tpr)) +
ggtitle(paste0("ROC Curve w/ AUC=", auc))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cars_simulations_functions.R
\name{cars_ttest_data}
\alias{cars_ttest_data}
\title{Simulation: Two-sample t-test}
\usage{
cars_ttest_data(
m = 10000,
k = 10,
n_x = 50,
n_y = 50,
mu_x_1 = 3/sqrt(n_x),
mu_y_1 = 0,
mu_x_2 = 1/sqrt(n_x),
mu_y_2 = 1/sqrt(n_y),
sd_x = 1,
sd_y = 1
)
}
\arguments{
\item{m}{Number of hypotheses (default: m=10000)}
\item{k}{Number of alternatives (default: k=10)}
\item{n_x}{Number of samples for X (first group) in each test (default: n_x=50)}
\item{n_y}{Number of samples for Y (second group) in each test (default: n_y=50)}
\item{mu_x_1}{Signal strength for the first k locations of X}
\item{mu_y_1}{Signal strength for the first k locations of Y (default: 0)}
\item{mu_x_2}{Signal strength for locations (k+1) to 2k of X}
\item{mu_y_2}{Signal strength for locations (k+1) to 2k of Y}
\item{sd_x}{Standard deviation of each measurement from X}
\item{sd_y}{Standard deviation of each measurement from Y}
}
\value{
List with entries `H` (0/1 vector with null or alternative), `x` (Matrix of dimension m * n_x with X data),
`y` (Matrix with `Y` data) and `var_mat` (Matrix with 2 columns containing the measurement variance for each test * group)
}
\description{
Simulation: Two-sample t-test
}
\references{
The code here is a modification of the example code in the CARS package vignette.
}
| /IHWStatsPaper/man/cars_ttest_data.Rd | permissive | Huber-group-EMBL/covariate-powered-cross-weighted-multiple-testing | R | false | true | 1,422 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cars_simulations_functions.R
\name{cars_ttest_data}
\alias{cars_ttest_data}
\title{Simulation: Two-sample t-test}
\usage{
cars_ttest_data(
m = 10000,
k = 10,
n_x = 50,
n_y = 50,
mu_x_1 = 3/sqrt(n_x),
mu_y_1 = 0,
mu_x_2 = 1/sqrt(n_x),
mu_y_2 = 1/sqrt(n_y),
sd_x = 1,
sd_y = 1
)
}
\arguments{
\item{m}{Number of hypotheses (default: m=10000)}
\item{k}{Number of alternatives (default: k=10)}
\item{n_x}{Number of samples for X (first group) in each test (default: n_x=50)}
\item{n_y}{Number of samples for Y (second group) in each test (default: n_y=50)}
\item{mu_x_1}{Signal strength for the first k locations of X}
\item{mu_y_1}{Signal strength for the first k locations of Y (default: 0)}
\item{mu_x_2}{Signal strength for locations (k+1) to 2k of X}
\item{mu_y_2}{Signal strength for locations (k+1) to 2k of Y}
\item{sd_x}{Standard deviation of each measurement from X}
\item{sd_y}{Standard deviation of each measurement from Y}
}
\value{
List with entries `H` (0/1 vector with null or alternative), `x` (Matrix of dimension m * n_x with X data),
`y` (Matrix with `Y` data) and `var_mat` (Matrix with 2 columns containing the measurement variance for each test * group)
}
\description{
Simulation: Two-sample t-test
}
\references{
The code here is a modification of the example code in the CARS package vignette.
}
|
# Render a saved training-history object ("history.qs") to "history.png".
# rundir <- normalizePath("~/internal/runs/2020-02-22T13-49-35.272Z")
# NOTE(review): paths are hard-coded to the working directory; the commented
# file.path(rundir, ...) variants suggest they were once run-specific.
histfile <- "history.qs" #file.path(rundir, "history.qs")
# NOTE(review): `hist` shadows base::hist() for the rest of the session.
hist <- qs::qread(histfile)
outfile <- "history.png" #file.path(rundir, "history.png")
png(filename = outfile)
plot(hist)
dev.off()
png(filename = "plots/") | /plot.R | no_license | ifrit98/bengaliai | R | false | false | 289 | r | # rundir <- normalizePath("~/internal/runs/2020-02-22T13-49-35.272Z")
# Render a saved training-history object ("history.qs") to "history.png".
histfile <- "history.qs" #file.path(rundir, "history.qs")
# NOTE(review): `hist` shadows base::hist() for the rest of the session.
hist <- qs::qread(histfile)
outfile <- "history.png" #file.path(rundir, "history.png")
png(filename = outfile)
plot(hist)
# NOTE(review): the stray png(filename = "plots/") call after this line opens
# a device on a directory path and is never closed -- looks like a leftover.
dev.off()
png(filename = "plots/") |
library(factoextra)
# ---------------------------------------------------------------------------
# Load the binary CIFAR-10 batches into images.rgb / images.lab.
#
# Fixes relative to the original version:
#   * each data_batch connection is closed after use (previously only the
#     very last connection was ever closed, leaking the other five),
#   * test_batch.bin is read once after the training batches instead of being
#     re-read inside every iteration of the training loop.
# The resulting lists are identical to before: entries 1..50000 hold the
# training images, 50001..60000 the test images, labels shifted to 1-based.
# ---------------------------------------------------------------------------
labels <- read.table("cifar-10-batches-bin/batches.meta.txt")
num.images = 10000 # images stored in each CIFAR-10 binary batch file

# Read `n` records from an open binary connection.  Each record is one label
# byte followed by 3 * 1024 channel bytes (32x32 R, then G, then B planes).
read_cifar_batch <- function(con, n) {
  rgb <- vector("list", n)
  lab <- vector("list", n)
  for (i in seq_len(n)) {
    l <- readBin(con, integer(), size=1, n=1, endian="big")
    r <- as.integer(readBin(con, raw(), size=1, n=1024, endian="big"))
    g <- as.integer(readBin(con, raw(), size=1, n=1024, endian="big"))
    b <- as.integer(readBin(con, raw(), size=1, n=1024, endian="big"))
    rgb[[i]] <- data.frame(r, g, b)
    lab[[i]] <- l + 1   # labels are 0-based on disk; use 1-based here
  }
  list(rgb = rgb, lab = lab)
}

images.rgb <- list()
images.lab <- list()
for (f in 1:5) {
  to.read <- file(paste("cifar-10-batches-bin/data_batch_", f, ".bin", sep=""), "rb")
  batch <- read_cifar_batch(to.read, num.images)
  close(to.read)
  images.rgb <- c(images.rgb, batch$rgb)
  images.lab <- c(images.lab, batch$lab)
}
to.read <- file("cifar-10-batches-bin/test_batch.bin", "rb")
batch <- read_cifar_batch(to.read, num.images)
close(to.read)
images.rgb <- c(images.rgb, batch$rgb)
images.lab <- c(images.lab, batch$lab)
remove(f, batch, to.read)

# ---------------------------------------------------------------------------
# Split the images by class label into images.rgb.1 ... images.rgb.10
# (encounter order within each class is preserved).
# ---------------------------------------------------------------------------
class.of.image <- unlist(images.lab)
for (cls in 1:10) {
  assign(paste("images.rgb.", cls, sep=""), images.rgb[class.of.image == cls])
}
remove(cls, class.of.image)

# ---------------------------------------------------------------------------
# Per-class mean image, flattened pixel-by-pixel as (r, g, b) triples to keep
# the original interleaved layout of allMeanImages.  Summing whole data
# frames with Reduce() replaces the original O(pixels*channels*images)
# scalar loop and produces the same numbers (same addition order over images).
# ---------------------------------------------------------------------------
allMeanImages <- list()
for (i in 1:10) {
  curr_category <- get(paste("images.rgb.", i, sep = ""))
  mean_df <- Reduce(`+`, curr_category) / length(curr_category)
  # t() makes columns = pixels, so walking the transpose element-wise visits
  # r1, g1, b1, r2, g2, b2, ... exactly like the original nested loops.
  allMeanImages <- c(allMeanImages, as.list(t(as.matrix(mean_df))))
}
remove(i, curr_category, mean_df)

#print(eig.val)
#print(eig.val[0:20,1])
#print(eig.val[0:20,1]) | /HW3/HW3-1.R | no_license | owogyx1219/CS498-df | R | false | false | 3,390 | r | library(factoextra)
# ---------------------------------------------------------------------------
# Load the binary CIFAR-10 batches into images.rgb / images.lab.
#
# Fixes relative to the original version:
#   * each data_batch connection is closed after use (previously only the
#     very last connection was ever closed, leaking the other five),
#   * test_batch.bin is read once after the training batches instead of being
#     re-read inside every iteration of the training loop.
# The resulting lists are identical to before: entries 1..50000 hold the
# training images, 50001..60000 the test images, labels shifted to 1-based.
# ---------------------------------------------------------------------------
labels <- read.table("cifar-10-batches-bin/batches.meta.txt")
num.images = 10000 # images stored in each CIFAR-10 binary batch file

# Read `n` records from an open binary connection.  Each record is one label
# byte followed by 3 * 1024 channel bytes (32x32 R, then G, then B planes).
read_cifar_batch <- function(con, n) {
  rgb <- vector("list", n)
  lab <- vector("list", n)
  for (i in seq_len(n)) {
    l <- readBin(con, integer(), size=1, n=1, endian="big")
    r <- as.integer(readBin(con, raw(), size=1, n=1024, endian="big"))
    g <- as.integer(readBin(con, raw(), size=1, n=1024, endian="big"))
    b <- as.integer(readBin(con, raw(), size=1, n=1024, endian="big"))
    rgb[[i]] <- data.frame(r, g, b)
    lab[[i]] <- l + 1   # labels are 0-based on disk; use 1-based here
  }
  list(rgb = rgb, lab = lab)
}

images.rgb <- list()
images.lab <- list()
for (f in 1:5) {
  to.read <- file(paste("cifar-10-batches-bin/data_batch_", f, ".bin", sep=""), "rb")
  batch <- read_cifar_batch(to.read, num.images)
  close(to.read)
  images.rgb <- c(images.rgb, batch$rgb)
  images.lab <- c(images.lab, batch$lab)
}
to.read <- file("cifar-10-batches-bin/test_batch.bin", "rb")
batch <- read_cifar_batch(to.read, num.images)
close(to.read)
images.rgb <- c(images.rgb, batch$rgb)
images.lab <- c(images.lab, batch$lab)
remove(f, batch, to.read)

# ---------------------------------------------------------------------------
# Split the images by class label into images.rgb.1 ... images.rgb.10
# (encounter order within each class is preserved).
# ---------------------------------------------------------------------------
class.of.image <- unlist(images.lab)
for (cls in 1:10) {
  assign(paste("images.rgb.", cls, sep=""), images.rgb[class.of.image == cls])
}
remove(cls, class.of.image)

# ---------------------------------------------------------------------------
# Per-class mean image, flattened pixel-by-pixel as (r, g, b) triples to keep
# the original interleaved layout of allMeanImages.  Summing whole data
# frames with Reduce() replaces the original O(pixels*channels*images)
# scalar loop and produces the same numbers (same addition order over images).
# ---------------------------------------------------------------------------
allMeanImages <- list()
for (i in 1:10) {
  curr_category <- get(paste("images.rgb.", i, sep = ""))
  mean_df <- Reduce(`+`, curr_category) / length(curr_category)
  # t() makes columns = pixels, so walking the transpose element-wise visits
  # r1, g1, b1, r2, g2, b2, ... exactly like the original nested loops.
  allMeanImages <- c(allMeanImages, as.list(t(as.matrix(mean_df))))
}
remove(i, curr_category, mean_df)

#print(eig.val)
#print(eig.val[0:20,1])
#print(eig.val[0:20,1]) |
context('predictions')
library(MASS)
test_that("predictions are calculated correctly", {
  # Fix the RNG seed: the original drew from mvrnorm without seeding,
  # making the unit test nondeterministic.
  set.seed(42)
  # Two well-separated Gaussian classes in 2-D.
  mu_1 <- c(40, -80)
  sd_1 <- c(1, 1)
  C1 <- mvrnorm(60, mu_1, diag(sd_1^2))
  mu_2 <- c(-50, 60)
  sd_2 <- c(1, 1)
  C2 <- mvrnorm(40, mu_2, diag(sd_2^2))
  # Full design matrix and 0/1 class labels.
  X <- rbind(C1, C2)
  y <- matrix(c(rep(0, 60), rep(1, 40)), ncol = 1)
  # Fit the model under test.
  model <- naive_bayes(X, y)
  # Held-out test points drawn from the same two classes.
  C1_test <- mvrnorm(5, mu_1, diag(sd_1^2))
  C2_test <- mvrnorm(5, mu_2, diag(sd_2^2))
  X_test <- rbind(C1_test, C2_test)
  y_test <- matrix(c(rep(0, 5), rep(1, 5)), ncol = 1)
  # Classes are far apart, so predictions should match the labels exactly.
  pred <- predict(model, X_test)
  expect_equal(pred, y_test)
})
| /tests/testthat/test-predict.R | no_license | andreabecsek/NaiveBayes | R | false | false | 722 | r | context('predictions')
library(MASS)
test_that("predictions are calculated correctly", {
  # Fix the RNG seed: the original drew from mvrnorm without seeding,
  # making the unit test nondeterministic.
  set.seed(42)
  # Two well-separated Gaussian classes in 2-D.
  mu_1 <- c(40, -80)
  sd_1 <- c(1, 1)
  C1 <- mvrnorm(60, mu_1, diag(sd_1^2))
  mu_2 <- c(-50, 60)
  sd_2 <- c(1, 1)
  C2 <- mvrnorm(40, mu_2, diag(sd_2^2))
  # Full design matrix and 0/1 class labels.
  X <- rbind(C1, C2)
  y <- matrix(c(rep(0, 60), rep(1, 40)), ncol = 1)
  # Fit the model under test.
  model <- naive_bayes(X, y)
  # Held-out test points drawn from the same two classes.
  C1_test <- mvrnorm(5, mu_1, diag(sd_1^2))
  C2_test <- mvrnorm(5, mu_2, diag(sd_2^2))
  X_test <- rbind(C1_test, C2_test)
  y_test <- matrix(c(rep(0, 5), rep(1, 5)), ncol = 1)
  # Classes are far apart, so predictions should match the labels exactly.
  pred <- predict(model, X_test)
  expect_equal(pred, y_test)
})
|
library(ndjson)
### Name: stream_in
### Title: Stream in & flatten an ndjson file into a 'tbl_dt'
### Aliases: stream_in
### ** Examples
# Plain ndjson file bundled with the package.
plain_file <- system.file("extdata", "test.json", package="ndjson")
nrow(stream_in(plain_file))
# Gzip-compressed ndjson is streamed transparently as well.
gz_file <- system.file("extdata", "testgz.json.gz", package="ndjson")
nrow(stream_in(gz_file))
| /data/genthat_extracted_code/ndjson/examples/stream_in.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 310 | r | library(ndjson)
### Name: stream_in
### Title: Stream in & flatten an ndjson file into a 'tbl_dt'
### Aliases: stream_in
### ** Examples
f <- system.file("extdata", "test.json", package="ndjson")
nrow(stream_in(f))
gzf <- system.file("extdata", "testgz.json.gz", package="ndjson")
nrow(stream_in(gzf))
|
# This script crashes with 16 GB or less of RAM.
# Rsession will use 30 GB or RAM in the long-run, not sure about peaks.
# Packages and functions --------------------------------------------------------------------
library( tidyverse )
require( rje )
library( Matrix )
library( irlba )
library( uwot )
library( FNN )
library( igraph )
library( cowplot )
# rowVars for sparse matrices:
# Column-wise variances of a sparse dgCMatrix, computed directly from the
# compressed-column slots (@x, @p) so implicit zeros are never materialised.
#
# For column j, the stored non-zero values occupy @x positions
# (p[j]+1) .. p[j+1]; the variance accounts for the (nrow - nnz) implicit
# zeros via the `mean^2 * n_zeros` correction term.
#
# Fix: the original indexed with `(spm@p[j]+1):spm@p[j+1]`, which for an
# all-zero column (p[j] == p[j+1]) produces a *descending* sequence and
# silently returns a wrong variance; seq.int(length.out = nnz) yields an
# empty index there, so all-zero columns correctly get variance 0.
colVars_spm <- function( spm ) {
  stopifnot( is( spm, "dgCMatrix" ) )
  n <- spm@Dim[1]
  ans <- vapply( seq_len(spm@Dim[2]), function(j) {
    nnz  <- spm@p[j + 1] - spm@p[j]
    vals <- spm@x[ seq.int(spm@p[j] + 1L, length.out = nnz) ]
    mu   <- sum(vals) / n
    ( sum( (vals - mu)^2 ) + mu^2 * (n - nnz) ) / (n - 1)
  }, numeric(1) )
  names(ans) <- spm@Dimnames[[2]]
  ans
}
# Row-wise variances of a sparse dgCMatrix, implemented by transposing and
# delegating to colVars_spm(). Note: t() on a large sparse matrix allocates
# a full transposed copy before the variances are computed.
rowVars_spm <- function( spm ) {
colVars_spm( t(spm) )
}
# define scale_color_sqrt (and functions it requires):
# Build a scales transformation object for ggplot axes/legends that displays
# values on a power scale (e.g. power = 1/2 for a square-root scale).
power_trans <- function(power) {
  # returns transformation object that can be used in ggplot's scale_*_continuous
  scales::trans_new(
    name = "tmp",
    trans = function(x) x^(power),
    inverse = function(x) x^(1 / power),
    # Pass the exponent through explicitly. The original wrote
    # `function(lims, p) power_breaks(lims, p = power)`, which silently
    # relied on partial argument matching (`p` -> `power`) and declared an
    # inner parameter `p` that was never used.
    breaks = function(lims) power_breaks(lims, power = power)
  )
}
# Axis-break helper for power_trans(): picks n_breaks positions that are
# evenly spaced on the transformed (x^power) scale, then snaps them to
# integer powers of 2 so the tick labels are human-readable.
# NOTE(review): relies on dplyr::case_when being attached (via tidyverse).
power_breaks <- function(lims, power, n_breaks=5){
# Return vector of breaks that span the lims range evenly _after_ power transformation:
lims[1] <- max(0, lims[1]) # non-integer exponents are not defined for negative values
x <- seq(lims[1]^power, lims[2]^(power), length.out = n_breaks)^(1/power)
# make human-readable by rounding to the closest integer power of 2. Smallest
# and largest ticks are not strictly rounded - instead they are moved within
# the range of values, since ggplot would not display them otherwise:
x <- case_when(
x == max(x) ~ 2^(floor(log2(x))), # top break rounded DOWN so it stays inside lims
x == min(x) ~ 2^(ceiling(log2(x))), # bottom break rounded UP so it stays inside lims
TRUE ~ (2^(round(log2(x)))) # interior breaks: nearest power of 2
)
return(x)
}
semi_scientific_formatting <- function(x) {
  # Takes a numeric vector and renders extremely large / small magnitudes in
  # scientific notation (e.g. 1e-30) while leaving the rest untouched.
  case_when(
    x == 0 ~ as.character(0),
    abs(x) < .01 | abs(x) >= 1000 ~ scales::scientific(x, digits = 0),
    TRUE ~ as.character(x)
  )
}
# Continuous colour scale on a square-root display scale with a (reversed)
# cubeHelix palette; extreme break labels are rendered in scientific
# notation. `...` is forwarded to ggplot2::scale_color_gradientn().
scale_color_sqrt <- function(...){scale_color_gradientn(
colours = rev(rje::cubeHelix(100))[5:100], # drop 4 extreme palette entries
trans = power_trans(1/2), # sqrt scale (see power_trans above)
labels = semi_scientific_formatting,
...)}
# Load data ---------------------------------------------------------------
# Reads a MatrixMarket count matrix plus its gene/barcode sidecar files and
# a per-cell metadata table from rawMatrix/.
path <- "~/sds/sd17l002/p/ASD/"
cellinfo <- read.delim( file.path( path, "rawMatrix", "meta.txt" ), stringsAsFactors=FALSE )
counts <- readMM( file.path( path, "rawMatrix", "matrix.mtx" ) )
# make gene symbols unique (by concatenating ensembleID where necessary):
gene_info <- read.delim( file.path( path, "rawMatrix", "genes.tsv" ), header=FALSE, as.is=TRUE ) %>%
mutate(unique = case_when(
duplicated(V2) | duplicated(V2, fromLast=T) ~ paste(V2, V1, sep="_"),
TRUE ~ V2))
rownames(counts) <- gene_info$unique
colnames(counts) <- readLines( file.path( path, "rawMatrix", "barcodes.tsv" ) )
# One row per sample: collapse the per-cell table over the sample-level
# columns (sample .. RNA.Integrity.Number).
sampleTable <-
cellinfo %>% select( sample : RNA.Integrity.Number ) %>% unique
sampleTable
# extracting gene expression is much faster in column-sparse format:
Tcounts <- as(t(counts), "dgCMatrix") # fast: Tcounts[, "SYN1"]
Ccounts <- as(counts, "dgCMatrix") # fast: Ccounts[, 1337] and colSums(Ccounts)
# Preprocessing -----------------------------------------------------------
# load (or re-execute everything in this section):
sfs <- colSums(Ccounts)
norm_counts <- t(t(Ccounts) / sfs)
rownames(norm_counts) <- rownames(Ccounts)
load(file.path(path, "savepoint", "pca_40pcs_scaling_2311genes.RData"))
load(file.path(path, "savepoint", "umap_euc_spread10.RData"))
# informative genes, PCA, UMAP:
poisson_vmr <- mean(1/sfs)
gene_means <- rowMeans( norm_counts )
gene_vars <- rowVars_spm( norm_counts )
cells_expressing <- colSums( Tcounts != 0 )
is_informative <- gene_vars/gene_means > 1.5 * poisson_vmr & cells_expressing > 100
plot(gene_means, gene_vars/gene_means, pch=".", log = "xy")
points(gene_means[is_informative], (gene_vars/gene_means)[is_informative], pch=".", col = "red" )
pca <- irlba::prcomp_irlba( x = sqrt(t(norm_counts[is_informative,])),
n = 40,
scale. = TRUE)
umap_euc <- uwot::umap( pca$x, spread = 10, n_threads = 40) # euc: euclidean distance
# save(pca,
# file = file.path(path, "savepoint", "pca_40pcs_scaling_2311genes.RData"))
# save(umap_euc,
# file = file.path(path, "savepoint", "umap_euc_spread10.RData"))
# Clusters ---------------------------------------------------
# load (or re-execute everything in this section):
load(file.path(path, "savepoint", "clusters.RData"))
# find NN for each cell:
library( RcppAnnoy )
featureMatrix <- pca$x; k_nn <- 50
annoy <- new( AnnoyEuclidean, ncol(featureMatrix) )
for( i in 1:nrow(featureMatrix) )
annoy$addItem( i-1, featureMatrix[i,] )
annoy$build( 50 ) # builds a forest of n_trees trees. More trees gives higher precision when querying.
nn_cells <- t( sapply( 1:annoy$getNItems(), function(i) annoy$getNNsByItem( i-1, k_nn) + 1 ) )
nndists_cells <- sapply( 1:ncol(nn_cells), function(j) sqrt( rowSums( ( featureMatrix - featureMatrix[ nn_cells[,j], ] )^2 ) ) )
rm(featureMatrix, annoy)
# cluster on nearest neighbor graph (Louvain):
adj <- Matrix(0, nrow = nrow(pca$x), ncol = nrow(pca$x)) # has to be sparse, otherwise takes 80 GB of RAM
for(i in 1:ncol(nn_cells))
adj[ cbind(1:nrow(pca$x), nn_cells[, i]) ] <- 1
for(i in 1:ncol(nn_cells))
adj[ cbind(nn_cells[, i], 1:nrow(pca$x)) ] <- 1
cl_louvain <- cluster_louvain( graph_from_adjacency_matrix(adj, mode = "undirected") )
# merge clusters that are separated by patient heterogeneity:
tmp_clusters <- cl_louvain$membership
tmp_clusters <- case_when(tmp_clusters %in% c(5, 6, 8, 1, 10, 20, 2, 16) ~ 5, # excitatory Neurons
tmp_clusters %in% c(11, 15, 19) ~ 11, # astrocytes
tmp_clusters %in% c(3, 9, 18) ~ 3, # OPCs
tmp_clusters %in% c(22, 17) ~ 22, # endothelial and/or pericytes
TRUE ~ tmp_clusters)
anno_clusters = c(
"3" = "OPC",
"4" = "Oligodendrocyte",
"5" = "neurons_excit",
"7" = "IN_PV",
"11"= "Astrocyte",
"12"= "IN_SV2C",
"13"= "Microglia",
"14"= "IN_VIP",
"21"= "neurons_NRGN",
"22"= "endothelial_and_pericytes",
"23"= "IN_SST"
)
celltypes <- factor(anno_clusters[as.character(tmp_clusters)],
levels= anno_clusters[as.character(sort(unique(tmp_clusters)))])
# Louvain clusters
p_louv <- ggplot()+ coord_fixed() +
geom_point(data = data.frame(umap_euc, cl=factor(tmp_clusters)),
aes(X1, X2, col = cl), size = .1) +
geom_label(data = group_by(data.frame(umap_euc, cl=factor(tmp_clusters)), cl) %>%summarise(X1=mean(X1), X2=mean(X2)),
aes(X1, X2, label = cl))
p_louv
# clusters from paper
p_paper <- ggplot()+ coord_fixed()+
geom_point(data =data.frame(cell = colnames(counts), umap_euc) %>%
left_join(select(cellinfo, cell, cluster), by="cell"),
aes(X1, X2, col = cluster), size = .1) +
geom_label(data = data.frame(cell = colnames(counts), umap_euc) %>%
left_join(select(cellinfo, cell, cluster), by = "cell") %>% group_by(cluster) %>%
summarise(X1=mean(X1), X2=mean(X2)),
aes(X1, X2, label = cluster))
p_paper
# save(list = c("cl_louvain", "tmp_clusters", "celltypes", "anno_clusters",
# "nn_cells", "nn_inothercluster"),
# file = file.path(path, "savepoint", "clusters.RData"))
# Doublets and ambiguous cells ----------------------------------
# load (or re-execute everything in this section):
load(file.path(path, "savepoint", "doublets.RData"))
# number of NN from different cluster:
nn_inothercluster <- colSums(
matrix(tmp_clusters[ t(nn_cells) ],
ncol = nrow(nn_cells)) !=
matrix(rep(tmp_clusters, each = ncol(nn_cells)),
ncol = nrow(nn_cells)) )
# in silico doublets: randomly draw cells from different clusters and pool their UMIs to form a "synthetic" doublet:
cellsA <- sample(1:ncol(counts), 50000)
cellsB <- rep(NA, 50000)
smpA <- cellinfo$sample[cellsA]
clA <- tmp_clusters[cellsA]
tmp <- data.frame(smpA, clA) %>% group_by(smpA, clA) %>% tally
for(i in 1:nrow(tmp)) {
is_smp <- cellinfo$sample[cellsA] == tmp$smpA[i]
is_cl <- tmp_clusters[cellsA] == tmp$clA[i]
# sample amongst cells from same sample and different cluster:
cellsB[ is_smp & is_cl ] <- base::sample(
x = which(cellinfo$sample == tmp$smpA[i] & !tmp_clusters == tmp$clA[i]),
size = tmp$n[i],
replace = T) # in case one cluster is larger than all others combined
}
doublet_raw <- Ccounts[, cellsA] + Ccounts[, cellsB]
doublet_pcs <- predict(pca,
newdata = sqrt( (t(doublet_raw) / colSums(doublet_raw))[, is_informative] ))
# Alternative 1 (clearer):
a <- FNN::get.knn(rbind(pca$x, doublet_pcs), k = 50)
nn_doublets <- a$nn.index
nndists_doublets <- a$nn.dist
# Alternative 2 (faster):
library( RcppAnnoy )
featureMatrix <- rbind(pca$x, doublet_pcs); k_nn <- 50
annoy <- new( AnnoyEuclidean, ncol(featureMatrix) )
for( i in 1:nrow(featureMatrix) )
annoy$addItem( i-1, featureMatrix[i,] )
annoy$build( 50 ) # builds a forest of n_trees trees. More trees gives higher precision when querying.
nn_doublets <- t( sapply( 1:annoy$getNItems(), function(i) annoy$getNNsByItem( i-1, k_nn) + 1 ) )
nndists_doublets <- sapply( 1:ncol(nn_doublets), function(j) sqrt( rowSums( ( featureMatrix - featureMatrix[ nn_doublets[,j], ] )^2 ) ) )
rm(featureMatrix, annoy)
# percentage of synthetic doublets in neighborhood for each cell:
dblts_perc <- rowMeans( nn_doublets > ncol(counts) )[ 1:ncol(counts) ]
# Run UMAP with Annoy's output
ump2 <- uwot::umap( NULL, nn_method = list( idx=nn_doublets, dist=nndists_doublets),
n_threads=40, spread = 15, verbose=TRUE )
is_synth <- 1:nrow(ump2) > nrow(pca$x)
# save(list = c("nn_doublets", "nndists_doublets", "cellsA", "cellsB",
# "dblts_perc", "is_synth", "ump2"),
# file = file.path(path, "savepoint", "doublets.RData"))
# DESeq -------------------------------------------------------------------
library(DESeq2)
library(BiocParallel)
# visualize dirty cells we clean away:
tmp <- data.frame(umap_euc,
diagnosis = cellinfo$diagnosis,
clean = dblts_perc < 3/50 & nn_inothercluster < 1,
Gene = Tcounts[, "TTF2"] / sfs/mean(1/sfs),
cl = factor(celltypes))
ggplot() + coord_fixed()+
geom_point(data=filter(tmp, clean), aes(X1, X2, col = cl), size=.1) +
geom_point(data=filter(tmp, !clean), aes(X1, X2), col = "black", size=.1) +
geom_label(data=group_by(tmp, cl) %>% summarise(X1=mean(X1), X2=mean(X2)), aes(X1, X2, label=cl))
tmp <- as.matrix(table(sample=cellinfo$sample, clean = dblts_perc < 3/50 & nn_inothercluster < 1))
data.frame(sample = rownames(tmp), dirtyProportion = tmp[,1] / (tmp[,1] + tmp[,2])) %>% left_join(sampleTable, by="sample") %>% ggplot(aes(sample, dirtyProportion, col = diagnosis))+geom_point()
# compute for a single cluster
sel <- celltypes == "neurons_excit" & dblts_perc < 3/50 & nn_inothercluster < 1
pseudobulks <- as.matrix(t( fac2sparse(cellinfo$sample[sel]) %*% t(Ccounts[, sel]) ))
coldat <- filter(sampleTable, sample %in% colnames(pseudobulks)) %>%
mutate(individual = factor(individual),
diagnosis = factor(diagnosis, levels = c("Control", "ASD")),
region = factor(region))
rownames(coldat) <- coldat$sample
dds <- DESeq2::DESeqDataSetFromMatrix( pseudobulks,
coldat[colnames(pseudobulks), ],
design = ~ sex + region + age + diagnosis )
# For cluster 5, I tested that we do not need interactions between sex, region and diagnosis. I used
# DESeq's LTR for this (see mail to Simon at mid-September 2019).
dds <- DESeq2::DESeq(dds,
parallel=TRUE, BPPARAM=BiocParallel::MulticoreParam(20))
res_df <- DESeq2::results(dds, name = "diagnosis_ASD_vs_Control") %>% as.data.frame() %>% rownames_to_column("Gene")
data.frame(umap_euc, Gene = Tcounts[, "ZNF770"], sfs=sfs, diagnosis=cellinfo$diagnosis) %>%
ggplot(aes(X1, X2, col=Gene/sfs/mean(1/sfs)))+geom_point(size=.1) +
scale_color_sqrt(name="ZNF770") +
facet_wrap(~ diagnosis) + coord_fixed()
| /asd_workflow.R | no_license | IsabelMarleen/asd_analysis | R | false | false | 12,680 | r | # This script crashes with 16 GB or less of RAM.
# Rsession will use 30 GB or RAM in the long-run, not sure about peaks.
# Packages and functions --------------------------------------------------------------------
library( tidyverse )
require( rje )
library( Matrix )
library( irlba )
library( uwot )
library( FNN )
library( igraph )
library( cowplot )
# rowVars for sparse matrices:
# Column-wise variances of a sparse dgCMatrix, computed directly from the
# compressed-column slots (@x, @p) so implicit zeros are never materialised.
#
# For column j, the stored non-zero values occupy @x positions
# (p[j]+1) .. p[j+1]; the variance accounts for the (nrow - nnz) implicit
# zeros via the `mean^2 * n_zeros` correction term.
#
# Fix: the original indexed with `(spm@p[j]+1):spm@p[j+1]`, which for an
# all-zero column (p[j] == p[j+1]) produces a *descending* sequence and
# silently returns a wrong variance; seq.int(length.out = nnz) yields an
# empty index there, so all-zero columns correctly get variance 0.
colVars_spm <- function( spm ) {
  stopifnot( is( spm, "dgCMatrix" ) )
  n <- spm@Dim[1]
  ans <- vapply( seq_len(spm@Dim[2]), function(j) {
    nnz  <- spm@p[j + 1] - spm@p[j]
    vals <- spm@x[ seq.int(spm@p[j] + 1L, length.out = nnz) ]
    mu   <- sum(vals) / n
    ( sum( (vals - mu)^2 ) + mu^2 * (n - nnz) ) / (n - 1)
  }, numeric(1) )
  names(ans) <- spm@Dimnames[[2]]
  ans
}
rowVars_spm <- function( spm ) {
colVars_spm( t(spm) )
}
# define scale_color_sqrt (and functions it requires):
# Build a scales transformation object for ggplot axes/legends that displays
# values on a power scale (e.g. power = 1/2 for a square-root scale).
power_trans <- function(power) {
  # returns transformation object that can be used in ggplot's scale_*_continuous
  scales::trans_new(
    name = "tmp",
    trans = function(x) x^(power),
    inverse = function(x) x^(1 / power),
    # Pass the exponent through explicitly. The original wrote
    # `function(lims, p) power_breaks(lims, p = power)`, which silently
    # relied on partial argument matching (`p` -> `power`) and declared an
    # inner parameter `p` that was never used.
    breaks = function(lims) power_breaks(lims, power = power)
  )
}
power_breaks <- function(lims, power, n_breaks=5){
# Return vector of breaks that span the lims range evenly _after_ power transformation:
lims[1] <- max(0, lims[1]) # non-integer exponents are not defined for negative values
x <- seq(lims[1]^power, lims[2]^(power), length.out = n_breaks)^(1/power)
# make human-readable by rounding to the closest integer power of 2. Smallest
# and largest ticks are not strictly rounded - instead they are moved within
# the range of values, since ggplot would not display them otherwise:
x <- case_when(
x == max(x) ~ 2^(floor(log2(x))),
x == min(x) ~ 2^(ceiling(log2(x))),
TRUE ~ (2^(round(log2(x))))
)
return(x)
}
semi_scientific_formatting <- function(x) {
# takes numeric vector x and returns character vector where extremely large / small
# numbers are in scientific notation (e.g. 1e-30) while others are untouched:
x <- case_when(
x == 0 ~ as.character(0),
abs(x) < .01 | abs(x) >= 1000 ~ scales::scientific(x, digits = 0),
TRUE ~ as.character(x))}
scale_color_sqrt <- function(...){scale_color_gradientn(
colours = rev(rje::cubeHelix(100))[5:100],
trans = power_trans(1/2),
labels = semi_scientific_formatting,
...)}
# Load data ---------------------------------------------------------------
path <- "~/sds/sd17l002/p/ASD/"
cellinfo <- read.delim( file.path( path, "rawMatrix", "meta.txt" ), stringsAsFactors=FALSE )
counts <- readMM( file.path( path, "rawMatrix", "matrix.mtx" ) )
# make gene symbols unique (by concatenating ensembleID where necessary):
gene_info <- read.delim( file.path( path, "rawMatrix", "genes.tsv" ), header=FALSE, as.is=TRUE ) %>%
mutate(unique = case_when(
duplicated(V2) | duplicated(V2, fromLast=T) ~ paste(V2, V1, sep="_"),
TRUE ~ V2))
rownames(counts) <- gene_info$unique
colnames(counts) <- readLines( file.path( path, "rawMatrix", "barcodes.tsv" ) )
sampleTable <-
cellinfo %>% select( sample : RNA.Integrity.Number ) %>% unique
sampleTable
# extracting gene expression is much faster in column-sparse format:
Tcounts <- as(t(counts), "dgCMatrix") # fast: Tcounts[, "SYN1"]
Ccounts <- as(counts, "dgCMatrix") # fast: Ccounts[, 1337] and colSums(Ccounts)
# Preprocessing -----------------------------------------------------------
# load (or re-execute everything in this section):
sfs <- colSums(Ccounts)
norm_counts <- t(t(Ccounts) / sfs)
rownames(norm_counts) <- rownames(Ccounts)
load(file.path(path, "savepoint", "pca_40pcs_scaling_2311genes.RData"))
load(file.path(path, "savepoint", "umap_euc_spread10.RData"))
# informative genes, PCA, UMAP:
poisson_vmr <- mean(1/sfs)
gene_means <- rowMeans( norm_counts )
gene_vars <- rowVars_spm( norm_counts )
cells_expressing <- colSums( Tcounts != 0 )
is_informative <- gene_vars/gene_means > 1.5 * poisson_vmr & cells_expressing > 100
plot(gene_means, gene_vars/gene_means, pch=".", log = "xy")
points(gene_means[is_informative], (gene_vars/gene_means)[is_informative], pch=".", col = "red" )
pca <- irlba::prcomp_irlba( x = sqrt(t(norm_counts[is_informative,])),
n = 40,
scale. = TRUE)
umap_euc <- uwot::umap( pca$x, spread = 10, n_threads = 40) # euc: euclidean distance
# save(pca,
# file = file.path(path, "savepoint", "pca_40pcs_scaling_2311genes.RData"))
# save(umap_euc,
# file = file.path(path, "savepoint", "umap_euc_spread10.RData"))
# Clusters ---------------------------------------------------
# load (or re-execute everything in this section):
load(file.path(path, "savepoint", "clusters.RData"))
# find NN for each cell:
library( RcppAnnoy )
featureMatrix <- pca$x; k_nn <- 50
annoy <- new( AnnoyEuclidean, ncol(featureMatrix) )
for( i in 1:nrow(featureMatrix) )
annoy$addItem( i-1, featureMatrix[i,] )
annoy$build( 50 ) # builds a forest of n_trees trees. More trees gives higher precision when querying.
nn_cells <- t( sapply( 1:annoy$getNItems(), function(i) annoy$getNNsByItem( i-1, k_nn) + 1 ) )
nndists_cells <- sapply( 1:ncol(nn_cells), function(j) sqrt( rowSums( ( featureMatrix - featureMatrix[ nn_cells[,j], ] )^2 ) ) )
rm(featureMatrix, annoy)
# cluster on nearest neighbor graph (Louvain):
adj <- Matrix(0, nrow = nrow(pca$x), ncol = nrow(pca$x)) # has to be sparse, otherwise takes 80 GB of RAM
for(i in 1:ncol(nn_cells))
adj[ cbind(1:nrow(pca$x), nn_cells[, i]) ] <- 1
for(i in 1:ncol(nn_cells))
adj[ cbind(nn_cells[, i], 1:nrow(pca$x)) ] <- 1
cl_louvain <- cluster_louvain( graph_from_adjacency_matrix(adj, mode = "undirected") )
# merge clusters that are separated by patient heterogeneity:
tmp_clusters <- cl_louvain$membership
tmp_clusters <- case_when(tmp_clusters %in% c(5, 6, 8, 1, 10, 20, 2, 16) ~ 5, # excitatory Neurons
tmp_clusters %in% c(11, 15, 19) ~ 11, # astrocytes
tmp_clusters %in% c(3, 9, 18) ~ 3, # OPCs
tmp_clusters %in% c(22, 17) ~ 22, # endothelial and/or pericytes
TRUE ~ tmp_clusters)
anno_clusters = c(
"3" = "OPC",
"4" = "Oligodendrocyte",
"5" = "neurons_excit",
"7" = "IN_PV",
"11"= "Astrocyte",
"12"= "IN_SV2C",
"13"= "Microglia",
"14"= "IN_VIP",
"21"= "neurons_NRGN",
"22"= "endothelial_and_pericytes",
"23"= "IN_SST"
)
celltypes <- factor(anno_clusters[as.character(tmp_clusters)],
levels= anno_clusters[as.character(sort(unique(tmp_clusters)))])
# Louvain clusters
p_louv <- ggplot()+ coord_fixed() +
geom_point(data = data.frame(umap_euc, cl=factor(tmp_clusters)),
aes(X1, X2, col = cl), size = .1) +
geom_label(data = group_by(data.frame(umap_euc, cl=factor(tmp_clusters)), cl) %>%summarise(X1=mean(X1), X2=mean(X2)),
aes(X1, X2, label = cl))
p_louv
# clusters from paper
p_paper <- ggplot()+ coord_fixed()+
geom_point(data =data.frame(cell = colnames(counts), umap_euc) %>%
left_join(select(cellinfo, cell, cluster), by="cell"),
aes(X1, X2, col = cluster), size = .1) +
geom_label(data = data.frame(cell = colnames(counts), umap_euc) %>%
left_join(select(cellinfo, cell, cluster), by = "cell") %>% group_by(cluster) %>%
summarise(X1=mean(X1), X2=mean(X2)),
aes(X1, X2, label = cluster))
p_paper
# save(list = c("cl_louvain", "tmp_clusters", "celltypes", "anno_clusters",
# "nn_cells", "nn_inothercluster"),
# file = file.path(path, "savepoint", "clusters.RData"))
# Doublets and ambiguous cells ----------------------------------
# load (or re-execute everything in this section):
load(file.path(path, "savepoint", "doublets.RData"))
# number of NN from different cluster:
nn_inothercluster <- colSums(
matrix(tmp_clusters[ t(nn_cells) ],
ncol = nrow(nn_cells)) !=
matrix(rep(tmp_clusters, each = ncol(nn_cells)),
ncol = nrow(nn_cells)) )
# in silico doublets: randomly draw cells from different clusters and pool their UMIs to form a "synthetic" doublet:
cellsA <- sample(1:ncol(counts), 50000)
cellsB <- rep(NA, 50000)
smpA <- cellinfo$sample[cellsA]
clA <- tmp_clusters[cellsA]
tmp <- data.frame(smpA, clA) %>% group_by(smpA, clA) %>% tally
for(i in 1:nrow(tmp)) {
is_smp <- cellinfo$sample[cellsA] == tmp$smpA[i]
is_cl <- tmp_clusters[cellsA] == tmp$clA[i]
# sample amongst cells from same sample and different cluster:
cellsB[ is_smp & is_cl ] <- base::sample(
x = which(cellinfo$sample == tmp$smpA[i] & !tmp_clusters == tmp$clA[i]),
size = tmp$n[i],
replace = T) # in case one cluster is larger than all others combined
}
doublet_raw <- Ccounts[, cellsA] + Ccounts[, cellsB]
doublet_pcs <- predict(pca,
newdata = sqrt( (t(doublet_raw) / colSums(doublet_raw))[, is_informative] ))
# Alternative 1 (clearer):
a <- FNN::get.knn(rbind(pca$x, doublet_pcs), k = 50)
nn_doublets <- a$nn.index
nndists_doublets <- a$nn.dist
# Alternative 2 (faster):
library( RcppAnnoy )
featureMatrix <- rbind(pca$x, doublet_pcs); k_nn <- 50
annoy <- new( AnnoyEuclidean, ncol(featureMatrix) )
for( i in 1:nrow(featureMatrix) )
annoy$addItem( i-1, featureMatrix[i,] )
annoy$build( 50 ) # builds a forest of n_trees trees. More trees gives higher precision when querying.
nn_doublets <- t( sapply( 1:annoy$getNItems(), function(i) annoy$getNNsByItem( i-1, k_nn) + 1 ) )
nndists_doublets <- sapply( 1:ncol(nn_doublets), function(j) sqrt( rowSums( ( featureMatrix - featureMatrix[ nn_doublets[,j], ] )^2 ) ) )
rm(featureMatrix, annoy)
# percentage of synthetic doublets in neighborhood for each cell:
dblts_perc <- rowMeans( nn_doublets > ncol(counts) )[ 1:ncol(counts) ]
# Run UMAP with Annoy's output
ump2 <- uwot::umap( NULL, nn_method = list( idx=nn_doublets, dist=nndists_doublets),
n_threads=40, spread = 15, verbose=TRUE )
is_synth <- 1:nrow(ump2) > nrow(pca$x)
# save(list = c("nn_doublets", "nndists_doublets", "cellsA", "cellsB",
# "dblts_perc", "is_synth", "ump2"),
# file = file.path(path, "savepoint", "doublets.RData"))
# DESeq -------------------------------------------------------------------
library(DESeq2)
library(BiocParallel)
# visualize dirty cells we clean away:
tmp <- data.frame(umap_euc,
diagnosis = cellinfo$diagnosis,
clean = dblts_perc < 3/50 & nn_inothercluster < 1,
Gene = Tcounts[, "TTF2"] / sfs/mean(1/sfs),
cl = factor(celltypes))
ggplot() + coord_fixed()+
geom_point(data=filter(tmp, clean), aes(X1, X2, col = cl), size=.1) +
geom_point(data=filter(tmp, !clean), aes(X1, X2), col = "black", size=.1) +
geom_label(data=group_by(tmp, cl) %>% summarise(X1=mean(X1), X2=mean(X2)), aes(X1, X2, label=cl))
tmp <- as.matrix(table(sample=cellinfo$sample, clean = dblts_perc < 3/50 & nn_inothercluster < 1))
data.frame(sample = rownames(tmp), dirtyProportion = tmp[,1] / (tmp[,1] + tmp[,2])) %>% left_join(sampleTable, by="sample") %>% ggplot(aes(sample, dirtyProportion, col = diagnosis))+geom_point()
# compute for a single cluster
sel <- celltypes == "neurons_excit" & dblts_perc < 3/50 & nn_inothercluster < 1
pseudobulks <- as.matrix(t( fac2sparse(cellinfo$sample[sel]) %*% t(Ccounts[, sel]) ))
coldat <- filter(sampleTable, sample %in% colnames(pseudobulks)) %>%
mutate(individual = factor(individual),
diagnosis = factor(diagnosis, levels = c("Control", "ASD")),
region = factor(region))
rownames(coldat) <- coldat$sample
dds <- DESeq2::DESeqDataSetFromMatrix( pseudobulks,
coldat[colnames(pseudobulks), ],
design = ~ sex + region + age + diagnosis )
# For cluster 5, I tested that we do not need interactions between sex, region and diagnosis. I used
# DESeq's LTR for this (see mail to Simon at mid-September 2019).
dds <- DESeq2::DESeq(dds,
parallel=TRUE, BPPARAM=BiocParallel::MulticoreParam(20))
res_df <- DESeq2::results(dds, name = "diagnosis_ASD_vs_Control") %>% as.data.frame() %>% rownames_to_column("Gene")
data.frame(umap_euc, Gene = Tcounts[, "ZNF770"], sfs=sfs, diagnosis=cellinfo$diagnosis) %>%
ggplot(aes(X1, X2, col=Gene/sfs/mean(1/sfs)))+geom_point(size=.1) +
scale_color_sqrt(name="ZNF770") +
facet_wrap(~ diagnosis) + coord_fixed()
|
#------------------------------------------------------------------------------#
#                                   ggplot2                                    #
#                                  settings                                    #
#------------------------------------------------------------------------------#
library(ggplot2)

# Project-wide theme: classic look, centred (sub)titles, black axis text/ticks.
ggplot2_theme <- theme_classic() +
  theme(
    plot.title = element_text(hjust = 0.5, size = 10),
    plot.subtitle = element_text(hjust = 0.5, size = 10),
    axis.text = element_text(colour = "black"),
    axis.ticks = element_line(colour = "black")
  )

# Register as the default theme for all subsequent plots.
theme_set(ggplot2_theme)
| /helper_ggplot2_settings.R | permissive | hochwagenlab/Chr_fusion_hybrids | R | false | false | 609 | r | #------------------------------------------------------------------------------#
# ggplot2 #
# settings #
#------------------------------------------------------------------------------#
library(ggplot2)
ggplot2_theme <- theme_classic() +
theme(plot.title=element_text(hjust=0.5, size=10),
plot.subtitle=element_text(hjust=0.5, size=10),
axis.text=element_text(colour='black'),
axis.ticks=element_line(colour='black'))
theme_set(ggplot2_theme)
|
# Load Packages & Data ----------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggthemes)
library(car)
library(reshape2)
df <-
as.data.frame(read.csv("./data/Boston.csv"))
# Calculating Pearson's Correlation Coeff. between each pair of variables -------------------------------------
# Pairwise Pearson correlations between all numeric columns of the data.
correl_matrix <-
  Filter(is.numeric, df) |> cor(method = "pearson")
# Blank out the lower triangle (incl. the diagonal) so each variable pair
# appears only once in the long-format table below.
# Fix: use TRUE, not the reassignable alias T.
correl_matrix[lower.tri(correl_matrix, diag = TRUE)] <- NA
# Long format: one row per (x, y) variable pair with its correlation.
correl_values <-
  melt(correl_matrix, na.rm = TRUE, varnames = c("x", "y"),
       value.name = "pearson")
# Heatmap of the Correlation Matrix ---------------------------------------
# Upper-triangle tile map of the pairwise correlations with a diverging
# palette centred at 0 (red = negative, blue = positive).
ggplot(data = correl_values, mapping = aes(x, y, fill = pearson)) +
geom_tile(linejoin = "round", col = "gray") +
labs(
title = "Correlation Heatmap",
x = "",
y = "",
fill = "Pearson's r"
) +
# Diverging fill over the full correlation range [-1, 1].
scale_fill_gradient2(
low = "red", mid = "white", high = "blue",
midpoint = 0, limits = c(-1, 1), n.breaks = 5
) +
theme_pander() +
# Cosmetic tweaks: bold labels, angled x ticks so long names stay legible.
theme(
plot.title = element_text(face = "bold", hjust = .5),
legend.title = element_text(face = "bold", color = "dimgrey",
vjust = 3),
legend.text = element_text(color = "dimgrey"),
axis.text.x = element_text(angle = 50, face = "bold",
color = "gray2"),
axis.text.y = element_text(face = "bold", color = "gray2")
)
# Fitting a Multiple Linear Regression ------------------------------------
# Model medv (median home value in the Boston data) on eight predictors.
linfit <-
lm(
formula = medv ~ crim + zn + chas + nox + rm +
tax + lstat + dis,
data = df
)
# car::S() -- extended model summary (coefficients, SEs, fit statistics).
summaries <-
S(linfit)
# Variance Inflation Factor of the Model Predictors -----------------------------
# Large VIFs (rule of thumb: > ~5-10) would flag multicollinearity.
vif(linfit)
| /scripts/LinearRegression/multiple_lm.R | no_license | jxareas/Statistical-Learning-Models | R | false | false | 2,003 | r | # Load Packages & Data ----------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggthemes)
library(car)
library(reshape2)
df <-
as.data.frame(read.csv("./data/Boston.csv"))
# Calculating Pearson's Correlation Coeff. between each pair of variables -------------------------------------
correl_matrix <-
Filter(is.numeric, df) |> cor(method = "pearson")
correl_matrix[lower.tri(correl_matrix, diag = T)] <- NA
correl_values <-
melt(correl_matrix, na.rm = T, varnames = c("x", "y"),
value.name = "pearson")
# Heatmap of the Correlation Matrix ---------------------------------------
ggplot(data = correl_values, mapping = aes(x, y, fill = pearson)) +
geom_tile(linejoin = "round", col = "gray") +
labs(
title = "Correlation Heatmap",
x = "",
y = "",
fill = "Pearson's r"
) +
scale_fill_gradient2(
low = "red", mid = "white", high = "blue",
midpoint = 0, limits = c(-1, 1), n.breaks = 5
) +
theme_pander() +
theme(
plot.title = element_text(face = "bold", hjust = .5),
legend.title = element_text(face = "bold", color = "dimgrey",
vjust = 3),
legend.text = element_text(color = "dimgrey"),
axis.text.x = element_text(angle = 50, face = "bold",
color = "gray2"),
axis.text.y = element_text(face = "bold", color = "gray2")
)
# Fitting a Multiple Linear Regression ------------------------------------
linfit <-
lm(
formula = medv ~ crim + zn + chas + nox + rm +
tax + lstat + dis,
data = df
)
summaries <-
S(linfit)
# Variance Inflation Factor of the Model Predictors -----------------------------
vif(linfit)
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -1187020826924399104, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615844444-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,235 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -1187020826924399104, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
## Example script extracted from the help page of signal::ifft.
library(signal)
### Name: ifft
### Title: Inverse FFT
### Aliases: ifft
### Keywords: math
### ** Examples
## Round trip: ifft() is presumably the normalised (1/N) inverse transform,
## so this should recover the input as a complex vector
## (1+0i 2+0i 3+0i 4+0i) -- verify against the signal package docs.
ifft(fft(1:4))
| /data/genthat_extracted_code/signal/examples/ifft.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 129 | r | library(signal)
### Name: ifft
### Title: Inverse FFT
### Aliases: ifft
### Keywords: math
### ** Examples
## Round-trip example: ifft(fft(x)) should return x as a complex vector.
## NOTE(review): requires library(signal) to be attached; the library()
## call that preceded this fragment was lost in extraction.
ifft(fft(1:4))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_disk.R
\name{Vfit_d}
\alias{Vfit_d}
\title{Vfit_d}
\usage{
Vfit_d(k, time, h0 = 0.225, rb = 0.05, tb = 0.01)
}
\arguments{
\item{k}{k}
\item{time}{time}
\item{h0}{h0}
\item{rb}{rb}
\item{tb}{tb}
}
\description{
Returns Vfit
}
\author{
Camille Heylen
}
| /man/Vfit_d.Rd | no_license | femeunier/FilterFlow | R | false | true | 340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_disk.R
\name{Vfit_d}
\alias{Vfit_d}
\title{Vfit_d}
\usage{
Vfit_d(k, time, h0 = 0.225, rb = 0.05, tb = 0.01)
}
\arguments{
\item{k}{k}
\item{time}{time}
\item{h0}{h0}
\item{rb}{rb}
\item{tb}{tb}
}
\description{
Returns Vfit
}
\author{
Camille Heylen
}
|
## Find the hospital in a state with a given rank for 30-day mortality.
##
## Reads "outcome-of-care-measures.csv" from the working directory
## (hospital name in column 2, state in column "State", mortality rates
## in columns 11 = heart attack, 17 = heart failure, 23 = pneumonia).
##
## state   : two-letter state abbreviation; must occur in the data
## outcome : "heart attack", "heart failure" or "pneumonia"
## num     : "best", "worst", or a numeric rank (1 = lowest mortality);
##           ties are broken alphabetically by hospital name
## Returns the hospital name, or NA when num exceeds the number of
## hospitals with a reported rate.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  requiredata <- read.csv("outcome-of-care-measures.csv",
                          colClasses = "character",
                          na.strings = "Not Available")
  ## Validate inputs early
  if (!is.element(state, requiredata[, "State"])) {
    stop("invalid state")
  }
  outcome_col <- switch(outcome,
                        "heart attack"  = 11,
                        "heart failure" = 17,
                        "pneumonia"     = 23,
                        stop("invalid outcome"))
  causedeath <- requiredata[requiredata$State == state, ]
  rates <- suppressWarnings(as.numeric(causedeath[[outcome_col]]))
  ## Drop hospitals without a reported rate, then rank by rate with an
  ## alphabetical tie-break on hospital name.
  keep <- !is.na(rates)
  hospitals <- causedeath[[2]][keep]
  ranking <- order(rates[keep], hospitals)
  ## BUG FIX: the original used `if (best) {...}; if (worst) {...} else
  ## {...}`, so num == "best" also fell into the numeric-rank else branch
  ## and the result was overwritten with NA. Use one if/else chain.
  rank <- if (identical(num, "best")) {
    1L
  } else if (identical(num, "worst")) {
    length(ranking)
  } else {
    num
  }
  if (length(ranking) == 0 || is.na(rank) || rank < 1 || rank > length(ranking)) {
    return(NA)
  }
  hospitals[ranking[rank]]
}
##rankhospital("NC", "heart attack", "worst")
##rankhospital("WA", "heart attack", 7)
##rankhospital("MN", "heart attack", 1000)
| /Data-Science/2_R_Programming/Lab/Lab_3/Other_Solution/rankhospital.R | permissive | shanky0507/Coursera-John-Hopkins | R | false | false | 1,308 | r |
## Find the hospital in a state with a given rank for 30-day mortality.
##
## Reads "outcome-of-care-measures.csv" from the working directory
## (hospital name in column 2, state in column "State", mortality rates
## in columns 11 = heart attack, 17 = heart failure, 23 = pneumonia).
##
## state   : two-letter state abbreviation; must occur in the data
## outcome : "heart attack", "heart failure" or "pneumonia"
## num     : "best", "worst", or a numeric rank (1 = lowest mortality);
##           ties are broken alphabetically by hospital name
## Returns the hospital name, or NA when num exceeds the number of
## hospitals with a reported rate.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  requiredata <- read.csv("outcome-of-care-measures.csv",
                          colClasses = "character",
                          na.strings = "Not Available")
  ## Validate inputs early
  if (!is.element(state, requiredata[, "State"])) {
    stop("invalid state")
  }
  outcome_col <- switch(outcome,
                        "heart attack"  = 11,
                        "heart failure" = 17,
                        "pneumonia"     = 23,
                        stop("invalid outcome"))
  causedeath <- requiredata[requiredata$State == state, ]
  rates <- suppressWarnings(as.numeric(causedeath[[outcome_col]]))
  ## Drop hospitals without a reported rate, then rank by rate with an
  ## alphabetical tie-break on hospital name.
  keep <- !is.na(rates)
  hospitals <- causedeath[[2]][keep]
  ranking <- order(rates[keep], hospitals)
  ## BUG FIX: the original used `if (best) {...}; if (worst) {...} else
  ## {...}`, so num == "best" also fell into the numeric-rank else branch
  ## and the result was overwritten with NA. Use one if/else chain.
  rank <- if (identical(num, "best")) {
    1L
  } else if (identical(num, "worst")) {
    length(ranking)
  } else {
    num
  }
  if (length(ranking) == 0 || is.na(rank) || rank < 1 || rank > length(ranking)) {
    return(NA)
  }
  hospitals[ranking[rank]]
}
##rankhospital("NC", "heart attack", "worst")
##rankhospital("WA", "heart attack", 7)
##rankhospital("MN", "heart attack", 1000)
|
#' Locate header and frame start positions in a raw thermal video file
#'
#' Reads the file as 2-byte little-endian unsigned integers and searches for
#' the repeated width/height marker pair that precedes (and follows) each
#' frame, then derives the element indices at which each header (h.start)
#' and each frame (f.start) begins.
#'
#' @param vidfile Path to the thermal video file (.SEQ or .FCF).
#' @param w Frame width in pixels.
#' @param h Frame height in pixels.
#' @param res2fram Offset (in 2-byte elements) from the resolution marker to
#'   the first pixel value of a frame; appears fixed at 15 here.
#' @return A list with components h.start and f.start, each a vector of
#'   2-byte-element indices into the file.
#' @export
#'
frameLocates<-function(vidfile="", w=640, h=480, res2fram=15)
{
f.start<-NULL
h.start<-NULL
l<-w*h # l = number of pixel elements in a frame (each element is 2 bytes)
# File information for binary read in
finfo <- file.info(vidfile)
byte.length<-2 # how many bytes make up an element
no.elements<-finfo$size/byte.length
# this should tell you how many total elements make up the video file
to.read <- file(vidfile, "rb") # set to.read file. rb means read binary
alldata<-readBin(to.read, integer(), n=5000000, size=byte.length, endian = "little", signed=FALSE)
close(to.read)
# approx number of frames in video file, can't be certain since some headers might be different sizes
# this value might be an over-estimate
fid<-c(w,h)
# this is the look-up sequence, starts with the magic byte 2, followed by resolution values, w, then h,
# which will repeat throughout the file in a predicable fashion corresponding to each frame. It is likely
# that the number of wh locates will be double the number of actual frames, since there is a w,h at the beginning
# and the end of every frame.
# readBin above reads at most 5e6 elements, so hitting that cap means the
# file did not fit in one read; markers found in the imported chunk are
# extrapolated across the whole file from their repeating gap pattern.
if(length(alldata)>=5000000)
{
# locate.fid() is a package helper defined elsewhere (not in this file)
wh.locate<-locate.fid(fid,alldata,long=TRUE)
# try wh.locate on a small chunk of all data
diff.wh.locate<-diff(wh.locate)
# difference calc should yield a repeating pattern of header then frame
gaps<-unique(diff.wh.locate)
# if the pattern is simple, this value should be 2
no.unique.locates<-length(gaps)
# reconstruct wh.locate from scrap, starting only with wh.locate[1]
if(no.unique.locates==2)
{
repeats<-trunc(finfo$size/2/(l+wh.locate[1]+gaps[1]))
# how many repeats required to create fill whole file
wh.locate<-cumsum(as.numeric((c(wh.locate[1],rep(gaps,repeats)))))
# cumulative sum up the 1st locate and repeate gaps
#wh.locate<-(wh.locate[-c(which(wh.locate>(finfo$size/byte.length)))])
# commented this out (March 2019) - extraneous and prone to error
# remove any locates that go beyond the length of the data file after import
header.l<-as.integer(rev(wh.locate)[1]-rev(wh.locate)[2])
}
if(no.unique.locates==4)
{
repeats<-trunc((finfo$size/byte.length)/(l))
gap.reps<-rep(gaps[3:4],repeats)
wh.locate<-cumsum(as.numeric((c(wh.locate[3],gap.reps))))
wh.locate<-(wh.locate[-c(which(wh.locate>(finfo$size/byte.length)))])
header.l<-as.integer(rev(wh.locate)[2]-rev(wh.locate)[3])
}
}
# Whole file fit in memory: find every w value immediately followed by h
# directly, without extrapolation.
if(length(alldata)<5000000)
{
# much faster without calling my function:
fid1.locate<-which(alldata==fid[1])
fid1.locate.adjacent<-fid1.locate+1
fid2.locate<-which(alldata[c(fid1.locate.adjacent)]==fid[2])
wh.locate<-fid1.locate[fid2.locate]
diff.wh.locate<-diff(wh.locate)
# difference calc should yield a repeating pattern of header then frame
gaps<-unique(diff.wh.locate)
# if the pattern is simple, this value should be 2
no.unique.locates<-length(unique(diff.wh.locate))
# if the pattern is simple, this value should be 2
# header length = distance between the last two markers
header.l<-as.integer(rev(wh.locate)[1]-rev(wh.locate)[2])
}
# Below define the start indices for the headers (h.start) and frames (f.start)
# check if the first location of the resolution info is a small value or not
# .SEQ files have two instances where resolution is recorded
# .FCF files only have it once toward the end of the header
if(wh.locate[1]<header.l & no.unique.locates==2) # .SEQ files appear to be formatted this way
{
h.start<-wh.locate-header.l
h.start<-h.start[seq(2,length(h.start),2)]
h.end<-h.start+header.l+res2fram-1
f.start<-wh.locate+res2fram
f.start<-f.start[seq(2,length(f.start),2)]
f.end<-f.start+l-1
} else if(wh.locate[1]>header.l & no.unique.locates==2) # some .fcf formatted this way
{
h.start<-wh.locate-header.l
h.start<-h.start[seq(2,length(h.start),2)]
h.end<-h.start+header.l+res2fram-1
f.start<-wh.locate+res2fram
f.start<-f.start[seq(2,length(f.start),2)]
f.end<-f.start+l-1
} else if (wh.locate[1]>=header.l & no.unique.locates>2)
# other .fcf files formatted this way - there may be missed frame at beginning and end of file
{
wh.locate<-wh.locate[-2]
h.start<-wh.locate-header.l
h.start<-h.start[seq(1,length(h.start),2)]
h.start<-h.start[2:length(h.start)]
h.end<-h.start+header.l+res2fram-1
f.start<-wh.locate+res2fram
f.start<-f.start[seq(1,length(f.start),2)]
f.start<-f.start[2:length(f.start)]
f.end<-f.start+l-1
}
# each f.start should correspond to the start of a frame in the video file
# from my impression, the location of the header is 15 elements in front of the first pixel value
# res2fram is set to be 15
# if there are negative valaues for h.start this reflects instances where the header
# in front of the first frame is an irregular length and we have incorrectly calculated
# h.start[1] location. Set it to 1
h.start[h.start<0] <- 1 # Edit Added March 2019
return(list(h.start=h.start, f.start=f.start))
}
| /R/frameLocates.R | no_license | lishucai/Thermimage | R | false | false | 5,105 | r | #' @export
## frameLocates: locate header and frame start positions in a raw thermal
## video file (.SEQ/.FCF). Reads the file as 2-byte little-endian unsigned
## integers and searches for the repeated width/height marker pair that
## precedes each frame; returns list(h.start, f.start) of element indices.
## w/h: frame size in pixels; res2fram: marker-to-first-pixel offset.
#'
frameLocates<-function(vidfile="", w=640, h=480, res2fram=15)
{
f.start<-NULL
h.start<-NULL
l<-w*h # l = number of pixel elements in a frame (each element is 2 bytes)
# File information for binary read in
finfo <- file.info(vidfile)
byte.length<-2 # how many bytes make up an element
no.elements<-finfo$size/byte.length
# this should tell you how many total elements make up the video file
to.read <- file(vidfile, "rb") # set to.read file. rb means read binary
alldata<-readBin(to.read, integer(), n=5000000, size=byte.length, endian = "little", signed=FALSE)
close(to.read)
# approx number of frames in video file, can't be certain since some headers might be different sizes
# this value might be an over-estimate
fid<-c(w,h)
# this is the look-up sequence, starts with the magic byte 2, followed by resolution values, w, then h,
# which will repeat throughout the file in a predicable fashion corresponding to each frame. It is likely
# that the number of wh locates will be double the number of actual frames, since there is a w,h at the beginning
# and the end of every frame.
# readBin above reads at most 5e6 elements, so hitting that cap means the
# file did not fit in one read; markers found in the imported chunk are
# extrapolated across the whole file from their repeating gap pattern.
if(length(alldata)>=5000000)
{
# locate.fid() is a package helper defined elsewhere (not in this file)
wh.locate<-locate.fid(fid,alldata,long=TRUE)
# try wh.locate on a small chunk of all data
diff.wh.locate<-diff(wh.locate)
# difference calc should yield a repeating pattern of header then frame
gaps<-unique(diff.wh.locate)
# if the pattern is simple, this value should be 2
no.unique.locates<-length(gaps)
# reconstruct wh.locate from scrap, starting only with wh.locate[1]
if(no.unique.locates==2)
{
repeats<-trunc(finfo$size/2/(l+wh.locate[1]+gaps[1]))
# how many repeats required to create fill whole file
wh.locate<-cumsum(as.numeric((c(wh.locate[1],rep(gaps,repeats)))))
# cumulative sum up the 1st locate and repeate gaps
#wh.locate<-(wh.locate[-c(which(wh.locate>(finfo$size/byte.length)))])
# commented this out (March 2019) - extraneous and prone to error
# remove any locates that go beyond the length of the data file after import
header.l<-as.integer(rev(wh.locate)[1]-rev(wh.locate)[2])
}
if(no.unique.locates==4)
{
repeats<-trunc((finfo$size/byte.length)/(l))
gap.reps<-rep(gaps[3:4],repeats)
wh.locate<-cumsum(as.numeric((c(wh.locate[3],gap.reps))))
wh.locate<-(wh.locate[-c(which(wh.locate>(finfo$size/byte.length)))])
header.l<-as.integer(rev(wh.locate)[2]-rev(wh.locate)[3])
}
}
# Whole file fit in memory: find every w value immediately followed by h
# directly, without extrapolation.
if(length(alldata)<5000000)
{
# much faster without calling my function:
fid1.locate<-which(alldata==fid[1])
fid1.locate.adjacent<-fid1.locate+1
fid2.locate<-which(alldata[c(fid1.locate.adjacent)]==fid[2])
wh.locate<-fid1.locate[fid2.locate]
diff.wh.locate<-diff(wh.locate)
# difference calc should yield a repeating pattern of header then frame
gaps<-unique(diff.wh.locate)
# if the pattern is simple, this value should be 2
no.unique.locates<-length(unique(diff.wh.locate))
# if the pattern is simple, this value should be 2
# header length = distance between the last two markers
header.l<-as.integer(rev(wh.locate)[1]-rev(wh.locate)[2])
}
# Below define the start indices for the headers (h.start) and frames (f.start)
# check if the first location of the resolution info is a small value or not
# .SEQ files have two instances where resolution is recorded
# .FCF files only have it once toward the end of the header
if(wh.locate[1]<header.l & no.unique.locates==2) # .SEQ files appear to be formatted this way
{
h.start<-wh.locate-header.l
h.start<-h.start[seq(2,length(h.start),2)]
h.end<-h.start+header.l+res2fram-1
f.start<-wh.locate+res2fram
f.start<-f.start[seq(2,length(f.start),2)]
f.end<-f.start+l-1
} else if(wh.locate[1]>header.l & no.unique.locates==2) # some .fcf formatted this way
{
h.start<-wh.locate-header.l
h.start<-h.start[seq(2,length(h.start),2)]
h.end<-h.start+header.l+res2fram-1
f.start<-wh.locate+res2fram
f.start<-f.start[seq(2,length(f.start),2)]
f.end<-f.start+l-1
} else if (wh.locate[1]>=header.l & no.unique.locates>2)
# other .fcf files formatted this way - there may be missed frame at beginning and end of file
{
wh.locate<-wh.locate[-2]
h.start<-wh.locate-header.l
h.start<-h.start[seq(1,length(h.start),2)]
h.start<-h.start[2:length(h.start)]
h.end<-h.start+header.l+res2fram-1
f.start<-wh.locate+res2fram
f.start<-f.start[seq(1,length(f.start),2)]
f.start<-f.start[2:length(f.start)]
f.end<-f.start+l-1
}
# each f.start should correspond to the start of a frame in the video file
# from my impression, the location of the header is 15 elements in front of the first pixel value
# res2fram is set to be 15
# if there are negative valaues for h.start this reflects instances where the header
# in front of the first frame is an irregular length and we have incorrectly calculated
# h.start[1] location. Set it to 1
h.start[h.start<0] <- 1 # Edit Added March 2019
return(list(h.start=h.start, f.start=f.start))
}
|
# One sheet extraction. Similar to read.csv.
#
#
## Read one worksheet of an Excel file into a data.frame (similar in spirit
## to read.csv). One of sheetName / sheetIndex selects the sheet; sheetName
## takes precedence when both are given.
##
## file          path to the workbook
## sheetIndex    1-based index of the worksheet to read
## sheetName     name of the worksheet to read (takes precedence)
## rowIndex      explicit rows to read; when NULL, startRow:endRow is used
##               (defaulting to the sheet's first/last row)
## colIndex      columns to read; NULL means all
## as.data.frame return a data.frame (TRUE) or the raw named cell list
## header        treat the first extracted row as column names
## colClasses    optional classes to coerce each column to
## keepFormulas  return formula strings instead of evaluated values
## encoding      encoding passed through to getCellValue
## password      workbook password, or NULL
## ...           further arguments forwarded to data.frame()
read.xlsx <- function(file, sheetIndex, sheetName=NULL,
                      rowIndex=NULL, startRow=NULL, endRow=NULL, colIndex=NULL,
                      as.data.frame=TRUE, header=TRUE, colClasses=NA,
                      keepFormulas=FALSE, encoding="unknown", password=NULL, ...)
{
  ## && rather than &: scalar test that should short-circuit
  if (is.null(sheetName) && missing(sheetIndex))
    stop("Please provide a sheet name OR a sheet index.")

  wb <- loadWorkbook(file, password=password)
  sheets <- getSheets(wb)
  sheet <- if (is.null(sheetName)) {
    sheets[[sheetIndex]]
  } else {
    sheets[[sheetName]]
  }
  if (is.null(sheet))
    stop("Cannot find the sheet you requested in the file!")

  rowIndex <- if (is.null(rowIndex)) {
    if (is.null(startRow))
      startRow <- .jcall(sheet, "I", "getFirstRowNum") + 1  # POI rows are 0-based
    if (is.null(endRow))
      endRow <- .jcall(sheet, "I", "getLastRowNum") + 1
    startRow:endRow
  } else rowIndex

  rows <- getRows(sheet, rowIndex)
  if (length(rows) == 0)
    return(NULL)   # empty sheet/range: exit early

  cells <- getCells(rows, colIndex)
  res <- lapply(cells, getCellValue, keepFormulas=keepFormulas,
                encoding=encoding)

  if (as.data.frame) {
    ## Cell names look like "row.col"; recover the (row, col) position of
    ## each returned value because empty cells leave gaps in the sequence.
    ind <- lapply(strsplit(names(res), "\\."), as.numeric)
    namesIndM <- do.call(rbind, ind)
    row.names <- sort(as.numeric(unique(namesIndM[,1])))
    col.names <- sort(unique(namesIndM[,2]))
    cols <- length(col.names)
    VV <- matrix(list(NA), nrow=length(row.names), ncol=cols,
                 dimnames=list(row.names, col.names))
    ## indM remaps sparse row/col numbers onto dense matrix positions
    ## (needed when whole rows or columns are empty).
    indM <- apply(namesIndM, 2, function(x){as.numeric(as.factor(x))})
    VV[indM] <- res
    if (header){   # promote the first extracted row to column names
      colnames(VV) <- VV[1,]
      VV <- VV[-1,,drop=FALSE]
    }
    res <- vector("list", length=cols)
    names(res) <- colnames(VV)
    for (ic in seq_len(cols)) {
      aux <- unlist(VV[,ic], use.names=FALSE)
      nonNA <- which(!is.na(aux))
      if (length(nonNA) > 0) { # not an all-NA column
        ind <- min(nonNA)
        if (is.double(aux[ind])) {
          ## Probe the first non-NA cell: Excel stores dates/datetimes as
          ## numbers, distinguishable only via the cell's format.
          dateUtil <- .jnew("org/apache/poi/ss/usermodel/DateUtil")
          cell <- cells[[paste(row.names[ind + header], ".", col.names[ic], sep = "")]]
          isDatetime <- dateUtil$isCellDateFormatted(cell)
          if (isDatetime){
            if (identical(aux, round(aux))){ # whole days -> Date
              ## 25569 = days from Excel's 1899-based epoch to 1970-01-01
              aux <- as.Date(aux-25569, origin="1970-01-01")
            } else { # fractional days -> POSIXct; Excel stores no timezone
              aux <- as.POSIXct((aux-25569)*86400, tz="GMT",
                                origin="1970-01-01")
            }
          }
        }
      }
      if (!is.na(colClasses[ic]))
        suppressWarnings(class(aux) <- colClasses[ic]) # user-requested coercion
      res[[ic]] <- aux
    }
    res <- data.frame(res, ...)
  }
  res
}
| /R/read.xlsx.R | no_license | gorcha/xlsx | R | false | false | 3,181 | r | # One sheet extraction. Similar to read.csv.
#
#
## Read one worksheet of an Excel file into a data.frame (similar in spirit
## to read.csv). One of sheetName / sheetIndex selects the sheet; sheetName
## takes precedence when both are given. `...` is forwarded to data.frame().
read.xlsx <- function(file, sheetIndex, sheetName=NULL,
                      rowIndex=NULL, startRow=NULL, endRow=NULL, colIndex=NULL,
                      as.data.frame=TRUE, header=TRUE, colClasses=NA,
                      keepFormulas=FALSE, encoding="unknown", password=NULL, ...)
{
  ## && rather than &: scalar test that should short-circuit
  if (is.null(sheetName) && missing(sheetIndex))
    stop("Please provide a sheet name OR a sheet index.")

  wb <- loadWorkbook(file, password=password)
  sheets <- getSheets(wb)
  sheet <- if (is.null(sheetName)) {
    sheets[[sheetIndex]]
  } else {
    sheets[[sheetName]]
  }
  if (is.null(sheet))
    stop("Cannot find the sheet you requested in the file!")

  rowIndex <- if (is.null(rowIndex)) {
    if (is.null(startRow))
      startRow <- .jcall(sheet, "I", "getFirstRowNum") + 1  # POI rows are 0-based
    if (is.null(endRow))
      endRow <- .jcall(sheet, "I", "getLastRowNum") + 1
    startRow:endRow
  } else rowIndex

  rows <- getRows(sheet, rowIndex)
  if (length(rows) == 0)
    return(NULL)   # empty sheet/range: exit early

  cells <- getCells(rows, colIndex)
  res <- lapply(cells, getCellValue, keepFormulas=keepFormulas,
                encoding=encoding)

  if (as.data.frame) {
    ## Cell names look like "row.col"; recover the (row, col) position of
    ## each returned value because empty cells leave gaps in the sequence.
    ind <- lapply(strsplit(names(res), "\\."), as.numeric)
    namesIndM <- do.call(rbind, ind)
    row.names <- sort(as.numeric(unique(namesIndM[,1])))
    col.names <- sort(unique(namesIndM[,2]))
    cols <- length(col.names)
    VV <- matrix(list(NA), nrow=length(row.names), ncol=cols,
                 dimnames=list(row.names, col.names))
    ## indM remaps sparse row/col numbers onto dense matrix positions
    ## (needed when whole rows or columns are empty).
    indM <- apply(namesIndM, 2, function(x){as.numeric(as.factor(x))})
    VV[indM] <- res
    if (header){   # promote the first extracted row to column names
      colnames(VV) <- VV[1,]
      VV <- VV[-1,,drop=FALSE]
    }
    res <- vector("list", length=cols)
    names(res) <- colnames(VV)
    for (ic in seq_len(cols)) {
      aux <- unlist(VV[,ic], use.names=FALSE)
      nonNA <- which(!is.na(aux))
      if (length(nonNA) > 0) { # not an all-NA column
        ind <- min(nonNA)
        if (is.double(aux[ind])) {
          ## Probe the first non-NA cell: Excel stores dates/datetimes as
          ## numbers, distinguishable only via the cell's format.
          dateUtil <- .jnew("org/apache/poi/ss/usermodel/DateUtil")
          cell <- cells[[paste(row.names[ind + header], ".", col.names[ic], sep = "")]]
          isDatetime <- dateUtil$isCellDateFormatted(cell)
          if (isDatetime){
            if (identical(aux, round(aux))){ # whole days -> Date
              ## 25569 = days from Excel's 1899-based epoch to 1970-01-01
              aux <- as.Date(aux-25569, origin="1970-01-01")
            } else { # fractional days -> POSIXct; Excel stores no timezone
              aux <- as.POSIXct((aux-25569)*86400, tz="GMT",
                                origin="1970-01-01")
            }
          }
        }
      }
      if (!is.na(colClasses[ic]))
        suppressWarnings(class(aux) <- colClasses[ic]) # user-requested coercion
      res[[ic]] <- aux
    }
    res <- data.frame(res, ...)
  }
  res
}
|
# plot scatter points(pred vs reference value)
rm(list=ls())
library(ggplot2)
## Root-mean-square error between two equal-length numeric vectors.
rmse <- function(x, y) {
  sq_err <- (x - y)^2
  sqrt(sum(sq_err) / length(sq_err))
}
## Squared Pearson correlation (coefficient of determination) of x and y.
r2 <- function(x, y) {
  pearson <- cor(x, y)
  pearson^2
}
## func_eq_r2(m) - Return R2 equations, cut to 2-digt ####
## Build the plotmath annotation string `italic(R)^2 == "<value>"` for a
## fitted model, with R-squared formatted to two decimal places.
func_eq_r2 <- function(m) {
  r2_text <- sprintf('%.2f', summary(m)$r.squared)
  expr <- substitute(italic(R)^2 == v, list(v = r2_text))
  as.character(as.expression(expr))
}
### get_scatplot(x,y) - scatter plot using ggplot2 and showing rmse, r2, n####
# returned: ggplot object
# show: N, R2, RMSE, MBE
## Build a predicted-vs-reference scatter plot annotated with N, R2 and RMSE.
##
## x, y             reference and predicted values (numeric, same length)
## x_label, y_label axis labels
## x_max, y_max     upper axis limits (lower limit is 0)
## p_title          plot title
## theme_type       'bk' for theme_bw(), anything else keeps the default theme
## text_pos         'upleft' or 'upright': corner for the annotation text
## Returns a ggplot object with a red 1:1 reference line.
get_scatplot <- function(x, y, x_label, y_label, x_max, y_max, p_title, theme_type, text_pos) {
  p_data <- data.frame(x, y)
  diff <- p_data$y - p_data$x
  p_RMSE <- round(sqrt(mean(diff^2)), 2)
  p_nS <- nrow(p_data)
  # x_mean = mean(p_data$x,na.rm=T)
  # p_CV_RMSD = p_RMSE/x_mean
  # p_MBE = (sum(diff,na.rm=T))/p_nS
  p_model <- lm(p_data$y ~ p_data$x)
  ## BUG FIX: an "lm" fit has no $r.squared element (the original returned
  ## NULL here); R-squared lives on the summary object.
  p_R2 <- summary(p_model)$r.squared
  x_lim <- c(0, x_max)
  y_lim <- c(0, y_max)
  y_step <- y_max / 10   # vertical spacing between annotation lines
  p2 <- ggplot(aes(y = y, x = x), data = p_data) +
    xlab(x_label) + ylab(y_label) +
    xlim(x_lim) + ylim(y_lim) +
    ggtitle(p_title) +
    theme(text = element_text(size = 20))
  if (theme_type == 'bk') {
    p2 <- p2 + geom_point(alpha = 0.5, size = 2, show.legend = NA) + theme_bw() + geom_blank()
  } else {
    p2 <- p2 + geom_point(alpha = 0.5, size = 2)
  }
  if (text_pos == 'upleft') {  ## annotations in the upper-left corner
    p2 <- p2 +
      geom_text(x = 0, y = y_max - y_step, label = paste('N = ', p_nS, sep = ''), hjust = 0, size = 5) +  # N
      geom_text(x = 0, y = y_max - y_step * 2, label = func_eq_r2(p_model), hjust = 0, parse = TRUE, size = 5) +  # R2
      geom_text(x = 0, y = y_max - y_step * 3, label = paste('RMSE = ', sprintf('%.1f', p_RMSE), sep = ''), hjust = 0, size = 5)  # RMSE
    # geom_text(x=0,y=y_max-y_step*4,label=paste('MBE = ',sprintf('%.1f',p_MBE),sep=''),hjust=0,size=5) # MB
  } else if (text_pos == 'upright') {  ## annotations in the upper-right corner
    p2 <- p2 +
      geom_text(x = x_max * 0.75, y = y_max - y_step, label = paste('N = ', p_nS, sep = ''), hjust = 0, size = 5) +  # N
      geom_text(x = x_max * 0.75, y = y_max - y_step * 2, label = func_eq_r2(p_model), hjust = 0, parse = TRUE, size = 5) +  # R2
      geom_text(x = x_max * 0.75, y = y_max - y_step * 3, label = paste('RMSE = ', sprintf('%.1f', p_RMSE), sep = ''), hjust = 0, size = 5)  # RMSE
    # geom_text(x=x_max*0.75,y=y_max-y_step*4,label=paste('MBE = ',sprintf('%.1f',p_MBE),sep=''),hjust=0,size=5) # MB
  }
  p2 <- p2 +
    geom_abline(intercept = 0, colour = 'red', size = 1)  # 1:1 reference line
  # + geom_smooth(data=p_data, method='rlm',colour='blue',linetype='dashed',size=1,se=FALSE,fullrange=T)
  return(p2)
}
### 0 prep data
## Load merged plot data, drop incomplete rows and observations with
## AveAGB >= 250, then split 70/30 into train/test (seed fixed for
## reproducibility).
data <- read.csv(file="./merged_data.csv", head=T)
data <- na.omit(data)
data <- data[data$AveAGB<250,]
set.seed(1)
ind <- sample(2, nrow(data), replace=T,prob = c(0.7,0.3))
data.train <- data[ind==1,]
data.test <- data[ind==2,]
## Output folder for the saved figures (absolute Windows path).
path.plots = 'C:/MyFiles/Research/GraduationDesign/Model/AGBProj/Plots'
### 1.LM
### 1.1 train
## sel.lm (predictor names chosen for the linear model) comes from a
## previously saved selection run.
load("sel.lm.Rdata")
model.lm<-lm(AveAGB~.,data = data.train[c("AveAGB",sel.lm)])
pred.train <- predict(model.lm,data.train)
field.train <- data.train$AveAGB
x_label="参考值(Mg/ha)"
y_label="估测值(Mg/ha)"
x_max=250; y_max=250
p_title="线性回归"
theme_type='bk'
text_pos="upleft"
p_lm_tr <- get_scatplot(field.train,pred.train,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_lm_tr
#### 1.2 test
pred.test <- predict(model.lm,data.test)
field.test <- data.test$AveAGB
p_lm_ts <- get_scatplot(field.test,pred.test,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_lm_ts
ggsave(p_lm_tr,file=paste(path.plots,'lm_tr.jpg',sep='/'),width=5,height=5,units='in')
ggsave(p_lm_ts,file=paste(path.plots,'lm_ts.jpg',sep='/'),width=5,height=5,units='in')
### 2.RF
### 2.1 train
load("sel.rf.Rdata")
library("randomForest")
model.rf<-randomForest(AveAGB~.,data = data.train[c("AveAGB",sel.rf)],ntree=500,mtry=9)
pred.train <- predict(model.rf,data.train)
field.train <- data.train$AveAGB
x_label="参考值(Mg/ha)"
y_label="估测值(Mg/ha)"
x_max=250; y_max=250
p_title="随机森林"
theme_type='bk'
text_pos="upleft"
p_rf_tr <- get_scatplot(field.train,pred.train,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_rf_tr
#### 2.2 test
pred.test <- predict(model.rf,data.test)
field.test <- data.test$AveAGB
p_rf_ts <- get_scatplot(field.test,pred.test,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_rf_ts
ggsave(p_rf_tr,file=paste(path.plots,'rf_tr.jpg',sep='/'),width=5,height=5,units='in')
ggsave(p_rf_ts,file=paste(path.plots,'rf_ts.jpg',sep='/'),width=5,height=5,units='in')
### 3.SVM
### 3.1 train
## NOTE(review): the SVM model reuses the RF-selected predictors (sel.rf),
## not an SVM-specific selection - confirm this is intentional.
library("e1071")
model.svm<-svm(AveAGB~.,data = data.train[c("AveAGB",sel.rf)],epsilon=0.61,cost=4)
pred.train <- predict(model.svm,data.train)
field.train <- data.train$AveAGB
x_label="参考值(Mg/ha)"
y_label="估测值(Mg/ha)"
x_max=250; y_max=250
p_title="支持向量回归"
theme_type='bk'
text_pos="upleft"
p_svm_tr <- get_scatplot(field.train,pred.train,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_svm_tr
#### 3.2 test
pred.test <- predict(model.svm,data.test)
field.test <- data.test$AveAGB
p_svm_ts <- get_scatplot(field.test,pred.test,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_svm_ts
ggsave(p_svm_tr,file=paste(path.plots,'svm_tr.jpg',sep='/'),width=5,height=5,units='in')
ggsave(p_svm_ts,file=paste(path.plots,'svm_ts.jpg',sep='/'),width=5,height=5,units='in')
| /08_scat_plot.R | no_license | silverbullet1472/AGBProj | R | false | false | 5,365 | r | # plot scatter points(pred vs reference value)
rm(list=ls())
library(ggplot2)
rmse <- function(x,y)
{
sqrt(mean((x-y)^2))
}
r2 <- function(x,y)
{
cor(x,y)^2
}
## func_eq_r2(m) - Return R2 equations, cut to 2-digt ####
func_eq_r2 = function(m){
r2 = sprintf('%.2f',summary(m)$r.squared)
eq <- substitute(italic(R)^2 == r2)
as.character(as.expression(eq))
}
### get_scatplot(x,y) - scatter plot using ggplot2 and showing rmse, r2, n####
# returned: ggplot object
# show: N, R2, RMSE, MBE
get_scatplot <- function(x,y,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos) {
p_data <- data.frame(x,y)
diff = p_data$y - p_data$x
p_RMSE=round(sqrt(mean(diff^2)),2)
p_nS=dim(p_data)[1]
# x_mean = mean(p_data$x,na.rm=T)
# p_CV_RMSD = p_RMSE/x_mean
# p_MBE = (sum(diff,na.rm=T))/p_nS
p_model=lm(p_data$y~p_data$x)
p_R2=p_model$r.squared
x_lim=c(0,x_max)
y_lim=c(0,y_max)
y_step=y_max/10
p2 <- ggplot(aes(y=y,x=x),data=p_data) +
xlab(x_label) + ylab(y_label) +
xlim(x_lim) + ylim(y_lim) +
ggtitle(p_title) +
theme(text=element_text(size=20))
if(theme_type=='bk'){
p2 <- p2 + geom_point(alpha=0.5,size=2,show.legend=NA) + theme_bw() + geom_blank()
}else{ p2 <- p2 + geom_point(alpha=0.5,size=2) }
if(text_pos=='upleft'){ ## text output on the upper-left
p2 <- p2 +
geom_text(x=0,y=y_max-y_step, label=paste('N = ',p_nS,sep=''),hjust=0,size=5) + # N
geom_text(x=0,y=y_max-y_step*2,label=func_eq_r2(p_model),hjust=0,parse=TRUE,size=5) + # R2
geom_text(x=0,y=y_max-y_step*3,label=paste('RMSE = ',sprintf('%.1f',p_RMSE),sep=''),hjust=0,size=5) # RMSE
# geom_text(x=0,y=y_max-y_step*4,label=paste('MBE = ',sprintf('%.1f',p_MBE),sep=''),hjust=0,size=5) # MB
}else if(text_pos=='upright'){ ## text output on the upper-right
p2 <- p2 +
geom_text(x=x_max*0.75,y=y_max-y_step, label=paste('N = ',p_nS,sep=''),hjust=0,size=5) + # N
geom_text(x=x_max*0.75,y=y_max-y_step*2,label=func_eq_r2(p_model),hjust=0,parse=TRUE,size=5) + # R2
geom_text(x=x_max*0.75,y=y_max-y_step*3,label=paste('RMSE = ',sprintf('%.1f',p_RMSE),sep=''),hjust=0,size=5) # RMSE
# geom_text(x=x_max*0.75,y=y_max-y_step*4,label=paste('MBE = ',sprintf('%.1f',p_MBE),sep=''),hjust=0,size=5) # MB
}
p2 <- p2 +
geom_abline(intercept=0,colour='red',size=1) # 1:1 line
# + geom_smooth(data=p_data, method='rlm',colour='blue',linetype='dashed',size=1,se=FALSE,fullrange=T)
return(p2)
}
### 0 prep data
## Load merged plot data, drop incomplete rows and observations with
## AveAGB >= 250, then split 70/30 into train/test (seed fixed for
## reproducibility).
data <- read.csv(file="./merged_data.csv", head=T)
data <- na.omit(data)
data <- data[data$AveAGB<250,]
set.seed(1)
ind <- sample(2, nrow(data), replace=T,prob = c(0.7,0.3))
data.train <- data[ind==1,]
data.test <- data[ind==2,]
## Output folder for the saved figures (absolute Windows path).
path.plots = 'C:/MyFiles/Research/GraduationDesign/Model/AGBProj/Plots'
### 1.LM
### 1.1 train
## sel.lm (predictor names chosen for the linear model) comes from a
## previously saved selection run.
load("sel.lm.Rdata")
model.lm<-lm(AveAGB~.,data = data.train[c("AveAGB",sel.lm)])
pred.train <- predict(model.lm,data.train)
field.train <- data.train$AveAGB
x_label="参考值(Mg/ha)"
y_label="估测值(Mg/ha)"
x_max=250; y_max=250
p_title="线性回归"
theme_type='bk'
text_pos="upleft"
p_lm_tr <- get_scatplot(field.train,pred.train,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_lm_tr
#### 1.2 test
pred.test <- predict(model.lm,data.test)
field.test <- data.test$AveAGB
p_lm_ts <- get_scatplot(field.test,pred.test,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_lm_ts
ggsave(p_lm_tr,file=paste(path.plots,'lm_tr.jpg',sep='/'),width=5,height=5,units='in')
ggsave(p_lm_ts,file=paste(path.plots,'lm_ts.jpg',sep='/'),width=5,height=5,units='in')
### 2.RF
### 2.1 train
load("sel.rf.Rdata")
library("randomForest")
model.rf<-randomForest(AveAGB~.,data = data.train[c("AveAGB",sel.rf)],ntree=500,mtry=9)
pred.train <- predict(model.rf,data.train)
field.train <- data.train$AveAGB
x_label="参考值(Mg/ha)"
y_label="估测值(Mg/ha)"
x_max=250; y_max=250
p_title="随机森林"
theme_type='bk'
text_pos="upleft"
p_rf_tr <- get_scatplot(field.train,pred.train,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_rf_tr
#### 2.2 test
pred.test <- predict(model.rf,data.test)
field.test <- data.test$AveAGB
p_rf_ts <- get_scatplot(field.test,pred.test,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_rf_ts
ggsave(p_rf_tr,file=paste(path.plots,'rf_tr.jpg',sep='/'),width=5,height=5,units='in')
ggsave(p_rf_ts,file=paste(path.plots,'rf_ts.jpg',sep='/'),width=5,height=5,units='in')
### 3.SVM
### 3.1 train
## NOTE(review): the SVM model reuses the RF-selected predictors (sel.rf),
## not an SVM-specific selection - confirm this is intentional.
library("e1071")
model.svm<-svm(AveAGB~.,data = data.train[c("AveAGB",sel.rf)],epsilon=0.61,cost=4)
pred.train <- predict(model.svm,data.train)
field.train <- data.train$AveAGB
x_label="参考值(Mg/ha)"
y_label="估测值(Mg/ha)"
x_max=250; y_max=250
p_title="支持向量回归"
theme_type='bk'
text_pos="upleft"
p_svm_tr <- get_scatplot(field.train,pred.train,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_svm_tr
#### 3.2 test
pred.test <- predict(model.svm,data.test)
field.test <- data.test$AveAGB
p_svm_ts <- get_scatplot(field.test,pred.test,x_label,y_label,x_max,y_max,p_title,theme_type,text_pos)
p_svm_ts
ggsave(p_svm_tr,file=paste(path.plots,'svm_tr.jpg',sep='/'),width=5,height=5,units='in')
ggsave(p_svm_ts,file=paste(path.plots,'svm_ts.jpg',sep='/'),width=5,height=5,units='in')
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ProjectName: Bluebook 2021-VBP
# Purpose: Price
# programmer: Zhe Liu
# Date: 2021-03-12
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
##---- Price ----
## NOTE(review): `raw.total` and (below) `proj.nation` are created by
## earlier scripts in this pipeline and must exist before this file runs.
## origin price
## Observed unit price per pack by city & quarter (sales / units).
price.origin <- raw.total %>%
group_by(packid, quarter, province, city) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price = sales / units) %>%
select(-sales, -units)
## mean price by city year
price.city <- raw.total %>%
group_by(packid, year, province, city) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_city = sales / units) %>%
select(-sales, -units)
## mean price by province quarter
price.province <- raw.total %>%
group_by(packid, quarter, province) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_prov = sales / units) %>%
select(-sales, -units)
## mean price by province year
price.year <- raw.total %>%
group_by(packid, year, province) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_year = sales / units) %>%
select(-sales, -units)
## mean price by pack quarter
price.pack <- raw.total %>%
group_by(packid, quarter) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_pack = sales / units) %>%
select(-sales, -units)
## mean price by pack year
price.pack.year <- raw.total %>%
group_by(packid, year) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_pack_year = sales / units) %>%
select(-sales, -units)
##---- Result ----
## Attach a price to every projected record, falling back from the most
## specific price to progressively coarser averages when missing:
## city-quarter -> city-year -> province-quarter -> province-year ->
## pack-quarter -> pack-year. Units are then derived from sales / price
## and non-positive records dropped.
proj.price <- proj.nation %>%
left_join(price.origin, by = c('province', 'city', 'quarter', 'packid')) %>%
left_join(price.city, by = c('province', 'city', 'year', 'packid')) %>%
left_join(price.province, by = c('province', 'quarter', 'packid')) %>%
left_join(price.year, by = c('province', 'year', 'packid')) %>%
left_join(price.pack, by = c('quarter', 'packid')) %>%
left_join(price.pack.year, by = c('year', 'packid')) %>%
mutate(price = if_else(is.na(price), price_city, price),
price = if_else(is.na(price), price_prov, price),
price = if_else(is.na(price), price_year, price),
price = if_else(is.na(price), price_pack, price),
price = if_else(is.na(price), price_pack_year, price)) %>%
mutate(units = sales / price) %>%
filter(units > 0, sales > 0) %>%
select(year, quarter, date, province, city, market, atc4, molecule, packid,
units, sales)
write_feather(proj.price, '03_Outputs/VBP/05_Bluebook_2020_VBP_Projection_Price.feather')
| /04_Codes/VBP/05_Price.R | no_license | Zaphiroth/Bluebook_2021 | R | false | false | 3,097 | r | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ProjectName: Bluebook 2021-VBP
# Purpose: Price
# programmer: Zhe Liu
# Date: 2021-03-12
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
##---- Price ----
## origin price
price.origin <- raw.total %>%
group_by(packid, quarter, province, city) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price = sales / units) %>%
select(-sales, -units)
## mean price by city year
price.city <- raw.total %>%
group_by(packid, year, province, city) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_city = sales / units) %>%
select(-sales, -units)
## mean price by province quarter
price.province <- raw.total %>%
group_by(packid, quarter, province) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_prov = sales / units) %>%
select(-sales, -units)
## mean price by province year
price.year <- raw.total %>%
group_by(packid, year, province) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_year = sales / units) %>%
select(-sales, -units)
## mean price by pack quarter
price.pack <- raw.total %>%
group_by(packid, quarter) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_pack = sales / units) %>%
select(-sales, -units)
## mean price by pack year
price.pack.year <- raw.total %>%
group_by(packid, year) %>%
summarise(sales = sum(sales, na.rm = TRUE),
units = sum(units, na.rm = TRUE)) %>%
ungroup() %>%
mutate(price_pack_year = sales / units) %>%
select(-sales, -units)
##---- Result ----
proj.price <- proj.nation %>%
left_join(price.origin, by = c('province', 'city', 'quarter', 'packid')) %>%
left_join(price.city, by = c('province', 'city', 'year', 'packid')) %>%
left_join(price.province, by = c('province', 'quarter', 'packid')) %>%
left_join(price.year, by = c('province', 'year', 'packid')) %>%
left_join(price.pack, by = c('quarter', 'packid')) %>%
left_join(price.pack.year, by = c('year', 'packid')) %>%
mutate(price = if_else(is.na(price), price_city, price),
price = if_else(is.na(price), price_prov, price),
price = if_else(is.na(price), price_year, price),
price = if_else(is.na(price), price_pack, price),
price = if_else(is.na(price), price_pack_year, price)) %>%
mutate(units = sales / price) %>%
filter(units > 0, sales > 0) %>%
select(year, quarter, date, province, city, market, atc4, molecule, packid,
units, sales)
write_feather(proj.price, '03_Outputs/VBP/05_Bluebook_2020_VBP_Projection_Price.feather')
|
#Reading data
# readData.R is expected to define `timeStamp` and `data2plot` (the household
# power-consumption subset) in the calling environment -- confirm.
source("readData.R")
#Creating a png 480*480
png("plot3.png", height = 480, width = 480)
#Plot
# Base plot draws Sub_metering_1 in black; the other two series are overlaid
# with lines(). The device is closed by dev.off() after the legend.
plot(timeStamp, data2plot$Sub_metering_1, type= "l", xlab="", ylab="Energy sub metering")
lines(timeStamp, data2plot$Sub_metering_2, col="red")
lines(timeStamp, data2plot$Sub_metering_3, col="blue")
#legend
# Legend colors must stay in the same order as the three series above.
legend("topright",
col = c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lwd = 1)
dev.off() | /plot3.R | no_license | unishajoshi/ExData_Plotting1 | R | false | false | 469 | r | #Reading data
source("readData.R")
#Creating a png 480*480
png("plot3.png", height = 480, width = 480)
#Plot
plot(timeStamp, data2plot$Sub_metering_1, type= "l", xlab="", ylab="Energy sub metering")
lines(timeStamp, data2plot$Sub_metering_2, col="red")
lines(timeStamp, data2plot$Sub_metering_3, col="blue")
#legend
legend("topright",
col = c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lwd = 1)
dev.off() |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dbGetStatement}
\alias{dbGetStatement}
\alias{dbGetStatement,DBIResult-method}
\title{Get the statement associated with a result set}
\usage{
dbGetStatement(res, ...)
}
\arguments{
\item{res}{An object inheriting from \code{\linkS4class{DBIResult}}.}
\item{...}{Other arguments passed on to methods.}
}
\value{
a character vector
}
\description{
The default method extracts \code{statement} from the result of
\code{\link{dbGetInfo}(res)}.
}
\seealso{
Other DBIResult generics: \code{\link{dbClearResult}};
\code{\link{dbColumnInfo}}; \code{\link{dbFetch}},
\code{\link{dbFetch,DBIResult-method}},
\code{\link{fetch}}; \code{\link{dbGetRowCount}},
\code{\link{dbGetRowCount,DBIResult-method}};
\code{\link{dbGetRowsAffected}},
\code{\link{dbGetRowsAffected,DBIResult-method}};
\code{\link{dbHasCompleted}},
\code{\link{dbHasCompleted,DBIResult-method}}
}
| /DBI/man/dbGetStatement.Rd | no_license | jackieli123723/clearlinux | R | false | false | 932 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{dbGetStatement}
\alias{dbGetStatement}
\alias{dbGetStatement,DBIResult-method}
\title{Get the statement associated with a result set}
\usage{
dbGetStatement(res, ...)
}
\arguments{
\item{res}{An object inheriting from \code{\linkS4class{DBIResult}}.}
\item{...}{Other arguments passed on to methods.}
}
\value{
a character vector
}
\description{
The default method extracts \code{statement} from the result of
\code{\link{dbGetInfo}(res)}.
}
\seealso{
Other DBIResult generics: \code{\link{dbClearResult}};
\code{\link{dbColumnInfo}}; \code{\link{dbFetch}},
\code{\link{dbFetch,DBIResult-method}},
\code{\link{fetch}}; \code{\link{dbGetRowCount}},
\code{\link{dbGetRowCount,DBIResult-method}};
\code{\link{dbGetRowsAffected}},
\code{\link{dbGetRowsAffected,DBIResult-method}};
\code{\link{dbHasCompleted}},
\code{\link{dbHasCompleted,DBIResult-method}}
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/project.R
\docType{methods}
\name{project.description,ProjectSummary-method}
\alias{project.description,ProjectSummary-method}
\title{Returns a project description}
\usage{
\S4method{project.description}{ProjectSummary}(object)
}
\arguments{
\item{object}{a ProjectSummary}
}
\value{
the project description
}
\description{
Returns a project description
}
\author{
Jose A. Dianes
}
| /vignettes/man/project.description-ProjectSummary-method.Rd | no_license | gccong/ddiR-sirius | R | false | false | 469 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/project.R
\docType{methods}
\name{project.description,ProjectSummary-method}
\alias{project.description,ProjectSummary-method}
\title{Returns a project description}
\usage{
\S4method{project.description}{ProjectSummary}(object)
}
\arguments{
\item{object}{a ProjectSummary}
}
\value{
the project description
}
\description{
Returns a project description
}
\author{
Jose A. Dianes
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of closures that also memoise its inverse.
  # `set` replaces the matrix and invalidates the cache; `setInverse` /
  # `getInverse` store and retrieve the cached inverse (NULL until computed).
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'.
  # `x` is a cache object from makeCacheMatrix; reuse the stored inverse
  # when present, otherwise compute it with solve() and store it.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | singjy/ProgrammingAssignment2 | R | false | false | 697 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
## function to make a matrix
j <- NULL
set <- function(y){
x <<- y
j <<- NULL
}
get <- function()x
setInverse <- function(inverse) j <<- inverse
getInverse <- function() j
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
j <- x$getInverse()
if(!is.null(j)){
message("getting cached data")
return(j)
}
mat <- x$get()
j <- solve(mat,...)
x$setInverse(j)
j
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.Monthmean.R
\name{plot.Monthmean}
\alias{plot.Monthmean}
\title{Plot of Monthly Mean Estimates}
\usage{
\method{plot}{Monthmean}(x, ...)
}
\arguments{
\item{x}{a \code{Monthmean} object produced by \code{monthmean}.}
\item{\dots}{additional arguments passed to the plot.}
}
\description{
Plots estimated monthly means.
}
\seealso{
\code{monthmean}
}
\author{
Adrian Barnett \email{a.barnett@qut.edu.au}
}
| /man/plot.Monthmean.Rd | no_license | agbarnett/season | R | false | true | 489 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.Monthmean.R
\name{plot.Monthmean}
\alias{plot.Monthmean}
\title{Plot of Monthly Mean Estimates}
\usage{
\method{plot}{Monthmean}(x, ...)
}
\arguments{
\item{x}{a \code{Monthmean} object produced by \code{monthmean}.}
\item{\dots}{additional arguments passed to the plot.}
}
\description{
Plots estimated monthly means.
}
\seealso{
\code{monthmean}
}
\author{
Adrian Barnett \email{a.barnett@qut.edu.au}
}
|
# Three nodes: two acquired at tstep 1 with identical covariate values
# (probability should split evenly), one lone node at tstep 2 (probability 1).
# FIX: corrected the typo "probility" -> "probability" in the test name.
test_that("probability is uniform when variables are equal", {
  d <- data.frame(
    aoa = c(1, 1, 2),
    gval = c(1, 1, 0),
    tstep = c(1, 1, 2),
    id = c(1, 2, 3)
  )
  actual <- netgrowr:::probability_node_added(
    beta = c(1, 1),
    formula = aoa ~ gval,
    data = d,
    split_by = "tstep",
    label_with = "id"
  )
  # Result is a named vector keyed by the `id` column.
  expect_equal(actual, c("1" = 0.5, "2" = 0.5, "3" = 1.0))
})
# Words learned at an earlier time step appear as NA covariates at later
# steps; they must drop out of the normalization while still-unlearned
# words continue to contribute.
test_that("unlearned words, but not previously learned words, contribute to computation", {
d <- data.frame(
aoa = c(1, 1, 2, 2, 1, 1, 2, 2),
tstep = c(1, 1, 1, 1, 2, 2, 2, 2),
gval = c(3, 2, 1, 1, NA, NA, 2, 1),
id = c(1, 2, 3, 4, 1, 2, 3, 4)
)
# Intercept-only model: probability is uniform over the nodes still
# unlearned at each step (4 candidates at step 1, 2 at step 2).
actual <- netgrowr:::probability_node_added(
beta = c(1),
formula = aoa ~ 1,
d,
split_by = "tstep",
label_with = "id"
)
expect_equal(actual, c('1' = 0.25, '2' = 0.25, '3' = 0.5, '4' = 0.5))
# With a gval effect, the higher-gval node should get higher probability,
# and per-step probabilities must still normalize to 1.
actual <- netgrowr:::probability_node_added(
beta = c(1, 2),
formula = aoa ~ gval,
d,
split_by = "tstep",
label_with = "id"
)
expect_equal(as.vector(actual[1] > actual[2]), TRUE)
expect_equal(as.vector(actual[3] + actual[4]), 1.0)
})
| /tests/testthat/test-probability_node_added.R | no_license | crcox/netgrowr | R | false | false | 1,156 | r | test_that("probility is uniform when variables are equal", {
d <- data.frame(
aoa = c(1, 1, 2),
gval = c(1, 1, 0),
tstep = c(1, 1, 2),
id = c(1, 2, 3)
)
actual <- netgrowr:::probability_node_added(
beta = c(1, 1),
formula = aoa ~ gval,
data = d,
split_by = "tstep",
label_with = "id"
)
expect_equal(actual, c("1" = 0.5, "2" = 0.5, "3" = 1.0))
})
test_that("unlearned words, but not previously learned words, contribute to computation", {
d <- data.frame(
aoa = c(1, 1, 2, 2, 1, 1, 2, 2),
tstep = c(1, 1, 1, 1, 2, 2, 2, 2),
gval = c(3, 2, 1, 1, NA, NA, 2, 1),
id = c(1, 2, 3, 4, 1, 2, 3, 4)
)
actual <- netgrowr:::probability_node_added(
beta = c(1),
formula = aoa ~ 1,
d,
split_by = "tstep",
label_with = "id"
)
expect_equal(actual, c('1' = 0.25, '2' = 0.25, '3' = 0.5, '4' = 0.5))
actual <- netgrowr:::probability_node_added(
beta = c(1, 2),
formula = aoa ~ gval,
d,
split_by = "tstep",
label_with = "id"
)
expect_equal(as.vector(actual[1] > actual[2]), TRUE)
expect_equal(as.vector(actual[3] + actual[4]), 1.0)
})
|
#### C3W2 - APIs
# API: Application programming interfaces (like twitter)
# Create an account on the developer site
# Create an application like "Statistics"

## Accessing Twitter from R
# Use the httr package to start the process
# NOTE(review): assumes library(httr) is loaded in the session -- confirm.
myapp = oauth_app("twitter",
key = "youtConsumerKeyHere",
secret = "yourConsumerSecretHere")
# Sign into the application
sig = sign_oauth1.0(myapp,
token = "yourTokenHere",
token_secret = "yourTokenSecretHere")
# Take the data as a json file with authentication sig
homeTl = GET("https://api.twitter.com/1.1/statuses/home_timeline.json",sig)
## Converting the json object
# BUG FIX: the original called content(homeT1) (digit one), but the object
# created above is homeTl (letter l), which would fail with
# "object 'homeT1' not found".
json1 = content(homeTl)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1,1:4]
## How do I know the url to use?
# Twitter documentation --> Resource URL
| /C3W2_APIs.R | no_license | BlackKitsune/datasciencecoursera | R | false | false | 834 | r | #### C3W2 - APIs
# API: Application programming interfaces (like twitter)
# Create and account of developer team
# Create and application like "Statistics"
## Accessing Twitter from R
# use the httr packae to start the process
myapp = oauth_app("twitter",
key = "youtConsumerKeyHere",
secret = "yourConsumerSecretHere")
# Sign into the application
sig = sign_oauth1.0(myapp,
token = "yourTokenHere",
token_secret = "yourTokenSecretHere")
# Take the data as a json file with authentication sig
homeTl = GET("https://api.twitter.com/1.1/statuses/home_timeline.json",sig)
## Converting the json object
json1 = content(homeT1)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1,1:4]
## How do I know the url to use?
# Twitter documentation --> Resource URL
|
myfunction <- function() {
  # Return the sample mean of 100 standard-normal draws.
  mean(rnorm(100))
}
second <- function(x) {
x + rnorm(length(x))
} | /mycode.R | no_license | trevorwilf/R_Programming | R | false | false | 109 | r | myfunction <- function() {
x <- rnorm(100)
mean(x)
}
second <- function(x) {
  # Add independent standard-normal noise to each element of x.
  noise <- rnorm(length(x))
  x + noise
}
# caret custom-model definition for Weka's OneR (single-rule) classifier.
# The named elements follow caret's model-component contract
# (label, library, type, parameters, grid, fit, predict, prob, ...).
modelInfo <- list(label = "Single Rule Classification",
library = "RWeka",
loop = NULL,
type = c("Classification"),
# OneR has no tuning parameters; a single character placeholder is declared.
parameters = data.frame(parameter = c('parameter'),
class = c("character"),
label = "none"),
# Tuning grid: one placeholder row, since there is nothing to tune.
grid = function(x, y, len = NULL, search = "grid")
data.frame(parameter = "none"),
# Fit: bind the outcome as `.outcome`, then call RWeka::OneR via do.call so
# that any extra arguments in `...` are forwarded to the Weka wrapper.
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
dat <- if(is.data.frame(x)) x else as.data.frame(x)
dat$.outcome <- y
theDots <- list(...)
modelArgs <- c(list(formula = as.formula(".outcome ~ ."),
data = dat),
theDots)
out <- do.call(RWeka::OneR, modelArgs)
out
},
# Class predictions for new data (coerced to data.frame for RWeka).
predict = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit, newdata)
},
# Class-probability predictions for new data.
prob = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit, newdata, type = "probability")
},
levels = function(x) x$obsLevels,
predictors = function(x, ...) predictors(x$terms),
tags = c("Rule-Based Model", "Implicit Feature Selection"),
# No tuning parameters, so sorting candidate models is the identity.
sort = function(x) x)
| /data/caret_models/OneR.R | permissive | BTopcuoglu/code_review | R | false | false | 1,763 | r | modelInfo <- list(label = "Single Rule Classification",
library = "RWeka",
loop = NULL,
type = c("Classification"),
parameters = data.frame(parameter = c('parameter'),
class = c("character"),
label = "none"),
grid = function(x, y, len = NULL, search = "grid")
data.frame(parameter = "none"),
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
dat <- if(is.data.frame(x)) x else as.data.frame(x)
dat$.outcome <- y
theDots <- list(...)
modelArgs <- c(list(formula = as.formula(".outcome ~ ."),
data = dat),
theDots)
out <- do.call(RWeka::OneR, modelArgs)
out
},
predict = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit, newdata)
},
prob = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
predict(modelFit, newdata, type = "probability")
},
levels = function(x) x$obsLevels,
predictors = function(x, ...) predictors(x$terms),
tags = c("Rule-Based Model", "Implicit Feature Selection"),
sort = function(x) x)
|
#' Compute the epsilon-complexity of a time series.
#'
#' @param x A vector of points.
#' @param ds Number of times to downsample the input sequence.
#' @param method The interpolation or approximation method. One of
#' c("bspline", "cspline")
#' @param max_degree The maximum order spline used in the approximation
#' step
#' @param err_norm The norm type used in computing the approximation error.
#' @param sample_type The downsampling type. Either randomly sampled
#' or downsampled in integer steps.
#'
#' @return A \code{list} with :
#' \tabular{ll}{
#' \code{A} \tab The epsilon-complexity intercept coefficient \cr
#' \code{B} \tab The epsilon-complexity slope coefficient \cr
#' \code{fit} \tab The full linear model generated by fitting log(epsilons) ~ log(S) using \code{lm()}. \cr
#' \code{epsilons} \tab The mean sum of absolute errors at each downsample level. \cr
#' \code{S} \tab The fraction of samples maintained at each downsample level. \cr
#' \code{method} \tab The method used or a list of methods if method
#' "all" is used.
#'}
#'@export
#'@importFrom stats lm coefficients
ecomplex <- function(x, ds = 6,
max_degree = 5,
method = c("cspline", "bspline", "lift", "all"),
err_norm = c("mae", "mse", "max"),
sample_type = c("step", "random")) {
# Input validation: a plain numeric vector without NAs is required; short
# series are allowed but warned about (threshold 100 points).
if (!is.null(dim(x))) stop("Data must be a vector of numeric values")
x <- as.numeric(x)
if (anyNA(x)) stop("Data contains NA values")
if (length(x) < 100) warning("Complexity estimate may not be stable ",
"for short series")
x <- normalize(x)
method <- match.arg(method)
err_norm <- match.arg(err_norm)
sample_type <- match.arg(sample_type)
# Bundle the series and settings into an object whose class is the chosen
# method name; get_epsilons() then S3-dispatches on that class.
func <- structure(list(x = x,
ds = ds,
deg = max_degree,
err_norm = err_norm,
sample_type = sample_type),
class = method)
# Compute error for each downsample level up to 'ds'
res <- get_epsilons(func)
# S is the retained-sample fraction for each level: 1/2, 1/3, ...
S <- 1 / (2:(length(res$epsilons) + 1))
epsilons <- res$epsilons
method <- res$methods
# Catch lm() errors silently and return NA values
# The complexity coefficients (A, B) are the intercept and slope of the
# log-log regression of approximation error on retained fraction.
A <- B <- fit <- NA
try({
fit <- lm(log(epsilons) ~ log(S))
A <- unname(stats::coef(fit)[1])
B <- unname(stats::coef(fit)[2])
}, silent = TRUE )
if(is.na(A) || is.na(B)) warning("Coefficients could not be computed.",
" Check data for invalid values.")
# Return everything needed to inspect or re-plot the fit, classed for the
# package's print/plot methods.
structure(list(A = A,
B = B,
fit = fit,
epsilons = epsilons,
S = S,
method = method,
err_norm = err_norm,
sample_type = sample_type),
class = "ecomplex")
}
#' Compute epsilon errors for a time series.
#'
#' Computes the approximation error of a time series for each downsample
#' level using an interpolation (or approximation) method of type
#' basis-spline, cubic spline or lifting scheme. Dispatch is on
#' \code{class(func)}, which is set to the chosen method name by
#' \code{ecomplex}.
#'
#' @param func A structure with the time series, interpolation method,
#' and parameters for the method.
#' @return A \code{list} with :
#' \tabular{ll}{
#' \code{epsilons} \tab The mean sum of absolute errors at each level \cr
#' \code{methods} \tab The method used or a list of methods if method
#' "all" is used.
#'}
get_epsilons <- function(func) UseMethod("get_epsilons")
get_epsilons.bspline <- function(func){
  # B-spline approximation error at each downsample level 2..func$ds.
  levels <- 2:func$ds
  epsilons <- vapply(levels, function(k) {
    bspline_err(func$x, sample_num = k, max_degree = func$deg)
  }, numeric(1))
  list(epsilons = epsilons, methods = class(func))
}
get_epsilons.cspline <- function(func){
  # Cubic-spline approximation error at each downsample level 2..func$ds,
  # forwarding the error norm and sampling type chosen in ecomplex().
  levels <- 2:func$ds
  epsilons <- vapply(levels, function(k) {
    cspline_err(func$x, sample_num = k,
                max_degree = func$deg,
                err_norm = func$err_norm,
                sample_type = func$sample_type)
  }, numeric(1))
  list(epsilons = epsilons, methods = class(func))
}
get_epsilons.lift <- function(func) {
  # Lifting-scheme interpolation error per downsample level.
  # Levels are capped at 6 regardless of the requested func$ds.
  n_levels <- min(func$ds, 6)
  errs <- lapply(2:n_levels, function(lev) interp_err(func$x, iwt_mod(lev)))
  list(epsilons = unlist(errs), methods = class(func))
}
# Find best fit among all methods. If the series length
# is longer than 500, this defaults to using just the
# cubic spline and lift methods.
get_epsilons.all <- function(func){
methods <- c("cspline", "bspline", "lift")
if (length(func$x) > 500 ) {
methods <- c("cspline", "lift")
}
# Re-dispatch get_epsilons once per candidate method by temporarily
# overwriting class(func) inside the lambda (func is copied per call, so
# the outer object is untouched).
eps <- lapply(methods,
function(method) get_epsilons({class(func) <- method; func}))
# Keep only the epsilon vectors and lay them out as columns, one per method.
eps <- lapply(eps, function(eps) eps$epsilons)
df <- data.frame(do.call(cbind, eps))
names(df) <- methods
# get minimum epsilons
# Per downsample level, take the smallest error and record which method
# produced it; methods_used can therefore vary row by row.
epsilons <- apply(df, 1, min)
methods_used <- methods[apply(df, 1, which.min)]
list(epsilons = epsilons, methods = methods_used)
}
| /R/ecomplex.R | permissive | nateaff/ecomplex | R | false | false | 5,255 | r |
#' Compute the epsilon-complexity of a time series.
#'
#' @param x A vector of points.
#' @param ds Number of times to downsample the input sequence.
#' @param method The interpolation or approximation method. One of
#' c("bspline", "cspline")
#' @param max_degree The maximum order spline used in the approximation
#' step
#' @param err_norm The norm type used in computing the approximation error.
#' @param sample_type The downsampling type. Either randomly sampled
#' or downsampled in integer steps.
#'
#' @return A \code{list} with :
#' \tabular{ll}{
#' \code{A} \tab The epsilon-complexity intercept coefficient \cr
#' \code{B} \tab The epsilon-complexity slope coefficient \cr
#' \code{fit} \tab The full linear model generated by fitting log(epsilons) ~ log(S) using \code{lm()}. \cr
#' \code{epsilons} \tab The mean sum of absolute errors at each downsample level. \cr
#' \code{S} \tab The fraction of samples maintained at each downsample level. \cr
#' \code{method} \tab The method used or a list of methods if method
#' "all" is used.
#'}
#'@export
#'@importFrom stats lm coefficients
ecomplex <- function(x, ds = 6,
max_degree = 5,
method = c("cspline", "bspline", "lift", "all"),
err_norm = c("mae", "mse", "max"),
sample_type = c("step", "random")) {
if (!is.null(dim(x))) stop("Data must be a vector of numeric values")
x <- as.numeric(x)
if (anyNA(x)) stop("Data contains NA values")
if (length(x) < 100) warning("Complexity estimate may not be stable ",
"for short series")
x <- normalize(x)
method <- match.arg(method)
err_norm <- match.arg(err_norm)
sample_type <- match.arg(sample_type)
func <- structure(list(x = x,
ds = ds,
deg = max_degree,
err_norm = err_norm,
sample_type = sample_type),
class = method)
# Compute error for each downsample level up to 'ds'
res <- get_epsilons(func)
S <- 1 / (2:(length(res$epsilons) + 1))
epsilons <- res$epsilons
method <- res$methods
# Catch lm() errors silently and return NA values
A <- B <- fit <- NA
try({
fit <- lm(log(epsilons) ~ log(S))
A <- unname(stats::coef(fit)[1])
B <- unname(stats::coef(fit)[2])
}, silent = TRUE )
if(is.na(A) || is.na(B)) warning("Coefficients could not be computed.",
" Check data for invalid values.")
structure(list(A = A,
B = B,
fit = fit,
epsilons = epsilons,
S = S,
method = method,
err_norm = err_norm,
sample_type = sample_type),
class = "ecomplex")
}
#' Compute epsilon errors for a time series.
#'
#' Computes the mean absolute error (MAE) of a time
#' series for each downsample level using an
#' interpolation (or approximation) method of type
#' basis-spline, cubic spline or lifting sche
#'
#' @param func A structure with the time series, interpolation method,
#' and parameters for the method.
#' @return A \code{list} with :
#' \tabular{ll}{
#' \code{epsilons} \tab The mean sum of absolute errors at each level \cr
#' \code{methods} \tab The method used or a list of methods if method
#' "all" is used.
#'}
get_epsilons <- function(func) UseMethod("get_epsilons")
get_epsilons.bspline <- function(func){
epsilons <- double(func$ds - 1)
ds <- 2:func$ds
for (k in ds) {
epsilons[k - 1] <- bspline_err(func$x, sample_num = k, max_degree = func$deg)
}
list(epsilons = epsilons, methods = class(func))
}
get_epsilons.cspline <- function(func){
epsilons <- double(func$ds - 1)
ds <- 2:func$ds
for (k in ds) {
epsilons[k - 1] <- cspline_err(func$x, sample_num = k,
max_degree = func$deg,
err_norm = func$err_norm,
sample_type = func$sample_type)
}
list(epsilons = epsilons, methods = class(func))
}
get_epsilons.lift <- function(func) {
ds <- min(func$ds, 6)
epsilons <- unlist(lapply((2:ds), function(y) interp_err(func$x, iwt_mod(y))))
list(epsilons = epsilons, methods = class(func))
}
# Find best fit among all methods. If the series length
# is longer than 500, this defaults to using just the
# cubic spline and lift methods.
get_epsilons.all <- function(func){
methods <- c("cspline", "bspline", "lift")
if (length(func$x) > 500 ) {
methods <- c("cspline", "lift")
}
eps <- lapply(methods,
function(method) get_epsilons({class(func) <- method; func}))
eps <- lapply(eps, function(eps) eps$epsilons)
df <- data.frame(do.call(cbind, eps))
names(df) <- methods
# get minimum epsilons
epsilons <- apply(df, 1, min)
methods_used <- methods[apply(df, 1, which.min)]
list(epsilons = epsilons, methods = methods_used)
}
|
# Merge the three ORCID lookup batches with the NCMAS project list and write
# the combined outputs to clean_data/.
library("tidyverse")
library("lubridate")
# Input batches: each holds ORCID lookups for a subset of lead CIs.
data1 <- read_csv("raw_data/orcids_NCMAS_2021-08-09.csv")
data2 <- read_csv("raw_data/orcids_NCMAS_second_2021-08-09.csv")
data3 <- read_csv("raw_data/orcids_NCMAS_third_2021-08-09.csv")
total_data <- read_csv("clean_data/NCMAS_2021-08-09.csv")
# #Are there duplicates in total?
# test_duplicated <-
# total_data %>% select(`Lead CI`) %>%
# group_by(`Lead CI`) %>%
# filter(n()>1)
# Stack the batches and keep only rows where an ORCID was found.
all_orcids <- bind_rows(data1, data2, data3) %>%
filter(!is.na(orcid))
write_csv(all_orcids,
here::here(paste0("clean_data/all_orcids_NCMAS_",
today(), ".csv")))
# Build a "Given Family" key to join against the project list's lead_ci.
all_orcids_lead <- all_orcids %>%
unite(col = "lead_ci", given_name:last_name, sep = " ")
all_data <- total_data %>%
select(project_code:project_title) %>%
filter(project_code != "TOTALS (KSU)") %>%
# FIX: the join spec was `by = ("lead_ci" = "lead_ci")` -- a bare
# parenthesised expression rather than the intended named vector. Use the
# standard c() form (equivalent here to by = "lead_ci").
left_join(all_orcids_lead, by = c("lead_ci" = "lead_ci")) %>%
mutate(
institution = case_when(
str_detect(institution.x, "NSW") ~ "University of New South Wales",
# NOTE(review): both remaining branches return institution.x, so
# institution.y from the ORCID table is never used -- confirm intended.
is.na(institution.y) ~institution.x,
TRUE ~ institution.x )) %>%
select(-institution.x, - institution.y) %>%
arrange(institution, lead_ci) %>%
select(orcid, lead_ci, first, last, institution,
project_code, project_title)
write_csv(all_data,
here::here(paste0("clean_data/all_orcids_projects_NCMAS_",
today(), ".csv")))
| /scripts/merge.R | permissive | orchid00/test_bibliographic_collection | R | false | false | 1,436 | r | # Merge results
library("tidyverse")
library("lubridate")
data1 <- read_csv("raw_data/orcids_NCMAS_2021-08-09.csv")
data2 <- read_csv("raw_data/orcids_NCMAS_second_2021-08-09.csv")
data3 <- read_csv("raw_data/orcids_NCMAS_third_2021-08-09.csv")
total_data <- read_csv("clean_data/NCMAS_2021-08-09.csv")
# #Are there duplicates in total?
# test_duplicated <-
# total_data %>% select(`Lead CI`) %>%
# group_by(`Lead CI`) %>%
# filter(n()>1)
all_orcids <- bind_rows(data1, data2, data3) %>%
filter(!is.na(orcid))
write_csv(all_orcids,
here::here(paste0("clean_data/all_orcids_NCMAS_",
today(), ".csv")))
all_orcids_lead <- all_orcids %>%
unite(col = "lead_ci", given_name:last_name, sep = " ")
all_data <- total_data %>%
select(project_code:project_title) %>%
filter(project_code != "TOTALS (KSU)") %>%
left_join(all_orcids_lead, by = ("lead_ci" = "lead_ci")) %>%
mutate(
institution = case_when(
str_detect(institution.x, "NSW") ~ "University of New South Wales",
is.na(institution.y) ~institution.x,
TRUE ~ institution.x )) %>%
select(-institution.x, - institution.y) %>%
arrange(institution, lead_ci) %>%
select(orcid, lead_ci, first, last, institution,
project_code, project_title)
write_csv(all_data,
here::here(paste0("clean_data/all_orcids_projects_NCMAS_",
today(), ".csv")))
|
# NCAA baseball park-factor script: scrape a team's schedule table, split the
# game results into runs scored/allowed at home vs on the road, and compute
# an initial park factor (iPF) and a regressed park factor (RAA).
# NOTE(review): relies on rvest (read_html/html_nodes/html_table), stringr
# (str_split/str_count), janitor (adorn_totals) and dplyr (%>%) being
# attached earlier in the session -- confirm library() calls exist upstream.
#insert url
{
url <- "url"
#R reads the url as a HTML doc
my_html <- read_html(url)
# We'll access the first table from the web page(Schedule)
# Trial and Error process
my_tables <- html_nodes(my_html,"table",)[[2]]
team_table <- html_table((my_tables), fill = TRUE)
View(team_table)
team_table <- team_table[-1:-2,]
}
{
a_table <- team_table
#remove missing values if missing
#b_table = a_table[-26,]
#a_table <- b_table
#add columns
# Placeholder columns: RS/RA = runs scored/allowed; H suffix = at home,
# RN suffix = on the road.
a_table$RA <-NA
a_table$RSH <-NA
a_table$RAH <-NA
a_table$RSRN <-NA
a_table$RARN <-NA
a_table$Result <-NA
}
#Name Columns
names(a_table) = c("Date","Opponent","Result","RS","RA","RSH","RAH","RSRN","RARN")
#split text from each designated column
# NOTE(review): the bare str_split("Result","-") / str_split("RS", " ") /
# str_split("RA", " ") calls below act on literal strings and discard their
# results -- they appear to be leftover exploration; the real splits happen
# on the a_table columns on the following lines.
{
str_split("Result","-")
split <- str_split(a_table$Result,"-")
a_table$RS <- sapply(split, "[[",1)
a_table$RA <- sapply(split, "[[",2)
str_split("RS", " ")
split2<- str_split(a_table$RS, " ")
a_table$Result <- sapply(split2, "[[",1)
a_table$RS <- sapply(split2, "[[",2)
str_split("RA", " ")
split3<- str_split(a_table$RA, " ")
a_table$RA <- sapply(split3, "[[",2)
}
#ifelse statement to determine whether runs were scored & allowed at home or on the road
# Road games are marked with "@" in the Opponent column.
{
a_table$RSH <- ifelse(!grepl("@", a_table$Opponent), a_table$RS, 0)
a_table$RAH <- ifelse(!grepl("@", a_table$Opponent), a_table$RA, 0)
a_table$RSRN <- ifelse(grepl("@", a_table$Opponent), a_table$RS, 0)
a_table$RARN <- ifelse(grepl("@", a_table$Opponent), a_table$RA, 0)
}
#add home and road games column
{
a_table$HG<-NA
a_table$RG<-NA
#count the amount of home and road games
# Each Opponent cell contains at most one "@", so RG is 0/1 per game and
# HG is its complement.
a_table$RG <- str_count(a_table$Opponent, "@")
a_table$HG <- ifelse((a_table$RG == 1), 0, 1)
}
{
ws <- a_table
#convert each string to numeric
ws$RS = as.numeric(as.character(ws$RS))
ws$RA = as.numeric(as.character(ws$RA))
ws$RSH = as.numeric(as.character(ws$RSH))
ws$RAH = as.numeric(as.character(ws$RAH))
ws$RSRN = as.numeric(as.character(ws$RSRN))
ws$RARN = as.numeric(as.character(ws$RARN))
ws$HG = as.numeric(as.character(ws$HG))
ws$RG = as.numeric(as.character(ws$RG))
}
#generate totals row
final <- ws %>%
adorn_totals("row")
#extract totals row from dataset
{
j2 <- tail(final,1)
#add iPF column and format decimal places
# iPF = (total runs per home game) / (total runs per road game).
# RAA regresses iPF halfway toward 1 to temper single-season noise.
j2$iPF <- NA
j2$RAA <- NA
j2$iPF <- ((j2$RSH+j2$RAH)/(j2$HG))/((j2$RSRN+j2$RARN)/(j2$RG))
j2$iPF <- round(j2$iPF,3)
j2$RAA <- ifelse(j2$iPF > 1, j2$iPF-(abs(j2$iPF-1)/2), j2$iPF+(abs(j2$iPF-1)/2))
j2$RAA <- round(j2$RAA,3)
View(j2)
}
| /ParkFactorNCAA.R | permissive | robert-frey/NCAABaseParkFactor | R | false | false | 2,516 | r | #insert url
{
url <- "url"
#R reads the url as a HTML doc
my_html <- read_html(url)
# We'll access the first table from the web page(Schedule)
# Trial and Error process
my_tables <- html_nodes(my_html,"table",)[[2]]
team_table <- html_table((my_tables), fill = TRUE)
View(team_table)
team_table <- team_table[-1:-2,]
}
{
a_table <- team_table
#remove missing values if missing
#b_table = a_table[-26,]
#a_table <- b_table
#add columns
a_table$RA <-NA
a_table$RSH <-NA
a_table$RAH <-NA
a_table$RSRN <-NA
a_table$RARN <-NA
a_table$Result <-NA
}
#Name Columns
names(a_table) = c("Date","Opponent","Result","RS","RA","RSH","RAH","RSRN","RARN")
#split text from each designated column
{
str_split("Result","-")
split <- str_split(a_table$Result,"-")
a_table$RS <- sapply(split, "[[",1)
a_table$RA <- sapply(split, "[[",2)
str_split("RS", " ")
split2<- str_split(a_table$RS, " ")
a_table$Result <- sapply(split2, "[[",1)
a_table$RS <- sapply(split2, "[[",2)
str_split("RA", " ")
split3<- str_split(a_table$RA, " ")
a_table$RA <- sapply(split3, "[[",2)
}
#ifelse statement to determine whether runs were scored & allowed at home or on the road
{
a_table$RSH <- ifelse(!grepl("@", a_table$Opponent), a_table$RS, 0)
a_table$RAH <- ifelse(!grepl("@", a_table$Opponent), a_table$RA, 0)
a_table$RSRN <- ifelse(grepl("@", a_table$Opponent), a_table$RS, 0)
a_table$RARN <- ifelse(grepl("@", a_table$Opponent), a_table$RA, 0)
}
#add home and road games column
{
a_table$HG<-NA
a_table$RG<-NA
#count the amount of home and road games
a_table$RG <- str_count(a_table$Opponent, "@")
a_table$HG <- ifelse((a_table$RG == 1), 0, 1)
}
{
ws <- a_table
#convert each string to numeric
ws$RS = as.numeric(as.character(ws$RS))
ws$RA = as.numeric(as.character(ws$RA))
ws$RSH = as.numeric(as.character(ws$RSH))
ws$RAH = as.numeric(as.character(ws$RAH))
ws$RSRN = as.numeric(as.character(ws$RSRN))
ws$RARN = as.numeric(as.character(ws$RARN))
ws$HG = as.numeric(as.character(ws$HG))
ws$RG = as.numeric(as.character(ws$RG))
}
#generate totals row
final <- ws %>%
adorn_totals("row")
#extract totals row from dataset
{
j2 <- tail(final,1)   # the "Total" row appended by adorn_totals()
#add iPF column and format decimal places
j2$iPF <- NA
j2$RAA <- NA
# iPF (initial park factor): total runs (scored + allowed) per home game
# divided by total runs (scored + allowed) per road game.
j2$iPF <- ((j2$RSH+j2$RAH)/(j2$HG))/((j2$RSRN+j2$RARN)/(j2$RG))
j2$iPF <- round(j2$iPF,3)
# RAA: regress iPF halfway back toward 1, shrinking extreme park factors.
# Both branches are algebraically (iPF + 1) / 2 but kept as written.
j2$RAA <- ifelse(j2$iPF > 1, j2$iPF-(abs(j2$iPF-1)/2), j2$iPF+(abs(j2$iPF-1)/2))
j2$RAA <- round(j2$RAA,3)
View(j2)
}
|
\name{MLE of the zero inflated Gamma and Weibull distributions}
\alias{zigamma.mle}
\alias{ziweibull.mle}
\title{MLE of the zero inflated Gamma and Weibull distributions
}
\description{
MLE of the zero inflated Gamma and Weibull distributions.
}
\usage{
zigamma.mle(x, tol = 1e-07)
ziweibull.mle(x, tol = 1e-07)
}
\arguments{
\item{x}{
A numerical vector with positive data and zeros.
}
\item{tol}{
The tolerance value to terminate the Newton-Raphson algorithm.
}
}
\details{
MLE of some zero inflated models is performed.
}
\value{
A list including:
\item{iters}{
The iterations required by the Newton-Raphson to estimate the parameters
of the distribution for the non zero data.
}
\item{loglik}{
The full log-likelihood of the model.
}
\item{param}{
The parameters of the model.
}
}
\references{
Sandra Taylor and Katherine Pollard (2009). Hypothesis Tests for Point-Mass
Mixture Data with Application to Omics Data with Many Zero Values.
Statistical Applications in Genetics and Molecular Biology, 8(1): 1--43.
Kalimuthu Krishnamoorthy, Meesook Lee and Wang Xiao (2015). Likelihood ratio tests
for comparing several gamma distributions. Environmetrics, 26(8):571-583.
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{zigamma.reg}, \link{gammapois.mle}
}
}
\examples{
x <- rgamma(200, 4, 1)
x[sample(1:200, 20)] <- 0
zigamma.mle(x)
}
| /man/zigamma.mle.Rd | no_license | RfastOfficial/Rfast2 | R | false | false | 1,469 | rd | \name{MLE of the zero inflated Gamma and Weibull distributions}
\alias{zigamma.mle}
\alias{ziweibull.mle}
\title{MLE of the zero inflated Gamma and Weibull distributions
}
\description{
MLE of the zero inflated Gamma and Weibull distributions.
}
\usage{
zigamma.mle(x, tol = 1e-07)
ziweibull.mle(x, tol = 1e-07)
}
\arguments{
\item{x}{
A numerical vector with positive data and zeros.
}
\item{tol}{
The tolerance value to terminate the Newton-Raphson algorithm.
}
}
\details{
MLE of some zero inflated models is performed.
}
\value{
A list including:
\item{iters}{
The iterations required by the Newton-Raphson to estimate the parameters
of the distribution for the non zero data.
}
\item{loglik}{
The full log-likelihood of the model.
}
\item{param}{
The parameters of the model.
}
}
\references{
Sandra Taylor and Katherine Pollard (2009). Hypothesis Tests for Point-Mass
Mixture Data with Application to Omics Data with Many Zero Values.
Statistical Applications in Genetics and Molecular Biology, 8(1): 1--43.
Kalimuthu Krishnamoorthy, Meesook Lee and Wang Xiao (2015). Likelihood ratio tests
for comparing several gamma distributions. Environmetrics, 26(8):571-583.
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{zigamma.reg}, \link{gammapois.mle}
}
}
\examples{
x <- rgamma(200, 4, 1)
x[sample(1:200, 20)] <- 0
zigamma.mle(x)
}
|
/boot(introduction of zizufa).R | no_license | databatman/My.Rcode.newhand | R | false | false | 1,499 | r | ||
c1 <- c("cscsc", "cdscdscds", "cdcdcs")
c2 <- c("cdcd", "sss", "sdsdsd")
c3 <- c("cdcd", "sswssdss", "sdsssssdsd")
df <- data.frame(c1, c3)
df <- data.frame(df, c2) | /r_module/tests.R | no_license | razyel8/metrics | R | false | false | 164 | r | c1 <- c("cscsc", "cdscdscds", "cdcdcs")
c2 <- c("cdcd", "sss", "sdsdsd")
c3 <- c("cdcd", "sswssdss", "sdsssssdsd")
df <- data.frame(c1, c3)
df <- data.frame(df, c2) |
#' Predict Method for sgspls
#'
#' Predicted values based on sparse group subgroup PLS. New responses are predicted using a fitted model and a new matrix of observations.
#'
#' @param object Object of class inheriting from \code{"sgspls"}.
#' @param newdata Data matrix in which to look for for explanatory variables to be used for prediction.
#' @param ... Not currently used.
#'
#' @export
#' @return \code{perf} returns a list that contains the following performance measures:
#' \code{predict} function produces predicted values, obtained by evaluating the sparse group subgroup PLS.
#' The prediction values are calculated based on the regression coefficients of \code{object$Y} onto \code{object$variates$X}.
#'
predict.sgspls <-
  function(object, newdata, ...) {
    # Predict responses for new observations from a fitted sgspls model.
    # Validation must run before newdata is touched: missing() only works
    # while the argument is still unevaluated, and the vector-to-matrix
    # promotion must happen before as.matrix(), which would otherwise turn
    # a vector into a column matrix and make the vector branch unreachable.
    if (missing(newdata)) {
      stop("No new data available.")
    }
    p <- ncol(object$parameters$X)  # number of predictors the model was fit on
    if (length(dim(newdata)) == 0) {
      if (length(newdata) != p)
        stop("'newdata' must be a numeric matrix with ncol = ", p,
             " or a vector of length = ", p, ".")
      dim(newdata) <- c(1, p)
    }
    newdata <- as.matrix(newdata)
    nobs <- nrow(newdata)
    nresp <- ncol(object$parameters$Y)
    ncomp <- object$parameters$ncomp
    # Regression coefficients (B) and intercepts (B0) for each model size
    # 1..ncomp, as returned by the sgspls coef() method.
    B_coef <- coef(object, type = "coefficients")
    B <- B_coef$B
    B0 <- B_coef$B0
    # pred[obs, response, k] holds predictions using the first k components.
    pred <- array(dim = c(nobs, nresp, ncomp))
    for (i in seq_len(ncomp)) {
      pred[, , i] <- newdata %*% B[, , i] +
        matrix(rep(B0[i, ], each = nobs), nrow = nobs)
    }
    pred
  }
| /R/predict.R | no_license | matt-sutton/sgspls | R | false | false | 1,696 | r | #' Predict Method for sgspls
#'
#' Predicted values based on sparse group subgroup PLS. New responses are predicted using a fitted model and a new matrix of observations.
#'
#' @param object Object of class inheriting from \code{"sgspls"}.
#' @param newdata Data matrix in which to look for for explanatory variables to be used for prediction.
#' @param ... Not currently used.
#'
#' @export
#' @return \code{perf} returns a list that contains the following performance measures:
#' \code{predict} function produces predicted values, obtained by evaluating the sparse group subgroup PLS.
#' The prediction values are calculated based on the regression coefficients of \code{object$Y} onto \code{object$variates$X}.
#'
predict.sgspls <-
function(object, newdata, ...) {
# predict function for sgspls
# Returns an array of dimension (nobs, nresp, ncomp): one prediction
# matrix per number of PLS components.
newdata <- as.matrix(newdata)
nobs <- nrow(newdata)   # number of new observations
# NOTE(review): p is taken from newdata itself, so the length check below
# compares newdata against its own width, not the model's predictor count.
p <- ncol(newdata)
nresp <- ncol(object$parameters$Y)   # number of response variables
npred <- ncol(object$parameters$X)   # number of predictors in the fitted model
ncomp <- object$parameters$ncomp     # number of PLS components
#-- argument validation --#
# NOTE(review): newdata was already evaluated by as.matrix() above, so
# missing(newdata) can never be TRUE here -- a missing argument would have
# errored earlier. This check should precede any use of newdata.
if (missing(newdata)){
stop("No new data available.")
}
# NOTE(review): after as.matrix() a vector is already a column matrix, so
# this dim-is-NULL branch is unreachable as written.
if (length(dim(newdata)) == 0) {
if (length(newdata) != p)
stop("'newdata' must be a numeric matrix with ncol = ", p,
" or a vector of length = ", p, ".")
dim(newdata) = c(1, p)
}
# NOTE(review): this zero array is dead code -- immediately overwritten below.
B <- array(0, dim = c(npred, nresp, ncomp))
B_coef <- coef(object, type = "coefficients")  # coefficients and intercepts
B <- B_coef$B
B0 <- B_coef$B0
# pred[obs, response, k]: predictions using the first k components.
pred <- array(dim = c(nobs, nresp, ncomp))
for( i in 1:ncomp ){
pred[,,i] <- newdata%*%B[ , , i] + matrix(rep(B0[i,], each = nobs), nrow = nobs)
}
return(pred)
}
|
/PCA_soccerPlayers.R | no_license | CamiloAguilar/soccerPCA | R | false | false | 1,742 | r | ||
## This file contains two functions to allow for the caching
## of an inverted matrix to avoid costly recalculations.
## makeCacheMatrix: This function creates a special "matrix"
## object that can cache its inverse.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four closures sharing one enclosing environment:
##   set(y)      -- replace the stored matrix and invalidate the cache
##   get()       -- return the stored matrix
##   setinv(inv) -- store a computed inverse
##   getinv()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed: drop the stale inverse
    },
    get = function() x,
    setinv = function(inv) cached_inverse <<- inv,
    getinv = function() cached_inverse
  )
}
## cacheSolve: This function computes the inverse of the special
## "matrix" returned by makeCacheMatrix above. If the inverse has
## already been calculated (and the matrix has not changed), then
## the cachesolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## On the first call the inverse is computed with solve() and stored via
## setinv(); later calls reuse the cached value (announced via message()).
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinv(inv)
  } else {
    message("Getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | keithrc/ProgrammingAssignment2 | R | false | false | 898 | r | ## This file contains two functions to allow for the caching
## of an inverted matrix to avoid costly recalculations.
## makeCacheMatrix: This function creates a special "matrix"
## object that can cache its inverse.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four closures sharing the enclosing environment:
##   set(y)      -- replace the matrix and invalidate the cached inverse
##   get()       -- return the current matrix
##   setinv(inv) -- store the computed inverse
##   getinv()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
m<-NULL  # cached inverse; NULL marks "not yet computed"
set<-function(y){
x<<-y
m<<-NULL  # matrix changed: drop the stale inverse
}
get<-function() x
setinv <-function(inv) m<<-inv
getinv <-function() m
list(set=set, get=get,setinv=setinv,getinv=getinv)
}
## cacheSolve: This function computes the inverse of the special
## "matrix" returned by makeCacheMatrix above. If the inverse has
## already been calculated (and the matrix has not changed), then
## the cachesolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## Reuses the cached inverse when present; otherwise computes it with
## solve() and stores it via setinv(). Extra arguments go to solve().
cacheSolve <- function(x, ...) {
m <- x$getinv()
if(!is.null(m)){
message("Getting cached data")  # cache hit
return(m)
}
data<-x$get()
m<-solve(data,...)
x$setinv(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locationplot-methods.R
\docType{methods}
\name{ASEset-locationplot}
\alias{ASEset-locationplot}
\alias{locationplot}
\alias{locationplot,ASEset-method}
\title{locationplot ASEset objects}
\usage{
locationplot(x, ...)
\S4method{locationplot}{ASEset}(
x,
type = "fraction",
strand = "*",
yaxis = TRUE,
xaxis = FALSE,
xlab = FALSE,
ylab = TRUE,
xlab.text = "",
ylab.text = "",
legend.colnames = "",
size = c(0.8, 1),
main = NULL,
pValue = FALSE,
cex.main = 0.7,
cex.ylab = 0.6,
cex.legend = 0.5,
OrgDb = NULL,
TxDb = NULL,
verbose = TRUE,
top.fraction.criteria = "maxcount",
allow.whole.chromosome = FALSE,
...
)
}
\arguments{
\item{x}{an ASEset object.}
\item{...}{arguments passed on to barplot function}
\item{type}{'fraction' or 'count'}
\item{strand}{'+','-','*' or 'both'. This argument determines
which strand is plotted. See \code{getAlleleCounts} for more information on
strand.}
\item{yaxis}{wheter the y-axis is to be displayed or not}
\item{xaxis}{wheter the x-axis is to be displayed or not}
\item{xlab}{showing labels for the tic marks}
\item{ylab}{showing labels for the tic marks}
\item{xlab.text}{xlab text}
\item{ylab.text}{ylab text}
\item{legend.colnames}{gives colnames to the legend matrix}
\item{size}{will give extra space in the margins of the inner plots}
\item{main}{text to use as main label}
\item{pValue}{Display p-value}
\item{cex.main}{set main label size}
\item{cex.ylab}{set ylab label size}
\item{cex.legend}{set legend label size}
\item{OrgDb}{an OrgDb object from which to plot a gene map. If given
together with argument TxDb this will only be used to extract genesymbols.}
\item{TxDb}{a TxDb object from which to plot an exon map.}
\item{verbose}{Setting \code{verbose=TRUE} gives details of procedure during
function run}
\item{top.fraction.criteria}{'maxcount', 'ref' or 'phase'}
\item{allow.whole.chromosome}{logical, overrides 200kb region limit, defaults to FALSE}
}
\description{
plotting ASE effects over a specific genomic region
}
\details{
The locationplot methods visualises how fractions are distributed over a
larger region of genes on one chromosome. It takes and ASEset object as well
as additional information on plot type (see \code{\link{barplot}}), strand
type (see \code{\link{getAlleleCounts}}), colouring, as well as annotation.
The annotation is taken either from the bioconductor OrgDb-sets, the TxDb
sets or both. It is obviously important to make sure that the genome build
used is the same as used in aligning the RNA-seq data.
}
\examples{
data(ASEset)
locationplot(ASEset)
#SNPs are plotted in the order in which they are found.
#This can be sorted according to location as follows:
locationplot(ASEset[order(start(rowRanges(ASEset))),])
#for ASEsets with fewer SNPs the 'count' type plot is
# useful for detailed visualization.
locationplot(ASEset,type='count',strand='*')
}
\seealso{
\itemize{ \item The \code{\link{ASEset}} class which the
locationplot function can be called up on. }
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{locationplot}
| /man/locationplot.Rd | no_license | pappewaio/AllelicImbalance | R | false | true | 3,173 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locationplot-methods.R
\docType{methods}
\name{ASEset-locationplot}
\alias{ASEset-locationplot}
\alias{locationplot}
\alias{locationplot,ASEset-method}
\title{locationplot ASEset objects}
\usage{
locationplot(x, ...)
\S4method{locationplot}{ASEset}(
x,
type = "fraction",
strand = "*",
yaxis = TRUE,
xaxis = FALSE,
xlab = FALSE,
ylab = TRUE,
xlab.text = "",
ylab.text = "",
legend.colnames = "",
size = c(0.8, 1),
main = NULL,
pValue = FALSE,
cex.main = 0.7,
cex.ylab = 0.6,
cex.legend = 0.5,
OrgDb = NULL,
TxDb = NULL,
verbose = TRUE,
top.fraction.criteria = "maxcount",
allow.whole.chromosome = FALSE,
...
)
}
\arguments{
\item{x}{an ASEset object.}
\item{...}{arguments passed on to barplot function}
\item{type}{'fraction' or 'count'}
\item{strand}{'+','-','*' or 'both'. This argument determines
which strand is plotted. See \code{getAlleleCounts} for more information on
strand.}
\item{yaxis}{wheter the y-axis is to be displayed or not}
\item{xaxis}{wheter the x-axis is to be displayed or not}
\item{xlab}{showing labels for the tic marks}
\item{ylab}{showing labels for the tic marks}
\item{xlab.text}{xlab text}
\item{ylab.text}{ylab text}
\item{legend.colnames}{gives colnames to the legend matrix}
\item{size}{will give extra space in the margins of the inner plots}
\item{main}{text to use as main label}
\item{pValue}{Display p-value}
\item{cex.main}{set main label size}
\item{cex.ylab}{set ylab label size}
\item{cex.legend}{set legend label size}
\item{OrgDb}{an OrgDb object from which to plot a gene map. If given
together with argument TxDb this will only be used to extract genesymbols.}
\item{TxDb}{a TxDb object from which to plot an exon map.}
\item{verbose}{Setting \code{verbose=TRUE} gives details of procedure during
function run}
\item{top.fraction.criteria}{'maxcount', 'ref' or 'phase'}
\item{allow.whole.chromosome}{logical, overrides 200kb region limit, defaults to FALSE}
}
\description{
plotting ASE effects over a specific genomic region
}
\details{
The locationplot methods visualises how fractions are distributed over a
larger region of genes on one chromosome. It takes and ASEset object as well
as additional information on plot type (see \code{\link{barplot}}), strand
type (see \code{\link{getAlleleCounts}}), colouring, as well as annotation.
The annotation is taken either from the bioconductor OrgDb-sets, the TxDb
sets or both. It is obviously important to make sure that the genome build
used is the same as used in aligning the RNA-seq data.
}
\examples{
data(ASEset)
locationplot(ASEset)
#SNPs are plotted in the order in which they are found.
#This can be sorted according to location as follows:
locationplot(ASEset[order(start(rowRanges(ASEset))),])
#for ASEsets with fewer SNPs the 'count' type plot is
# useful for detailed visualization.
locationplot(ASEset,type='count',strand='*')
}
\seealso{
\itemize{ \item The \code{\link{ASEset}} class which the
locationplot function can be called up on. }
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{locationplot}
|
library(sde)
X0=5
N=252
t0=0
T=100
M=1
theta1=0.559
theta2=0.238
theta3=0.074
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
plot(X)
X0=1
N=252
t0=0
T=100
M=100
theta1=0.559
theta2=0.238
theta3=0.074
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
dt=(T-t0)/N
X=exp(-dt*X)
X.mean = rowMeans(X)
X.sd = apply(X,1,sd)
plot(as.vector(time(X)),X.mean,type="l",xlab="time",ylab="value")
lines(as.vector(time(X)),X.mean + (1.96*X.sd)/sqrt(M),col = "purple")
lines(as.vector(time(X)),X.mean - (1.96*X.sd)/sqrt(M),col = "purple")
## Monte Carlo discount factor between time steps i and j: averages
## exp(-dt * accumulated rate) over the simulated paths stored in the
## columns of X (rows = time steps, columns = paths); dt is the step size.
default <- function(i, j, X, dt) {
  if (j <= i) {
    ## empty or reversed interval: no discounting
    1
  } else {
    steps <- (i + 1):j
    if (length(steps) == 1) {
      ## single step: discount over that one row of rates
      mean(exp(-dt * X[steps, ]))
    } else {
      ## several steps: sum the rate path per column before discounting
      mean(exp(-dt * colSums(X[steps, ])))
    }
  }
}
l = matrix(1,N,N)
for (i in 1:N) {
for (j in 1:N) {
l[i,j] = default(i, j, X, dt) }
}
mean(l)
image (1:N,1:N,log(l))
X0=10
N=252
t0=0
T=1
M=1
theta1=0.00125
theta2=0.25
theta3=0.1
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
plot(X)
X0=10
N=252
t0=0
T=1
M=100
theta1=0.00125
theta2=0.25
theta3=0.1
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
dt=(T-t0)/N
X.mean = rowMeans(X)
X.sd = apply(X,1,sd)
plot(as.vector(time(X)),X.mean,type="l",xlab="time",ylab="value")
lines(as.vector(time(X)),X.mean + (1.96*X.sd)/sqrt(M),col = "purple")
lines(as.vector(time(X)),X.mean - (1.96*X.sd)/sqrt(M),col = "purple")
## Monte Carlo discount factor between time steps i and j, averaged over
## the simulated paths in the columns of X (rows = time steps,
## columns = paths); dt is the step size.
default <- function (i,j,X,dt) {
if (j <= i) {
# empty or reversed interval: no discounting
return(1) }
if (j == i+1) {
# single step: discount over row j only
return (mean(exp(-dt*X[j,])))
}
# several steps: sum the rate path over rows (i+1)..j per column first
return (mean(exp(-dt*colSums(X[(i+1):j,]))))
}
l = matrix(1,N,N)
for (i in 1:N) {
for (j in 1:N) {
l[i,j] = default(i, j, X, dt) }
}
image (1:N,1:N,log(l))
X0=10
N=100
t0=0
T=12
M=1000
theta=c(0.559, 0.238, 0.074)
X0a=10
Na=100
t0a=0
Ta=12
Ma=1000
thetaa=c(0.514878, 0.082, 0.67)
X <- sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=theta, model="CIR")
Y <- sde.sim(X0=X0a, N=Na, M=Ma, t0=t0a, T=Ta, theta=thetaa, model="CIR")
W=X+Y
dt=(T-t0)/N
W.mean = rowMeans(W)
W.sd = apply(W,1,sd)
plot(as.vector(time(W)),W.mean,type="l",xlab="time",ylab="value")
lines(as.vector(time(W)),W.mean + (1.96*W.sd)/sqrt(M), col="purple")
lines(as.vector(time(W)),W.mean - (1.96*W.sd)/sqrt(M),col="purple")
## Monte Carlo discount factor between time steps i and j for the combined
## process W (rows = time steps, columns = simulated paths); dt = step size.
default <- function (i,j,W,dt) { if (j <= i) {
return(1) }
if (j == i+1) {
# single step: discount over row j only
return (mean(exp(-dt*W[j,])))
}
# several steps: sum rows (i+1)..j per path before discounting
return (mean(exp(-dt*colSums(W[(i+1):j,])))) }
l = matrix(1,N,N)
for (i in 1:N) { for (j in 1:N) {
l[i,j] = default(i, j, W, dt)
}
}
image (1:N,1:N,log(l))
| /PricingWithCIR.r | no_license | lomberer/Credit-Risk-1 | R | false | false | 2,587 | r | library(sde)
X0=5
N=252
t0=0
T=100
M=1
theta1=0.559
theta2=0.238
theta3=0.074
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
plot(X)
X0=1
N=252
t0=0
T=100
M=100
theta1=0.559
theta2=0.238
theta3=0.074
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
dt=(T-t0)/N
X=exp(-dt*X)
X.mean = rowMeans(X)
X.sd = apply(X,1,sd)
plot(as.vector(time(X)),X.mean,type="l",xlab="time",ylab="value")
lines(as.vector(time(X)),X.mean + (1.96*X.sd)/sqrt(M),col = "purple")
lines(as.vector(time(X)),X.mean - (1.96*X.sd)/sqrt(M),col = "purple")
default <- function (i,j,X,dt) {
if (j <= i) {
return(1) }
if (j == i+1) {
return (mean(exp(-dt*X[j,])))
}
return (mean(exp(-dt*colSums(X[(i+1):j,]))))
}
l = matrix(1,N,N)
for (i in 1:N) {
for (j in 1:N) {
l[i,j] = default(i, j, X, dt) }
}
mean(l)
image (1:N,1:N,log(l))
X0=10
N=252
t0=0
T=1
M=1
theta1=0.00125
theta2=0.25
theta3=0.1
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
plot(X)
X0=10
N=252
t0=0
T=1
M=100
theta1=0.00125
theta2=0.25
theta3=0.1
X<-sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=c(theta1, theta2, theta3), model="CIR")
dt=(T-t0)/N
X.mean = rowMeans(X)
X.sd = apply(X,1,sd)
plot(as.vector(time(X)),X.mean,type="l",xlab="time",ylab="value")
lines(as.vector(time(X)),X.mean + (1.96*X.sd)/sqrt(M),col = "purple")
lines(as.vector(time(X)),X.mean - (1.96*X.sd)/sqrt(M),col = "purple")
default <- function (i,j,X,dt) {
if (j <= i) {
return(1) }
if (j == i+1) {
return (mean(exp(-dt*X[j,])))
}
return (mean(exp(-dt*colSums(X[(i+1):j,]))))
}
l = matrix(1,N,N)
for (i in 1:N) {
for (j in 1:N) {
l[i,j] = default(i, j, X, dt) }
}
image (1:N,1:N,log(l))
X0=10
N=100
t0=0
T=12
M=1000
theta=c(0.559, 0.238, 0.074)
X0a=10
Na=100
t0a=0
Ta=12
Ma=1000
thetaa=c(0.514878, 0.082, 0.67)
X <- sde.sim(X0=X0, N=N, M=M, t0=t0, T=T, theta=theta, model="CIR")
Y <- sde.sim(X0=X0a, N=Na, M=Ma, t0=t0a, T=Ta, theta=thetaa, model="CIR")
W=X+Y
dt=(T-t0)/N
W.mean = rowMeans(W)
W.sd = apply(W,1,sd)
plot(as.vector(time(W)),W.mean,type="l",xlab="time",ylab="value")
lines(as.vector(time(W)),W.mean + (1.96*W.sd)/sqrt(M), col="purple")
lines(as.vector(time(W)),W.mean - (1.96*W.sd)/sqrt(M),col="purple")
default <- function (i,j,W,dt) { if (j <= i) {
return(1) }
if (j == i+1) {
return (mean(exp(-dt*W[j,])))
}
return (mean(exp(-dt*colSums(W[(i+1):j,])))) }
l = matrix(1,N,N)
for (i in 1:N) { for (j in 1:N) {
l[i,j] = default(i, j, W, dt)
}
}
image (1:N,1:N,log(l))
|
# Code to apply the BMARD method to a simulation defined from a MA(4) process
# the periodogram matrix can be loaded or computed new simulations , uncomment the appropiate section
argsbash<- commandArgs()[6]
library(Rcpp)
library(TSA)
library(doParallel)
## change the directory accordingly
Rcpp::sourceCpp('~/BMARD_V112020.cpp')
# time series lenght is 500 points
# last value of the periodogram filtered, (frequency = .5)
# epsilon and epsilonBW are specified based on practical reasons are the MH step proposal
# for other parameters see specification in cpp code
# Run one BMARD MCMC chain on periodogram S. The truncation level,
# MH step sizes (epsilon, epsilon_BW) and the initial number of
# components are drawn at random so each chain starts from a different
# configuration. Reads the global Nsamp (number of MCMC iterations).
# NOTE(review): alpha_const = F relies on F not being reassigned;
# prefer the literal FALSE.
multirun <- function(S){
trunc<- sample(20:30,1);  # random truncation level for this chain
SpectralBDP(Tsize = 500, omegas =seq(.002,.498,by=.002), Pgram = S , Nsamp=Nsamp, L=trunc, epsilon=rep( runif(1, .05,.2) ,trunc) , epsilon_BW=runif(1,.05,.2) , SamplingRate = 1,
MaxFreq = 0.5, Sup = Nsamp, Lambda_init = sample(1:20,1) , Lambda_prior = "flat",
Lambda_max = 50L, Lambda_prop_dist = "up1down1", alpha_const = F,
alpha_init = 1, a_alpha_L = .1, b_alpha_L = .1, q = 1)
}
#number of mcmc iterations
Nsamp=100000
#number of chains
chains<-6
BDP=list()
size=500
# for new simulations use the following code and generate new MA(4) processes
S<-arima.sim(n = size, list(ma=c(-.3,-.6,-.3,.6) ), sd = 1, n.start = 10000)
#standarized serie
S<-(S-mean(S))/sd(S)
# for use the simulated data load the matrix by chaing accordingly the directory where you saved the matrix
database<-readRDS("~/pgrammatrixMA4.rds")
B<-list()
for(k in 1:chains){
#uncomment the appropiate option for new simualtion (first) or the computed periodogram (second)
# B[[k]]<- periodogram(S, plot=F)$spec[-250]
# B[[k]]<-database[as.numeric(argsbash) ,]
}
#run the method in parallel, the cores is thinking the cpus are going to be partitioned in 3 clusters using 33% of the cpus each with gives the best performance
BDP<-mclapply(B, multirun, mc.cores = 3)
# save the results in a database for posterior use, change the directory accordingly
saveRDS(BDP, file= paste( "~/BayesiansimulMA4_V" ,argsbash, ".rds", sep = "" ) )
| /Rcode_auxiliary/simulationMA4.R | no_license | Cuauhtemoctzin/BMARD | R | false | false | 2,123 | r | # Code to apply the BMARD method to a simulation defined from a MA(4) process
# the periodogram matrix can be loaded or computed new simulations , uncomment the appropiate section
argsbash<- commandArgs()[6]
library(Rcpp)
library(TSA)
library(doParallel)
## change the directory accordingly
Rcpp::sourceCpp('~/BMARD_V112020.cpp')
# time series lenght is 500 points
# last value of the periodogram filtered, (frequency = .5)
# epsilon and epsilonBW are specified based on practical reasons are the MH step proposal
# for other parameters see specification in cpp code
# Run one BMARD MCMC chain on periodogram S. The truncation level,
# MH step sizes (epsilon, epsilon_BW) and the initial number of
# components are drawn at random so each chain starts from a different
# configuration. Reads the global Nsamp (number of MCMC iterations).
# NOTE(review): alpha_const = F relies on F not being reassigned;
# prefer the literal FALSE.
multirun <- function(S){
trunc<- sample(20:30,1);  # random truncation level for this chain
SpectralBDP(Tsize = 500, omegas =seq(.002,.498,by=.002), Pgram = S , Nsamp=Nsamp, L=trunc, epsilon=rep( runif(1, .05,.2) ,trunc) , epsilon_BW=runif(1,.05,.2) , SamplingRate = 1,
MaxFreq = 0.5, Sup = Nsamp, Lambda_init = sample(1:20,1) , Lambda_prior = "flat",
Lambda_max = 50L, Lambda_prop_dist = "up1down1", alpha_const = F,
alpha_init = 1, a_alpha_L = .1, b_alpha_L = .1, q = 1)
}
#number of mcmc iterations
Nsamp=100000
#number of chains
chains<-6
BDP=list()
size=500
# for new simulations use the following code and generate new MA(4) processes
S<-arima.sim(n = size, list(ma=c(-.3,-.6,-.3,.6) ), sd = 1, n.start = 10000)
#standarized serie
S<-(S-mean(S))/sd(S)
# for use the simulated data load the matrix by chaing accordingly the directory where you saved the matrix
database<-readRDS("~/pgrammatrixMA4.rds")
B<-list()
for(k in 1:chains){
#uncomment the appropiate option for new simualtion (first) or the computed periodogram (second)
# B[[k]]<- periodogram(S, plot=F)$spec[-250]
# B[[k]]<-database[as.numeric(argsbash) ,]
}
# Run the MCMC over the list of periodograms in parallel. mc.cores = 3 splits
# the work across 3 worker processes (roughly a third of the available CPUs
# each), which gave the best performance in practice.
# NOTE(review): mclapply() is provided by the "parallel" package and its
# fork-based parallelism is unavailable on Windows -- confirm the platform
# and that "parallel" is attached.
BDP<-mclapply(B, multirun, mc.cores = 3)
# Save the results to disk for later use; change the directory accordingly.
saveRDS(BDP, file= paste( "~/BayesiansimulMA4_V" ,argsbash, ".rds", sep = "" ) )
|
###########################################################################################-
###########################################################################################-
##
## Graphing LGA flight history ----
##
###########################################################################################-
###########################################################################################-
#=========================================================================================#
# Setting up ----
#=========================================================================================#
# Top-level run parameters for the LGA flight-history animation.
# Save the graph frames with a parallel SOCK cluster (TRUE) or serially (FALSE).
run_in_parallel <- FALSE
# The single day of LGA arrivals to animate.
date_to_map <- lubridate::as_date("2020-04-06")
# Time-of-day window; 00:00:01-23:59:59 covers essentially the whole day.
start_time <- hms::as_hms("00:00:01")
end_time <- hms::as_hms("23:59:59")
#-----------------------------------------------------------------------------------------#
# Loading libraries
#-----------------------------------------------------------------------------------------#
library(magrittr)
library(moveVis)
library(move)
library(tidyverse)
library(lubridate)
library(glue)
library(hms)
library(here)
library(DBI)
library(RSQLite)
library(dbplyr)
library(viridis)
library(fs)
library(ggdark)
library(doSNOW)
library(parallel)
library(doParallel)
library(tictoc)
#-----------------------------------------------------------------------------------------#
# Parsing dates for filtering data "server" side
#-----------------------------------------------------------------------------------------#
month_to_map <- month(date_to_map)
day_to_map <- day(date_to_map)
start_hour <- hour(start_time)
start_minute <- minute(start_time)
end_hour <- hour(end_time)
end_minute <- minute(end_time)
#-----------------------------------------------------------------------------------------#
# Creating frames folder
#-----------------------------------------------------------------------------------------#
frames_folder <- here(glue("plots/graph_frames/{date_to_map}/30_sec"))
dir_create(frames_folder)
#-----------------------------------------------------------------------------------------#
# Setting map view parameters
#-----------------------------------------------------------------------------------------#
# Bounding box around LGA (big enough to show approaches from all directions)
lga_bbox <-
tibble(
longitude = c(-74.232575, -73.516318),
latitude = c(40.503766, 41.046881)
)
# Where the runways cross
lga_center <-
c(
longitude = -73.874861,
latitude = 40.780347
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# CRS
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
my_crs <- "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
#=========================================================================================#
# Loading and cleaning data ----
#=========================================================================================#
# Downloaded in "LGA flight history.R"
#-----------------------------------------------------------------------------------------#
# Pulling data
#-----------------------------------------------------------------------------------------#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Connecting to database
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
lga_tracks_db <- dbConnect(SQLite(), "data/lga_tracks_db.sqlite3")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Pulling
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
arrivals_tracks <-
lga_tracks_db %>%
tbl("arrivals_tracks") %>%
filter(
month == month_to_map,
day == day_to_map,
hour %>% between(start_hour, end_hour),
minute %>% between(start_minute, end_minute)
) %>%
collect() %>%
mutate(time = as_datetime(time, tz = "US/Eastern")) %>%
# Records with missing values are probably not "real" data points, so dropping them
drop_na(time, longitude, latitude, unique_flight) %>%
# Some of the records are doubled, which makes moveVis mad, so keeping only one
distinct(time, unique_flight, .keep_all = TRUE) %>%
# Restricting path data to coordinates inside the bounding box, to reduce unnecessary processing and
# memory overhead, and also so that the eventual summary statistics correspond to paths that
# are visible on the map
filter(
longitude %>% between(lga_bbox$longitude[1], lga_bbox$longitude[2]),
latitude %>% between(lga_bbox$latitude[1], lga_bbox$latitude[2])
)
dbDisconnect(lga_tracks_db)
# I want a frame for each half-minute of the day, but it's likely that not every half-minute interval will be
# present in the data, which means it will not be in the "move" object, which will mess up the frames.
# To avoid this, I'll add a whole day of fake data, corresponding to a persistent point that is placed
# behind the time label, by using the same formula I use to position the label.
date_min <-
as_datetime(str_c(date_to_map, start_time, sep = " "), tz = "US/Eastern") %>%
floor_date(unit = "30 seconds")
date_max <-
as_datetime(str_c(date_to_map, end_time, sep = " "), tz = "US/Eastern") %>%
ceiling_date(unit = "30 seconds")
arrivals_tracks <-
bind_rows(
tibble(
time = seq(date_min, date_max, "30 secs"),
longitude = mean(lga_bbox$longitude),
latitude = max(lga_bbox$latitude) - ((max(lga_bbox$latitude) - min(lga_bbox$latitude)) * 0.05),
unique_flight = "fake_flight"
),
arrivals_tracks
)
#-----------------------------------------------------------------------------------------#
# Converting for moveVis
#-----------------------------------------------------------------------------------------#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Turning the data.frame into a MoveStack
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
arrivals_move <-
df2move(
arrivals_tracks,
proj = my_crs,
x = "longitude",
y = "latitude",
time = "time",
track_id = "unique_flight"
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Aligning
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# To get accurate positions at each half-minute, this aligns the timestamps by interpolating the
# position of each flight at the given half-minute interval
arrivals_move_aligned <-
align_move(
arrivals_move,
res = 30,
digit = 0,
unit = "secs"
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Cleaning up
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Removing intermediate objects, because rapidly saving many ggplots (often in parallel) can
# be memory intensive
# rm(arrivals_move)
# rm(arrivals_tracks)
gc()
#-----------------------------------------------------------------------------------------#
# data.frame of distinct times and time label coords
#-----------------------------------------------------------------------------------------#
# This will be fed to "geom_label"
distinct_times <-
tibble(
time = seq(date_min, date_max, "30 secs")
) %>%
mutate(
time_chr = format(time, format = "%a %m/%e - %R"),
longitude = mean(lga_bbox$longitude),
latitude = max(lga_bbox$latitude) - ((max(lga_bbox$latitude) - min(lga_bbox$latitude)) * 0.05)
)
#-----------------------------------------------------------------------------------------#
# Extracting data from the aligned MoveStack
#-----------------------------------------------------------------------------------------#
arrivals_move_aligned_trimmed <-
arrivals_move_aligned@data %>%
as_tibble(rownames = "row_names") %>%
# Removing "fake_flight" from the count
filter(!row_names %>% str_detect("fake")) %>%
# Re-creating unique_flight
mutate(unique_flight = row_names %>% str_remove_all("\\..+")) %>%
# There shouldn't be any flight coords outside the bbox, but just to make sure...
filter(
x %>% between(lga_bbox$longitude[1], lga_bbox$longitude[2]),
y %>% between(lga_bbox$latitude[1], lga_bbox$latitude[2])
)
#-----------------------------------------------------------------------------------------#
# Summarizing
#-----------------------------------------------------------------------------------------#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# For each minute
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
arrivals_move_aligned_trimmed_summarized <-
arrivals_move_aligned_trimmed %>%
# The y-axis makes sense as "Flights / Minute", so keeping "60 secs" here. Joining with
# "min_max_seq_df" by "hour" and "minute" will replicate this count for each 30 sec frame
mutate(floor_time = floor_date(time, unit = "60 secs")) %>%
# Records will be doubled, as there are two 30-second segments within each 60 second segment,
# so here I'm removing the duplicates
distinct(unique_flight, floor_time, .keep_all = TRUE) %>%
count(floor_time, name = "flights_count") %>%
mutate(
day = day(floor_time),
hour = hour(floor_time),
minute = minute(floor_time)
) %>%
# Times are rounded to different resolutions, which means "time" will not match, so I'm removing it
select(-floor_time)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Complete summarized data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# 30 second grid with fake coords
min_max_seq_df <-
tibble(
time = seq(date_min, date_max, "30 secs"),
longitude = 0,
latitude = 0,
unique_flight = "none"
) %>%
mutate(
day = day(time),
hour = hour(time),
minute = minute(time)
)
# Expanding the summarized data to have a record for each 30 second interval
arrivals_summarized <-
full_join(
min_max_seq_df,
arrivals_move_aligned_trimmed_summarized,
by = c("day", "hour", "minute")
) %>%
mutate(
flights_count = replace_na(flights_count, 0),
moving_flights_count = movingFun(flights_count, 120, "mean", na.rm = TRUE)
)
#=========================================================================================#
# Graph ----
#=========================================================================================#
#-----------------------------------------------------------------------------------------#
# Drawing graphs
#-----------------------------------------------------------------------------------------#
# To construct a manual legend, specifying `group = 1` and `colour = [text]` for the label
graph_frames <-
distinct_times$time %>%
map(
~ arrivals_summarized %>%
ggplot() +
# 0-point
geom_hline(yintercept = 0, color = "gray20", linetype = 2) +
# Daily average
geom_hline(
aes(
yintercept = mean(flights_count),
group = 1,
colour = "Daily average"
),
size = .75
) +
# Count in each minute
geom_line(
aes(
x = time,
y = flights_count,
group = 1,
colour = "Each minute"
),
size = .5
) +
# A loess smooth
geom_smooth(
aes(
x = time,
y = moving_flights_count,
group = 1,
colour = "loess (span = 0.5)"
),
method = "loess",
formula = y ~ x,
span = .5,
se = FALSE,
size = 1
) +
# A moving average over a window of 1 hour
geom_line(
aes(
x = time,
y = moving_flights_count,
group = 1,
colour = "Moving average (window = 1 hour)"
),
size = 1
) +
# Vertical line scanning with time
geom_vline(aes(xintercept = .x), color = "white") +
# Constructing manual legend and setting colors of lines (on the graph and in the legend)
scale_colour_manual(
name = NULL,
breaks = c("Each minute", "Moving average (window = 1 hour)", "loess (span = 0.5)", "Daily average"),
values = c("gray25", "#FDE725", "#424186", "#2AB07F"),
guide = guide_legend(direction = "vertical", override.aes = aes(size = 1.5))
) +
# Plot display specs
scale_y_continuous(name = "Flights / Minute", breaks = seq(0, 10, 2)) +
scale_x_datetime(
name = "Time",
date_labels = "%k:%M",
date_breaks = "2 hours"
) +
coord_cartesian(
xlim = c(date_min, date_max),
ylim = c(0, 11)
) +
dark_theme_gray() +
# Tweaking the plot display
theme(
legend.justification = c(0, 1), # Upper left
legend.position = c(0, 1), # Upper left
legend.background = element_rect(fill = NA, color = NA), # No background
legend.title = element_blank(), # No title
legend.margin = margin(t = 0, r = 10, b = 5, l = 10),
legend.key = element_rect(fill = NA), # No fill
text = element_text(size = 12) # Make it big
)
) %>%
# To give clusterApplyLB a way of accurately naming frames in order, if running in parallel
imap( ~ `attr<-`(.x, which = "frame", .y))
gc()
#-----------------------------------------------------------------------------------------#
# Saving graph frames ----
#-----------------------------------------------------------------------------------------#
if (run_in_parallel == TRUE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Setting up parallel saving of graphs
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
cl <- makeSOCKcluster(3)
clusterExport(cl, c("frames_folder", "ggsave", "here", "glue"))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Running in parallel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
format(now(), "%r"); tic()
clusterApplyLB(
cl = cl,
x = graph_frames,
fun = function(graph_frame) {
if (attr(graph_frame, which = 'frame') == 1) cat(length(graph_frames), " total\n", sep = "")
suppressMessages(
ggsave(
filename = glue("{frames_folder}/graph_frame_{attr(graph_frame, which = 'frame')}.png"),
plot = graph_frame,
width = 8,
height = 3.5,
units = "in",
dpi = 200
)
)
if (attr(graph_frame, which = 'frame') %% 10 == 0) cat(attr(graph_frame, which = 'frame'), ",", sep = "")
}
)
format(now(), "%r"); toc()
stopCluster(cl)
} else if (run_in_parallel == FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Running in series
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
format(now(), "%r"); tic()
graph_frames %>%
iwalk(
~ {
# Printing progress
if (.y == 1) cat(length(graph_frames), " total\n", sep = "")
if (.y %% 10 == 0) cat(.y, ",", sep = "")
suppressMessages(
ggsave(
filename = glue("{frames_folder}/graph_frame_{.y}.png"),
plot = .x,
width = 8,
height = 3.5,
units = "in",
dpi = 200
)
)
# Collecting garbage every 100 frames
if (.y %% 100 == 0) gc()
}
)
format(now(), "%r"); toc()
}
rm(graph_frames)
gc()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# # ---- THIS IS THE END! ----
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
| /code/Graphing_LGA_flight_history.R | no_license | cgettings/Airplanes | R | false | false | 16,076 | r | ###########################################################################################-
###########################################################################################-
##
## Graphing LGA flight history ----
##
###########################################################################################-
###########################################################################################-
#=========================================================================================#
# Setting up ----
#=========================================================================================#
run_in_parallel <- FALSE
date_to_map <- lubridate::as_date("2020-04-06")
start_time <- hms::as_hms("00:00:01")
end_time <- hms::as_hms("23:59:59")
#-----------------------------------------------------------------------------------------#
# Loading libraries
#-----------------------------------------------------------------------------------------#
library(magrittr)
library(moveVis)
library(move)
library(tidyverse)
library(lubridate)
library(glue)
library(hms)
library(here)
library(DBI)
library(RSQLite)
library(dbplyr)
library(viridis)
library(fs)
library(ggdark)
library(doSNOW)
library(parallel)
library(doParallel)
library(tictoc)
#-----------------------------------------------------------------------------------------#
# Parsing dates for filtering data "server" side
#-----------------------------------------------------------------------------------------#
month_to_map <- month(date_to_map)
day_to_map <- day(date_to_map)
start_hour <- hour(start_time)
start_minute <- minute(start_time)
end_hour <- hour(end_time)
end_minute <- minute(end_time)
#-----------------------------------------------------------------------------------------#
# Creating frames folder
#-----------------------------------------------------------------------------------------#
frames_folder <- here(glue("plots/graph_frames/{date_to_map}/30_sec"))
dir_create(frames_folder)
#-----------------------------------------------------------------------------------------#
# Setting map view parameters
#-----------------------------------------------------------------------------------------#
# Bounding box around LGA (big enough to show approaches from all directions)
lga_bbox <-
tibble(
longitude = c(-74.232575, -73.516318),
latitude = c(40.503766, 41.046881)
)
# Where the runways cross
lga_center <-
c(
longitude = -73.874861,
latitude = 40.780347
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# CRS
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
my_crs <- "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
#=========================================================================================#
# Loading and cleaning data ----
#=========================================================================================#
# Downloaded in "LGA flight history.R"
#-----------------------------------------------------------------------------------------#
# Pulling data
#-----------------------------------------------------------------------------------------#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Connecting to database
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
lga_tracks_db <- dbConnect(SQLite(), "data/lga_tracks_db.sqlite3")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Pulling
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
arrivals_tracks <-
lga_tracks_db %>%
tbl("arrivals_tracks") %>%
filter(
month == month_to_map,
day == day_to_map,
hour %>% between(start_hour, end_hour),
minute %>% between(start_minute, end_minute)
) %>%
collect() %>%
mutate(time = as_datetime(time, tz = "US/Eastern")) %>%
# Records with missing values are probably not "real" data points, so dropping them
drop_na(time, longitude, latitude, unique_flight) %>%
# Some of the records are doubled, which makes moveVis mad, so keeping only one
distinct(time, unique_flight, .keep_all = TRUE) %>%
# Restricting path data to coordinates inside the bounding box, to reduce unnecessary processing and
# memory overhead, and also so that the eventual summary statistics correspond to paths that
# are visible on the map
filter(
longitude %>% between(lga_bbox$longitude[1], lga_bbox$longitude[2]),
latitude %>% between(lga_bbox$latitude[1], lga_bbox$latitude[2])
)
dbDisconnect(lga_tracks_db)
# I want a frame for each half-minute of the day, but it's likely that not every half-minute interval will be
# present in the data, which means it will not be in the "move" object, which will mess up the frames.
# To avoid this, I'll add a whole day of fake data, corresponding to a persistent point that is placed
# behind the time label, by using the same formula I use to position the label.
date_min <-
as_datetime(str_c(date_to_map, start_time, sep = " "), tz = "US/Eastern") %>%
floor_date(unit = "30 seconds")
date_max <-
as_datetime(str_c(date_to_map, end_time, sep = " "), tz = "US/Eastern") %>%
ceiling_date(unit = "30 seconds")
arrivals_tracks <-
bind_rows(
tibble(
time = seq(date_min, date_max, "30 secs"),
longitude = mean(lga_bbox$longitude),
latitude = max(lga_bbox$latitude) - ((max(lga_bbox$latitude) - min(lga_bbox$latitude)) * 0.05),
unique_flight = "fake_flight"
),
arrivals_tracks
)
#-----------------------------------------------------------------------------------------#
# Converting for moveVis
#-----------------------------------------------------------------------------------------#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Turning the data.frame into a MoveStack
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
arrivals_move <-
df2move(
arrivals_tracks,
proj = my_crs,
x = "longitude",
y = "latitude",
time = "time",
track_id = "unique_flight"
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Aligning
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# To get accurate positions at each half-minute, this aligns the timestamps by interpolating the
# position of each flight at the given half-minute interval
arrivals_move_aligned <-
align_move(
arrivals_move,
res = 30,
digit = 0,
unit = "secs"
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Cleaning up
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Removing intermediate objects, because rapidly saving many ggplots (often in parallel) can
# be memory intensive
# rm(arrivals_move)
# rm(arrivals_tracks)
gc()
#-----------------------------------------------------------------------------------------#
# data.frame of distinct times and time label coords
#-----------------------------------------------------------------------------------------#
# This will be fed to "geom_label"
distinct_times <-
tibble(
time = seq(date_min, date_max, "30 secs")
) %>%
mutate(
time_chr = format(time, format = "%a %m/%e - %R"),
longitude = mean(lga_bbox$longitude),
latitude = max(lga_bbox$latitude) - ((max(lga_bbox$latitude) - min(lga_bbox$latitude)) * 0.05)
)
#-----------------------------------------------------------------------------------------#
# Extracting data from the aligned MoveStack
#-----------------------------------------------------------------------------------------#
arrivals_move_aligned_trimmed <-
arrivals_move_aligned@data %>%
as_tibble(rownames = "row_names") %>%
# Removing "fake_flight" from the count
filter(!row_names %>% str_detect("fake")) %>%
# Re-creating unique_flight
mutate(unique_flight = row_names %>% str_remove_all("\\..+")) %>%
# There shouldn't be any flight coords outside the bbox, but just to make sure...
filter(
x %>% between(lga_bbox$longitude[1], lga_bbox$longitude[2]),
y %>% between(lga_bbox$latitude[1], lga_bbox$latitude[2])
)
#-----------------------------------------------------------------------------------------#
# Summarizing
#-----------------------------------------------------------------------------------------#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# For each minute
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
arrivals_move_aligned_trimmed_summarized <-
arrivals_move_aligned_trimmed %>%
# The y-axis makes sense as "Flights / Minute", so keeping "60 secs" here. Joining with
# "min_max_seq_df" by "hour" and "minute" will replicate this count for each 30 sec frame
mutate(floor_time = floor_date(time, unit = "60 secs")) %>%
# Records will be doubled, as there are two 30-second segments within each 60 second segment,
# so here I'm removing the duplicates
distinct(unique_flight, floor_time, .keep_all = TRUE) %>%
count(floor_time, name = "flights_count") %>%
mutate(
day = day(floor_time),
hour = hour(floor_time),
minute = minute(floor_time)
) %>%
# Times are rounded to different resolutions, which means "time" will not match, so I'm removing it
select(-floor_time)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Complete summarized data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# 30 second grid with fake coords
min_max_seq_df <-
tibble(
time = seq(date_min, date_max, "30 secs"),
longitude = 0,
latitude = 0,
unique_flight = "none"
) %>%
mutate(
day = day(time),
hour = hour(time),
minute = minute(time)
)
# Expanding the summarized data to have a record for each 30 second interval
arrivals_summarized <-
full_join(
min_max_seq_df,
arrivals_move_aligned_trimmed_summarized,
by = c("day", "hour", "minute")
) %>%
mutate(
flights_count = replace_na(flights_count, 0),
moving_flights_count = movingFun(flights_count, 120, "mean", na.rm = TRUE)
)
#=========================================================================================#
# Graph ----
#=========================================================================================#
#-----------------------------------------------------------------------------------------#
# Drawing graphs
#-----------------------------------------------------------------------------------------#
# To construct a manual legend, specifying `group = 1` and `colour = [text]` for the label
graph_frames <-
distinct_times$time %>%
map(
~ arrivals_summarized %>%
ggplot() +
# 0-point
geom_hline(yintercept = 0, color = "gray20", linetype = 2) +
# Daily average
geom_hline(
aes(
yintercept = mean(flights_count),
group = 1,
colour = "Daily average"
),
size = .75
) +
# Count in each minute
geom_line(
aes(
x = time,
y = flights_count,
group = 1,
colour = "Each minute"
),
size = .5
) +
# A loess smooth
geom_smooth(
aes(
x = time,
y = moving_flights_count,
group = 1,
colour = "loess (span = 0.5)"
),
method = "loess",
formula = y ~ x,
span = .5,
se = FALSE,
size = 1
) +
# A moving average over a window of 1 hour
geom_line(
aes(
x = time,
y = moving_flights_count,
group = 1,
colour = "Moving average (window = 1 hour)"
),
size = 1
) +
# Vertical line scanning with time
geom_vline(aes(xintercept = .x), color = "white") +
# Constructing manual legend and setting colors of lines (on the graph and in the legend)
scale_colour_manual(
name = NULL,
breaks = c("Each minute", "Moving average (window = 1 hour)", "loess (span = 0.5)", "Daily average"),
values = c("gray25", "#FDE725", "#424186", "#2AB07F"),
guide = guide_legend(direction = "vertical", override.aes = aes(size = 1.5))
) +
# Plot display specs
scale_y_continuous(name = "Flights / Minute", breaks = seq(0, 10, 2)) +
scale_x_datetime(
name = "Time",
date_labels = "%k:%M",
date_breaks = "2 hours"
) +
coord_cartesian(
xlim = c(date_min, date_max),
ylim = c(0, 11)
) +
dark_theme_gray() +
# Tweaking the plot display
theme(
legend.justification = c(0, 1), # Upper left
legend.position = c(0, 1), # Upper left
legend.background = element_rect(fill = NA, color = NA), # No background
legend.title = element_blank(), # No title
legend.margin = margin(t = 0, r = 10, b = 5, l = 10),
legend.key = element_rect(fill = NA), # No fill
text = element_text(size = 12) # Make it big
)
) %>%
# To give clusterApplyLB a way of accurately naming frames in order, if running in parallel
imap( ~ `attr<-`(.x, which = "frame", .y))
gc()
#-----------------------------------------------------------------------------------------#
# Saving graph frames ----
#-----------------------------------------------------------------------------------------#
if (run_in_parallel == TRUE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Setting up parallel saving of graphs
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
cl <- makeSOCKcluster(3)
clusterExport(cl, c("frames_folder", "ggsave", "here", "glue"))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Running in parallel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
format(now(), "%r"); tic()
clusterApplyLB(
cl = cl,
x = graph_frames,
fun = function(graph_frame) {
if (attr(graph_frame, which = 'frame') == 1) cat(length(graph_frames), " total\n", sep = "")
suppressMessages(
ggsave(
filename = glue("{frames_folder}/graph_frame_{attr(graph_frame, which = 'frame')}.png"),
plot = graph_frame,
width = 8,
height = 3.5,
units = "in",
dpi = 200
)
)
if (attr(graph_frame, which = 'frame') %% 10 == 0) cat(attr(graph_frame, which = 'frame'), ",", sep = "")
}
)
format(now(), "%r"); toc()
stopCluster(cl)
} else if (run_in_parallel == FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Running in series
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
format(now(), "%r"); tic()
graph_frames %>%
iwalk(
~ {
# Printing progress
if (.y == 1) cat(length(graph_frames), " total\n", sep = "")
if (.y %% 10 == 0) cat(.y, ",", sep = "")
suppressMessages(
ggsave(
filename = glue("{frames_folder}/graph_frame_{.y}.png"),
plot = .x,
width = 8,
height = 3.5,
units = "in",
dpi = 200
)
)
# Collecting garbage every 100 frames
if (.y %% 100 == 0) gc()
}
)
format(now(), "%r"); toc()
}
rm(graph_frames)
gc()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# # ---- THIS IS THE END! ----
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcf.R
\name{formatVCF}
\alias{formatVCF}
\title{Convert SNP from Infinium array to VCF file}
\usage{
formatVCF(sdf, annoS, annoI, vcf = NULL, genome = "hg19", verbose = FALSE)
}
\arguments{
\item{sdf}{SigDF}
\item{annoS}{SNP variant annotation, available at
https://github.com/zhou-lab/InfiniumAnnotationV1/tree/main/Anno/EPIC
EPIC.hg19.snp_overlap_b151.rds
EPIC.hg38.snp_overlap_b151.rds}
\item{annoI}{Infinium-I variant annotation, available at
https://github.com/zhou-lab/InfiniumAnnotationV1/tree/main/Anno/EPIC
EPIC.hg19.typeI_overlap_b151.rds
EPIC.hg38.typeI_overlap_b151.rds}
\item{vcf}{output VCF file path, if NULL output to console}
\item{genome}{genome}
\item{verbose}{print more messages}
}
\value{
VCF file. If vcf is NULL, a data.frame is output to
console. The data.frame does not contain VCF headers.
Note the vcf is not sorted. You can sort with
awk '$1 ~ /^#/ {print $0;next} {print $0 | "sort -k1,1 -k2,2n"}'
}
\description{
Convert SNP from Infinium array to VCF file
}
\examples{
sesameDataCacheAll() # if not done yet
sdf <- sesameDataGet('EPIC.1.SigDF')
\dontrun{
## download annoS and annoI from
## https://github.com/zhou-lab/InfiniumAnnotationV1/tree/main/Anno/EPIC
## output to console
head(formatVCF(sdf, annoS, annoI))
}
}
| /man/formatVCF.Rd | permissive | zwdzwd/sesame | R | false | true | 1,338 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcf.R
\name{formatVCF}
\alias{formatVCF}
\title{Convert SNP from Infinium array to VCF file}
\usage{
formatVCF(sdf, annoS, annoI, vcf = NULL, genome = "hg19", verbose = FALSE)
}
\arguments{
\item{sdf}{SigDF}
\item{annoS}{SNP variant annotation, available at
https://github.com/zhou-lab/InfiniumAnnotationV1/tree/main/Anno/EPIC
EPIC.hg19.snp_overlap_b151.rds
EPIC.hg38.snp_overlap_b151.rds}
\item{annoI}{Infinium-I variant annotation, available at
https://github.com/zhou-lab/InfiniumAnnotationV1/tree/main/Anno/EPIC
EPIC.hg19.typeI_overlap_b151.rds
EPIC.hg38.typeI_overlap_b151.rds}
\item{vcf}{output VCF file path, if NULL output to console}
\item{genome}{genome}
\item{verbose}{print more messages}
}
\value{
VCF file. If vcf is NULL, a data.frame is output to
console. The data.frame does not contain VCF headers.
Note the vcf is not sorted. You can sort with
awk '$1 ~ /^#/ {print $0;next} {print $0 | "sort -k1,1 -k2,2n"}'
}
\description{
Convert SNP from Infinium array to VCF file
}
\examples{
sesameDataCacheAll() # if not done yet
sdf <- sesameDataGet('EPIC.1.SigDF')
\dontrun{
## download annoS and annoI from
## https://github.com/zhou-lab/InfiniumAnnotationV1/tree/main/Anno/EPIC
## output to console
head(formatVCF(sdf, annoS, annoI))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kyCG.R
\name{getDatabaseSetOverlap}
\alias{getDatabaseSetOverlap}
\title{getDatabaseSetOverlap tests for the overlap of set of probes (querySet) in a
single given feature (database set)}
\usage{
getDatabaseSetOverlap(querySet, databaseSets, platform = NA, verbose = TRUE)
}
\arguments{
\item{querySet}{Vector of probes corresponding to a single database set
of interest.}
\item{databaseSets}{List of vectors corresponding to the database sets of
interest with associated meta data as an attribute to each element.}
\item{platform}{String corresponding to the type of platform to use. Either
MM285, EPIC, HM450, or HM27. If it is not provided, it will be inferred
from the query set probeIDs (Default: NA).}
\item{verbose}{Logical value indicating whether to display intermediate
text output about the type of test. Optional. (Default: TRUE)}
}
\value{
A sparse data.frame containing all of the meta data from all database
sets.
}
\description{
getDatabaseSetOverlap tests for the overlap of set of probes (querySet) in a
single given feature (database set)
}
\examples{
querySet=c("cg29176188_TC21", "cg29176794_TC21")
databaseSet=c("cg29176188_TC21", "cg29176794_TC21")
getDatabaseSetOverlap(querySet, databaseSet)
}
| /man/getDatabaseSetOverlap.Rd | permissive | a-augustin/sesame | R | false | true | 1,301 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kyCG.R
\name{getDatabaseSetOverlap}
\alias{getDatabaseSetOverlap}
\title{getDatabaseSetOverlap tests for the overlap of set of probes (querySet) in a
single given feature (database set)}
\usage{
getDatabaseSetOverlap(querySet, databaseSets, platform = NA, verbose = TRUE)
}
\arguments{
\item{querySet}{Vector of probes corresponding to a single database set
of interest.}
\item{databaseSets}{List of vectors corresponding to the database sets of
interest with associated meta data as an attribute to each element.}
\item{platform}{String corresponding to the type of platform to use. Either
MM285, EPIC, HM450, or HM27. If it is not provided, it will be inferred
from the query set probeIDs (Default: NA).}
\item{verbose}{Logical value indicating whether to display intermediate
text output about the type of test. Optional. (Default: TRUE)}
}
\value{
A sparse data.frame containing all of the meta data from all database
sets.
}
\description{
getDatabaseSetOverlap tests for the overlap of set of probes (querySet) in a
single given feature (database set)
}
\examples{
querySet=c("cg29176188_TC21", "cg29176794_TC21")
databaseSet=c("cg29176188_TC21", "cg29176794_TC21")
getDatabaseSetOverlap(querySet, databaseSet)
}
|
library(AppliedPredictiveModeling)
data(segmentationOriginal)
library(caret)
library(e1071)
# Q: CART model on the cell-segmentation data, split by the predefined Case column.
set.seed(125)
training <- segmentationOriginal[segmentationOriginal$Case == "Train",]
test <- segmentationOriginal[segmentationOriginal$Case == "Test",]
fit <- train(Class~., data = training, method = "rpart")
# Q: regression tree on the olive-oil data; first column (region) is dropped,
# then the tree is queried at the column means of the remaining predictors.
library(pgmm)
data(olive)
olive = olive[,-1]
fit <- train(Area~., data = olive, method = "rpart")
newdata = as.data.frame(t(colMeans(olive)))
prediction <- predict(fit, newdata = newdata)
# Q: logistic regression (glm, binomial) for coronary heart disease on a
# random half of SAheart; compare train vs test misclassification.
library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=F)
trainSA = SAheart[train,]
testSA = SAheart[-train,]
set.seed(13234)
fit <- train(chd~age+alcohol+obesity+tobacco+typea+ldl, data = trainSA, method = "glm", family = "binomial")
# Misclassification rate at a fixed 0.5 probability threshold.
missClass <- function(values,prediction)
{
sum(((prediction > 0.5)*1) != values)/length(values)
}
missClass(testSA$chd,predict(fit, newdata = testSA))
missClass(trainSA$chd,predict(fit, newdata = trainSA))
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
vowel.test$y <- as.factor(vowel.test$y)
vowel.train$y <- as.factor(vowel.train$y)
set.seed(33833)
# Q: fit random forest and boosting on the vowel data, then compute accuracy
# restricted to the test cases where the two models agree.
fitRF <- train(y~., data = vowel.train, method = "rf")
fitGBM <- train(y~., data = vowel.train, method = "gbm", verbose = FALSE)
predRF <- predict(fitRF, newdata = vowel.test)
predGBM <- predict(fitGBM, newdata = vowel.test)
AccuracyRF <- confusionMatrix(predRF, vowel.test$y)
AccuracyGBM <- confusionMatrix(predGBM, vowel.test$y)
indexOfAgreed <- (predRF == predGBM)
AgreementAccuracy <- confusionMatrix(predRF[indexOfAgreed], vowel.test$y[indexOfAgreed])
# Q: stack RF, GBM and LDA test-set predictions and fit an RF meta-learner
# on the stacked data frame.
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
set.seed(62433)
fitRF <- train(diagnosis~., data = training, method = "rf")
fitGBM <- train(diagnosis~., data = training, method = "gbm", verbose = FALSE)
fitLDA <- train(diagnosis~., data = training, method = "lda")
predRF <- predict(fitRF, newdata = testing)
predGBM <- predict(fitGBM, newdata = testing)
predLDA <- predict(fitLDA, newdata = testing)
dfStacked <- data.frame(predRF, predGBM, predLDA, diagnosis =testing$diagnosis)
fitStacked <- train(diagnosis~., data = dfStacked, method = "rf")
# NOTE(review): the stacked model is evaluated on the same data it was fit on
# (dfStacked), so its accuracy estimate below is optimistic.
predStacked <- predict(fitStacked, newdata = dfStacked)
confusionMatrix(predRF, testing$diagnosis)$overall['Accuracy']
confusionMatrix(predGBM, testing$diagnosis)$overall['Accuracy']
confusionMatrix(predLDA, testing$diagnosis)$overall['Accuracy']
confusionMatrix(predStacked, testing$diagnosis)$overall['Accuracy']
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(233)
# Q: lasso path plot — which coefficient shrinks to zero last as the
# penalty increases.
fitLasso <- train(CompressiveStrength~., data = training, method = "lasso")
plot.enet(fitLasso$finalModel, xvar="penalty", use.color=TRUE)
# Q: support-vector regression on the same split.
set.seed(325)
svm <- svm(CompressiveStrength~., data = training)
predSVM <- predict(svm, newdata = testing)
# NOTE(review): this is the root *sum* of squared errors, not RMSE —
# the quiz's RMSE would divide by length(predSVM) before sqrt().
sqrt(sum((predSVM - testing$CompressiveStrength)^2))
library(lubridate) # For year() function below
library(forecast)
library(readr)
dat = read_csv("~/Downloads/gaData.csv")
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
# Q: bats() exponential-smoothing model with 95% prediction bounds.
forecastModel <- bats(tstrain, h = 235, level = .95)
# NOTE(review): predForecast forecasts the raw series tstrain, not the fitted
# forecastModel — likely intended forecast(forecastModel, h = 235, level = 95).
predForecast <- forecast(tstrain, h = 235, level = .95)
forecasting <- predict(forecastModel, newdata = testing$visitsTumblr)
df <- data.frame(visitsTumblr = testing$visitsTumblr, predForecast$lower, predForecast$upper)
# NOTE(review): df has no columns named forecasting.lower / forecasting.upper
# (data.frame names them after predForecast$lower/$upper), so the comparison
# below evaluates against NULL — confirm the intended column names.
df$forecasting.lower <= df$visitsTumblr & df$visitsTumblr <= df$forecasting.upper
accuracy(predForecast, testing$visitsTumblr)
plot(predForecast)
| /Quiz 4.R | no_license | tantanchen/Machine-Learning-Assignment | R | false | false | 3,856 | r | library(AppliedPredictiveModeling)
data(segmentationOriginal)
library(caret)
library(e1071)
set.seed(125)
training <- segmentationOriginal[segmentationOriginal$Case == "Train",]
test <- segmentationOriginal[segmentationOriginal$Case == "Test",]
fit <- train(Class~., data = training, method = "rpart")
library(pgmm)
data(olive)
olive = olive[,-1]
fit <- train(Area~., data = olive, method = "rpart")
newdata = as.data.frame(t(colMeans(olive)))
prediction <- predict(fit, newdata = newdata)
library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=F)
trainSA = SAheart[train,]
testSA = SAheart[-train,]
set.seed(13234)
fit <- train(chd~age+alcohol+obesity+tobacco+typea+ldl, data = trainSA, method = "glm", family = "binomial")
missClass <- function(values,prediction)
{
sum(((prediction > 0.5)*1) != values)/length(values)
}
missClass(testSA$chd,predict(fit, newdata = testSA))
missClass(trainSA$chd,predict(fit, newdata = trainSA))
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
vowel.test$y <- as.factor(vowel.test$y)
vowel.train$y <- as.factor(vowel.train$y)
set.seed(33833)
# Q: fit random forest and boosting on the vowel data, then compute accuracy
# restricted to the test cases where the two models agree.
fitRF <- train(y~., data = vowel.train, method = "rf")
fitGBM <- train(y~., data = vowel.train, method = "gbm", verbose = FALSE)
predRF <- predict(fitRF, newdata = vowel.test)
predGBM <- predict(fitGBM, newdata = vowel.test)
AccuracyRF <- confusionMatrix(predRF, vowel.test$y)
AccuracyGBM <- confusionMatrix(predGBM, vowel.test$y)
indexOfAgreed <- (predRF == predGBM)
AgreementAccuracy <- confusionMatrix(predRF[indexOfAgreed], vowel.test$y[indexOfAgreed])
# Q: stack RF, GBM and LDA test-set predictions and fit an RF meta-learner
# on the stacked data frame.
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
set.seed(62433)
fitRF <- train(diagnosis~., data = training, method = "rf")
fitGBM <- train(diagnosis~., data = training, method = "gbm", verbose = FALSE)
fitLDA <- train(diagnosis~., data = training, method = "lda")
predRF <- predict(fitRF, newdata = testing)
predGBM <- predict(fitGBM, newdata = testing)
predLDA <- predict(fitLDA, newdata = testing)
dfStacked <- data.frame(predRF, predGBM, predLDA, diagnosis =testing$diagnosis)
fitStacked <- train(diagnosis~., data = dfStacked, method = "rf")
# NOTE(review): the stacked model is evaluated on the same data it was fit on
# (dfStacked), so its accuracy estimate below is optimistic.
predStacked <- predict(fitStacked, newdata = dfStacked)
confusionMatrix(predRF, testing$diagnosis)$overall['Accuracy']
confusionMatrix(predGBM, testing$diagnosis)$overall['Accuracy']
confusionMatrix(predLDA, testing$diagnosis)$overall['Accuracy']
confusionMatrix(predStacked, testing$diagnosis)$overall['Accuracy']
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(233)
# Q: lasso path plot — which coefficient shrinks to zero last as the
# penalty increases.
fitLasso <- train(CompressiveStrength~., data = training, method = "lasso")
plot.enet(fitLasso$finalModel, xvar="penalty", use.color=TRUE)
# Q: support-vector regression on the same split.
set.seed(325)
svm <- svm(CompressiveStrength~., data = training)
predSVM <- predict(svm, newdata = testing)
# NOTE(review): this is the root *sum* of squared errors, not RMSE —
# the quiz's RMSE would divide by length(predSVM) before sqrt().
sqrt(sum((predSVM - testing$CompressiveStrength)^2))
library(lubridate) # For year() function below
library(forecast)
library(readr)
dat = read_csv("~/Downloads/gaData.csv")
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
# Q: bats() exponential-smoothing model with 95% prediction bounds.
forecastModel <- bats(tstrain, h = 235, level = .95)
# NOTE(review): predForecast forecasts the raw series tstrain, not the fitted
# forecastModel — likely intended forecast(forecastModel, h = 235, level = 95).
predForecast <- forecast(tstrain, h = 235, level = .95)
forecasting <- predict(forecastModel, newdata = testing$visitsTumblr)
df <- data.frame(visitsTumblr = testing$visitsTumblr, predForecast$lower, predForecast$upper)
# NOTE(review): df has no columns named forecasting.lower / forecasting.upper
# (data.frame names them after predForecast$lower/$upper), so the comparison
# below evaluates against NULL — confirm the intended column names.
df$forecasting.lower <= df$visitsTumblr & df$visitsTumblr <= df$forecasting.upper
accuracy(predForecast, testing$visitsTumblr)
plot(predForecast)
|
library(shiny)
library(shinydashboard)
library(leaflet)
library(lubridate)
library(dplyr)
library(sf)
library(RSocrata)
library(googledrive)
library(readxl)
library(stringi)
library(shinythemes)
library(tidyr)
library(shinycssloaders)
library(rmapshaper)
library(plotly)
library(DT)
launchApp <- function (wd) {
  # Launch the Shiny dashboard rooted at `wd`.
  #
  # Sources every helper script under <wd>/R into this function's
  # environment, builds the datasets, publishes them as globals that
  # ui()/server() read, and starts the app.
  #
  # @param wd path to the app root (the directory containing "R/")
  # @return the running shiny app object (shinyApp)
  script_path <- file.path(wd, "R")
  for (file in list.files(script_path)) {
    source(file.path(script_path, file), encoding = "UTF-8", local = TRUE)
  }
  # Get data (get_covid_data/get_school_data/compute_percentages/get_evo are
  # defined in the sourced scripts under <wd>/R).
  df <- get_covid_data()
  esc <- get_school_data(df)
  df <- compute_percentages(df, esc)
  evo <- get_evo(esc)
  # Publish the datasets where the Shiny ui/server can find them.
  # BUG FIXED: the original captured the previous globals into oldDataset1/2
  # (never used afterwards) and its third existence check mistakenly
  # re-tested ".aecay.esc" instead of ".aecay.evo"; that dead, copy-pasted
  # code is removed and the three globals are simply assigned.
  .GlobalEnv$.aecay.df <- df
  .GlobalEnv$.aecay.esc <- esc
  .GlobalEnv$.aecay.evo <- evo
  # Run
  shinyApp(ui = ui(), server = server)
}
launchApp(".")
| /app.R | permissive | ecorreig/escoles | R | false | false | 1,126 | r | library(shiny)
library(shinydashboard)
library(leaflet)
library(lubridate)
library(dplyr)
library(sf)
library(RSocrata)
library(googledrive)
library(readxl)
library(stringi)
library(shinythemes)
library(tidyr)
library(shinycssloaders)
library(rmapshaper)
library(plotly)
library(DT)
launchApp <- function (wd) {
script_path <- file.path(wd, "R")
for (file in list.files(script_path)) {
source(file.path(script_path, file), encoding = "UTF-8", local = T)
}
# Get data
df <- get_covid_data()
esc <- get_school_data(df)
df <- compute_percentages(df, esc)
evo <- get_evo(esc)
# Init stuff
globalObjects = ls(.GlobalEnv)
if(".aecay.df" %in% globalObjects){
oldDataset1 = .GlobalEnv$.aecay.df
}
.GlobalEnv$.aecay.df <- df
globalObjects = ls(.GlobalEnv)
if(".aecay.esc" %in% globalObjects){
oldDataset2 = .GlobalEnv$.aecay.esc
}
.GlobalEnv$.aecay.esc <- esc
globalObjects = ls(.GlobalEnv)
if(".aecay.esc" %in% globalObjects){
oldDataset2 = .GlobalEnv$.aecay.esc
}
.GlobalEnv$.aecay.evo <- evo
# Run
shinyApp(ui = ui(), server = server)
}
launchApp(".")
|
astazi , la Comisia juridica a Senatului , Corneliu Vadim Tudor , Bin Laden - ul politicii romanesti , este asteptat sa dea explicatii .
e o procedura prealabila dezbaterii din plenul Senatului , unde , saptamina viitoare , se va discuta ridicarea imunitatii .
Vadim afiseaza nepasarea omului intangibil , desi , dincolo de declaratiile pompoase , se vede un bitiit de pantaloni .
nu pentru ca se discuta in Comisia juridica sau se dezbate in Senat .
din astea a mai vazut el .
spaima cea mare a lui Vadim vine de la Delegatia Permanenta a PSD .
alaltaieri , conducerea partidului de guvernamint s - a reunit pentru a discuta subiectul Vadim .
va imaginati ca partidul lui Adrian Nastase avea niscai indoieli in legatura cu ce are de facut ?
nici vorba !
nu mai avea nevoie nici de dezbateri , nici de clarificari .
si atunci , de ce a organizat o delegatie permanenta pentru aceasta chestiune ?
raspunsul e foarte simplu .
fara o decizie in unanimitate a amintitului organ , la dezbaterile din Senat unii parlamentari ar fi putut vota conform propriei constiinte .
adica in favoarea lui Vadim si , implicit , impotriva ridicarii imunitatii acestuia .
pentru a evita o asemenea situatie nefericita , Adrian Nastase a recurs la un siretlic politic .
a convocat Delegatia Permanenta si a pus in discutie pozitia partidului fata de problema in cauza .
au mai aparut oameni care sa ii ia apararea lui Vadim ?
daca , in particular , multi dintre batrinii PSD gindesc ca si presedintele PRM , in fata intregii conduceri a partidului n - au mai deschis gura .
si daca nu si - au mai putut permite un asemenea lux , decizia fiind luata in unanimitate , cu siguranta ca nici la votul din Senat nu vor exprima alta pozitie decit cea hotarita la Delegatia Permanenta .
toate aceste considerente ne fac sa credem ca , de data asta , Partidul Social Democrat a luat o decizie definitiva in cazul Vadim .
nu mai vin nici alegerile , pentru ca zaltatul sa scape nejudecat , nu mai exista nici un alt partid care sa se puna de - a curmezisul .
ce ar insemna impingerea lui Vadim in ghearele justitiei ?
dincolo de un fapt divers ce va stirni nenumarate discutii , punerea la punct a presedintelui Partidului Romania Mare , cu instrumentele oferite de democratie si de statul de drept , ar avea consecinte destul de importante .
o operatiune reusita ar da PSD - ului o aura europeana , de partid capabil sa combata extremismul si nationalismul .
un esec ar fi fatal nu doar pentru PSD , ci si pentru tara .
imaginati - va ca toate disputele in jurul ridicarii imunitatii si judecarii lui Vadim ii vor aduce acestuia o expunere enorma .
pe linga polarizarea nemultumirii generate de dificultatile paturilor nevoiase , el poate atrage si simpatia nemultumitilor din PSD .
ar fi o adevarata nenorocire .
incercarea de strunire a lui Vadim poate fi si o enorma victorie politica , dar si un dezastru de mari proportii .
partidul Social Democrat a declansat la unison o operatiune impotriva unui co - sangvin .
fie vom avea parte de o aplicare a legii , fie vom avea parte de ce - i mai rau .
un incest !
| /data/Newspapers/2001.10.24.editorial.70075.0749.r | no_license | narcis96/decrypting-alpha | R | false | false | 3,115 | r | astazi , la Comisia juridica a Senatului , Corneliu Vadim Tudor , Bin Laden - ul politicii romanesti , este asteptat sa dea explicatii .
e o procedura prealabila dezbaterii din plenul Senatului , unde , saptamina viitoare , se va discuta ridicarea imunitatii .
Vadim afiseaza nepasarea omului intangibil , desi , dincolo de declaratiile pompoase , se vede un bitiit de pantaloni .
nu pentru ca se discuta in Comisia juridica sau se dezbate in Senat .
din astea a mai vazut el .
spaima cea mare a lui Vadim vine de la Delegatia Permanenta a PSD .
alaltaieri , conducerea partidului de guvernamint s - a reunit pentru a discuta subiectul Vadim .
va imaginati ca partidul lui Adrian Nastase avea niscai indoieli in legatura cu ce are de facut ?
nici vorba !
nu mai avea nevoie nici de dezbateri , nici de clarificari .
si atunci , de ce a organizat o delegatie permanenta pentru aceasta chestiune ?
raspunsul e foarte simplu .
fara o decizie in unanimitate a amintitului organ , la dezbaterile din Senat unii parlamentari ar fi putut vota conform propriei constiinte .
adica in favoarea lui Vadim si , implicit , impotriva ridicarii imunitatii acestuia .
pentru a evita o asemenea situatie nefericita , Adrian Nastase a recurs la un siretlic politic .
a convocat Delegatia Permanenta si a pus in discutie pozitia partidului fata de problema in cauza .
au mai aparut oameni care sa ii ia apararea lui Vadim ?
daca , in particular , multi dintre batrinii PSD gindesc ca si presedintele PRM , in fata intregii conduceri a partidului n - au mai deschis gura .
si daca nu si - au mai putut permite un asemenea lux , decizia fiind luata in unanimitate , cu siguranta ca nici la votul din Senat nu vor exprima alta pozitie decit cea hotarita la Delegatia Permanenta .
toate aceste considerente ne fac sa credem ca , de data asta , Partidul Social Democrat a luat o decizie definitiva in cazul Vadim .
nu mai vin nici alegerile , pentru ca zaltatul sa scape nejudecat , nu mai exista nici un alt partid care sa se puna de - a curmezisul .
ce ar insemna impingerea lui Vadim in ghearele justitiei ?
dincolo de un fapt divers ce va stirni nenumarate discutii , punerea la punct a presedintelui Partidului Romania Mare , cu instrumentele oferite de democratie si de statul de drept , ar avea consecinte destul de importante .
o operatiune reusita ar da PSD - ului o aura europeana , de partid capabil sa combata extremismul si nationalismul .
un esec ar fi fatal nu doar pentru PSD , ci si pentru tara .
imaginati - va ca toate disputele in jurul ridicarii imunitatii si judecarii lui Vadim ii vor aduce acestuia o expunere enorma .
pe linga polarizarea nemultumirii generate de dificultatile paturilor nevoiase , el poate atrage si simpatia nemultumitilor din PSD .
ar fi o adevarata nenorocire .
incercarea de strunire a lui Vadim poate fi si o enorma victorie politica , dar si un dezastru de mari proportii .
partidul Social Democrat a declansat la unison o operatiune impotriva unui co - sangvin .
fie vom avea parte de o aplicare a legii , fie vom avea parte de ce - i mai rau .
un incest !
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Function that creates a special matrix to deal with
## the cache problem
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  #
  # Returns a list of four accessor closures sharing this environment:
  #   set(y)      — replace the stored matrix and invalidate the cache
  #   get()       — return the stored matrix
  #   setsolve(s) — store a computed inverse in the cache
  #   getsolve()  — return the cached inverse, or NULL if not computed yet
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix invalidates any cached inverse
    },
    get = function() x,
    setsolve = function(solve) cached_inverse <<- solve,
    getsolve = function() cached_inverse
  )
}
## Write a short comment describing this function
## Function that checks if there is a value for the
## inverse matrix in the cache, if so it returns it,
## else it will calculate a new inverse.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object).
  ## If the inverse was already computed, the cached copy is returned and a
  ## message is emitted; otherwise it is computed with solve() and cached.
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute, store, return.
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
    return(inv)
  }
  message("getting matrix from cache")
  cached
}
| /cachematrix.R | no_license | 11emilia11/ProgrammingAssignment2 | R | false | false | 995 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Function that creates a special matrix to deal with
## the cache problem
# Wrap a matrix with a cache slot for its inverse. Returns a list of four
# closures sharing one environment: set/get for the matrix,
# setsolve/getsolve for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
# Replacing the matrix invalidates any previously cached inverse.
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get, setsolve = setsolve,
getsolve = getsolve)
}
## Write a short comment describing this function
## Function that checks if there is a value for the
## inverse matrix in the cache, if so it returns it,
## else it will calculate a new inverse.
# Compute the inverse of the matrix held by a makeCacheMatrix object `x`.
# Returns the cached inverse when present (with a message); otherwise
# computes it via solve(), stores it in the cache, and returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if (!is.null(m)) {
message("getting matrix from cache")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
## try http:// if https:// URLs are not supported
# source("https://bioconductor.org/biocLite.R")
# biocLite("DNAcopy")
library(DNAcopy)
# Usage: Rscript <script> <coverage.csv>; the CSV has chromosome/start/end/
# coverage columns (per the column names used below).
args<-commandArgs(TRUE)
filename <- args[1]
data_ = read.csv(filename, header=TRUE,stringsAsFactors=FALSE)
head(data_)
bin_size <- mean(data_$end - data_$start)
negligible <- 0.00001 # replace zeros because they screw up the data when we use log functions
filtered.data <- data_
filtered.data$coverage[filtered.data$coverage==0] <- negligible
# Keep only chromosomes with more than 100 covered bins; small contigs are dropped.
covered_data <- filtered.data[filtered.data$coverage>negligible,]
chromosome.names <- names(table(covered_data$chromosome)[table(covered_data$chromosome)>100])
filtered.data <- filtered.data[filtered.data$chromosome %in% chromosome.names,]
unique(filtered.data$chromosome)
###### order by chromosome names ########
# Build a canonical ordering (1..100, chr1.., Chr1.., then X/Y/M/MT/...),
# append any names not in that list, and sort rows by the resulting factor.
ordered_common_chromosome_names <- c(seq(1,100),paste("chr",seq(1,100),sep=""),paste("Chr",seq(1,100),sep=""),c("X","Y","M","MT","Chr0","chr0","0"))
all_chromosomes_some_ordered <- c(intersect(ordered_common_chromosome_names,unique(filtered.data$chromosome)),setdiff(unique(filtered.data$chromosome),ordered_common_chromosome_names))
filtered.data$chromosome <- factor(filtered.data$chromosome, levels=all_chromosomes_some_ordered)
filtered.data <- filtered.data[order(filtered.data$chromosome),]
###############
RPB <- filtered.data$coverage
# Normalise coverage to a log2 ratio against the genome-wide mean, smooth
# single-bin outliers, then run segmentation with DNAcopy.
CNV_norm = log2(RPB / mean(RPB)) # RPB / mean(RPB)
CNV_smooth = smooth.CNA(CNA(
genomdat=CNV_norm,
chrom=filtered.data$chromosome,
maploc=filtered.data$end,
data.type='logratio'))
segs = segment(CNV_smooth, verbose=0, alpha=0.01, min.width=5)
#######
# Need to sort thisShort by chr first:
thisShort = segs[[2]];
thisShort$chrom <- factor(thisShort$chrom,levels=all_chromosomes_some_ordered)
head(thisShort)
# Expand per-segment means back to one value per bin: each segment spans
# num.mark consecutive bins, and 2^seg.mean converts the log2 mean back to
# a linear-scale coverage ratio.
m <- matrix(data=0, nrow=nrow(filtered.data), ncol=1);
prevEnd <- 0;
for (i in 1:nrow(thisShort))
{
thisStart <- prevEnd + 1;
thisEnd <- prevEnd + thisShort$num.mark[i];
m[thisStart:thisEnd, 1] <- 2^thisShort$seg.mean[i]; # thisShort$seg.mean[i];
prevEnd = thisEnd;
}
fixed_curr <- m[, 1]
# Convert ratios back to (rounded) read counts using the mean coverage.
filtered.data$segmented_coverage <- round(fixed_curr*mean(RPB))
filtered.data$coverage <- round(filtered.data$coverage)
# Write to file
write.table(filtered.data,paste(substr(filename,1,nchar(filename)-4),".segmented.csv",sep=""),row.names=FALSE,quote=FALSE,sep=',')
# Also emit one CSV per chromosome.
for (chrom in unique(filtered.data$chromosome)) {
print(chrom)
write.table(filtered.data[filtered.data$chromosome==chrom,],paste(substr(filename,1,nchar(filename)-4),".segmented.", chrom,".csv",sep=""),row.names=FALSE,quote=FALSE,sep=',')
}
| /bin/segment_copy_number.R | permissive | andrewolal/SplitThreader | R | false | false | 2,629 | r | ## try http:// if https:// URLs are not supported
# source("https://bioconductor.org/biocLite.R")
# biocLite("DNAcopy")
library(DNAcopy)
# Usage: Rscript <script> <coverage.csv>; the CSV has chromosome/start/end/
# coverage columns (per the column names used below).
args<-commandArgs(TRUE)
filename <- args[1]
data_ = read.csv(filename, header=TRUE,stringsAsFactors=FALSE)
head(data_)
bin_size <- mean(data_$end - data_$start)
negligible <- 0.00001 # replace zeros because they screw up the data when we use log functions
filtered.data <- data_
filtered.data$coverage[filtered.data$coverage==0] <- negligible
# Keep only chromosomes with more than 100 covered bins; small contigs are dropped.
covered_data <- filtered.data[filtered.data$coverage>negligible,]
chromosome.names <- names(table(covered_data$chromosome)[table(covered_data$chromosome)>100])
filtered.data <- filtered.data[filtered.data$chromosome %in% chromosome.names,]
unique(filtered.data$chromosome)
###### order by chromosome names ########
# Build a canonical ordering (1..100, chr1.., Chr1.., then X/Y/M/MT/...),
# append any names not in that list, and sort rows by the resulting factor.
ordered_common_chromosome_names <- c(seq(1,100),paste("chr",seq(1,100),sep=""),paste("Chr",seq(1,100),sep=""),c("X","Y","M","MT","Chr0","chr0","0"))
all_chromosomes_some_ordered <- c(intersect(ordered_common_chromosome_names,unique(filtered.data$chromosome)),setdiff(unique(filtered.data$chromosome),ordered_common_chromosome_names))
filtered.data$chromosome <- factor(filtered.data$chromosome, levels=all_chromosomes_some_ordered)
filtered.data <- filtered.data[order(filtered.data$chromosome),]
###############
RPB <- filtered.data$coverage
# Normalise coverage to a log2 ratio against the genome-wide mean, smooth
# single-bin outliers, then run segmentation with DNAcopy.
CNV_norm = log2(RPB / mean(RPB)) # RPB / mean(RPB)
CNV_smooth = smooth.CNA(CNA(
genomdat=CNV_norm,
chrom=filtered.data$chromosome,
maploc=filtered.data$end,
data.type='logratio'))
segs = segment(CNV_smooth, verbose=0, alpha=0.01, min.width=5)
#######
# Need to sort thisShort by chr first:
thisShort = segs[[2]];
thisShort$chrom <- factor(thisShort$chrom,levels=all_chromosomes_some_ordered)
head(thisShort)
# Expand per-segment means back to one value per bin: each segment spans
# num.mark consecutive bins, and 2^seg.mean converts the log2 mean back to
# a linear-scale coverage ratio.
m <- matrix(data=0, nrow=nrow(filtered.data), ncol=1);
prevEnd <- 0;
for (i in 1:nrow(thisShort))
{
thisStart <- prevEnd + 1;
thisEnd <- prevEnd + thisShort$num.mark[i];
m[thisStart:thisEnd, 1] <- 2^thisShort$seg.mean[i]; # thisShort$seg.mean[i];
prevEnd = thisEnd;
}
fixed_curr <- m[, 1]
# Convert ratios back to (rounded) read counts using the mean coverage.
filtered.data$segmented_coverage <- round(fixed_curr*mean(RPB))
filtered.data$coverage <- round(filtered.data$coverage)
# Write to file
write.table(filtered.data,paste(substr(filename,1,nchar(filename)-4),".segmented.csv",sep=""),row.names=FALSE,quote=FALSE,sep=',')
# Also emit one CSV per chromosome.
for (chrom in unique(filtered.data$chromosome)) {
print(chrom)
write.table(filtered.data[filtered.data$chromosome==chrom,],paste(substr(filename,1,nchar(filename)-4),".segmented.", chrom,".csv",sep=""),row.names=FALSE,quote=FALSE,sep=',')
}
|
#' Add two numbers
#' @description Performs arithmetic addition of its two arguments. Because it
#'   delegates to R's \code{+} operator, the usual vectorised semantics apply.
#'
#' @export
#' @param x A number
#' @param y A number
#' @return sum of two numbers: \code{x} and \code{y}
#' @examples
#' add2num(1, 2)
#' add2num(-7, 0)
add2num <- function(x, y) {
  total <- x + y
  total
}
#' negate number
#' @description Flips the sign of the supplied number, so
#'   \code{negate(1)} returns \code{-1}. Vector inputs are negated
#'   element-wise.
#'
#' @export
#' @param x A number
#' @return a number with opposite sign
#' @examples
#' negate(1)
#' negate(-1)
negate <- function(x) {
  flipped <- -x
  flipped
}
| /R/arithmetics.R | permissive | EvgenyPetrovsky/hello.rpkg | R | false | false | 561 | r | #' Add two numbers
#' @description The function works with numbers and does arithmrtic addition
#'
#' @export
#' @param x A number
#' @param y A number
#' @return sum of two numbers: \code{x} and \code{y}
#' @examples
#' add2num(1, 2)
#' add2num(-7, 0)
add2num <- function(x, y) {
x + y
}
#' negate number
#' @description The function negates number passed as a parameter.
#' \code{negate(1)} returns \code{-1}
#'
#' @export
#' @param x A number
#' @return a number with opposite sign
#' @examples
#' negate(1)
#' negate(-1)
negate <- function(x) {
-x
}
|
library(tidyverse)
library(tictoc)
## files of interest
fls <- list.files(path = "Results/", pattern = "deltas")
## load results from delta_ews.R script
# NOTE(review): only the first matching file is loaded here — confirm fls[1]
# is the intended dataset. The load is expected to define `deltas`.
load(paste0("Results/", fls[1]))
# dataset with biomes:
load('~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/terrestrial_biomes.RData')
load('~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/marine_biomes.RData')
# Quick look at the distribution of each delta_* statistic.
deltas %>%
select(lon, lat, starts_with("delta")) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
ggplot(aes(value)) +
geom_density() +
facet_wrap(~stat, scales = "free")
# Attach biome labels per pixel (marine realms here; the terrestrial join is
# commented out).
deltas <- left_join(
deltas, #df_biomes)
df_marine)
# Per-biome 5th/95th percentiles of each delta statistic — these act as the
# detection thresholds used further down.
quant <- deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
# filter(!is.na(value)) %>%
group_by(biome, stat) %>%
summarise(q95 = quantile(value, prob = 0.95),
q05 = quantile(value, prob = 0.05))
quant %>%
pivot_longer(starts_with("q"), names_to = "quantile", values_to = "value") %>%
ggplot(aes(value, biome)) +
geom_point() +
geom_line(aes(group = biome)) +
facet_wrap(~stat, scales = 'free_x')
deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
ggplot(aes(value, biome)) +
geom_boxplot(aes(fill = biome, color = biome),
size = 0.2, show.legend = FALSE, alpha = 0.3) +
facet_wrap(~stat, scales = "free_x") +
theme_light(base_size = 7)
## Detection
# Map pixels whose delta falls outside the biome-specific 5–95% band.
tic()
deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
left_join(quant) %>%
mutate(
detected = value > q95 | value < q05,
) %>%
ggplot(aes(lon,lat)) +
geom_tile(aes(fill = detected)) +
scale_fill_manual(values = c("grey50", "orange")) +
facet_wrap(~stat) +
theme_void()
toc()
## EWS types:
# High tail of std/ac1 is labelled CSD (critical slowing down), low tail CSU
# (speeding up), everything in between "none".
tic()
deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
filter(stat == "delta_std" | stat == "delta_ac1") %>%
left_join(quant) %>%
mutate(
ews_type = ifelse(value > q95, "CSD",
ifelse(value < q05, "CSU", "none"))
) %>%
ggplot(aes(lon,lat)) +
geom_tile(aes(fill = ews_type)) +
scale_fill_manual(values = c("blue", "orange", "grey50")) +
facet_wrap(~stat) +
theme_void()
toc()
## detected pixels with delta: remember than now the upper and lower percentails can be
## asymmetrical (e.g. >0.4 could be >95%, but <0.2 could be <0.05%) because the distributions
## are not necessarily centered at zero. I hope the inclusion of such asymmetry improves
## explanatory power
tic()
# One row per (pixel, statistic), with an ews flag when the value falls
# outside its biome's 5–95% band.
df_delta_detected <- deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
left_join(quant) %>%
mutate(ews = if_else(value > q95 | value < q05 , TRUE, FALSE))
toc() # 0.2 secs
## sample:
df_delta_detected %>%
group_by(lon, lat) %>%
summarize(n_ews = sum(ews)) %>%
ungroup() %>%
count(n_ews)
tic()
# Widen to one row per pixel, count signals per pixel (n_ews), and classify
# the std/ac1 combination: "csd" when both deltas are positive, "csu" when
# both negative, "amb" when their signs conflict.
df_delta_detected <- df_delta_detected %>%
select( -q95, -q05) %>%
group_by(lon,lat) %>%
mutate(n_ews = sum(ews, na.rm = TRUE)) %>%
ungroup() %>% group_by(lon,lat,biome) %>%
pivot_wider(names_from = stat, values_from = c(ews, value)) %>%
mutate(
ews_type = case_when(
n_ews == 0 ~ "none",
ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 > 0 & value_delta_std > 0 ~ "csd",
ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 < 0 & value_delta_std < 0 ~ "csu",
ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 > 0 & value_delta_std < 0 ~ "amb",
ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 < 0 & value_delta_std > 0 ~ "amb"))
toc() # 37 secs, 75 on marine data
### Summary stats on detection
# Fraction of pixels per biome with at least one early-warning signal.
df_delta_detected %>%
mutate(detection = n_ews > 0) %>%
ungroup() %>% group_by(biome, detection) %>%
count() %>%
ungroup() %>%
pivot_wider(names_from = detection, values_from = n, names_prefix = "det_") %>%
group_by(biome) %>%
summarize(prop = det_TRUE / (det_TRUE + det_FALSE))
## Result: Between 26-35% of the terrestrial biomes of the world are showing signals of resilience loss. 21-31 % marine rehalms
## The proportion is with respect to the total area otherwise it's a ratio
## Result: very few pixels correspond cleanly with csu (140) or csd (242). Over 1000s for marine
df_delta_detected %>%
select(ews_type, n_ews) %>%
ungroup() %>%
count(ews_type)
## This is a script for a sample that takes into account pixels per biome and detection: but the
## sample remains unbalanced. Does one need to make the groups of equal size? how does one handle
## groups with very few obs like mangroves?
df_delta_detected %>%
select(n_ews) %>%
mutate(detection = n_ews > 0) %>%
ungroup() %>% group_by(detection, biome) %>%
#slice_sample(prop = 0.2, replace = FALSE) %>%
count() %>%
print(n=34)
## save results:
save(df_delta_detected,
file = "~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_ChlorA_log.RData")
fls
#### Do the sampling: ####
load("~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_ChlorA_log.RData")
load("~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_GPP_log.RData")
load("~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_TER_log.RData")
tic()
df_delta_detected <- df_delta_detected %>%
mutate(biome = fct_explicit_na(biome, na_level = "(Missing)"))
toc() # 24sec
df_delta_detected %>%
ggplot(aes(lon,lat)) +
geom_tile(aes(fill = n_ews))
detected <- df_delta_detected %>%
select(n_ews) %>%
filter(n_ews > 0)
pxls <- detected %>%
ungroup() %>% group_by(biome) %>%
tally()
undetected <- df_delta_detected %>%
filter(n_ews == 0) %>%
ungroup() %>% #group_by(biome) %>%
split(., f = .$biome) %>%
map2(., .y = pxls$n,
.f = function(x,y) slice_sample(.data = x, n = y, replace = FALSE)) %>%
bind_rows() %>%
select(lon, lat, biome, n_ews)
undetected %>%
ungroup() %>% group_by(biome) %>%
tally(name = "n2") %>%
left_join(pxls) %>%
mutate(balanced = n == n2)
pxl_sample <- bind_rows(detected, undetected)
write_csv(pxl_sample,
file = "Results/sample_pixels_delta_TER.csv")
## compare with old results:
load("Results/200917_detected_gpp.RData")
comp <- df_ews %>%
rename(old_ews = n_ews) %>%
select(-biome_code, -c(kur:fd)) %>%
right_join(df_delta_detected) %>%
select(old_ews, n_ews) %>%
ungroup()
comp[is.na(comp[,"old_ews"]), "old_ews"] <- 0
comp %>%
count(old_ews, n_ews) %>%
pivot_wider(names_from = n_ews, names_prefix = "n_", values_from = n)
cor(comp$old_ews > 0, comp$n_ews > 0) # not good
old_sample %>%
ggplot(aes(lon, lat)) +
geom_tile(aes(fill = n_ews))
#### old graphs ####
deltas %>%
filter(delta_fd != 0) %>%
ggplot(aes(delta_fd)) +
geom_density() +
geom_vline(aes(xintercept = q95),
data = quant %>% filter(stat == "delta_fd")) +
geom_vline(aes(xintercept = q05),
data = quant %>% filter(stat == "delta_fd"))
| /sampling_deltas.R | permissive | juanrocha/ESDL | R | false | false | 7,645 | r | library(tidyverse)
library(tictoc)
## files of interest
fls <- list.files(path = "Results/", pattern = "deltas")
## load results from delta_ews.R script
load(paste0("Results/", fls[1]))
# dataset with biomes:
load('~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/terrestrial_biomes.RData')
load('~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/marine_biomes.RData')
deltas %>%
select(lon, lat, starts_with("delta")) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
ggplot(aes(value)) +
geom_density() +
facet_wrap(~stat, scales = "free")
deltas <- left_join(
deltas, #df_biomes)
df_marine)
# Per-biome, per-statistic detection thresholds: the 5th and 95th percentiles
# of each delta statistic within each biome. Pixels falling outside this band
# are later flagged as early-warning signals.
# NOTE(review): quantile() errors on NA unless na.rm = TRUE; presumably the
# delta columns carry no NAs at this point -- confirm upstream.
quant <- deltas %>%
  select(lon, lat, starts_with("delta"), biome) %>%
  pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
  # filter(!is.na(value)) %>%
  group_by(biome, stat) %>%
  summarise(q95 = quantile(value, prob = 0.95),
            q05 = quantile(value, prob = 0.05))
quant %>%
pivot_longer(starts_with("q"), names_to = "quantile", values_to = "value") %>%
ggplot(aes(value, biome)) +
geom_point() +
geom_line(aes(group = biome)) +
facet_wrap(~stat, scales = 'free_x')
deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
ggplot(aes(value, biome)) +
geom_boxplot(aes(fill = biome, color = biome),
size = 0.2, show.legend = FALSE, alpha = 0.3) +
facet_wrap(~stat, scales = "free_x") +
theme_light(base_size = 7)
## Detection
tic()
deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
left_join(quant) %>%
mutate(
detected = value > q95 | value < q05,
) %>%
ggplot(aes(lon,lat)) +
geom_tile(aes(fill = detected)) +
scale_fill_manual(values = c("grey50", "orange")) +
facet_wrap(~stat) +
theme_void()
toc()
## EWS types:
tic()
deltas %>%
select(lon, lat, starts_with("delta"), biome) %>%
pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
filter(stat == "delta_std" | stat == "delta_ac1") %>%
left_join(quant) %>%
mutate(
ews_type = ifelse(value > q95, "CSD",
ifelse(value < q05, "CSU", "none"))
) %>%
ggplot(aes(lon,lat)) +
geom_tile(aes(fill = ews_type)) +
scale_fill_manual(values = c("blue", "orange", "grey50")) +
facet_wrap(~stat) +
theme_void()
toc()
## detected pixels with delta: remember than now the upper and lower percentails can be
## asymmetrical (e.g. >0.4 could be >95%, but <0.2 could be <0.05%) because the distributions
## are not necessarily centered at zero. I hope the inclusion of such asymmetry improves
## explanatory power
tic()
# Long table with one row per pixel and statistic; `ews` is TRUE when the
# delta falls outside its biome-specific 5-95% band from `quant`.
# left_join matches on the shared columns (biome, stat).
df_delta_detected <- deltas %>%
  select(lon, lat, starts_with("delta"), biome) %>%
  pivot_longer(starts_with("delta"), names_to = "stat", values_to = "value") %>%
  left_join(quant) %>%
  mutate(ews = if_else(value > q95 | value < q05 , TRUE, FALSE))
toc() # 0.2 secs
## sample:
df_delta_detected %>%
group_by(lon, lat) %>%
summarize(n_ews = sum(ews)) %>%
ungroup() %>%
count(n_ews)
tic()
# Collapse to one row per pixel: count how many statistics were flagged
# (n_ews), spread ews/value into one column per statistic, then classify
# pixels where BOTH the AC1 and SD deltas are flagged.
# NOTE(review): presumably csd = critical slowing down (both deltas up),
# csu = speeding up (both down), amb = mixed signs -- consistent with the
# CSD/CSU mapping used in the plotting code above; confirm. Pixels with
# n_ews > 0 but only one statistic flagged fall through to NA.
df_delta_detected <- df_delta_detected %>%
  select( -q95, -q05) %>%
  group_by(lon,lat) %>%
  mutate(n_ews = sum(ews, na.rm = TRUE)) %>%
  ungroup() %>% group_by(lon,lat,biome) %>%
  pivot_wider(names_from = stat, values_from = c(ews, value)) %>%
  mutate(
    ews_type = case_when(
      n_ews == 0 ~ "none",
      ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 > 0 & value_delta_std > 0 ~ "csd",
      ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 < 0 & value_delta_std < 0 ~ "csu",
      ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 > 0 & value_delta_std < 0 ~ "amb",
      ews_delta_ac1 == TRUE & ews_delta_std == TRUE & value_delta_ac1 < 0 & value_delta_std > 0 ~ "amb"))
toc() # 37 secs, 75 on marine data
### Summary stats on detection
df_delta_detected %>%
mutate(detection = n_ews > 0) %>%
ungroup() %>% group_by(biome, detection) %>%
count() %>%
ungroup() %>%
pivot_wider(names_from = detection, values_from = n, names_prefix = "det_") %>%
group_by(biome) %>%
summarize(prop = det_TRUE / (det_TRUE + det_FALSE))
## Result: Between 26-35% of the terrestrial biomes of the world are showing signals of resilience loss. 21-31 % marine rehalms
## The proportion is with respect to the total area otherwise it's a ratio
## Result: very few pixels correspond cleanly with csu (140) or csd (242). Over 1000s for marine
df_delta_detected %>%
select(ews_type, n_ews) %>%
ungroup() %>%
count(ews_type)
## This is a script for a sample that takes into account pixels per biome and detection: but the
## sample remains unbalanced. Does one need to make the groups of equal size? how does one handle
## groups with very few obs like mangroves?
df_delta_detected %>%
select(n_ews) %>%
mutate(detection = n_ews > 0) %>%
ungroup() %>% group_by(detection, biome) %>%
#slice_sample(prop = 0.2, replace = FALSE) %>%
count() %>%
print(n=34)
## save results:
save(df_delta_detected,
file = "~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_ChlorA_log.RData")
fls
#### Do the sampling: ####
load("~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_ChlorA_log.RData")
load("~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_GPP_log.RData")
load("~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/210301_delta_detected_TER_log.RData")
tic()
df_delta_detected <- df_delta_detected %>%
mutate(biome = fct_explicit_na(biome, na_level = "(Missing)"))
toc() # 24sec
df_delta_detected %>%
ggplot(aes(lon,lat)) +
geom_tile(aes(fill = n_ews))
detected <- df_delta_detected %>%
select(n_ews) %>%
filter(n_ews > 0)
pxls <- detected %>%
ungroup() %>% group_by(biome) %>%
tally()
# Biome-balanced background sample: for each biome draw as many undetected
# (n_ews == 0) pixels, without replacement, as there are detected pixels
# (counts in pxls$n).
# NOTE(review): map2 pairs split(.$biome) with pxls$n purely by position;
# this assumes every biome occurs in both sets and in the same factor-level
# order -- confirm, otherwise counts are matched to the wrong biome.
undetected <- df_delta_detected %>%
  filter(n_ews == 0) %>%
  ungroup() %>% #group_by(biome) %>%
  split(., f = .$biome) %>%
  map2(., .y = pxls$n,
       .f = function(x,y) slice_sample(.data = x, n = y, replace = FALSE)) %>%
  bind_rows() %>%
  select(lon, lat, biome, n_ews)
undetected %>%
ungroup() %>% group_by(biome) %>%
tally(name = "n2") %>%
left_join(pxls) %>%
mutate(balanced = n == n2)
pxl_sample <- bind_rows(detected, undetected)
write_csv(pxl_sample,
file = "Results/sample_pixels_delta_TER.csv")
## compare with old results:
load("Results/200917_detected_gpp.RData")
comp <- df_ews %>%
rename(old_ews = n_ews) %>%
select(-biome_code, -c(kur:fd)) %>%
right_join(df_delta_detected) %>%
select(old_ews, n_ews) %>%
ungroup()
comp[is.na(comp[,"old_ews"]), "old_ews"] <- 0
comp %>%
count(old_ews, n_ews) %>%
pivot_wider(names_from = n_ews, names_prefix = "n_", values_from = n)
cor(comp$old_ews > 0, comp$n_ews > 0) # not good
old_sample %>%
ggplot(aes(lon, lat)) +
geom_tile(aes(fill = n_ews))
#### old graphs ####
deltas %>%
filter(delta_fd != 0) %>%
ggplot(aes(delta_fd)) +
geom_density() +
geom_vline(aes(xintercept = q95),
data = quant %>% filter(stat == "delta_fd")) +
geom_vline(aes(xintercept = q05),
data = quant %>% filter(stat == "delta_fd"))
|
# In the 20x20 grid below, four numbers along a diagonal line have been marked in red.
# The product of these numbers is 26 * 63 * 78 * 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction
# (up, down, left, right, or diagonally) in the 20x20 grid?
#
# Fixes to the original loop:
#  * `j:j+3` parses as `(j:j) + 3`, i.e. the single index j + 3, so the old
#    code never actually multiplied four adjacent cells; it needs `j:(j + 3)`.
#  * Vertical/diagonal runs could index past the grid edge; the resulting NAs
#    were silently dropped by na.rm = TRUE, shortening the products.
#  * Horizontal runs were only tried for i <= 17 and the anti-diagonal only
#    for j > 3, skipping valid runs near the borders.

# Greatest product of `k` adjacent entries of `g` along any row, column or
# diagonal (both directions). `g` may be a numeric data frame or matrix.
max_adjacent_product <- function(g, k = 4) {
  m <- as.matrix(g)
  nr <- nrow(m)
  nc <- ncol(m)
  best <- -Inf
  for (i in seq_len(nr)) {
    for (j in seq_len(nc)) {
      if (j + k - 1 <= nc) {                       # right
        best <- max(best, prod(m[i, j:(j + k - 1)]))
      }
      if (i + k - 1 <= nr) {                       # down
        best <- max(best, prod(m[i:(i + k - 1), j]))
      }
      if (i + k - 1 <= nr && j + k - 1 <= nc) {    # down-right diagonal
        best <- max(best, prod(m[cbind(i:(i + k - 1), j:(j + k - 1))]))
      }
      if (i + k - 1 <= nr && j - k + 1 >= 1) {     # down-left diagonal
        best <- max(best, prod(m[cbind(i:(i + k - 1), j:(j - k + 1))]))
      }
    }
  }
  best
}

infile <- '/home/wuqili/R practise/Project Euler/011.csv'
# Guarded so sourcing this script without the data file does not abort.
if (file.exists(infile)) {
  g <- read.csv(infile, header = FALSE)
  result <- max_adjacent_product(g)
  print(result)
}
print(result) | /Project Euler/011-greatest product in grid.R | no_license | QiliWu/leetcode-and-Project-Euler | R | false | false | 922 | r | #In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
#The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
#What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
con <- file('/home/wuqili/R practise/Project Euler/011.csv')
g <- read.csv(con, header = FALSE)
result <- 0
for(i in 1:20){
for(j in 1:20){
if(i <= 17){
if(j <= 3){
result <- max(result, prod(g[i, j:j+3], na.rm = TRUE), prod(g[i:i+3, j], na.rm = TRUE), prod(g[i, j], g[i+1, j+1], g[i+2, j+2], g[i+3, j+3], na.rm = TRUE))
}
if(j > 3){
result <- max(result, prod(g[i, j:j+3], na.rm = TRUE), prod(g[i:i+3, j], na.rm = TRUE), prod(g[i, j], g[i+1, j+1], g[i+2, j+2], g[i+3, j+3], na.rm = TRUE), prod(g[i, j], g[i+1, j-1], g[i+2, j-2], g[i+3, j-3], na.rm = TRUE))
}
}
}
}
print(result) |
# Scripts for plotting suitability maps, vegetation, future projections
# and changes (leading/trailing edged) as well as summary plots for landscape units
# Clear workspace
rm(list=ls())
#source("Scripts/00_Functions_trailing-edge.r")
Computer <- "HP"
#-----------------#
# Set directories #
#-----------------#
if (Computer == "HP") {
wdir <- 'C:/Users/Naia Morueta Holme/Documents/Documents_share/Projects/'
idir <- 'E:/Lead-trail/Projections/V4/'
vdir <- 'E:/Vegetation/PROCESSED/'
bgdir <- paste0(wdir,'100_Postdoc/Data/Background_layers/PROCESSED/')
spdir2 <- paste0(wdir,'101_TBC3_modelling/Lead-trail_R-project/Data/Species/')
fdir <- 'E:/Lead-trail/Projections/Figures_V5/Four_squares/'
spdir <- 'E:/Phylo_modelling/Data/Species/Processed2/'
mdir <- paste0(wdir, '101_TBC3_modelling/Lead-trail_R-project/ModelResults/Maxent/V4/')
}
#----------------#
# Load libraries #
#----------------#
require(raster)
# require(rgdal)
# require(fields)
#-----------------------#
# Parameters for script #
#-----------------------#
# What species?
allSpecies <- sort(unique(read.csv(paste0(spdir2, 'Species_CLN_matching_v2.csv'), as.is=T)[,'Scientific_name']))
# CC scenarios
allScenarios <- c("HST", "GFDL_B1","GFDL_A2","PCM_A2","CNRM_rcp85","CCSM4_rcp85","MIROC_rcp85",
"PCM_B1","MIROC3_2_A2","csiro_A1B","GISS_AOM_A1B","MIROC5_rcp26","MIROC_rcp45",
"MIROC_rcp60","GISS_rcp26","MRI_rcp26","MPI_rcp45","IPSL_rcp85","Fgoals_rcp85")
myrange=c(2:9,11:19)
#myrange=13
# Which model type
mxModelType = "cwd-djf-jja-ppt"
# Landscape units
lu <- readRDS(paste(bgdir, "tbc3_landscape_units_p.rdata",sep=""))
lunames <- as.character(lu@data$Name[which(!is.na(lu@data$Name))])
# Load urban-ag mask
noveg <- readRDS(paste0(vdir,'CLN_Non-veg_30m.rdata'))
noveg270 <- aggregate(noveg, 9, fun=mean, na.rm=T)
rm(noveg)
#Define scenarios to lump:
# # Based on Bay Area climate - note: temperature cut-off is at ~3.2 degrees C, ppt at 0C
# wawet = c("PCM_B1","GISS_AOM_A1B","MPI_rcp45","MRI_rcp26","GISS_rcp26")
# howet = c("CCSM4_rcp85","CNRM_rcp85","PCM_A2","IPSL_rcp85")
# wadry = c("GFDL_B1", "MIROC5_rcp26")
# hodry = c("GFDL_A2","MIROC_rcp85","MIROC_rcp60","MIROC3_2_A2","Fgoals_rcp85","MIROC_rcp45")
# Based on Bay Area climate - note: temperature cut-off is at ~2.5 degrees C, ppt at 0C
wawet = c("PCM_B1","GISS_rcp26","MPI_rcp45","MRI_rcp26")
howet = c("GISS_AOM_A1B","CCSM4_rcp85","CNRM_rcp85","PCM_A2","IPSL_rcp85")
wadry = c("GFDL_B1","MIROC5_rcp26")
hodry = c("GFDL_A2","MIROC_rcp85","MIROC_rcp60","MIROC3_2_A2","Fgoals_rcp85","MIROC_rcp45")
Gr = list(wawet,howet,wadry,hodry)
rm(wawet,howet,wadry,hodry)
#----------#
# Function #
#----------#
# Classify the change in mean habitat suitability from present (psuit) to
# future (fsuit) into a colour code:
#   red    : fsuit <  25% of psuit   (strong decline)
#   orange : fsuit <  75% of psuit   (moderate decline)
#   grey   : fsuit within 75-125% of psuit (roughly stable)
#   green  : fsuit >  125% of psuit  (increase)
# Returns NA_character_ when either input is missing, so callers no longer
# have to pre-filter NA suitabilities (the old version errored on NA).
pickCol = function(psuit, fsuit) {
  if (is.na(psuit) || is.na(fsuit)) {
    return(NA_character_)
  }
  if (fsuit < 0.25 * psuit) {
    "red"
  } else if (fsuit < 0.75 * psuit) {
    "orange"
  } else if (fsuit <= 1.25 * psuit) {
    "grey"
  } else {
    "green"
  }
}
#---------------#
# Set variables #
#---------------#
a=1
b=33
allRES <- list()
for(a in 1:length(allSpecies)) {
#for(a in 1:3) {
mySpecies <- allSpecies[a]
writeLines(mySpecies)
#load historic and future suitabilities
hfile <- paste0(idir,mxModelType,'/',mySpecies,'/','1951-1980_suitability_Bay_HST.tif')
ffiles <- lapply(allScenarios[myrange],function(x) paste0(idir,mxModelType,'/',mySpecies,'/','2070-2099_suitability_Bay_',x,'.tif'))
hst <- raster(hfile)
fut <- stack(ffiles)
rm(hfile,ffiles)
#load vegetation mask
spname <- sub(' ','_',mySpecies)
# mask1file <- paste0(vdir,'CLN_mask1/CLN_mask1_',spname,'.rdata')
# if(file.exists(mask1file)) {
# mask1 <- readRDS(mask1file)
# projection(mask1) <- projection(hst)
# }
#
# mask2 <- readRDS(paste0(vdir,'CLN_mask2/CLN_mask2_',spname,'.rdata'))
# projection(mask2) <- projection(hst)
mymask <- readRDS(paste0(vdir,'CLN_mask3/CLN_mask3_',spname,'.rdata'))
projection(mymask) <- projection(hst)
# Presence/absence threshold from maxent models
mx.dir <- paste(mdir, mySpecies, mxModelType, 'fullmodel', sep='/')
mx <- readRDS(paste(mx.dir, 'ModelObject.rdata', sep='/'))
Threshold = 'Equal.training.sensitivity.and.specificity.logistic.threshold'
mx.th = as.numeric(mx@results[Threshold,])
rm(mx.dir, mx, Threshold)
luRES <- list()
# for(b in 1:4) {
for(b in 1:length(lunames)) {
luName <- lunames[b]
writeLines(luName)
fdir2 <- paste0(fdir, luName,'/')
if(!dir.exists(fdir2)) {dir.create(fdir2, recursive=F)}
# ffile <- paste0(fdir2, mySpecies,'_4square.png')
# png(ffile, width=600, height=200)
#
# par(mfrow=c(1,3),mar=c(rep(0.1,4)),oma=rep(0,4))
slu <- subset(lu,Name==luName)
# masking out urban-ag
smask1 <- mask(crop(noveg270,slu),slu) #note that value 1 is what we want to EXCLUDE (ag-urban)
# masking out with vegmask 3
smask2 <- mask(crop(mymask,slu),slu) #note that value 1 is what we want to INCLUDE (vegtype)
p0 <- mask(crop(hst,slu),slu)
f0 <- mask(crop(fut,slu),slu)
subRES <- list()
for(z in 1:3) {
# all pixels
if(z==1) {
p <- p0
f <- f0
type <- 'All'
#exclude ag-urban
} else if(z==2) {
p <- mask(p0, smask1, inverse=T)
f <- mask(f0, smask1, inverse=T)
type <- 'Natural'
# exclude off vegtype
} else if(z==3) {
p <- mask(p0, smask2)
f <- mask(f0, smask2)
type <- 'InVegtype'
}
psuit = mean(getValues(p),na.rm=T)
fsuits = c()
for(i in 1:nlayers(f)) {
res = mean(getValues(f[[i]]),na.rm=T)
fsuits=c(fsuits,res)
}
Means = sapply(Gr,function(x) {mean(fsuits[which(allScenarios[myrange]%in%x)])})
# Plot colored summaries of change
if(!is.na(psuit)) {
myCols = sapply(Means,function(x) {pickCol(psuit=psuit,fsuit=x)})
} else {
myCols = NA
}
subRES[[z]] <- myCols
names(subRES)[[z]] <- type
ffile <- paste0(fdir2, mySpecies,'_',type,'_4square.png')
png(ffile, width=200, height=200)
par(mar=c(rep(0,4)),oma=rep(0,4))
if(!is.na(psuit)) {
plot(-1,xlim=c(0.5,3.5),ylim=c(0.5,3.5),axes=F,xlab="",ylab="")
rect(xleft=c(1,2,1,2),ybottom=c(2,2,1,1),xright=c(2,3,2,3),ytop=c(3,3,2,2), col=myCols)
} else {
plot(0,0)
}
dev.off()
}
luRES[[b]] <- subRES
names(luRES)[[b]] <- luName
}
allRES[[a]] <- luRES
names(allRES)[[a]] <- mySpecies
}
saveRDS(allRES, paste0(fdir,'All_four_square_results.rdata'))
#----------------#
# Overview plots #
#----------------#
# Plot overview matrix of 4-squares for all species in all landscape units
allSpecies <- sort(names(allRES),decreasing=T)
lunames <- sort(names(allRES[[1]]))
# For 4-squares within vegtype only
pdf(file=paste0(fdir, "Overview_allSp_allLU_InVegtype.pdf"),width=12,height=9, pointsize=12)
par(mfrow=c(1,1),mar=c(0.1,5.5,4,0.5),cex=1)
plot(0,main='', ylim = c(1,length(allSpecies)+1), xlim = c(1,length(lunames)+1),axes=F, xlab="", ylab="",col='white')
text(x=0.5,y=1:length(allSpecies), labels=allSpecies, pos=2, cex=0.6, xpd=T)
text(x=1:length(lunames), y=length(allSpecies)+1, labels=lunames, srt=45, xpd=T, pos=4, cex=0.6)
w <- 0.4
s <- 1
l <- 1
#myCols <- c('red','green','orange','grey')
for(s in 1:length(allSpecies)) {
#for(s in 1:4) {
sp <- allSpecies[s]
# for(l in 1:3) {
for(l in 1:length(lunames)) {
lu <- lunames[l]
myCols <- allRES[[sp]][[lu]][[3]]
rect(xleft=c(l-w,l,l-w,l),ybottom=c(s,s,s-w,s-w),xright=c(l,l+w,l,l+w),ytop=c(s+w,s+w,s,s), col=myCols)
}
}
dev.off()
# For 4-squares within natural areas and within vegtype
pdf(file=paste0(fdir, "Overview_allSp_allLU_Natural_or_InVegtype.pdf"),width=24,height=9, pointsize=12)
par(mfrow=c(1,1),mar=c(0.1,5.5,4,0.5),cex=1)
plot(0,main='', ylim = c(1,length(allSpecies)+1), xlim = c(1,2*length(lunames)+1),axes=F, xlab="", ylab="",col='white')
text(x=0.5,y=1:length(allSpecies), labels=allSpecies, pos=2, cex=0.6, xpd=T)
text(x=seq(1,2*length(lunames),by=2), y=length(allSpecies)+1, labels=lunames, srt=45, xpd=T, pos=4, cex=0.6)
w <- 0.4
s <- 1
l <- 1
#myCols <- c('red','green','orange','grey')
# add background stripes
i <- seq(1.5,length(lunames)*2,by=4)
rect(xleft=i-w*2.5, ybottom=0, xright=i+w*2.5, ytop=length(allSpecies)+w*2,border=F, col='lightblue')
for(s in 1:length(allSpecies)) {
#for(s in 1:4) {
sp <- allSpecies[s]
# for(l in 1:3) {
for(ll in 1:length(lunames)) {
lu <- lunames[ll]
myCols1 <- allRES[[sp]][[lu]][[2]]
myCols2 <- allRES[[sp]][[lu]][[3]]
l=seq(1,2*length(lunames),by=2)[ll]
rect(xleft=c(l-w,l,l-w,l),ybottom=c(s,s,s-w,s-w),xright=c(l,l+w,l,l+w),ytop=c(s+w,s+w,s,s), col=myCols1)
l=seq(2,2*length(lunames),by=2)[[ll]]
rect(xleft=c(l-w,l,l-w,l),ybottom=c(s,s,s-w,s-w),xright=c(l,l+w,l,l+w),ytop=c(s+w,s+w,s,s), col=myCols2)
}
}
dev.off()
| /02_Plotting/04_Map_four-squares.R | no_license | naiamh/Lead-trail_project | R | false | false | 9,419 | r |
# Scripts for plotting suitability maps, vegetation, future projections
# and changes (leading/trailing edged) as well as summary plots for landscape units
# Clear workspace
rm(list=ls())
#source("Scripts/00_Functions_trailing-edge.r")
Computer <- "HP"
#-----------------#
# Set directories #
#-----------------#
if (Computer == "HP") {
wdir <- 'C:/Users/Naia Morueta Holme/Documents/Documents_share/Projects/'
idir <- 'E:/Lead-trail/Projections/V4/'
vdir <- 'E:/Vegetation/PROCESSED/'
bgdir <- paste0(wdir,'100_Postdoc/Data/Background_layers/PROCESSED/')
spdir2 <- paste0(wdir,'101_TBC3_modelling/Lead-trail_R-project/Data/Species/')
fdir <- 'E:/Lead-trail/Projections/Figures_V5/Four_squares/'
spdir <- 'E:/Phylo_modelling/Data/Species/Processed2/'
mdir <- paste0(wdir, '101_TBC3_modelling/Lead-trail_R-project/ModelResults/Maxent/V4/')
}
#----------------#
# Load libraries #
#----------------#
require(raster)
# require(rgdal)
# require(fields)
#-----------------------#
# Parameters for script #
#-----------------------#
# What species?
allSpecies <- sort(unique(read.csv(paste0(spdir2, 'Species_CLN_matching_v2.csv'), as.is=T)[,'Scientific_name']))
# CC scenarios
allScenarios <- c("HST", "GFDL_B1","GFDL_A2","PCM_A2","CNRM_rcp85","CCSM4_rcp85","MIROC_rcp85",
"PCM_B1","MIROC3_2_A2","csiro_A1B","GISS_AOM_A1B","MIROC5_rcp26","MIROC_rcp45",
"MIROC_rcp60","GISS_rcp26","MRI_rcp26","MPI_rcp45","IPSL_rcp85","Fgoals_rcp85")
myrange=c(2:9,11:19)
#myrange=13
# Which model type
mxModelType = "cwd-djf-jja-ppt"
# Landscape units
lu <- readRDS(paste(bgdir, "tbc3_landscape_units_p.rdata",sep=""))
lunames <- as.character(lu@data$Name[which(!is.na(lu@data$Name))])
# Load urban-ag mask
noveg <- readRDS(paste0(vdir,'CLN_Non-veg_30m.rdata'))
noveg270 <- aggregate(noveg, 9, fun=mean, na.rm=T)
rm(noveg)
#Define scenarios to lump:
# # Based on Bay Area climate - note: temperature cut-off is at ~3.2 degrees C, ppt at 0C
# wawet = c("PCM_B1","GISS_AOM_A1B","MPI_rcp45","MRI_rcp26","GISS_rcp26")
# howet = c("CCSM4_rcp85","CNRM_rcp85","PCM_A2","IPSL_rcp85")
# wadry = c("GFDL_B1", "MIROC5_rcp26")
# hodry = c("GFDL_A2","MIROC_rcp85","MIROC_rcp60","MIROC3_2_A2","Fgoals_rcp85","MIROC_rcp45")
# Based on Bay Area climate - note: temperature cut-off is at ~2.5 degrees C, ppt at 0C
wawet = c("PCM_B1","GISS_rcp26","MPI_rcp45","MRI_rcp26")
howet = c("GISS_AOM_A1B","CCSM4_rcp85","CNRM_rcp85","PCM_A2","IPSL_rcp85")
wadry = c("GFDL_B1","MIROC5_rcp26")
hodry = c("GFDL_A2","MIROC_rcp85","MIROC_rcp60","MIROC3_2_A2","Fgoals_rcp85","MIROC_rcp45")
Gr = list(wawet,howet,wadry,hodry)
rm(wawet,howet,wadry,hodry)
#----------#
# Function #
#----------#
# Colour code for the change in mean suitability from present (psuit) to
# future (fsuit): "red" below 25% of present, "orange" below 75%, "grey" up
# to 125%, and "green" above that.
pickCol = function(psuit, fsuit) {
  # Guard clauses, checked from the top of the scale downwards.
  if (fsuit > 1.25 * psuit) return("green")
  if (fsuit >= 0.75 * psuit) return("grey")
  if (fsuit >= 0.25 * psuit) return("orange")
  "red"
}
#---------------#
# Set variables #
#---------------#
a=1
b=33
allRES <- list()
for(a in 1:length(allSpecies)) {
#for(a in 1:3) {
mySpecies <- allSpecies[a]
writeLines(mySpecies)
#load historic and future suitabilities
hfile <- paste0(idir,mxModelType,'/',mySpecies,'/','1951-1980_suitability_Bay_HST.tif')
ffiles <- lapply(allScenarios[myrange],function(x) paste0(idir,mxModelType,'/',mySpecies,'/','2070-2099_suitability_Bay_',x,'.tif'))
hst <- raster(hfile)
fut <- stack(ffiles)
rm(hfile,ffiles)
#load vegetation mask
spname <- sub(' ','_',mySpecies)
# mask1file <- paste0(vdir,'CLN_mask1/CLN_mask1_',spname,'.rdata')
# if(file.exists(mask1file)) {
# mask1 <- readRDS(mask1file)
# projection(mask1) <- projection(hst)
# }
#
# mask2 <- readRDS(paste0(vdir,'CLN_mask2/CLN_mask2_',spname,'.rdata'))
# projection(mask2) <- projection(hst)
mymask <- readRDS(paste0(vdir,'CLN_mask3/CLN_mask3_',spname,'.rdata'))
projection(mymask) <- projection(hst)
# Presence/absence threshold from maxent models
mx.dir <- paste(mdir, mySpecies, mxModelType, 'fullmodel', sep='/')
mx <- readRDS(paste(mx.dir, 'ModelObject.rdata', sep='/'))
Threshold = 'Equal.training.sensitivity.and.specificity.logistic.threshold'
mx.th = as.numeric(mx@results[Threshold,])
rm(mx.dir, mx, Threshold)
luRES <- list()
# for(b in 1:4) {
for(b in 1:length(lunames)) {
luName <- lunames[b]
writeLines(luName)
fdir2 <- paste0(fdir, luName,'/')
if(!dir.exists(fdir2)) {dir.create(fdir2, recursive=F)}
# ffile <- paste0(fdir2, mySpecies,'_4square.png')
# png(ffile, width=600, height=200)
#
# par(mfrow=c(1,3),mar=c(rep(0.1,4)),oma=rep(0,4))
slu <- subset(lu,Name==luName)
# masking out urban-ag
smask1 <- mask(crop(noveg270,slu),slu) #note that value 1 is what we want to EXCLUDE (ag-urban)
# masking out with vegmask 3
smask2 <- mask(crop(mymask,slu),slu) #note that value 1 is what we want to INCLUDE (vegtype)
p0 <- mask(crop(hst,slu),slu)
f0 <- mask(crop(fut,slu),slu)
subRES <- list()
for(z in 1:3) {
# all pixels
if(z==1) {
p <- p0
f <- f0
type <- 'All'
#exclude ag-urban
} else if(z==2) {
p <- mask(p0, smask1, inverse=T)
f <- mask(f0, smask1, inverse=T)
type <- 'Natural'
# exclude off vegtype
} else if(z==3) {
p <- mask(p0, smask2)
f <- mask(f0, smask2)
type <- 'InVegtype'
}
psuit = mean(getValues(p),na.rm=T)
fsuits = c()
for(i in 1:nlayers(f)) {
res = mean(getValues(f[[i]]),na.rm=T)
fsuits=c(fsuits,res)
}
Means = sapply(Gr,function(x) {mean(fsuits[which(allScenarios[myrange]%in%x)])})
# Plot colored summaries of change
if(!is.na(psuit)) {
myCols = sapply(Means,function(x) {pickCol(psuit=psuit,fsuit=x)})
} else {
myCols = NA
}
subRES[[z]] <- myCols
names(subRES)[[z]] <- type
ffile <- paste0(fdir2, mySpecies,'_',type,'_4square.png')
png(ffile, width=200, height=200)
par(mar=c(rep(0,4)),oma=rep(0,4))
if(!is.na(psuit)) {
plot(-1,xlim=c(0.5,3.5),ylim=c(0.5,3.5),axes=F,xlab="",ylab="")
rect(xleft=c(1,2,1,2),ybottom=c(2,2,1,1),xright=c(2,3,2,3),ytop=c(3,3,2,2), col=myCols)
} else {
plot(0,0)
}
dev.off()
}
luRES[[b]] <- subRES
names(luRES)[[b]] <- luName
}
allRES[[a]] <- luRES
names(allRES)[[a]] <- mySpecies
}
saveRDS(allRES, paste0(fdir,'All_four_square_results.rdata'))
#----------------#
# Overview plots #
#----------------#
# Plot overview matrix of 4-squares for all species in all landscape units
allSpecies <- sort(names(allRES),decreasing=T)
lunames <- sort(names(allRES[[1]]))
# For 4-squares within vegtype only
pdf(file=paste0(fdir, "Overview_allSp_allLU_InVegtype.pdf"),width=12,height=9, pointsize=12)
par(mfrow=c(1,1),mar=c(0.1,5.5,4,0.5),cex=1)
plot(0,main='', ylim = c(1,length(allSpecies)+1), xlim = c(1,length(lunames)+1),axes=F, xlab="", ylab="",col='white')
text(x=0.5,y=1:length(allSpecies), labels=allSpecies, pos=2, cex=0.6, xpd=T)
text(x=1:length(lunames), y=length(allSpecies)+1, labels=lunames, srt=45, xpd=T, pos=4, cex=0.6)
w <- 0.4
s <- 1
l <- 1
#myCols <- c('red','green','orange','grey')
for(s in 1:length(allSpecies)) {
#for(s in 1:4) {
sp <- allSpecies[s]
# for(l in 1:3) {
for(l in 1:length(lunames)) {
lu <- lunames[l]
myCols <- allRES[[sp]][[lu]][[3]]
rect(xleft=c(l-w,l,l-w,l),ybottom=c(s,s,s-w,s-w),xright=c(l,l+w,l,l+w),ytop=c(s+w,s+w,s,s), col=myCols)
}
}
dev.off()
# For 4-squares within natural areas and within vegtype
pdf(file=paste0(fdir, "Overview_allSp_allLU_Natural_or_InVegtype.pdf"),width=24,height=9, pointsize=12)
par(mfrow=c(1,1),mar=c(0.1,5.5,4,0.5),cex=1)
plot(0,main='', ylim = c(1,length(allSpecies)+1), xlim = c(1,2*length(lunames)+1),axes=F, xlab="", ylab="",col='white')
text(x=0.5,y=1:length(allSpecies), labels=allSpecies, pos=2, cex=0.6, xpd=T)
text(x=seq(1,2*length(lunames),by=2), y=length(allSpecies)+1, labels=lunames, srt=45, xpd=T, pos=4, cex=0.6)
w <- 0.4
s <- 1
l <- 1
#myCols <- c('red','green','orange','grey')
# add background stripes
i <- seq(1.5,length(lunames)*2,by=4)
rect(xleft=i-w*2.5, ybottom=0, xright=i+w*2.5, ytop=length(allSpecies)+w*2,border=F, col='lightblue')
for(s in 1:length(allSpecies)) {
#for(s in 1:4) {
sp <- allSpecies[s]
# for(l in 1:3) {
for(ll in 1:length(lunames)) {
lu <- lunames[ll]
myCols1 <- allRES[[sp]][[lu]][[2]]
myCols2 <- allRES[[sp]][[lu]][[3]]
l=seq(1,2*length(lunames),by=2)[ll]
rect(xleft=c(l-w,l,l-w,l),ybottom=c(s,s,s-w,s-w),xright=c(l,l+w,l,l+w),ytop=c(s+w,s+w,s,s), col=myCols1)
l=seq(2,2*length(lunames),by=2)[[ll]]
rect(xleft=c(l-w,l,l-w,l),ybottom=c(s,s,s-w,s-w),xright=c(l,l+w,l,l+w),ytop=c(s+w,s+w,s,s), col=myCols2)
}
}
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{separar}
\alias{separar}
\title{Separar em construção e validação}
\usage{
separar(bd, n_validacao = 100, seed = 500)
}
\arguments{
\item{bd}{banco de dados total.}
\item{n_validacao}{número de casos na validação}
\item{seed}{semente usada}
}
\description{
Separar em construção e validação
}
| /man/separar.Rd | no_license | dfalbel/captchaReceita2 | R | false | true | 398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{separar}
\alias{separar}
\title{Separar em construção e validação}
\usage{
separar(bd, n_validacao = 100, seed = 500)
}
\arguments{
\item{bd}{banco de dados total.}
\item{n_validacao}{numero de casos na validação}
\item{seed}{semente usada}
}
\description{
Separar em construção e validação
}
|
# setwd("C:/Users/gudge16/CloudStation/R_class_2018/Lecture_10/Shiny_app_template")
# getwd()
library(shiny)
library(dplyr)
library(ggplot2)

# Product listing for the BC Liquor Store; columns used below:
# Price, Type, Country, Alcohol_Content.
bcl <- read.csv('bclData.csv', stringsAsFactors = F)

# UI: sidebar with a price-range slider, product-type radio buttons and a
# server-rendered country selector; main panel shows a histogram of alcohol
# content above the matching results table.
ui <- fluidPage(
  titlePanel("BC Liquor Store prices"),
  sidebarLayout(sidebarPanel(
    sliderInput(
      "priceInput",
      "Price",
      min = 0,
      max = 100,
      value = c(25, 40),
      pre = "$"  # show the slider values as dollar amounts
    ),
    radioButtons(
      "typeInput",
      "Product type",
      choices = c("BEER", "REFRESHMENT", "SPIRITS", "WINE"),
      selected = "WINE"
    ),
    # Placeholder filled in by output$countryOutput on the server side.
    uiOutput("countryOutput")
  ),
  mainPanel(plotOutput("coolplot"),
            br(), br(),
            tableOutput("results"))
  ))
# Server logic: one shared reactive (`filtered`) drives both the histogram
# and the results table.
server <- function(input, output, session) {
  # Country selector is rendered server-side so its choices come from the
  # data itself.
  output$countryOutput <- renderUI({
    selectInput("countryInput", "Country",
                sort(unique(bcl$Country)), selected = "CANADA")
  })

  # Rows matching all current inputs. Returns NULL until the country
  # selector has been rendered (input$countryInput starts out NULL).
  filtered <- reactive({
    if (is.null(input$countryInput)) {
      return(NULL)
    }
    bcl %>%
      filter(
        Price >= input$priceInput[1],
        Price <= input$priceInput[2],
        Type == input$typeInput,
        Country == input$countryInput
      )
  })

  output$coolplot <- renderPlot({
    if (is.null(filtered())) {
      return()  # nothing to plot until the country input exists
    }
    ggplot(filtered(), aes(Alcohol_Content)) +
      geom_histogram()
  })

  # Fix: reuse the shared reactive instead of re-running the filter by hand.
  # The old copy duplicated the filtering logic and lacked the NULL guard,
  # so the table errored during startup before the selector rendered.
  output$results <- renderTable({
    filtered()
  })
}
shinyApp(ui = ui, server = server)
| /Shiny_app_template/appP2.R | no_license | ingridknapp/potpourri | R | false | false | 1,669 | r | #setwd("C:/Users/gudge16/CloudStation/R_class_2018/Lecture_10/Shiny_app_template")
#getwd()
library(shiny)
library(dplyr)
library(ggplot2)
bcl <- read.csv('bclData.csv', stringsAsFactors = F)
ui <- fluidPage(
titlePanel("BC Liquor Store prices"),
sidebarLayout(sidebarPanel(
sliderInput(
"priceInput",
"Price",
min = 0,
max = 100,
value = c(25, 40),
pre = "$"
),
radioButtons(
"typeInput",
"Product type",
choices = c("BEER", "REFRESHMENT", "SPIRITS", "WINE"),
selected = "WINE"
),
uiOutput("countryOutput")
),
mainPanel(plotOutput("coolplot"),
br(), br(),
tableOutput("results"))
))
# Server logic: one shared reactive (`filtered`) drives both the histogram
# and the results table.
server <- function(input, output, session) {
  # Country selector is rendered server-side so its choices come from the
  # data itself.
  output$countryOutput <- renderUI({
    selectInput("countryInput", "Country",
                sort(unique(bcl$Country)), selected = "CANADA")
  })

  # Rows matching all current inputs. Returns NULL until the country
  # selector has been rendered (input$countryInput starts out NULL).
  filtered <- reactive({
    if (is.null(input$countryInput)) {
      return(NULL)
    }
    bcl %>%
      filter(
        Price >= input$priceInput[1],
        Price <= input$priceInput[2],
        Type == input$typeInput,
        Country == input$countryInput
      )
  })

  output$coolplot <- renderPlot({
    if (is.null(filtered())) {
      return()  # nothing to plot until the country input exists
    }
    ggplot(filtered(), aes(Alcohol_Content)) +
      geom_histogram()
  })

  # Fix: reuse the shared reactive instead of re-running the filter by hand.
  # The old copy duplicated the filtering logic and lacked the NULL guard,
  # so the table errored during startup before the selector rendered.
  output$results <- renderTable({
    filtered()
  })
}
shinyApp(ui = ui, server = server)
|
# Pseudosigma-versus-Z^2 plot (EDTTS p. 449): pairs each lower order
# statistic with its upper counterpart, computes the pseudosigma of each
# pair, and plots it against the squared normal quantile of the pair's
# upper plotting position. Draws a horizontal reference line at median(x)
# and returns the per-pair summary table.
# NOTE(review): relies on helpers p() and mid() defined elsewhere in this
# project (plotting positions and midsummaries, presumably) -- not base R;
# confirm they are sourced before calling.
psvz2plot <- function(x) {
# Pseudosigma-versus-Z^2 Plot (EDTTS p. 449)
n <- length(x)
# Median position; for odd n the single middle value is excluded below.
k <- (n + 1)/2
sort.x <- sort(x)
sort.p <- p(sort.x)  # plotting positions (external helper)
if(n %% 2 == 0) {
#even
low <- sort.x[1:floor(k)]
up <- rev(sort.x[(floor(k) + 1):n])       # upper half, reversed to pair outermost with outermost
p.up <- rev(sort.p[(floor(k) + 1):n])
}
else {
#odd
low <- sort.x[1:(k - 1)]
up <- rev(sort.x[(k + 1):n])
p.up <- rev(sort.p[(k + 1):n])
}
spread <- up - low          # spread of each matched pair of order statistics
mv <- mid(low, up)          # midsummaries (external helper)
z <- qnorm(p.up)            # standard normal quantiles of the upper positions
z2 <- z^2
ps <- spread/(2 * z)        # pseudosigma: half-spread scaled by z
plot(z2, ps)
abline(median(x), 0)        # horizontal line at height median(x)
value <- data.frame(low, up, spread, mv, p.up, z, z2, ps)
value
} | /psvz2plot.R | no_license | robertandrewstevens/R | R | false | false | 618 | r | psvz2plot <- function(x) {
# Pseudosigma-versus-Z^2 Plot (EDTTS p. 449)
n <- length(x)
k <- (n + 1)/2
sort.x <- sort(x)
sort.p <- p(sort.x)
if(n %% 2 == 0) {
#even
low <- sort.x[1:floor(k)]
up <- rev(sort.x[(floor(k) + 1):n])
p.up <- rev(sort.p[(floor(k) + 1):n])
}
else {
#odd
low <- sort.x[1:(k - 1)]
up <- rev(sort.x[(k + 1):n])
p.up <- rev(sort.p[(k + 1):n])
}
spread <- up - low
mv <- mid(low, up)
z <- qnorm(p.up)
z2 <- z^2
ps <- spread/(2 * z)
plot(z2, ps)
abline(median(x), 0)
value <- data.frame(low, up, spread, mv, p.up, z, z2, ps)
value
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_cutoff.R
\name{re.cutoff_cpp}
\alias{re.cutoff_cpp}
\title{A function to find the split point}
\usage{
re.cutoff_cpp(g, vi, x, inx.s, cnode, minbucket)
}
\arguments{
\item{g}{the effect size}
\item{vi}{the sampling variance}
\item{x}{the splitting moderator}
\item{inx.s}{indicates whether a study belongs to the candidate parent leaf}
\item{cnode}{the terminal nodes that the studies belong to in the current tree}
\item{minbucket}{the minimum number of the studies in a terminal node}
}
\value{
A vector containing the split point, the Q-statistic, and the between-study variance estimate (tau2).
}
\description{
A function to find the split point
}
\keyword{internal}
| /man/re.cutoff_cpp.Rd | no_license | waltersom/metacart | R | false | true | 733 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_cutoff.R
\name{re.cutoff_cpp}
\alias{re.cutoff_cpp}
\title{A function to find the split point}
\usage{
re.cutoff_cpp(g, vi, x, inx.s, cnode, minbucket)
}
\arguments{
\item{g}{the effect size}
\item{vi}{the sampling variance}
\item{x}{the splitting moderator}
\item{inx.s}{indicates whether a study belongs to the candidate parent leaf}
\item{cnode}{the terminal nodes that the studies belong to in the current tree}
\item{minbucket}{the minimum number of the studies in a terminal node}
}
\value{
a vector including the split point, Q, and tau2
}
\description{
A function to find the split point
}
\keyword{internal}
|
######################################################################################################
#Conditional statements and gap filling
#################################################################################################
# Load packages (character.only = TRUE lets library() take the quoted names).
lapply(c("tidyverse", "lubridate", "reshape", "stringr", "plotly", "roll", "data.table", "clifro"), library, character.only = TRUE)

# Compute temp - lag(temp) wherever a previous value exists, otherwise keep
# the raw value.
# NOTE(review): despite the original "gap filling NAN values" comment, this is
# a first difference, not an interpolated fill — confirm intent with the data
# owners before relying on temp_filled.
df <- df %>%
  mutate(temp_filled = if_else(!is.na(lag(temp)), temp - lag(temp), temp))

# Create or fill the detailed quality-flag column from multi-column conditions:
# rain while < 1 C -> snowfall; rain while < 5 C -> potential snowfall.
df$QC_flag <- ifelse(df$Rain > 0 & df$Temp < 1, "SVC: Snowfall: QC'd by EH",
                     ifelse(df$Rain > 0 & df$Temp < 5, "SVC: Potential snowfall: QC'd by EH",
                            "AV: QC'd by EH"))

# Short-form flag (text before the first ":") plus a quality level, useful for
# graphing aesthetics. Quality levels and flags follow the group's QC scheme
# (see Hakai QC standards document).
df_qcd <- df %>%
  mutate(
    QC_flag_shortened = gsub(":.*", "", QC_flag),
    # BUG FIX: the original read df$Quality_flag_shortened — a column that
    # never exists (the column created above is QC_flag_shortened, and columns
    # made inside mutate() must be referenced without df$).
    QC_Level = ifelse(QC_flag_shortened == "AV", "2",
                      ifelse(QC_flag_shortened == "EV", "3", "2"))
  )
| /conditional-statements.R | permissive | HakaiInstitute/wx-tools | R | false | false | 1,206 | r | ######################################################################################################
#Conditional statements and gap filling
#################################################################################################
#load packages
lapply(c("tidyverse", "lubridate", "reshape", "stringr", "plotly", "roll", "data.table", "clifro"), library, character.only = TRUE)
#Gap filling NAN values using lag and lead values
df<-df %>%
mutate(temp_filled = if_else(is.na(lag(temp)) == F, temp - lag(temp), temp))
#Create or fill detailed Quality_Flag column based on multiple-column data conditions and assign quality level
df$QC_flag <- ifelse(df$Rain > 0 & df$Temp < 1, "SVC: Snowfall: QC'd by EH", ifelse(df$Rain > 0 & df$Temp < 5, "SVC: Potential snowfall: QC'd by EH", "AV: QC'd by EH"))
#Create or fill short form quality flag column and quality level column useful for graphing aesthetics
##Quality levels and flags subjective to scheme group is using
##See Hakai QC standards document
df_qcd<-df %>%
mutate(QC_flag_shortened = gsub(":.*","",df$QC_flag),
QC_Level = ifelse(df$Quality_flag_shortened =="AV", "2", ifelse(df$Quality_flag_shortened =="EV", "3", "2")))
|
# Fuzzer-generated regression harness: call esreg::G1_fun via do.call with a
# fixed argument list. Note that z is a subnormal double (~5.9e-310), probing
# behaviour near the smallest representable floating-point magnitudes.
testlist <- list(type = 1L, z = 5.8668939628805e-310)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609891288-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 107 | r | testlist <- list(type = 1L, z = 5.8668939628805e-310)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
sn2ft2x2 <- function(s, n) {
    # Build a fourfold (2x2) table from success counts and totals for two
    # groups: row "Y" holds the successes s, row "N" the failures n - s.
    #
    # s: length-2 vector of success counts (one per group)
    # n: length-2 vector of trial totals (one per group)
    # Returns a 2x2 matrix with dimnames list(A = c("Y","N"), B = c("Y","N")).
    # Stops with an error if s and n differ in length or are not length 2.
    if (length(s) != length(n))
        stop("S and N have to be vectors of equal length\n")
    if (length(s) != 2)
        stop("S and N have to be vectors of length 2\n")
    # rbind() builds the table directly; the original filled a logical NA
    # matrix of the wrong length (rep(NA, 2) silently recycled into 4 cells)
    # one row at a time.
    Q <- rbind(s, n - s)
    dimnames(Q) <- list(A = c("Y", "N"), B = c("Y", "N"))
    return(Q)
}
| /R/sn2ft2x2.R | no_license | cran/evidence | R | false | false | 377 | r | sn2ft2x2 <- function(s, n) {
ls <- length(s)
ln <- length(n)
if (ls != ln)
stop("S and N have to be vectors of equal length\n")
if (ls != 2)
stop("S and N have to be vectors of length 2\n")
Q <- matrix(rep(NA, 2), 2, 2)
Q[1, ] <- s
Q[2, ] <- n - s
dimnames(Q) <- list(A = c("Y", "N"), B = c("Y",
"N"))
return(Q)
}
|
#!/usr/bin/env Rscript
# Expression-matching pipeline: given a gene list (.txt) and a dataset
# (.RData) on the command line, build expression / 3'UTR-length matched
# control gene sets and compare their iCLIP-hit overlap.
Args = commandArgs(trailingOnly=TRUE)
library(tidyverse)
# First column of the .txt argument holds the gene identifiers.
gene_list <- read.table(Args[grep(".txt",Args)], stringsAsFactors = F)$V1
# The .RData file is expected to supply (at least) Expression_norm_per_kb plus
# either UTR_lengths/iCLIP_3UTR_hits or CDS_lengths/iCLIP_CDS_hits.
load(Args[grep("RData",Args)])
print(ls())
#changing this so it can take CDS lengths and hits for expression matching instead - simplest way is to see if the CDS data objects exist and reassign to the UTR names
# so can only supply one or the other- will preferentially use CDS if both were in the RData file
if(exists("iCLIP_CDS_hits")) { iCLIP_3UTR_hits <- iCLIP_CDS_hits}
if(exists("CDS_lengths")) { UTR_lengths <- CDS_lengths }
# filter gene list so that only genes for which we have both a 3'UTR length and an expression value are included (no point in considering genes without an annotated 3'UTR)
# also ensure we have the same set of genes for both of these.
UTR_lengths <- UTR_lengths[rownames(UTR_lengths) %in% rownames(Expression_norm_per_kb),]
Expression_norm_per_kb <- Expression_norm_per_kb[rownames(Expression_norm_per_kb) %in% rownames(UTR_lengths),]
gene_list <- gene_list[gene_list %in% rownames(UTR_lengths)]
# Derive human-readable labels from the file names: strip the extension and
# any leading directory components.
geneSetName <- Args[grep(".txt",Args)]
geneSetName <-gsub("(.*).txt","\\1", geneSetName)
geneSetName <-gsub(".*/(.*?)$","\\1", geneSetName)
Celltype <- Args[grep(".RData",Args)]
Celltype <- gsub("(.*).RData", "\\1", Celltype)
Celltype <- gsub(".*/(.*?)$", "\\1", Celltype)
print(geneSetName)
print(Celltype)
ExpressionMatch <- function(countData, resData, geneSets, geneSetNames = NA, resCol = NA, geneCol = NA, nRandom = 100,
                            baseMeanGrep = "", newCol = "Expression_matched_genes", avoid_genes = NULL, UTR3p = F, UTRdata = NULL,
                            UTRthreshold = 10){
  # For each gene in each supplied gene set, pick one control gene with a
  # similar mean expression (sampled uniformly from the nRandom closest
  # candidates) and, when UTR3p = TRUE, a similar 3'UTR length. Each chosen
  # control is removed from the candidate pool, so controls are matched
  # without replacement. Returns resData with a new column (newCol) labelling
  # the original gene sets and their "Expression matched to <set>" controls.
  countData <- as.data.frame(countData)
  resData <- as.data.frame(resData)
  # Mean expression over the (optionally baseMeanGrep-selected) count columns.
  countData$baseMean <- rowMeans(countData[,grepl(baseMeanGrep, colnames(countData))])
  geneSet_counts <- list()
  # Restrict candidates to genes present in resData (matched by rowname, or
  # by the geneCol column when one is given).
  if(is.na(geneCol)){
    countData <- countData[rownames(countData) %in% rownames(resData),]
  }
  else {
    # NOTE(review): resData$geneCol selects a column literally named
    # "geneCol"; this probably should be resData[[geneCol]] — confirm.
    countData <- countData[rownames(countData) %in% resData$geneCol,]
  }
  if(is.na(resCol)){
    # Gene sets supplied as a list of gene vectors: build newCol from scratch.
    resData[,newCol] <- NA
    # NOTE(review): is.na() on a vector errors in R >= 4.2 when geneSetNames
    # is supplied with length > 1; consider all(is.na(geneSetNames)).
    if(is.na(geneSetNames)){
      geneSetNames <- paste("geneSet",1:length(geneSets),sep = "_")
    }
    for(i in 1:length(geneSets)){
      if(is.na(geneCol)){
        resData[rownames(resData) %in% geneSets[[i]],newCol] <- geneSetNames[i]
      }
      else {
        resData[resData[,geneCol] %in% geneSets[[i]],newCol] <- geneSetNames[i]
      }
    }
    # Pull each gene set's counts out and drop those genes from the
    # candidate pool.
    for(i in 1:length(geneSets)){
      genes_in_set <- geneSets[[i]]
      geneSet_counts[[i]] <- countData[rownames(countData) %in% genes_in_set,]
      countData <- countData[!rownames(countData) %in% genes_in_set,]
    }
  }
  else {
    # Gene sets encoded as values of resData[,resCol]: rename them to
    # geneSetNames in the new column, then extract each set's counts.
    resData[,newCol] <- as.character(resData[,resCol])
    if(is.na(geneSetNames)){
      geneSetNames <- geneSets
    }
    for(i in 1:length(geneSets)){
      resData[,newCol] <- gsub(geneSets[i], geneSetNames[i], resData[,newCol])
      if(is.na(geneCol)){
        genes_in_set <- rownames(resData)[resData[,resCol] == geneSets[i]]
      }
      else {
        genes_in_set <- resData[resData[,resCol] == geneSets[i], geneCol]
      }
      geneSet_counts[[i]] <- countData[rownames(countData) %in% genes_in_set,]
      countData <- countData[!rownames(countData) %in% genes_in_set,]
    }
  }
  # Never match to explicitly excluded genes.
  countData <- countData[!rownames(countData) %in% avoid_genes,]
  if(UTR3p == T){
    # Record each gene-set gene's 3'UTR length, then keep only candidate
    # genes whose UTR length is known and above UTRthreshold.
    geneSet_UTRs <- list()
    for(i in 1:length(geneSets)){
      geneSet_UTRs[[i]] <- UTRdata[match(rownames(geneSet_counts[[i]]),rownames(UTRdata)),1]
    }
    UTRdata <- na.omit(UTRdata)
    UTRdata$threshold <- UTRdata[,1] > UTRthreshold
    UTRdata <- UTRdata[UTRdata$threshold == T,]
    countData <- countData[rownames(countData) %in% rownames(UTRdata),]
  }
  for(i in 1:length(geneSets)){
    expression_matched_genes <- character()
    for(j in 1:nrow(geneSet_counts[[i]])){
      if(UTR3p==T){
        # Candidates whose UTR length is within +/-25% of the target gene's;
        # if fewer than 1000 qualify, fall back to the 1000 closest lengths.
        UTRdata1 <- UTRdata[rownames(UTRdata) %in% rownames(countData),]
        utr_length <- geneSet_UTRs[[i]][j]
        utr_min <- floor(0.75*utr_length)
        utr_max <- ceiling(1.25*utr_length)
        genes_to_keep <- rownames(UTRdata1)[UTRdata1[,1] > utr_min & UTRdata1[,1] < utr_max]
        if(length(genes_to_keep) < 1000){
          UTRdata1$diff <- abs(UTRdata1[,1] - utr_length)
          genes_to_keep <- rownames(UTRdata1[order(UTRdata1$diff),])[1:1000]
        }
        countData1 <- countData[rownames(countData) %in% genes_to_keep,]
      }
      else {
        countData1 <- countData
      }
      # Sample one gene uniformly from the nRandom candidates whose baseMean
      # is closest to the target gene's, then remove it from the pool.
      closest_gene_index <- sample(order(abs(countData1$baseMean - geneSet_counts[[i]]$baseMean[j]))[1:nRandom],1)
      expression_matched_genes[j] <- rownames(countData1)[closest_gene_index]
      countData <- countData[!rownames(countData) == rownames(countData1)[closest_gene_index],]
    }
    # Label the chosen controls for this gene set in the new column.
    if(is.na(geneCol)){
      resData[rownames(resData) %in% expression_matched_genes,newCol] <- paste("Expression matched to",geneSetNames[i])
    }
    else {
      resData[resData[,geneCol] %in% expression_matched_genes,newCol] <- paste("Expression matched to",geneSetNames[i])
    }
  }
  return(resData)
}
# To generate multiple set of expression matched genes:
ExpressionMatch_multi <- function(nSets, countData, resData, geneSets, geneSetNames = NA, resCol = NA, geneCol = NA, nRandom = 100,
                                  baseMeanGrep = "", newCol = "Expression_matched_genes", avoid_genes = NULL, UTR3p = FALSE, UTRdata = NULL,
                                  UTRthreshold = 10){
  # Draw nSets independent expression-matched control gene sets by calling
  # ExpressionMatch() once per set and collecting the matched gene names.
  #
  # Returns a list of length nSets; each element is a named list holding one
  # character vector of matched genes per "Expression matched to <name>"
  # label. All other arguments are passed straight through to
  # ExpressionMatch(); default set names mirror its behaviour.
  if (all(is.na(geneSetNames))) {
    # FIX: the original used if (is.na(geneSetNames)), which errors in
    # R >= 4.2 ("the condition has length > 1") whenever a vector of names
    # is supplied.
    if (is.na(resCol)) {
      geneSetNames <- paste("geneSet", seq_along(geneSets), sep = "_")
    } else {
      geneSetNames <- geneSets
    }
  }
  # Preallocate; seq_len() also behaves correctly when nSets == 0 (the
  # original 1:nSets would have iterated over c(1, 0)).
  expression_matched_sets <- vector("list", nSets)
  for (s in seq_len(nSets)) {
    expression_matched_sets[[s]] <- list()
    eM <- ExpressionMatch(countData = countData, resData = resData, geneSets = geneSets, geneSetNames = geneSetNames, resCol = resCol,
                          geneCol = geneCol, nRandom = nRandom, baseMeanGrep = baseMeanGrep, newCol = newCol, avoid_genes = avoid_genes,
                          UTR3p = UTR3p, UTRdata = UTRdata, UTRthreshold = UTRthreshold)
    # Levels of the new column other than the original gene-set names are the
    # "Expression matched to ..." labels produced by ExpressionMatch().
    eM_levels <- levels(na.omit(as.factor(eM[, newCol])))
    eM_levels <- eM_levels[!eM_levels %in% geneSetNames]
    for (l in eM_levels) {
      if (is.na(geneCol)) {
        # Gene identifiers are rownames; collect those assigned to level l.
        expression_matched_sets[[s]][[as.character(l)]] <- as.character(na.omit(rownames(eM)[eM[, newCol] == l]))
      }
      # NOTE(review): when geneCol is supplied nothing is stored for the set,
      # mirroring the original behaviour — confirm whether that is intended.
    }
  }
  return(expression_matched_sets)
}
expression_matched <-ExpressionMatch_multi(nSets = 100, countData = Expression_norm_per_kb, resData = Expression_norm_per_kb, geneSets = list(gene_list), geneSetNames = geneSetName, nRandom = 100, UTR3p = T, UTRdata = UTR_lengths)
exMatch_iCLIP3UTR_overlap <- sapply(expression_matched, function(x) sum(x[[1]] %in% iCLIP_3UTR_hits))
iCLIP_hits <- data.frame(Gene = gene_list)
iCLIP_hits$iCLIP_hit <- iCLIP_hits$Gene %in% iCLIP_3UTR_hits
exMatch_overlaps <- data.frame(Overlap = exMatch_iCLIP3UTR_overlap)
exMatch_overlaps$upper_95percent <- rep(quantile(exMatch_iCLIP3UTR_overlap, 0.95),length(exMatch_iCLIP3UTR_overlap))
exMatch_overlaps$lower_5percent <- rep(quantile(exMatch_iCLIP3UTR_overlap, 0.05),length(exMatch_iCLIP3UTR_overlap))
pdf(paste0(geneSetName, "_", Celltype, ".pdf"))
ggplot(iCLIP_hits, aes(x = geneSetName))+geom_bar(aes(fill = iCLIP_hit)) + scale_fill_manual(values =c("grey60","mediumturquoise")) + stat_summary(data = exMatch_overlaps, fun = "median", geom = "bar", width=0.7, fill = "darkcyan", aes(y = Overlap))+geom_errorbar(data = exMatch_overlaps[1,],aes(ymin= lower_5percent, ymax =upper_95percent), width=0.5)+ labs(y = "Number of genes", title = paste0(geneSetName,"\n",Celltype))
dev.off()
Expression_norm_per_kb$Mean <- rowMeans(Expression_norm_per_kb)
exMatch_expression <- data.frame(Gene = gene_list, Expression = Expression_norm_per_kb$Mean[match(gene_list, rownames(Expression_norm_per_kb))], Gene_set = geneSetName, Type = geneSetName)
for(i in 1:length(expression_matched)){
exMatch_expression <- rbind(exMatch_expression, data.frame(Gene = expression_matched[[i]][[1]], Expression = Expression_norm_per_kb$Mean[match(expression_matched[[i]][[1]], rownames(Expression_norm_per_kb))], Gene_set = paste0("EM_", i), Type ="Expression matched"))
}
for(i in 1:10){
random_genes <- sample(rownames(Expression_norm_per_kb),180)
exMatch_expression <- rbind(exMatch_expression, data.frame(Gene = random_genes, Expression = Expression_norm_per_kb$Mean[match(random_genes, rownames(Expression_norm_per_kb))], Gene_set = paste0("Random_", i), Type = "Random"))
}
exMatch_expression$Type <- factor(exMatch_expression$Type, levels = c(geneSetName,"Expression matched", "Random"))
exMatch_expression$Gene_set <- as.factor(exMatch_expression$Gene_set)
exMatch_expression$Gene_set <- relevel(exMatch_expression$Gene_set, geneSetName)
### How about 3'UTR length?
exMatch_expression$UTRlength <- UTR_lengths[match(exMatch_expression$Gene, rownames(UTR_lengths)),1]
pdf(paste0(geneSetName, "_", Celltype, "_expression_UTR_match.pdf"), width=9)
ggplot(exMatch_expression, aes(x = Gene_set, y = Expression, fill = Type)) + geom_boxplot(outlier.size =0) + scale_fill_manual(values = c("firebrick","lightblue1", "lightgoldenrod"))
ggplot(exMatch_expression, aes(x = Gene_set, y = UTRlength, fill = Type)) + geom_boxplot(outlier.size =0)+scale_y_log10() + scale_fill_manual(values = c("firebrick","lightblue1", "lightgoldenrod"))
dev.off()
exMatch_expression$iCLIP_hit_FDR0.05 <- exMatch_expression$Gene %in% iCLIP_3UTR_hits
write.table(exMatch_expression, file = paste0(geneSetName, "_",Celltype,"_expression_matched.txt"), sep = "\t", quote = F, row.names =F)
exMatch_processed_data <- list(iCLIP_hits=iCLIP_hits, exMatch_overlaps= exMatch_overlaps, exMatch_expression=exMatch_expression)
saveRDS(exMatch_processed_data, paste0(geneSetName, "_", Celltype, "_expressionMatch_processed_data.rds"))
| /expression_match_iCLIP.R | permissive | LouiseMatheson/ZFP36_L1_CD4_code | R | false | false | 11,191 | r | #!/usr/bin/env Rscript
Args = commandArgs(trailingOnly=TRUE)
library(tidyverse)
gene_list <- read.table(Args[grep(".txt",Args)], stringsAsFactors = F)$V1
load(Args[grep("RData",Args)])
print(ls())
#changing this so it can take CDS lengths and hits for expression matching instead - simplest way is to see if the CDS data objects exist and reassign to the UTR names
# so can only supply one or the other- will preferentially use CDS if both were in the RData file
if(exists("iCLIP_CDS_hits")) { iCLIP_3UTR_hits <- iCLIP_CDS_hits}
if(exists("CDS_lengths")) { UTR_lengths <- CDS_lengths }
# filter gene list so that only genes for which we have both a 3'UTR length and an expression value are included (no point in considering genes without an annotated 3'UTR)
# also ensure we have the same set of genes for both of these.
UTR_lengths <- UTR_lengths[rownames(UTR_lengths) %in% rownames(Expression_norm_per_kb),]
Expression_norm_per_kb <- Expression_norm_per_kb[rownames(Expression_norm_per_kb) %in% rownames(UTR_lengths),]
gene_list <- gene_list[gene_list %in% rownames(UTR_lengths)]
geneSetName <- Args[grep(".txt",Args)]
geneSetName <-gsub("(.*).txt","\\1", geneSetName)
geneSetName <-gsub(".*/(.*?)$","\\1", geneSetName)
Celltype <- Args[grep(".RData",Args)]
Celltype <- gsub("(.*).RData", "\\1", Celltype)
Celltype <- gsub(".*/(.*?)$", "\\1", Celltype)
print(geneSetName)
print(Celltype)
### Define functions for expression/3'UTR length matching
#takes counts data (should only contain counts unless baseMeanGrep specified which only selects columns with counts), some form of processed data (resData) which includes a column (resCol) containing the assignment of genes into gene sets.
#Will then identify expression matched genes for required gene sets, from the remaining genes that are not in any of the gene sets specified
# Will assume that gene names are rownames (of both countData - which must be the case - and resData), unless geneCol != NA in which case will look for column entitled geneCol specified
# Instead of providing a column in resData containing assigment to gene sets, can provide the genes within each set - in this case should be provided as a list and resCol left as NA
# will calculate base mean for countData across
# Default for new column containing expression matched (plus original gene set) assignment is "Expression_matched_genes"
# also adding option to consider 3'UTR length: will look for all genes no more than 25% different; or if this is less than 1000 genes will look for closest 1000 (but with min cutoff of 10bp)
# UTRdata should have rownames and a single column containing UTR lengths
ExpressionMatch <- function(countData, resData, geneSets, geneSetNames = NA, resCol = NA, geneCol = NA, nRandom = 100,
baseMeanGrep = "", newCol = "Expression_matched_genes", avoid_genes = NULL, UTR3p = F, UTRdata = NULL,
UTRthreshold = 10){
countData <- as.data.frame(countData)
resData <- as.data.frame(resData)
countData$baseMean <- rowMeans(countData[,grepl(baseMeanGrep, colnames(countData))])
geneSet_counts <- list()
if(is.na(geneCol)){
countData <- countData[rownames(countData) %in% rownames(resData),]
}
else {
countData <- countData[rownames(countData) %in% resData$geneCol,]
}
if(is.na(resCol)){
resData[,newCol] <- NA
if(is.na(geneSetNames)){
geneSetNames <- paste("geneSet",1:length(geneSets),sep = "_")
}
for(i in 1:length(geneSets)){
if(is.na(geneCol)){
resData[rownames(resData) %in% geneSets[[i]],newCol] <- geneSetNames[i]
}
else {
resData[resData[,geneCol] %in% geneSets[[i]],newCol] <- geneSetNames[i]
}
}
for(i in 1:length(geneSets)){
genes_in_set <- geneSets[[i]]
geneSet_counts[[i]] <- countData[rownames(countData) %in% genes_in_set,]
countData <- countData[!rownames(countData) %in% genes_in_set,]
}
}
else {
resData[,newCol] <- as.character(resData[,resCol])
if(is.na(geneSetNames)){
geneSetNames <- geneSets
}
for(i in 1:length(geneSets)){
resData[,newCol] <- gsub(geneSets[i], geneSetNames[i], resData[,newCol])
if(is.na(geneCol)){
genes_in_set <- rownames(resData)[resData[,resCol] == geneSets[i]]
}
else {
genes_in_set <- resData[resData[,resCol] == geneSets[i], geneCol]
}
geneSet_counts[[i]] <- countData[rownames(countData) %in% genes_in_set,]
countData <- countData[!rownames(countData) %in% genes_in_set,]
}
}
countData <- countData[!rownames(countData) %in% avoid_genes,]
if(UTR3p == T){
geneSet_UTRs <- list()
for(i in 1:length(geneSets)){
geneSet_UTRs[[i]] <- UTRdata[match(rownames(geneSet_counts[[i]]),rownames(UTRdata)),1]
}
UTRdata <- na.omit(UTRdata)
UTRdata$threshold <- UTRdata[,1] > UTRthreshold
UTRdata <- UTRdata[UTRdata$threshold == T,]
countData <- countData[rownames(countData) %in% rownames(UTRdata),]
}
for(i in 1:length(geneSets)){
expression_matched_genes <- character()
for(j in 1:nrow(geneSet_counts[[i]])){
if(UTR3p==T){
UTRdata1 <- UTRdata[rownames(UTRdata) %in% rownames(countData),]
utr_length <- geneSet_UTRs[[i]][j]
utr_min <- floor(0.75*utr_length)
utr_max <- ceiling(1.25*utr_length)
genes_to_keep <- rownames(UTRdata1)[UTRdata1[,1] > utr_min & UTRdata1[,1] < utr_max]
if(length(genes_to_keep) < 1000){
UTRdata1$diff <- abs(UTRdata1[,1] - utr_length)
genes_to_keep <- rownames(UTRdata1[order(UTRdata1$diff),])[1:1000]
}
countData1 <- countData[rownames(countData) %in% genes_to_keep,]
}
else {
countData1 <- countData
}
closest_gene_index <- sample(order(abs(countData1$baseMean - geneSet_counts[[i]]$baseMean[j]))[1:nRandom],1)
expression_matched_genes[j] <- rownames(countData1)[closest_gene_index]
countData <- countData[!rownames(countData) == rownames(countData1)[closest_gene_index],]
}
if(is.na(geneCol)){
resData[rownames(resData) %in% expression_matched_genes,newCol] <- paste("Expression matched to",geneSetNames[i])
}
else {
resData[resData[,geneCol] %in% expression_matched_genes,newCol] <- paste("Expression matched to",geneSetNames[i])
}
}
return(resData)
}
# To generate multiple set of expression matched genes:
ExpressionMatch_multi <- function(nSets, countData, resData, geneSets, geneSetNames = NA, resCol = NA, geneCol = NA, nRandom = 100,
baseMeanGrep = "", newCol = "Expression_matched_genes", avoid_genes = NULL, UTR3p = F, UTRdata = NULL,
UTRthreshold = 10){
if(is.na(geneSetNames)){
if(is.na(resCol)){
geneSetNames <- paste("geneSet",1:length(geneSets),sep = "_")
}
else {
geneSetNames <- geneSets
}
}
expression_matched_sets <- list()
for(s in 1:nSets){
expression_matched_sets[[s]] <- list()
eM <- ExpressionMatch(countData = countData, resData = resData, geneSets = geneSets, geneSetNames = geneSetNames, resCol = resCol,
geneCol = geneCol, nRandom = nRandom, baseMeanGrep = baseMeanGrep, newCol = newCol, avoid_genes = avoid_genes,
UTR3p = UTR3p, UTRdata = UTRdata, UTRthreshold = UTRthreshold)
eM_levels <- levels(na.omit(as.factor(eM[,newCol])))
eM_levels <- eM_levels[!eM_levels %in% geneSetNames]
for(l in eM_levels){
if(is.na(geneCol)){
expression_matched_sets[[s]][[as.character(l)]] <- as.character(na.omit(rownames(eM)[eM[,newCol]== l]))
}
}
}
return(expression_matched_sets)
}
expression_matched <-ExpressionMatch_multi(nSets = 100, countData = Expression_norm_per_kb, resData = Expression_norm_per_kb, geneSets = list(gene_list), geneSetNames = geneSetName, nRandom = 100, UTR3p = T, UTRdata = UTR_lengths)
exMatch_iCLIP3UTR_overlap <- sapply(expression_matched, function(x) sum(x[[1]] %in% iCLIP_3UTR_hits))
iCLIP_hits <- data.frame(Gene = gene_list)
iCLIP_hits$iCLIP_hit <- iCLIP_hits$Gene %in% iCLIP_3UTR_hits
exMatch_overlaps <- data.frame(Overlap = exMatch_iCLIP3UTR_overlap)
exMatch_overlaps$upper_95percent <- rep(quantile(exMatch_iCLIP3UTR_overlap, 0.95),length(exMatch_iCLIP3UTR_overlap))
exMatch_overlaps$lower_5percent <- rep(quantile(exMatch_iCLIP3UTR_overlap, 0.05),length(exMatch_iCLIP3UTR_overlap))
pdf(paste0(geneSetName, "_", Celltype, ".pdf"))
ggplot(iCLIP_hits, aes(x = geneSetName))+geom_bar(aes(fill = iCLIP_hit)) + scale_fill_manual(values =c("grey60","mediumturquoise")) + stat_summary(data = exMatch_overlaps, fun = "median", geom = "bar", width=0.7, fill = "darkcyan", aes(y = Overlap))+geom_errorbar(data = exMatch_overlaps[1,],aes(ymin= lower_5percent, ymax =upper_95percent), width=0.5)+ labs(y = "Number of genes", title = paste0(geneSetName,"\n",Celltype))
dev.off()
Expression_norm_per_kb$Mean <- rowMeans(Expression_norm_per_kb)
exMatch_expression <- data.frame(Gene = gene_list, Expression = Expression_norm_per_kb$Mean[match(gene_list, rownames(Expression_norm_per_kb))], Gene_set = geneSetName, Type = geneSetName)
for(i in 1:length(expression_matched)){
exMatch_expression <- rbind(exMatch_expression, data.frame(Gene = expression_matched[[i]][[1]], Expression = Expression_norm_per_kb$Mean[match(expression_matched[[i]][[1]], rownames(Expression_norm_per_kb))], Gene_set = paste0("EM_", i), Type ="Expression matched"))
}
for(i in 1:10){
random_genes <- sample(rownames(Expression_norm_per_kb),180)
exMatch_expression <- rbind(exMatch_expression, data.frame(Gene = random_genes, Expression = Expression_norm_per_kb$Mean[match(random_genes, rownames(Expression_norm_per_kb))], Gene_set = paste0("Random_", i), Type = "Random"))
}
exMatch_expression$Type <- factor(exMatch_expression$Type, levels = c(geneSetName,"Expression matched", "Random"))
exMatch_expression$Gene_set <- as.factor(exMatch_expression$Gene_set)
exMatch_expression$Gene_set <- relevel(exMatch_expression$Gene_set, geneSetName)
### How about 3'UTR length?
exMatch_expression$UTRlength <- UTR_lengths[match(exMatch_expression$Gene, rownames(UTR_lengths)),1]
pdf(paste0(geneSetName, "_", Celltype, "_expression_UTR_match.pdf"), width=9)
ggplot(exMatch_expression, aes(x = Gene_set, y = Expression, fill = Type)) + geom_boxplot(outlier.size =0) + scale_fill_manual(values = c("firebrick","lightblue1", "lightgoldenrod"))
ggplot(exMatch_expression, aes(x = Gene_set, y = UTRlength, fill = Type)) + geom_boxplot(outlier.size =0)+scale_y_log10() + scale_fill_manual(values = c("firebrick","lightblue1", "lightgoldenrod"))
dev.off()
exMatch_expression$iCLIP_hit_FDR0.05 <- exMatch_expression$Gene %in% iCLIP_3UTR_hits
write.table(exMatch_expression, file = paste0(geneSetName, "_",Celltype,"_expression_matched.txt"), sep = "\t", quote = F, row.names =F)
exMatch_processed_data <- list(iCLIP_hits=iCLIP_hits, exMatch_overlaps= exMatch_overlaps, exMatch_expression=exMatch_expression)
saveRDS(exMatch_processed_data, paste0(geneSetName, "_", Celltype, "_expressionMatch_processed_data.rds"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.prodlim.R
\name{plot.prodlim}
\alias{lines.prodlim}
\alias{plot.prodlim}
\title{Plotting event probabilities over time}
\usage{
\method{plot}{prodlim}(x, type, cause = 1, select, newdata, add = FALSE,
col, lty, lwd, ylim, xlim, ylab, xlab = "Time", timeconverter,
legend = TRUE, logrank = FALSE, marktime = FALSE, confint = TRUE,
automar, atrisk = ifelse(add, FALSE, TRUE), timeOrigin = 0, axes = TRUE,
background = TRUE, percent = TRUE, minAtrisk = 0, limit = 10, ...)
}
\arguments{
\item{x}{an object of class `prodlim' as returned by the
\code{prodlim} function.}
\item{type}{Either \code{"surv"} or \code{"cuminc"} controls what part of the object is plotted.}
\item{cause}{determines the cause of the cumulative incidence
function. Currently one cause is allowed at a time, but you may
call the function again with add=TRUE to add the lines of the other
causes.}
\item{select}{Select which lines to plot. This can be used when there
are many strata or many competing risks to select a subset of the lines.
However, a cleaner way to select covariate strata is to use the argument \code{newdata}.
Another application is when there are many competing risks and it is desired (for the stacked plot)
to stack and show only a subset of the cumulative incidence functions.}
\item{newdata}{a data frame containing covariate strata for which to show
curves. When omitted element \code{X} of object
\code{x} is used.}
\item{add}{if \code{TRUE} curves are added to an existing plot.}
\item{col}{color for curves. Default is \code{1:number(curves)}}
\item{lty}{line type for curves. Default is 1.}
\item{lwd}{line width for all curves. Default is 3.}
\item{ylim}{limits of the y-axis}
\item{xlim}{limits of the x-axis}
\item{ylab}{label for the y-axis}
\item{xlab}{label for the x-axis}
\item{legend}{if TRUE a legend is plotted by calling the function
legend. Optional arguments of the function \code{legend} can be
given in the form \code{legend.x=val} where x is the name of the
argument and val the desired value. See also Details.}
\item{logrank}{If TRUE, the logrank p-value will be extracted from
a call to \code{survdiff} and added to the legend. This works only
for survival models, i.e. Kaplan-Meier with discrete predictors.}
\item{marktime}{if TRUE the curves are tick-marked at right
censoring times by invoking the function \code{markTime}. Optional
arguments of the function \code{markTime} can be given in the form
\code{confint.x=val} as with legend. See also Details.}
\item{confint}{if TRUE pointwise confidence intervals are plotted
by invoking the function \code{confInt}. Optional arguments of the
function \code{confInt} can be given in the form
\code{confint.x=val} as with legend. See also Details.}
\item{automar}{If TRUE the function tries to find suitable values
for the figure margins around the main plotting region.}
\item{atrisk}{if TRUE display numbers of subjects at risk by
invoking the function \code{atRisk}. Optional arguments of the
function \code{atRisk} can be given in the form \code{atrisk.x=val}
as with legend. See also Details.}
\item{timeOrigin}{Start of the time axis}
\item{axes}{If true axes are drawn. See details.}
\item{background}{If \code{TRUE} the background color and grid
color can be controlled using smart arguments SmartControl, such as
background.bg="yellow" or background.bg=c("gray66","gray88"). The
following defaults are passed to \code{background} by
\code{plot.prodlim}: horizontal=seq(0,1,.25), vertical=NULL,
bg="gray77", fg="white". See \code{background} for all arguments,
and the examples below.}
\item{percent}{If true the y-axis is labeled in percent.}
\item{minAtrisk}{Integer. Show the curve only until the number
at-risk is at least \code{minAtrisk}}
\item{limit}{When newdata is not specified and the number of lines
in element \code{X} of object \code{x} exceeds limits, only the
results for covariate constellations of the first, the middle and
the last row in \code{X} are shown. Otherwise all lines of \code{X}
are shown.}
\item{...}{Parameters that are filtered by
\code{\link{SmartControl}} and then passed to the functions
\code{\link{plot}}, \code{\link{legend}}, \code{\link{axis}},
\code{\link{atRisk}}, \code{\link{confInt}},
\code{\link{markTime}}, \code{\link{backGround}}}
}
\value{
The (invisible) object.
}
\description{
Function to plot survival and cumulative incidence curves against time.
}
\details{
From version 1.1.3 on the arguments legend.args, atrisk.args, confint.args
are obsolete and only available for backward compatibility. Instead
arguments for the invoked functions \code{atRisk}, \code{legend},
\code{confInt}, \code{markTime}, \code{axis} are simply specified as
\code{atrisk.cex=2}. The specification is not case sensitive, thus
\code{atRisk.cex=2} or \code{atRISK.cex=2} will have the same effect. The
function \code{axis} is called twice, and arguments of the form
\code{axis1.labels}, \code{axis1.at} are used for the time axis whereas
\code{axis2.pos}, \code{axis2.labels}, etc. are used for the y-axis.
These arguments are processed via \code{\dots{}} of \code{plot.prodlim} and
inside by using the function \code{SmartControl}. Documentation of these
arguments can be found in the help pages of the corresponding functions.
}
\note{
Similar functionality is provided by the function
\code{\link{plot.survfit}} of the survival library
}
\examples{
## simulate right censored data from a two state model
set.seed(100)
dat <- SimSurv(100)
# with(dat,plot(Hist(time,status)))
### marginal Kaplan-Meier estimator
kmfit <- prodlim(Hist(time, status) ~ 1, data = dat)
plot(kmfit)
# change time range
plot(kmfit,xlim=c(0,4))
# change scale of y-axis
plot(kmfit,percent=FALSE)
# mortality instead of survival
plot(kmfit,type="cuminc")
# change axis label and position of ticks
plot(kmfit,
xlim=c(0,10),
axis1.at=seq(0,10,1),
axis1.labels=0:10,
xlab="Years",
axis2.las=2,
atrisk.at=seq(0,10,2.5),
atrisk.title="")
# change background color
plot(kmfit,
xlim=c(0,10),
confint.citype="shadow",
col=1,
axis1.at=0:10,
axis1.labels=0:10,
xlab="Years",
axis2.las=2,
atrisk.at=seq(0,10,2.5),
atrisk.title="",
background=TRUE,
background.fg="white",
background.horizontal=seq(0,1,.25/2),
background.vertical=seq(0,10,2.5),
background.bg=c("gray88"))
# change type of confidence limits
plot(kmfit,
xlim=c(0,10),
confint.citype="dots",
col=4,
background=TRUE,
background.bg=c("white","gray88"),
background.fg="gray77",
background.horizontal=seq(0,1,.25/2),
background.vertical=seq(0,10,2))
### Kaplan-Meier in discrete strata
kmfitX <- prodlim(Hist(time, status) ~ X1, data = dat)
plot(kmfitX)
# move legend
plot(kmfitX,legend.x="bottomleft",atRisk.cex=1.3,
atrisk.title="No. subjects")
## Control the order of strata
## since version 1.5.1 prodlim does obey the order of
## factor levels
dat$group <- factor(cut(dat$X2,c(-Inf,0,0.5,Inf)),
labels=c("High","Intermediate","Low"))
kmfitG <- prodlim(Hist(time, status) ~ group, data = dat)
plot(kmfitG)
## relevel
dat$group2 <- factor(cut(dat$X2,c(-Inf,0,0.5,Inf)),
levels=c("(0.5, Inf]","(0,0.5]","(-Inf,0]"),
labels=c("Low","Intermediate","High"))
kmfitG2 <- prodlim(Hist(time, status) ~ group2, data = dat)
plot(kmfitG2)
# add log-rank test to legend
plot(kmfitX,
atRisk.cex=1.3,
logrank=TRUE,
legend.x="topright",
atrisk.title="at-risk")
# change atrisk labels
plot(kmfitX,
legend.x="bottomleft",
atrisk.title="Patients",
atrisk.cex=0.9,
atrisk.labels=c("X1=0","X1=1"))
# multiple categorical factors
kmfitXG <- prodlim(Hist(time,status)~X1+group2,data=dat)
plot(kmfitXG,select=1:2)
### Kaplan-Meier in continuous strata
kmfitX2 <- prodlim(Hist(time, status) ~ X2, data = dat)
plot(kmfitX2,xlim=c(0,10))
# specify values of X2 for which to show the curves
plot(kmfitX2,xlim=c(0,10),newdata=data.frame(X2=c(-1.8,0,1.2)))
### Cluster-correlated data
library(survival)
cdat <- cbind(SimSurv(20),patnr=sample(1:5,size=20,replace=TRUE))
kmfitC <- prodlim(Hist(time, status) ~ cluster(patnr), data = cdat)
plot(kmfitC)
plot(kmfitC,atrisk.labels=c("Units","Patients"))
kmfitC2 <- prodlim(Hist(time, status) ~ X1+cluster(patnr), data = cdat)
plot(kmfitC2)
plot(kmfitC2,atrisk.labels=c("Teeth","Patients","Teeth","Patients"),
atrisk.col=c(1,1,2,2))
### Cluster-correlated data with strata
n = 50
foo = runif(n)
bar = rexp(n)
baz = rexp(n,1/2)
d = stack(data.frame(foo,bar,baz))
d$cl = sample(10, 3*n, replace=TRUE)
fit = prodlim(Surv(values) ~ ind + cluster(cl), data=d)
plot(fit)
## simulate right censored data from a competing risk model
datCR <- SimCompRisk(100)
with(datCR,plot(Hist(time,event)))
### marginal Aalen-Johansen estimator
ajfit <- prodlim(Hist(time, event) ~ 1, data = datCR)
plot(ajfit) # same as plot(ajfit,cause=1)
# cause 2
plot(ajfit,cause=2)
# both in one
plot(ajfit,cause=1)
plot(ajfit,cause=2,add=TRUE,col=2)
### stacked plot
plot(ajfit,cause="stacked",select=2)
### stratified Aalen-Johansen estimator
ajfitX1 <- prodlim(Hist(time, event) ~ X1, data = datCR)
plot(ajfitX1)
## add total number at-risk to a stratified curve
ttt = 1:10
plot(ajfitX1,atrisk.at=ttt,col=2:3)
plot(ajfit,add=TRUE,col=1)
atRisk(ajfit,newdata=datCR,col=1,times=ttt,line=3,labels="Total")
## stratified Aalen-Johansen estimator in nearest neighborhoods
## of a continuous variable
ajfitX <- prodlim(Hist(time, event) ~ X1+X2, data = datCR)
plot(ajfitX,newdata=data.frame(X1=c(1,1,0),X2=c(4,10,10)))
plot(ajfitX,newdata=data.frame(X1=c(1,1,0),X2=c(4,10,10)),cause=2)
## stacked plot
plot(ajfitX,
newdata=data.frame(X1=0,X2=0.1),
cause="stacked",
legend.title="X1=0,X2=0.1",
legend.legend=paste("cause:",getStates(ajfitX$model.response)),
plot.main="Subject specific stacked plot")
}
\author{
Thomas Alexander Gerds <tag@biostat.ku.dk>
}
\seealso{
\code{\link{plot}}, \code{\link{legend}}, \code{\link{axis}},
\code{\link{prodlim}},\code{\link{plot.Hist}},\code{\link{summary.prodlim}},
\code{\link{neighborhood}}, \code{\link{atRisk}}, \code{\link{confInt}},
\code{\link{markTime}}, \code{\link{backGround}}
}
\keyword{survival}
| /man/plot.prodlim.Rd | no_license | statwonk/prodlim | R | false | true | 10,414 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.prodlim.R
\name{plot.prodlim}
\alias{lines.prodlim}
\alias{plot.prodlim}
\title{Plotting event probabilities over time}
\usage{
\method{plot}{prodlim}(x, type, cause = 1, select, newdata, add = FALSE,
col, lty, lwd, ylim, xlim, ylab, xlab = "Time", timeconverter,
legend = TRUE, logrank = FALSE, marktime = FALSE, confint = TRUE,
automar, atrisk = ifelse(add, FALSE, TRUE), timeOrigin = 0, axes = TRUE,
background = TRUE, percent = TRUE, minAtrisk = 0, limit = 10, ...)
}
\arguments{
\item{x}{an object of class `prodlim' as returned by the
\code{prodlim} function.}
\item{type}{Either \code{"surv"} or \code{"cuminc"} controls what part of the object is plotted.}
\item{cause}{determines the cause of the cumulative incidence
function. Currently one cause is allowed at a time, but you may
call the function again with add=TRUE to add the lines of the other
causes.}
\item{select}{Select which lines to plot. This can be used when there
are many strata or many competing risks to select a subset of the lines.
However, a cleaner way to select covariate strata is to use the argument \code{newdata}.
Another application is when there are many competing risks and it is desired (for the stacked plot)
to stack and show only a subset of the cumulative incidence functions.}
\item{newdata}{a data frame containing covariate strata for which to show
curves. When omitted element \code{X} of object
\code{x} is used.}
\item{add}{if \code{TRUE} curves are added to an existing plot.}
\item{col}{color for curves. Default is \code{1:number(curves)}}
\item{lty}{line type for curves. Default is 1.}
\item{lwd}{line width for all curves. Default is 3.}
\item{ylim}{limits of the y-axis}
\item{xlim}{limits of the x-axis}
\item{ylab}{label for the y-axis}
\item{xlab}{label for the x-axis}
\item{legend}{if TRUE a legend is plotted by calling the function
legend. Optional arguments of the function \code{legend} can be
given in the form \code{legend.x=val} where x is the name of the
argument and val the desired value. See also Details.}
\item{logrank}{If TRUE, the logrank p-value will be extracted from
a call to \code{survdiff} and added to the legend. This works only
for survival models, i.e. Kaplan-Meier with discrete predictors.}
\item{marktime}{if TRUE the curves are tick-marked at right
censoring times by invoking the function \code{markTime}. Optional
arguments of the function \code{markTime} can be given in the form
\code{confint.x=val} as with legend. See also Details.}
\item{confint}{if TRUE pointwise confidence intervals are plotted
by invoking the function \code{confInt}. Optional arguments of the
function \code{confInt} can be given in the form
\code{confint.x=val} as with legend. See also Details.}
\item{automar}{If TRUE the function tries to find suitable values
for the figure margins around the main plotting region.}
\item{atrisk}{if TRUE display numbers of subjects at risk by
invoking the function \code{atRisk}. Optional arguments of the
function \code{atRisk} can be given in the form \code{atrisk.x=val}
as with legend. See also Details.}
\item{timeOrigin}{Start of the time axis}
\item{axes}{If true axes are drawn. See details.}
\item{background}{If \code{TRUE} the background color and grid
color can be controlled using smart arguments SmartControl, such as
background.bg="yellow" or background.bg=c("gray66","gray88"). The
following defaults are passed to \code{background} by
\code{plot.prodlim}: horizontal=seq(0,1,.25), vertical=NULL,
bg="gray77", fg="white". See \code{background} for all arguments,
and the examples below.}
\item{percent}{If true the y-axis is labeled in percent.}
\item{minAtrisk}{Integer. Show the curve only until the number
at-risk is at least \code{minAtrisk}}
\item{limit}{When newdata is not specified and the number of lines
in element \code{X} of object \code{x} exceeds limits, only the
results for covariate constellations of the first, the middle and
the last row in \code{X} are shown. Otherwise all lines of \code{X}
are shown.}
\item{...}{Parameters that are filtered by
\code{\link{SmartControl}} and then passed to the functions
\code{\link{plot}}, \code{\link{legend}}, \code{\link{axis}},
\code{\link{atRisk}}, \code{\link{confInt}},
\code{\link{markTime}}, \code{\link{backGround}}}
}
\value{
The (invisible) object.
}
\description{
Function to plot survival and cumulative incidence curves against time.
}
\details{
From version 1.1.3 on the arguments legend.args, atrisk.args, confint.args
are obsolete and only available for backward compatibility. Instead
arguments for the invoked functions \code{atRisk}, \code{legend},
\code{confInt}, \code{markTime}, \code{axis} are simply specified as
\code{atrisk.cex=2}. The specification is not case sensitive, thus
\code{atRisk.cex=2} or \code{atRISK.cex=2} will have the same effect. The
function \code{axis} is called twice, and arguments of the form
\code{axis1.labels}, \code{axis1.at} are used for the time axis whereas
\code{axis2.pos}, \code{axis2.labels}, etc. are used for the y-axis.
These arguments are processed via \code{\dots{}} of \code{plot.prodlim} and
inside by using the function \code{SmartControl}. Documentation of these
arguments can be found in the help pages of the corresponding functions.
}
\note{
Similar functionality is provided by the function
\code{\link{plot.survfit}} of the survival library
}
\examples{
## simulate right censored data from a two state model
set.seed(100)
dat <- SimSurv(100)
# with(dat,plot(Hist(time,status)))
### marginal Kaplan-Meier estimator
kmfit <- prodlim(Hist(time, status) ~ 1, data = dat)
plot(kmfit)
# change time range
plot(kmfit,xlim=c(0,4))
# change scale of y-axis
plot(kmfit,percent=FALSE)
# mortality instead of survival
plot(kmfit,type="cuminc")
# change axis label and position of ticks
plot(kmfit,
xlim=c(0,10),
axis1.at=seq(0,10,1),
axis1.labels=0:10,
xlab="Years",
axis2.las=2,
atrisk.at=seq(0,10,2.5),
atrisk.title="")
# change background color
plot(kmfit,
xlim=c(0,10),
confint.citype="shadow",
col=1,
axis1.at=0:10,
axis1.labels=0:10,
xlab="Years",
axis2.las=2,
atrisk.at=seq(0,10,2.5),
atrisk.title="",
background=TRUE,
background.fg="white",
background.horizontal=seq(0,1,.25/2),
background.vertical=seq(0,10,2.5),
background.bg=c("gray88"))
# change type of confidence limits
plot(kmfit,
xlim=c(0,10),
confint.citype="dots",
col=4,
background=TRUE,
background.bg=c("white","gray88"),
background.fg="gray77",
background.horizontal=seq(0,1,.25/2),
background.vertical=seq(0,10,2))
### Kaplan-Meier in discrete strata
kmfitX <- prodlim(Hist(time, status) ~ X1, data = dat)
plot(kmfitX)
# move legend
plot(kmfitX,legend.x="bottomleft",atRisk.cex=1.3,
atrisk.title="No. subjects")
## Control the order of strata
## since version 1.5.1 prodlim does obey the order of
## factor levels
dat$group <- factor(cut(dat$X2,c(-Inf,0,0.5,Inf)),
labels=c("High","Intermediate","Low"))
kmfitG <- prodlim(Hist(time, status) ~ group, data = dat)
plot(kmfitG)
## relevel
dat$group2 <- factor(cut(dat$X2,c(-Inf,0,0.5,Inf)),
levels=c("(0.5, Inf]","(0,0.5]","(-Inf,0]"),
labels=c("Low","Intermediate","High"))
kmfitG2 <- prodlim(Hist(time, status) ~ group2, data = dat)
plot(kmfitG2)
# add log-rank test to legend
plot(kmfitX,
atRisk.cex=1.3,
logrank=TRUE,
legend.x="topright",
atrisk.title="at-risk")
# change atrisk labels
plot(kmfitX,
legend.x="bottomleft",
atrisk.title="Patients",
atrisk.cex=0.9,
atrisk.labels=c("X1=0","X1=1"))
# multiple categorical factors
kmfitXG <- prodlim(Hist(time,status)~X1+group2,data=dat)
plot(kmfitXG,select=1:2)
### Kaplan-Meier in continuous strata
kmfitX2 <- prodlim(Hist(time, status) ~ X2, data = dat)
plot(kmfitX2,xlim=c(0,10))
# specify values of X2 for which to show the curves
plot(kmfitX2,xlim=c(0,10),newdata=data.frame(X2=c(-1.8,0,1.2)))
### Cluster-correlated data
library(survival)
cdat <- cbind(SimSurv(20),patnr=sample(1:5,size=20,replace=TRUE))
kmfitC <- prodlim(Hist(time, status) ~ cluster(patnr), data = cdat)
plot(kmfitC)
plot(kmfitC,atrisk.labels=c("Units","Patients"))
kmfitC2 <- prodlim(Hist(time, status) ~ X1+cluster(patnr), data = cdat)
plot(kmfitC2)
plot(kmfitC2,atrisk.labels=c("Teeth","Patients","Teeth","Patients"),
atrisk.col=c(1,1,2,2))
### Cluster-correlated data with strata
n = 50
foo = runif(n)
bar = rexp(n)
baz = rexp(n,1/2)
d = stack(data.frame(foo,bar,baz))
d$cl = sample(10, 3*n, replace=TRUE)
fit = prodlim(Surv(values) ~ ind + cluster(cl), data=d)
plot(fit)
## simulate right censored data from a competing risk model
datCR <- SimCompRisk(100)
with(datCR,plot(Hist(time,event)))
### marginal Aalen-Johansen estimator
ajfit <- prodlim(Hist(time, event) ~ 1, data = datCR)
plot(ajfit) # same as plot(ajfit,cause=1)
# cause 2
plot(ajfit,cause=2)
# both in one
plot(ajfit,cause=1)
plot(ajfit,cause=2,add=TRUE,col=2)
### stacked plot
plot(ajfit,cause="stacked",select=2)
### stratified Aalen-Johansen estimator
ajfitX1 <- prodlim(Hist(time, event) ~ X1, data = datCR)
plot(ajfitX1)
## add total number at-risk to a stratified curve
ttt = 1:10
plot(ajfitX1,atrisk.at=ttt,col=2:3)
plot(ajfit,add=TRUE,col=1)
atRisk(ajfit,newdata=datCR,col=1,times=ttt,line=3,labels="Total")
## stratified Aalen-Johansen estimator in nearest neighborhoods
## of a continuous variable
ajfitX <- prodlim(Hist(time, event) ~ X1+X2, data = datCR)
plot(ajfitX,newdata=data.frame(X1=c(1,1,0),X2=c(4,10,10)))
plot(ajfitX,newdata=data.frame(X1=c(1,1,0),X2=c(4,10,10)),cause=2)
## stacked plot
plot(ajfitX,
newdata=data.frame(X1=0,X2=0.1),
cause="stacked",
legend.title="X1=0,X2=0.1",
legend.legend=paste("cause:",getStates(ajfitX$model.response)),
plot.main="Subject specific stacked plot")
}
\author{
Thomas Alexander Gerds <tag@biostat.ku.dk>
}
\seealso{
\code{\link{plot}}, \code{\link{legend}}, \code{\link{axis}},
\code{\link{prodlim}},\code{\link{plot.Hist}},\code{\link{summary.prodlim}},
\code{\link{neighborhood}}, \code{\link{atRisk}}, \code{\link{confInt}},
\code{\link{markTime}}, \code{\link{backGround}}
}
\keyword{survival}
|
#
# Generate interface to global variables.
#
# Two types of global variables
# const primitive types
# and non constant values of any type
# See inst/examples/globals/
#
# For the non-constant values, we get the address of the variable
# and work with that. This is a <type>Ref. So, if it is an int,
# we define a class intRef extending RC++Reference and if it is a
# typedef'ed struct named A, e.g. typedef struct { ... } A
# we have a ARef. And a pointer to an A, then an ARefRef
#
generateGlobalVariableCode =
#
# Create the code needed to access the global variables of a translation
# unit from R.
#
# tu    - the translation unit object.
# files - optional character vector restricting which source files'
#         global variables are considered.
# gvars - the global variable nodes, or a character vector of variable
#         names which is resolved via getGlobalVariables().
# ...   - passed on to generateDynamicGlobalVariableCode() for each variable.
#
# Returns a list with elements 'vars' (accessor code for the non-constant
# variables, grouped field-wise) and 'consts' (code for the constants),
# with class "DynamicGlobalVariableCode".
#
function(tu, files = character(), gvars = getGlobalVariables(tu, files), ...)
{
  if(is.character(gvars))
    gvars = getGlobalVariables(tu, files)[gvars]

  if(!length(gvars))
    return(structure(list(vars = list(), consts = list()),
                     class = "DynamicGlobalVariableCode"))

  consts = computeGlobalConstants(tu, gvars, files = files)

    # Remove the constants from gvars so only the non-constant
    # variables are processed below.
  if(length(consts$cmds)) {
    idx = match(names(consts$cmds), names(gvars))
    gvars = gvars[ - idx ]
  }

    # Now deal with the non-constant variables.
  if(length(gvars)) {
    vars = lapply(gvars, resolveType, tu)
      # Generate the accessor code for each variable and then combine
      # the per-variable results field-wise (native, r, ...).
    ans = lapply(names(vars),
                 function(id)
                   generateDynamicGlobalVariableCode(id, vars[[id]]@type, tu, ...))
      # Fix: the original assigned names(ans) to itself (a no-op),
      # leaving the per-variable results unnamed.
    names(ans) = names(vars)
    ans = groupEls(ans)
  } else
    ans = list()

  ans = list(vars = ans, consts = consts)
  class(ans) <- "DynamicGlobalVariableCode"
  ans
}
# Register the S3 classes with the S4 system so they can appear in
# S4 method signatures (e.g. the writeCode methods below).
setOldClass( "GlobalVariableCode")
setOldClass( "DynamicGlobalVariableCode")
setMethod("writeCode", "GlobalVariableCode",
#
# Write the generated interface code for the non-constant global
# variables, either the R side (target == "r") or the native side
# (target == "native").
#
function(obj, target, file = stdout(), ..., includes = character())
{
   if(length(obj) == 0)
      return(TRUE)

   if(is.character(file)) {
        # A file name was given: open it for writing and make certain
        # the connection is closed again when we leave this function.
      file = file(file, "w")
      on.exit(close(file))
   }

   if(target == "r") {
      for(def in obj$vars$classDef)
         cat(def, "\n", file = file, ...)
      writeCode(obj$vars$initGlobalVarFun, target, file = file, ...)
   } else if(target == "native") {
      writeIncludes(includes, file)
        # Emit the get routines followed by the valueOf routines,
        # separating consecutive definitions with a blank line.
      for(el in c(obj$vars$get, obj$vars$valueOf)) {
         writeCode(el, target, file = file, ...)
         cat("\n", file = file)
      }
   }

   return(TRUE)
})
setMethod("writeCode", "DynamicGlobalVariableCode",
#
# Output the code to interface to the non-constant global variables:
# the R accessors and active bindings (target == "r") or the native
# get/set routines (target == "native").
#
function(obj, target, file = stdout(), ..., includes = character())
{
   if(length(obj) == 0)
      return(TRUE)

   if(is.character(file)) {
        # A file name was given: open it and close it again on exit.
      file = file(file, "w")
      on.exit(close(file))
   }

   if(target == "r") {
      #sapply(obj$vars$classDef, cat, "\n", file = file, ...)
      sapply(obj$vars$raccessor, writeCode, target, file)
      sapply(obj$vars$rbinding, writeCode, target, file, "\n")
   } else if(target == "native") {
      writeIncludes(includes, file)
      sapply(obj$vars$get, function(x) { writeCode(x, target, file = file, ...); cat("\n", file = file) ; TRUE})
      sapply(obj$vars$set, function(x) { writeCode(x, target, file = file, ...); cat("\n", file = file) ; TRUE})
   }

     # Fix: the original ended with invisible(return(TRUE)), which is a
     # no-op wrapper - return() exits the function before invisible()
     # can apply. Return the flag invisibly, as intended.
   invisible(TRUE)
})
# Generic computing the type name to use inside a C sizeof() expression
# for a given (resolved) type description.
setGeneric("getSizeofTypeName", function(type) standardGeneric("getSizeofTypeName"))
setMethod("getSizeofTypeName", "ANY",
          function(type) {
               # Default case: resolve pending/indirect type references
               # and dispatch again; otherwise the type's own name is
               # what sizeof() needs.
             unresolved = is(type, "PendingType") || is(type, "ResolvedTypeReference")
             if(unresolved)
                getSizeofTypeName(forceResolve(type))
             else
                type@name
          })
setMethod("getSizeofTypeName", "ArrayType",
          function(type) {
               # Element type name followed by one "[ n ]" term per
               # array dimension.
             type@type = forceResolve(type@type)
             dims = getArrayDimension(type)
             brackets = paste("[", dims, "]", collapse = " ")
             paste(type@type@name, brackets)
          })
setMethod("getSizeofTypeName", "PointerType",
          function(type) {
               # The pointee's sizeof-name followed by one "*" per
               # level of indirection.
             stars = paste(rep("*", length = type@depth), collapse = "")
             paste(getSizeofTypeName(type@type), stars)
          })
generateDynamicGlobalVariableCode =
  # Newer version than below
  #
  # Generate the interface code for a single non-constant global variable:
  #   - a native routine R_get_<name> returning the current value, or a
  #     reference to the native object when the copy argument is FALSE,
  #   - a native routine R_set_<name> assigning a new value,
  #   - an R accessor function get_<name>() calling these via .Call(),
  #   - an active binding so the variable can be read/assigned by name in R.
  #
  # name    - name of the global variable in the native code.
  # type    - the (resolved) type description of the variable.
  # nodes   - not used in this function; kept for interface compatibility.
  # typeMap - optional map controlling R <-> native value conversion.
  # package - not used in this function; kept for interface compatibility.
  #
  # Returns a list with the native set/get routine definitions, the R
  # accessor and active-binding definitions, and registration information
  # (routine name + argument count) for the native routines.
  #
function(name, type, nodes, typeMap = list(), package = character())
{
  getFunName = paste("get", name, sep = "_")
  isArray = is(type, "ArrayType")

    # R code that coerces 'value' to the target type before it is
    # handed to the native set routine.
  k = coerceRValue("value", type, type, typeMap)
  if(!inherits(k, "IfStatement"))
     k = paste("value", "=", k)

#  hasCopy = if(isArray) !is(type@type, "BuiltinPrimitiveType") else !is(type, "BuiltinPrimitiveType")
    # NOTE(review): the computed value of hasCopy is immediately overridden
    # by the unconditional TRUE below, so every accessor currently gets a
    # 'copy' parameter - presumably intentional; confirm before removing.
  hasCopy = isArray || !is(type, "BuiltinPrimitiveType")
  hasCopy = TRUE

    # Formal arguments and defaults of the generated R accessor.
  params = c("value", if(isArray) c("from", "to"), if(hasCopy) "copy")
  defaults = c("", if(isArray) c("1", type@length), if(hasCopy) "TRUE")

    # Body of the R accessor: a missing 'value' means get, otherwise
    # coerce the value, call the set routine and return the value.
  rcode = c(
     "if(missing(value))",
     paste(" .Call('", paste("R_get", name, sep = "_"), "'", if(hasCopy) ", as.logical(copy)", if(isArray) ", as.integer(from), as.integer(to)", ")", sep = ""),
     "else {",
     paste(" ", k),
     paste(" .Call('", paste("R_set", name, sep = "_"), "', value)", sep = ""),
     " value",
     "}"
  )

    # R function for getting or setting value
  rfun = RFunctionDefinition(getFunName, rcode, params, defaults = defaults,
                             obj = if(hasCopy) "RFunctionDefinition" else "RAnonymousFunctionDefinition")
    # Active binding: reading/assigning <name> in R invokes the accessor.
  rdef = ActiveBinding(name, if(hasCopy) getFunName else rfun)

  cgetName = paste("R_get", name, sep = "_")

    #XXX This needs to be moved to a general place and used in other contexts
    # e.g. in convertValueToR()
    # Add a copy parameter to that? Or create a convertValueRefToR()
  if(isArray) {
     type = fixArrayElementTypeNames(type)
#     convertValueToR(name, PointerType(type@type), character())
#     arrayTypeName = getReferenceClassName(type@type)
#     arrayClass = paste(capitalize(type@type@name, first = FALSE), "Array", sep = "")
       # C cast expression for the array, e.g. "*(const int(*)[ 2 ][ 3 ]) &".
     dims = getArrayDimension(type)
     cast = paste("*(const ", type@name, "(*)", paste("[", dims, "]", sep = "", collapse = ""), ") &")
# hasCopy = !is(type@type, "BuiltinPrimitiveType")
       # C expression copying (a slice of) the array into an R object.
     arrayRValue = paste(getCopyArrayName(type),
                         "(",
                         paste(dims, collapse = ", "),
                         ", ",
                         #XXX Cast which we don't want "(", getNativeDeclaration("", type, , addSemiColon = FALSE, const = TRUE),")",
                         cast,
                         #XXXXXXX Do we know we have a copy parameter?
                         name, if(hasCopy) ", INTEGER(copy)[0]", ", from - 1, to - 1",
                         ")")
     arrayClass = paste(capitalize(type@type@name, first = FALSE), paste(rep("Array", length(dims)), collapse = ""), sep = "")
       #??? Use intPtr or intArray in the tag name of the external pointer? If we use intArray, then change the code we generate to look for that.
     tagName = getReferenceClassName(type)
#     ref = paste("R_createArrayReference(", name, ",", dQuote(arrayClass), ",", dQuote(tagName), ",", type@length, ", sizeof(", getSizeofTypeName(type), "))")
       # C expression creating an external-pointer reference to the array.
     ref = createNativeReference(name, type)
  } else {
       # Non-array: a reference is just a pointer to the variable.
     ref = convertValueToR(paste("&", name), PointerType(type), character(), typeMap = typeMap)
  }

    # used in both set and get.
  rvalue = convertValueToR(name, type, character(), typeMap = typeMap)

    # The native get routine: returns a reference when copy is FALSE,
    # otherwise a copy of the value (or of the requested array slice).
  getCCode = CRoutineDefinition(cgetName,
                c(externC,
                  "SEXP",
                  paste(cgetName, "(", if(hasCopy) "SEXP copy", if(isArray) paste(if(hasCopy) ", ", " SEXP start, SEXP end", collapse = ""), ")"),
                  "{",
                  "SEXP ans;",
                  "",
                  if(hasCopy)
                     c("if(!LOGICAL(copy)[0])",
                       paste(" ans =", ref, ";"),
                       "else"
                      ),
                  if(isArray)
                     c(" {",
                       "int from, to;",
                       "if(Rf_length(start) == 2) to = INTEGER(start)[1]; ",
                       "else if(Rf_length(start) == 0 || Rf_length(end) == 0) {",
                       ' PROBLEM "need scalar values for range of array to fetch"',
                       " ERROR",
                       "}",
                       "from = INTEGER(start)[0]; to = INTEGER(end)[0];",
                       ""),
                  paste(" ans = ", if(isArray) arrayRValue else rvalue, ";"),
                  if(isArray) " }",
                  "",
                  "return(ans);",
                  "}"))

  csetName = paste("R_set", name, sep = "_")
    # C code converting the incoming R 'value' and assigning it to <name>.
  rconvert = convertRValue(name, "value", type, "value", typeMap = typeMap)
    # The native set routine; returns R_NilValue rather than copying
    # the new value back to R.
  setCCode = CRoutineDefinition(csetName,
                c(externC,
                  "SEXP",
                  paste(csetName, "(SEXP value)"),
                  "{",
                  "SEXP ans = R_NilValue;",
                  "",
                  rconvert,
                    # Don't copy the object back
                    # paste("ans = ", rvalue, ";"),
                  "",
                  "return(ans);",
                  "}"))

    # Registration info: the get routine takes the copy flag (0/1 args)
    # plus from/to when the variable is an array.
  list(set = setCCode, get = getCCode, raccessor = if(hasCopy) rfun else NULL, rbinding = rdef,
       registration = list(list(name = setCCode@name, nargs = 1),
                           list(name = getCCode@name, nargs = 0 + hasCopy + if(isArray) 2 else 0)))
}
generateGlobalVarCode =
# Older-style generator for one global variable, producing:
#  - an R class definition for the reference class,
#  - a C routine to fetch a reference to (the address of) the variable,
#  - a C routine to fetch the current value via that reference,
#  - R code to create the R variable in .onAttach/.First.lib.
#
# XXX Need to tidy this up. Want to make var a PointerType
# of one more than the actual value passed in.
# But need to calculate the class name properly and in different
# ways in the function?
#
# name             - name of the global variable in the native code.
# var              - its (resolved) type description.
# nodes, typeMap   - as in generateDynamicGlobalVariableCode().
# package          - optional package name used for the assignment location.
# defaultBaseClass - base class the generated reference class extends.
#
function(name, var, nodes, typeMap = list(), package = character(), defaultBaseClass = 'RC++Reference')
{
     # The reference type is a pointer one level deeper than the variable.
   if(is(var, "PointerType")) {
      addrType = var
      addrType@depth = as.integer(var@depth + 1)
      typeName = var@typeName
   } else {
      typeName = var@name
      addrType = new("PointerType", typeName = typeName, depth = as.integer(1), type = var)
   }

   className = getReferenceClassName(addrType)
   classDef = paste("setClass('", className, "', contains = '", defaultBaseClass , "')", sep = "")

     # The routine to fetch the value of the address of the variable.
   rname = paste("R_get", name, sep = "_")
   get = CRoutineDefinition(rname,
              c("SEXP",
                paste(rname, "()"),
                "{",
                paste("return( R_MAKE_VAR_REFERENCE(&", name, ', "', className, '"));', sep = ""),
                "}"), 0L)

     # R code to call the get routine and assign the result to an R variable.
     # NOTE(review): in the package branch, 'package' is prefixed with ", "
     # and then both embedded in assignLocation ("package:, pkg") and appended
     # after the closing ")" of the assign() call, and rname is left unquoted -
     # the generated R code looks malformed there; confirm the intent before
     # changing what this branch emits.
   if(length(package) == 0) {
      rname = paste("'", rname, "'", sep = '')
      assignLocation = "globalenv()"
   } else {
      package = paste(",", package)
      assignLocation = paste("package", package, sep = ":")
   }
   alias = paste("assign('", name, "', .Call( ", rname, "), ", assignLocation, ")", package, sep = "")

     # A routine to get the current value of the variable.
   rname = paste("R_get_valueOf", className, sep = "_")
#XXX
#   addrTypeName = getReferenceClassName(addrType)
#   typeName = var@name
     # Fix: use if/else instead of ifelse() on a scalar condition -
     # ifelse() is a vectorized function and can strip attributes;
     # plain if/else is the idiomatic (and equivalent) form here.
   tp = if(is(var, "BuiltinPrimitiveType")) var@name else getReferenceClassName(var)
   type = paste('"', tp, '"', sep = "")
   valueOf = CRoutineDefinition(rname,
                 c("SEXP",
                   paste(rname, "(SEXP obj)"),
                   "{",
                   getNativeDeclaration("tmp", addrType),
                   paste("tmp = (", getNativeDeclaration("", addrType, addSemiColon = FALSE), ") ",
                         derefNativeReference("obj", type, type), ";"),
                   "if(!tmp) {",
                   ' PROBLEM "NULL value found"',
                   " ERROR",
                   "}",
                   paste("return(", convertValueToR("*tmp", var, "tmp", typeMap = typeMap), ");"),
                   "}"), 1L)

   list(classDef = classDef, get = get, alias = alias, valueOf = valueOf)
}
| /R/globalVars.R | no_license | kashenfelter/RGCCTranslationUnit | R | false | false | 13,351 | r |
#
# Generate interface to global variables.
#
# Two types of global variables
# const primitive types
# and non constant values of any type
# See inst/examples/globals/
#
# For the non-constant values, we get the address of the variable
# and work with that. This is a <type>Ref. So, if it is an int,
# we define a class intRef extending RC++Reference and if it is a
# typedef'ed struct named A, e.g. typedef struct { ... } A
# we have a ARef. And a pointer to an A, then an ARefRef
#
generateGlobalVariableCode =
#
# Create the code needed to access the global variables of a translation
# unit from R.
#
# tu    - the translation unit object.
# files - optional character vector restricting which source files'
#         global variables are considered.
# gvars - the global variable nodes, or a character vector of variable
#         names which is resolved via getGlobalVariables().
# ...   - passed on to generateDynamicGlobalVariableCode() for each variable.
#
# Returns a list with elements 'vars' (accessor code for the non-constant
# variables, grouped field-wise) and 'consts' (code for the constants),
# with class "DynamicGlobalVariableCode".
#
function(tu, files = character(), gvars = getGlobalVariables(tu, files), ...)
{
  if(is.character(gvars))
    gvars = getGlobalVariables(tu, files)[gvars]

  if(!length(gvars))
    return(structure(list(vars = list(), consts = list()),
                     class = "DynamicGlobalVariableCode"))

  consts = computeGlobalConstants(tu, gvars, files = files)

    # Remove the constants from gvars so only the non-constant
    # variables are processed below.
  if(length(consts$cmds)) {
    idx = match(names(consts$cmds), names(gvars))
    gvars = gvars[ - idx ]
  }

    # Now deal with the non-constant variables.
  if(length(gvars)) {
    vars = lapply(gvars, resolveType, tu)
      # Generate the accessor code for each variable and then combine
      # the per-variable results field-wise (native, r, ...).
    ans = lapply(names(vars),
                 function(id)
                   generateDynamicGlobalVariableCode(id, vars[[id]]@type, tu, ...))
      # Fix: the original assigned names(ans) to itself (a no-op),
      # leaving the per-variable results unnamed.
    names(ans) = names(vars)
    ans = groupEls(ans)
  } else
    ans = list()

  ans = list(vars = ans, consts = consts)
  class(ans) <- "DynamicGlobalVariableCode"
  ans
}
# Register the S3 classes with the S4 system so they can appear in
# S4 method signatures (e.g. the writeCode methods below).
setOldClass( "GlobalVariableCode")
setOldClass( "DynamicGlobalVariableCode")
setMethod("writeCode", "GlobalVariableCode",
#
# Write the generated interface code for the non-constant global
# variables, either the R side (target == "r") or the native side
# (target == "native").
#
function(obj, target, file = stdout(), ..., includes = character())
{
   if(length(obj) == 0)
      return(TRUE)

   if(is.character(file)) {
        # A file name was given: open it for writing and make certain
        # the connection is closed again when we leave this function.
      file = file(file, "w")
      on.exit(close(file))
   }

   if(target == "r") {
      for(def in obj$vars$classDef)
         cat(def, "\n", file = file, ...)
      writeCode(obj$vars$initGlobalVarFun, target, file = file, ...)
   } else if(target == "native") {
      writeIncludes(includes, file)
        # Emit the get routines followed by the valueOf routines,
        # separating consecutive definitions with a blank line.
      for(el in c(obj$vars$get, obj$vars$valueOf)) {
         writeCode(el, target, file = file, ...)
         cat("\n", file = file)
      }
   }

   return(TRUE)
})
setMethod("writeCode", "DynamicGlobalVariableCode",
#
# Output the code to interface to the non-constant global variables:
# the R accessors and active bindings (target == "r") or the native
# get/set routines (target == "native").
#
function(obj, target, file = stdout(), ..., includes = character())
{
   if(length(obj) == 0)
      return(TRUE)

   if(is.character(file)) {
        # A file name was given: open it and close it again on exit.
      file = file(file, "w")
      on.exit(close(file))
   }

   if(target == "r") {
      #sapply(obj$vars$classDef, cat, "\n", file = file, ...)
      sapply(obj$vars$raccessor, writeCode, target, file)
      sapply(obj$vars$rbinding, writeCode, target, file, "\n")
   } else if(target == "native") {
      writeIncludes(includes, file)
      sapply(obj$vars$get, function(x) { writeCode(x, target, file = file, ...); cat("\n", file = file) ; TRUE})
      sapply(obj$vars$set, function(x) { writeCode(x, target, file = file, ...); cat("\n", file = file) ; TRUE})
   }

     # Fix: the original ended with invisible(return(TRUE)), which is a
     # no-op wrapper - return() exits the function before invisible()
     # can apply. Return the flag invisibly, as intended.
   invisible(TRUE)
})
# Generic computing the type name to use inside a C sizeof() expression
# for a given (resolved) type description.
setGeneric("getSizeofTypeName", function(type) standardGeneric("getSizeofTypeName"))
setMethod("getSizeofTypeName", "ANY",
          function(type) {
               # Default case: resolve pending/indirect type references
               # and dispatch again; otherwise the type's own name is
               # what sizeof() needs.
             unresolved = is(type, "PendingType") || is(type, "ResolvedTypeReference")
             if(unresolved)
                getSizeofTypeName(forceResolve(type))
             else
                type@name
          })
setMethod("getSizeofTypeName", "ArrayType",
          function(type) {
               # Element type name followed by one "[ n ]" term per
               # array dimension.
             type@type = forceResolve(type@type)
             dims = getArrayDimension(type)
             brackets = paste("[", dims, "]", collapse = " ")
             paste(type@type@name, brackets)
          })
setMethod("getSizeofTypeName", "PointerType",
          function(type) {
               # The pointee's sizeof-name followed by one "*" per
               # level of indirection.
             stars = paste(rep("*", length = type@depth), collapse = "")
             paste(getSizeofTypeName(type@type), stars)
          })
generateDynamicGlobalVariableCode =
# Newer version than below
#
# Generates the glue needed to expose one native global variable to R:
#  - an R accessor function (get_<name>) that reads or writes the value,
#  - an active-binding definition so a bare `name` in R invokes the accessor,
#  - C routines R_get_<name> / R_set_<name> invoked via .Call(),
#  - registration metadata (name + nargs) for those routines.
# `type` is the native type descriptor; array types get extra from/to
# parameters so a sub-range of the array can be fetched.
function(name, type, nodes, typeMap = list(), package = character())
{
# Name of the R-level accessor, e.g. get_myVar.
getFunName = paste("get", name, sep = "_")
isArray = is(type, "ArrayType")
# Code that coerces the R `value` argument to the native type on assignment.
k = coerceRValue("value", type, type, typeMap)
if(!inherits(k, "IfStatement"))
k = paste("value", "=", k)
# hasCopy = if(isArray) !is(type@type, "BuiltinPrimitiveType") else !is(type, "BuiltinPrimitiveType")
hasCopy = isArray || !is(type, "BuiltinPrimitiveType")
# NOTE(review): the hasCopy value computed above is immediately overridden,
# so every accessor currently gets a `copy` parameter regardless of type.
hasCopy = TRUE
params = c("value", if(isArray) c("from", "to"), if(hasCopy) "copy")
defaults = c("", if(isArray) c("1", type@length), if(hasCopy) "TRUE")
# Body of the R accessor: a missing `value` argument means "get",
# otherwise coerce and "set", returning the assigned value.
rcode = c(
"if(missing(value))",
paste(" .Call('", paste("R_get", name, sep = "_"), "'", if(hasCopy) ", as.logical(copy)", if(isArray) ", as.integer(from), as.integer(to)", ")", sep = ""),
"else {",
paste(" ", k),
paste(" .Call('", paste("R_set", name, sep = "_"), "', value)", sep = ""),
" value",
"}"
)
# R function for getting or setting value
rfun = RFunctionDefinition(getFunName, rcode, params, defaults = defaults,
obj = if(hasCopy) "RFunctionDefinition" else "RAnonymousFunctionDefinition")
# Active binding so that evaluating `name` in R dispatches to the accessor
# (by name when it is a regular function, inline when anonymous).
rdef = ActiveBinding(name, if(hasCopy) getFunName else rfun)
cgetName = paste("R_get", name, sep = "_")
#XXX This needs to be moved to a general place and used in other contexts
# e.g. in convertValueToR()
# Add a copy parameter to that? Or create a convertValueRefToR()
if(isArray) {
type = fixArrayElementTypeNames(type)
# convertValueToR(name, PointerType(type@type), character())
# arrayTypeName = getReferenceClassName(type@type)
# arrayClass = paste(capitalize(type@type@name, first = FALSE), "Array", sep = "")
dims = getArrayDimension(type)
# C cast expression that views the native variable as a pointer to an
# array with its full dimensions.
cast = paste("*(const ", type@name, "(*)", paste("[", dims, "]", sep = "", collapse = ""), ") &")
# hasCopy = !is(type@type, "BuiltinPrimitiveType")
# C expression that copies (a range of) the array into an R object.
arrayRValue = paste(getCopyArrayName(type),
"(",
paste(dims, collapse = ", "),
", ",
#XXX Cast which we don't want "(", getNativeDeclaration("", type, , addSemiColon = FALSE, const = TRUE),")",
cast,
#XXXXXXX Do we know we have a copy parameter?
name, if(hasCopy) ", INTEGER(copy)[0]", ", from - 1, to - 1",
")")
arrayClass = paste(capitalize(type@type@name, first = FALSE), paste(rep("Array", length(dims)), collapse = ""), sep = "")
#??? Use intPtr or intArray in the tag name of the external pointer? If we use intArray, then change the code we generate to look for that.
tagName = getReferenceClassName(type)
# ref = paste("R_createArrayReference(", name, ",", dQuote(arrayClass), ",", dQuote(tagName), ",", type@length, ", sizeof(", getSizeofTypeName(type), "))")
ref = createNativeReference(name, type)
} else {
# Non-array case: expose a reference through a pointer to the variable.
ref = convertValueToR(paste("&", name), PointerType(type), character(), typeMap = typeMap)
}
# used in both set and get.
rvalue = convertValueToR(name, type, character(), typeMap = typeMap)
# C routine for the "get" side: return a reference when copy = FALSE,
# otherwise a fresh copy of the value (or of the requested array range).
getCCode = CRoutineDefinition(cgetName,
c(externC,
"SEXP",
paste(cgetName, "(", if(hasCopy) "SEXP copy", if(isArray) paste(if(hasCopy) ", ", " SEXP start, SEXP end", collapse = ""), ")"),
"{",
"SEXP ans;",
"",
if(hasCopy)
c("if(!LOGICAL(copy)[0])",
paste(" ans =", ref, ";"),
"else"
),
if(isArray)
c(" {",
"int from, to;",
"if(Rf_length(start) == 2) to = INTEGER(start)[1]; ",
"else if(Rf_length(start) == 0 || Rf_length(end) == 0) {",
' PROBLEM "need scalar values for range of array to fetch"',
" ERROR",
"}",
"from = INTEGER(start)[0]; to = INTEGER(end)[0];",
""),
paste(" ans = ", if(isArray) arrayRValue else rvalue, ";"),
if(isArray) " }",
"",
"return(ans);",
"}"))
csetName = paste("R_set", name, sep = "_")
# C routine for the "set" side: convert the R value into the native
# variable; returns R_NilValue rather than echoing the value back.
rconvert = convertRValue(name, "value", type, "value", typeMap = typeMap)
setCCode = CRoutineDefinition(csetName,
c(externC,
"SEXP",
paste(csetName, "(SEXP value)"),
"{",
"SEXP ans = R_NilValue;",
"",
rconvert,
# Don't copy the object back
# paste("ans = ", rvalue, ";"),
"",
"return(ans);",
"}"))
# Bundle the pieces; nargs of the get routine depends on the copy/array extras.
list(set = setCCode, get = getCCode, raccessor = if(hasCopy) rfun else NULL, rbinding = rdef,
registration = list(list(name = setCCode@name, nargs = 1),
list(name = getCCode@name, nargs = 0 + hasCopy + if(isArray) 2 else 0)))
}
generateGlobalVarCode =
# Class definition
# C routine to fetch the value of the variable
# and to set the value. XXX
# R code to create R variables in .onAttach/.First.lib
#
# XXX Need to tidy this up. Want to make var a PointerType
# of one more than the actual value passed in. #
# But need to calculate the class name properly and in different
# ways in the function?
#
# Generates, for one native global variable `name` of type `var`:
#  - an R setClass() definition for the reference class,
#  - a C routine (R_get_<name>) returning an external-pointer reference,
#  - R code assigning that reference to a like-named R variable,
#  - a C routine dereferencing the pointer to fetch the current value.
function(name, var, nodes, typeMap = list(), package = character(), defaultBaseClass = 'RC++Reference')
{
# Work with a pointer type one level deeper than the variable's own type,
# since the generated code hands out the *address* of the variable.
if(is(var, "PointerType")) {
addrType = var
addrType@depth = as.integer(var@depth + 1)
typeName = var@typeName
} else {
typeName = var@name
addrType = new("PointerType", typeName = typeName, depth = as.integer(1), type = var)
}
className = getReferenceClassName(addrType)
# R code defining the reference class, derived from defaultBaseClass.
classDef = paste("setClass('", className, "', contains = '", defaultBaseClass , "')", sep = "")
# The routine to fetch the value of the address of the variable.
rname = paste("R_get", name, sep = "_")
get = CRoutineDefinition(rname,
c("SEXP",
paste(rname, "()"),
"{",
paste("return( R_MAKE_VAR_REFERENCE(&", name, ', "', className, '"));', sep = ""),
"}"), 0L)
# R code to call the get routine and assign the result to an R variable.
if(length(package) == 0) {
rname = paste("'", rname, "'", sep = '')
assignLocation = "globalenv()"
} else {
package = paste(",", package)
assignLocation = paste("package", package, sep = ":")
}
# NOTE(review): when a package name is supplied, `package` (now ", pkg") is
# appended after the closing ")" and is also embedded in assignLocation as
# "package:, pkg", so the generated assign() call looks malformed for the
# package case. Confirm against actual generated output before relying on it.
alias = paste("assign('", name, "', .Call( ", rname, "), ", assignLocation, ")", package, sep = "")
# A routine to get the current value of the variable.
rname = paste("R_get_valueOf", className, sep = "_")
#XXX
# addrTypeName = getReferenceClassName(addrType)
# typeName = var@name
# Tag string used to validate the external pointer when dereferencing.
tp = ifelse(is(var, "BuiltinPrimitiveType"), var@name, getReferenceClassName(var))
type = paste('"', tp, '"', sep = "")
# Dereference the external pointer, guard against NULL, convert to R.
valueOf = CRoutineDefinition(rname,
c("SEXP",
paste(rname, "(SEXP obj)"),
"{",
getNativeDeclaration("tmp", addrType),
paste("tmp = (", getNativeDeclaration("", addrType, addSemiColon = FALSE), ") ",
derefNativeReference("obj", type, type), ";"),
"if(!tmp) {",
' PROBLEM "NULL value found"',
" ERROR",
"}",
paste("return(", convertValueToR("*tmp", var, "tmp", typeMap = typeMap), ");"),
"}"), 1L)
list(classDef = classDef, get = get, alias = alias, valueOf = valueOf)
}
|
#' @title mfsurv.stats
#' @description Deprecated; use \code{stats()} instead. Calculates the deviance
#' information criterion (DIC), \emph{DIC = -2 * (L - P)}, for fitted model
#' objects of class \code{mfsurv} for which a log-likelihood can be obtained,
#' where \emph{L} is the log likelihood of the data given the posterior means
#' of the parameters and \emph{P} is the estimated effective number of
#' parameters in the model.
#' @param object an object of class \code{mfsurv}, the output of \code{mfsurv()}.
#' @return list.
#' @export
mfsurv.stats <- function(object) {
  # Emit the standard deprecation warning, then delegate unchanged to the
  # replacement stats() method.
  .Deprecated("stats")
  stats(object = object)
}
#' @title summary.mfsurv
#' @description Returns a summary of a mfsurv object via \code{\link[coda]{summary.mcmc}}.
#' @param object an object of class \code{mfsurv}, the output of \code{\link{mfsurv}}.
#' @param parameter one of three parameters of the mfsurv output. Indicate either "betas", "gammas" or "lambda".
#' @param ... additional parameters passed on to \code{summary()}.
#' @return list. Empirical mean, standard deviation and quantiles for each variable.
#' @rdname mfsurv
#' @export
mfsurv.summary <- function(object, parameter, ...){
  # Deprecated alias for summary(). The roxygen block documented a `...`
  # argument that the signature did not accept; accept and forward it so the
  # documented usage works (backward compatible for existing callers).
  .Deprecated("summary")
  return(summary(object = object, parameter = parameter, ...))
}
| /R/deprecated.R | permissive | gonzalezrostani/BayesMFSurv | R | false | false | 1,244 | r |
#' @title mfsurv.stats
#' @description Deprecated; use \code{stats()} instead. Calculates the deviance
#' information criterion (DIC), \emph{DIC = -2 * (L - P)}, for fitted model
#' objects of class \code{mfsurv} for which a log-likelihood can be obtained,
#' where \emph{L} is the log likelihood of the data given the posterior means
#' of the parameters and \emph{P} is the estimated effective number of
#' parameters in the model.
#' @param object an object of class \code{mfsurv}, the output of \code{mfsurv()}.
#' @return list.
#' @export
mfsurv.stats <- function(object) {
  # Emit the standard deprecation warning, then delegate unchanged to the
  # replacement stats() method.
  .Deprecated("stats")
  stats(object = object)
}
#' @title summary.mfsurv
#' @description Returns a summary of a mfsurv object via \code{\link[coda]{summary.mcmc}}.
#' @param object an object of class \code{mfsurv}, the output of \code{\link{mfsurv}}.
#' @param parameter one of three parameters of the mfsurv output. Indicate either "betas", "gammas" or "lambda".
#' @param ... additional parameters passed on to \code{summary()}.
#' @return list. Empirical mean, standard deviation and quantiles for each variable.
#' @rdname mfsurv
#' @export
mfsurv.summary <- function(object, parameter, ...){
  # Deprecated alias for summary(). The roxygen block documented a `...`
  # argument that the signature did not accept; accept and forward it so the
  # documented usage works (backward compatible for existing callers).
  .Deprecated("summary")
  return(summary(object = object, parameter = parameter, ...))
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): thin auto-generated wrappers that forward their arguments to
# compiled routines registered by Rcpp; all real work and argument checking
# happens on the native side. To change them, edit the C++ sources and
# re-run Rcpp::compileAttributes() instead of editing this file.
# --- RR-BLUP / genomic prediction model wrappers ---------------------------
callFastRRBLUP <- function(y, geno, lociPerChr, lociLoc, Vu, Ve, maxIter, nThreads) {
.Call(`_AlphaSimR_callFastRRBLUP`, y, geno, lociPerChr, lociLoc, Vu, Ve, maxIter, nThreads)
}
callRRBLUP <- function(y, x, reps, geno, lociPerChr, lociLoc, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP`, y, x, reps, geno, lociPerChr, lociLoc, useReps, nThreads)
}
callRRBLUP2 <- function(y, x, reps, geno, lociPerChr, lociLoc, Vu, Ve, tol, maxIter, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP2`, y, x, reps, geno, lociPerChr, lociLoc, Vu, Ve, tol, maxIter, useEM, useReps, nThreads)
}
callRRBLUP_D <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_D`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_D2 <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, Va, Vd, Ve, tol, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_D2`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, Va, Vd, Ve, tol, useEM, useReps, nThreads)
}
callRRBLUP_MV <- function(Y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_MV`, Y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_GCA <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_GCA`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_GCA2 <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Ve, tol, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_GCA2`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Ve, tol, useEM, useReps, nThreads)
}
callRRBLUP_SCA <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_SCA`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_SCA2 <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Vu3, Ve, tol, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_SCA2`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Vu3, Ve, tol, useEM, useReps, nThreads)
}
# --- genotype/haplotype I/O and extraction wrappers ------------------------
# The write* wrappers are called for their file-writing side effect, hence
# invisible().
writeASGenotypes <- function(g, locations, allLocations, snpchips, names, missing, fname) {
invisible(.Call(`_AlphaSimR_writeASGenotypes`, g, locations, allLocations, snpchips, names, missing, fname))
}
writeASHaplotypes <- function(g, locations, allLocations, snpchips, names, missing, fname) {
invisible(.Call(`_AlphaSimR_writeASHaplotypes`, g, locations, allLocations, snpchips, names, missing, fname))
}
calcGenParam <- function(trait, pop, nThreads) {
.Call(`_AlphaSimR_calcGenParam`, trait, pop, nThreads)
}
getGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getGeno`, geno, lociPerChr, lociLoc, nThreads)
}
getMaternalGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getMaternalGeno`, geno, lociPerChr, lociLoc, nThreads)
}
getPaternalGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getPaternalGeno`, geno, lociPerChr, lociLoc, nThreads)
}
getHaplo <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getHaplo`, geno, lociPerChr, lociLoc, nThreads)
}
getOneHaplo <- function(geno, lociPerChr, lociLoc, haplo, nThreads) {
.Call(`_AlphaSimR_getOneHaplo`, geno, lociPerChr, lociLoc, haplo, nThreads)
}
writeGeno <- function(geno, lociPerChr, lociLoc, filePath, nThreads) {
invisible(.Call(`_AlphaSimR_writeGeno`, geno, lociPerChr, lociLoc, filePath, nThreads))
}
writeOneHaplo <- function(geno, lociPerChr, lociLoc, haplo, filePath, nThreads) {
invisible(.Call(`_AlphaSimR_writeOneHaplo`, geno, lociPerChr, lociLoc, haplo, filePath, nThreads))
}
calcGenoFreq <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_calcGenoFreq`, geno, lociPerChr, lociLoc, nThreads)
}
calcChrFreq <- function(geno) {
.Call(`_AlphaSimR_calcChrFreq`, geno)
}
getGv <- function(trait, pop, nThreads) {
.Call(`_AlphaSimR_getGv`, trait, pop, nThreads)
}
getHybridGv <- function(trait, females, femaleParents, males, maleParents, nThreads) {
.Call(`_AlphaSimR_getHybridGv`, trait, females, femaleParents, males, maleParents, nThreads)
}
# --- IBD tracking and meiosis simulation wrappers --------------------------
getFounderIbd <- function(founder, nChr) {
.Call(`_AlphaSimR_getFounderIbd`, founder, nChr)
}
getNonFounderIbd <- function(recHist, mother, father) {
.Call(`_AlphaSimR_getNonFounderIbd`, recHist, mother, father)
}
createIbdMat <- function(ibd, chr, nLoci, ploidy, nThreads) {
.Call(`_AlphaSimR_createIbdMat`, ibd, chr, nLoci, ploidy, nThreads)
}
cross <- function(motherGeno, mother, fatherGeno, father, femaleMap, maleMap, trackRec, motherPloidy, fatherPloidy, v, p, motherCentromere, fatherCentromere, quadProb, nThreads) {
.Call(`_AlphaSimR_cross`, motherGeno, mother, fatherGeno, father, femaleMap, maleMap, trackRec, motherPloidy, fatherPloidy, v, p, motherCentromere, fatherCentromere, quadProb, nThreads)
}
createDH2 <- function(geno, nDH, genMap, v, p, trackRec, nThreads) {
.Call(`_AlphaSimR_createDH2`, geno, nDH, genMap, v, p, trackRec, nThreads)
}
createReducedGenome <- function(geno, nProgeny, genMap, v, p, trackRec, ploidy, centromere, quadProb, nThreads) {
.Call(`_AlphaSimR_createReducedGenome`, geno, nProgeny, genMap, v, p, trackRec, ploidy, centromere, quadProb, nThreads)
}
#' @title Population variance
#'
#' @description
#' Calculates the population variance matrix as
#' opposed to the sample variance matrix calculated
#' by \code{\link{var}}. i.e. divides by n instead
#' of n-1
#'
#' @param X an n by m matrix
#'
#' @return an m by m variance-covariance matrix
#'
#' @export
popVar <- function(X) {
.Call(`_AlphaSimR_popVar`, X)
}
# --- merging, sampling and miscellaneous utility wrappers ------------------
mergeGeno <- function(x, y) {
.Call(`_AlphaSimR_mergeGeno`, x, y)
}
mergeMultGeno <- function(popList, nInd, nBin, ploidy) {
.Call(`_AlphaSimR_mergeMultGeno`, popList, nInd, nBin, ploidy)
}
mergeMultIntMat <- function(X, nRow, nCol) {
.Call(`_AlphaSimR_mergeMultIntMat`, X, nRow, nCol)
}
sampleInt <- function(n, N) {
.Call(`_AlphaSimR_sampleInt`, n, N)
}
sampAllComb <- function(nLevel1, nLevel2, n) {
.Call(`_AlphaSimR_sampAllComb`, nLevel1, nLevel2, n)
}
sampHalfDialComb <- function(nLevel, n) {
.Call(`_AlphaSimR_sampHalfDialComb`, nLevel, n)
}
calcCoef <- function(X, Y) {
.Call(`_AlphaSimR_calcCoef`, X, Y)
}
getNumThreads <- function() {
.Call(`_AlphaSimR_getNumThreads`)
}
packHaplo <- function(haplo, ploidy, inbred) {
.Call(`_AlphaSimR_packHaplo`, haplo, ploidy, inbred)
}
writePlinkPed <- function(fam, haplo, nInd, ploidy, nLoc, file) {
invisible(.Call(`_AlphaSimR_writePlinkPed`, fam, haplo, nInd, ploidy, nLoc, file))
}
MaCS <- function(args, maxSites, inbred, ploidy, nThreads, seed) {
.Call(`_AlphaSimR_MaCS`, args, maxSites, inbred, ploidy, nThreads, seed)
}
| /R/RcppExports.R | no_license | Ederdbs/AlphaSimR | R | false | false | 7,052 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): thin auto-generated wrappers that forward their arguments to
# compiled routines registered by Rcpp; all real work and argument checking
# happens on the native side. To change them, edit the C++ sources and
# re-run Rcpp::compileAttributes() instead of editing this file.
# --- RR-BLUP / genomic prediction model wrappers ---------------------------
callFastRRBLUP <- function(y, geno, lociPerChr, lociLoc, Vu, Ve, maxIter, nThreads) {
.Call(`_AlphaSimR_callFastRRBLUP`, y, geno, lociPerChr, lociLoc, Vu, Ve, maxIter, nThreads)
}
callRRBLUP <- function(y, x, reps, geno, lociPerChr, lociLoc, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP`, y, x, reps, geno, lociPerChr, lociLoc, useReps, nThreads)
}
callRRBLUP2 <- function(y, x, reps, geno, lociPerChr, lociLoc, Vu, Ve, tol, maxIter, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP2`, y, x, reps, geno, lociPerChr, lociLoc, Vu, Ve, tol, maxIter, useEM, useReps, nThreads)
}
callRRBLUP_D <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_D`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_D2 <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, Va, Vd, Ve, tol, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_D2`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, Va, Vd, Ve, tol, useEM, useReps, nThreads)
}
callRRBLUP_MV <- function(Y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_MV`, Y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_GCA <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_GCA`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_GCA2 <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Ve, tol, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_GCA2`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Ve, tol, useEM, useReps, nThreads)
}
callRRBLUP_SCA <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_SCA`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, useReps, nThreads)
}
callRRBLUP_SCA2 <- function(y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Vu3, Ve, tol, useEM, useReps, nThreads) {
.Call(`_AlphaSimR_callRRBLUP_SCA2`, y, x, reps, geno, lociPerChr, lociLoc, maxIter, Vu1, Vu2, Vu3, Ve, tol, useEM, useReps, nThreads)
}
# --- genotype/haplotype I/O and extraction wrappers ------------------------
# The write* wrappers are called for their file-writing side effect, hence
# invisible().
writeASGenotypes <- function(g, locations, allLocations, snpchips, names, missing, fname) {
invisible(.Call(`_AlphaSimR_writeASGenotypes`, g, locations, allLocations, snpchips, names, missing, fname))
}
writeASHaplotypes <- function(g, locations, allLocations, snpchips, names, missing, fname) {
invisible(.Call(`_AlphaSimR_writeASHaplotypes`, g, locations, allLocations, snpchips, names, missing, fname))
}
calcGenParam <- function(trait, pop, nThreads) {
.Call(`_AlphaSimR_calcGenParam`, trait, pop, nThreads)
}
getGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getGeno`, geno, lociPerChr, lociLoc, nThreads)
}
getMaternalGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getMaternalGeno`, geno, lociPerChr, lociLoc, nThreads)
}
getPaternalGeno <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getPaternalGeno`, geno, lociPerChr, lociLoc, nThreads)
}
getHaplo <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_getHaplo`, geno, lociPerChr, lociLoc, nThreads)
}
getOneHaplo <- function(geno, lociPerChr, lociLoc, haplo, nThreads) {
.Call(`_AlphaSimR_getOneHaplo`, geno, lociPerChr, lociLoc, haplo, nThreads)
}
writeGeno <- function(geno, lociPerChr, lociLoc, filePath, nThreads) {
invisible(.Call(`_AlphaSimR_writeGeno`, geno, lociPerChr, lociLoc, filePath, nThreads))
}
writeOneHaplo <- function(geno, lociPerChr, lociLoc, haplo, filePath, nThreads) {
invisible(.Call(`_AlphaSimR_writeOneHaplo`, geno, lociPerChr, lociLoc, haplo, filePath, nThreads))
}
calcGenoFreq <- function(geno, lociPerChr, lociLoc, nThreads) {
.Call(`_AlphaSimR_calcGenoFreq`, geno, lociPerChr, lociLoc, nThreads)
}
calcChrFreq <- function(geno) {
.Call(`_AlphaSimR_calcChrFreq`, geno)
}
getGv <- function(trait, pop, nThreads) {
.Call(`_AlphaSimR_getGv`, trait, pop, nThreads)
}
getHybridGv <- function(trait, females, femaleParents, males, maleParents, nThreads) {
.Call(`_AlphaSimR_getHybridGv`, trait, females, femaleParents, males, maleParents, nThreads)
}
# --- IBD tracking and meiosis simulation wrappers --------------------------
getFounderIbd <- function(founder, nChr) {
.Call(`_AlphaSimR_getFounderIbd`, founder, nChr)
}
getNonFounderIbd <- function(recHist, mother, father) {
.Call(`_AlphaSimR_getNonFounderIbd`, recHist, mother, father)
}
createIbdMat <- function(ibd, chr, nLoci, ploidy, nThreads) {
.Call(`_AlphaSimR_createIbdMat`, ibd, chr, nLoci, ploidy, nThreads)
}
cross <- function(motherGeno, mother, fatherGeno, father, femaleMap, maleMap, trackRec, motherPloidy, fatherPloidy, v, p, motherCentromere, fatherCentromere, quadProb, nThreads) {
.Call(`_AlphaSimR_cross`, motherGeno, mother, fatherGeno, father, femaleMap, maleMap, trackRec, motherPloidy, fatherPloidy, v, p, motherCentromere, fatherCentromere, quadProb, nThreads)
}
createDH2 <- function(geno, nDH, genMap, v, p, trackRec, nThreads) {
.Call(`_AlphaSimR_createDH2`, geno, nDH, genMap, v, p, trackRec, nThreads)
}
createReducedGenome <- function(geno, nProgeny, genMap, v, p, trackRec, ploidy, centromere, quadProb, nThreads) {
.Call(`_AlphaSimR_createReducedGenome`, geno, nProgeny, genMap, v, p, trackRec, ploidy, centromere, quadProb, nThreads)
}
#' @title Population variance
#'
#' @description
#' Calculates the population variance matrix as
#' opposed to the sample variance matrix calculated
#' by \code{\link{var}}. i.e. divides by n instead
#' of n-1
#'
#' @param X an n by m matrix
#'
#' @return an m by m variance-covariance matrix
#'
#' @export
popVar <- function(X) {
.Call(`_AlphaSimR_popVar`, X)
}
# --- merging, sampling and miscellaneous utility wrappers ------------------
mergeGeno <- function(x, y) {
.Call(`_AlphaSimR_mergeGeno`, x, y)
}
mergeMultGeno <- function(popList, nInd, nBin, ploidy) {
.Call(`_AlphaSimR_mergeMultGeno`, popList, nInd, nBin, ploidy)
}
mergeMultIntMat <- function(X, nRow, nCol) {
.Call(`_AlphaSimR_mergeMultIntMat`, X, nRow, nCol)
}
sampleInt <- function(n, N) {
.Call(`_AlphaSimR_sampleInt`, n, N)
}
sampAllComb <- function(nLevel1, nLevel2, n) {
.Call(`_AlphaSimR_sampAllComb`, nLevel1, nLevel2, n)
}
sampHalfDialComb <- function(nLevel, n) {
.Call(`_AlphaSimR_sampHalfDialComb`, nLevel, n)
}
calcCoef <- function(X, Y) {
.Call(`_AlphaSimR_calcCoef`, X, Y)
}
getNumThreads <- function() {
.Call(`_AlphaSimR_getNumThreads`)
}
packHaplo <- function(haplo, ploidy, inbred) {
.Call(`_AlphaSimR_packHaplo`, haplo, ploidy, inbred)
}
writePlinkPed <- function(fam, haplo, nInd, ploidy, nLoc, file) {
invisible(.Call(`_AlphaSimR_writePlinkPed`, fam, haplo, nInd, ploidy, nLoc, file))
}
MaCS <- function(args, maxSites, inbred, ploidy, nThreads, seed) {
.Call(`_AlphaSimR_MaCS`, args, maxSites, inbred, ploidy, nThreads, seed)
}
|
#' Information about all common courts
#'
#' A dataset containing basic information about Polish common courts and their
#' organisational structure. Up-to-date version of this dataset could be
#' obtained with \code{get_dump_courts(simplify = TRUE)}.
#'
#' @encoding UTF-8
#'
#' @format A data frame with 291 rows and 6 variables:
#' \tabular{rlll}{
#' n \tab name \tab class \tab description \cr
#' [,1] \tab id \tab integer \tab court ID in the repository \cr
#' [,2] \tab name \tab character \tab full name of the court \cr
#' [,3] \tab type \tab character \tab type of the court; district, regional or
#' appeal \cr
#' [,4] \tab code \tab character \tab court's code, see details below \cr
#' [,5] \tab parentCourt \tab integer \tab ID of the superior court \cr
#' [,6] \tab divisions \tab list \tab details of the court's divisions, see
#' details below \cr
#' }
#' Court's code represents nested structure. It is in format "15BBCCDD" where
#' "15" - code representing common courts,
#' "BB" - code of the corresponding appeal court,
#' "CC" - code of the corresponding regional court ("00" for appeal courts),
#' "DD" - code of the district court ("00" for appeal and regional courts).
#' All codes are multiples of five, unique within their level of nesting.
#'
#' Information about divisions is stored in dataframes with following columns.
#' \tabular{rlll}{
#' n \tab name \tab class \tab description \cr
#' [,1] \tab id \tab integer \tab unique ID \cr
#' [,2] \tab name \tab character \tab full name of the division \cr
#' [,3] \tab code \tab character \tab division's code, see details below \cr
#' [,4] \tab type \tab character \tab type of the division \cr
#' }
#' Code of the division is in the format "AABCCDD", where
#' "AA" - code of unit, "00" for local divisions, multiples of five for other
#' units (branches),
#' "B" - type of unit, "0" for local division, "1" branch offices, "2" for
#' branch division,
#' "CC" - number of division in the given court; multiples of five or four
#' (in case of more than 20 divisions) or three (more than 25 divisions);
#' in addition when one division has more sections, subsequent sections
#' have codes increased by one (in reference to the previous one),
#' "DD" - code corresponding to the type of the division; multiples of three,
#' according to the following table (in Polish):
#' \tabular{cl}{
#' code \tab type's name \cr
#' 3 \tab Wydział Cywilny \cr
#' 6 \tab Wydział Karny \cr
#' 9 \tab Wydział Cywilny Rodzinny \cr
#' 12 \tab Wydział Rodzinny i Nieletnich \cr
#' 15 \tab Wydział Pracy\cr
#' 18 \tab Wydział Ubezpieczeń Społecznych \cr
#' 21 \tab Wydział Pracy i Ubezpieczeń Społecznych \cr
#' 24 \tab Wydział Ksiąg Wieczystych \cr
#' 27 \tab Wydział Gospodarczy \cr
#' 30 \tab Wydział Gospodarczy Rejestrowy \cr
#' 33 \tab Wydział Gospodarczy Rejestru Zastawów \cr
#' 36 \tab Wydział Krajowego Rejestru Sądowego \cr
#' 39 \tab Wydział Penitencjarny i Nadzoru nad Wykonywaniem Orzeczeń Karnych \cr
#' 42 \tab Wydział Grodzki \cr
#' 45 \tab Wydział Antymonopolowy \cr
#' 48 \tab Wydział Spraw Geologicznych i Górniczych \cr
#' 51 \tab Wydział Egzekucyjny \cr
#' 54 \tab Wydział Wykonawczy \cr
#' 57 \tab Wydział Wizytacyjny \cr
#' 60 \tab Wydział Lustracyjny \cr
#' }
#'
#' There is no missing data except for structural \code{NA}s in \code{parentCourt}.
#'
#' @source \url{https://saos-test.icm.edu.pl/api/dump/commonCourts}
"courts" | /R/courts.R | no_license | bartekch/saos | R | false | false | 3,519 | r | #' Information about all common courts
#'
#' A dataset containing basic information about Polish common courts and their
#' organisational structure. Up-to-date version of this dataset could be
#' obtained with \code{get_dump_courts(simplify = TRUE)}.
#'
#' @encoding UTF-8
#'
#' @format A data frame with 291 rows and 6 variables:
#' \tabular{rlll}{
#' n \tab name \tab class \tab description \cr
#' [,1] \tab id \tab integer \tab court ID in the repository \cr
#' [,2] \tab name \tab character \tab full name of the court \cr
#' [,3] \tab type \tab character \tab type of the court; district, regional or
#' appeal \cr
#' [,4] \tab code \tab character \tab court's code, see details below \cr
#' [,5] \tab parentCourt \tab integer \tab ID of the superior court \cr
#' [,6] \tab divisions \tab list \tab details of the court's divisions, see
#' details below \cr
#' }
#' Court's code represents nested structure. It is in format "15BBCCDD" where
#' "15" - code representing common courts,
#' "BB" - code of the corresponding appeal court,
#' "CC" - code of the corresponding regional court ("00" for appeal courts),
#' "DD" - code of the district court ("00" for appeal and regional courts).
#' All codes are multiples of five, unique within their level of nesting.
#'
#' Information about divisions is stored in dataframes with following columns.
#' \tabular{rlll}{
#' n \tab name \tab class \tab description \cr
#' [,1] \tab id \tab integer \tab unique ID \cr
#' [,2] \tab name \tab character \tab full name of the division \cr
#' [,3] \tab code \tab character \tab division's code, see details below \cr
#' [,4] \tab type \tab character \tab type of the division \cr
#' }
#' Code of the division is in the format "AABCCDD", where
#' "AA" - code of unit, "00" for local divisions, multiples of five for other
#' units (branches),
#' "B" - type of unit, "0" for local division, "1" branch offices, "2" for
#' branch division,
#' "CC" - number of division in the given court; multiples of five or four
#' (in case of more than 20 divisions) or three (more than 25 divisions);
#' in addition when one division has more sections, subsequent sections
#' have codes increased by one (in reference to the previous one),
#' "DD" - code corresponding to the type of the division; multiples of three,
#' according to the following table (in Polish):
#' \tabular{cl}{
#' code \tab type's name \cr
#' 3 \tab Wydział Cywilny \cr
#' 6 \tab Wydział Karny \cr
#' 9 \tab Wydział Cywilny Rodzinny \cr
#' 12 \tab Wydział Rodzinny i Nieletnich \cr
#' 15 \tab Wydział Pracy\cr
#' 18 \tab Wydział Ubezpieczeń Społecznych \cr
#' 21 \tab Wydział Pracy i Ubezpieczeń Społecznych \cr
#' 24 \tab Wydział Ksiąg Wieczystych \cr
#' 27 \tab Wydział Gospodarczy \cr
#' 30 \tab Wydział Gospodarczy Rejestrowy \cr
#' 33 \tab Wydział Gospodarczy Rejestru Zastawów \cr
#' 36 \tab Wydział Krajowego Rejestru Sądowego \cr
#' 39 \tab Wydział Penitencjarny i Nadzoru nad Wykonywaniem Orzeczeń Karnych \cr
#' 42 \tab Wydział Grodzki \cr
#' 45 \tab Wydział Antymonopolowy \cr
#' 48 \tab Wydział Spraw Geologicznych i Górniczych \cr
#' 51 \tab Wydział Egzekucyjny \cr
#' 54 \tab Wydział Wykonawczy \cr
#' 57 \tab Wydział Wizytacyjny \cr
#' 60 \tab Wydział Lustracyjny \cr
#' }
#'
#' There is no missing data except for structural \code{NA}s in \code{parentCourt}.
#'
#' @source \url{https://saos-test.icm.edu.pl/api/dump/commonCourts}
"courts" |
#' Supervised and unsupervised uniform manifold approximation and projection (UMAP)
#'
#' `step_umap` creates a *specification* of a recipe step that
#' will project a set of features into a smaller space.
#'
#' @param recipe A recipe object. The step will be added to the
#' sequence of operations for this recipe.
#' @param ... One or more selector functions to choose variables. For
#' `step_umap`, this indicates the variables to be encoded into a numeric
#' format. Numeric and factor variables can be used. See
#' [recipes::selections()] for more details. For the `tidy` method, these are
#' not currently used.
#' @param role For model terms created by this step, what analysis role should
#' they be assigned?. By default, the function assumes that the new embedding
#' columns created by the original variables will be used as predictors in a
#' model.
#' @param min_dist The effective minimum distance between embedded points.
#' @param num_comp An integer for the number of UMAP components.
#' @param neighbors An integer for the number of nearest neighbors used to construct
#' the target simplicial set.
#' @param epochs Number of iterations for the neighbor optimization. See
#' [uwot::umap()] for more details.
#' @param learn_rate Positive number of the learning rate for the optimization
#' process.
#' @param outcome A call to `vars` to specify which variable is
#' used as the outcome in the encoding process (if any).
#' @param options A list of options to pass to [uwot::umap()]. The arguments
#' `X`, `n_neighbors`, `n_components`, `min_dist`, `n_epochs`, `ret_model`, and
#' `learning_rate` should not be passed here. By default, `verbose` and
#' `n_threads` are set.
#' @param seed Two integers to control the random numbers used by the
#' numerical methods. The default pulls from the main session's stream of
#' numbers and will give reproducible results if the seed is set prior to
#' calling [prep.recipe()] or [bake.recipe()].
#' @param retain A single logical for whether the original predictors should
#' be kept (in addition to the new embedding variables).
#' @param object An object that defines the encoding. This is
#' `NULL` until the step is trained by [recipes::prep.recipe()].
#' @param skip A logical. Should the step be skipped when the recipe is baked
#' by [recipes::bake.recipe()]? While all operations are baked when
#' [recipes::prep.recipe()] is run, some operations may not be able to be
#' conducted on new data (e.g. processing the outcome variable(s)). Care should
#' be taken when using `skip = TRUE` as it may affect the computations for
#' subsequent operations
#' @param trained A logical to indicate if the quantities for preprocessing
#' have been estimated.
#' @param id A character string that is unique to this step to identify it.
#' @return An updated version of `recipe` with the new step added to the
#' sequence of existing steps (if any). For the `tidy` method, a tibble with a
#' column called `terms` (the selectors or variables for embedding) is
#' returned.
#' @keywords datagen
#' @concept preprocessing encoding
#' @export
#' @details
#' UMAP, short for Uniform Manifold Approximation and Projection, is a nonlinear
#' dimension reduction technique that finds local, low-dimensional
#' representations of the data. It can be run unsupervised or supervised with
#' different types of outcome data (e.g. numeric, factor, etc).
#'
#' @references
#' McInnes, L., & Healy, J. (2018). UMAP: Uniform Manifold Approximation and
#' Projection for Dimension Reduction. \url{https://arxiv.org/abs/1802.03426}.
#'
#' "How UMAP Works" \url{https://umap-learn.readthedocs.io/en/latest/how_umap_works.html}
#'
#'
#' @examples
#' library(recipes)
#' library(dplyr)
#' library(ggplot2)
#'
#' split <- seq.int(1, 150, by = 9)
#' tr <- iris[-split, ]
#' te <- iris[ split, ]
#'
#' set.seed(11)
#' supervised <-
#' recipe(Species ~ ., data = tr) %>%
#' step_center(all_predictors()) %>%
#' step_scale(all_predictors()) %>%
#' step_umap(all_predictors(), outcome = vars(Species), num_comp = 2) %>%
#' prep(training = tr)
#'
#' theme_set(theme_bw())
#'
#' bake(supervised, new_data = te, Species, starts_with("umap")) %>%
#' ggplot(aes(x = umap_1, y = umap_2, col = Species)) +
#' geom_point(alpha = .5)
step_umap <-
  function(recipe,
           ...,
           role = "predictor",
           trained = FALSE,
           outcome = NULL,
           neighbors = 15,
           num_comp = 2,
           min_dist = 0.01,
           learn_rate = 1,
           epochs = NULL,
           options = list(verbose = FALSE, n_threads = 1),
           seed = sample(10^5, 2),
           retain = FALSE,
           object = NULL,
           skip = FALSE,
           id = rand_id("umap")) {
    # Fail fast if the uwot backend is not installed.
    recipes::recipes_pkg_check("uwot")
    # Coerce a double seed (e.g. user-supplied c(1, 2)) to integer.
    # Fix: use scalar `&&` in the if() condition instead of the vectorized
    # `&` -- both operands are scalar tests, and `&&` is the correct,
    # short-circuiting operator for `if` (R >= 4.3 errors on length > 1
    # operands to `&&`, making the intent explicit).
    if (is.numeric(seed) && !is.integer(seed)) {
      seed <- as.integer(seed)
    }
    if (length(seed) != 2) {
      rlang::abort("Two integers are required for `seed`.")
    }
    # Register the (untrained) step on the recipe; fitting happens in prep().
    add_step(
      recipe,
      step_umap_new(
        terms = ellipse_check(...),
        role = role,
        trained = trained,
        outcome = outcome,
        neighbors = neighbors,
        num_comp = num_comp,
        min_dist = min_dist,
        learn_rate = learn_rate,
        epochs = epochs,
        options = options,
        seed = seed,
        retain = retain,
        object = object,
        skip = skip,
        id = id
      )
    )
  }
# TODO sep options for fit and predict, rm tr embeddings,
# Bare constructor for a `step_umap` object. Unlike `step_umap()`, this does
# no validation: it simply records each argument in the step. It is called
# once when the step is added to the recipe and again by `prep.step_umap()`
# to store the trained uwot model (in `object`) with `trained = TRUE`.
step_umap_new <-
function(terms, role, trained, outcome, neighbors, num_comp, min_dist,
learn_rate, epochs, options, seed, retain, object, skip, id) {
step(
subclass = "umap",
terms = terms,
role = role,
trained = trained,
outcome = outcome,
neighbors = neighbors,
num_comp = num_comp,
min_dist = min_dist,
learn_rate = learn_rate,
epochs = epochs,
options = options,
seed = seed,
retain = retain,
object = object,
skip = skip,
id = id
)
}
# Build an *unevaluated* call to `uwot::umap()`. Note the call references the
# symbols `training`, `col_names`, and `y_name`, which are not defined here:
# they must be bound in the frame where the call is eventually evaluated
# (see `prep.step_umap()`). This is a deliberate lazy-evaluation pattern.
umap_fit_call <- function(obj, y = NULL) {
cl <- rlang::call2("umap", .ns = "uwot", X = rlang::expr(training[, col_names]))
# Supervised UMAP: include the outcome column when one was requested.
if (!is.null(y)) {
cl$y <- rlang::expr(training[[y_name]])
}
# Map the step's argument names onto uwot's argument names.
cl$n_neighbors <- obj$neighbors
cl$n_components <- obj$num_comp
cl$n_epochs <- obj$epochs
cl$learning_rate <- obj$learn_rate
cl$min_dist <- obj$min_dist
# Splice any extra user options (e.g. verbose, n_threads) into the call.
if (length(obj$options) > 0) {
cl <- rlang::call_modify(cl, !!!obj$options)
}
# The fitted model object must be returned so bake() can run umap_transform().
cl$ret_model <- TRUE
cl
}
#' @export
# Train the UMAP step: resolve the selectors against `training`, fit the
# uwot model under the first stored seed, and return a trained copy of the
# step carrying the fitted model in `object`.
prep.step_umap <- function(x, training, info = NULL, ...) {
# Resolve the column selectors to concrete predictor names.
col_names <- terms_select(x$terms, info = info)
if (length(x$outcome) > 0) {
y_name <- terms_select(x$outcome, info = info)
} else {
y_name <- NULL
}
# Evaluate the lazily-built uwot::umap() call (see umap_fit_call()); the
# call picks up `training`, `col_names`, and `y_name` from this frame.
# with_seed() makes the stochastic fit reproducible and restores the RNG.
withr::with_seed(
x$seed[1],
res <- rlang::eval_tidy(umap_fit_call(x, y = y_name))
)
# Remember which columns fed the embedding; bake() and print() use this.
res$xnames <- col_names
step_umap_new(
terms = x$terms,
role = x$role,
trained = TRUE,
outcome = y_name,
neighbors = x$neighbors,
num_comp = x$num_comp,
min_dist = x$min_dist,
learn_rate = x$learn_rate,
epochs = x$epochs,
options = x$options,
seed = x$seed,
retain = x$retain,
object = res,
skip = x$skip,
id = x$id
)
}
#' @export
# Apply a trained UMAP step to new data: project the stored predictor
# columns through the fitted model, append the embedding columns, and
# (optionally) drop the originals.
bake.step_umap <- function(object, new_data, ...) {
  predictors <- object$object$xnames
  # Seed with the second stored seed so transforms are reproducible;
  # with_seed() returns the value of the expression and restores the RNG.
  embedding <- withr::with_seed(
    object$seed[2],
    uwot::umap_transform(
      model = object$object,
      X = new_data[, predictors]
    )
  )
  # Name the new columns umap_1, umap_2, ... and append them.
  colnames(embedding) <- names0(ncol(embedding), "umap_")
  new_data <- bind_cols(new_data, dplyr::as_tibble(embedding))
  # Unless the user asked to retain them, remove the source predictors.
  if (!object$retain) {
    new_data[, predictors] <- NULL
  }
  new_data
}
#' @export
# Console print method for a UMAP step. Shows which columns feed the
# embedding: resolved column names once trained, selector expressions before.
print.step_umap <-
function(x, width = max(20, options()$width - 31), ...) {
cat("UMAP embedding for ", sep = "")
# printer() (recipes internal) chooses between x$object$xnames and x$terms
# based on x$trained, truncating the listing to `width` characters.
printer(x$object$xnames, x$terms, x$trained, width = width)
invisible(x)
}
#' @rdname step_umap
#' @param x A `step_umap` object.
#' @export
#' @export tidy.step_umap
tidy.step_umap <- function(x, ...) {
  # Report the variables used for the embedding: the resolved column names
  # once the step is trained, otherwise the raw selector expressions.
  term_values <- if (is_trained(x)) {
    x$object$xnames
  } else {
    sel2char(x$terms)
  }
  tibble(terms = term_values, id = x$id)
}
| /R/umap.R | no_license | konradsemsch/embed-1 | R | false | false | 8,289 | r | #' Supervised and unsupervised uniform manifold approximation and projection (UMAP)
#'
#' `step_umap` creates a *specification* of a recipe step that
#' will project a set of features into a smaller space.
#'
#' @param recipe A recipe object. The step will be added to the
#' sequence of operations for this recipe.
#' @param ... One or more selector functions to choose variables. For
#' `step_umap`, this indicates the variables to be encoded into a numeric
#' format. Numeric and factor variables can be used. See
#' [recipes::selections()] for more details. For the `tidy` method, these are
#' not currently used.
#' @param role For model terms created by this step, what analysis role should
#' they be assigned?. By default, the function assumes that the new embedding
#' columns created by the original variables will be used as predictors in a
#' model.
#' @param min_dist The effective minimum distance between embedded points.
#' @param num_comp An integer for the number of UMAP components.
#' @param neighbors An integer for the number of nearest neighbors used to construct
#' the target simplicial set.
#' @param epochs Number of iterations for the neighbor optimization. See
#' [uwot::umap()] for mroe details.
#' @param learn_rate Positive number of the learning rate for the optimization
#' process.
#' @param outcome A call to `vars` to specify which variable is
#' used as the outcome in the encoding process (if any).
#' @param options A list of options to pass to [uwot::umap()]. The arguments
#' `X`, `n_neighbors`, `n_components`, `min_dist`, `n_epochs`, `ret_model`, and
#' `learning_rate` should not be passed here. By default, `verbose` and
#' `n_threads` are set.
#' @param seed Two integers to control the random numbers used by the
#' numerical methods. The default pulls from the main session's stream of
#' numbers and will give reproducible results if the seed is set prior to
#' calling [prep.recipe()] or [bake.recipe()].
#' @param retain A single logical for whether the original predictors should
#' be kept (in addition to the new embedding variables).
#' @param object An object that defines the encoding. This is
#' `NULL` until the step is trained by [recipes::prep.recipe()].
#' @param skip A logical. Should the step be skipped when the recipe is baked
#' by [recipes::bake.recipe()]? While all operations are baked when
#' [recipes::prep.recipe()] is run, some operations may not be able to be
#' conducted on new data (e.g. processing the outcome variable(s)). Care should
#' be taken when using `skip = TRUE` as it may affect the computations for
#' subsequent operations
#' @param trained A logical to indicate if the quantities for preprocessing
#' have been estimated.
#' @param id A character string that is unique to this step to identify it.
#' @return An updated version of `recipe` with the new step added to the
#' sequence of existing steps (if any). For the `tidy` method, a tibble with a
#' column called `terms` (the selectors or variables for embedding) is
#' returned.
#' @keywords datagen
#' @concept preprocessing encoding
#' @export
#' @details
#' UMAP, short for Uniform Manifold Approximation and Projection, is a nonlinear
#' dimension reduction technique that finds local, low-dimensional
#' representations of the data. It can be run unsupervised or supervised with
#' different types of outcome data (e.g. numeric, factor, etc).
#'
#' @references
#' McInnes, L., & Healy, J. (2018). UMAP: Uniform Manifold Approximation and
#' Projection for Dimension Reduction. \url{ https://arxiv.org/abs/1802.03426}.
#'
#' "How UMAP Works" \url{https://umap-learn.readthedocs.io/en/latest/how_umap_works.html}
#'
#'
#' @examples
#' library(recipes)
#' library(dplyr)
#' library(ggplot2)
#'
#' split <- seq.int(1, 150, by = 9)
#' tr <- iris[-split, ]
#' te <- iris[ split, ]
#'
#' set.seed(11)
#' supervised <-
#' recipe(Species ~ ., data = tr) %>%
#' step_center(all_predictors()) %>%
#' step_scale(all_predictors()) %>%
#' step_umap(all_predictors(), outcome = vars(Species), num_comp = 2) %>%
#' prep(training = tr)
#'
#' theme_set(theme_bw())
#'
#' bake(supervised, new_data = te, Species, starts_with("umap")) %>%
#' ggplot(aes(x = umap_1, y = umap_2, col = Species)) +
#' geom_point(alpha = .5)
step_umap <-
function(recipe,
...,
role = "predictor",
trained = FALSE,
outcome = NULL,
neighbors = 15,
num_comp = 2,
min_dist = 0.01,
learn_rate = 1,
epochs = NULL,
options = list(verbose = FALSE, n_threads = 1),
seed = sample(10^5, 2),
retain = FALSE,
object = NULL,
skip = FALSE,
id = rand_id("umap")) {
recipes::recipes_pkg_check("uwot")
if (is.numeric(seed) & !is.integer(seed)) {
seed <- as.integer(seed)
}
if (length(seed) != 2) {
rlang::abort("Two integers are required for `seed`.")
}
add_step(
recipe,
step_umap_new(
terms = ellipse_check(...),
role = role,
trained = trained,
outcome = outcome,
neighbors = neighbors,
num_comp = num_comp,
min_dist = min_dist,
learn_rate = learn_rate,
epochs = epochs,
options = options,
seed = seed,
retain = retain,
object = object,
skip = skip,
id = id
)
)
}
# TODO sep options for fit and predict, rm tr embeddings,
step_umap_new <-
function(terms, role, trained, outcome, neighbors, num_comp, min_dist,
learn_rate, epochs, options, seed, retain, object, skip, id) {
step(
subclass = "umap",
terms = terms,
role = role,
trained = trained,
outcome = outcome,
neighbors = neighbors,
num_comp = num_comp,
min_dist = min_dist,
learn_rate = learn_rate,
epochs = epochs,
options = options,
seed = seed,
retain = retain,
object = object,
skip = skip,
id = id
)
}
umap_fit_call <- function(obj, y = NULL) {
cl <- rlang::call2("umap", .ns = "uwot", X = rlang::expr(training[, col_names]))
if (!is.null(y)) {
cl$y <- rlang::expr(training[[y_name]])
}
cl$n_neighbors <- obj$neighbors
cl$n_components <- obj$num_comp
cl$n_epochs <- obj$epochs
cl$learning_rate <- obj$learn_rate
cl$min_dist <- obj$min_dist
if (length(obj$options) > 0) {
cl <- rlang::call_modify(cl, !!!obj$options)
}
cl$ret_model <- TRUE
cl
}
#' @export
prep.step_umap <- function(x, training, info = NULL, ...) {
col_names <- terms_select(x$terms, info = info)
if (length(x$outcome) > 0) {
y_name <- terms_select(x$outcome, info = info)
} else {
y_name <- NULL
}
withr::with_seed(
x$seed[1],
res <- rlang::eval_tidy(umap_fit_call(x, y = y_name))
)
res$xnames <- col_names
step_umap_new(
terms = x$terms,
role = x$role,
trained = TRUE,
outcome = y_name,
neighbors = x$neighbors,
num_comp = x$num_comp,
min_dist = x$min_dist,
learn_rate = x$learn_rate,
epochs = x$epochs,
options = x$options,
seed = x$seed,
retain = x$retain,
object = res,
skip = x$skip,
id = x$id
)
}
#' @export
bake.step_umap <- function(object, new_data, ...) {
withr::with_seed(
object$seed[2],
res <-
uwot::umap_transform(
model = object$object,
X = new_data[, object$object$xnames]
)
)
colnames(res) <- names0(ncol(res), "umap_")
res <- dplyr::as_tibble(res)
new_data <- bind_cols(new_data, res)
if (!object$retain) {
new_data[, object$object$xnames] <- NULL
}
new_data
}
#' @export
print.step_umap <-
function(x, width = max(20, options()$width - 31), ...) {
cat("UMAP embedding for ", sep = "")
printer(x$object$xnames, x$terms, x$trained, width = width)
invisible(x)
}
#' @rdname step_umap
#' @param x A `step_umap` object.
#' @export
#' @export tidy.step_umap
tidy.step_umap <- function(x, ...) {
if (is_trained(x)) {
res <- tibble(terms = x$object$xnames)
} else {
term_names <- sel2char(x$terms)
res <- tibble(terms = term_names)
}
res$id <- x$id
res
}
|
# Exploratory data analysis, plot 4: load and prepare the UCI household
# power consumption data. Missing values are encoded as "?" in the file.
t <- read.table("E:\\coursera\\household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Parse the Date column (day/month/year) into Date objects
t$Date <- as.Date(t$Date, "%d/%m/%Y")
## Keep only observations from Feb. 1, 2007 through Feb. 2, 2007 (inclusive)
t <- subset(t,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Drop rows containing any NA
t <- t[complete.cases(t),]
## Combine the Date and Time columns into a single timestamp string
dateTime <- paste(t$Date, t$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove the now-redundant Date and Time columns
t <- t[ ,!(names(t) %in% c("Date","Time"))]
## Prepend the combined DateTime column
t <- cbind(dateTime, t)
## Parse the timestamp strings into POSIXct date-times for plotting
t$dateTime <- as.POSIXct(dateTime)
## Create Plot 4: a 2x2 panel layout with room for an outer title
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(t, {
plot(Global_active_power~dateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~dateTime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~dateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~dateTime,col='Red')
lines(Sub_metering_3~dateTime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~dateTime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
}) | /plot4.R | no_license | ahedib/Exploratory-Data-Analysis | R | false | false | 1,525 | r | t <- read.table("E:\\coursera\\household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to Type Date
t$Date <- as.Date(t$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
t <- subset(t,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observation
t <- t[complete.cases(t),]
## Combine Date and Time column
dateTime <- paste(t$Date, t$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
t <- t[ ,!(names(t) %in% c("Date","Time"))]
## Add DateTime column
t <- cbind(dateTime, t)
## Format dateTime Column
t$dateTime <- as.POSIXct(dateTime)
## Create Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(t, {
plot(Global_active_power~dateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~dateTime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~dateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~dateTime,col='Red')
lines(Sub_metering_3~dateTime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~dateTime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
}) |
#' Make Track
#'
#' Simple convenience function for creating a \code{track} class object from X-Y-Time movement data. A \code{track} class object can be conveniently plotted and analyzed within \code{bcpa}.
#'
#' @param X vector of X locations
#' @param Y vector of Y locations
#' @param Time vector of time (can be POSIX)
#' @return a \code{track} class data frame, with three columns: \code{X}, \code{Y} and \code{Time}.
#' @seealso plot.track
#' @examples
#' X <- cumsum(arima.sim(n=100, model=list(ar=0.8)))
#' Y <- cumsum(arima.sim(n=100, model=list(ar=0.8)))
#' Time <- 1:100
#' mytrack <- MakeTrack(X,Y,Time)
#' plot(mytrack)
MakeTrack <- function(X, Y, Time)
{
MyTrack <- data.frame(Time = Time, X = X, Y = Y)
class(MyTrack) <- c("track", "data.frame")
return(MyTrack)
} | /R/MakeTrack.R | no_license | cran/bcpa | R | false | false | 787 | r | #' Make Track
#'
#' Simple convenience function for creating a \code{track} class object from X-Y-Time movement data. A \code{track} class object can be conveniently plotted and analyzed within \code{bcpa}.
#'
#' @param X vector of X locations
#' @param Y vector of Y locations
#' @param Time vector of time (can be POSIX)
#' @return a \code{track} class data frame, with three columns: \code{X}, \code{Y} and \code{Time}.
#' @seealso plot.track
#' @examples
#' X <- cumsum(arima.sim(n=100, model=list(ar=0.8)))
#' Y <- cumsum(arima.sim(n=100, model=list(ar=0.8)))
#' Time <- 1:100
#' mytrack <- MakeTrack(X,Y,Time)
#' plot(mytrack)
MakeTrack <- function(X, Y, Time)
{
MyTrack <- data.frame(Time = Time, X = X, Y = Y)
class(MyTrack) <- c("track", "data.frame")
return(MyTrack)
} |
#' original Marge for R
#'
#' Deprecated. A function for people who type too fast for their own good and perform a lot
#' of merges in R.
#'
#' Kept only for backwards compatibility; use \code{marge()} instead. Prints
#' a tiny ASCII smiley to the console and returns \code{NULL} invisibly
#' (the return value of \code{cat()}).
#' @param ... Ignored; accepted so old call sites keep working.
#' @export
marge.og <- function(...) {
cat("@@@:-)\n")
}
#' Marge for R
#'
#' A function for people who type too fast for their own good and perform a lot
#' of merges in R. Prints ASCII-art Marge saying `speak` to the console.
#' @param speak What should Marge say? Defaults to "Shut up, Becky!"
#' @param ... Ignored; accepted so stray extra arguments do not error.
#' @export
marge <- function(speak = "Shut up, Becky!", ...) {
  # Require a single, non-NA string. The old check (`is.character()` only)
  # let character vectors of length > 1 through, which vectorized the
  # sprintf() below and printed the picture multiple times.
  if (!is.character(speak) || length(speak) != 1 || is.na(speak)) {
    speak <- "Shut up, Becky!"
  }
  # %s is the slot next to Marge's mouth where `speak` is interpolated.
  margeimg <- "
OOOO
OOOOOOOOO
OOOOOOOOOOOO
OOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOO \\_| \\_|O
OOOO \\/ \\/ \\
OOOO (o )o )
O/c \\__/ --.
O\\_ , -'
O| '\\_______) %s
| _)
| |
OOOOOOO
/ \\
[http://www.asciiworld.com/-Simpsons-.html]
"
  cat(sprintf(margeimg, speak))
}
| /R/marge.R | no_license | mfoos/maRge | R | false | false | 1,039 | r | #' original Marge for R
#'
#' Deprecated. A function for people who type too fast for their own good and perform a lot
#' of merges in R.
#' @export
marge.og <- function(...) {
cat("@@@:-)\n")
}
#' Marge for R
#'
#' A function for people who type too fast for their own good and perform a lot
#' of merges in R.
#' @param speak What should Marge say? Defaults to "Shut up, Becky!"
#' @export
marge <- function(speak = "Shut up, Becky!", ...) {
if (!is.character(speak)) {
speak <- "Shut up, Becky!"
}
margeimg <- "
OOOO
OOOOOOOOO
OOOOOOOOOOOO
OOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOOOOOOOOOOO
OOOOO \\_| \\_|O
OOOO \\/ \\/ \\
OOOO (o )o )
O/c \\__/ --.
O\\_ , -'
O| '\\_______) %s
| _)
| |
OOOOOOO
/ \\
[http://www.asciiworld.com/-Simpsons-.html]
"
cat(sprintf(margeimg, speak))
}
|
#!/usr/bin/env Rscript
## transcript produced by Segtools 1.1.14
## Locate segtools' bundled R sources by asking the installed Python package.
## NOTE(review): the -c payload uses a Python 2 print statement; this will
## fail under Python 3 -- confirm which interpreter "python" resolves to.
segtools.r.dirname <-
system2("python",
c("-c", "'import segtools; print segtools.get_r_dirname()'"),
stdout = TRUE)
source(file.path(segtools.r.dirname, 'common.R'))
source(file.path(segtools.r.dirname, 'overlap.R'))
## Render the overlap performance table and heatmap from overlap.tab,
## relabelling rows/columns using the two mnemonic files. clobber = FALSE
## means existing outputs are not overwritten.
save.overlap.performance('./MCseq30bp50bp/overlap-ordered', 'overlap.performance', './MCseq30bp50bp/overlap-ordered/overlap.tab', row.normalize = 'FALSE', mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics1_new.txt', clobber = FALSE, col_mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics2_new.txt')
save.overlap.heatmap('./MCseq30bp50bp/overlap-ordered', 'overlap', './MCseq30bp50bp/overlap-ordered/overlap.tab', clobber = FALSE, col_mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics2_new.txt', cluster = FALSE, max_contrast = FALSE, row.normalize = 'FALSE', mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics1_new.txt')
| /notebook/stablemarriage/MCseq30bp50bp/overlap-ordered/overlap.R | no_license | xwinxu/sequencing-resolution-analysis | R | false | false | 958 | r | #!/usr/bin/env Rscript
## transcript produced by Segtools 1.1.14
segtools.r.dirname <-
system2("python",
c("-c", "'import segtools; print segtools.get_r_dirname()'"),
stdout = TRUE)
source(file.path(segtools.r.dirname, 'common.R'))
source(file.path(segtools.r.dirname, 'overlap.R'))
save.overlap.performance('./MCseq30bp50bp/overlap-ordered', 'overlap.performance', './MCseq30bp50bp/overlap-ordered/overlap.tab', row.normalize = 'FALSE', mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics1_new.txt', clobber = FALSE, col_mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics2_new.txt')
save.overlap.heatmap('./MCseq30bp50bp/overlap-ordered', 'overlap', './MCseq30bp50bp/overlap-ordered/overlap.tab', clobber = FALSE, col_mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics2_new.txt', cluster = FALSE, max_contrast = FALSE, row.normalize = 'FALSE', mnemonic_file = './MCseq30bp50bp/overlap-direct/mnemonics1_new.txt')
|
# Example: interrupting and resuming a file-backed MCMC run.
# InitMcmc/LoadMcmc/Resume come from the 'overture' package; samples are
# written to disk under backing.path, which is what makes resuming possible.
# Setup the MCMC
n.iter <- 5
SampleX <- function(x) x + 1 # toy deterministic "sampler"
backing.path <- tempfile()
dir.create(backing.path)
x <- 0
interrupt.mcmc <- TRUE
Mcmc <- InitMcmc(n.iter, backing.path=backing.path)
# Interrupt the MCMC during the third iteration
# (wrapped in try() so this script continues after the simulated break)
try({
samps <- Mcmc({
x <- SampleX(x)
if(x==3 && interrupt.mcmc) break
})
}, silent=TRUE)
# The sampling is incomplete: only iterations before the break were saved
samps <- LoadMcmc(backing.path)
samps$x[,]
rm(samps)
# Resume the MCMC from where it was interrupted
interrupt.mcmc <- FALSE
samps <- Resume(backing.path)
# All samples are available
samps$x[,]
| /examples/example-Resume.R | no_license | kurtis-s/overture | R | false | false | 551 | r | # Setup the MCMC
n.iter <- 5
SampleX <- function(x) x + 1
backing.path <- tempfile()
dir.create(backing.path)
x <- 0
interrupt.mcmc <- TRUE
Mcmc <- InitMcmc(n.iter, backing.path=backing.path)
# Interrupt the MCMC during the third iteration
try({
samps <- Mcmc({
x <- SampleX(x)
if(x==3 && interrupt.mcmc) break
})
}, silent=TRUE)
# The sampling is incomplete
samps <- LoadMcmc(backing.path)
samps$x[,]
rm(samps)
# Resume the MCMC
interrupt.mcmc <- FALSE
samps <- Resume(backing.path)
# All samples are available
samps$x[,]
|
# Test beta_part code with functional diversity and phylogenetic diversity
# Also try to deal with issues where a single faulty plot will make the multisite index NA.
library(betapart)
# Load some FIA data to test
load('/mnt/research/nasabio/data/fia/mats/newmat_100000.r')
load('/mnt/research/nasabio/data/fia/fiaworkspace2.r')
traits_imputed <- read.csv('/mnt/research/nasabio/data/fia/traits_imputed_22aug.csv', stringsAsFactors = FALSE, row.names = 1)
trait_pca <- prcomp(traits_imputed[,c('SLA','SSD','Seed.dry.mass','Plant.lifespan')], scale = TRUE, center = TRUE)
# Partition multi-site beta diversity with the 'betapart' package for
# taxonomic (always), functional (FD), and phylogenetic (PD) dimensions,
# optionally adding abundance-based partitions and pairwise dissimilarities.
#
# Args:
#   m            site x species abundance matrix (species as columns).
#   abundance    also compute abundance-based (bray/ruzicka) partitions?
#   pairwise     also return pairwise dissimilarity objects?
#   index_family 'sorensen' or 'jaccard' (see betapart).
#   TD, FD, PD   flags for the diversity dimensions (TD is always computed).
#   trait_mat    species x trait matrix with species as rownames (FD only).
#   phylo_tree   phylo object whose tips match colnames(m) (PD only).
#
# Returns a data frame of the three partition components (replacement,
# nestedness, total) per requested dimension, or -- when pairwise = TRUE --
# a list whose first element is that data frame, followed by the pairwise
# distance objects in the order: presence, abundance, FD, PD.
beta_part <- function(m, abundance=TRUE, pairwise=FALSE, index_family='sorensen', TD=TRUE, FD=FALSE, PD=FALSE, trait_mat = NULL, phylo_tree = NULL) {
  require(betapart)
  # Presence/absence version of the community matrix.
  m_bin <- m
  m_bin[m_bin > 0] <- 1
  # Abundance-based analogue of the chosen incidence family.
  index_family_abund <- ifelse(index_family == 'sorensen', 'bray', 'ruzicka')
  # Precompute shared quantities, then the multi-site partitions.
  core_presence <- betapart.core(m_bin)
  beta_presence <- unlist(beta.multi(core_presence, index.family = index_family))
  if (abundance) {
    core_abundance <- betapart.core.abund(m)
    beta_abundance <- unlist(beta.multi.abund(core_abundance, index.family = index_family_abund))
  }
  if (FD) {
    # Keep only trait rows for species actually present in the matrix.
    trait_mat <- trait_mat[dimnames(trait_mat)[[1]] %in% dimnames(m_bin)[[2]], ]
    core_func <- functional.betapart.core(m_bin, traits = as.matrix(trait_mat))
    beta_func <- unlist(functional.beta.multi(core_func, index.family = index_family))
  }
  if (PD) {
    core_phy <- phylo.betapart.core(m_bin, tree = phylo_tree)
    beta_phy <- unlist(phylo.beta.multi(core_phy, index.family = index_family))
  }
  # Pairwise dissimilarities, reusing the precomputed core objects.
  if (pairwise) {
    beta_presence_pair <- beta.pair(core_presence, index.family = index_family)
    if (abundance) {
      beta_abundance_pair <- beta.pair.abund(core_abundance, index.family = index_family_abund)
    }
    if (FD) {
      beta_func_pair <- functional.beta.pair(core_func, traits = trait_mat, index.family = index_family)
    }
    if (PD) {
      # BUG FIX: this was assigned to 'beta_phylo_pair' but appended to the
      # result below as 'beta_phy_pair', which raised an "object not found"
      # error whenever PD = TRUE and pairwise = TRUE.
      beta_phy_pair <- phylo.beta.pair(core_phy, tree = phylo_tree, index.family = index_family)
    }
  }
  # Assemble the multi-site results table, one dimension at a time.
  res <- data.frame(index = rep(index_family, 3),
                    diversity = 'taxonomic',
                    partition = c('replacement','nestedness','total'),
                    abundance = FALSE,
                    beta = beta_presence)
  if (abundance) {
    res_abund <- data.frame(index = rep(index_family_abund, 3),
                            diversity = 'taxonomic',
                            partition = c('replacement','nestedness','total'),
                            abundance = TRUE,
                            beta = beta_abundance)
    res <- rbind(res, res_abund)
  }
  if (FD) {
    res_func <- data.frame(index = rep(index_family, 3),
                           diversity = 'functional',
                           partition = c('replacement','nestedness','total'),
                           abundance = FALSE,
                           beta = beta_func)
    res <- rbind(res, res_func)
  }
  if (PD) {
    res_phy <- data.frame(index = rep(index_family, 3),
                          diversity = 'phylogenetic',
                          partition = c('replacement','nestedness','total'),
                          abundance = FALSE,
                          beta = beta_phy)
    res <- rbind(res, res_phy)
  }
  if (!pairwise) return(res)
  # Pairwise mode: return a list with the summary table first.
  res <- list(res, beta_presence_pair)
  if (abundance) res[[length(res) + 1]] <- beta_abundance_pair
  if (FD) res[[length(res) + 1]] <- beta_func_pair
  if (PD) res[[length(res) + 1]] <- beta_phy_pair
  return(res)
}
######################################
# Test beta_part on a random sample of FIA plot matrices.
set.seed(444)
idx_test <- sample(length(all_mats), 10)
# Single-site smoke test.
# NOTE(review): mat_p is not defined until the loop below has run, so this
# line fails in a fresh session unless a mat_p is left over in the workspace.
beta_part(m = mat_p[1:20,], abundance = TRUE, pairwise = FALSE, index_family = 'sorensen', TD=TRUE, PD=TRUE, FD=FALSE, phylo_tree = pnwphylo)
# Preallocate the result list instead of growing it inside the loop.
fia_list <- vector("list", length(idx_test))
# Placeholder rows returned for plots that cannot be analyzed.
null_result <- data.frame(index = rep(c('sorensen','bray','sorensen'), each=3),
                          diversity = rep(c('taxonomic','phylogenetic'), times=c(6,3)),
                          partition = c('replacement', 'nestedness', 'total'),
                          abundance = rep(c(FALSE, TRUE, FALSE), each=3),
                          beta = NA)
pb <- txtProgressBar(0, length(idx_test), style=3)
for (p in seq_along(idx_test)) {
  setTxtProgressBar(pb, p)
  mat_p <- all_mats[[idx_test[p]]]
  if(inherits(mat_p, 'matrix')) {
    if(nrow(mat_p) > 1 & ncol(mat_p) > 1) {
      # Keep only species present in the phylogeny (drops unknown species).
      mat_p <- mat_p[, dimnames(mat_p)[[2]] %in% pnwphylo$tip.label, drop = FALSE]
      beta_p <- beta_part(m = mat_p, abundance = TRUE, pairwise = FALSE, index_family = 'sorensen', TD=TRUE, PD=TRUE, FD=FALSE, phylo_tree = pnwphylo)
      fia_list[[p]] <- cbind(nneighb = nrow(mat_p) - 1, beta_p)
    }
    else {
      # Degenerate matrix (single row/column): record the NA placeholder.
      fia_list[[p]] <- cbind(nneighb = NA, null_result)
    }
  }
  else {
    # Entry was not a matrix at all: record the NA placeholder.
    fia_list[[p]] <- cbind(nneighb = NA, null_result)
  }
}
close(pb)
| /OLD/betadiversity/testbetapart.r | no_license | qdread/nasabio | R | false | false | 5,136 | r | # Test beta_part code with functional diversity and phylogenetic diversity
# Also try to deal with issues where a single faulty plot will make the multisite index NA.
library(betapart)
# Load some FIA data to test
load('/mnt/research/nasabio/data/fia/mats/newmat_100000.r')
load('/mnt/research/nasabio/data/fia/fiaworkspace2.r')
traits_imputed <- read.csv('/mnt/research/nasabio/data/fia/traits_imputed_22aug.csv', stringsAsFactors = FALSE, row.names = 1)
trait_pca <- prcomp(traits_imputed[,c('SLA','SSD','Seed.dry.mass','Plant.lifespan')], scale = TRUE, center = TRUE)
beta_part <- function(m, abundance=TRUE, pairwise=FALSE, index_family='sorensen', TD=TRUE, FD=FALSE, PD=FALSE, trait_mat = NULL, phylo_tree = NULL) {
require(betapart)
m_bin <- m
m_bin[m_bin > 0] <- 1
index_family_abund <- ifelse(index_family == 'sorensen', 'bray', 'ruzicka')
# Do precalculation.
core_presence <- betapart.core(m_bin)
# Calculate metrics.
beta_presence <- unlist(beta.multi(core_presence, index.family = index_family))
if (abundance) {
core_abundance <- betapart.core.abund(m)
beta_abundance <- unlist(beta.multi.abund(core_abundance, index.family = index_family_abund))
}
if (FD) {
trait_mat <- trait_mat[dimnames(trait_mat)[[1]] %in% dimnames(m_bin)[[2]], ]
core_func <- functional.betapart.core(m_bin, traits = as.matrix(trait_mat))
beta_func <- unlist(functional.beta.multi(core_func, index.family = index_family))
}
if (PD) {
core_phy <- phylo.betapart.core(m_bin, tree = phylo_tree)
beta_phy <- unlist(phylo.beta.multi(core_phy, index.family = index_family))
}
# Calculate pairwise metrics if needed.
if (pairwise) {
beta_presence_pair <- beta.pair(core_presence, index.family = index_family)
if (abundance) {
beta_abundance_pair <- beta.pair.abund(core_abundance, index.family = index_family_abund)
}
if (FD) {
beta_func_pair <- functional.beta.pair(core_func, traits = trait_mat, index.family = index_family)
}
if (PD) {
beta_phylo_pair <- phylo.beta.pair(core_phy, tree = phylo_tree, index.family = index_family)
}
}
# Combine and return results.
res <- data.frame(index = rep(index_family, 3),
diversity = 'taxonomic',
partition = c('replacement','nestedness','total'),
abundance = FALSE,
beta = beta_presence)
if (abundance) {
res_abund <- data.frame(index = rep(index_family_abund, 3),
diversity = 'taxonomic',
partition = c('replacement','nestedness','total'),
abundance = TRUE,
beta = beta_abundance)
res <- rbind(res, res_abund)
}
if (FD) {
res_func <- data.frame(index = rep(index_family, 3),
diversity = 'functional',
partition = c('replacement','nestedness','total'),
abundance = FALSE,
beta = beta_func)
res <- rbind(res, res_func)
}
if (PD) {
res_phy <- data.frame(index = rep(index_family, 3),
diversity = 'phylogenetic',
partition = c('replacement','nestedness','total'),
abundance = FALSE,
beta = beta_phy)
res <- rbind(res, res_phy)
}
if (!pairwise) return(res)
res <- list(res, beta_presence_pair)
if (abundance) res[[length(res) + 1]] <- beta_abundance_pair
if (FD) res[[length(res) + 1]] <- beta_func_pair
if (PD) res[[length(res) + 1]] <- beta_phy_pair
return(res)
}
######################################
# Test
set.seed(444)
idx_test <- sample(length(all_mats), 10)
# single site test
beta_part(m = mat_p[1:20,], abundance = TRUE, pairwise = FALSE, index_family = 'sorensen', TD=TRUE, PD=TRUE, FD=FALSE, phylo_tree = pnwphylo)
fia_list <- list()
null_result <- data.frame(index = rep(c('sorensen','bray','sorensen'), each=3),
diversity = rep(c('taxonomic','phylogenetic'), times=c(6,3)),
partition = c('replacement', 'nestedness', 'total'),
abundance = rep(c(FALSE, TRUE, FALSE), each=3),
beta = NA)
pb <- txtProgressBar(0, length(idx_test), style=3)
for (p in 1:length(idx_test)) {
setTxtProgressBar(pb, p)
mat_p <- all_mats[[idx_test[p]]]
if(inherits(mat_p, 'matrix')) {
if(nrow(mat_p) > 1 & ncol(mat_p) > 1) {
# Fix the species names to match the phylogeny, and get rid of the unknown species.
mat_p <- mat_p[, dimnames(mat_p)[[2]] %in% pnwphylo$tip.label, drop = FALSE]
beta_p <- beta_part(m = mat_p, abundance = TRUE, pairwise = FALSE, index_family = 'sorensen', TD=TRUE, PD=TRUE, FD=FALSE, phylo_tree = pnwphylo)
fia_list[[p]] <- cbind(nneighb = nrow(mat_p) - 1, beta_p)
}
else {
fia_list[[p]] <- cbind(nneighb = NA, null_result)
}
}
else {
fia_list[[p]] <- cbind(nneighb = NA, null_result)
}
}
close(pb)
|
subroutine dtsort (wav,isort,nchans)
implicit integer*4 (i-n)
###########################################################
#ccc version date: 10/02/85
#ccc author(s): Roger Clark & Jeff Hoover
#ccc language: Ratfor
#ccc
#ccc short description:
#ccc This subroutine makes an array (sort) which
#ccc points to the data array values in increasing
#ccc wavelengths.
#ccc 10/02/85: changed to just call bubble which
#ccc does the same thing, only better.
#ccc
#ccc algorithm description: none
#ccc system requirements: none
#ccc subroutines called:
#ccc none
#ccc argument list description:
#ccc arguments: wav,sort,nchans
#ccc parameter description:
#ccc common description:
#ccc message files referenced:
#ccc internal variables:
#ccc file description:
#ccc user command lines:
#ccc update information:
#ccc NOTES:
#ccc
##########################################################
# wav:    wavelength values (input); isort: output permutation that orders
# wav into increasing wavelength; nchans: number of channels actually used.
# Arrays are fixed at 4864 elements (the specpr channel maximum).
real*4 wav(4864)
integer*4 isort(4864)
# All of the work is delegated to the bubble-sort routine.
call bubble (wav,isort,nchans)
return
end
| /src-local/specpr/src.specpr/fcn01-08/dtsort.r | no_license | ns-bak/tetracorder-tutorial | R | false | false | 1,070 | r | subroutine dtsort (wav,isort,nchans)
implicit integer*4 (i-n)
###########################################################
#ccc version date: 10/02/85
#ccc author(s): Roger Clark & Jeff Hoover
#ccc language: Ratfor
#ccc
#ccc short description:
#ccc This subroutine makes an array (sort) which
#ccc points to the data array values in increasing
#ccc wavelengths.
#ccc 10/02/85: changed to just call bubble which
#ccc does the same thing, only better.
#ccc
#ccc algorithm description: none
#ccc system requirements: none
#ccc subroutines called:
#ccc none
#ccc argument list description:
#ccc arguments: wav,sort,nchans
#ccc parameter description:
#ccc common description:
#ccc message files referenced:
#ccc internal variables:
#ccc file description:
#ccc user command lines:
#ccc update information:
#ccc NOTES:
#ccc
##########################################################
real*4 wav(4864)
integer*4 isort(4864)
call bubble (wav,isort,nchans)
return
end
|
## Started back when by Dan Flynn ##
## Updates by Lizzie starting in early 2018 ##
# Where were the non-leafout cuttings, by species, site, and treatment?
# Impt NOTE: Lizzie did not update all of Dan's code...
# should remove or go through it at some point #
# Flags controlling what this script does:
useshinystan <- FALSE  # launch shinystan diagnostics on the fitted models?
runstan <- FALSE       # refit the Stan models (slow) or load saved fits?
forIsabelle <- TRUE    # also run the six-species subset analysis below
library(scales)  # alpha() for translucent plotting colours
library(gplots)  # for textplot()
library(png)     # readPNG() for the Finn et al. illustrations
library(arm)     # for invlogit
library(rstanarm)
if (useshinystan) {
  library(shinystan)
}
# setwd("~/Documents/git/buds/analyses")
setwd("~/Documents/git/projects/treegarden/budexperiments/analyses")
# Get latest data: newest "Budburst Data" file in ./input (names sort by date).
# FIX: was sort(..., T)[1] -- abbreviated positional T relied on partial
# argument matching; spell out decreasing = TRUE and call dir() only once.
input.files <- dir("./input")
toload <- sort(input.files[grep("Budburst Data", input.files)], decreasing = TRUE)[1]
print(toload)
load(file.path("input", toload))
figpath <- "../docs/ms/images"  # was `=`; use `<-` for assignment
source("source/simpleplot.R")
# Data prep: drop rows with unknown site, then derive a binary budburst
# indicator `no` from bday (day of budburst; NA = never burst bud).
dx <- dx[!is.na(dx$site),] # one Betpap entry has no site, need to check
# Add non-budburst
dx$no <- dx$bday
dx$no[dx$no > 0] <- 1     # any positive budburst day counts as success
dx$no[is.na(dx$no)] <- 0  # FIX: was is.na(...)==TRUE; the ==TRUE is redundant
# Overall non-budburst versus non-leafout rates:
sum(dx$no)
sum(dx$nl)
dim(dx)
# Subset to data that did burst bud
dx.bb <- subset(dx, no==1)
# What percent did not break bud?
(1-sum(dx$no)/nrow(dx))*100
# What percent did not leafout?
(1-sum(dx$nl)/nrow(dx))*100
## A few notes learned while using rstanarm ...
# (1) The hierarchical effects are given as deviations from the global parameters (called the b parameters) so you have to correct those http://discourse.mc-stan.org/t/question-about-hierarchical-effects/3226
# (2) Watch out of factors versus integers! I was posting the dx data (e.g., write.csv(dx, "output/dx.nonleafouts.csv", row.names=TRUE) then dx <- read.csv("output/dx.nonleafouts.csv", header=TRUE)) which reads in warm and photo as INTEGERS and thus you get different answers from the model with those (otherwise identical data) then you get here.
# Fit (runstan=TRUE) the budburst (no) and leafout (nl) logistic models.
if(runstan){
# models (m1.nl, m1.no) with species pooling on chilling and site effects (and their interactions)
m1.no <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(chill + chill*site|sp), family = binomial(link = "logit"), data = dx)
# Understanding models: m1.no
# invlogit(3.007 + -0.96*1) # warm is -0.096; photo is -0.007; chill1 is -0.693, chill2 is -1.506, site is +0.542; QUEALB on intercept: -1.798
# xhere <- -1.506
# invlogit(3.007 + xhere*1)-invlogit(3.007 + xhere*0) # so chill2 increases leafout by 13.5%
m1.nl <- stan_glmer(nl ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(chill + chill*site|sp), family = binomial(link = "logit"), data = dx)
# models (m2.nl, m2.no) with species pooling on intercept only
m2.no <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dx)
m2.nl <- stan_glmer(nl ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dx) # considers all data so includes non-leafouts that did not burst bud and non-leafouts that did, but then did not burst bud
m2.nl.bb <- stan_glmer(nl ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dx.bb) # considers only those that did burst bud, but then did not leafout
# Understanding models: notes for main text (m2 models)
summary(m2.no, digits=3)
# warm is -0.094; photo is -0.008; chill1 is -0.703, chill2 is -1.477, site is +0.420; QUEALB on intercept: -1.649 (highest)
# xhere <- -0.703
# xhere <- -1.477
# xhere <- 0.42
# 100*(invlogit(2.899 + xhere*1)-invlogit(2.899 + xhere*0))
# chill1 decreases BB by 4.8%; chill2 by 14.2%; SH increases BB success by 1.72%
summary(m2.nl, digits=3)
# warm is 0.525; photo is 0.671; chill1 is -0.768, chill2 is -1.752, site is 0.016
# xhere <- 0.525
# xhere <- 0.671
# xhere <- -0.768
# xhere <- -1.752
# 100*(invlogit(1.770 + xhere*1)-invlogit(1.770 + xhere*0))
# warm increases leafout by 5.4%; photo increases leafout by 6.5%; chill1 decreases leafout by 12.3%; chill2 by 35%
summary(m2.nl.bb, digits=3)
# warm is 1.086; photo is 1.269; chill1 is -0.843, chill2 is -1.875, site is -0.293, warm x photo is -1.693
# xhere <- 1.086
# xhere <- 1.269
# xhere <- -0.843
# xhere <- -1.875
# xhere <- -0.293
# xhere <- -1.639
# 100*(invlogit(2.75 + xhere*1)-invlogit(2.75 + xhere*0))
# photo x temp
# 100*(invlogit(2.75 + 1.086+1.269-1.639)-invlogit(2.75))
# warm increases leafout by 3.9%; photo increases leafout by 4.2%; chill1 decreases leafout by 6.9%; chill2 by 23.4%, site decreases leafout by 1.9, overall photo x temp increases leafout by only 3.0%
#m3.no <- stan_glmer(no ~ (warm + photo + chill + site +
# warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
# ((warm + photo + chill + site + warm*photo + warm*chill + photo*chill +
# warm*site + photo*site + chill*site)|sp), family = binomial(link = "logit"), data = dx)
# error occurred during calling the sampler; sampling not done
# Error in check_stanfit(stanfit) :
# Invalid stanfit object produced please report bug
save(m1.no, file="stan/models_nonleafout/m1.no.Rdata")
save(m1.nl, file="stan/models_nonleafout/m1.nl.Rdata")
save(m2.no, file="stan/models_nonleafout/m2.no.Rdata")
save(m2.nl, file="stan/models_nonleafout/m2.nl.Rdata")
# BUG FIX: this previously wrote m2.nl.bb into m2.nl.Rdata, clobbering the
# m2.nl fit saved on the line above; save it to its own file instead.
save(m2.nl.bb, file="stan/models_nonleafout/m2.nl.bb.Rdata")
}
# Load previously-fitted models instead of refitting (runstan=FALSE path).
if(!runstan){
load("stan/models_nonleafout/m1.no.Rdata")
load("stan/models_nonleafout/m1.nl.Rdata")
load("stan/models_nonleafout/m2.no.Rdata")
load("stan/models_nonleafout/m2.nl.Rdata")
# BUG FIX: m2.nl.bb is used below (m2nl.model <- m2.nl.bb) but was never
# re-loaded, so runstan=FALSE failed with "object 'm2.nl.bb' not found".
# Guarded with file.exists() because fits saved by older runs of this
# script wrote m2.nl.bb into m2.nl.Rdata instead of its own file.
if (file.exists("stan/models_nonleafout/m2.nl.bb.Rdata")) {
load("stan/models_nonleafout/m2.nl.bb.Rdata")
}
}
# Optional interactive MCMC diagnostics for the m1 fits.
if(useshinystan){
launch_shinystan(m1.no)
launch_shinystan(m1.nl)
}
##############################
### Plotting for m2 models ###
##############################
## Plotting the models (m2.nl or m2.nl.bb, AND m2.no) with species pooling on chilling and site effects (and their interactions)
## Select an m2 model (see notes above on differences where models are fit)
m2nl.model <- m2.nl.bb
## Below gives the main text figure on LOGIT SCALE
bbpng <- readPNG(file.path(figpath, "Finn_BB.png")) # Illustrations from Finn et al.
lopng <- readPNG(file.path(figpath, "Finn_LO.png"))
# Columns pulled from the rstanarm summary: figures use the 25/75% interval,
# tables use the 2.5/97.5% interval.
col4fig <- c("mean","sd","25%","50%","75%","Rhat")
col4table <- c("mean","sd","2.5%","50%","97.5%","Rhat")
sumer.m2no <- summary(m2.no)
# manually to get right order -- these names must match the coefficient
# names rstanarm generates from the factor levels (warm20, photo12, ...).
mu_params <- c("warm20","photo12","chillchill1","chillchill2","siteSH",
"warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb <- sumer.m2no[mu_params,col4fig]
# Human-readable axis labels, in the same order as mu_params.
# NOTE(review): "Chilling 4°" lacks the trailing "C" the other labels have.
rownames(meanzb) = c("Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
sumer.m2nl <- summary(m2nl.model)
meanzl <- sumer.m2nl[mu_params,col4fig]
rownames(meanzl) <- rownames(meanzb)
## prep the tables and write them out (2.5-97.5% intervals, for the ms)
meanzb.table <- sumer.m2no[mu_params,col4table]
row.names(meanzb.table) <- row.names(meanzb)
meanzl.table <- sumer.m2nl[mu_params,col4table]
row.names(meanzl.table) <- row.names(meanzl)
# BUG FIX: row.names=FALSE discarded the parameter labels assigned just
# above, leaving unlabeled rows of numbers; keep the row names in the CSV.
write.csv(meanzb.table, "output/nonleafout.meanzb.table.csv", row.names=TRUE)
write.csv(meanzl.table, "output/nonleafout.meanzl.table.csv", row.names=TRUE)
## back to the figure ...
# Two-panel coefficient plot (logit scale): budburst (top) and leafout
# (bottom), points = posterior means, bars = 25-75% intervals.
pdf(file.path(figpath, "NonBBLO_m2.pdf"), width = 7, height = 8)
par(mfrow=c(2,1), mar = c(2, 10, 5, 1))
# Upper panel: bud burst
plot(seq(-2.5,
2,
length.out = nrow(meanzb)),
1:nrow(meanzb),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-2.8, y = 3, bty="n", legend = "a. Budburst", text.font = 2)
# rasterImage(bbpng, -3, 1, -2, 4)
# Rows plotted top-to-bottom in mu_params order (nrow:1 reverses the y axis).
axis(2, at = nrow(meanzb):1, labels = rownames(meanzb), las = 1, cex.axis = 0.8)
points(meanzb[,'mean'],
nrow(meanzb):1,
pch = 16,
col = "midnightblue")
# len = 0 draws plain segments (credible intervals), not arrowheads.
arrows(meanzb[,"75%"], nrow(meanzb):1, meanzb[,"25%"], nrow(meanzb):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
# add advance/delay arrows (drawn in the margin, hence xpd=NA)
par(xpd=NA)
arrows(-0.1, 15.5, -2.5, 15.5, len=0.1, col = "black")
legend(-1, 17, legend="delay", bty="n", text.font = 1, cex=0.75)
arrows(0.1, 15.5, 2, 15.5, len=0.1, col = "black")
legend(0.2, 17, legend="advance", bty="n", text.font = 1, cex=0.75)
legend(-0.25, 16.25, legend="0", bty="n", text.font = 1, cex=0.75)
par(xpd=FALSE)
par(mar=c(5, 10, 2, 1))
# Lower panel: leaf-out
plot(seq(-2.5,
2,
length.out = nrow(meanzl)),
1:nrow(meanzl),
type="n",
xlab = "Model estimate change in budburst or leafout success",
ylab = "",
yaxt = "n")
legend(x = -2.8, y = 3, bty="n", legend = "b. Leafout", text.font = 2)
# rasterImage(lopng, -20, 1, -14, 4)
axis(2, at = nrow(meanzl):1, labels = rownames(meanzl), las = 1, cex.axis = 0.8)
points(meanzl[,'mean'],
nrow(meanzl):1,
pch = 16,
col = "midnightblue")
arrows(meanzl[,"75%"], nrow(meanzl):1, meanzl[,"25%"], nrow(meanzl):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
dev.off()
####
### Supplemental figure -- with species-level estimates shown!
# Same coefficient plot as above but overlaying per-species posterior
# estimates (red) on the global estimates (blue). For the m2 models only
# the intercept is pooled by species, so only that row gets species points.
iter.m2no <- as.data.frame(m2.no)
iter.m2nl <- as.data.frame(m2nl.model)
# manually to get right order, with intercept
mu_params.wi <- c("(Intercept)", "warm20","photo12","chillchill1","chillchill2",
"siteSH","warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb.wi <- sumer.m2no[mu_params.wi,col4fig]
rownames(meanzb.wi) = c("Intercept",
"Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
meanzl.wi <- sumer.m2nl[mu_params.wi,col4fig]
rownames(meanzl.wi) <- rownames(meanzb.wi)
speff.bb <- speff.lo <- vector()
params <- c("(Intercept)", "warm20","photo12","chillchill1",
"chillchill2","siteSH", "warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# sp.params: the parameters that vary by species in the m2 fits (intercept
# only); params.wsp indexes their positions in params.
sp.params <- c("(Intercept)")
params.wsp <- c(1)
params.nosp <- c(1:15)[-params.wsp]
pdf(file.path(figpath, "NonBBLO_sp_m2.pdf"), width = 7, height = 8)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
# Upper panel: budburst
# Rows are spaced 5 apart on the y axis to leave room for jittered species points.
plot(seq(-4, #min(meanz[,'mean']*1.1),
6, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzb.wi)),
seq(1, 5*nrow(meanzb.wi), length.out = nrow(meanzb.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "a. Budburst", text.font = 2)
rasterImage(bbpng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzb.wi):1), labels = rownames(meanzb.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
# For each species: pull its b (deviation) draws, add the matching global
# draws (b's are deviations from the global parameters in rstanarm),
# then plot median and 25-75% interval.
for(i in 1:length(unique(dx$sp))){
b.params <- iter.m2no[!is.na(match(colnames(iter.m2no), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m2no[!is.na(match(colnames(iter.m2no), sp.params))]
bplusmain <- b.params
# NOTE(review): loop variable `c` shadows base::c inside this loop body.
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.bb = rbind(speff.bb, t(sp.est[,1]))
}
# Global (pooled) estimates, offset +1 above the species rows.
arrows(meanzb.wi[,"75%"], (5*(nrow(meanzb.wi):1))+1, meanzb.wi[,"25%"], (5*(nrow(meanzb.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzb.wi[,'mean'],
(5*(nrow(meanzb.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
# Lower panel: leafout (same layout, leafout model draws)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
plot(seq(-4, #min(meanz[,'mean']*1.1),
6, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzl.wi)),
seq(1, 5*nrow(meanzl.wi), length.out = nrow(meanzl.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "b. Leafout", text.font = 2)
rasterImage(lopng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzl.wi):1), labels = rownames(meanzl.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dx$sp))){
b.params <- iter.m2nl[!is.na(match(colnames(iter.m2nl), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m2nl[!is.na(match(colnames(iter.m2nl), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.lo = rbind(speff.lo, t(sp.est[,1]))
}
arrows(meanzl.wi[,"75%"], (5*(nrow(meanzl.wi):1))+1, meanzl.wi[,"25%"], (5*(nrow(meanzl.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzl.wi[,'mean'],
(5*(nrow(meanzl.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
dev.off();#system(paste("open", file.path(figpath, "Fig1_bb_lo+sp.pdf"), "-a /Applications/Preview.app"))
##############################
### Plotting for m1 models ###
##############################
## Plotting the models (m1.nl, m1.no) with species pooling on chilling and site effects (and their interactions)
## Below gives the main text figure
# Same two-panel coefficient plot as for m2 but from the m1 fits
# (species pooling on chilling, site, and their interactions).
bbpng <- readPNG(file.path(figpath, "Finn_BB.png")) # Illustrations from Finn et al.
lopng <- readPNG(file.path(figpath, "Finn_LO.png"))
col4fig <- c("mean","sd","25%","50%","75%","Rhat")
sumer.m1no <- summary(m1.no)
# manually to get right order
mu_params <- c("warm20","photo12","chillchill1","chillchill2","siteSH",
"warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb <- sumer.m1no[mu_params,col4fig]
rownames(meanzb) = c("Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
sumer.m1nl <- summary(m1.nl)
meanzl <- sumer.m1nl[mu_params,col4fig]
rownames(meanzl) <- rownames(meanzb)
pdf(file.path(figpath, "NonBBLO.pdf"), width = 7, height = 8)
par(mfrow=c(2,1), mar = c(2, 10, 5, 1))
# Upper panel: bud burst
plot(seq(-2.5,
2,
length.out = nrow(meanzb)),
1:nrow(meanzb),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-2.8, y = 3, bty="n", legend = "a. Budburst", text.font = 2)
# rasterImage(bbpng, -3, 1, -2, 4)
axis(2, at = nrow(meanzb):1, labels = rownames(meanzb), las = 1, cex.axis = 0.8)
points(meanzb[,'mean'],
nrow(meanzb):1,
pch = 16,
col = "midnightblue")
# len = 0: plain interval segments, no arrowheads
arrows(meanzb[,"75%"], nrow(meanzb):1, meanzb[,"25%"], nrow(meanzb):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
# add advance/delay arrows
par(xpd=NA)
arrows(-0.1, 15.5, -2.5, 15.5, len=0.1, col = "black")
legend(-1, 17, legend="delay", bty="n", text.font = 1, cex=0.75)
arrows(0.1, 15.5, 2, 15.5, len=0.1, col = "black")
legend(0.2, 17, legend="advance", bty="n", text.font = 1, cex=0.75)
legend(-0.25, 16.25, legend="0", bty="n", text.font = 1, cex=0.75)
par(xpd=FALSE)
par(mar=c(5, 10, 2, 1))
# Lower panel: leaf-out
plot(seq(-2.5,
2,
length.out = nrow(meanzl)),
1:nrow(meanzl),
type="n",
xlab = "Model estimate change in budburst or leafout success",
ylab = "",
yaxt = "n")
legend(x = -2.8, y = 3, bty="n", legend = "b. Leafout", text.font = 2)
# rasterImage(lopng, -20, 1, -14, 4)
axis(2, at = nrow(meanzl):1, labels = rownames(meanzl), las = 1, cex.axis = 0.8)
points(meanzl[,'mean'],
nrow(meanzl):1,
pch = 16,
col = "midnightblue")
arrows(meanzl[,"75%"], nrow(meanzl):1, meanzl[,"25%"], nrow(meanzl):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
dev.off()
### Supplemental figure -- with species-level estimates shown!
# As for the m2 supplemental figure, but the m1 fits pool chilling, site,
# and chilling x site by species, so six parameter rows get species points.
iter.m1no <- as.data.frame(m1.no)
iter.m1nl <- as.data.frame(m1.nl)
# manually to get right order, with intercept
mu_params.wi <- c("(Intercept)", "warm20","photo12","chillchill1","chillchill2",
"siteSH","warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb.wi <- sumer.m1no[mu_params.wi,col4fig]
rownames(meanzb.wi) = c("Intercept",
"Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
meanzl.wi <- sumer.m1nl[mu_params.wi,col4fig]
rownames(meanzl.wi) <- rownames(meanzb.wi)
speff.bb <- speff.lo <- vector()
params <- c("(Intercept)", "warm20","photo12","chillchill1",
"chillchill2","siteSH", "warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# Species-varying parameters in the m1 fits; params.wsp are their positions
# in params (intercept, chill terms, site, chill x site).
sp.params <- c("(Intercept)", "chillchill1","chillchill2","siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
params.wsp <- c(1, 4:6, 14:15)
params.nosp <- c(1:15)[-params.wsp]
pdf(file.path(figpath, "NonBBLO_sp.pdf"), width = 7, height = 8)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
# Upper panel: budburst
plot(seq(-4, #min(meanz[,'mean']*1.1),
5, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzb.wi)),
seq(1, 5*nrow(meanzb.wi), length.out = nrow(meanzb.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "a. Budburst", text.font = 2)
rasterImage(bbpng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzb.wi):1), labels = rownames(meanzb.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
# Per species: b draws (deviations) + matching global draws = species-level
# posterior; plot median and 25-75% interval, jittered off the global row.
for(i in 1:length(unique(dx$sp))){
b.params <- iter.m1no[!is.na(match(colnames(iter.m1no), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m1no[!is.na(match(colnames(iter.m1no), sp.params))]
bplusmain <- b.params
# NOTE(review): loop variable `c` shadows base::c inside this loop body.
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.bb = rbind(speff.bb, t(sp.est[,1]))
}
arrows(meanzb.wi[,"75%"], (5*(nrow(meanzb.wi):1))+1, meanzb.wi[,"25%"], (5*(nrow(meanzb.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzb.wi[,'mean'],
(5*(nrow(meanzb.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
# Lower panel: leafout
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
plot(seq(-4, #min(meanz[,'mean']*1.1),
5, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzl.wi)),
seq(1, 5*nrow(meanzl.wi), length.out = nrow(meanzl.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "b. Leafout", text.font = 2)
rasterImage(lopng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzl.wi):1), labels = rownames(meanzl.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dx$sp))){
b.params <- iter.m1nl[!is.na(match(colnames(iter.m1nl), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m1nl[!is.na(match(colnames(iter.m1nl), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.lo = rbind(speff.lo, t(sp.est[,1]))
}
arrows(meanzl.wi[,"75%"], (5*(nrow(meanzl.wi):1))+1, meanzl.wi[,"25%"], (5*(nrow(meanzl.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzl.wi[,'mean'],
(5*(nrow(meanzl.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
dev.off();#system(paste("open", file.path(figpath, "Fig1_bb_lo+sp.pdf"), "-a /Applications/Preview.app"))
#############################################################################
# This does some simple models species by species for Isabelle's 6 species ##
# (comment previously said 5, but isaspp below lists six species)
#############################################################################
if(forIsabelle){
isaspp <- c("POPGRA", "ACESAC", "TILAME", "FAGGRA", "BETALL", "QUERUB")
isaspp <- sort(isaspp)
dxisa <- dx[which(dx$sp %in% isaspp),]
# Refit the budburst models on the species subset: intercept-only pooling
# (m2.noisa) and chilling/site pooling (m1.noisa).
m2.noisa <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dxisa)
m1.noisa <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(chill + chill*site|sp), family = binomial(link = "logit"), data = dxisa) # 3 divergent transitions
## summarizing data
library(plyr)
library(dplyr)
# Counts of budburst (no) and leafout (nl) successes per treatment x species
# and per species.
nonsummarywtrt <-
ddply(dxisa, c("warm", "photo", "chill", "sp"), summarise,
sum.no = sum(no),
sum.nl = sum(nl),
total.n = length(no))
nonsummary <-
ddply(dxisa, c("sp"), summarise,
sum.no = sum(no),
sum.nl = sum(nl),
total.n = length(no))
## Plotting the models (m2.nl or m2.nl.bb, AND m2.no) with species pooling on chilling and site effects (and their interactions)
## Below gives the main text figure on LOGIT SCALE
col4fig <- c("mean","sd","25%","50%","75%","Rhat")
sumer.m1.noisa <- summary(m1.noisa)
iter.m1noisa <- as.data.frame(m1.noisa)
# manually to get right order, with intercept
mu_params.wi <- c("(Intercept)", "warm20","photo12","chillchill1","chillchill2",
"siteSH","warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb.wi <- sumer.m1.noisa[mu_params.wi,col4fig]
rownames(meanzb.wi) = c("Intercept",
"Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
speff.bb <- speff.lo <- vector()
params <- c("(Intercept)", "warm20","photo12","chillchill1",
"chillchill2","siteSH", "warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# Species-varying parameters for the m1-style fit used here.
sp.params <- c("(Intercept)", "chillchill1","chillchill2","siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
params.wsp <- c(1, 4:6, 14:15)
params.nosp <- c(1:15)[-params.wsp]
pdf(file.path(figpath, "NonBB_sp_forIsabelle.pdf"), width = 7, height = 8)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
# Upper panel: budburst
plot(seq(-4, #min(meanz[,'mean']*1.1),
5, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzb.wi)),
seq(1, 5*nrow(meanzb.wi), length.out = nrow(meanzb.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "a. Budburst", text.font = 2)
rasterImage(bbpng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzb.wi):1), labels = rownames(meanzb.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
# Species posterior = b (deviation) draws + matching global draws.
for(i in 1:length(unique(dxisa$sp))){
b.params <- iter.m1noisa[!is.na(match(colnames(iter.m1noisa), c(paste("b", "[", sp.params, " sp:",
unique(dxisa$sp)[i], "]", sep=""))))]
main.params <- iter.m1noisa[!is.na(match(colnames(iter.m1noisa), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.bb = rbind(speff.bb, t(sp.est[,1]))
}
arrows(meanzb.wi[,"75%"], (5*(nrow(meanzb.wi):1))+1, meanzb.wi[,"25%"], (5*(nrow(meanzb.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzb.wi[,'mean'],
(5*(nrow(meanzb.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
dev.off()
}
# Everything below this stop() is Dan Flynn's original exploratory code,
# kept for reference but never executed when the script is run top-to-bottom.
# FIX: was stop(print("...")) -- stop() already reports the message, so the
# wrapping print() just emitted it a second time on stdout.
stop("stopping here, below code is original code by Dan Flynn ...", call. = FALSE)
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# <> Dan's analyses ...
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Analysis of where the non-leafout cuttings were
# Cross-tabulate non-leafout cuttings (nl == 0) by species/site/treatment,
# and express counts as proportions of the matching totals in dx.
nlx <- dx[dx$nl == 0,]
summary(nlx)
nl1 <- as.data.frame(table(nlx$sp, nlx$site))
nl2 <- as.data.frame(table(nlx$warm, nlx$photo, nlx$chill))
# proportional to total numbers in each
dl2 <- as.data.frame(table(dx$warm, dx$photo, dx$chill))
nl2$prop <- nl2$Freq/dl2$Freq
nl3 <- as.data.frame(table(nlx$sp, nlx$site,nlx$warm, nlx$photo, nlx$chill))
dl3 <- as.data.frame(table(dx$sp, dx$site, dx$warm, dx$photo, dx$chill))
nl3$prop <- nl3$Freq/dl3$Freq
# 0/0 cells give NaN; treat empty cells as zero non-leafout proportion
nl3$prop[is.nan(nl3$prop)==TRUE] <- 0
names(nl1) <- c("sp","site","freq")
names(nl2) <- c("warm","photo","chill","freq", "prop")
names(nl3) <- c("sp", "site", "warm","photo","chill","freq", "prop")
nl3.nochill <- subset(nl3, chill=="chill0")
nl3.1chill <- subset(nl3, chill=="chill1")
nl3.2chill <- subset(nl3, chill=="chill2")
#
data.frame(sort(with(nl3, tapply(prop, sp, mean)), T))
with(nl3, tapply(prop, chill, mean))
with(nl3, tapply(prop, site, mean))
# make some simple plots
# # makesimpleplot(nl3, c(0, 0.4), "prop", "% non-leafout") # all chilling combined
# makesimpleplot(nl3.nochill, c(0, 0.4), "prop", "% non-leafout")
# makesimpleplot(nl3.1chill, c(0, 0.4), "prop", "% non-leafout")
# makesimpleplot(nl3.2chill, c(0, 0.4), "prop", "% non-leafout")
# Restrict per-species plots to species observed at both sites.
sitespp <- as.data.frame(table(nl3$sp, nl3$site))
sitespp <- subset(sitespp, Freq>0)
sppatsites <- aggregate(sitespp["Var2"], sitespp["Var1"], FUN=length)
sppatbothsites <- subset(sppatsites, Var2>1)
spp <- sppatbothsites$Var1
pdf(file="graphs/simpleplots/nonleafouts_byspp.pdf", 10, 6, paper="a4r", onefile=TRUE)
for (i in c(1:length(spp))){
spdf <- subset(nl3.nochill, sp==spp[i])
makesimpleplot.sp(spdf, c(0, 1), "prop", "% non-leafout", spp[i])
}
dev.off()
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Simple models by species. All predictors now numeric
# Per-species glm of leafout, choosing the formula by which design cells
# the species actually occupies (both sites? any chilling?).
dx$chill <- as.numeric(dx$chill)
dx$site <- as.numeric(dx$site)
dx$photo <- as.numeric(dx$photo)
dx$warm <- as.numeric(dx$warm)
pdf(file="graphs/simpleplots/nonleafouts_byspp_model.pdf", height = 10, width = 10)
par(cex=0.7, xpd=TRUE, xaxt="n")
layout(matrix(c(1, 2, 3, 3), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
for(i in sort(unique(dx$sp))){
makesimpleplot.sp(nl3[nl3$sp ==i,], c(0, 1), "prop", "% non-leafout", i)
# is this species across site and chill?
if(length(unique(dx[dx$sp ==i,"site"])) > 1 & length(unique(dx[dx$sp ==i,"chill"])) > 1) {
mx <- glm(nl ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
}
# Across site but no chill?
if(length(unique(dx[dx$sp ==i,"site"])) > 1 & length(unique(dx[dx$sp ==i,"chill"])) == 1) {
mx <- glm(nl ~ warm + photo + site
+ warm:photo + warm:site + photo:site
+ warm:photo:site
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
}
# One site, no chill?
if(length(unique(dx[dx$sp ==i,"site"])) == 1 & length(unique(dx[dx$sp ==i,"chill"])) == 1) {
mx <- glm(nl ~ warm + photo +
+ warm:photo
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
}
# NOTE(review): a species at one site WITH chilling matches none of the
# three branches above, so textplot() would reuse the previous species' mx
# (or error on the first iteration) -- confirm no species falls in that cell.
textplot(round(coef(summary(mx)),3))
}
dev.off(); system('open graphs/simpleplots/nonleafouts_byspp_model.pdf -a /Applications/Preview.app')
# examination code
# NOTE(review): these two lines rely on `i` left over from the loop above.
with(dx[dx$sp == i,], table(warm, photo, site, chill))
with(dx[dx$sp == i,], tapply(nl, list(warm, photo, site, chill), mean))
# Repeat, with simple model for all. Aronia: 1 nl occured in each of the four combinations of photo and warm, no separation.
# Same per-species idea with a minimal warm x photo model for every species;
# an interaction plot is drawn only when warm:photo has p <= 0.05.
dx$warm <- as.numeric(as.character(dx$warm))
dx$photo <- as.numeric(as.character(dx$photo))
pdf(file="graphs/simpleplots/nonleafouts_byspp_simplemodel.pdf", height = 10, width = 10)
par(cex=0.7, xpd=TRUE, xaxt="n")
layout(matrix(c(1, 2, 3, 4), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
for(i in sort(unique(dx$sp))){
makesimpleplot.sp(nl3[nl3$sp ==i,], c(0, 1), "prop", "% non-leafout", i)
mx <- glm(nl ~ warm + photo
+ warm:photo
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
textplot(round(coef(summary(mx)),3))
# Row 4 of the coefficient table is warm:photo; column 4 its p-value.
if(coef(summary(mx))[4,4] <= 0.05){
with(dx[dx$sp == i,], interaction.plot(warm, photo, nl))
} else { plot(1:10, type = "n", bty = "n", yaxt="n", xaxt="n",ylab="",xlab="") }
}
dev.off(); system('open graphs/simpleplots/nonleafouts_byspp_simplemodel.pdf -a /Applications/Preview.app')
# Focus on site x chill
# Tally sig site * chill effects
# For species observed at both sites with chilling: plot mean +/- sd leafout
# by chill level per site, then fit nl ~ chill * site.
pdf(file="graphs/simpleplots/nonleafouts_sitechill.pdf", height = 10, width = 10)
par(cex=0.7)
layout(matrix(c(1, 2, 3, 4), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
for(i in sort(unique(dx$sp))){
# is this species across site and chill?
if(length(unique(dx[dx$sp ==i,"site"])) > 1 & length(unique(dx[dx$sp ==i,"chill"])) > 1) {
xx <- dx[dx$sp==i,]
means <- with(xx, tapply(nl, list(chill, site), mean, na.rm=T))
sds <- with(xx, tapply(nl, list(chill, site), sd, na.rm=T))
plot(1:3, means[,1], ylim = c(0, 1.25), main = paste(i, "HF"), pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,1]-sds[,1], 1:3, means[,1]+sds[,1], length = 0)
plot(1:3, means[,2], ylim = c(0, 1.25), main = "SH", pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,2]-sds[,2], 1:3, means[,2]+sds[,2], length = 0)
mx <- glm(nl ~ chill * site
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
textplot(round(coef(summary(mx)),3))
if(coef(summary(mx))[4,4] <= 0.05){
with(dx[dx$sp == i,], interaction.plot(chill, site, nl))
} else { plot(1:10, type = "n", bty = "n", yaxt="n", xaxt="n",ylab="",xlab="") }
}
}
dev.off(); system('open graphs/simpleplots/nonleafouts_sitechill.pdf -a /Applications/Preview.app')
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Across species
# Fixed-effects logistic model of leafout success (nl) across all species:
# main effects, most two-way interactions (note: chill:site is not included),
# and all four three-way interactions.
m1 <- glm(nl ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
, family=binomial(link='logit'), data = dx
)
summary(m1)
# across species, with partial pooling
# Mixed-model version of m1: same fixed-effect structure plus a random
# intercept per species (lme4::glmer).
library(lme4)
# Fix: the original call contained a stray empty argument -- "+ (1|sp),"
# was followed by ", family=...", i.e. glmer(formula, , family=..., ...).
# The duplicate comma is removed; all other arguments are unchanged.
m2 <- glmer(nl ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
+ (1|sp)
, family=binomial(link='logit'), data = dx
)
library(sjPlot)
sjp.glmer(m2)               # per-species random-effect plot
sjp.glmer(m2, type = "fe")  # fixed-effect plot
# Basically, we can't say much about nonleafouts. Let's shift to leafouts
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Across species
# Linear model of leafout DAY (lday), fit only to cuttings that leafed out
# (nl == 1); same fixed-effect structure as the logistic models above.
l1 <- lm(lday ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
, data = dx[dx$nl==1,]
)
summary(l1)
summary.aov(l1)
# With partial pooling
# Mixed-model version of l1: random intercept per species (lme4::lmer),
# fit only to cuttings that leafed out (nl == 1).
l2 <- lmer(lday ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
+ (1|sp)
, data = dx[dx$nl==1,]
)
summary(l2)
sjp.lmer(l2)               # per-species random effects (sjPlot)
sjp.lmer(l2, type = "fe")  # fixed effects
# NOTE(review): stray bare symbol removed -- evaluating `interact` at top
# level would throw "object 'interact' not found" and abort a
# non-interactive (Rscript) run. Left here, commented, in case it was a
# truncated call.
# interact
# Within individual species, should match nl story
# Per-species plots for leafout day.
pdf(file="graphs/leafoutday_byspp_simplemodel.pdf", height = 10, width = 10)
par(cex=0.7, xpd=TRUE, xaxt="n")
layout(matrix(c(1, 2, 3, 4), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
for(i in sort(unique(dx$sp))){
xx <- dx[dx$sp==i,]
# NOTE(review): tapply over THREE factors returns a 3-d array, so the
# 2-d indexing below (means[,1], sds[,1]) looks like it will fail with
# "incorrect number of dimensions" -- appears copy-pasted from the
# earlier two-factor (chill, site) version; confirm intended grouping.
means <- with(xx, tapply(lday, list(warm, photo, site), mean, na.rm=T))
sds <- with(xx, tapply(lday, list(warm, photo, site), sd, na.rm=T))
plot(1:3, means[,1], ylim = c(0, 1.25), main = paste(i, "HF"), pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,1]-sds[,1], 1:3, means[,1]+sds[,1], length = 0)
plot(1:3, means[,2], ylim = c(0, 1.25), main = "SH", pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,2]-sds[,2], 1:3, means[,2]+sds[,2], length = 0)
# NOTE(review): this models nl (leafout success), not lday, although the
# section is about leafout day -- confirm which response was intended.
mx <- glm(nl ~ warm + photo
+ warm:photo
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
textplot(round(coef(summary(mx)),3))
if(coef(summary(mx))[4,4] <= 0.05){
with(dx[dx$sp == i,], interaction.plot(warm, photo, nl))
} else { plot(1:10, type = "n", bty = "n", yaxt="n", xaxt="n",ylab="",xlab="") }
}
# NOTE(review): the PDF written above is graphs/leafoutday_byspp_simplemodel.pdf,
# but the open command below points at a different file -- likely copy-paste.
dev.off(); system('open graphs/simpleplots/nonleafouts_byspp_simplemodel.pdf -a /Applications/Preview.app')
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# colors for plotting
# alpha() is from the scales package (semi-transparent fills).
cols = alpha(c("darkseagreen", "deepskyblue", "slateblue"), 0.5)
# NOTE(review): nl2 (per-treatment non-leafout proportions) and nl are not
# defined above this point in the file -- they are built further down
# (Dan's section); this block only runs after those objects exist.
bb <- barplot(nl2$prop*100, ylab = "% of cuttings non-leafout",
ylim = c(0,50),
col = rep(cols, each = 4),
main = "Percent of cuttings in each treatment which failed to leaf out",
xlab = "Treatment combination", sub = "(temp + photo + chill)")
mtext(paste(nl2$warm, nl2$photo, nl2$chill, sep = "\n"), 1, line = -2, padj = 0, at = bb[,1])
# Overall chi-square for independence of warm x photo x chill among
# non-leafouts, plus per-chill-level warm x photo tests.
chi.all <- summary(xtabs(~ warm+photo+chill, data = nl))
for(i in levels(nl$chill)) print(chisq.test(table(nl[nl$chill==i,]$warm, nl[nl$chill==i,]$photo))) # NS within chill1 or chill2, surprisingly
xx <- round(chi.all$statistic,2)
# Annotate the plot with the chi-square statistic and p-value.
text(1, 45, substitute(paste(chi^2,"=", xx), list(xx=round(chi.all$statistic,2))))
text(1, 42, substitute(paste(p,"=", xx), list(xx=round(chi.all$p.value,4))))
dev.print(file = "graphs/nonleafout.pdf", device = pdf, width = 10, height = 6); system('open ./graphs/nonleafout.pdf -a /Applications/Preview.app')
# looping this same figure now for each species separately.
pdf(file="graphs/nonleafout_eachsp.pdf", width = 10, height = 6)
for(i in unique(nl$sp)){
# NOTE(review): nlx is reassigned here, shadowing the earlier non-leafout
# subset of the same name.
nlx <- with(nl[nl$sp==i,], as.data.frame(table(warm, photo, chill)))
# proportional to total numbers in each
dlx <- with(dx[dx$sp==i,], as.data.frame(table(warm, photo, chill)))
nlx$prop <- nlx$Freq/dlx$Freq
bb <- barplot(nlx$prop*100, ylab = "% of cuttings non-leafout",
ylim = c(0,100),
col = rep(cols, each = 4),
main = paste(i, "\n Percent of cuttings in each treatment which failed to leaf out"),
xlab = "Treatment combination")
mtext(paste(nlx$warm, nlx$photo, nlx$chill, sep = "\n"), 1, line = 2, padj = 0, at = bb[,1])
# Drop the chill term from the test when this species saw only one
# chilling level (otherwise xtabs would have an empty margin).
if(length(unique(as.character(nl[nl$sp==i,"chill"])))==1){
chi.all <- summary(xtabs(~ warm+photo, data = nl[nl$sp==i,]))
}
else { chi.all <- summary(xtabs(~ warm+photo+chill, data = nl[nl$sp==i,])) }
xx <- round(chi.all$statistic,2)
text(1, 95, substitute(paste(chi^2,"=", xx, ", df =", df), list(xx=round(chi.all$statistic,2), df = chi.all$parameter)))
text(1, 85, substitute(paste(p,"=", xx), list(xx=round(chi.all$p.value,4))))
}
dev.off(); system('open ./graphs/nonleafout_eachsp.pdf -a /Applications/Preview.app')
# also by species and site
nl1 <- as.data.frame(table(nl$sp, nl$site))
# proportional to total numbers in each
dl1 <- as.data.frame(table(dx$sp, dx$site))
nl1$prop <- nl1$Freq/dl1$Freq
# NOTE(review): the 1:28 / 29:56 split hard-codes 28 species x 2 sites --
# breaks silently if the species list changes.
bb <- barplot(height = rbind(nl1$prop[1:28]*100, nl1$prop[29:56]*100),
beside = T,
legend.text = c("HF","SH"),
args.legend = list(bty="n"),
space = c(0.05, 1),
ylab = "% of cuttings non-leafout",
ylim = c(0,100),
main = "Percent of cuttings in each species which failed to leaf out"
)
# Allow species labels to draw below the plot region.
par(xpd=T)
text(bb[1,], -3, nl1$Var1[1:28], srt = 45, adj = 1, cex = 0.8)
dev.print(file = "graphs/nonleafout_species.pdf", device = pdf, width = 10, height = 5); system('open ./graphs/nonleafout_species.pdf -a /Applications/Preview.app')
# analyze now in logistic framework
# Recode leafout success from leafout day: lday in (0,74] -> 1 (leafed out),
# (74,100] -> 0 (failed). NOTE(review): lday values that are NA, <= 0, or
# > 100 become NA here -- confirm those cases cannot occur.
dx$nl <- as.numeric(as.character(cut(dx$lday, breaks = c(0, 74, 100), labels = c(1, 0)))) # 1: leafed out. 0: failed to leaf out
summary(m1 <- glm(nl ~ warm + photo + chill + site, family=binomial(link='logit'), data = dx)) # overall strong effects of warming, long day, and chilling
# m1 is reassigned: warming-only model, used by the curve() call below.
summary(m1 <- glm(nl ~ as.numeric(warm), family=binomial(link='logit'), data = dx)) # just warming for plotting
plot(as.numeric(dx$warm), dx$nl, xlab="Temperature",
ylab="Probability of Response")
# Overlay the fitted logistic curve from the warming-only model.
curve(predict(m1, data.frame(warm=x), type="resp"),
add=TRUE, col="red")
# Progressively richer fixed-effects models (species, two-way interactions,
# species-by-treatment interactions); results noted inline.
summary(glm(nl ~ warm + photo + chill + site + sp, family=binomial(link='logit'), data = dx))
summary(glm(nl ~ warm + photo + chill + site + sp +
warm:photo + warm:chill + photo:chill,
family=binomial(link='logit'), data = dx))
summary(glm(nl ~ site + sp + sp:site, family=binomial(link='logit'), data = dx)) # no overall site effect, some acesac and franig interax by site, more non-leafouts in HF
summary(glm(nl ~ warm + photo + chill + site + sp +
warm:photo + warm:chill + photo:chill +
warm:sp + photo:sp + chill:sp,
family=binomial(link='logit'), data = dx)) # clear species effects, interax with warm x photo, very few sp-specific responses to warming or photo. Querub improved with chilling.
# ---- file boundary: analyses/Analyzing non-leafouts.R (corpus metadata row removed; not R code) ----
## Updates by Lizzie starting in early 2018 ##
# Where were the non-leafout cuttings, by species, site, and treatement?
# Impt NOTE: Lizzie did not update all of Dan's code...
# should remove or go through it at some point #
# Script flags: runstan refits the Stan models below (slow); otherwise the
# saved fits are loaded from disk. useshinystan opens interactive
# diagnostics; forIsabelle runs the species-subset section at the end.
useshinystan <- FALSE
runstan <- FALSE
forIsabelle <- TRUE
library(scales)
library(gplots) # for textplot()
library(png)
library(arm) # for invlogit
library(rstanarm)
if(useshinystan){
library(shinystan)
}
# setwd("~/Documents/git/buds/analyses")
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("~/Documents/git/projects/treegarden/budexperiments/analyses")
# get latest data
# Pick the newest "Budburst Data" file in ./input (names reverse-sorted).
print(toload <- sort(dir("./input")[grep("Budburst Data", dir('./input'))], T)[1])
load(file.path("input", toload))
figpath = "../docs/ms/images"
source("source/simpleplot.R")
dx <- dx[!is.na(dx$site),] # one Betpap entry has no site, need to check
# Add budburst-success indicator: values of bday > 0 become 1, NAs become 0
# (any other bday values would pass through unchanged, as in the original).
bb_indicator <- dx$bday
bb_indicator[bb_indicator > 0] <- 1
bb_indicator[is.na(bb_indicator)] <- 0
dx$no <- bb_indicator
# Overall counts: budburst successes, leafout successes, total cuttings.
sum(dx$no)
sum(dx$nl)
dim(dx)
# Keep only the cuttings that burst bud.
dx.bb <- subset(dx, no==1)
# Percent of cuttings that failed to break bud.
100*(1-sum(dx$no)/nrow(dx))
# Percent of cuttings that failed to leaf out.
100*(1-sum(dx$nl)/nrow(dx))
# (1) The hierarchical effects are given as deviations from the global parameters (called the b parameters) so you have to correct those http://discourse.mc-stan.org/t/question-about-hierarchical-effects/3226
# (2) Watch out of factors versus integers! I was posting the dx data (e.g., write.csv(dx, "output/dx.nonleafouts.csv", row.names=TRUE) then dx <- read.csv("output/dx.nonleafouts.csv", header=TRUE)) which reads in warm and photo as INTEGERS and thus you get different answers from the model with those (otherwise identical data) then you get here.
# Fit the Bayesian logistic models (rstanarm::stan_glmer). m1.* pool chilling
# and site effects (and their interaction) by species; m2.* pool only the
# intercept. Responses: no = budburst success, nl = leafout success.
if(runstan){
# models (m1.nl, m1.no) with species pooling on chilling and site effects (and their interactions)
m1.no <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(chill + chill*site|sp), family = binomial(link = "logit"), data = dx)
# Understanding models: m1.no
# invlogit(3.007 + -0.96*1) # warm is -0.096; photo is -0.007; chill1 is -0.693, chill2 is -1.506, site is +0.542; QUEALB on intercept: -1.798
# xhere <- -1.506
# invlogit(3.007 + xhere*1)-invlogit(3.007 + xhere*0) # so chill2 increases leafout by 13.5%
m1.nl <- stan_glmer(nl ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(chill + chill*site|sp), family = binomial(link = "logit"), data = dx)
# models (m2.nl, m2.no) with species pooling on intercept only
m2.no <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dx)
m2.nl <- stan_glmer(nl ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dx) # considers all data so includes non-leafouts that did not burst bud and non-leafouts that did, but then did not burst bud
m2.nl.bb <- stan_glmer(nl ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dx.bb) # considers only those that did burst bud, but then did not leafout
# Understanding models: notes for main text (m2 models)
summary(m2.no, digits=3)
# warm is -0.094; photo is -0.008; chill1 is -0.703, chill2 is -1.477, site is +0.420; QUEALB on intercept: -1.649 (highest)
# xhere <- -0.703
# xhere <- -1.477
# xhere <- 0.42
# 100*(invlogit(2.899 + xhere*1)-invlogit(2.899 + xhere*0))
# chill1 decreases BB by 4.8%; chill2 by 14.2%; SH increases BB success by 1.72%
summary(m2.nl, digits=3)
# warm is 0.525; photo is 0.671; chill1 is -0.768, chill2 is -1.752, site is 0.016
# xhere <- 0.525
# xhere <- 0.671
# xhere <- -0.768
# xhere <- -1.752
# 100*(invlogit(1.770 + xhere*1)-invlogit(1.770 + xhere*0))
# warm increases leafout by 5.4%; photo increases leafout by 6.5%; chill1 decreases leafout by 12.3%; chill2 by 35%
summary(m2.nl.bb, digits=3)
# warm is 1.086; photo is 1.269; chill1 is -0.843, chill2 is -1.875, site is -0.293, warm x photo is -1.693
# xhere <- 1.086
# xhere <- 1.269
# xhere <- -0.843
# xhere <- -1.875
# xhere <- -0.293
# xhere <- -1.639
# 100*(invlogit(2.75 + xhere*1)-invlogit(2.75 + xhere*0))
# photo x temp
# 100*(invlogit(2.75 + 1.086+1.269-1.639)-invlogit(2.75))
# warm increases leafout by 3.9%; photo increases leafout by 4.2%; chill1 decreases leafout by 6.9%; chill2 by 23.4%, site decreases leafout by 1.9, overall photo x temp increases leafout by only 3.0%
#m3.no <- stan_glmer(no ~ (warm + photo + chill + site +
# warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
# ((warm + photo + chill + site + warm*photo + warm*chill + photo*chill +
# warm*site + photo*site + chill*site)|sp), family = binomial(link = "logit"), data = dx)
# error occurred during calling the sampler; sampling not done
# Error in check_stanfit(stanfit) :
# Invalid stanfit object produced please report bug
# Persist the fitted models so the !runstan branch can reload them.
save(m1.no, file="stan/models_nonleafout/m1.no.Rdata")
save(m1.nl, file="stan/models_nonleafout/m1.nl.Rdata")
save(m2.no, file="stan/models_nonleafout/m2.no.Rdata")
save(m2.nl, file="stan/models_nonleafout/m2.nl.Rdata")
# Fix: the original saved m2.nl.bb over m2.nl.Rdata, clobbering the m2.nl
# fit just written above; it now gets its own file.
save(m2.nl.bb, file="stan/models_nonleafout/m2.nl.bb.Rdata")
}
if(!runstan){
# Load previously fitted rstanarm models instead of re-sampling.
load("stan/models_nonleafout/m1.no.Rdata")
load("stan/models_nonleafout/m1.nl.Rdata")
load("stan/models_nonleafout/m2.no.Rdata")
load("stan/models_nonleafout/m2.nl.Rdata")
# NOTE(review): m2.nl.bb is not loaded here but is used below
# (m2nl.model <- m2.nl.bb); with runstan=FALSE that assignment fails.
# In the original script m2.nl.bb was even saved over m2.nl.Rdata --
# see the save() calls in the runstan branch.
}
# Interactive MCMC diagnostics (optional).
if(useshinystan){
launch_shinystan(m1.no)
launch_shinystan(m1.nl)
}
##############################
### Plotting for m2 models ###
##############################
## Plotting the models (m2.nl or m2.nl.bb, AND m2.no) with species pooling on chilling and site effects (and their interactions)
## Select an m2 model (see notes above on differences where models are fit)
# NOTE(review): m2.nl.bb only exists when runstan=TRUE in this session (the
# !runstan branch does not load it) -- confirm before running with saved fits.
m2nl.model <- m2.nl.bb
## Below gives the main text figure on LOGIT SCALE
bbpng <- readPNG(file.path(figpath, "Finn_BB.png")) # Illustrations from Finn et al.
lopng <- readPNG(file.path(figpath, "Finn_LO.png"))
# Summary columns used for the figure vs the (wider-interval) CSV tables.
col4fig <- c("mean","sd","25%","50%","75%","Rhat")
col4table <- c("mean","sd","2.5%","50%","97.5%","Rhat")
sumer.m2no <- summary(m2.no)
# manually to get right order
# Fixed-effect parameter names as rstanarm labels them, in display order.
mu_params <- c("warm20","photo12","chillchill1","chillchill2","siteSH",
"warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# Fixed-effect rows for the budburst model (m2.no), in display order, with
# human-readable labels for the figure axes and output tables.
meanzb <- sumer.m2no[mu_params,col4fig]
rownames(meanzb) <- c("Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
# Same rows for the selected leafout model; reuse the labels.
sumer.m2nl <- summary(m2nl.model)
meanzl <- sumer.m2nl[mu_params,col4fig]
rownames(meanzl) <- rownames(meanzb)
## prep the tables and write them out
meanzb.table <- sumer.m2no[mu_params,col4table]
row.names(meanzb.table) <- row.names(meanzb)
meanzl.table <- sumer.m2nl[mu_params,col4table]
row.names(meanzl.table) <- row.names(meanzl)
# Fix: write the predictor labels (row names) into the CSVs -- the original
# used row.names=FALSE, which dropped the labels assigned just above and
# left the tables with no row identifiers.
write.csv(meanzb.table, "output/nonleafout.meanzb.table.csv", row.names=TRUE)
write.csv(meanzl.table, "output/nonleafout.meanzl.table.csv", row.names=TRUE)
## back to the figure ...
# Two-panel dot-and-interval figure (logit scale): fixed-effect estimates
# with 25-75% intervals for budburst (top) and leafout (bottom).
pdf(file.path(figpath, "NonBBLO_m2.pdf"), width = 7, height = 8)
par(mfrow=c(2,1), mar = c(2, 10, 5, 1))
# Upper panel: bud burst
plot(seq(-2.5,
2,
length.out = nrow(meanzb)),
1:nrow(meanzb),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-2.8, y = 3, bty="n", legend = "a. Budburst", text.font = 2)
# rasterImage(bbpng, -3, 1, -2, 4)
axis(2, at = nrow(meanzb):1, labels = rownames(meanzb), las = 1, cex.axis = 0.8)
points(meanzb[,'mean'],
nrow(meanzb):1,
pch = 16,
col = "midnightblue")
# Horizontal 25-75% credible-interval bars.
arrows(meanzb[,"75%"], nrow(meanzb):1, meanzb[,"25%"], nrow(meanzb):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
# add advance/delay arrows
par(xpd=NA)
arrows(-0.1, 15.5, -2.5, 15.5, len=0.1, col = "black")
legend(-1, 17, legend="delay", bty="n", text.font = 1, cex=0.75)
arrows(0.1, 15.5, 2, 15.5, len=0.1, col = "black")
legend(0.2, 17, legend="advance", bty="n", text.font = 1, cex=0.75)
legend(-0.25, 16.25, legend="0", bty="n", text.font = 1, cex=0.75)
par(xpd=FALSE)
par(mar=c(5, 10, 2, 1))
# Lower panel: leaf-out
plot(seq(-2.5,
2,
length.out = nrow(meanzl)),
1:nrow(meanzl),
type="n",
xlab = "Model estimate change in budburst or leafout success",
ylab = "",
yaxt = "n")
legend(x = -2.8, y = 3, bty="n", legend = "b. Leafout", text.font = 2)
# rasterImage(lopng, -20, 1, -14, 4)
axis(2, at = nrow(meanzl):1, labels = rownames(meanzl), las = 1, cex.axis = 0.8)
points(meanzl[,'mean'],
nrow(meanzl):1,
pch = 16,
col = "midnightblue")
arrows(meanzl[,"75%"], nrow(meanzl):1, meanzl[,"25%"], nrow(meanzl):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
dev.off()
####
### Supplemental figure -- with species-level estimates shown!
# Same dot-and-interval layout as the main figure, but including the
# intercept row and overlaying per-species estimates (red) computed from the
# posterior draws: species deviation ("b" columns) + global parameter.
# For the m2 models only the intercept varies by species (sp.params below).
iter.m2no <- as.data.frame(m2.no)
iter.m2nl <- as.data.frame(m2nl.model)
# manually to get right order, with intercept
mu_params.wi <- c("(Intercept)", "warm20","photo12","chillchill1","chillchill2",
"siteSH","warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb.wi <- sumer.m2no[mu_params.wi,col4fig]
rownames(meanzb.wi) = c("Intercept",
"Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
meanzl.wi <- sumer.m2nl[mu_params.wi,col4fig]
rownames(meanzl.wi) <- rownames(meanzb.wi)
speff.bb <- speff.lo <- vector()
params <- c("(Intercept)", "warm20","photo12","chillchill1",
"chillchill2","siteSH", "warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# Only the intercept is species-varying in the m2 models.
sp.params <- c("(Intercept)")
params.wsp <- c(1)
params.nosp <- c(1:15)[-params.wsp]
pdf(file.path(figpath, "NonBBLO_sp_m2.pdf"), width = 7, height = 8)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
# Upper panel: budburst
plot(seq(-4, #min(meanz[,'mean']*1.1),
6, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzb.wi)),
seq(1, 5*nrow(meanzb.wi), length.out = nrow(meanzb.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "a. Budburst", text.font = 2)
rasterImage(bbpng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzb.wi):1), labels = rownames(meanzb.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dx$sp))){
# Posterior draws of this species' deviation, then add the matching
# global parameter to get the species-level estimate.
b.params <- iter.m2no[!is.na(match(colnames(iter.m2no), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m2no[!is.na(match(colnames(iter.m2no), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.bb = rbind(speff.bb, t(sp.est[,1]))
}
# Global (fixed-effect) estimates on top, in blue.
arrows(meanzb.wi[,"75%"], (5*(nrow(meanzb.wi):1))+1, meanzb.wi[,"25%"], (5*(nrow(meanzb.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzb.wi[,'mean'],
(5*(nrow(meanzb.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
# Lower panel: leafout
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
plot(seq(-4, #min(meanz[,'mean']*1.1),
6, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzl.wi)),
seq(1, 5*nrow(meanzl.wi), length.out = nrow(meanzl.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "b. Leafout", text.font = 2)
rasterImage(lopng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzl.wi):1), labels = rownames(meanzl.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dx$sp))){
b.params <- iter.m2nl[!is.na(match(colnames(iter.m2nl), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m2nl[!is.na(match(colnames(iter.m2nl), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.lo = rbind(speff.lo, t(sp.est[,1]))
}
arrows(meanzl.wi[,"75%"], (5*(nrow(meanzl.wi):1))+1, meanzl.wi[,"25%"], (5*(nrow(meanzl.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzl.wi[,'mean'],
(5*(nrow(meanzl.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
dev.off();#system(paste("open", file.path(figpath, "Fig1_bb_lo+sp.pdf"), "-a /Applications/Preview.app"))
##############################
### Plotting for m1 models ###
##############################
## Plotting the models (m1.nl, m1.no) with species pooling on chilling and site effects (and their interactions)
## Below gives the main text figure
# Same two-panel figure as NonBBLO_m2.pdf but for the m1 models (which pool
# chilling/site effects by species).
bbpng <- readPNG(file.path(figpath, "Finn_BB.png")) # Illustrations from Finn et al.
lopng <- readPNG(file.path(figpath, "Finn_LO.png"))
col4fig <- c("mean","sd","25%","50%","75%","Rhat")
sumer.m1no <- summary(m1.no)
# manually to get right order
mu_params <- c("warm20","photo12","chillchill1","chillchill2","siteSH",
"warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb <- sumer.m1no[mu_params,col4fig]
rownames(meanzb) = c("Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
sumer.m1nl <- summary(m1.nl)
meanzl <- sumer.m1nl[mu_params,col4fig]
rownames(meanzl) <- rownames(meanzb)
pdf(file.path(figpath, "NonBBLO.pdf"), width = 7, height = 8)
par(mfrow=c(2,1), mar = c(2, 10, 5, 1))
# Upper panel: bud burst
plot(seq(-2.5,
2,
length.out = nrow(meanzb)),
1:nrow(meanzb),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-2.8, y = 3, bty="n", legend = "a. Budburst", text.font = 2)
# rasterImage(bbpng, -3, 1, -2, 4)
axis(2, at = nrow(meanzb):1, labels = rownames(meanzb), las = 1, cex.axis = 0.8)
points(meanzb[,'mean'],
nrow(meanzb):1,
pch = 16,
col = "midnightblue")
# 25-75% interval bars.
arrows(meanzb[,"75%"], nrow(meanzb):1, meanzb[,"25%"], nrow(meanzb):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
# add advance/delay arrows
par(xpd=NA)
arrows(-0.1, 15.5, -2.5, 15.5, len=0.1, col = "black")
legend(-1, 17, legend="delay", bty="n", text.font = 1, cex=0.75)
arrows(0.1, 15.5, 2, 15.5, len=0.1, col = "black")
legend(0.2, 17, legend="advance", bty="n", text.font = 1, cex=0.75)
legend(-0.25, 16.25, legend="0", bty="n", text.font = 1, cex=0.75)
par(xpd=FALSE)
par(mar=c(5, 10, 2, 1))
# Lower panel: leaf-out
plot(seq(-2.5,
2,
length.out = nrow(meanzl)),
1:nrow(meanzl),
type="n",
xlab = "Model estimate change in budburst or leafout success",
ylab = "",
yaxt = "n")
legend(x = -2.8, y = 3, bty="n", legend = "b. Leafout", text.font = 2)
# rasterImage(lopng, -20, 1, -14, 4)
axis(2, at = nrow(meanzl):1, labels = rownames(meanzl), las = 1, cex.axis = 0.8)
points(meanzl[,'mean'],
nrow(meanzl):1,
pch = 16,
col = "midnightblue")
arrows(meanzl[,"75%"], nrow(meanzl):1, meanzl[,"25%"], nrow(meanzl):1,
len = 0, col = "black")
abline(v = 0, lty = 3)
dev.off()
### Supplemental figure -- with species-level estimates shown!
# m1 version of the species-level supplemental figure: per-species posterior
# estimates (red) for the parameters that vary by species in m1 (intercept,
# chilling, site, and chilling x site), overlaid on the global estimates.
iter.m1no <- as.data.frame(m1.no)
iter.m1nl <- as.data.frame(m1.nl)
# manually to get right order, with intercept
mu_params.wi <- c("(Intercept)", "warm20","photo12","chillchill1","chillchill2",
"siteSH","warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb.wi <- sumer.m1no[mu_params.wi,col4fig]
rownames(meanzb.wi) = c("Intercept",
"Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
meanzl.wi <- sumer.m1nl[mu_params.wi,col4fig]
rownames(meanzl.wi) <- rownames(meanzb.wi)
speff.bb <- speff.lo <- vector()
params <- c("(Intercept)", "warm20","photo12","chillchill1",
"chillchill2","siteSH", "warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# Parameters that vary by species in m1, and their row positions in params.
sp.params <- c("(Intercept)", "chillchill1","chillchill2","siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
params.wsp <- c(1, 4:6, 14:15)
params.nosp <- c(1:15)[-params.wsp]
pdf(file.path(figpath, "NonBBLO_sp.pdf"), width = 7, height = 8)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
# Upper panel: budburst
plot(seq(-4, #min(meanz[,'mean']*1.1),
5, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzb.wi)),
seq(1, 5*nrow(meanzb.wi), length.out = nrow(meanzb.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "a. Budburst", text.font = 2)
rasterImage(bbpng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzb.wi):1), labels = rownames(meanzb.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dx$sp))){
# Species deviation draws plus matching global parameters.
b.params <- iter.m1no[!is.na(match(colnames(iter.m1no), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m1no[!is.na(match(colnames(iter.m1no), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.bb = rbind(speff.bb, t(sp.est[,1]))
}
# Global (fixed-effect) estimates in blue.
arrows(meanzb.wi[,"75%"], (5*(nrow(meanzb.wi):1))+1, meanzb.wi[,"25%"], (5*(nrow(meanzb.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzb.wi[,'mean'],
(5*(nrow(meanzb.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
# Lower panel: leafout
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
plot(seq(-4, #min(meanz[,'mean']*1.1),
5, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzl.wi)),
seq(1, 5*nrow(meanzl.wi), length.out = nrow(meanzl.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "b. Leafout", text.font = 2)
rasterImage(lopng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzl.wi):1), labels = rownames(meanzl.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dx$sp))){
b.params <- iter.m1nl[!is.na(match(colnames(iter.m1nl), c(paste("b", "[", sp.params, " sp:",
unique(dx$sp)[i], "]", sep=""))))]
main.params <- iter.m1nl[!is.na(match(colnames(iter.m1nl), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzl.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.lo = rbind(speff.lo, t(sp.est[,1]))
}
arrows(meanzl.wi[,"75%"], (5*(nrow(meanzl.wi):1))+1, meanzl.wi[,"25%"], (5*(nrow(meanzl.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzl.wi[,'mean'],
(5*(nrow(meanzl.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
dev.off();#system(paste("open", file.path(figpath, "Fig1_bb_lo+sp.pdf"), "-a /Applications/Preview.app"))
#############################################################################
# This does some simple models species by species for Isabelle's 5 species ##
#############################################################################
# NOTE(review): the header says 5 species but isaspp lists 6 -- confirm.
if(forIsabelle){
isaspp <- c("POPGRA", "ACESAC", "TILAME", "FAGGRA", "BETALL", "QUERUB")
isaspp <- sort(isaspp)
dxisa <- dx[which(dx$sp %in% isaspp),]
# Refit the budburst models on the species subset: intercept-only pooling
# (m2.noisa) and chilling/site pooling (m1.noisa).
m2.noisa <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(1|sp), family = binomial(link = "logit"), data = dxisa)
m1.noisa <- stan_glmer(no ~ (warm + photo + chill + site +
warm*photo + warm*chill + photo*chill + warm*site + photo*site + chill*site) +
(chill + chill*site|sp), family = binomial(link = "logit"), data = dxisa) # 3 divergent transitions
## summarizing data
library(plyr)
library(dplyr)
# Counts of budburst (no) and leafout (nl) successes per treatment x species
# and per species overall.
nonsummarywtrt <-
ddply(dxisa, c("warm", "photo", "chill", "sp"), summarise,
sum.no = sum(no),
sum.nl = sum(nl),
total.n = length(no))
nonsummary <-
ddply(dxisa, c("sp"), summarise,
sum.no = sum(no),
sum.nl = sum(nl),
total.n = length(no))
## Plotting the models (m2.nl or m2.nl.bb, AND m2.no) with species pooling on chilling and site effects (and their interactions)
## Below gives the main text figure on LOGIT SCALE
col4fig <- c("mean","sd","25%","50%","75%","Rhat")
sumer.m1.noisa <- summary(m1.noisa)
iter.m1noisa <- as.data.frame(m1.noisa)
# manually to get right order, with intercept
mu_params.wi <- c("(Intercept)", "warm20","photo12","chillchill1","chillchill2",
"siteSH","warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
meanzb.wi <- sumer.m1.noisa[mu_params.wi,col4fig]
rownames(meanzb.wi) = c("Intercept",
"Forcing Temperature",
"Photoperiod",
"Chilling 4°",
"Chilling 1.5°C",
"Site",
"Forcing x Photoperiod",
"Forcing x Chilling 4°C",
"Forcing x Chilling 1.5°C",
"Photoperiod x Chilling 4°C",
"Photoperiod x Chilling 1.5°C",
"Forcing x Site",
"Photoperiod x Site",
"Site x Chilling 4°C",
"Site x Chilling 1.5°C"
)
speff.bb <- speff.lo <- vector()
params <- c("(Intercept)", "warm20","photo12","chillchill1",
"chillchill2","siteSH", "warm20:photo12",
"warm20:chillchill1","warm20:chillchill2",
"photo12:chillchill1","photo12:chillchill2",
"warm20:siteSH", "photo12:siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
# Species-varying parameters in m1.noisa and their positions in params.
sp.params <- c("(Intercept)", "chillchill1","chillchill2","siteSH",
"chillchill1:siteSH","chillchill2:siteSH")
params.wsp <- c(1, 4:6, 14:15)
params.nosp <- c(1:15)[-params.wsp]
pdf(file.path(figpath, "NonBB_sp_forIsabelle.pdf"), width = 7, height = 8)
par(mfrow=c(1,1), mar = c(2, 10, 2, 1))
# Upper panel: budburst
plot(seq(-4, #min(meanz[,'mean']*1.1),
5, #max(meanz[,'mean']*1.1),
length.out = nrow(meanzb.wi)),
seq(1, 5*nrow(meanzb.wi), length.out = nrow(meanzb.wi)),
type="n",
xlab = "",
ylab = "",
yaxt = "n")
legend(x =-4.75, y = 11, bty="n", legend = "a. Budburst", text.font = 2)
rasterImage(bbpng, -4, 0, -2, 7)
axis(2, at = 5*(nrow(meanzb.wi):1), labels = rownames(meanzb.wi), las = 1, cex.axis = 0.8)
# Plot species levels for each predictor
for(i in 1:length(unique(dxisa$sp))){
# Species deviation draws plus matching global parameters.
b.params <- iter.m1noisa[!is.na(match(colnames(iter.m1noisa), c(paste("b", "[", sp.params, " sp:",
unique(dxisa$sp)[i], "]", sep=""))))]
main.params <- iter.m1noisa[!is.na(match(colnames(iter.m1noisa), sp.params))]
bplusmain <- b.params
for(c in 1:ncol(main.params)){
bplusmain[c] <- b.params[c]+main.params[c]
}
bplusmain.quant <- sapply(bplusmain, FUN = quantile, probs = c(0.25, 0.50, 0.75))
sp.est <- t(bplusmain.quant)
jt <- jitter(0, factor = 40)
arrows(sp.est[,"75%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], sp.est[,"25%"], jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp],
len = 0, col = alpha("firebrick", 0.2))
points(sp.est[,'50%'],
jt+(5*(nrow(meanzb.wi):1)-1)[params.wsp], #[c(3:5,11:12)], # ADJUSTED for just the ranef here
pch = 16,
col = alpha("firebrick", 0.5))
speff.bb = rbind(speff.bb, t(sp.est[,1]))
}
arrows(meanzb.wi[,"75%"], (5*(nrow(meanzb.wi):1))+1, meanzb.wi[,"25%"], (5*(nrow(meanzb.wi):1))+1,
len = 0, col = "black", lwd = 3)
points(meanzb.wi[,'mean'],
(5*(nrow(meanzb.wi):1))+1,
pch = 16,
cex = 1,
col = "midnightblue")
abline(v = 0, lty = 2)
dev.off()
}
# Everything below this stop() is retained legacy code and never runs when the
# script is sourced.
stop(print("stopping here, below code is original code by Dan Flynn ..."))
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# <> Dan's analyses ...
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Analysis of where the non-leafout cuttings were
# Subset to cuttings that failed to leaf out (nl == 0).
nlx <- dx[dx$nl == 0,]
summary(nlx)
# Counts of non-leafouts by species x site, and by treatment combination.
nl1 <- as.data.frame(table(nlx$sp, nlx$site))
nl2 <- as.data.frame(table(nlx$warm, nlx$photo, nlx$chill))
# proportional to total numbers in each
# Rows of nl2 and dl2 align because table() enumerates the same factor-level
# combinations in the same order for both data sets.
dl2 <- as.data.frame(table(dx$warm, dx$photo, dx$chill))
nl2$prop <- nl2$Freq/dl2$Freq
# Same, at the finer species x site x treatment level.
nl3 <- as.data.frame(table(nlx$sp, nlx$site,nlx$warm, nlx$photo, nlx$chill))
dl3 <- as.data.frame(table(dx$sp, dx$site, dx$warm, dx$photo, dx$chill))
nl3$prop <- nl3$Freq/dl3$Freq
# 0/0 cells produce NaN; treat those as zero non-leafout proportion.
# (the `== TRUE` is redundant; is.nan() already returns a logical)
nl3$prop[is.nan(nl3$prop)==TRUE] <- 0
names(nl1) <- c("sp","site","freq")
names(nl2) <- c("warm","photo","chill","freq", "prop")
names(nl3) <- c("sp", "site", "warm","photo","chill","freq", "prop")
# Split by chilling treatment for the per-chill plots below.
nl3.nochill <- subset(nl3, chill=="chill0")
nl3.1chill <- subset(nl3, chill=="chill1")
nl3.2chill <- subset(nl3, chill=="chill2")
#
# Quick interactive summaries: mean non-leafout proportion by species
# (sorted descending; the positional `T` is decreasing=TRUE), chill, and site.
data.frame(sort(with(nl3, tapply(prop, sp, mean)), T))
with(nl3, tapply(prop, chill, mean))
with(nl3, tapply(prop, site, mean))
# make some simple plots
# # makesimpleplot(nl3, c(0, 0.4), "prop", "% non-leafout") # all chilling combined
# makesimpleplot(nl3.nochill, c(0, 0.4), "prop", "% non-leafout")
# makesimpleplot(nl3.1chill, c(0, 0.4), "prop", "% non-leafout")
# makesimpleplot(nl3.2chill, c(0, 0.4), "prop", "% non-leafout")
# Identify species observed at both sites (Var1 = species, Var2 = site count).
sitespp <- as.data.frame(table(nl3$sp, nl3$site))
sitespp <- subset(sitespp, Freq>0)
sppatsites <- aggregate(sitespp["Var2"], sitespp["Var1"], FUN=length)
sppatbothsites <- subset(sppatsites, Var2>1)
spp <- sppatbothsites$Var1
# One simple non-leafout plot per species (no-chill subset only), one page
# each, for species present at both sites (`spp` built above).
# Uses makesimpleplot.sp() defined elsewhere in the project.
# Width/height are now named (they were positional), and seq_along() replaces
# c(1:length(spp)), which would mis-iterate as c(1, 0) if spp were empty.
pdf(file="graphs/simpleplots/nonleafouts_byspp.pdf", width = 10, height = 6, paper="a4r", onefile=TRUE)
for (i in seq_along(spp)){
spdf <- subset(nl3.nochill, sp==spp[i])
makesimpleplot.sp(spdf, c(0, 1), "prop", "% non-leafout", spp[i])
}
dev.off()
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Simple models by species. All predictors now numeric
# NOTE(review): as.numeric() on a factor yields the level codes (1, 2, ...),
# not the original values (e.g. 15/20 degrees). The warm/photo columns are
# re-converted via as.numeric(as.character(...)) further below, which does
# recover the true values — the site/chill columns are not.
dx$chill <- as.numeric(dx$chill)
dx$site <- as.numeric(dx$site)
dx$photo <- as.numeric(dx$photo)
dx$warm <- as.numeric(dx$warm)
# One page per species: plot panel, model-coefficient text panel (layout cell 3
# spans the bottom row).
pdf(file="graphs/simpleplots/nonleafouts_byspp_model.pdf", height = 10, width = 10)
par(cex=0.7, xpd=TRUE, xaxt="n")
layout(matrix(c(1, 2, 3, 3), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
# Per-species logistic models of non-leafout; model formula depends on which
# treatments the species actually experienced.
# NOTE(review): there is no branch for a species at ONE site with MULTIPLE
# chill levels — in that case `mx` keeps the previous iteration's fit and
# textplot() reports the wrong model. Confirm whether that combination can
# occur in dx.
for(i in sort(unique(dx$sp))){
makesimpleplot.sp(nl3[nl3$sp ==i,], c(0, 1), "prop", "% non-leafout", i)
# is this species across site and chill?
if(length(unique(dx[dx$sp ==i,"site"])) > 1 & length(unique(dx[dx$sp ==i,"chill"])) > 1) {
# Full model: all main effects, two-way, and three-way interactions.
mx <- glm(nl ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
}
# Across site but no chill?
if(length(unique(dx[dx$sp ==i,"site"])) > 1 & length(unique(dx[dx$sp ==i,"chill"])) == 1) {
mx <- glm(nl ~ warm + photo + site
+ warm:photo + warm:site + photo:site
+ warm:photo:site
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
}
# One site, no chill?
if(length(unique(dx[dx$sp ==i,"site"])) == 1 & length(unique(dx[dx$sp ==i,"chill"])) == 1) {
# NOTE(review): "photo +\n+ warm:photo" parses as a harmless unary plus;
# the duplicated "+" looks like a leftover edit.
mx <- glm(nl ~ warm + photo +
+ warm:photo
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
}
# Render the rounded coefficient table into the bottom layout panel.
textplot(round(coef(summary(mx)),3))
}
dev.off(); system('open graphs/simpleplots/nonleafouts_byspp_model.pdf -a /Applications/Preview.app')
# examination code
# Interactive spot-checks; note these rely on `i` left over from the last
# iteration of the loop above (i.e. the alphabetically last species).
with(dx[dx$sp == i,], table(warm, photo, site, chill))
with(dx[dx$sp == i,], tapply(nl, list(warm, photo, site, chill), mean))
# Repeat, with simple model for all. Aronia: 1 nl occured in each of the four combinations of photo and warm, no separation.
# as.numeric(as.character(...)) recovers the real treatment values (the earlier
# plain as.numeric() left level codes).
dx$warm <- as.numeric(as.character(dx$warm))
dx$photo <- as.numeric(as.character(dx$photo))
pdf(file="graphs/simpleplots/nonleafouts_byspp_simplemodel.pdf", height = 10, width = 10)
par(cex=0.7, xpd=TRUE, xaxt="n")
layout(matrix(c(1, 2, 3, 4), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
# Minimal model per species: warm, photo, and their interaction only.
for(i in sort(unique(dx$sp))){
makesimpleplot.sp(nl3[nl3$sp ==i,], c(0, 1), "prop", "% non-leafout", i)
mx <- glm(nl ~ warm + photo
+ warm:photo
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
textplot(round(coef(summary(mx)),3))
# [4,4] is the p-value of the warm:photo interaction; draw the interaction
# plot only when it is significant, else leave the panel blank.
if(coef(summary(mx))[4,4] <= 0.05){
with(dx[dx$sp == i,], interaction.plot(warm, photo, nl))
} else { plot(1:10, type = "n", bty = "n", yaxt="n", xaxt="n",ylab="",xlab="") }
}
dev.off(); system('open graphs/simpleplots/nonleafouts_byspp_simplemodel.pdf -a /Applications/Preview.app')
# Focus on site x chill
# Tally sig site * chill effects
pdf(file="graphs/simpleplots/nonleafouts_sitechill.pdf", height = 10, width = 10)
par(cex=0.7)
layout(matrix(c(1, 2, 3, 4), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
for(i in sort(unique(dx$sp))){
# is this species across site and chill?
if(length(unique(dx[dx$sp ==i,"site"])) > 1 & length(unique(dx[dx$sp ==i,"chill"])) > 1) {
xx <- dx[dx$sp==i,]
# Mean and SD of nl per chill x site cell; column 1 = first site (HF),
# column 2 = second site (SH). Assumes three chill levels (x positions 1:3).
means <- with(xx, tapply(nl, list(chill, site), mean, na.rm=T))
sds <- with(xx, tapply(nl, list(chill, site), sd, na.rm=T))
plot(1:3, means[,1], ylim = c(0, 1.25), main = paste(i, "HF"), pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,1]-sds[,1], 1:3, means[,1]+sds[,1], length = 0)
plot(1:3, means[,2], ylim = c(0, 1.25), main = "SH", pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,2]-sds[,2], 1:3, means[,2]+sds[,2], length = 0)
# Logistic model of non-leafout on chill, site, and their interaction
# (chill/site are numeric level codes at this point in the script).
mx <- glm(nl ~ chill * site
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
textplot(round(coef(summary(mx)),3))
# [4,4] = p-value of the chill:site interaction.
if(coef(summary(mx))[4,4] <= 0.05){
with(dx[dx$sp == i,], interaction.plot(chill, site, nl))
} else { plot(1:10, type = "n", bty = "n", yaxt="n", xaxt="n",ylab="",xlab="") }
}
}
dev.off(); system('open ./graphs/nonleafouts_sitechill.pdf -a /Applications/Preview.app')
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Across species
# Pooled logistic model of non-leafout on all four treatments, their two-way
# interactions (except chill:site), and four three-way interactions.
m1 <- glm(nl ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
, family=binomial(link='logit'), data = dx
)
summary(m1)
# across species, with partial pooling
# Same fixed-effect structure as m1 plus a random intercept per species.
# FIX: removed a stray comma after "(1|sp)" that passed an empty positional
# argument into glmer() (it silently landed on the `control` formal).
library(lme4)
m2 <- glmer(nl ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
+ (1|sp)
, family=binomial(link='logit'), data = dx
)
# Random- and fixed-effect visualizations via sjPlot.
library(sjPlot)
sjp.glmer(m2)
sjp.glmer(m2, type = "fe")
# Basically, we can't say much about nonleafouts. Let's shift to leafouts
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# Across species
# Linear model of leafout day, restricted to cuttings that did leaf out
# (nl == 1); same fixed-effect structure as the logistic models above.
l1 <- lm(lday ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
, data = dx[dx$nl==1,]
)
summary(l1)
summary.aov(l1)
# With partial pooling
# Mixed model of leafout day with a random intercept per species, on the
# leafed-out subset.
l2 <- lmer(lday ~ warm + photo + chill + site
+ warm:photo + warm:chill + warm:site
+ photo:chill + photo:site
+ warm:photo:chill
+ warm:photo:site
+ warm:chill:site
+ photo:chill:site
+ (1|sp)
, data = dx[dx$nl==1,]
)
summary(l2)
sjp.lmer(l2)
sjp.lmer(l2, type = "fe")
# FIX: removed a stray bare token `interact` that followed here — it was an
# incomplete expression and would error ("object 'interact' not found") if
# this legacy section were ever executed.
# Within individual species, should match nl story
# Per-species pages of leafout-day means plus a simple warm x photo logistic
# model (copied from the non-leafout loop above).
pdf(file="graphs/leafoutday_byspp_simplemodel.pdf", height = 10, width = 10)
par(cex=0.7, xpd=TRUE, xaxt="n")
layout(matrix(c(1, 2, 3, 4), byrow=T, ncol = 2, nrow = 2), heights = c(3, 2))
for(i in sort(unique(dx$sp))){
xx <- dx[dx$sp==i,]
# NOTE(review): tapply over THREE factors yields a 3-d array, so means[,1]
# below would error with "incorrect number of dimensions"; this looks copied
# from the chill x site loop — presumably list(chill, site) was intended.
# Confirm intent before running; left unchanged here.
means <- with(xx, tapply(lday, list(warm, photo, site), mean, na.rm=T))
sds <- with(xx, tapply(lday, list(warm, photo, site), sd, na.rm=T))
plot(1:3, means[,1], ylim = c(0, 1.25), main = paste(i, "HF"), pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,1]-sds[,1], 1:3, means[,1]+sds[,1], length = 0)
plot(1:3, means[,2], ylim = c(0, 1.25), main = "SH", pch = 16, ylab = "prop leafout", xaxt="n")
axis(1, at=1:3, labels = c("chill0", "chill1","chill2"))
arrows(1:3, means[,2]-sds[,2], 1:3, means[,2]+sds[,2], length = 0)
mx <- glm(nl ~ warm + photo
+ warm:photo
, family=binomial(link='logit'), data = dx[dx$sp == i,]
)
textplot(round(coef(summary(mx)),3))
if(coef(summary(mx))[4,4] <= 0.05){
with(dx[dx$sp == i,], interaction.plot(warm, photo, nl))
} else { plot(1:10, type = "n", bty = "n", yaxt="n", xaxt="n",ylab="",xlab="") }
}
# FIX: open the PDF this block actually wrote (it previously re-opened
# graphs/simpleplots/nonleafouts_byspp_simplemodel.pdf from the earlier loop).
dev.off(); system('open graphs/leafoutday_byspp_simplemodel.pdf -a /Applications/Preview.app')
# <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <>
# colors for plotting
# One color per chill level, recycled across the 4 warm x photo combinations.
cols = alpha(c("darkseagreen", "deepskyblue", "slateblue"), 0.5)
bb <- barplot(nl2$prop*100, ylab = "% of cuttings non-leafout",
ylim = c(0,50),
col = rep(cols, each = 4),
main = "Percent of cuttings in each treatment which failed to leaf out",
xlab = "Treatment combination", sub = "(temp + photo + chill)")
# Three-line treatment labels under each bar.
mtext(paste(nl2$warm, nl2$photo, nl2$chill, sep = "\n"), 1, line = -2, padj = 0, at = bb[,1])
# NOTE(review): `nl` is not defined in this visible section (only nlx/nl1/nl2/
# nl3 are) — presumably the non-leafout subset built elsewhere; confirm
# upstream before running this legacy code.
chi.all <- summary(xtabs(~ warm+photo+chill, data = nl))
for(i in levels(nl$chill)) print(chisq.test(table(nl[nl$chill==i,]$warm, nl[nl$chill==i,]$photo))) # NS within chill1 or chill2, surprisingly
# Annotate the overall chi-squared statistic and p-value on the plot.
xx <- round(chi.all$statistic,2)
text(1, 45, substitute(paste(chi^2,"=", xx), list(xx=round(chi.all$statistic,2))))
text(1, 42, substitute(paste(p,"=", xx), list(xx=round(chi.all$p.value,4))))
dev.print(file = "graphs/nonleafout.pdf", device = pdf, width = 10, height = 6); system('open ./graphs/nonleafout.pdf -a /Applications/Preview.app')
# looping this same figure now for each species separately.
pdf(file="graphs/nonleafout_eachsp.pdf", width = 10, height = 6)
for(i in unique(nl$sp)){
# Non-leafout counts per treatment combination for this species ...
nlx <- with(nl[nl$sp==i,], as.data.frame(table(warm, photo, chill)))
# proportional to total numbers in each
# ... divided by the total counts (rows align via table()'s level ordering).
dlx <- with(dx[dx$sp==i,], as.data.frame(table(warm, photo, chill)))
nlx$prop <- nlx$Freq/dlx$Freq
bb <- barplot(nlx$prop*100, ylab = "% of cuttings non-leafout",
ylim = c(0,100),
col = rep(cols, each = 4),
main = paste(i, "\n Percent of cuttings in each treatment which failed to leaf out"),
xlab = "Treatment combination")
mtext(paste(nlx$warm, nlx$photo, nlx$chill, sep = "\n"), 1, line = 2, padj = 0, at = bb[,1])
# Chi-squared test: drop the chill margin when the species saw only one
# chill level.
if(length(unique(as.character(nl[nl$sp==i,"chill"])))==1){
chi.all <- summary(xtabs(~ warm+photo, data = nl[nl$sp==i,]))
}
else { chi.all <- summary(xtabs(~ warm+photo+chill, data = nl[nl$sp==i,])) }
xx <- round(chi.all$statistic,2)
text(1, 95, substitute(paste(chi^2,"=", xx, ", df =", df), list(xx=round(chi.all$statistic,2), df = chi.all$parameter)))
text(1, 85, substitute(paste(p,"=", xx), list(xx=round(chi.all$p.value,4))))
}
dev.off(); system('open ./graphs/nonleafout_eachsp.pdf -a /Applications/Preview.app')
# also by species and site
nl1 <- as.data.frame(table(nl$sp, nl$site))
# proportional to total numbers in each
dl1 <- as.data.frame(table(dx$sp, dx$site))
nl1$prop <- nl1$Freq/dl1$Freq
# Grouped bars: HF vs SH per species.
# NOTE(review): the 1:28 / 29:56 split hard-codes 28 species; this breaks
# silently if the species list changes.
bb <- barplot(height = rbind(nl1$prop[1:28]*100, nl1$prop[29:56]*100),
beside = T,
legend.text = c("HF","SH"),
args.legend = list(bty="n"),
space = c(0.05, 1),
ylab = "% of cuttings non-leafout",
ylim = c(0,100),
main = "Percent of cuttings in each species which failed to leaf out"
)
# Angled species labels below the axis (xpd=T allows drawing outside the
# plot region).
par(xpd=T)
text(bb[1,], -3, nl1$Var1[1:28], srt = 45, adj = 1, cex = 0.8)
dev.print(file = "graphs/nonleafout_species.pdf", device = pdf, width = 10, height = 5); system('open ./graphs/nonleafout_species.pdf -a /Applications/Preview.app')
# analyze now in logistic framework
# Define the binary leafout outcome from leafout day: day in (0, 74] -> 1
# (leafed out), day in (74, 100] -> 0 (failed). NOTE(review): in this legacy
# section this definition appears AFTER code that already uses dx$nl above;
# presumably dx$nl was defined equivalently upstream — confirm.
dx$nl <- as.numeric(as.character(cut(dx$lday, breaks = c(0, 74, 100), labels = c(1, 0)))) # 1: leafed out. 0: failed to leaf out
summary(m1 <- glm(nl ~ warm + photo + chill + site, family=binomial(link='logit'), data = dx)) # overall strong effects of warming, long day, and chilling
summary(m1 <- glm(nl ~ as.numeric(warm), family=binomial(link='logit'), data = dx)) # just warming for plotting
# Raw points plus the fitted logistic curve for the warming-only model.
plot(as.numeric(dx$warm), dx$nl, xlab="Temperature",
ylab="Probability of Response")
curve(predict(m1, data.frame(warm=x), type="resp"),
add=TRUE, col="red")
# Progressively richer models: + species, + two-way treatment interactions,
# + species x treatment interactions.
summary(glm(nl ~ warm + photo + chill + site + sp, family=binomial(link='logit'), data = dx))
summary(glm(nl ~ warm + photo + chill + site + sp +
warm:photo + warm:chill + photo:chill,
family=binomial(link='logit'), data = dx))
summary(glm(nl ~ site + sp + sp:site, family=binomial(link='logit'), data = dx)) # no overall site effect, some acesac and franig interax by site, more non-leafouts in HF
summary(glm(nl ~ warm + photo + chill + site + sp +
warm:photo + warm:chill + photo:chill +
warm:sp + photo:sp + chill:sp,
family=binomial(link='logit'), data = dx)) # clear species effects, interax with warm x photo, very few sp-specific responses to warming or photo. Querub improved with chilling.
|
# Compare the yearly and seasonal trend patterns and demographic characteristics of fishes
# Take the information in "Demography.Rdata"
# Take the information in "Seine_Best.Rdata" from "c_ModelSelection.R"
# NOTE(review): rm(list=ls()) wipes the user's workspace when sourced —
# conventional in this project's scripts, but hostile in interactive use.
rm(list=ls()) # Clear saved variables
suppressMessages(library("tidyverse"))
suppressMessages(library("ggpubr"))
suppressMessages(library("gridExtra"))
load("Demography.Rdata")
# Flatten the per-species Demography list into one tibble: FishBase estimates
# (temperature preferences, trophic level) plus derived demographic quantities
# (generation time, damping ratio, max lambda, max length, length at maturity).
Demog<-map_df (Demography, ~.x[["FB_ESTIM"]][c("TempPrefMin","TempPrefMax","Troph") ]) %>%
mutate(sci_name=names(Demography),
GT= map_dbl(Demography,~.x[["GT1"]]),
DR= map_dbl(Demography,~.x[["DR1"]]),
Lamb= map_dbl(Demography,~.x[["Lamb"]]),
Linf= map_dbl(Demography,~.x[["Linf"]]),
Lmat= map_dbl(Demography,~.x[["Lmat"]])
)
## Rank-transform all demographic/trait columns so later comparisons are on a
## common (ordinal) scale; NAs keep their positions and ties get average ranks.
## Replaces eight duplicated rank() statements with one loop. `[[` extracts the
## underlying vector (the old Demog[,"col"] form handed rank() a one-column
## tibble and relied on implicit coercion). A dplyr alternative would be
## mutate(across(all_of(rank_cols), ~rank(.x, ...))).
rank_cols <- c("GT", "DR", "Lamb", "Linf", "Lmat",
               "Troph", "TempPrefMax", "TempPrefMin")
for (col in rank_cols) {
  Demog[[col]] <- rank(Demog[[col]], na.last = "keep", ties.method = "average")
}
# Fitted trend curves (fitVal), species list (SP), and best covariate per
# species (COV) come from c_ModelSelection.R.
load("Seine_Best.Rdata")
## Cluster the patterns of seasonal trends (take four clusters)
# One column per species of the (scaled) seasonal fit, then average-linkage
# hierarchical clustering on Euclidean distances between species profiles.
# (mutate_all() is superseded; across(everything(), scale) is the modern form.)
A<-map_dfc(fitVal,~.x[[1]]$fit) %>% mutate_all(scale)
names(A)<-unlist(SP)
distMatrix<-dist(t(A),method="euclidean")
hc<-cutree(hclust(distMatrix, method="average"),k=4)
Season<-tibble(sci_name=names(hc),season_type=hc)
## Cluster the patterns of yearly trends (take three clusters)
# Same procedure on the yearly fits (element [[2]]), with three clusters.
A<-map_dfc(fitVal,~.x[[2]]$fit) %>% mutate_all(scale)
names(A)<-as.character(unlist(SP))
distMatrix<-dist(t(A),method="euclidean")
hc<-cutree(hclust(distMatrix, method="average"),k=3)
Year<-tibble(sci_name=names(hc),year_type=hc)
# Best environmental covariate per species.
COVAR<-tibble(sci_name=unlist(SP),COV=COV)
## The following will only include species that are in both TABLE and Demog
## Demog include fish species only, but COVAR include both fish and inverte
# Left-join covariate + cluster assignments, then inner-join (default merge)
# with the demographic table, which drops the invertebrates.
TABLE<-merge(COVAR,Year,all.x=TRUE)
TABLE<-merge(TABLE,Season,all.x=TRUE)
TABLE<-tibble(merge(TABLE,Demog))
#colSums(is.na(TABLE))
# Treat the covariate and cluster labels as unordered factors for the ANOVAs.
TABLE <- TABLE %>%
mutate(COV=factor(COV, ordered=FALSE), year_type=factor(year_type,ordered=FALSE),season_type=factor(season_type, ordered=FALSE))
# Rename the patterns based on the cluster analysis
# NOTE(review): these relabelings assume the cluster numbering (1..3 / 1..4)
# maps to these patterns in this order — re-check if k or the data change.
levels(TABLE$year_type)<-c("increase","decrease","fluctuate")
levels(TABLE$season_type)<-c("summer","winter","fall","spring")
save("TABLE","Season","Year","fitVal",file='clusters.Rdata')
# Figure "PATTERN_DIST": boxplots of min/max temperature preference (rows)
# against covariate / annual pattern / seasonal pattern groupings (columns),
# annotated with Tukey compact letter displays and Kruskal-Wallis p-values.
ID <- 1                   # running panel index into FIG / panel_label
FIG <- NULL
RESULTS <- matrix(NA, nrow = 3, ncol = 8)
colnames(RESULTS) <- c("TempPrefMin", "TempPrefMax", "Troph",
                       "GT", "DR", "Lamb", "Linf", "Lmat")
rownames(RESULTS) <- c("COV", "year_type", "season_type")
x_lab <- c("environmental variables", "annual pattern", "seasonal pattern")
y_lab <- c("Min Temperature", "Max Temperature")
panel_label <- c("(a)", "(c)", "(e)", "(b)", "(d)", "(f)", "(g)")
for (j in 1:2) {          # response: TempPrefMin, TempPrefMax (cols j+4)
  for (k in 1:3) {        # grouping: COV, year_type, season_type (cols k+1)
    TABLE2 <- TABLE[, c(k + 1, j + 4)]
    # CREATE COMPACT LETTER DISPLAY and PUT THEM into XLABEL STRING "LAB".
    # do.call() builds mcp(<group var> = "Tukey") from the column name,
    # replacing the previous eval(parse(text = ...)) construction.
    myform <- as.formula(sprintf("%s ~ %s", names(TABLE2)[2], names(TABLE2)[1]))
    mod <- aov(myform, data = TABLE2)
    tuk <- multcomp::glht(mod,
                          linfct = do.call(multcomp::mcp,
                                           setNames(list("Tukey"), names(TABLE2)[1])))
    tuk.cld <- multcomp::cld(tuk)
    LAB <- paste(names(tuk.cld$mcletters$Letters), " (", tuk.cld$mcletters$Letters, ")", sep = "")
    if (ID %in% c(1, 4)) {
      # Covariate panels get readable abbreviations instead of factor levels.
      LAB <- paste(c('CON','OXY','MSL','SAL','TMP'), " (", tuk.cld$mcletters$Letters, ")", sep = "")
    }
    ## PRODUCE BOXPLOT (.data pronoun replaces the deprecated aes_string())
    FIG[[ID]] <- ggplot(TABLE, aes(x = .data[[names(TABLE2)[1]]], y = .data[[names(TABLE2)[2]]])) +
      geom_boxplot() +
      stat_compare_means(method = "kruskal.test") +
      scale_x_discrete(labels = LAB) +
      labs(title = panel_label[ID], x = x_lab[k], y = y_lab[j]) +
      theme_classic()
    ID <- ID + 1
  }
}
# Arrange so panels read (a)-(f) across two columns.
pdf("PATTERN_DIST.pdf", width = 7.5, height = 9)
grid.arrange(FIG[[1]], FIG[[4]], FIG[[2]], FIG[[5]], FIG[[3]], FIG[[6]], ncol = 2)
dev.off()
# Figure "ENV_DEM": boxplots of the six demographic traits against the
# environmental-covariate grouping only, with the same Tukey-letter labels.
panel_label <- c("(a)", "(b)", "(c)", "(d)", "(e)", "(f)", "(g)")
# Note: mixing character and expression() in c() coerces y_lab to an
# expression vector, which labs() accepts for the lambda panel.
y_lab <- c("Trophic Level", "Generation Time", "Damping Ratio",
           expression(paste("Max. ", lambda)), "Max. Length", "Length at Maturity")
ID <- 1
for (j in 3:8) {          # trait columns (cols j+4): Troph, GT, DR, Lamb, Linf, Lmat
  k <- 1                  # grouping fixed to the environmental covariate (COV);
                          # replaces the degenerate `for (k in c(1:1))` loop
  TABLE2 <- TABLE[, c(k + 1, j + 4)]
  # CREATE COMPACT LETTER DISPLAY and PUT THEM into XLABEL STRING "LAB";
  # do.call() builds mcp(<group var> = "Tukey") without eval(parse()).
  myform <- as.formula(sprintf("%s ~ %s", names(TABLE2)[2], names(TABLE2)[1]))
  mod <- aov(myform, data = TABLE2)
  tuk <- multcomp::glht(mod,
                        linfct = do.call(multcomp::mcp,
                                         setNames(list("Tukey"), names(TABLE2)[1])))
  tuk.cld <- multcomp::cld(tuk)
  LAB <- paste(c('CON','OXY','MSL','SAL','TMP'), " (", tuk.cld$mcletters$Letters, ")", sep = "")
  ## PRODUCE BOXPLOT (.data pronoun replaces the deprecated aes_string())
  FIG[[ID]] <- ggplot(TABLE, aes(x = .data[[names(TABLE2)[1]]], y = .data[[names(TABLE2)[2]]])) +
    geom_boxplot() +
    stat_compare_means(method = "kruskal.test") +
    scale_x_discrete(labels = LAB) +
    labs(title = panel_label[ID], x = "", y = y_lab[j - 2]) +
    theme_classic()
  ID <- ID + 1
}
pdf("ENV_DEM.pdf", width = 8, height = 10)
grid.arrange(FIG[[1]], FIG[[2]], FIG[[3]], FIG[[4]], FIG[[5]], FIG[[6]], ncol = 2, bottom = "Environmental Variables")
dev.off()
| /d_comparisons.R | no_license | masamifujiwara/GOM_TRAIT_PHENOLOGY | R | false | false | 5,668 | r | # Compare the yearly and seasonal trend patterns and demographic characteristics of fishes
# Take the information in "Demography.Rdata"
# Take the information in "Seine_Best.Rdata" from "c_ModelSelection.R"
rm(list=ls()) # Clear saved variables
suppressMessages(library("tidyverse"))
suppressMessages(library("ggpubr"))
suppressMessages(library("gridExtra"))
load("Demography.Rdata")
Demog<-map_df (Demography, ~.x[["FB_ESTIM"]][c("TempPrefMin","TempPrefMax","Troph") ]) %>%
mutate(sci_name=names(Demography),
GT= map_dbl(Demography,~.x[["GT1"]]),
DR= map_dbl(Demography,~.x[["DR1"]]),
Lamb= map_dbl(Demography,~.x[["Lamb"]]),
Linf= map_dbl(Demography,~.x[["Linf"]]),
Lmat= map_dbl(Demography,~.x[["Lmat"]])
)
## The following operations were tried within dplyr using mutate_at, but it did not work!
Demog[,"GT"]<-rank(Demog[,"GT"],na.last = "keep",ties.method="average")
Demog[,"DR"]<-rank(Demog[,"DR"],na.last = "keep",ties.method="average")
Demog[,"Lamb"]<-rank(Demog[,"Lamb"],na.last = "keep",ties.method="average")
Demog[,"Linf"]<-rank(Demog[,"Linf"],na.last = "keep",ties.method="average")
Demog[,"Lmat"]<-rank(Demog[,"Lmat"],na.last = "keep",ties.method="average")
Demog[,"Troph"]<-rank(Demog[,"Troph"],na.last = "keep",ties.method="average")
Demog[,"TempPrefMax"]<-rank(Demog[,"TempPrefMax"],na.last = "keep",ties.method="average")
Demog[,"TempPrefMin"]<-rank(Demog[,"TempPrefMin"],na.last = "keep",ties.method="average")
load("Seine_Best.Rdata")
## Cluster the patterns of seasonal trends (take four clusters)
A<-map_dfc(fitVal,~.x[[1]]$fit) %>% mutate_all(scale)
names(A)<-unlist(SP)
distMatrix<-dist(t(A),method="euclidean")
hc<-cutree(hclust(distMatrix, method="average"),k=4)
Season<-tibble(sci_name=names(hc),season_type=hc)
## Cluster the patterns of yearly trends (take three clusters)
A<-map_dfc(fitVal,~.x[[2]]$fit) %>% mutate_all(scale)
names(A)<-as.character(unlist(SP))
distMatrix<-dist(t(A),method="euclidean")
hc<-cutree(hclust(distMatrix, method="average"),k=3)
Year<-tibble(sci_name=names(hc),year_type=hc)
COVAR<-tibble(sci_name=unlist(SP),COV=COV)
## The following will only include species that are in both TABLE and Demog
## Demog include fish species only, but COVAR include both fish and inverte
TABLE<-merge(COVAR,Year,all.x=TRUE)
TABLE<-merge(TABLE,Season,all.x=TRUE)
TABLE<-tibble(merge(TABLE,Demog))
#colSums(is.na(TABLE))
TABLE <- TABLE %>%
mutate(COV=factor(COV, ordered=FALSE), year_type=factor(year_type,ordered=FALSE),season_type=factor(season_type, ordered=FALSE))
# Rename the patterns based on the cluster analysis
levels(TABLE$year_type)<-c("increase","decrease","fluctuate")
levels(TABLE$season_type)<-c("summer","winter","fall","spring")
save("TABLE","Season","Year","fitVal",file='clusters.Rdata')
ID<-1
FIG<-NULL
RESULTS<-matrix(NA,nrow=3,ncol=8)
colnames(RESULTS)<-c("TempPrefMin","TempPrefMax","Troph",
"GT","DR","Lamb","Linf","Lmat")
rownames(RESULTS)<-c("COV","year_type","season_type")
x_lab=c("environmental variables", "annual pattern","seasonal pattern")
y_lab=c("Min Temperature","Max Temperature")
panel_label=c("(a)","(c)","(e)","(b)","(d)","(f)","(g)")
for (j in c(1:2)){
for (k in c(1:3)){ # COVARIATE, SEASON TYPE, ANNUAL TYPE,
TABLE2 <- TABLE[,c(k+1,j+4)]
# CREATE COMPACT LETTER DISPLAY and PUT THEM into XLABEL STRING "LAB"
myform <- as.formula(sprintf("%s ~ %s",names(TABLE2)[2],names(TABLE2)[1]))
mod <- aov(myform, data = TABLE2)
tuk<-eval(parse(text=paste("multcomp::glht(mod, linfct = multcomp::mcp(",names(TABLE2)[1]," = 'Tukey'))")))
tuk.cld <- multcomp::cld(tuk)
LAB<-paste(names(tuk.cld$mcletters$Letters)," (",tuk.cld$mcletters$Letters,")",sep="")
if (ID %in% c(1,4)){
LAB<-paste(c('CON','OXY','MSL','SAL','TMP')," (",tuk.cld$mcletters$Letters,")",sep="")
}
## PRODUCE BOXPLOT
FIG[[ID]]<-ggplot(TABLE,aes_string(x=names(TABLE2)[1],y=names(TABLE2)[2]))+
geom_boxplot()+
stat_compare_means(method="kruskal.test")+
scale_x_discrete(labels=LAB)+
labs(title=paste(panel_label[ID]),x=x_lab[k],y=y_lab[j])+
theme_classic()
ID<-ID+1
}
}
pdf("PATTERN_DIST.pdf",width=7.5,height=9)
grid.arrange(FIG[[1]],FIG[[4]],FIG[[2]],FIG[[5]],FIG[[3]],FIG[[6]],ncol=2)
dev.off()
panel_label=c("(a)","(b)","(c)","(d)","(e)","(f)","(g)")
y_lab=c("Trophic Level","Generation Time","Damping Ratio",expression(paste("Max. ", lambda)),"Max. Length", "Length at Maturity")
ID<-1
for (j in c(3:8)){
for (k in c(1:1)){ # COVARIATE, SEASON TYPE, ANNUAL TYPE,
TABLE2 <- TABLE[,c(k+1,j+4)]
# CREATE COMPACT LETTER DISPLAY and PUT THEM into XLABEL STRING "LAB"
myform <- as.formula(sprintf("%s ~ %s",names(TABLE2)[2],names(TABLE2)[1]))
mod <- aov(myform, data = TABLE2)
tuk<-eval(parse(text=paste("multcomp::glht(mod, linfct = multcomp::mcp(",names(TABLE2)[1]," = 'Tukey'))")))
tuk.cld <- multcomp::cld(tuk)
LAB<-paste(c('CON','OXY','MSL','SAL','TMP')," (",tuk.cld$mcletters$Letters,")",sep="")
## PRODUCE BOXPLOT
FIG[[ID]]<-ggplot(TABLE,aes_string(x=names(TABLE2)[1],y=names(TABLE2)[2]))+
geom_boxplot()+
stat_compare_means(method="kruskal.test")+
scale_x_discrete(labels=LAB)+
labs(title=paste(panel_label[ID]),x="",y=y_lab[j-2])+
theme_classic()
ID<-ID+1
}
}
pdf("ENV_DEM.pdf",width=8,height=10)
grid.arrange(FIG[[1]],FIG[[2]],FIG[[3]],FIG[[4]],FIG[[5]],FIG[[6]],ncol=2, bottom="Environmental Variables")
dev.off()
|
# Shiny dashboard tab for the differential-expression (DE) analysis results.
# One tabsetPanel with six sub-tabs: DE table, DE ratio, MA plot, Volcano plot,
# Barcode plot, and P-value distribution. Each plot tab follows the same
# pattern: a right-hand sidebar with an info block (rendered server-side via
# uiOutput) and a main panel holding a plotly output wrapped in a load spinner.
# The inline HTML('<hr ...>') calls draw the blue divider rules.
tab_analysis <- tabItem(
  tabName = "analysis",
  align = "center",
  br(),
  tabsetPanel(
    # --- Tab 1: the full DE result table, filterable to DE genes only ---
    tabPanel(
      title = "DE table",
      HTML('<hr style="border-color: #0088cc;">'),
      radioButtons(
        inputId = "setdeTab",
        label = "Show selection:",
        inline = TRUE,
        choices = c("All genes" = "all",
                    "DE genes" = "deg")
      ),
      DT::dataTableOutput("detab_table") %>% withSpinner(),
      HTML('<hr style="border-color: #0088cc;">')
    ),
    # --- Tab 2: ratio of DE vs non-DE genes ---
    tabPanel(
      title = "DE ratio",
      HTML('<hr style="border-color: #0088cc;">'),
      sidebarLayout(
        position = "right",
        sidebarPanel(
          width = 3,
          uiOutput("de_ratio_info"),
          span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
        ),
        mainPanel(
          width = 9,
          plotlyOutput("de_ratio", height = "600px") %>% withSpinner()
        )
      ),
      HTML('<hr style="border-color: #0088cc;">')
    ),
    # --- Tab 3: MA plot with a table of points selected in the plot ---
    tabPanel(
      title = "MA",
      HTML('<hr style="border-color: #0088cc;">'),
      sidebarLayout(
        position = "right",
        sidebarPanel(
          width = 3,
          uiOutput("ma_plot_info"),
          span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
        ),
        mainPanel(
          width = 9,
          plotlyOutput("ma_plot", height = "600px") %>% withSpinner()
        )
      ),
      HTML('<hr style="border-color: #0088cc;">'),
      DT::dataTableOutput("selected_ma") %>% withSpinner(),
      HTML('<hr style="border-color: #0088cc;">')
    ),
    # --- Tab 4: volcano plot, same selected-points table pattern as MA ---
    tabPanel(
      title = "Volcano",
      HTML('<hr style="border-color: #0088cc;">'),
      sidebarLayout(
        position = "right",
        sidebarPanel(
          width = 3,
          uiOutput("volcano_plot_info"),
          span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
        ),
        mainPanel(
          width = 9,
          plotlyOutput("volcano_plot", height = "600px") %>% withSpinner()
        )
      ),
      HTML('<hr style="border-color: #0088cc;">'),
      DT::dataTableOutput("selected_volcano") %>% withSpinner(),
      HTML('<hr style="border-color: #0088cc;">')
    ),
    # --- Tab 5: barcode plot with gene picker and gene-count slider ---
    tabPanel(
      title = "Barcode",
      HTML('<hr style="border-color: #0088cc;">'),
      sidebarLayout(
        position = "right",
        sidebarPanel(
          width = 3,
          uiOutput("group_analysis_bar"),
          # Choices are populated server-side; the placeholder entry keeps the
          # select empty until then.
          selectInput(
            inputId = "selected_analysis_bar",
            label = "Add specific genes:",
            multiple = TRUE,
            choices = c("Click to add gene" = ""),
            selected = 1
          ),
          sliderInput(
            inputId = "slider_barcode",
            label = "Set the number of genes to show:",
            value = 10,
            min = 1,
            max = 50,
            step = 1
          ),
          br(),
          uiOutput("barcode_plot_info"),
          span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
        ),
        mainPanel(
          width = 9,
          plotlyOutput("barcode_plot", height = "600px") %>% withSpinner()
        )
      ),
      HTML('<hr style="border-color: #0088cc;">')
    ),
    # --- Tab 6: p-value distribution ---
    tabPanel(
      title = "P-Value",
      HTML('<hr style="border-color: #0088cc;">'),
      sidebarLayout(
        position = "right",
        sidebarPanel(
          width = 3,
          uiOutput("p_val_plot_info"),
          span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
        ),
        mainPanel(
          width = 9,
          plotlyOutput("p_val_plot", height = "600px") %>% withSpinner()
        )
      ),
      HTML('<hr style="border-color: #0088cc;">')
    )
  )
)
| /inst/src/tabs/analysis/ui_analysis.R | permissive | LUMC/dgeAnalysis | R | false | false | 3,748 | r |
tab_analysis <- tabItem(
tabName = "analysis",
align = "center",
br(),
tabsetPanel(
tabPanel(
title = "DE table",
HTML('<hr style="border-color: #0088cc;">'),
radioButtons(
inputId = "setdeTab",
label = "Show selection:",
inline = TRUE,
choices = c("All genes" = "all",
"DE genes" = "deg")
),
DT::dataTableOutput("detab_table") %>% withSpinner(),
HTML('<hr style="border-color: #0088cc;">')
),
tabPanel(
title = "DE ratio",
HTML('<hr style="border-color: #0088cc;">'),
sidebarLayout(
position = "right",
sidebarPanel(
width = 3,
uiOutput("de_ratio_info"),
span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
),
mainPanel(
width = 9,
plotlyOutput("de_ratio", height = "600px") %>% withSpinner()
)
),
HTML('<hr style="border-color: #0088cc;">')
),
tabPanel(
title = "MA",
HTML('<hr style="border-color: #0088cc;">'),
sidebarLayout(
position = "right",
sidebarPanel(
width = 3,
uiOutput("ma_plot_info"),
span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
),
mainPanel(
width = 9,
plotlyOutput("ma_plot", height = "600px") %>% withSpinner()
)
),
HTML('<hr style="border-color: #0088cc;">'),
DT::dataTableOutput("selected_ma") %>% withSpinner(),
HTML('<hr style="border-color: #0088cc;">')
),
tabPanel(
title = "Volcano",
HTML('<hr style="border-color: #0088cc;">'),
sidebarLayout(
position = "right",
sidebarPanel(
width = 3,
uiOutput("volcano_plot_info"),
span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
),
mainPanel(
width = 9,
plotlyOutput("volcano_plot", height = "600px") %>% withSpinner()
)
),
HTML('<hr style="border-color: #0088cc;">'),
DT::dataTableOutput("selected_volcano") %>% withSpinner(),
HTML('<hr style="border-color: #0088cc;">')
),
tabPanel(
title = "Barcode",
HTML('<hr style="border-color: #0088cc;">'),
sidebarLayout(
position = "right",
sidebarPanel(
width = 3,
uiOutput("group_analysis_bar"),
selectInput(
inputId = "selected_analysis_bar",
label = "Add specific genes:",
multiple = TRUE,
choices = c("Click to add gene" = ""),
selected = 1
),
sliderInput(
inputId = "slider_barcode",
label = "Set the number of genes to show:",
value = 10,
min = 1,
max = 50,
step = 1
),
br(),
uiOutput("barcode_plot_info"),
span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
),
mainPanel(
width = 9,
plotlyOutput("barcode_plot", height = "600px") %>% withSpinner()
)
),
HTML('<hr style="border-color: #0088cc;">')
),
tabPanel(
title = "P-Value",
HTML('<hr style="border-color: #0088cc;">'),
sidebarLayout(
position = "right",
sidebarPanel(
width = 3,
uiOutput("p_val_plot_info"),
span(icon("copyright"), "LUMC - SASC", style = "color: #e3e3e3;")
),
mainPanel(
width = 9,
plotlyOutput("p_val_plot", height = "600px") %>% withSpinner()
)
),
HTML('<hr style="border-color: #0088cc;">')
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl.report.repavg.r
\name{gl.report.repavg}
\alias{gl.report.repavg}
\title{Report summary of RepAvg, repeatability averaged over both alleles for each locus in a genlight {adegenet} object}
\usage{
gl.report.repavg(x, plot = FALSE, smearplot = FALSE)
}
\arguments{
\item{x}{-- name of the genlight object containing the SNP data [required]}
\item{plot}{if TRUE, will produce a histogram of call rate [default FALSE]}
\item{smearplot}{if TRUE, will produce a smearplot of individuals against loci [default FALSE]}
}
\value{
-- Tabulation of repAvg against prospective Thresholds
}
\description{
SNP datasets generated by DArT have an index, RepAvg, generated by reproducing the data independently for 30% of loci.
RepAvg is the proportion of alleles that give a repeatable result, averaged over both alleles for each locus.
}
\details{
A histogram and/or a smearplot can be requested. Note that the smearplot is computationally intensive, and will take time to
execute for large datasets.
}
\examples{
gl.report.repavg(testset.gl)
}
\author{
Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
}
| /man/gl.report.repavg.Rd | no_license | Konoutan/dartR | R | false | true | 1,195 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl.report.repavg.r
\name{gl.report.repavg}
\alias{gl.report.repavg}
\title{Report summary of RepAvg, repeatability averaged over both alleles for each locus in a genlight {adegenet} object}
\usage{
gl.report.repavg(x, plot = FALSE, smearplot = FALSE)
}
\arguments{
\item{x}{-- name of the genlight object containing the SNP data [required]}
\item{plot}{if TRUE, will produce a histogram of call rate [default FALSE]}
\item{smearplot}{if TRUE, will produce a smearplot of individuals against loci [default FALSE]}
}
\value{
-- Tabulation of repAvg against prospective Thresholds
}
\description{
SNP datasets generated by DArT have in index, RepAvg, generated by reproducing the data independently for 30% of loci.
RepAvg is the proportion of alleles that give a repeatable result, averaged over both alleles for each locus.
}
\details{
A histogram and or a smearplot can be requested. Note that the smearplot is computationally intensive, and will take time to
execute for large datasets.
}
\examples{
gl.report.repavg(testset.gl)
}
\author{
Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
}
|
# Create eTable1: AIC table
# HourlyTemp-Stroke Analysis
# Temperature-CVD-NYS Project
# Sebastian T. Rowland
####***********************
#### Table of Contents ####
####***********************
# 0: Preparation
# 1: Create Table
####********************
#### 0: Preparation ####
####********************
# 0a Create the folder structure, if you haven't already
# Ran_analysis_0_01 is a flag set by the setup script; sourcing it defines
# OutputsPath and the project directory layout used below.
if (!exists('Ran_analysis_0_01')){
here::i_am('README.md')
source(here::here('HourlyTemp_Stroke', 'Scripts',
'0_01_setOutcomeName_folders_packages_data.R'))
}
# 0b Create the plotting objects, if you haven't already
if (!exists('HH.fig')){
source(here::here('HourlyTemp_Stroke', 'Scripts','G_01_set_PlottingObjects.R'))
}
####*********************
#### 1: Create Table ####
####*********************
# 1a Readin table of model AICs
aic.table <- read_csv(here::here('HourlyTemp_Stroke', OutputsPath, 'Tables',
'Model_AICweights.csv'))
# 1b Keep only AIC of main models
# Restrict to the HourlyTemp models, derive a readable Outcome label from the
# model identifier suffix (characters 12+), and spell out the exposure-response
# (ER) / lag-response (LR) constraints ("lin" -> "Linear",
# "<k>df" -> "ns <k> df").
aic.table <- aic.table %>%
filter(str_detect(ModelIdentifier, 'HourlyTemp')) %>%
mutate(Outcome = str_sub(ModelIdentifier, 12)) %>%
mutate(Outcome = if_else(Outcome == 'strokeISC', 'Ischemic Stroke', 'Hemorrhagic Stroke')) %>%
mutate(Outcome = factor(Outcome, levels=c('Ischemic Stroke', 'Hemorrhagic Stroke'))) %>%
mutate(ERConstraint = if_else(ERConstraint == 'lin', 'Linear', paste0('ns ', str_sub(ERConstraint, 0, 1), ' df')),
LRConstraint = paste0('ns ', str_sub(LRConstraint, 0, 1), ' df')) %>%
dplyr::select(Outcome, ERConstraint, LRConstraint, AIC, AkaikeWeight)
# 1c Save table
# Two successive arrange() calls give Outcome-major, ERConstraint-minor
# ordering because dplyr::arrange is a stable sort.
aic.table %>%
mutate(ERConstraint = factor(ERConstraint, levels = c('Linear', 'ns 3 df', 'ns 4 df','ns 5 df'))) %>%
arrange(ERConstraint) %>%
arrange(Outcome) %>%
write_csv(here::here('HourlyTemp_Stroke', OutputsPath, 'Manuscript',
'eTable1_AIC_fullPopAnalysis.csv'))
| /HourlyTemp_stroke/Scripts/G_04_eTable2_AIC_fullpopAnalysis.R | permissive | s-rowland/HourlyTemp-Stroke | R | false | false | 1,913 | r | # Create eTable1: AIC table
# HourlyTemp-Stroke Analysis
# Temperature-CVD-NYS Project
# Sebastian T. Rowland

####***********************
#### Table of Contents ####
####***********************

# 0: Preparation
# 1: Create Table

####********************
#### 0: Preparation ####
####********************

# 0a Create the folder structure, if you haven't already
if (!exists('Ran_analysis_0_01')) {
  here::i_am('README.md')
  source(here::here('HourlyTemp_Stroke', 'Scripts',
                    '0_01_setOutcomeName_folders_packages_data.R'))
}

# 0b Create the plotting objects, if you haven't already
if (!exists('HH.fig')) {
  source(here::here('HourlyTemp_Stroke', 'Scripts', 'G_01_set_PlottingObjects.R'))
}

####*********************
#### 1: Create Table ####
####*********************

# 1a Read in table of model AICs
aic.table <- read_csv(here::here('HourlyTemp_Stroke', OutputsPath, 'Tables',
                                 'Model_AICweights.csv'))

# 1b Keep only AIC of main models
# ModelIdentifier looks like 'HourlyTemp_<outcome>'; characters 12+ hold the
# outcome code -- NOTE(review): assumed from the str_sub offset, confirm.
aic.table <- aic.table %>%
  filter(str_detect(ModelIdentifier, 'HourlyTemp')) %>%
  mutate(Outcome = str_sub(ModelIdentifier, 12)) %>%
  mutate(Outcome = if_else(Outcome == 'strokeISC', 'Ischemic Stroke', 'Hemorrhagic Stroke')) %>%
  mutate(Outcome = factor(Outcome, levels = c('Ischemic Stroke', 'Hemorrhagic Stroke'))) %>%
  # Constraint codes are either 'lin' or start with the df count
  # (e.g. '3df...'), so the first character gives the degrees of freedom.
  # str_sub() positions are 1-based; start at 1, not 0.
  mutate(ERConstraint = if_else(ERConstraint == 'lin', 'Linear',
                                paste0('ns ', str_sub(ERConstraint, 1, 1), ' df')),
         LRConstraint = paste0('ns ', str_sub(LRConstraint, 1, 1), ' df')) %>%
  dplyr::select(Outcome, ERConstraint, LRConstraint, AIC, AkaikeWeight)

# 1c Save table, ordered by outcome and then by increasing flexibility of the
# exposure-response constraint
aic.table %>%
  mutate(ERConstraint = factor(ERConstraint, levels = c('Linear', 'ns 3 df', 'ns 4 df', 'ns 5 df'))) %>%
  arrange(ERConstraint) %>%
  arrange(Outcome) %>%
  write_csv(here::here('HourlyTemp_Stroke', OutputsPath, 'Manuscript',
                       'eTable1_AIC_fullPopAnalysis.csv'))
|
# Model Check
#
# Fits a binomial GLMM in JAGS (logit link) relating the number of fertile
# occurrences to scaled mean summer temperature, with a species random effect
# and an observation-level overdispersion term, then checks the fit with
# DHARMa simulated residuals. The *.pred nodes replicate the observed data so
# JAGS produces posterior-predictive draws for the residual check.

# Write the JAGS model definition to a temporary file
tempFileLoc <- tempfile()
cat(
  "model{
  # BINOMIAL LIKELIHOOD
  for(i in 1:nData){
    Fertile[i] ~ dbin(p[i], N[i])
    logit(p[i]) <- alpha +
      betaClimateT * ClimateT[i] +
      speciesCoeff[species[i]] +
      eps[i] * eps.on
    # overdispersion term
    eps[i] ~ dnorm(0, tau.over)
  }

  # PRIORS
  # fixed effects
  alpha ~ dnorm(0, 0.001)        # Intercept
  betaClimateT ~ dnorm(0, 0.001) # Slope for climateT

  # random effects
  for(j in 1:nSpecies) {
    speciesCoeff[j] ~ dnorm(0, randPrecSP)
  }
  randPrecSP ~ dgamma(0.001,0.001)

  # precision of the overdispersion term
  tau.over ~ dgamma(0.001,0.001)

  # PREDICTIONS
  for(i in 1:nData.pred){
    Fertile.pred[i] ~ dbin(p.pred[i], N.pred[i])
    logit(p.pred[i]) <- alpha +
      betaClimateT * ClimateT.pred[i] +
      speciesCoeff[species.pred[i]] +
      eps.pred[i] * eps.on
    # overdispersion term
    eps.pred[i] ~ dnorm(0, tau.over)
  }
}
", file = tempFileLoc)

# Drop 2009 -- NOTE(review): reason for exclusion not visible here; confirm.
Dat <- fertile %>%
  filter(!year %in% c(2009))

# Data passed to JAGS; the .pred entries duplicate the observed data so the
# model simulates posterior-predictive draws used by DHARMa below
Data <- list(N = Dat$NumberOfOccurrence,
             Fertile = Dat$SumOffertile,
             nData = nrow(Dat),
             ClimateT = Dat$MeanSummerTempGrid.sc,
             species = as.numeric(as.factor(Dat$species)),
             nSpecies = nlevels(as.factor(Dat$species)),
             eps.on = 1, # turns on the overdispersion term in the model
             # Predictions
             N.pred = Dat$NumberOfOccurrence,
             Fertile.pred = Dat$SumOffertile,
             nData.pred = nrow(Dat),
             ClimateT.pred = Dat$MeanSummerTempGrid.sc,
             species.pred = as.numeric(as.factor(Dat$species))
)

# Specify a function to generate initial values for the parameters
inits.fn <- function() list(alpha = -2,
                            betaClimateT = 0.5,
                            speciesCoeff = rnorm(length(unique(Dat$species)), 0, 0.1)
)

# Parameters to monitor
para.names <- c("alpha", "betaClimateT")

# Run analysis
# try burnin 0, iter 100000, thinning 20
res1 <- jagsUI::jags(data = Data,
                     inits = inits.fn,
                     parameters.to.save = para.names,
                     model.file = tempFileLoc,
                     n.thin = 5,
                     n.chains = 3,
                     n.iter = 1000,
                     n.cores = 3)

# Inspect convergence and the temperature effect
plot(res1)
whiskerplot(res1, c("betaClimateT"))

# Refit with rjags so coda.samples() can draw the posterior-predictive nodes
library("rjags")
jagsModel <- jags.model(file = tempFileLoc,
                        data = Data,
                        inits = inits.fn,
                        n.chains = 3,
                        n.adapt = 1000)

# Model checking with DHARMa
# Sample simulated posterior for the number fertile (Fertile.pred)
Pred.Samples <- coda.samples(jagsModel,
                             variable.names = "Fertile.pred",
                             n.iter = 1000)

# Transform mcmc.list object to a matrix (rows = draws, cols = observations)
Pred.Mat <- as.matrix(Pred.Samples)

# Create model checking plots
res <- createDHARMa(simulatedResponse = t(Pred.Mat),
                    observedResponse = Dat$SumOffertile,
                    integerResponse = TRUE, # counts, not continuous
                    fittedPredictedResponse = apply(Pred.Mat, 2, median))
plot(res)
| /R/ModelCheck.R | no_license | audhalbritter/Flowering | R | false | false | 3,156 | r | # Model Check
# Fits a binomial GLMM in JAGS (logit link) relating the number of fertile
# occurrences to scaled mean summer temperature, with a species random effect
# and an observation-level overdispersion term, then checks the fit with
# DHARMa simulated residuals. The *.pred nodes replicate the observed data so
# JAGS produces posterior-predictive draws for the residual check.

# Write the JAGS model definition to a temporary file
tempFileLoc <- tempfile()
cat(
  "model{
  # BINOMIAL LIKELIHOOD
  for(i in 1:nData){
    Fertile[i] ~ dbin(p[i], N[i])
    logit(p[i]) <- alpha +
      betaClimateT * ClimateT[i] +
      speciesCoeff[species[i]] +
      eps[i] * eps.on
    # overdispersion term
    eps[i] ~ dnorm(0, tau.over)
  }

  # PRIORS
  # fixed effects
  alpha ~ dnorm(0, 0.001)        # Intercept
  betaClimateT ~ dnorm(0, 0.001) # Slope for climateT

  # random effects
  for(j in 1:nSpecies) {
    speciesCoeff[j] ~ dnorm(0, randPrecSP)
  }
  randPrecSP ~ dgamma(0.001,0.001)

  # precision of the overdispersion term
  tau.over ~ dgamma(0.001,0.001)

  # PREDICTIONS
  for(i in 1:nData.pred){
    Fertile.pred[i] ~ dbin(p.pred[i], N.pred[i])
    logit(p.pred[i]) <- alpha +
      betaClimateT * ClimateT.pred[i] +
      speciesCoeff[species.pred[i]] +
      eps.pred[i] * eps.on
    # overdispersion term
    eps.pred[i] ~ dnorm(0, tau.over)
  }
}
", file = tempFileLoc)

# Drop 2009 -- NOTE(review): reason for exclusion not visible here; confirm.
Dat <- fertile %>%
  filter(!year %in% c(2009))

# Data passed to JAGS; the .pred entries duplicate the observed data so the
# model simulates posterior-predictive draws used by DHARMa below
Data <- list(N = Dat$NumberOfOccurrence,
             Fertile = Dat$SumOffertile,
             nData = nrow(Dat),
             ClimateT = Dat$MeanSummerTempGrid.sc,
             species = as.numeric(as.factor(Dat$species)),
             nSpecies = nlevels(as.factor(Dat$species)),
             eps.on = 1, # turns on the overdispersion term in the model
             # Predictions
             N.pred = Dat$NumberOfOccurrence,
             Fertile.pred = Dat$SumOffertile,
             nData.pred = nrow(Dat),
             ClimateT.pred = Dat$MeanSummerTempGrid.sc,
             species.pred = as.numeric(as.factor(Dat$species))
)

# Specify a function to generate initial values for the parameters
inits.fn <- function() list(alpha = -2,
                            betaClimateT = 0.5,
                            speciesCoeff = rnorm(length(unique(Dat$species)), 0, 0.1)
)

# Parameters to monitor
para.names <- c("alpha", "betaClimateT")

# Run analysis
# try burnin 0, iter 100000, thinning 20
res1 <- jagsUI::jags(data = Data,
                     inits = inits.fn,
                     parameters.to.save = para.names,
                     model.file = tempFileLoc,
                     n.thin = 5,
                     n.chains = 3,
                     n.iter = 1000,
                     n.cores = 3)

# Inspect convergence and the temperature effect
plot(res1)
whiskerplot(res1, c("betaClimateT"))

# Refit with rjags so coda.samples() can draw the posterior-predictive nodes
library("rjags")
jagsModel <- jags.model(file = tempFileLoc,
                        data = Data,
                        inits = inits.fn,
                        n.chains = 3,
                        n.adapt = 1000)

# Model checking with DHARMa
# Sample simulated posterior for the number fertile (Fertile.pred)
Pred.Samples <- coda.samples(jagsModel,
                             variable.names = "Fertile.pred",
                             n.iter = 1000)

# Transform mcmc.list object to a matrix (rows = draws, cols = observations)
Pred.Mat <- as.matrix(Pred.Samples)

# Create model checking plots
res <- createDHARMa(simulatedResponse = t(Pred.Mat),
                    observedResponse = Dat$SumOffertile,
                    integerResponse = TRUE, # counts, not continuous
                    fittedPredictedResponse = apply(Pred.Mat, 2, median))
plot(res)
|
#' Two-way contingency tables for all pairs of variables
#'
#' For every pair of the columns named in `...`, tabulates the joint
#' distribution over `df`: counts per combination of levels and the
#' corresponding empirical joint probability.
#'
#' @param df A data frame.
#' @param ... Unquoted column names of `df`; every pair of them is tabulated.
#' @return A list with one element per pair of columns; each element is the
#'   tallied data frame with the two grouping columns, `n` (count), and
#'   `prob` (`n / sum(n)`).
#' @importFrom purrr map_chr
#' @importFrom dplyr group_by tally ungroup mutate
#' @importFrom rlang enexprs as_string
#' @importFrom magrittr %>%
#' @export
calculate_two_way_contingencies <- function(df, ...) {
  # Capture the column names, enumerate all 2-combinations, and tabulate each.
  rlang::enexprs(...) %>%
    purrr::map_chr(rlang::as_string) %>%
    utils::combn(2) %>%
    apply(2, function(vv) {
      vv <- rlang::syms(vv)
      dplyr::group_by(df, !!!vv) %>%
        dplyr::tally() %>%
        dplyr::ungroup() %>%
        dplyr::mutate(prob = n / sum(n))
    })
} | /R/calculate_two_way_contingencies.R | permissive | ddimmery/cdsampler | R | false | false | 480 | r | #' @importFrom purrr map_chr
#' Two-way contingency tables for all pairs of variables
#'
#' For every pair of the columns named in `...`, tabulates the joint
#' distribution over `df`: counts per combination of levels and the
#' corresponding empirical joint probability.
#'
#' @param df A data frame.
#' @param ... Unquoted column names of `df`; every pair of them is tabulated.
#' @return A list with one element per pair of columns; each element is the
#'   tallied data frame with the two grouping columns, `n` (count), and
#'   `prob` (`n / sum(n)`).
#' @importFrom dplyr group_by tally ungroup mutate
#' @importFrom rlang enexprs as_string
#' @importFrom magrittr %>%
#' @export
calculate_two_way_contingencies <- function(df, ...) {
  # Capture the column names, enumerate all 2-combinations, and tabulate each.
  rlang::enexprs(...) %>%
    purrr::map_chr(rlang::as_string) %>%
    utils::combn(2) %>%
    apply(2, function(vv) {
      vv <- rlang::syms(vv)
      dplyr::group_by(df, !!!vv) %>%
        dplyr::tally() %>%
        dplyr::ungroup() %>%
        dplyr::mutate(prob = n / sum(n))
    })
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.