content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
t0=Sys.time()  # start wall-clock timer; total runtime is printed at the end of the script
#source("/home/shriraj/Codes/Subro_Hadoop.R")
###################################################################################################################################################################################
# --- Hadoop / HDFS environment setup -------------------------------------
# Point the rhdfs package at the local Hadoop installation before hdfs.init().
Sys.setenv(HADOOP_HOME="/hadoop/mr-runtime")
Sys.setenv(HIVE_HOME="/hadoop/hive-runtime")
Sys.setenv(HADOOP_BIN="/hadoop/mr-runtime/bin")
Sys.setenv(HADOOP_CMD ="/hadoop/mr-runtime/bin/hadoop")
# NOTE(review): require() only warns and returns FALSE when a package is
# missing; library() would fail fast and is preferred in scripts.
require("rhdfs")
hdfs.init()
require(rmr)     # NOTE(review): rmr does not appear to be used below -- confirm before removing
require(MASS)
require(epicalc)
#See list of available files from HDFS
#hdfs.ls("/New_Subro")
## Start Reading
#xx=read.csv("Final_Subro_Data.csv")
#write.table(xx,"Final_Subro_Data.txt",row.names=F,quote=F,col.names=T,sep=",")
#hdfs.put("Final_Subro_Data.txt","/New_Subro/input/Subro_data.txt",dstFS=hdfs.defaults("fs"))
# Read the training data (comma-separated, with header row) from HDFS into
# a data frame via an in-memory text connection.
content<-hdfs.read.text.file("/New_Subro/input/Subro_data.txt")
subro.data<-read.table(textConnection(content),sep=",",header=T)
# Fit Logistic regression model to find significant factors for cross sell acceptance #
#Logit Model without fault measure
#Logit_Model1<- glm(Subrogation~Age.of.Claimant+Gender+Injury+Jurisdiction.Locale+Loss.Age+Claim.Description.Code+Actual.Claim.Amount,data=subro.data, family=binomial("logit"))
#Logit Model with fault measure
# Binary outcome (Subrogation) modelled on claimant/claim covariates plus
# the fault measure; this fitted model is reused for scoring new data below.
Logit_Model<- glm(Subrogation~Age.of.Claimant+Gender+Injury+Jurisdiction.Locale+Loss.Age+Claim.Description.Code+Actual.Claim.Amount+Fault.Measure,data=subro.data, family=binomial("logit"))
# Converting probabilities
# Classify fitted probabilities at a fixed 0.5 cut-off (also used later for
# scoring the new customer feed).
threshold=0.5
# NOTE(review): this assignment shadows stats::fitted() for the rest of the
# session; consider renaming the data frame.
fitted=data.frame(fitted(Logit_Model))
fitted$Subrogation=subro.data$Subrogation
fitted$Predicted=NA
# Column 1 = fitted probability, column 3 = Predicted (0/1).
fitted[which(fitted[,1]>=threshold),3]=1
fitted[which(fitted[,1]<threshold),3]=0
# 2 X 2 contingency table
con.mat<-ftable(fitted$Subrogation,fitted$Predicted)
# Add marginal sums #
cont.table<-addmargins(con.mat)
colnames(cont.table)<-c("0","1","Marginal_sum")
rownames(cont.table)<-c("0","1","Marginal_sum")
# Find Model Accuracy #
# Accuracy (%) = (true negatives + true positives) / total observations.
Accuracy<- ((cont.table[1,1]+cont.table[2,2])/(cont.table[3,3]))*100
Accuracy=data.frame(Accuracy)
Accuracy
###### Second Predictive Model To predict recovery Amount
# Gaussian GLM for Recovery.Amount, fitted only on claims where subrogation
# actually occurred (Subrogation == 1).
subro.data2=subro.data[which(subro.data$Subrogation==1),]
#fit.subro2=glm(Recovery.Amount~Age.of.Claimant+Gender+Injury+Jurisdiction.Locale+Claim.Age+Claim.Description.Code+Actual.Claim.Amount,data=subro.data2,family=Gamma("identity"))
GL_Model=glm(Recovery.Amount~Injury+Jurisdiction.Locale+Claim.Description.Code+Actual.Claim.Amount+Fault.Measure,data=subro.data2,family=gaussian(link="identity"))
#Prediction from Model for New data set
# Score the new (semicolon-separated) customer feed from HDFS.
content<-hdfs.read.text.file("/New_Subro/input/Subro_Customer_data.txt")
New_data<-read.table(textConnection(content),sep=";",header=T)
# Save Report.Status and drop column 26 before prediction; the status
# column is restored at the end of the script.
RS=New_data$Report.Status
New_data=New_data[,-26]
# Prediction from Logit_Model: classify each new claim at the same
# probability threshold used on the training data.
Predicted_Probabilities=predict(Logit_Model,newdata=New_data,type="response")
Predicted=data.frame(Predicted_Probabilities,Predicted_Subrogation=NA)
Predicted[which(Predicted[,1]>=threshold),2]=1
Predicted[which(Predicted[,1]<threshold),2]=0
Positive_Subrogation=which(Predicted[,2]==1)
# Prediction from GL_Model: expected recovery amount, only for claims
# predicted to have a subrogation opportunity.
# Fix: spell out type="response" instead of relying on partial argument
# matching of the abbreviation "respon".
Predicted.Recovery.Amount=predict(GL_Model,newdata=New_data[Positive_Subrogation,],type="response")
Predicted$Predicted_Subrogation.Opportunity="Poor Subrogation Opportunity"
Predicted$Predicted_Subrogation.Opportunity[Positive_Subrogation]="Good Subrogation Opportunity"
# Default recovery to 0; fill in the model prediction for positive rows.
Predicted$Predicted_Recovery.Amount=0
Predicted$Predicted_Recovery.Amount[Positive_Subrogation]=Predicted.Recovery.Amount
# Attach the prediction columns to the customer data.
New_data=cbind(New_data,Predicted_Probabilities=Predicted$Predicted_Probabilities,
Predicted_Subrogation=Predicted$Predicted_Subrogation,
Predicted_Recovery.Amount=Predicted$Predicted_Recovery.Amount,
Predicted_Subrogation.Opportunity=Predicted$Predicted_Subrogation.Opportunity)
###################
# --- Per-row explanatory comments ----------------------------------------
# Flag which hard-coded risk factors fire for each claim and build a
# human-readable comment string.  Column positions (15=Age.of.Claimant,
# 16=Gender, 17=Injury, 18=Jurisdiction.Locale, 23=Claim.Description.Code)
# assume the raw feed layout -- TODO(review): confirm against the schema.
count=NULL
Comment=NULL
# var holds up to 5 triggered factor names per row ("0" where unused).
var=matrix(data=0,nrow(New_data),ncol=5)
for(i in 1:nrow(New_data))
{
j=0       # number of factors triggered for row i
q=NULL    # accumulating "; "-separated factor names
if(New_data[i,15]>38){j=j+1;q=paste(q,"Age.of.Claimant", sep="; ");var[i,j]="Age.of.Claimant"}
if(as.character(New_data[i,16])=="Male"){j=j+1;q=paste(q ,"Gender",sep="; ");var[i,j]="Gender"}
if(as.character(New_data[i,17])=="Yes"){j=j+1;q=paste(q,"Injury",sep="; ");var[i,j]="Injury"}
if(as.character(New_data[i,18])=="No"){j=j+1;q=paste(q,"Jurisdiction.Locale",sep="; ");var[i,j]="Jurisdiction.Locale"}
if(New_data[i,23]==1){j=j+1;q=paste(q,"Claim.Description.Code",sep="; ");var[i,j]="Claim.Description.Code"}
# Fix: rows with no triggered factors previously left count[i]/Comment[i]
# as NA, which injected a literal "NA" into the report text below.
if(j!=0){count[i]=j;Comment[i]=substring(q,2)}else{count[i]=0;Comment[i]=""}
# NOTE(review): column 26 is expected to be the 0/1 Predicted_Subrogation
# flag; if the raw feed has 26 columns, index 26 lands on
# Predicted_Probabilities instead -- verify this index.
if(New_data[i,26]==1){Comment[i]=paste("Our model predicts good subrogation opportunity due to significant impact of",j,"variables namely",Comment[i],sep=" ")
}else{
Comment[i]=paste("Subrogation opportunity is not good enough. Since variables other than",Comment[i],"have less impact on model", sep=" ")}
}
New_data=cbind(New_data,Comments=Comment,Var1=var[,1],Var2=var[,2],Var3=var[,3],Var4=var[,4],Var5=var[,5])
#### Computing Impact of variables
# Pull the fitted logistic coefficients into the scalar names b0..b7 used
# by the impact loop below.  Order follows the model formula:
# (Intercept), Age.of.Claimant, Gender, Injury, Jurisdiction.Locale,
# Loss.Age, Claim.Description.Code, Actual.Claim.Amount.  The ninth
# coefficient (Fault.Measure) is deliberately not extracted -- its impact
# is taken from the full-model predicted probabilities instead.
logit_coefs <- as.numeric(Logit_Model$coefficients)
b0 <- logit_coefs[1]
b1 <- logit_coefs[2]
b2 <- logit_coefs[3]
b3 <- logit_coefs[4]
b4 <- logit_coefs[5]
b5 <- logit_coefs[6]
b6 <- logit_coefs[7]
b7 <- logit_coefs[8]
# Pre-declare the per-row impact vectors filled in element-by-element below.
Impact_of_Fault_Measure <- NULL
Impact_of_Age <- NULL
Impact_of_Gender <- NULL
Impact_of_Injury <- NULL
Impact_of_Jurisdiction_Locale <- NULL
Impact_of_Loss_Age <- NULL
Impact_of_Claim_Description <- NULL
Impact_of_Actual_Claim_Amount <- NULL
# --- Cumulative variable-impact probabilities ----------------------------
# For each scored row, rebuild the logistic probability while adding model
# terms one at a time; each Impact_of_* entry is the predicted probability
# using only the terms up to and including that variable.  Because
# Fault.Measure is the last term, its "impact" is taken directly from the
# full-model predicted probability.
# Changes vs. original: removed an unused p1 assignment recomputed each
# iteration and a dead store to Impact_of_Actual_Claim_Amount that was
# immediately overwritten; the duplicated formulas in each if/else were
# hoisted (both branches computed the same expression).  The linear
# predictor accumulates left-to-right in the original term order, so the
# results are numerically identical.
for(i in 1:nrow(New_data))
{
  # Linear predictor, built up incrementally from the intercept.
  eta <- b0 + b1*New_data$Age.of.Claimant[i]
  Impact_of_Age[i] <- 1/(1+exp(-eta))
  ind1 <- if (as.character(New_data$Gender[i])=="Male") 1 else 0
  eta <- eta + b2*ind1
  Impact_of_Gender[i] <- 1/(1+exp(-eta))
  ind2 <- if (as.character(New_data$Injury[i])=="Yes") 1 else 0
  eta <- eta + b3*ind2
  Impact_of_Injury[i] <- 1/(1+exp(-eta))
  ind3 <- if (as.character(New_data$Jurisdiction.Locale[i])=="Yes") 1 else 0
  eta <- eta + b4*ind3
  Impact_of_Jurisdiction_Locale[i] <- 1/(1+exp(-eta))
  eta <- eta + b5*New_data$Loss.Age[i]
  Impact_of_Loss_Age[i] <- 1/(1+exp(-eta))
  eta <- eta + b6*New_data$Claim.Description.Code[i]
  Impact_of_Claim_Description[i] <- 1/(1+exp(-eta))
  eta <- eta + b7*New_data$Actual.Claim.Amount[i]
  Impact_of_Actual_Claim_Amount[i] <- 1/(1+exp(-eta))
  Impact_of_Fault_Measure[i] <- New_data$Predicted_Probabilities[i]
}
# Attach the per-variable impact columns computed above.
New_data=cbind(New_data,Impact_of_Age,Impact_of_Gender,Impact_of_Injury,Impact_of_Jurisdiction_Locale,Impact_of_Loss_Age,Impact_of_Claim_Description,Impact_of_Actual_Claim_Amount,Impact_of_Fault_Measure)
# Red/Amber/Green flag around the decision threshold (+/- 25%):
#   G: probability < 0.375, R: probability > 0.625, A: in between.
New_data$RAG="A"
New_data$RAG[New_data$Predicted_Probabilities<((1-.25)*threshold)]="G"
New_data$RAG[New_data$Predicted_Probabilities>((1+.25)*threshold)]="R"
# Probability rendered as a percentage string with two decimal places.
New_data$Predicted_Subrogation_Percentage=sprintf("%.2f",(Predicted_Probabilities*100))
# Reorder columns for the output report, moving columns 25 and 43 to the
# end.  NOTE(review): hard-coded positions assume a 26-column input feed
# (45 columns at this point) -- verify against the schema before changing
# any upstream column.
New_data=New_data[,c(1:24,26:42,44:45,25,43)]
# Restore the Report.Status column saved before scoring.
New_data$Report.Status=RS
# Drop all intermediate objects, keeping only the data and fitted models.
# NOTE(review): rm(list=setdiff(ls(), ...)) wipes the global environment --
# avoid if this script is ever sourced into an interactive session.
rm(list=setdiff(ls(), c("subro.data","Logit_Model","GL_Model","Predicted","New_data","t0")))
#Remove Output files
# Delete any previous run's outputs from HDFS before writing fresh ones.
if(hdfs.exists("/New_Subro/output/Model_Output.txt")==TRUE){
hdfs.del("/New_Subro/output/Model_Output.txt")}
if(hdfs.exists("/New_Subro/output/Subro_Customer_data_output.txt")==TRUE){
hdfs.del("/New_Subro/output/Subro_Customer_data_output.txt")}
# To store output in HDFS
#write.table(New_data,"Subro_Customer_data_output.csv",sep=",")
# Write the scored customer data locally, push it to HDFS, then remove the
# local temporary file.  Note col.names=FALSE: downstream readers must
# already know the column order.
write.table(New_data,"Subro_Customer_data_output.txt",row.names = FALSE,quote = FALSE,col.names = FALSE,sep=",")
hdfs.put("Subro_Customer_data_output.txt","/New_Subro/output/Subro_Customer_data_output.txt",dstFS=hdfs.defaults("fs"))
unlink("Subro_Customer_data_output.txt")
#content<-hdfs.read.text.file("/Subrogation/output/Subro_Customer_data_output.txt")
#Subro_Customer_data_output<-read.table(textConnection(content),sep=",",header=FALSE)
# Capture both model summaries into a text file via sink(), then upload.
# NOTE(review): summary() relies on top-level auto-printing; if this script
# is run via source(), wrap these in print() to guarantee output.
sink("Model_Output.txt",append = FALSE)
print("Output of Logistic Model")
summary(Logit_Model)
print(" ")
print("##############################################################################################################################################")
print(" ")
print("Output of Gaussian Linear Model")
summary(GL_Model)
print(" ")
sink()
hdfs.put("Model_Output.txt","/New_Subro/output/Model_Output.txt",dstFS=hdfs.defaults("fs"))
unlink("Model_Output.txt")
#content<-hdfs.read.text.file("/Subrogation/output/Model_Output.txt")
#Model_Output<-read.table(textConnection(content),sep=";")
# Report total script runtime.
t1=Sys.time()
total_time=t1-t0
print(total_time)
| /Subro_Hadoop_V2.R | no_license | Dillip321/Data-Modeling-by-Use-of-R | R | false | false | 9,122 | r |
t0=Sys.time()
#source("/home/shriraj/Codes/Subro_Hadoop.R")
###################################################################################################################################################################################
Sys.setenv(HADOOP_HOME="/hadoop/mr-runtime")
Sys.setenv(HIVE_HOME="/hadoop/hive-runtime")
Sys.setenv(HADOOP_BIN="/hadoop/mr-runtime/bin")
Sys.setenv(HADOOP_CMD ="/hadoop/mr-runtime/bin/hadoop")
require("rhdfs")
hdfs.init()
require(rmr)
require(MASS)
require(epicalc)
#See list of available files from HDFS
#hdfs.ls("/New_Subro")
## Start Reading
#xx=read.csv("Final_Subro_Data.csv")
#write.table(xx,"Final_Subro_Data.txt",row.names=F,quote=F,col.names=T,sep=",")
#hdfs.put("Final_Subro_Data.txt","/New_Subro/input/Subro_data.txt",dstFS=hdfs.defaults("fs"))
content<-hdfs.read.text.file("/New_Subro/input/Subro_data.txt")
subro.data<-read.table(textConnection(content),sep=",",header=T)
# Fit Logistic regression model to find significant factors for cross sell acceptance #
#Logit Model without fault measure
#Logit_Model1<- glm(Subrogation~Age.of.Claimant+Gender+Injury+Jurisdiction.Locale+Loss.Age+Claim.Description.Code+Actual.Claim.Amount,data=subro.data, family=binomial("logit"))
#Logit Model with fault measure
Logit_Model<- glm(Subrogation~Age.of.Claimant+Gender+Injury+Jurisdiction.Locale+Loss.Age+Claim.Description.Code+Actual.Claim.Amount+Fault.Measure,data=subro.data, family=binomial("logit"))
# Converting probabilities
threshold=0.5
fitted=data.frame(fitted(Logit_Model))
fitted$Subrogation=subro.data$Subrogation
fitted$Predicted=NA
fitted[which(fitted[,1]>=threshold),3]=1
fitted[which(fitted[,1]<threshold),3]=0
# 2 X 2 contingency table
con.mat<-ftable(fitted$Subrogation,fitted$Predicted)
# Add marginal sums #
cont.table<-addmargins(con.mat)
colnames(cont.table)<-c("0","1","Marginal_sum")
rownames(cont.table)<-c("0","1","Marginal_sum")
# Find Model Accuracy #
Accuracy<- ((cont.table[1,1]+cont.table[2,2])/(cont.table[3,3]))*100
Accuracy=data.frame(Accuracy)
Accuracy
###### Second Predictive Model To predict recovery Amount
subro.data2=subro.data[which(subro.data$Subrogation==1),]
#fit.subro2=glm(Recovery.Amount~Age.of.Claimant+Gender+Injury+Jurisdiction.Locale+Claim.Age+Claim.Description.Code+Actual.Claim.Amount,data=subro.data2,family=Gamma("identity"))
GL_Model=glm(Recovery.Amount~Injury+Jurisdiction.Locale+Claim.Description.Code+Actual.Claim.Amount+Fault.Measure,data=subro.data2,family=gaussian(link="identity"))
#Prediction from Model for New data set
content<-hdfs.read.text.file("/New_Subro/input/Subro_Customer_data.txt")
New_data<-read.table(textConnection(content),sep=";",header=T)
RS=New_data$Report.Status
New_data=New_data[,-26]
# Prediction from Logit_Model
Predicted_Probabilities=predict(Logit_Model,newdata=New_data,type="response")
Predicted=data.frame(Predicted_Probabilities,Predicted_Subrogation=NA)
Predicted[which(Predicted[,1]>=threshold),2]=1
Predicted[which(Predicted[,1]<threshold),2]=0
Positive_Subrogation=which(Predicted[,2]==1)
# Prediction from GL_Model
Predicted.Recovery.Amount=predict(GL_Model,newdata=New_data[Positive_Subrogation,],type="respon")
Predicted$Predicted_Subrogation.Opportunity="Poor Subrogation Opportunity"
Predicted$Predicted_Subrogation.Opportunity[Positive_Subrogation]="Good Subrogation Opportunity"
Predicted$Predicted_Recovery.Amount=0
Predicted$Predicted_Recovery.Amount[Positive_Subrogation]=Predicted.Recovery.Amount
New_data=cbind(New_data,Predicted_Probabilities=Predicted$Predicted_Probabilities,
Predicted_Subrogation=Predicted$Predicted_Subrogation,
Predicted_Recovery.Amount=Predicted$Predicted_Recovery.Amount,
Predicted_Subrogation.Opportunity=Predicted$Predicted_Subrogation.Opportunity)
###################
count=NULL
Comment=NULL
var=matrix(data=0,nrow(New_data),ncol=5)
for(i in 1:nrow(New_data))
{
j=0
q=NULL
if(New_data[i,15]>38){j=j+1;q=paste(q,"Age.of.Claimant", sep="; ");var[i,j]="Age.of.Claimant"}
if(as.character(New_data[i,16])=="Male"){j=j+1;q=paste(q ,"Gender",sep="; ");var[i,j]="Gender"}
if(as.character(New_data[i,17])=="Yes"){j=j+1;q=paste(q,"Injury",sep="; ");var[i,j]="Injury"}
if(as.character(New_data[i,18])=="No"){j=j+1;q=paste(q,"Jurisdiction.Locale",sep="; ");var[i,j]="Jurisdiction.Locale"}
if(New_data[i,23]==1){j=j+1;q=paste(q,"Claim.Description.Code",sep="; ");var[i,j]="Claim.Description.Code"}
if(j!=0){count[i]=j;Comment[i]=substring(q,2)}else{j=0;q=0}
if(New_data[i,26]==1){Comment[i]=paste("Our model predicts good subrogation opportunity due to significant impact of",j,"variables namely",Comment[i],sep=" ")
}else{
Comment[i]=paste("Subrogation opportunity is not good enough. Since variables other than",Comment[i],"have less impact on model", sep=" ")}
}
New_data=cbind(New_data,Comments=Comment,Var1=var[,1],Var2=var[,2],Var3=var[,3],Var4=var[,4],Var5=var[,5])
#### Computing Impact of variables
b0=as.numeric(Logit_Model$coefficient[1])
b1=as.numeric(Logit_Model$coefficient[2])
b2=as.numeric(Logit_Model$coefficient[3])
b3=as.numeric(Logit_Model$coefficient[4])
b4=as.numeric(Logit_Model$coefficient[5])
b5=as.numeric(Logit_Model$coefficient[6])
b6=as.numeric(Logit_Model$coefficient[7])
b7=as.numeric(Logit_Model$coefficient[8])
Impact_of_Fault_Measure=Impact_of_Age=Impact_of_Gender=Impact_of_Injury=Impact_of_Jurisdiction_Locale=Impact_of_Loss_Age=Impact_of_Claim_Description=Impact_of_Actual_Claim_Amount=NULL
for(i in 1:nrow(New_data))
{
p1=1/(1+exp(-b0))
Impact_of_Age[i]=1/(1+exp(-(b0+b1*New_data$Age.of.Claimant[i])))
if(as.character(New_data$Gender[i])=="Male")
{ind1=1
Impact_of_Gender[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1))))
}else{ind1=0
Impact_of_Gender[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1))))}
if(as.character(New_data$Injury[i])=="Yes")
{ind2=1
Impact_of_Injury[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2))))
}else{ind2=0
Impact_of_Injury[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2))))}
if(as.character(New_data$Jurisdiction.Locale[i])=="Yes")
{ind3=1
Impact_of_Jurisdiction_Locale[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2+b4*ind3))))
}else{ind3=0
Impact_of_Jurisdiction_Locale[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2+b4*ind3))))}
Impact_of_Loss_Age[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2+b4*ind3+b5*New_data$Loss.Age[i]))))
Impact_of_Claim_Description[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2+b4*ind3+b5*New_data$Loss.Age[i]+b6*New_data$Claim.Description.Code[i]))))
Impact_of_Actual_Claim_Amount[i]=New_data$Predicted_Probabilities[i]
Impact_of_Actual_Claim_Amount[i]=1/(1+(exp(-(b0+b1*New_data$Age.of.Claimant[i]+b2*ind1+b3*ind2+b4*ind3+b5*New_data$Loss.Age[i]+b6*New_data$Claim.Description.Code[i]+b7*New_data$Actual.Claim.Amount[i]))))
Impact_of_Fault_Measure[i]=New_data$Predicted_Probabilities[i]
}
New_data=cbind(New_data,Impact_of_Age,Impact_of_Gender,Impact_of_Injury,Impact_of_Jurisdiction_Locale,Impact_of_Loss_Age,Impact_of_Claim_Description,Impact_of_Actual_Claim_Amount,Impact_of_Fault_Measure)
New_data$RAG="A"
New_data$RAG[New_data$Predicted_Probabilities<((1-.25)*threshold)]="G"
New_data$RAG[New_data$Predicted_Probabilities>((1+.25)*threshold)]="R"
New_data$Predicted_Subrogation_Percentage=sprintf("%.2f",(Predicted_Probabilities*100))
New_data=New_data[,c(1:24,26:42,44:45,25,43)]
New_data$Report.Status=RS
rm(list=setdiff(ls(), c("subro.data","Logit_Model","GL_Model","Predicted","New_data","t0")))
#Remove Output files
if(hdfs.exists("/New_Subro/output/Model_Output.txt")==TRUE){
hdfs.del("/New_Subro/output/Model_Output.txt")}
if(hdfs.exists("/New_Subro/output/Subro_Customer_data_output.txt")==TRUE){
hdfs.del("/New_Subro/output/Subro_Customer_data_output.txt")}
# To store output in HDFS
#write.table(New_data,"Subro_Customer_data_output.csv",sep=",")
write.table(New_data,"Subro_Customer_data_output.txt",row.names = FALSE,quote = FALSE,col.names = FALSE,sep=",")
hdfs.put("Subro_Customer_data_output.txt","/New_Subro/output/Subro_Customer_data_output.txt",dstFS=hdfs.defaults("fs"))
unlink("Subro_Customer_data_output.txt")
#content<-hdfs.read.text.file("/Subrogation/output/Subro_Customer_data_output.txt")
#Subro_Customer_data_output<-read.table(textConnection(content),sep=",",header=FALSE)
sink("Model_Output.txt",append = FALSE)
print("Output of Logistic Model")
summary(Logit_Model)
print(" ")
print("##############################################################################################################################################")
print(" ")
print("Output of Gaussian Linear Model")
summary(GL_Model)
print(" ")
sink()
hdfs.put("Model_Output.txt","/New_Subro/output/Model_Output.txt",dstFS=hdfs.defaults("fs"))
unlink("Model_Output.txt")
#content<-hdfs.read.text.file("/Subrogation/output/Model_Output.txt")
#Model_Output<-read.table(textConnection(content),sep=";")
t1=Sys.time()
total_time=t1-t0
print(total_time)
|
library(mirt)
resp<-read.table("emp-rasch.txt",header=FALSE)
th<-seq(-3,3,length.out=1000)
i1<-seq(0,0,length.out=1000)
i2<-seq(0,0,length.out=1000)
i3<-seq(0,0,length.out=1000)
#probabilities
p1<-function(b) 1/(1+exp(-(th+b)))
p2<-function(a,b) 1/(1+exp(-(a*th+b)))
p3<-function(a,b,g) g + (1-g)/(1+exp(-(a*th+b)))
p1_prime <- function(b) exp(-b-th)/(exp(-b-th)+1)^2
p2_prime <- function(a,b) a*exp(-a*th-b)/(exp(-a*th-b)+1)^2
p3_prime <- function(a,b,g) a*(1-g)*exp(-a*th-b)/(exp(-a*th-b)+1)^2
#models
mod1<-mirt(resp,1,itemtype="Rasch")
mod2<-mirt(resp,1,itemtype="2PL")
mod3<-mirt(resp,1,itemtype="3PL")
#parameter extraction
pars1 <- matrix(extract.mirt(mod1,'parvec'),ncol=1,byrow=TRUE)
pars2 <- matrix(extract.mirt(mod2,'parvec'),ncol=2,byrow=TRUE)
pars3 <- matrix(extract.mirt(mod3,'parvec'),ncol=3,byrow=TRUE)
for(n in 1:54){
i1 <- i1 + (p1_prime(pars1[n,]))^2/(p1(pars1[n,])*(1-p1(pars1[n,])))
i2 <- i2 + (p2_prime(pars2[n,1],pars2[n,2]))^2/(p2(pars2[n,1],pars2[n,2])*(1-p2(pars2[n,1],pars2[n,2])))
i3 <- i3 + (p3_prime(pars3[n,1],pars3[n,2],pars3[n,3]))^2 / (p3(pars3[n,1],pars3[n,2],pars3[n,3])*(1-p3(pars3[n,1],pars3[n,2],pars3[n,3])))
}
se1 = 1/sqrt(i1)
se2 = 1/sqrt(i2)
se3 = 1/sqrt(i3)
plot(th,se1,main='Rasch SE vs. Theta')
plot(th,se2,main='2PL SE vs. Theta')
plot(th,se3,main='3PL SE vs. Theta')
| /ps3/shortish8.R | no_license | kgmt0/252L | R | false | false | 1,333 | r | library(mirt)
resp<-read.table("emp-rasch.txt",header=FALSE)
th<-seq(-3,3,length.out=1000)
i1<-seq(0,0,length.out=1000)
i2<-seq(0,0,length.out=1000)
i3<-seq(0,0,length.out=1000)
#probabilities
p1<-function(b) 1/(1+exp(-(th+b)))
p2<-function(a,b) 1/(1+exp(-(a*th+b)))
p3<-function(a,b,g) g + (1-g)/(1+exp(-(a*th+b)))
p1_prime <- function(b) exp(-b-th)/(exp(-b-th)+1)^2
p2_prime <- function(a,b) a*exp(-a*th-b)/(exp(-a*th-b)+1)^2
p3_prime <- function(a,b,g) a*(1-g)*exp(-a*th-b)/(exp(-a*th-b)+1)^2
#models
mod1<-mirt(resp,1,itemtype="Rasch")
mod2<-mirt(resp,1,itemtype="2PL")
mod3<-mirt(resp,1,itemtype="3PL")
#parameter extraction
pars1 <- matrix(extract.mirt(mod1,'parvec'),ncol=1,byrow=TRUE)
pars2 <- matrix(extract.mirt(mod2,'parvec'),ncol=2,byrow=TRUE)
pars3 <- matrix(extract.mirt(mod3,'parvec'),ncol=3,byrow=TRUE)
for(n in 1:54){
i1 <- i1 + (p1_prime(pars1[n,]))^2/(p1(pars1[n,])*(1-p1(pars1[n,])))
i2 <- i2 + (p2_prime(pars2[n,1],pars2[n,2]))^2/(p2(pars2[n,1],pars2[n,2])*(1-p2(pars2[n,1],pars2[n,2])))
i3 <- i3 + (p3_prime(pars3[n,1],pars3[n,2],pars3[n,3]))^2 / (p3(pars3[n,1],pars3[n,2],pars3[n,3])*(1-p3(pars3[n,1],pars3[n,2],pars3[n,3])))
}
se1 = 1/sqrt(i1)
se2 = 1/sqrt(i2)
se3 = 1/sqrt(i3)
plot(th,se1,main='Rasch SE vs. Theta')
plot(th,se2,main='2PL SE vs. Theta')
plot(th,se3,main='3PL SE vs. Theta')
|
\name{factorScaleExample2}
\alias{factorScaleExample2}
\docType{data}
\title{
Example Factor Analysis Data for Scaling the Model
}
\description{
Data set used in some of OpenMx's examples.
}
\usage{data("factorScaleExample2")}
\format{
A data frame with 200 observations on the following variables.
\describe{
\item{\code{X1}}{}
\item{\code{X2}}{}
\item{\code{X3}}{}
\item{\code{X4}}{}
\item{\code{X5}}{}
\item{\code{X6}}{}
\item{\code{X7}}{}
\item{\code{X8}}{}
\item{\code{X9}}{}
\item{\code{X10}}{}
\item{\code{X11}}{}
\item{\code{X12}}{}
}
}
\details{
This appears to be a three factor model with factor 1 loading on X1-X4, factor 2 on X5-X8, and factor 3 on X9-X12. It differs from \link{factorScaleExample1} in the scaling of the varialbes.
}
\source{
Simulated
}
\references{
The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation.
}
\examples{
data(factorScaleExample2)
round(cor(factorScaleExample2), 2)
data(factorScaleExample2)
plot(sapply(factorScaleExample1, var), type='l', ylim=c(0, 6), lwd=3)
lines(1:12, sapply(factorScaleExample2, var), col='blue', lwd=3)
}
\keyword{datasets}
| /man/factorScaleExample2_data.Rd | no_license | Aeroglyphic/OpenMx | R | false | false | 1,183 | rd | \name{factorScaleExample2}
\alias{factorScaleExample2}
\docType{data}
\title{
Example Factor Analysis Data for Scaling the Model
}
\description{
Data set used in some of OpenMx's examples.
}
\usage{data("factorScaleExample2")}
\format{
A data frame with 200 observations on the following variables.
\describe{
\item{\code{X1}}{}
\item{\code{X2}}{}
\item{\code{X3}}{}
\item{\code{X4}}{}
\item{\code{X5}}{}
\item{\code{X6}}{}
\item{\code{X7}}{}
\item{\code{X8}}{}
\item{\code{X9}}{}
\item{\code{X10}}{}
\item{\code{X11}}{}
\item{\code{X12}}{}
}
}
\details{
This appears to be a three factor model with factor 1 loading on X1-X4, factor 2 on X5-X8, and factor 3 on X9-X12. It differs from \link{factorScaleExample1} in the scaling of the varialbes.
}
\source{
Simulated
}
\references{
The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation.
}
\examples{
data(factorScaleExample2)
round(cor(factorScaleExample2), 2)
data(factorScaleExample2)
plot(sapply(factorScaleExample1, var), type='l', ylim=c(0, 6), lwd=3)
lines(1:12, sapply(factorScaleExample2, var), col='blue', lwd=3)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsSurv.R
\name{print.nSurv}
\alias{print.nSurv}
\alias{nSurv}
\alias{print.gsSurv}
\alias{xtable.gsSurv}
\alias{tEventsIA}
\alias{nEventsIA}
\alias{gsSurv}
\title{Advanced time-to-event sample size calculation}
\usage{
\method{print}{nSurv}(x, digits = 4, ...)
nSurv(
lambdaC = log(2)/6,
hr = 0.6,
hr0 = 1,
eta = 0,
etaE = NULL,
gamma = 1,
R = 12,
S = NULL,
T = NULL,
minfup = NULL,
ratio = 1,
alpha = 0.025,
beta = 0.1,
sided = 1,
tol = .Machine$double.eps^0.25
)
tEventsIA(x, timing = 0.25, tol = .Machine$double.eps^0.25)
nEventsIA(tIA = 5, x = NULL, target = 0, simple = TRUE)
gsSurv(
k = 3,
test.type = 4,
alpha = 0.025,
sided = 1,
beta = 0.1,
astar = 0,
timing = 1,
sfu = sfHSD,
sfupar = -4,
sfl = sfHSD,
sflpar = -2,
r = 18,
lambdaC = log(2)/6,
hr = 0.6,
hr0 = 1,
eta = 0,
etaE = NULL,
gamma = 1,
R = 12,
S = NULL,
T = NULL,
minfup = NULL,
ratio = 1,
tol = .Machine$double.eps^0.25,
usTime = NULL,
lsTime = NULL
)
\method{print}{gsSurv}(x, digits = 2, ...)
\method{xtable}{gsSurv}(
x,
caption = NULL,
label = NULL,
align = NULL,
digits = NULL,
display = NULL,
auto = FALSE,
footnote = NULL,
fnwid = "9cm",
timename = "months",
...
)
}
\arguments{
\item{x}{An object of class \code{nSurv} or \code{gsSurv}.
\code{print.nSurv()} is used for an object of class \code{nSurv} which will
generally be output from \code{nSurv()}. For \code{print.gsSurv()} is used
for an object of class \code{gsSurv} which will generally be output from
\code{gsSurv()}. \code{nEventsIA} and \code{tEventsIA} operate on both the
\code{nSurv} and \code{gsSurv} class.}
\item{digits}{Number of digits past the decimal place to print
(\code{print.gsSurv.}); also a pass through to generic \code{xtable()} from
\code{xtable.gsSurv()}.}
\item{...}{other arguments that may be passed to generic functions
underlying the methods here.}
\item{lambdaC}{scalar, vector or matrix of event hazard rates for the
control group; rows represent time periods while columns represent strata; a
vector implies a single stratum.}
\item{hr}{hazard ratio (experimental/control) under the alternate hypothesis
(scalar).}
\item{hr0}{hazard ratio (experimental/control) under the null hypothesis
(scalar).}
\item{eta}{scalar, vector or matrix of dropout hazard rates for the control
group; rows represent time periods while columns represent strata; if
entered as a scalar, rate is constant across strata and time periods; if
entered as a vector, rates are constant across strata.}
\item{etaE}{matrix dropout hazard rates for the experimental group specified
in like form as \code{eta}; if NULL, this is set equal to \code{eta}.}
\item{gamma}{a scalar, vector or matrix of rates of entry by time period
(rows) and strata (columns); if entered as a scalar, rate is constant
across strata and time periods; if entered as a vector, rates are constant
across strata.}
\item{R}{a scalar or vector of durations of time periods for recruitment
rates specified in rows of \code{gamma}. Length is the same as number of
rows in \code{gamma}. Note that when variable enrollment duration is
specified (input \code{T=NULL}), the final enrollment period is extended as
long as needed.}
\item{S}{a scalar or vector of durations of piecewise constant event rates
specified in rows of \code{lambda}, \code{eta} and \code{etaE}; this is NULL
if there is a single event rate per stratum (exponential failure) or length
of the number of rows in \code{lambda} minus 1, otherwise.}
\item{T}{study duration; if \code{T} is input as \code{NULL}, this will be
computed on output; see details.}
\item{minfup}{follow-up of last patient enrolled; if \code{minfup} is input
as \code{NULL}, this will be computed on output; see details.}
\item{ratio}{randomization ratio of experimental treatment divided by
control; normally a scalar, but may be a vector with length equal to number
of strata.}
\item{alpha}{type I error rate. Default is 0.025 since 1-sided testing is
default.}
\item{beta}{type II error rate. Default is 0.10 (90\% power); NULL if power
is to be computed based on other input values.}
\item{sided}{1 for 1-sided testing, 2 for 2-sided testing.}
\item{tol}{for cases when \code{T} or \code{minfup} values are derived
through root finding (\code{T} or \code{minfup} input as \code{NULL}),
\code{tol} provides the level of error input to the \code{uniroot()}
root-finding function. The default is the same as for \code{\link{uniroot}}.}
\item{timing}{Sets relative timing of interim analyses in \code{gsSurv}.
Default of 1 produces equally spaced analyses. Otherwise, this is a vector
of length \code{k} or \code{k-1}. The values should satisfy \code{0 <
timing[1] < timing[2] < ... < timing[k-1] < timing[k]=1}. For
\code{tEventsIA}, this is a scalar strictly between 0 and 1 that indicates
the targeted proportion of final planned events available at an interim
analysis.}
\item{tIA}{Timing of an interim analysis; should be between 0 and
\code{y$T}.}
\item{target}{The targeted proportion of events at an interim analysis. This
is used for root-finding will be 0 for normal use.}
\item{simple}{See output specification for \code{nEventsIA()}.}
\item{k}{Number of analyses planned, including interim and final.}
\item{test.type}{\code{1=}one-sided \cr \code{2=}two-sided symmetric \cr
\code{3=}two-sided, asymmetric, beta-spending with binding lower bound \cr
\code{4=}two-sided, asymmetric, beta-spending with non-binding lower bound
\cr \code{5=}two-sided, asymmetric, lower bound spending under the null
hypothesis with binding lower bound \cr \code{6=}two-sided, asymmetric,
lower bound spending under the null hypothesis with non-binding lower bound.
\cr See details, examples and manual.}
\item{astar}{Normally not specified. If \code{test.type=5} or \code{6},
\code{astar} specifies the total probability of crossing a lower bound at
all analyses combined. This will be changed to \eqn{1 - }\code{alpha} when
default value of 0 is used. Since this is the expected usage, normally
\code{astar} is not specified by the user.}
\item{sfu}{A spending function or a character string indicating a boundary
type (that is, \dQuote{WT} for Wang-Tsiatis bounds, \dQuote{OF} for
O'Brien-Fleming bounds and \dQuote{Pocock} for Pocock bounds). For
one-sided and symmetric two-sided testing (\code{test.type=1, 2}),
\code{sfu} is used to completely specify spending. The default value is
\code{sfHSD} which is a Hwang-Shih-DeCani spending function. See details,
\link{Spending_Function_Overview}, manual and examples.}
\item{sfupar}{Real value, default is \eqn{-4} which is an
O'Brien-Fleming-like conservative bound when used with the default
Hwang-Shih-DeCani spending function. This is a real-vector for many spending
functions. The parameter \code{sfupar} specifies any parameters needed for
the spending function specified by \code{sfu}; this will be ignored for
spending functions (\code{sfLDOF}, \code{sfLDPocock}) or bound types
(\dQuote{OF}, \dQuote{Pocock}) that do not require parameters.}
\item{sfl}{Specifies the spending function for lower boundary crossing
probabilities when asymmetric, two-sided testing is performed
(\code{test.type = 3}, \code{4}, \code{5}, or \code{6}). Unlike the upper
bound, only spending functions are used to specify the lower bound. The
default value is \code{sfHSD} which is a Hwang-Shih-DeCani spending
function. The parameter \code{sfl} is ignored for one-sided testing
(\code{test.type=1}) or symmetric 2-sided testing (\code{test.type=2}). See
details, spending functions, manual and examples.}
\item{sflpar}{Real value, default is \eqn{-2}, which, with the default
Hwang-Shih-DeCani spending function, specifies a less conservative spending
rate than the default for the upper bound.}
\item{r}{Integer value controlling grid for numerical integration as in
Jennison and Turnbull (2000); default is 18, range is 1 to 80. Larger values
provide larger number of grid points and greater accuracy. Normally
\code{r} will not be changed by the user.}
\item{usTime}{Default is NULL in which case upper bound spending time is
determined by \code{timing}. Otherwise, this should be a vector of length
\code{k} with the spending time at each analysis (see Details in help for \code{gsDesign}).}
\item{lsTime}{Default is NULL in which case lower bound spending time is
determined by \code{timing}. Otherwise, this should be a vector of length
\code{k} with the spending time at each analysis (see Details in help for \code{gsDesign}).}
\item{caption}{passed through to generic \code{xtable()}.}
\item{label}{passed through to generic \code{xtable()}.}
\item{align}{passed through to generic \code{xtable()}.}
\item{display}{passed through to generic \code{xtable()}.}
\item{auto}{passed through to generic \code{xtable()}.}
\item{footnote}{footnote for xtable output; may be useful for describing
some of the design parameters.}
\item{fnwid}{a text string controlling the width of footnote text at the
bottom of the xtable output.}
\item{timename}{character string with plural of time units (e.g., "months")}
}
\value{
\code{nSurv()} returns an object of type \code{nSurv} with the
following components: \item{alpha}{As input.} \item{sided}{As input.}
\item{beta}{Type II error; if missing, this is computed.} \item{power}{Power
corresponding to input \code{beta} or computed if output \code{beta} is
computed.} \item{lambdaC}{As input.} \item{etaC}{As input.} \item{etaE}{As
input.} \item{gamma}{As input unless none of the following are \code{NULL}:
\code{T}, \code{minfup}, \code{beta}; otherwise, this is a constant times
the input value required to power the trial given the other input
variables.} \item{ratio}{As input.} \item{R}{As input unless \code{T} was
\code{NULL} on input.} \item{S}{As input.} \item{T}{As input.}
\item{minfup}{As input.} \item{hr}{As input.} \item{hr0}{As input.}
\item{n}{Total expected sample size corresponding to output accrual rates
and durations.} \item{d}{Total expected number of events under the alternate
hypothesis.} \item{tol}{As input, except when not used in computations in
which case this is returned as \code{NULL}. This and the remaining output
below are not printed by the \code{print()} extension for the \code{nSurv}
class.} \item{eDC}{A vector of expected number of events by stratum in the
control group under the alternate hypothesis.} \item{eDE}{A vector of
expected number of events by stratum in the experimental group under the
alternate hypothesis.} \item{eDC0}{A vector of expected number of events by
stratum in the control group under the null hypothesis.} \item{eDE0}{A
vector of expected number of events by stratum in the experimental group
under the null hypothesis.} \item{eNC}{A vector of the expected accrual in
each stratum in the control group.} \item{eNE}{A vector of the expected
accrual in each stratum in the experimental group.} \item{variable}{A text
string equal to "Accrual rate" if a design was derived by varying the
accrual rate, "Accrual duration" if a design was derived by varying the
accrual duration, "Follow-up duration" if a design was derived by varying
follow-up duration, or "Power" if accrual rates and duration as well as
follow-up duration was specified and \code{beta=NULL} was input.}
\code{gsSurv()} returns much of the above plus variables in the class
\code{gsDesign}; see \code{\link{gsDesign}}
for general documentation on what is returned in \code{gs}. The value of
\code{gs$n.I} represents the number of endpoints required at each analysis
to adequately power the trial. Other items returned by \code{gsSurv()} are:
\item{lambdaC}{As input.} \item{etaC}{As input.} \item{etaE}{As input.}
\item{gamma}{As input unless none of the following are \code{NULL}:
\code{T}, \code{minfup}, \code{beta}; otherwise, this is a constant times
the input value required to power the trial given the other input
variables.} \item{ratio}{As input.} \item{R}{As input unless \code{T} was
\code{NULL} on input.} \item{S}{As input.} \item{T}{As input.}
\item{minfup}{As input.} \item{hr}{As input.} \item{hr0}{As input.}
\item{eNC}{Total expected sample size corresponding to output accrual rates
and durations.} \item{eNE}{Total expected sample size corresponding to
output accrual rates and durations.} \item{eDC}{Total expected number of
events under the alternate hypothesis.} \item{eDE}{Total expected number of
events under the alternate hypothesis.} \item{tol}{As input, except when not
used in computations in which case this is returned as \code{NULL}. This
and the remaining output below are not printed by the \code{print()}
extension for the \code{nSurv} class.} \item{eDC}{A vector of expected
number of events by stratum in the control group under the alternate
hypothesis.} \item{eDE}{A vector of expected number of events by stratum in
the experimental group under the alternate hypothesis.} \item{eNC}{A vector of
the expected accrual in each stratum in the control group.} \item{eNE}{A
vector of the expected accrual in each stratum in the experimental group.}
\item{variable}{A text string equal to "Accrual rate" if a design was
derived by varying the accrual rate, "Accrual duration" if a design was
derived by varying the accrual duration, "Follow-up duration" if a design
was derived by varying follow-up duration, or "Power" if accrual rates and
duration as well as follow-up duration was specified and \code{beta=NULL}
was input.}
\code{nEventsIA()} returns the expected proportion of the final planned
events observed at the input analysis time minus \code{target} when
\code{simple=TRUE}. When \code{simple=FALSE}, \code{nEventsIA} returns a
list with following components: \item{T}{The input value \code{tIA}.}
\item{eDC}{The expected number of events in the control group at time the
output time \code{T}.} \item{eDE}{The expected number of events in the
experimental group at the output time \code{T}.} \item{eNC}{The expected
enrollment in the control group at the output time \code{T}.} \item{eNE}{The
expected enrollment in the experimental group at the output time \code{T}.}
\code{tEventsIA()} returns the same structure as \code{nEventsIA()} when
\code{simple = FALSE}.
}
\description{
\code{nSurv()} is used to calculate the sample size for a clinical trial
with a time-to-event endpoint and an assumption of proportional hazards.
This set of routines is new with version 2.7 and will continue to be
modified and refined to improve input error checking and output format with
subsequent versions. It allows both the Lachin and Foulkes (1986) method
(fixed trial duration) as well as the Kim and Tsiatis (1990) method (fixed
enrollment rates and either fixed enrollment duration or fixed minimum
follow-up). Piecewise exponential survival is supported as well as piecewise
constant enrollment and dropout rates. The methods are for a 2-arm trial
with treatment groups referred to as experimental and control. A stratified
population is allowed as in Lachin and Foulkes (1986); this method has been
extended to derive non-inferiority as well as superiority trials.
Stratification also allows power calculation for meta-analyses.
\code{gsSurv()} combines \code{nSurv()} with \code{gsDesign()} to derive a
group sequential design for a study with a time-to-event endpoint.
}
\details{
\code{print()}, \code{xtable()} and \code{summary()} methods are provided to
operate on the returned value from \code{gsSurv()}, an object of class
\code{gsSurv}. \code{print()} is also extended to \code{nSurv} objects. The
functions \code{\link{gsBoundSummary}} (data frame for tabular output),
\code{\link{xprint}} (application of \code{xtable} for tabular output) and
\code{summary.gsSurv} (textual summary of \code{gsDesign} or \code{gsSurv}
object) may be preferred summary functions; see example in vignettes. See
also \link{gsBoundSummary} for output
of tabular summaries of bounds for designs produced by \code{gsSurv()}.
Both \code{nEventsIA} and \code{tEventsIA} require a group sequential design
for a time-to-event endpoint of class \code{gsSurv} as input.
\code{nEventsIA} calculates the expected number of events under the
alternate hypothesis at a given interim time. \code{tEventsIA} calculates
the time that the expected number of events under the alternate hypothesis
is a given proportion of the total events planned for the final analysis.
\code{nSurv()} produces an object of class \code{nSurv} with the number of
subjects and events for a set of pre-specified trial parameters, such as
accrual duration and follow-up period. The underlying power calculation is
based on Lachin and Foulkes (1986) method for proportional hazards assuming
a fixed underlying hazard ratio between 2 treatment groups. The method has
been extended here to enable designs to test non-inferiority. Piecewise
constant enrollment and failure rates are assumed and a stratified
population is allowed. See also \code{\link{nSurvival}} for other Lachin and
Foulkes (1986) methods assuming a constant hazard difference or exponential
enrollment rate.
When study duration (\code{T}) and follow-up duration (\code{minfup}) are
fixed, \code{nSurv} applies exactly the Lachin and Foulkes (1986) method of
computing sample size under the proportional hazards assumption. For this
computation, enrollment rates are altered proportionately to those
input in \code{gamma} to achieve the power of interest.
Given the specified enrollment rate(s) input in \code{gamma}, \code{nSurv}
may also be used to derive enrollment duration required for a trial to have
defined power if \code{T} is input as \code{NULL}; in this case, both
\code{R} (enrollment duration for each specified enrollment rate) and
\code{T} (study duration) will be computed on output.
Alternatively and also using the fixed enrollment rate(s) in \code{gamma},
if minimum follow-up \code{minfup} is specified as \code{NULL}, then the
enrollment duration(s) specified in \code{R} are considered fixed and
\code{minfup} and \code{T} are computed to derive the desired power. This
method will fail if the specified enrollment rates and durations either
over-powers the trial with no additional follow-up or underpowers the trial
with infinite follow-up. This method produces a corresponding error message
in such cases.
The input to \code{gsSurv} is a combination of the input to \code{nSurv()}
and \code{gsDesign()}.
\code{nEventsIA()} is provided to compute the expected number of events at a
given point in time given enrollment, event and censoring rates. The routine
is used with a root finding routine to approximate the timing of
an interim analysis. It is also used to extend enrollment or follow-up of a
fixed design to obtain a sufficient number of events to power a group
sequential design.
}
\examples{
# vary accrual rate to obtain power
nSurv(lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 1, T = 36, minfup = 12)
# vary accrual duration to obtain power
nSurv(lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 6, minfup = 12)
# vary follow-up duration to obtain power
nSurv(lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 6, R = 25)
# piecewise constant enrollment rates (vary accrual duration)
nSurv(
lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = c(1, 3, 6),
R = c(3, 6, 9), minfup = 12
)
# stratified population (vary accrual duration)
nSurv(
lambdaC = matrix(log(2) / c(6, 12), ncol = 2), hr = .5, eta = log(2) / 40,
gamma = matrix(c(2, 4), ncol = 2), minfup = 12
)
# piecewise exponential failure rates (vary accrual duration)
nSurv(lambdaC = log(2) / c(6, 12), hr = .5, eta = log(2) / 40, S = 3, gamma = 6, minfup = 12)
# combine it all: 2 strata, 2 failure rate periods
nSurv(
lambdaC = matrix(log(2) / c(6, 12, 18, 24), ncol = 2), hr = .5,
eta = matrix(log(2) / c(40, 50, 45, 55), ncol = 2), S = 3,
gamma = matrix(c(3, 6, 5, 7), ncol = 2), R = c(5, 10), minfup = 12
)
# example where only 1 month of follow-up is desired
# set failure rate to 0 after 1 month using lambdaC and S
nSurv(lambdaC = c(.4, 0), hr = 2 / 3, S = 1, minfup = 1)
# group sequential design (vary accrual rate to obtain power)
x <- gsSurv(
k = 4, sfl = sfPower, sflpar = .5, lambdaC = log(2) / 6, hr = .5,
eta = log(2) / 40, gamma = 1, T = 36, minfup = 12
)
x
print(xtable::xtable(x,
footnote = "This is a footnote; note that it can be wide.",
caption = "Caption example."
))
# find expected number of events at time 10 in the above trial
nEventsIA(x = x, tIA = 10)
# find time at which 1/4 of events are expected
tEventsIA(x = x, timing = .25)
}
\references{
Kim KM and Tsiatis AA (1990), Study duration for clinical trials
with survival response and early stopping rule. \emph{Biometrics}, 46, 81-92
Lachin JM and Foulkes MA (1986), Evaluation of Sample Size and Power for
Analyses of Survival with Allowance for Nonuniform Patient Entry, Losses to
Follow-Up, Noncompliance, and Stratification. \emph{Biometrics}, 42,
507-519.
Schoenfeld D (1981), The Asymptotic Properties of Nonparametric Tests for
Comparing Survival Distributions. \emph{Biometrika}, 68, 316-319.
}
\seealso{
\code{\link{gsBoundSummary}}, \code{\link{xprint}},
\link{gsDesign package overview}, \link{plot.gsDesign},
\code{\link{gsDesign}}, \code{\link{gsHR}}, \code{\link{nSurvival}}
\code{\link[stats]{uniroot}}
\code{\link[stats]{Normal}}
\code{\link[xtable]{xtable}}
}
\author{
Keaven Anderson \email{keaven_anderson@merck.com}
}
\keyword{design}
| /man/nSurv.Rd | no_license | cran/gsDesign | R | false | true | 22,057 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsSurv.R
\name{print.nSurv}
\alias{print.nSurv}
\alias{nSurv}
\alias{print.gsSurv}
\alias{xtable.gsSurv}
\alias{tEventsIA}
\alias{nEventsIA}
\alias{gsSurv}
\title{Advanced time-to-event sample size calculation}
\usage{
\method{print}{nSurv}(x, digits = 4, ...)
nSurv(
lambdaC = log(2)/6,
hr = 0.6,
hr0 = 1,
eta = 0,
etaE = NULL,
gamma = 1,
R = 12,
S = NULL,
T = NULL,
minfup = NULL,
ratio = 1,
alpha = 0.025,
beta = 0.1,
sided = 1,
tol = .Machine$double.eps^0.25
)
tEventsIA(x, timing = 0.25, tol = .Machine$double.eps^0.25)
nEventsIA(tIA = 5, x = NULL, target = 0, simple = TRUE)
gsSurv(
k = 3,
test.type = 4,
alpha = 0.025,
sided = 1,
beta = 0.1,
astar = 0,
timing = 1,
sfu = sfHSD,
sfupar = -4,
sfl = sfHSD,
sflpar = -2,
r = 18,
lambdaC = log(2)/6,
hr = 0.6,
hr0 = 1,
eta = 0,
etaE = NULL,
gamma = 1,
R = 12,
S = NULL,
T = NULL,
minfup = NULL,
ratio = 1,
tol = .Machine$double.eps^0.25,
usTime = NULL,
lsTime = NULL
)
\method{print}{gsSurv}(x, digits = 2, ...)
\method{xtable}{gsSurv}(
x,
caption = NULL,
label = NULL,
align = NULL,
digits = NULL,
display = NULL,
auto = FALSE,
footnote = NULL,
fnwid = "9cm",
timename = "months",
...
)
}
\arguments{
\item{x}{An object of class \code{nSurv} or \code{gsSurv}.
\code{print.nSurv()} is used for an object of class \code{nSurv} which will
generally be output from \code{nSurv()}. For \code{print.gsSurv()} is used
for an object of class \code{gsSurv} which will generally be output from
\code{gsSurv()}. \code{nEventsIA} and \code{tEventsIA} operate on both the
\code{nSurv} and \code{gsSurv} class.}
\item{digits}{Number of digits past the decimal place to print
(\code{print.gsSurv.}); also a pass through to generic \code{xtable()} from
\code{xtable.gsSurv()}.}
\item{...}{other arguments that may be passed to generic functions
underlying the methods here.}
\item{lambdaC}{scalar, vector or matrix of event hazard rates for the
control group; rows represent time periods while columns represent strata; a
vector implies a single stratum.}
\item{hr}{hazard ratio (experimental/control) under the alternate hypothesis
(scalar).}
\item{hr0}{hazard ratio (experimental/control) under the null hypothesis
(scalar).}
\item{eta}{scalar, vector or matrix of dropout hazard rates for the control
group; rows represent time periods while columns represent strata; if
entered as a scalar, rate is constant across strata and time periods; if
entered as a vector, rates are constant across strata.}
\item{etaE}{matrix dropout hazard rates for the experimental group specified
in like form as \code{eta}; if NULL, this is set equal to \code{eta}.}
\item{gamma}{a scalar, vector or matrix of rates of entry by time period
(rows) and strata (columns); if entered as a scalar, rate is constant
across strata and time periods; if entered as a vector, rates are constant
across strata.}
\item{R}{a scalar or vector of durations of time periods for recruitment
rates specified in rows of \code{gamma}. Length is the same as number of
rows in \code{gamma}. Note that when variable enrollment duration is
specified (input \code{T=NULL}), the final enrollment period is extended as
long as needed.}
\item{S}{a scalar or vector of durations of piecewise constant event rates
specified in rows of \code{lambda}, \code{eta} and \code{etaE}; this is NULL
if there is a single event rate per stratum (exponential failure) or length
of the number of rows in \code{lambda} minus 1, otherwise.}
\item{T}{study duration; if \code{T} is input as \code{NULL}, this will be
computed on output; see details.}
\item{minfup}{follow-up of last patient enrolled; if \code{minfup} is input
as \code{NULL}, this will be computed on output; see details.}
\item{ratio}{randomization ratio of experimental treatment divided by
control; normally a scalar, but may be a vector with length equal to number
of strata.}
\item{alpha}{type I error rate. Default is 0.025 since 1-sided testing is
default.}
\item{beta}{type II error rate. Default is 0.10 (90\% power); NULL if power
is to be computed based on other input values.}
\item{sided}{1 for 1-sided testing, 2 for 2-sided testing.}
\item{tol}{for cases when \code{T} or \code{minfup} values are derived
through root finding (\code{T} or \code{minfup} input as \code{NULL}),
\code{tol} provides the level of error input to the \code{uniroot()}
root-finding function. The default is the same as for \code{\link{uniroot}}.}
\item{timing}{Sets relative timing of interim analyses in \code{gsSurv}.
Default of 1 produces equally spaced analyses. Otherwise, this is a vector
of length \code{k} or \code{k-1}. The values should satisfy \code{0 <
timing[1] < timing[2] < ... < timing[k-1] < timing[k]=1}. For
\code{tEventsIA}, this is a scalar strictly between 0 and 1 that indicates
the targeted proportion of final planned events available at an interim
analysis.}
\item{tIA}{Timing of an interim analysis; should be between 0 and
\code{x$T}.}
\item{target}{The targeted proportion of events at an interim analysis. This
is used for root-finding and will be 0 for normal use.}
\item{simple}{See output specification for \code{nEventsIA()}.}
\item{k}{Number of analyses planned, including interim and final.}
\item{test.type}{\code{1=}one-sided \cr \code{2=}two-sided symmetric \cr
\code{3=}two-sided, asymmetric, beta-spending with binding lower bound \cr
\code{4=}two-sided, asymmetric, beta-spending with non-binding lower bound
\cr \code{5=}two-sided, asymmetric, lower bound spending under the null
hypothesis with binding lower bound \cr \code{6=}two-sided, asymmetric,
lower bound spending under the null hypothesis with non-binding lower bound.
\cr See details, examples and manual.}
\item{astar}{Normally not specified. If \code{test.type=5} or \code{6},
\code{astar} specifies the total probability of crossing a lower bound at
all analyses combined. This will be changed to \eqn{1 - }\code{alpha} when
default value of 0 is used. Since this is the expected usage, normally
\code{astar} is not specified by the user.}
\item{sfu}{A spending function or a character string indicating a boundary
type (that is, \dQuote{WT} for Wang-Tsiatis bounds, \dQuote{OF} for
O'Brien-Fleming bounds and \dQuote{Pocock} for Pocock bounds). For
one-sided and symmetric two-sided testing (\code{test.type=1, 2}),
\code{sfu} is used to completely specify spending. The default value is
\code{sfHSD} which is a Hwang-Shih-DeCani spending function. See details,
\link{Spending_Function_Overview}, manual and examples.}
\item{sfupar}{Real value, default is \eqn{-4} which is an
O'Brien-Fleming-like conservative bound when used with the default
Hwang-Shih-DeCani spending function. This is a real-vector for many spending
functions. The parameter \code{sfupar} specifies any parameters needed for
the spending function specified by \code{sfu}; this will be ignored for
spending functions (\code{sfLDOF}, \code{sfLDPocock}) or bound types
(\dQuote{OF}, \dQuote{Pocock}) that do not require parameters.}
\item{sfl}{Specifies the spending function for lower boundary crossing
probabilities when asymmetric, two-sided testing is performed
(\code{test.type = 3}, \code{4}, \code{5}, or \code{6}). Unlike the upper
bound, only spending functions are used to specify the lower bound. The
default value is \code{sfHSD} which is a Hwang-Shih-DeCani spending
function. The parameter \code{sfl} is ignored for one-sided testing
(\code{test.type=1}) or symmetric 2-sided testing (\code{test.type=2}). See
details, spending functions, manual and examples.}
\item{sflpar}{Real value, default is \eqn{-2}, which, with the default
Hwang-Shih-DeCani spending function, specifies a less conservative spending
rate than the default for the upper bound.}
\item{r}{Integer value controlling grid for numerical integration as in
Jennison and Turnbull (2000); default is 18, range is 1 to 80. Larger values
provide larger number of grid points and greater accuracy. Normally
\code{r} will not be changed by the user.}
\item{usTime}{Default is NULL in which case upper bound spending time is
determined by \code{timing}. Otherwise, this should be a vector of length
\code{k} with the spending time at each analysis (see Details in help for \code{gsDesign}).}
\item{lsTime}{Default is NULL in which case lower bound spending time is
determined by \code{timing}. Otherwise, this should be a vector of length
\code{k} with the spending time at each analysis (see Details in help for \code{gsDesign}).}
\item{caption}{passed through to generic \code{xtable()}.}
\item{label}{passed through to generic \code{xtable()}.}
\item{align}{passed through to generic \code{xtable()}.}
\item{display}{passed through to generic \code{xtable()}.}
\item{auto}{passed through to generic \code{xtable()}.}
\item{footnote}{footnote for xtable output; may be useful for describing
some of the design parameters.}
\item{fnwid}{a text string controlling the width of footnote text at the
bottom of the xtable output.}
\item{timename}{character string with plural of time units (e.g., "months")}
}
\value{
\code{nSurv()} returns an object of type \code{nSurv} with the
following components: \item{alpha}{As input.} \item{sided}{As input.}
\item{beta}{Type II error; if missing, this is computed.} \item{power}{Power
corresponding to input \code{beta} or computed if output \code{beta} is
computed.} \item{lambdaC}{As input.} \item{etaC}{As input.} \item{etaE}{As
input.} \item{gamma}{As input unless none of the following are \code{NULL}:
\code{T}, \code{minfup}, \code{beta}; otherwise, this is a constant times
the input value required to power the trial given the other input
variables.} \item{ratio}{As input.} \item{R}{As input unless \code{T} was
\code{NULL} on input.} \item{S}{As input.} \item{T}{As input.}
\item{minfup}{As input.} \item{hr}{As input.} \item{hr0}{As input.}
\item{n}{Total expected sample size corresponding to output accrual rates
and durations.} \item{d}{Total expected number of events under the alternate
hypothesis.} \item{tol}{As input, except when not used in computations in
which case this is returned as \code{NULL}. This and the remaining output
below are not printed by the \code{print()} extension for the \code{nSurv}
class.} \item{eDC}{A vector of expected number of events by stratum in the
control group under the alternate hypothesis.} \item{eDE}{A vector of
expected number of events by stratum in the experimental group under the
alternate hypothesis.} \item{eDC0}{A vector of expected number of events by
stratum in the control group under the null hypothesis.} \item{eDE0}{A
vector of expected number of events by stratum in the experimental group
under the null hypothesis.} \item{eNC}{A vector of the expected accrual in
each stratum in the control group.} \item{eNE}{A vector of the expected
accrual in each stratum in the experimental group.} \item{variable}{A text
string equal to "Accrual rate" if a design was derived by varying the
accrual rate, "Accrual duration" if a design was derived by varying the
accrual duration, "Follow-up duration" if a design was derived by varying
follow-up duration, or "Power" if accrual rates and duration as well as
follow-up duration was specified and \code{beta=NULL} was input.}
\code{gsSurv()} returns much of the above plus variables in the class
\code{gsDesign}; see \code{\link{gsDesign}}
for general documentation on what is returned in \code{gs}. The value of
\code{gs$n.I} represents the number of endpoints required at each analysis
to adequately power the trial. Other items returned by \code{gsSurv()} are:
\item{lambdaC}{As input.} \item{etaC}{As input.} \item{etaE}{As input.}
\item{gamma}{As input unless none of the following are \code{NULL}:
\code{T}, \code{minfup}, \code{beta}; otherwise, this is a constant times
the input value required to power the trial given the other input
variables.} \item{ratio}{As input.} \item{R}{As input unless \code{T} was
\code{NULL} on input.} \item{S}{As input.} \item{T}{As input.}
\item{minfup}{As input.} \item{hr}{As input.} \item{hr0}{As input.}
\item{eNC}{Total expected sample size corresponding to output accrual rates
and durations.} \item{eNE}{Total expected sample size corresponding to
output accrual rates and durations.} \item{eDC}{Total expected number of
events under the alternate hypothesis.} \item{eDE}{Total expected number of
events under the alternate hypothesis.} \item{tol}{As input, except when not
used in computations in which case this is returned as \code{NULL}. This
and the remaining output below are not printed by the \code{print()}
extension for the \code{nSurv} class.} \item{eDC}{A vector of expected
number of events by stratum in the control group under the alternate
hypothesis.} \item{eDE}{A vector of expected number of events by stratum in
the experimental group under the alternate hypothesis.} \item{eNC}{A vector of
the expected accrual in each stratum in the control group.} \item{eNE}{A
vector of the expected accrual in each stratum in the experimental group.}
\item{variable}{A text string equal to "Accrual rate" if a design was
derived by varying the accrual rate, "Accrual duration" if a design was
derived by varying the accrual duration, "Follow-up duration" if a design
was derived by varying follow-up duration, or "Power" if accrual rates and
duration as well as follow-up duration was specified and \code{beta=NULL}
was input.}
\code{nEventsIA()} returns the expected proportion of the final planned
events observed at the input analysis time minus \code{target} when
\code{simple=TRUE}. When \code{simple=FALSE}, \code{nEventsIA} returns a
list with following components: \item{T}{The input value \code{tIA}.}
\item{eDC}{The expected number of events in the control group at time the
output time \code{T}.} \item{eDE}{The expected number of events in the
experimental group at the output time \code{T}.} \item{eNC}{The expected
enrollment in the control group at the output time \code{T}.} \item{eNE}{The
expected enrollment in the experimental group at the output time \code{T}.}
\code{tEventsIA()} returns the same structure as \code{nEventsIA(..., simple=TRUE)}.
}
\description{
\code{nSurv()} is used to calculate the sample size for a clinical trial
with a time-to-event endpoint and an assumption of proportional hazards.
This set of routines is new with version 2.7 and will continue to be
modified and refined to improve input error checking and output format with
subsequent versions. It allows both the Lachin and Foulkes (1986) method
(fixed trial duration) as well as the Kim and Tsiatis(1990) method (fixed
enrollment rates and either fixed enrollment duration or fixed minimum
follow-up). Piecewise exponential survival is supported as well as piecewise
constant enrollment and dropout rates. The methods are for a 2-arm trial
with treatment groups referred to as experimental and control. A stratified
population is allowed as in Lachin and Foulkes (1986); this method has been
extended to derive non-inferiority as well as superiority trials.
Stratification also allows power calculation for meta-analyses.
\code{gsSurv()} combines \code{nSurv()} with \code{gsDesign()} to derive a
group sequential design for a study with a time-to-event endpoint.
}
\details{
\code{print()}, \code{xtable()} and \code{summary()} methods are provided to
operate on the returned value from \code{gsSurv()}, an object of class
\code{gsSurv}. \code{print()} is also extended to \code{nSurv} objects. The
functions \code{\link{gsBoundSummary}} (data frame for tabular output),
\code{\link{xprint}} (application of \code{xtable} for tabular output) and
\code{summary.gsSurv} (textual summary of \code{gsDesign} or \code{gsSurv}
object) may be preferred summary functions; see example in vignettes. See
also \link{gsBoundSummary} for output
of tabular summaries of bounds for designs produced by \code{gsSurv()}.
Both \code{nEventsIA} and \code{tEventsIA} require a group sequential design
for a time-to-event endpoint of class \code{gsSurv} as input.
\code{nEventsIA} calculates the expected number of events under the
alternate hypothesis at a given interim time. \code{tEventsIA} calculates
the time that the expected number of events under the alternate hypothesis
is a given proportion of the total events planned for the final analysis.
\code{nSurv()} produces an object of class \code{nSurv} with the number of
subjects and events for a set of pre-specified trial parameters, such as
accrual duration and follow-up period. The underlying power calculation is
based on Lachin and Foulkes (1986) method for proportional hazards assuming
a fixed underlying hazard ratio between 2 treatment groups. The method has
been extended here to enable designs to test non-inferiority. Piecewise
constant enrollment and failure rates are assumed and a stratified
population is allowed. See also \code{\link{nSurvival}} for other Lachin and
Foulkes (1986) methods assuming a constant hazard difference or exponential
enrollment rate.
When study duration (\code{T}) and follow-up duration (\code{minfup}) are
fixed, \code{nSurv} applies exactly the Lachin and Foulkes (1986) method of
computing sample size under the proportional hazards assumption. For
this computation, enrollment rates are altered proportionately to those
input in \code{gamma} to achieve the power of interest.
Given the specified enrollment rate(s) input in \code{gamma}, \code{nSurv}
may also be used to derive enrollment duration required for a trial to have
defined power if \code{T} is input as \code{NULL}; in this case, both
\code{R} (enrollment duration for each specified enrollment rate) and
\code{T} (study duration) will be computed on output.
Alternatively and also using the fixed enrollment rate(s) in \code{gamma},
if minimum follow-up \code{minfup} is specified as \code{NULL}, then the
enrollment duration(s) specified in \code{R} are considered fixed and
\code{minfup} and \code{T} are computed to derive the desired power. This
method will fail if the specified enrollment rates and durations either
over-powers the trial with no additional follow-up or underpowers the trial
with infinite follow-up. This method produces a corresponding error message
in such cases.
The input to \code{gsSurv} is a combination of the input to \code{nSurv()}
and \code{gsDesign()}.
\code{nEventsIA()} is provided to compute the expected number of events at a
given point in time given enrollment, event and censoring rates. The routine
is used with a root-finding routine to approximate the timing of
an interim analysis. It is also used to extend enrollment or follow-up of a
fixed design to obtain a sufficient number of events to power a group
sequential design.
}
\examples{
# vary accrual rate to obtain power
nSurv(lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 1, T = 36, minfup = 12)
# vary accrual duration to obtain power
nSurv(lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 6, minfup = 12)
# vary follow-up duration to obtain power
nSurv(lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = 6, R = 25)
# piecewise constant enrollment rates (vary accrual duration)
nSurv(
lambdaC = log(2) / 6, hr = .5, eta = log(2) / 40, gamma = c(1, 3, 6),
R = c(3, 6, 9), minfup = 12
)
# stratified population (vary accrual duration)
nSurv(
lambdaC = matrix(log(2) / c(6, 12), ncol = 2), hr = .5, eta = log(2) / 40,
gamma = matrix(c(2, 4), ncol = 2), minfup = 12
)
# piecewise exponential failure rates (vary accrual duration)
nSurv(lambdaC = log(2) / c(6, 12), hr = .5, eta = log(2) / 40, S = 3, gamma = 6, minfup = 12)
# combine it all: 2 strata, 2 failure rate periods
nSurv(
lambdaC = matrix(log(2) / c(6, 12, 18, 24), ncol = 2), hr = .5,
eta = matrix(log(2) / c(40, 50, 45, 55), ncol = 2), S = 3,
gamma = matrix(c(3, 6, 5, 7), ncol = 2), R = c(5, 10), minfup = 12
)
# example where only 1 month of follow-up is desired
# set failure rate to 0 after 1 month using lambdaC and S
nSurv(lambdaC = c(.4, 0), hr = 2 / 3, S = 1, minfup = 1)
# group sequential design (vary accrual rate to obtain power)
x <- gsSurv(
k = 4, sfl = sfPower, sflpar = .5, lambdaC = log(2) / 6, hr = .5,
eta = log(2) / 40, gamma = 1, T = 36, minfup = 12
)
x
print(xtable::xtable(x,
footnote = "This is a footnote; note that it can be wide.",
caption = "Caption example."
))
# find expected number of events at time 12 in the above trial
nEventsIA(x = x, tIA = 10)
# find time at which 1/4 of events are expected
tEventsIA(x = x, timing = .25)
}
\references{
Kim KM and Tsiatis AA (1990), Study duration for clinical trials
with survival response and early stopping rule. \emph{Biometrics}, 46, 81-92
Lachin JM and Foulkes MA (1986), Evaluation of Sample Size and Power for
Analyses of Survival with Allowance for Nonuniform Patient Entry, Losses to
Follow-Up, Noncompliance, and Stratification. \emph{Biometrics}, 42,
507-519.
Schoenfeld D (1981), The Asymptotic Properties of Nonparametric Tests for
Comparing Survival Distributions. \emph{Biometrika}, 68, 316-319.
}
\seealso{
\code{\link{gsBoundSummary}}, \code{\link{xprint}},
\link{gsDesign package overview}, \link{plot.gsDesign},
\code{\link{gsDesign}}, \code{\link{gsHR}}, \code{\link{nSurvival}}
\code{\link[stats]{uniroot}}
\code{\link[stats]{Normal}}
\code{\link[xtable]{xtable}}
}
\author{
Keaven Anderson \email{keaven_anderson@merck.com}
}
\keyword{design}
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Load the SWAT package (CAS interface) and the other R libraries used below.
# Fix: library(caret) was loaded twice; the duplicate call has been removed.
# NOTE(review): xgboost is loaded but the script below actually trains a
# randomForest model; keep it only if an XGB comparison is planned.
library(swat)          # SAS Cloud Analytic Services (CAS) interface
library(ggplot2)       # ROC curve plotting
library(reshape2)
library(xgboost)
library(caret)
library(dplyr)
library(pROC)
library(e1071)
library(ROCR)          # prediction()/performance() for the R-model ROC curve
library(pmml)          # PMML export of the R model
library(randomForest)
# Connect to CAS server using appropriate credentials
# (CAS() with no arguments picks up host/port/credentials from the environment).
s = CAS()
# Create a CAS library called lg pointing to the defined directory
# Need to specify the srctype as path, otherwise it defaults to HDFS
cas.table.addCaslib(s,
name = "lg",
description = "Looking glass data",
dataSource = list(srcType="path"),
path = "/viyafiles/tmp"
)
# Load the data into the in-memory CAS server.
# NOTE(review): the CSV path is a hard-coded local Windows path -- adjust per site.
data = cas.read.csv(s,
"C:/Users/Looking_glass.csv",
casOut=list(name="castbl", caslib="lg", replace=TRUE)
)
# Invoke the overloaded R functions to view the head and summary of the input table
print(head(data))
print(summary(data))
# Check for any missingness in the data:
# cas.simple.distinct() reports, per column, the count of missing values (NMiss).
dist_tabl = cas.simple.distinct(data)$Distinct[,c('Column','NMiss')]
print(dist_tabl)
dist_tabl = as.data.frame(dist_tabl)
# Keep only the columns that actually contain missing values.
sub = subset(dist_tabl, dist_tabl$NMiss != 0)
imp_cols = sub$Column
# Print the names of the columns to be imputed
print(imp_cols)
# Impute the missing values (median for continuous, mode for nominal columns);
# the imputed result overwrites 'castbl' in place (replace = TRUE).
cas.dataPreprocess.impute(data,
methodContinuous = 'MEDIAN',
methodNominal = 'MODE',
inputs = imp_cols,
copyAllVars = TRUE,
casOut = list(name = 'castbl', replace = TRUE)
)
# Split the data into training and validation and view the partitioned table.
# samppct = 30 flags 30% of rows with _PartInd_ = 1; that 30% is used below as
# the validation set, the remaining 70% (_PartInd_ = 0) as training.
loadActionSet(s,"sampling")
cas.sampling.srs( s,
table = list(name="castbl", caslib="lg"),
samppct = 30,
seed = 123456,
partind = TRUE,
output = list(casOut = list(name = "sampled_castbl", replace = T, caslib="lg"), copyVars = 'ALL')
)
# Check for frequency distribution of partitioned data
cas.simple.freq(s,table="sampled_castbl", inputs="_PartInd_")
# Partition data into train and validation based on _PartInd_
train = defCasTable(s, tablename = "sampled_castbl", where = " _PartInd_ = 0 ")
val = defCasTable(s, tablename = "sampled_castbl", where = " _PartInd_ = 1 ")
# Create the appropriate input and target variables from the table metadata.
info = cas.table.columnInfo(s, table = train)
colinfo = info$ColumnInfo
## nominal variables are: region, upsell_xsell
# NOTE(review): columns are selected by position (7, 8, 9, 15, 18); this is
# fragile if the source CSV's column order ever changes -- verify against the
# Looking_glass schema.
nominals = colinfo$Column[c(7,8)]
intervals = colinfo$Column[c(-7,-8,-9,-15,-18)]
target = colinfo$Column[8]
inputs = colinfo$Column[c(-8,-9,-15,-18)]
# Build a GB (gradient boosting tree) model for predictive classification;
# saveState writes the model as an analytic store (astore) table R_SWAT_GB.
loadActionSet(s, "decisionTree")
model = cas.decisionTree.gbtreeTrain(
s,
casOut=list(caslib="lg",name="gb_model",replace=T),
saveState = list(caslib="lg", name="R_SWAT_GB", replace=T),
inputs = inputs,
nominals = nominals,
target = target,
table = train
)
# View the model info
print(model)
# Promote the saved analytic store so it is visible beyond this CAS session.
cas.table.promote(s, caslib="lg", name="R_SWAT_GB", targetCaslib="casuser")
# Score the model on the held-out validation data; copyVars keeps the actual
# target alongside the predictions for assessment later.
out = cas.decisionTree.gbtreeScore (
s,
modelTable = list(name="gb_model", caslib="lg"),
table = val,
encodeName = TRUE,
assessonerow = TRUE,
casOut = list(name="scored_data", caslib="lg", replace=T),
copyVars = target
)
# View the scored results
cas.table.fetch(s,table="scored_data")
# Train a native R model for comparison with the CAS gradient boosting model.
# NOTE(review): the original comments here referred to XGBoost, but the code
# below actually fits a randomForest model.
# First, convert the train and validation CAS tables to local R data frames.
train_cas_df = to.casDataFrame(train)
train_df = to.data.frame(train_cas_df)
val_cas_df = to.casDataFrame(val)
val_df = to.data.frame(val_cas_df)
# In R, we need to do the data pre-processing explicitly. Hence, convert the "char" region variable to "factor"
train_df$upsell_xsell = as.factor(train_df$upsell_xsell)
val_df$upsell_xsell = as.factor(val_df$upsell_xsell)
# Copy the imputed IMP_* columns back to their original names and drop the
# IMP_* versions, so train/val have a consistent schema.
train_df$days_openwrkorders = train_df$IMP_days_openwrkorders
train_df$ever_days_over_plan = train_df$IMP_ever_days_over_plan
val_df$days_openwrkorders = val_df$IMP_days_openwrkorders
val_df$ever_days_over_plan = val_df$IMP_ever_days_over_plan
train_df$IMP_days_openwrkorders = NULL
train_df$IMP_ever_days_over_plan = NULL
val_df$IMP_days_openwrkorders = NULL
val_df$IMP_ever_days_over_plan = NULL
# Train a RF model on the data.
# NOTE(review): ntree = 2 is far too small for a real forest (demo setting);
# feature columns are again chosen by position -- confirm indices vs schema.
rf_model <- randomForest(upsell_xsell ~ . , ntree=2, mtry=5, data=train_df[,c(3,8,9,10,11,12,14)], importance=TRUE)
# Make class-probability predictions on the validation data.
pred <- predict(rf_model, val_df[,c(3,8,9,10,11,12,14)], type="prob")
# Evaluate the performance of SAS and R models
## Assessing the performance metric of SAS-GB model
loadActionSet(s,"percentile")
# cas.percentile.assess() returns ROC statistics (TP/FP/TN/FN, ACC, ...) at
# cutoffs spaced by cutStep over the scored event probability column.
tmp = cas.percentile.assess(
s,
cutStep = 0.05,
event = "1",
inputs = "P_upsell_xsell1",
nBins = 20,
response = target,
table = "scored_data"
)$ROCInfo
roc_df = data.frame(tmp)
print(head(roc_df))
# Display the confusion matrix for cutoff threshold at 0.5
cutoff = subset(roc_df, CutOff == 0.5)
tn = cutoff$TN
fn = cutoff$FN
tp = cutoff$TP
fp = cutoff$FP
# Assemble a 2x2 actual-vs-predicted matrix (rows = actual, cols = predicted).
a = c(tn,fn)
p = c(fp,tp)
mat = data.frame(a,p)
colnames(mat) = c("Pred:0","Pred:1")
rownames(mat) = c("Actual:0","Actual:1")
mat = as.matrix(mat)
print(mat)
# Print the accuracy and misclassification rates for the model
accuracy = cutoff$ACC
mis = cutoff$MISCEVENT
print(paste("Misclassification rate is",mis))
print(paste("Accuracy is",accuracy))
## Assessing the performance metric of R-RF model
# Create a confusion matrix for cutoff threshold at 0.5
# (pred[, 2] holds the predicted probability of the event class "1").
conf.matrix = table(val_df$upsell_xsell, as.numeric(pred[,2]>0.5))
rownames(conf.matrix) = paste("Actual", rownames(conf.matrix), sep = ":")
colnames(conf.matrix) = paste("Pred", colnames(conf.matrix), sep = ":")
# Print the accuracy and misclassification rates for the model
err = mean(as.numeric(pred[,2] > 0.5) != val_df$upsell_xsell)
print(paste("Misclassification rate is",err))
print(paste("Accuracy is",1-err))
# Plot ROC curves for both models on a single ggplot.
# SAS curve: FPR/Sensitivity pairs from cas.percentile.assess() (roc_df).
# R curve:  computed with ROCR from the RF predicted probabilities.
FPR_SAS = roc_df['FPR']
TPR_SAS = roc_df['Sensitivity']
# BUG FIX: the labels were previously taken from an undefined `test_labels`
# object; the validation-set target is val_df$upsell_xsell.
pred1 = prediction(pred[, 2], val_df$upsell_xsell)
perf1 = performance(pred1, "tpr", "fpr")
FPR_R = perf1@x.values[[1]]
TPR_R = perf1@y.values[[1]]
roc_df2 = data.frame(FPR = FPR_R, TPR = TPR_R)
# BUG FIX: trailing commas after aes(...) passed empty arguments to
# geom_line() (an error in R); the second curve now maps roc_df2's own
# columns rather than the global FPR_R/TPR_R vectors.
ggplot() +
  geom_line(
    data = roc_df[c('FPR', 'Sensitivity')],
    aes(x = as.numeric(FPR), y = as.numeric(Sensitivity), color = "SAS")
  ) +
  geom_line(
    data = roc_df2,
    aes(x = FPR, y = TPR, color = "R_RF")
  ) +
  scale_color_manual(
    name = "Colors",
    values = c("SAS" = "blue", "R_RF" = "red")
  ) +
  xlab('False Positive Rate') + ylab('True Positive Rate')
# Generate PMML code to export the R model to SAS Model Manager.
rf.pmml = pmml(rf_model)
format(object.size(rf.pmml))
# NOTE(review): hard-coded per-user Windows output path -- adjust before reuse.
savePMML(rf.pmml, "C:/Users/neveng/rf.xml", version=4.2 )
# Terminate the CAS session
cas.session.endSession(s)
| /webinars/Predictive_Modeling.R | permissive | sassoftware/sas-viya-programming | R | false | false | 8,046 | r | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Loading the required SWAT package and other R libraries necessary
library(swat)
library(ggplot2)
library(reshape2)
library(xgboost)
library(caret)
library(dplyr)
library(pROC)
library(e1071)
library(ROCR)
library(pmml)
library(randomForest)
library(caret)
# Connect to CAS server using appropriate credentials
s = CAS()
# Create a CAS library called lg pointing to the defined directory
# Need to specify the srctype as path, otherwise it defaults to HDFS
cas.table.addCaslib(s,
name = "lg",
description = "Looking glass data",
dataSource = list(srcType="path"),
path = "/viyafiles/tmp"
)
# Load the data into the in-memory CAS server
data = cas.read.csv(s,
"C:/Users/Looking_glass.csv",
casOut=list(name="castbl", caslib="lg", replace=TRUE)
)
# Invoke the overloaded R functions to view the head and summary of the input table
print(head(data))
print(summary(data))
# Check for any missingness in the data
dist_tabl = cas.simple.distinct(data)$Distinct[,c('Column','NMiss')]
print(dist_tabl)
dist_tabl = as.data.frame(dist_tabl)
sub = subset(dist_tabl, dist_tabl$NMiss != 0)
imp_cols = sub$Column
# Print the names of the columns to be imputed
print(imp_cols)
# Impute the missing values
cas.dataPreprocess.impute(data,
methodContinuous = 'MEDIAN',
methodNominal = 'MODE',
inputs = imp_cols,
copyAllVars = TRUE,
casOut = list(name = 'castbl', replace = TRUE)
)
# Split the data into training and validation and view the partitioned table
loadActionSet(s,"sampling")
cas.sampling.srs( s,
table = list(name="castbl", caslib="lg"),
samppct = 30,
seed = 123456,
partind = TRUE,
output = list(casOut = list(name = "sampled_castbl", replace = T, caslib="lg"), copyVars = 'ALL')
)
# Check for frequency distribution of partitioned data
cas.simple.freq(s,table="sampled_castbl", inputs="_PartInd_")
# Partition data into train and validation based on _PartInd_
train = defCasTable(s, tablename = "sampled_castbl", where = " _PartInd_ = 0 ")
val = defCasTable(s, tablename = "sampled_castbl", where = " _PartInd_ = 1 ")
# Create the appropriate input and target variables
info = cas.table.columnInfo(s, table = train)
colinfo = info$ColumnInfo
## nominal variables are: region, upsell_xsell
nominals = colinfo$Column[c(7,8)]
intervals = colinfo$Column[c(-7,-8,-9,-15,-18)]
target = colinfo$Column[8]
inputs = colinfo$Column[c(-8,-9,-15,-18)]
# Build a GB model for predictive classification
loadActionSet(s, "decisionTree")
model = cas.decisionTree.gbtreeTrain(
s,
casOut=list(caslib="lg",name="gb_model",replace=T),
saveState = list(caslib="lg", name="R_SWAT_GB", replace=T),
inputs = inputs,
nominals = nominals,
target = target,
table = train
)
# View the model info
print(model)
cas.table.promote(s, caslib="lg", name="R_SWAT_GB", targetCaslib="casuser")
# Score the model on test data
out = cas.decisionTree.gbtreeScore (
s,
modelTable = list(name="gb_model", caslib="lg"),
table = val,
encodeName = TRUE,
assessonerow = TRUE,
casOut = list(name="scored_data", caslib="lg", replace=T),
copyVars = target
)
# View the scored results
cas.table.fetch(s,table="scored_data")
# Train an R eXtreme Gradient Boosting model
# First, convert the train and test CAS tables to R data frames for training the R-XGB model
train_cas_df = to.casDataFrame(train)
train_df = to.data.frame(train_cas_df)
val_cas_df = to.casDataFrame(val)
val_df = to.data.frame(val_cas_df)
# In R, we need to do the data pre-processing explicitly. Hence, convert the "char" region variable to "factor"
train_df$upsell_xsell = as.factor(train_df$upsell_xsell)
val_df$upsell_xsell = as.factor(val_df$upsell_xsell)
train_df$days_openwrkorders = train_df$IMP_days_openwrkorders
train_df$ever_days_over_plan = train_df$IMP_ever_days_over_plan
val_df$days_openwrkorders = val_df$IMP_days_openwrkorders
val_df$ever_days_over_plan = val_df$IMP_ever_days_over_plan
train_df$IMP_days_openwrkorders = NULL
train_df$IMP_ever_days_over_plan = NULL
val_df$IMP_days_openwrkorders = NULL
val_df$IMP_ever_days_over_plan = NULL
# Train a RF model on the data
rf_model <- randomForest(upsell_xsell ~ . , ntree=2, mtry=5, data=train_df[,c(3,8,9,10,11,12,14)], importance=TRUE)
# Make predictions on test data
pred <- predict(rf_model, val_df[,c(3,8,9,10,11,12,14)], type="prob")
# Evaluate the performance of SAS and R models
## Assessing the performance metric of SAS-GB model
loadActionSet(s,"percentile")
tmp = cas.percentile.assess(
s,
cutStep = 0.05,
event = "1",
inputs = "P_upsell_xsell1",
nBins = 20,
response = target,
table = "scored_data"
)$ROCInfo
roc_df = data.frame(tmp)
print(head(roc_df))
# Display the confusion matrix for cutoff threshold at 0.5
cutoff = subset(roc_df, CutOff == 0.5)
tn = cutoff$TN
fn = cutoff$FN
tp = cutoff$TP
fp = cutoff$FP
a = c(tn,fn)
p = c(fp,tp)
mat = data.frame(a,p)
colnames(mat) = c("Pred:0","Pred:1")
rownames(mat) = c("Actual:0","Actual:1")
mat = as.matrix(mat)
print(mat)
# Print the accuracy and misclassification rates for the model
accuracy = cutoff$ACC
mis = cutoff$MISCEVENT
print(paste("Misclassification rate is",mis))
print(paste("Accuracy is",accuracy))
## Assessing the performance metric of R-RF model
# Create a confusion matrix for cutoff threshold at 0.5
conf.matrix = table(val_df$upsell_xsell, as.numeric(pred[,2]>0.5))
rownames(conf.matrix) = paste("Actual", rownames(conf.matrix), sep = ":")
colnames(conf.matrix) = paste("Pred", colnames(conf.matrix), sep = ":")
# Print the accuracy and misclassification rates for the model
err = mean(as.numeric(pred[,2] > 0.5) != val_df$upsell_xsell)
print(paste("Misclassification rate is",err))
print(paste("Accuracy is",1-err))
# Plot ROC curves for both the models using standard R plotting functions
FPR_SAS = roc_df['FPR']
TPR_SAS = roc_df['Sensitivity']
pred1 = prediction(pred[,2], test_labels)
perf1 = performance( pred1, "tpr", "fpr" )
FPR_R = perf1@x.values[[1]]
TPR_R = perf1@y.values[[1]]
roc_df2 = data.frame(FPR = FPR_R, TPR = TPR_R)
ggplot() +
geom_line(
data = roc_df[c('FPR', 'Sensitivity')],
aes(x = as.numeric(FPR), y = as.numeric(Sensitivity),color = "SAS"),
) +
geom_line(
data = roc_df2,
aes(x = as.numeric(FPR_R), y = as.numeric(TPR_R),color = "R_RF"),
) +
scale_color_manual(
name = "Colors",
values = c("SAS" = "blue", "R_RF" = "red")
) +
xlab('False Positive Rate') + ylab('True Positive Rate')
# Generating PMML code to export R model to Model Manager
rf.pmml = pmml(rf_model)
format(object.size(rf.pmml))
savePMML(rf.pmml, "C:/Users/neveng/rf.xml", version=4.2 )
# Terminate the CAS session
cas.session.endSession(s)
|
myTestRule {
# Workflow rule demonstrating the no-operation (nop) microservice.
# Expected output from running the example:
#   nop
nop;
writeLine("stdout", "nop");
}
INPUT null
OUTPUT ruleExecOut
| /irods-3.3.1-cyverse/iRODS/clients/icommands/test/rules3.0/ruleworkflownop.r | no_license | bogaotory/irods-cyverse | R | false | false | 167 | r | myTestRule {
#Workflow function for no operation
#Output from running the example is:
# nop
nop;
writeLine("stdout", "nop");
}
INPUT null
OUTPUT ruleExecOut
|
\name{vec2mat}
\alias{vec2mat}
\title{Reads a vector into a matrix}
\description{Fills a lower triangular matrix from a vector and copies it to the upper triangle}
\usage{vec2mat(x)}
\arguments{
\item{x}{a vector}
}
\value{
\item{mat}{a matrix}
}
%\references{}
\author{Jerome Goudet \email{jerome.goudet@unil.ch}}
%\seealso{\code{\link{}}.}
%\examples{vec2mat(1:6)}
\keyword{univar}
| /man/vec2mat.rd | no_license | dalloliogm/hierfstat | R | false | false | 399 | rd | \name{vec2mat}
\alias{vec2mat}
\title{Reads a vector into a matrix}
\description{Fills a lower triangular matrix from a vector and copy it to upper triangle}
\usage{vec2mat(x)}
\arguments{
\item{x}{a vector}
}
\value{
\item{mat}{a matrix}
}
%\references{}
\author{Jerome Goudet \email{jerome.goudet@unil.ch}}
%\seealso{\code{\link{}}.}
%\examples{vec2mat(1:6)}
\keyword{univar}
|
# Pull beta-phase model output for years 1500-1600.
# NOTE(review): extract.beta() and the *.databin arrays are defined elsewhere
# in the project; the third-dimension slice indices used below for all.beta
# (2 = LAI, 5 = temp, 6 = precip, 9 = month index) are inferred from the
# variable names -- confirm against extract.beta()'s layer ordering.
all.beta<-extract.beta(startyear.set=1500, endyear.set=1600)
# Aggregate each variable to monthly means, grouping by the month column
# all.beta[,1,9]; columns 2:236 of the aggregate result drop the group key.
all.beta.precip<-all.beta[,,6]; all.beta.mo.p<-as.matrix(aggregate(all.beta.precip, by=list(all.beta[,1,9]), FUN=mean)[2:236])
all.beta.temp<-all.beta[,,5]; all.beta.mo.t<-as.matrix(aggregate(all.beta.temp, by=list(all.beta[,1,9]), FUN=mean)[2:236])
all.beta.lai<-all.beta[,,2]; all.beta.mo.l<-as.matrix(aggregate(all.beta.lai, by=list(all.beta[,1,9]), FUN=mean)[2:236])
# 3x2 histogram grid comparing the whole area ("allarea") against the
# evergreen ("egphase") and deciduous ("dcphase") subsets.
# NOTE(review): for eg/dc databin, slice 5 is used as precip and 6 as temp --
# the reverse of the all.beta convention above; one of the two index
# conventions may be wrong -- verify.
par(mfcol=c(3,2))
hist(colMeans(all.beta.mo.p), xlim=c(0,0.00005), main='allarea.p')
hist(colMeans(eg.databin[,,5]),xlim=c(0,0.00005), main='egphase.p')
hist(colMeans(dc.databin[,,5]),xlim=c(0,0.00005), main='dcphase.p')
hist(colMeans(all.beta.mo.t), xlim=c(265,295), main='allarea.t')
hist(colMeans(eg.databin[,,6]), xlim=c(265,295), main='egphase.t')
hist(colMeans(dc.databin[,,6]), xlim=c(265,295), main='dcphase.t')
hist(colMeans(all.beta.mo.l), xlim=c(0,9), main='allarea.lai_year')
hist(colMeans(eg.databin[,,2]), xlim=c(0,9), main='egphase.lai_year')
hist(colMeans(dc.databin[,,2]), xlim=c(0,9), main='dcphase.lai_year')
# Growing-season LAI only (rows 6:8, presumably June-August -- TODO confirm).
hist(colMeans(all.beta.mo.l[6:8,]), xlim=c(0,10), main='allarea.lai_gs')
hist(colMeans(eg.databin[6:8,,2]), xlim=c(0,10), main='egphase.lai_gs')
hist(colMeans(dc.databin[6:8,,2]), xlim=c(0,10), main='dcphase.lai_gs')
#Change during shift
par(mfrow=c(1,2))
data.pr<-colMeans(databin[,,5]);data.t<-colMeans(databin[,,6]);data.s<-colMeans(databin[,,3])
# Histogram titles show mean (+/-) sd; the red line marks zero change.
hist(data.pr, main=paste(round(mean(data.pr),8),"(+/-)",round(sd(data.pr), 8)));abline(v=0, col='red')
hist(data.t,main=paste(round(mean(data.t),3),"(+/-)",round(sd(data.t), 3)));abline(v=0, col='red')
#yep, pretty much none.
#hist(data.s,main=paste(round(mean(data.s),3),"+/-",round(sd(data.s), 3)));abline(v=0, col='red')
| /ShiftSpace.R | no_license | bblakely/MIP2 | R | false | false | 1,747 | r | all.beta<-extract.beta(startyear.set=1500, endyear.set=1600)
all.beta.precip<-all.beta[,,6]; all.beta.mo.p<-as.matrix(aggregate(all.beta.precip, by=list(all.beta[,1,9]), FUN=mean)[2:236])
all.beta.temp<-all.beta[,,5]; all.beta.mo.t<-as.matrix(aggregate(all.beta.temp, by=list(all.beta[,1,9]), FUN=mean)[2:236])
all.beta.lai<-all.beta[,,2]; all.beta.mo.l<-as.matrix(aggregate(all.beta.lai, by=list(all.beta[,1,9]), FUN=mean)[2:236])
par(mfcol=c(3,2))
hist(colMeans(all.beta.mo.p), xlim=c(0,0.00005), main='allarea.p')
hist(colMeans(eg.databin[,,5]),xlim=c(0,0.00005), main='egphase.p')
hist(colMeans(dc.databin[,,5]),xlim=c(0,0.00005), main='dcphase.p')
hist(colMeans(all.beta.mo.t), xlim=c(265,295), main='allarea.t')
hist(colMeans(eg.databin[,,6]), xlim=c(265,295), main='egphase.t')
hist(colMeans(dc.databin[,,6]), xlim=c(265,295), main='dcphase.t')
hist(colMeans(all.beta.mo.l), xlim=c(0,9), main='allarea.lai_year')
hist(colMeans(eg.databin[,,2]), xlim=c(0,9), main='egphase.lai_year')
hist(colMeans(dc.databin[,,2]), xlim=c(0,9), main='dcphase.lai_year')
hist(colMeans(all.beta.mo.l[6:8,]), xlim=c(0,10), main='allarea.lai_gs')
hist(colMeans(eg.databin[6:8,,2]), xlim=c(0,10), main='egphase.lai_gs')
hist(colMeans(dc.databin[6:8,,2]), xlim=c(0,10), main='dcphase.lai_gs')
#Change during shift
par(mfrow=c(1,2))
data.pr<-colMeans(databin[,,5]);data.t<-colMeans(databin[,,6]);data.s<-colMeans(databin[,,3])
hist(data.pr, main=paste(round(mean(data.pr),8),"(+/-)",round(sd(data.pr), 8)));abline(v=0, col='red')
hist(data.t,main=paste(round(mean(data.t),3),"(+/-)",round(sd(data.t), 3)));abline(v=0, col='red')
#yep, pretty much none.
#hist(data.s,main=paste(round(mean(data.s),3),"+/-",round(sd(data.s), 3)));abline(v=0, col='red')
|
# Collect bout onset/offset times for a set of behaviors across sessions.
#
# Args:
#   behaviorsToUse:      character vector of behavior names to extract.
#   boutTimesFilenames:  filenames (resolved against the global
#                        `boutTimesPath`) of the .npy bout-time files.
#
# Returns: a named list with one element per behavior, each the row-bound
#   onset/offset records pooled over all files.
#
# NOTE(review): `boutTimesPath` and `np` (presumably a reticulate numpy
# handle, given np$load) must exist in the calling environment -- they are
# not defined in this file.
getBehaviorsOnsetsAndOffsets <- function(behaviorsToUse, boutTimesFilenames) {
    # Preallocate one NULL accumulator per behavior; rbind(NULL, x) is x,
    # so the first file's rows seed each accumulator.  (The original
    # `for(i in 1:length(...))` loop misbehaved for an empty behavior set.)
    behaviorsOnsetsAndOffsets <- vector("list", length(behaviorsToUse))
    for(boutTimesFilename in boutTimesFilenames) {
        boutTimesFullFilename <- file.path(boutTimesPath, boutTimesFilename)
        boutTimes <- np$load(boutTimesFullFilename)
        for(i in seq_along(behaviorsToUse)) {
            behaviorToUse <- behaviorsToUse[i]
            behaviorBoutTimes <- boutTimes[[behaviorToUse]]
            behaviorsOnsetsAndOffsets[[i]] <- rbind(behaviorsOnsetsAndOffsets[[i]],
                                                    behaviorBoutTimes)
        }
    }
    names(behaviorsOnsetsAndOffsets) <- behaviorsToUse
    return(behaviorsOnsetsAndOffsets)
}
| /code/projectSrc/utils/getBehaviorsOnsetsAndOffsets.R | no_license | joacorapela/ldsForSocialInteractions | R | false | false | 759 | r |
getBehaviorsOnsetsAndOffsets <- function(behaviorsToUse, boutTimesFilenames) {
behaviorsOnsetsAndOffsets <- list()
for(i in 1:length(behaviorsToUse)) {
behaviorsOnsetsAndOffsets[[i]] <- c()
}
for(boutTimesFilename in boutTimesFilenames) {
boutTimesFullFilename <- file.path(boutTimesPath, boutTimesFilename)
boutTimes <- np$load(boutTimesFullFilename)
for(i in 1:length(behaviorsToUse)) {
behaviorToUse <- behaviorsToUse[i]
behaviorBoutTimes <- boutTimes[[behaviorToUse]]
behaviorsOnsetsAndOffsets[[i]] <- rbind(behaviorsOnsetsAndOffsets[[i]], behaviorBoutTimes)
}
}
names(behaviorsOnsetsAndOffsets) <- behaviorsToUse
return(behaviorsOnsetsAndOffsets)
}
|
## pCMF
## 2019-4-2 19:12:11
## loading packages (pCMF plus Bioconductor single-cell infrastructure)
suppressPackageStartupMessages({
	library(pCMF)
	library(SingleCellExperiment)
	library(BiocParallel)
	library(matrixStats)
})
# main function: run pCMF dimension reduction on a SingleCellExperiment.
#   sce    -- SingleCellExperiment holding a genes x cells counts matrix
#   num_pc -- number of latent factors K to fit
#   params -- list with num_core, doParallel and filtering_method entries
# Returns list(res = fitted pCMF object, ctimes = named timing vector);
# on error, a list whose `res` is NA with timing/colname attributes.
call_pCMF <- function(sce, num_pc, params){
	# other parameter in pCMF method
	num_core <- params$num_core
	doParallel <- params$doParallel
	filtering_method <- params$filtering_method
	counts <- counts(sce)
	# Drop genes expressed in <=5 cells and cells expressing <=10 genes.
	if(filtering_method=="nonzeros"){
		counts <- counts[which(rowSums(counts>0)>5),]
		counts <- counts[,which(colSums(counts>0)>10)]
	}
	#counts <- counts[rowSums(counts)>0,]
	#rm(sce)
	counts <- t(counts) ## for pCMF, the dimension of data should be n x p instead of p x n
	tryCatch({
		if(doParallel){
			# parallel to run
			ct1 <- system.time({ res_pcmf <- pCMF(counts, K=num_pc, verbose=FALSE, ncores=num_core) })
		}else{
			ct1 <- system.time({ res_pcmf <- pCMF(counts, K=num_pc, verbose=FALSE, ncores=1) })
		}# end fi
		# extract the low dimension struct W
		# NOTE(review): W is computed only so its extraction time is included
		# in ct2; the value itself is discarded (only res_pcmf is returned).
		ct2 <- system.time({
			#W <- getU(res_pcmf)
			W <- getV(res_pcmf)
		})
		# Combine fit + extraction timings into one named numeric vector.
		ct <- ct1 + ct2
		ct <- c(user.self = ct[["user.self"]], sys.self = ct[["sys.self"]],
				user.child = ct[["user.child"]], sys.child = ct[["sys.child"]],
				elapsed = ct[["elapsed"]])
		list(res = res_pcmf, ctimes = ct)
	},
	# NOTE(review): on error the timings/colnames are attached as ATTRIBUTES
	# of `res` (via structure()), not as the `ctimes` list element used on
	# the success path -- downstream code must handle both shapes.
	error = function(e) {
		list(res = structure(rep(NA, 1), ctimes = c(user.self = NA, sys.self = NA, user.child = NA, sys.child = NA,
			elapsed = NA), name_col = colnames(sce)))
	})
}# end func
| /PQLMF_performance/algorithms/call_pCMF.R | no_license | QidiFeng/PQLMF-performance | R | false | false | 1,512 | r | ## pCMF
## 2019-4-2 19:12:11
## loading packages
suppressPackageStartupMessages({
library(pCMF)
library(SingleCellExperiment)
library(BiocParallel)
library(matrixStats)
})
# main function
call_pCMF <- function(sce, num_pc, params){
# other parameter in pCMF method
num_core <- params$num_core
doParallel <- params$doParallel
filtering_method <- params$filtering_method
counts <- counts(sce)
if(filtering_method=="nonzeros"){
counts <- counts[which(rowSums(counts>0)>5),]
counts <- counts[,which(colSums(counts>0)>10)]
}
#counts <- counts[rowSums(counts)>0,]
#rm(sce)
counts <- t(counts) ## for pCMF, the dimension of data should be n x p instead of p x n
tryCatch({
if(doParallel){
# parallel to run
ct1 <- system.time({ res_pcmf <- pCMF(counts, K=num_pc, verbose=FALSE, ncores=num_core) })
}else{
ct1 <- system.time({ res_pcmf <- pCMF(counts, K=num_pc, verbose=FALSE, ncores=1) })
}# end fi
# extract the low dimension struct W
ct2 <- system.time({
#W <- getU(res_pcmf)
W <- getV(res_pcmf)
})
ct <- ct1 + ct2
ct <- c(user.self = ct[["user.self"]], sys.self = ct[["sys.self"]],
user.child = ct[["user.child"]], sys.child = ct[["sys.child"]],
elapsed = ct[["elapsed"]])
list(res = res_pcmf, ctimes = ct)
},
error = function(e) {
list(res = structure(rep(NA, 1), ctimes = c(user.self = NA, sys.self = NA, user.child = NA, sys.child = NA,
elapsed = NA), name_col = colnames(sce)))
})
}# end func
|
#' @title Add values
#' @description A simple function that adds two values.
#' @param x A numeric value (or vector; addition is vectorized).
#' @param y A numeric value (or vector) to add to \code{x}.
#' @details Standard R recycling rules apply when \code{x} and \code{y}
#'   have different lengths.  (Fixed: the \code{@details} tag was empty,
#'   which triggers a roxygen2 warning.)
#' @return The numeric sum \code{x + y}.
#' @examples val <- addValue(1, 3)
#' @export
addValue <- function(x, y) {
  # The last evaluated expression is the return value; no temp needed.
  x + y
}
| /myPackage/R/addValue.R | no_license | ronjalappe/R_programming_class | R | false | false | 302 | r | #' @title Add values
#' @description A simple function that adds values
#' @param x One value
#' @param y another value
#' @details
#' @return numeric value
#' @examples val <- addValue(1,3)
#' @export
addValue <- function(x,y){
z<-x+y
return(z) # define which object is being returned
}
|
# Logistic regression on voter-turnout data: cleaning, outlier treatment,
# model fitting, evaluation (confusion matrix, ROC/AUC) and k-fold CV.

# Import dataset
turnout <- read.csv(file = "C:/Users/Hp/Desktop/data science/R/Datasets/turnout.csv")
str(turnout)
# Remove the irrelevant row-index column left by a previous write.csv()
turnout$X <- NULL
# Check the data types
names(turnout)
str(turnout)
# Variable conversion of the target variable
turnout$vote <- as.factor(turnout$vote)
table(turnout$vote)
# Convert categorical data to numeric: 0 = white, 1 = any other race
turnout$race_n <- as.factor(ifelse(turnout$race == 'white', 0, 1))
table(turnout$race_n)
# Remove the now-duplicated column
turnout$race <- NULL
str(turnout)
# Count missing values per column.
# (Fixed: the original iterated over an undefined `data` object and summed
# is.na() over the whole data frame for every column, giving one constant.)
sapply(turnout, function(x) sum(is.na(x)))
# Outlier check
boxplot(turnout$age)      # none
boxplot(turnout$educate)  # outliers on the lower side
boxplot(turnout$income)   # outliers on the upper side
# Outlier treatment for educate: cap at Q1 - 1.5 * IQR.
# (Fixed: the first quartile is computed with quantile() instead of being
# hard-coded from a summary() printout.)
summary(turnout$educate)
lower <- unname(quantile(turnout$educate, 0.25)) - 1.5 * IQR(turnout$educate)
lower
turnout$educate[turnout$educate < lower] <- lower
boxplot(turnout$educate)
summary(turnout$educate)
# Outlier treatment for income: cap at Q3 + 1.5 * IQR
summary(turnout$income)
upper <- unname(quantile(turnout$income, 0.75)) + 1.5 * IQR(turnout$income)
upper
turnout$income[turnout$income > upper] <- upper
boxplot(turnout$income)
summary(turnout$income)
# Data partition: 70% train / 30% test, reproducible via the seed
set.seed(100)
library(caret)
Train <- createDataPartition(turnout$vote, p = 0.7, list = FALSE)
training <- turnout[Train, ]
testing <- turnout[-Train, ]
# Model building
logit <- glm(vote ~ ., family = 'binomial', data = training)
summary(logit)  # AIC = 1434.4
# race_n adds nothing, so refit with backward stepwise selection
logit2 <- step(glm(vote ~ ., family = 'binomial', data = training), direction = 'backward')
summary(logit2)  # AIC = 1430
# Multicollinearity check
library(car)
vif(logit2)
# ODDS RATIO plus concordance/discordance/tied pairs.
# NOTE: Acc() is a user-defined helper sourced elsewhere, not in this file.
Acc(logit2)  # Percent Concordance - 72%
exp(coef(logit2))
cbind(odds_ratio = exp(coef(logit2)), exp(confint(logit2)))
logit2$coefficients
# PREDICTION on the test set, classifying with a 0.70 threshold
testing$probs <- predict(logit2, testing, type = 'response')
testing$Predict <- as.factor(ifelse(testing$probs > 0.70, 1, 0))
# Accuracy check
table(testing$Predict, testing$vote)
confusionMatrix(testing$vote, testing$Predict)
library(ROCR)
# Predicted probabilities for the ROC curve (computed on the test set)
predictTrain <- predict(logit2, testing, type = 'response')
# ROC curve: prediction object, then TPR/FPR performance, then plot
ROCRpred <- prediction(predictTrain, testing$vote)
ROCRpref <- performance(ROCRpred, "tpr", "fpr")
plot(ROCRpref)
# Area under the ROC curve
pred <- prediction(testing$probs, testing$vote)
as.numeric(performance(pred, "auc")@y.values)
# K-fold validation: repeated 10-fold CV on the reduced predictor set
library(caret)
crossValSettings <- trainControl(method = "repeatedcv",
                                 number = 10,
                                 savePredictions = TRUE)
crossVal <- train(as.factor(vote) ~ age + educate + income,
                  data = turnout,
                  family = "binomial",
                  method = "glm",
                  trControl = crossValSettings)
crossVal
summary(crossVal)
| /Logistic Regression.R | no_license | anujjohri/Logistics-Regression-in-R | R | false | false | 3,266 | r | # Import dataset
turnout <- read.csv(file = "C:/Users/Hp/Desktop/data science/R/Datasets/turnout.csv")
str(turnout)
#Removing the irrelavent column
turnout$X <- NULL
#checking tha data type
names(turnout)
str(turnout)
# Variable conversion of Target variable
turnout$vote <- as.factor(turnout$vote)
table(turnout$vote)
# converting categorical data to numeric
# Creating a new column to represent race
turnout$race_n <- as.factor(ifelse(turnout$race == 'white' , 0 , 1))
table(turnout$race_n)
# Removing duplicate column
turnout$race <- NULL
str(turnout)
# Check missing value
sapply(data, function(x) sum(is.na(turnout)))
# Outlier check
boxplot(turnout$age) #no
boxplot(turnout$educate) #yes lower
boxplot(turnout$income) # yes upper
# Treatment of outlier for educate
summary(turnout$educate)
lower <- 10.0 - 1.5* IQR(turnout$educate)
lower
turnout$educate [turnout$educate < lower] <- lower
boxplot(turnout$educate)
summary(turnout$educate)
# Treatment of outlier for income
summary(turnout$income)
upper <- 5.233 + 1.5* IQR(turnout$income)
upper
turnout$income [turnout$income > upper] <- upper
boxplot(turnout$income)
summary(turnout$income)
#data partition
set.seed(100)
library(caret)
Train <- createDataPartition(turnout$vote , p=0.7 , list = FALSE)
training <- turnout[ Train , ]
testing <- turnout[ -Train , ]
# Model building
logit <- glm(vote~. , family = 'binomial' , data = training)
summary(logit) # AIC = 1434.4
#need to remove race variable as it does not have any impact
# Creating another model
logit2 <- step(glm(vote~. , family = 'binomial' , data = training), direction = 'backward')
summary(logit2) #AIC = 1430
# Checking correlation
library(car)
vif(logit2)
# ODDS RATIO
# Checking concordance, disconcordance and tie pair
# Running a predefined function
Acc(logit2) #Percent Concordance - 72%
exp(coef(logit2))
cbind( odds_ratio = exp(coef(logit2)) ,exp(confint(logit2)) )
logit2$coefficients
# PREDICTION on Testing
testing$probs <- predict(logit2 , testing, type = 'response')
testing$Predict <- as.factor(ifelse(testing$probs > 0.70 , 1 , 0))
# Checing Accuracy
table(testing$Predict , testing$vote)
confusionMatrix( testing$vote , testing$Predict)
library(ROCR)
#Predictions on training set
predictTrain = predict(logit2 , testing , type = 'response')
# ROC Curve
#prediction function
ROCRpred = prediction(predictTrain , testing$vote)
# performance function
ROCRpref = performance(ROCRpred , "tpr" , "fpr")
#plot ROC curve
plot(ROCRpref)
library(ROCR)
pred = prediction(testing$probs , testing$vote)
as.numeric(performance(pred, "auc") @y.values)
# K- fold validation
library(caret)
crossValSettings <- trainControl(method = "repeatedcv" ,
number = 10 ,
savePredictions = TRUE)
crossVal <- train(as.factor(vote) ~ age + educate +
income ,
data = turnout ,
family = "binomial" ,
method= "glm" ,
trControl = crossValSettings)
crossVal
summary(crossVal)
|
#' Dispatch to the matching tswge forecasting function
#'
#' Builds the function name \code{fore.<type>.wge} from the unquoted
#' \code{type} argument and evaluates that tswge function with the
#' remaining arguments.
#'
#' @param type the model family: either arma, aruma, sigplusnoise, or
#'   another tswge model type.  Supplied unquoted (captured via
#'   non-standard evaluation), not as a string.
#' @param ... forwarded unchanged to the underlying
#'   \code{fore.<type>.wge} call (e.g. the series, phi, n.ahead).
#' @export
#' @return whatever the dispatched \code{fore.<type>.wge} call returns
#'   (a tswge forecast).
#' @examples
#' forecast(arma, LakeHuron, phi = 0.2)
forecast <- function(type,...){
  # Capture the unevaluated `type` symbol and splice it into the target
  # name, e.g. type = arma -> "fore.arma.wge".
  phrase <- paste0("fore.", enexpr(type),".wge")
  # Parse the name back to a symbol, build the call (!! splices it in),
  # and evaluate with `...` forwarded lazily.  enexpr/parse_expr/expr
  # come from rlang -- assumed imported by this package; TODO confirm
  # the NAMESPACE.
  func <- parse_expr(phrase)
  eval(expr((!!func)(...)))
}
| /tswgewrapped-master/R/forecast.R | no_license | stevebramhall/TimeSeries | R | false | false | 368 | r | #' forecast function!
#' @param type either arma, aruma, sigplusnoise, or other tswge model. No quotes
#' @param ... the normal inputs to tswge
#' @export
#' @return a forecast
#' @examples
#' forecast(arma, LakeHuron, phi = 0.2)
forecast <- function(type,...){
phrase <- paste0("fore.", enexpr(type),".wge")
func <- parse_expr(phrase)
eval(expr((!!func)(...)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{ct_read_data_config}
\alias{ct_read_data_config}
\title{Read data file using config information}
\usage{
ct_read_data_config(file, config)
}
\arguments{
\item{file}{character, single line, path to a file or a single string}
\item{config}{list created using \code{\link[=ct_read_config]{ct_read_config()}}}
}
\value{
tibble (data frame)
}
\description{
This is a wrapper for \code{\link[=ct_read_data]{ct_read_data()}}.
}
\examples{
config <- ct_example("keating_1999.CFG") \%>\% ct_read_config()
ct_example("keating_1999.DAT") \%>\% ct_read_data_config(config)
}
\seealso{
\code{\link[=ct_read_data]{ct_read_data()}}
}
| /man/ct_read_data_config.Rd | no_license | ijlyttle/ieeecomtrade | R | false | true | 720 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{ct_read_data_config}
\alias{ct_read_data_config}
\title{Read data file using config information}
\usage{
ct_read_data_config(file, config)
}
\arguments{
\item{file}{character, single line, path to a file or a single string}
\item{config}{list created using \code{\link[=ct_read_config]{ct_read_config()}}}
}
\value{
tibble (data frame)
}
\description{
This is a wrapper for \code{\link[=ct_read_data]{ct_read_data()}}.
}
\examples{
config <- ct_example("keating_1999.CFG") \%>\% ct_read_config()
ct_example("keating_1999.DAT") \%>\% ct_read_data_config(config)
}
\seealso{
\code{\link[=ct_read_data]{ct_read_data()}}
}
|
#' Power Plants Locations
#'
#' This data comes from Open-Power-System-Data; see the GitHub repo:
#' \url{https://github.com/Open-Power-System-Data/conventional_power_plants}.
#'
#' @format A data.table with 158 rows and 6 variables:
#' \describe{
#'   \item{name}{Name of the power plant}
#'   \item{eic_code}{EIC code of the plant}
#'   \item{lat}{Latitude}
#'   \item{lon}{Longitude}
#'   \item{X,Y}{Coordinates in the Lambert93 projection}
#' }
#' @examples
#' pplocations
"pplocations"
| /R/data-pplocations.R | permissive | dreamRs/rte.data | R | false | false | 449 | r | #' Power Plants Locations
#'
#' This data comes from Open-Power-System-Data, see Github repo:
#' \url{https://github.com/Open-Power-System-Data/conventional_power_plants}.
#'
#' @format A data.table with 158 rows and 6 variables:
#' \describe{
#' \item{name}{Name of the power plant}
#' \item{eic_code}{EIC code}
#' \item{lat}{Latitude}
#' \item{lon}{Longitude}
#' \item{X,Y}{Coordinates in Lambert93}
#' }
#' @examples
#' pplocations
"pplocations"
|
# makeCacheMatrix
# Builds a cache-aware matrix wrapper: a list of four closures sharing one
# enclosing environment, so the matrix and its (lazily computed) inverse
# can be stored and retrieved by collaborating code such as cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv <- NULL

  # Replace the stored matrix and invalidate any cached inverse, since a
  # previously computed result no longer applies to the new matrix.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Store a freshly computed inverse in the shared environment.
  setinv <- function(solve) {
    inv <<- solve
  }

  # Return the cached inverse (NULL when nothing has been stored yet).
  getinv <- function() {
    inv
  }

  # Expose the four accessors by name so callers can subset the wrapper.
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
# cacheSolve
# Returns the inverse of the special "matrix" built by makeCacheMatrix().
# The inverse is computed at most once: later calls reuse the cached copy
# stored inside the wrapper instead of recomputing it.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # A non-NULL cache means the inverse was already computed on an earlier
  # call, so just hand it back.
  if (!is.null(cached)) {
    message("Allready calculated, getting cached result")
    return(cached)
  }
  # Cache miss: fetch the matrix, invert it, and store the result so the
  # next call can take the fast path above.
  mat <- x$get()
  cached <- solve(mat, ...)
  x$setinv(cached)
  cached
}
| /cachematrix.R | no_license | strutsefar/ProgrammingAssignment2 | R | false | false | 2,436 | r |
#makeInv
#This function takes a matrix as an argument and holds several subfunctions that allows us to store data
#The function generates and stores a list of subfunctions that can be accessed and used by other functions by subsetting the main function
#The solution follows the stucture of the original assignment
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL #Initialize the inv variable to a NULL Value. This value will eventually store the inverted matrix.
#We will later use the NULL value to determine if the matrix has allready been calculated or not
set <- function(y) { #Function to change the matrix stored in the main function (x), with another value (y). Not really needed for the assignment but included to match the structure of the example
x <<- y #The value x is accessible outside the main function because we use the <<- operator
inv <<- NULL #If we actually use this function to change the matrix we need to reset the stored inverted result as it is no longer valid
}
get <- function() x # Function that returns the matrix stored in the variable x in the main function
setinv <- function(solve) inv <<- solve #Sets the inv variable to the value of solve. (NB. The actual solving of the matrix does not happen here but is passed to the function via the solve variable)
getinv <- function() inv
list(set = set, get = get, #Store the subfunctions in a list so that they are available by subsetting the makeInv function
setinv = setinv,
getinv = getinv)
}
#This function checks to see if we allready have calculated the result
#If that is the case we simply return the calculated result
#If it is not allready done we calculate it and store it for future use
cacheSolve <- function(x, ...) {
inv <- x$getinv() #Get the stored value of the inv variable
if(!is.null(inv)) { #Check to see if result is allready calculated (inv is not NULL).
message("Allready calculated, getting cached result")
return(inv) #If the result was allready calculated then we don't do it again but rather return the result
}
data <- x$get() #If the result is not allready calculated we retieve the matrix (x) stored in the get function. Assign it to a variable called data
inv <- solve(data, ...) #Solve invert the matrix called data
x$setinv(inv) #Store the result for future use
inv
}
|
library(plyr)
library(dplyr)
library(DT)
# Raw CMS Shared Savings Program extracts: ACO locations (with lon/lat) and
# performance-year-1 characteristics/quality measures.
acoinfo <- read.csv("./data/Medicare_Shared_Savings_Program_Accountable_Care_Organizations_with_coords.csv", stringsAsFactors = F)
acochars <- read.csv("./data/Medicare_Shared_Savings_Program_Accountable_Care_Organizations_Performance_Year_1_Results (1).csv", stringsAsFactors = F)
# Normalize the location file: uppercase ACO name (the merge key), and pull
# the ZIP from the last 5 characters of the address and the 2-letter state
# from the service-area field.
acoinfo2 <- acoinfo %>%
  mutate(aco_name = toupper(ACO.Legal.or.Name.Doing.Business.As),
         addr = ACO.Address,
         zip = substr(ACO.Address, nchar(ACO.Address)-4, nchar(ACO.Address)),
         state = ifelse(nchar(ACO.Service.Area)==2, ACO.Service.Area, substr(ACO.Service.Area,1,2))) %>%
  select(aco_name, addr, lon, lat, state, zip, ACO.Service.Area)
# Normalize the performance file: uppercase name, friendlier measure names,
# and keep the 33 ACO quality measures plus savings/losses and start date.
# NOTE(review): missing savings/losses are replaced by the literal string
# "NA", not a real NA -- confirm downstream code expects that.
acochars2 <- acochars %>%
  mutate(aco_name = toupper(ACO.Name..LBN.or.DBA..if.applicable..),
         benes = Total.Assigned.Beneficiaries,
         benchmark_exp = Total.Benchmark.Expenditures,
         exp = Total.Expenditures,
         Generated.Savings.Losses1.2 = ifelse(is.na(Generated.Savings.Losses1.2), "NA", Generated.Savings.Losses1.2),
         bench_minus_assign_bene_exp = Total.Benchmark.Expenditures.Minus.Total.Assigned.Beneficiary.Expenditures) %>%
  select(aco_name, benes, benchmark_exp, exp, bench_minus_assign_bene_exp, ACO.1, ACO.2, ACO.3, ACO.4, ACO.5, ACO.6, ACO.7,
         ACO.8., ACO.9., ACO.10., ACO.11, ACO.12, ACO.13, ACO.14, ACO.15, ACO.16, ACO.17, ACO.18, ACO.19, ACO.20, ACO.21, ACO.22,
         ACO.23, ACO.24, ACO.25, ACO.26, ACO.27., ACO.28, ACO.29, ACO.30, ACO.31, Generated.Savings.Losses1.2,
         Agreement.Start.Date)
acochars2 <- plyr::rename(acochars2, c("Generated.Savings.Losses1.2"="savings_losses"))
# Inner-join the two files on the uppercased ACO name.
df1 <- merge(acoinfo2, acochars2, by.x="aco_name", by.y="aco_name")
# Extract the city from the comma-separated address: element 2 for short
# addresses, element 3 when the address has an extra leading component.
address_split <- strsplit(df1$addr,",")
city <- sapply(address_split, function(x) {
  if (length(x) < 4){
    city <- x[2]
  } else{
    city <- x[3]
  }
  return(city)
})
df2 <- cbind(df1, city)
df2$city <- as.character(df2$city)
num_vars <- c("benes","benchmark_exp", "exp", "bench_minus_assign_bene_exp")
#convert expenditure data to numeric format (strip $ , . etc. before coercion)
df2[,num_vars] <- sapply(df2[,num_vars], function(x) as.numeric(gsub("[[:punct:]]",'',x)))
# Calculate CAHPS quality points for one measure from its 0-100 score,
# using the flat benchmark bands: <=30 -> 0, (30,40] -> 1.1, (40,50] ->
# 1.25, (50,60] -> 1.4, (60,70] -> 1.55, (70,80] -> 1.70, (80,90] -> 1.85,
# >90 -> 2.  NA scores propagate to NA points, as before.
# Fixed: the original nested-ifelse version left a gap at exactly 30
# (x == 30 matched no band and fell through to the TOP score of 2).
flat_bench <- function(x){
  points <- c(0, 1.1, 1.25, 1.4, 1.55, 1.70, 1.85, 2)
  # left.open = TRUE makes the bands right-closed, matching the original
  # x <= 40, x <= 50, ... comparisons.
  points[findInterval(x, c(30, 40, 50, 60, 70, 80, 90), left.open = TRUE) + 1]
}
# Score each ACO: four flat-benchmark CAHPS measures (ACO.1-ACO.4) plus two
# percentile-banded measures (ACO.5 health promotion, ACO.6 shared decision
# making), a base of 2 points, then rank descending by the 0-14 total.
# NOTE(review): the nested ifelse bands for ACO.5/ACO.6 share the boundary
# gap of the original flat_bench -- a score exactly equal to the lowest cut
# (54.71 / 72.87) matches no band and falls through to the top score of 2.
df3 <- df2 %>%
  mutate(c_access = flat_bench(df2$ACO.1),
         c_comm = flat_bench(df2$ACO.2),
         rate_md = flat_bench(df2$ACO.3),
         c_spec = flat_bench(df2$ACO.4),
         m_hlth_promo = ifelse(ACO.5<54.71, 0, ifelse(ACO.5>54.71 & ACO.5<=55.59, 1.1, ifelse(ACO.5>55.59 & ACO.5<=56.45, 1.25,
                  ifelse(ACO.5>56.45 & ACO.5<=57.63, 1.4, ifelse(ACO.5>57.63 & ACO.5<=58.22, 1.55, ifelse(ACO.5>58.22 & ACO.5<=59.09, 1.70,
                  ifelse(ACO.5>59.09 & ACO.5<=60.71, 1.85, 2))))))),
         m_sdm = ifelse(ACO.6<72.87, 0, ifelse(ACO.6>72.87 & ACO.6<=73.37, 1.1, ifelse(ACO.6>73.37 & ACO.6<=73.91, 1.25,
                  ifelse(ACO.6>73.91 & ACO.6<=74.51, 1.4, ifelse(ACO.6>74.51 & ACO.6<=75.25, 1.55, ifelse(ACO.6>75.25 & ACO.6<=75.82, 1.70,
                  ifelse(ACO.6>75.82 & ACO.6<=76.71, 1.85, 2))))))),
         CAHPS_score = c_access+c_comm+rate_md+c_spec+m_hlth_promo+m_sdm+2,
         rank = rank(-CAHPS_score, ties.method="max")) %>%
  #Remove 2 ACOs that have duplicate values for demo purposes
  filter(!aco_name %in% c("BAROMA HEALTH PARTNERS","MERCY ACO, LLC"))
# Shape the display table: jitter coordinates so co-located markers do not
# overlap on the map, name rows by ACO, and drop the intermediate columns.
allacos <- df3
allacos$aco <- allacos$aco_name
allacos$latitude <- jitter(allacos$lat)
allacos$longitude <- jitter(allacos$lon)
allacos$zipcode <- allacos$zip
row.names(allacos) <- allacos$aco
allacos <- subset(allacos, select=-c(lat,lon,aco_name,zip,addr,c_access,
                                     c_comm,rate_md,c_spec,m_hlth_promo,m_sdm))
#Legend titles for output: one human-readable label per allacos column, in
#column order (so this vector must stay in sync with allacos' layout).
legend <- data.frame(var=names(allacos), legend_name=c("State","ACO Service Area", "No. of Assigned Benes",
                                                       "Total Benchmark Expenditures($)","Total Expenditures ($)",
                                                       "Tot. Benchmark - Total Assigned Bene. Exp ($)",
                                                       "Getting Timely Care (0-100)","Provider Communication (0-100)","Rating of Doctor (0-100)",
                                                       "Access to Specialists (0-100)","Health Promotion and Education (0-100)","Shared-Decision Making (0-100)",
                                                       "Health and Functional Status (0-100)","All Condition Readmissions","ASC Admission:COPD or Asthma",
                                                       "ASC Admission: Heart Failure","% of PCPs Qualified for EHR Incentive","Medication Reconciliation","Falls: Screening for Fall Risk",
                                                       "Influenza Immunization","Pneumococcal Vaccination","Adult Weight Screening","Tobacco Use/Cessation Intervention",
                                                       "Depression Screening","Colorectal Cancer Screening","Mammography Screening","Blood Pressure Screening within 2 years",
                                                       "Diabetes HbA1c Control","Diabetes LDL Control","Diabetes BP Control","Diabetes Tobacco Non-use","Diabetes Aspirin Use",
                                                       "% of Diab. Benes with poor HbA1c Control","% of Benes with BP < 140/90","% of Benes with IVD Lipid Profile and LDL Control",
                                                       "% of Benes with IVD who use Aspirin","Beta-Blocker Therapy for LVSD","Generated Savings/Losses ($)",
                                                       "ACO Start Date","City","Patient Experience (0-14)",
                                                       "Rank","ACO Name","Lat","Lng","Zip Code"))
| /global.R | no_license | mikeyc33/acoscores | R | false | false | 6,386 | r | library(plyr)
library(dplyr)
library(DT)
acoinfo <- read.csv("./data/Medicare_Shared_Savings_Program_Accountable_Care_Organizations_with_coords.csv", stringsAsFactors = F)
acochars <- read.csv("./data/Medicare_Shared_Savings_Program_Accountable_Care_Organizations_Performance_Year_1_Results (1).csv", stringsAsFactors = F)
acoinfo2 <- acoinfo %>%
mutate(aco_name = toupper(ACO.Legal.or.Name.Doing.Business.As),
addr = ACO.Address,
zip = substr(ACO.Address, nchar(ACO.Address)-4, nchar(ACO.Address)),
state = ifelse(nchar(ACO.Service.Area)==2, ACO.Service.Area, substr(ACO.Service.Area,1,2))) %>%
select(aco_name, addr, lon, lat, state, zip, ACO.Service.Area)
acochars2 <- acochars %>%
mutate(aco_name = toupper(ACO.Name..LBN.or.DBA..if.applicable..),
benes = Total.Assigned.Beneficiaries,
benchmark_exp = Total.Benchmark.Expenditures,
exp = Total.Expenditures,
Generated.Savings.Losses1.2 = ifelse(is.na(Generated.Savings.Losses1.2), "NA", Generated.Savings.Losses1.2),
bench_minus_assign_bene_exp = Total.Benchmark.Expenditures.Minus.Total.Assigned.Beneficiary.Expenditures) %>%
select(aco_name, benes, benchmark_exp, exp, bench_minus_assign_bene_exp, ACO.1, ACO.2, ACO.3, ACO.4, ACO.5, ACO.6, ACO.7,
ACO.8., ACO.9., ACO.10., ACO.11, ACO.12, ACO.13, ACO.14, ACO.15, ACO.16, ACO.17, ACO.18, ACO.19, ACO.20, ACO.21, ACO.22,
ACO.23, ACO.24, ACO.25, ACO.26, ACO.27., ACO.28, ACO.29, ACO.30, ACO.31, Generated.Savings.Losses1.2,
Agreement.Start.Date)
acochars2 <- plyr::rename(acochars2, c("Generated.Savings.Losses1.2"="savings_losses"))
df1 <- merge(acoinfo2, acochars2, by.x="aco_name", by.y="aco_name")
address_split <- strsplit(df1$addr,",")
city <- sapply(address_split, function(x) {
if (length(x) < 4){
city <- x[2]
} else{
city <- x[3]
}
return(city)
})
df2 <- cbind(df1, city)
df2$city <- as.character(df2$city)
num_vars <- c("benes","benchmark_exp", "exp", "bench_minus_assign_bene_exp")
#convert expenditure data to numeric format
df2[,num_vars] <- sapply(df2[,num_vars], function(x) as.numeric(gsub("[[:punct:]]",'',x)))
#calculate total 0-14 CAHPS quality points based on benchmarks
flat_bench <- function(x){
score <- ifelse(x < 30, 0, ifelse(x>30 & x<=40, 1.1, ifelse(x>40 & x<=50, 1.25,
ifelse(x>50 & x<=60, 1.4, ifelse(x>60 & x<=70, 1.55, ifelse(x>70 & x<=80, 1.70,
ifelse(x>80 & x<=90, 1.85, 2)))))))
}
df3 <- df2 %>%
mutate(c_access = flat_bench(df2$ACO.1),
c_comm = flat_bench(df2$ACO.2),
rate_md = flat_bench(df2$ACO.3),
c_spec = flat_bench(df2$ACO.4),
m_hlth_promo = ifelse(ACO.5<54.71, 0, ifelse(ACO.5>54.71 & ACO.5<=55.59, 1.1, ifelse(ACO.5>55.59 & ACO.5<=56.45, 1.25,
ifelse(ACO.5>56.45 & ACO.5<=57.63, 1.4, ifelse(ACO.5>57.63 & ACO.5<=58.22, 1.55, ifelse(ACO.5>58.22 & ACO.5<=59.09, 1.70,
ifelse(ACO.5>59.09 & ACO.5<=60.71, 1.85, 2))))))),
m_sdm = ifelse(ACO.6<72.87, 0, ifelse(ACO.6>72.87 & ACO.6<=73.37, 1.1, ifelse(ACO.6>73.37 & ACO.6<=73.91, 1.25,
ifelse(ACO.6>73.91 & ACO.6<=74.51, 1.4, ifelse(ACO.6>74.51 & ACO.6<=75.25, 1.55, ifelse(ACO.6>75.25 & ACO.6<=75.82, 1.70,
ifelse(ACO.6>75.82 & ACO.6<=76.71, 1.85, 2))))))),
CAHPS_score = c_access+c_comm+rate_md+c_spec+m_hlth_promo+m_sdm+2,
rank = rank(-CAHPS_score, ties.method="max")) %>%
#Remove 2 ACOs that have duplicate values for demo purposes
filter(!aco_name %in% c("BAROMA HEALTH PARTNERS","MERCY ACO, LLC"))
allacos <- df3
allacos$aco <- allacos$aco_name
allacos$latitude <- jitter(allacos$lat)
allacos$longitude <- jitter(allacos$lon)
allacos$zipcode <- allacos$zip
row.names(allacos) <- allacos$aco
allacos <- subset(allacos, select=-c(lat,lon,aco_name,zip,addr,c_access,
c_comm,rate_md,c_spec,m_hlth_promo,m_sdm))
#Legend titles for output
legend <- data.frame(var=names(allacos), legend_name=c("State","ACO Service Area", "No. of Assigned Benes",
"Total Benchmark Expenditures($)","Total Expenditures ($)",
"Tot. Benchmark - Total Assigned Bene. Exp ($)",
"Getting Timely Care (0-100)","Provider Communication (0-100)","Rating of Doctor (0-100)",
"Access to Specialists (0-100)","Health Promotion and Education (0-100)","Shared-Decision Making (0-100)",
"Health and Functional Status (0-100)","All Condition Readmissions","ASC Admission:COPD or Asthma",
"ASC Admission: Heart Failure","% of PCPs Qualified for EHR Incentive","Medication Reconciliation","Falls: Screening for Fall Risk",
"Influenza Immunization","Pneumococcal Vaccination","Adult Weight Screening","Tobacco Use/Cessation Intervention",
"Depression Screening","Colorectal Cancer Screening","Mammography Screening","Blood Pressure Screening within 2 years",
"Diabetes HbA1c Control","Diabetes LDL Control","Diabetes BP Control","Diabetes Tobacco Non-use","Diabetes Aspirin Use",
"% of Diab. Benes with poor HbA1c Control","% of Benes with BP < 140/90","% of Benes with IVD Lipid Profile and LDL Control",
"% of Benes with IVD who use Aspirin","Beta-Blocker Therapy for LVSD","Generated Savings/Losses ($)",
"ACO Start Date","City","Patient Experience (0-14)",
"Rank","ACO Name","Lat","Lng","Zip Code"))
|
# Package attach hook (modern replacement for .First.lib).  Prints the
# version banner and, on an interactive Windows Rgui session, installs the
# bundled PDF vignettes into the GUI "Vignettes" menu.
.onAttach <- function(lib, pkg) { # .First.lib
	# Historical dynamic-library loading, kept disabled for reference:
	#library.dynam("galgo", pkg, lib)
	#dyn.load(paste("galgoDistance",.Platform$dynlib.ext,sep=""))
	#lockEnvironment(as.environment("package:datasets"), TRUE)
	# Menu installation only exists under the Windows Rgui front end.
	if(.Platform$OS.type == "windows" && interactive() && .Platform$GUI == "Rgui") addVigs2WinMenu("galgo")
	packageStartupMessage("galgo v1.2-01 (19-March-2014) was loaded.\n")
	packageStartupMessage("See 'packages' under R help for tutorial and manual.\n")
}
# Package unload hook (modern replacement for .Last.lib); the dynamic
# library unload is commented out, so this is intentionally a no-op.
.onUnload <- function(libpath) { # .Last.lib = function(lib, pkg)
	#library.dynam.unload("galgo")
}
#THIS FUNCTION HAS BEEN TAKEN AS-IS FROM THE BIOBASE PACKAGE
# Adds one Windows Rgui menu item per bundled vignette PDF, opening each
# via shell.exec().  Windows-only: winMenuNames()/winMenuAdd()/
# winMenuAddItem() exist only under the Rgui front end (the caller,
# .onAttach, already guards for that).
addVigs2WinMenu <- function (pkgName)
{
    vigs <- ""
    # Probe for the two PDFs shipped in inst/doc of the installed package.
    vigFile <- system.file(c("doc/Tutorial.pdf", "doc/Galgo.pdf"), package = pkgName)
    if (any(file.exists(vigFile))) {
        vigs <- vigFile[file.exists(vigFile)]
        #vigMtrx <- .readRDS(vigFile)
        #vigs <- file.path(.find.package(pkgName), "doc", vigMtrx[, "PDF"])
        #names(vigs) <- vigMtrx[, "Title"]
		names(vigs) <- c("Tutorial","Functions")[file.exists(vigFile)]
    }
    # Create the top-level "Vignettes" menu once, then a per-package submenu.
    if (!"Vignettes" %in% winMenuNames())
        winMenuAdd("Vignettes")
    pkgMenu <- paste("Vignettes", pkgName, sep = "/")
    winMenuAdd(pkgMenu)
    # NOTE(review): if neither PDF exists, vigs is still "" (length 1), so
    # this loop adds one bogus unnamed menu item; 1:length() would also
    # misbehave on a genuinely empty vector.  Consider an early return.
    for (v in 1:length(vigs)) {
		i <- vigs[v]
		item <- paste(names(vigs)[v],": ",basename(i),sep="") #sub(".pdf", "", basename(i))
        # The menu action is a string of R code executed on click.
        winMenuAddItem(pkgMenu, item, paste("shell.exec(\"",
            as.character(i), "\")", sep = ""))
    }
}
| /R/zzz.r | no_license | cran/galgo | R | false | false | 1,484 | r | .onAttach <- function(lib, pkg) { # .First.lib
#library.dynam("galgo", pkg, lib)
#dyn.load(paste("galgoDistance",.Platform$dynlib.ext,sep=""))
#lockEnvironment(as.environment("package:datasets"), TRUE)
if(.Platform$OS.type == "windows" && interactive() && .Platform$GUI == "Rgui") addVigs2WinMenu("galgo")
packageStartupMessage("galgo v1.2-01 (19-March-2014) was loaded.\n")
packageStartupMessage("See 'packages' under R help for tutorial and manual.\n")
}
# Package unload hook (modern replacement for the legacy .Last.lib).
# Intentionally a no-op: no compiled code is loaded by this package at present.
.onUnload <- function(libpath) { # .Last.lib = function(lib, pkg)
# Historical cleanup call, disabled because no shared object is loaded:
#library.dynam.unload("galgo")
}
#THIS FUNCTION HAS BEEN ADAPTED FROM THE BIOBASE PACKAGE
# Adds a "Vignettes/<pkgName>" menu to the Windows Rgui menu bar, with one
# entry per PDF vignette shipped with the package; each entry opens the PDF
# via shell.exec(). Only usable under Rgui on Windows, where the
# winMenuNames()/winMenuAdd()/winMenuAddItem() functions are available.
addVigs2WinMenu <- function (pkgName)
{
    # Candidate vignette files, keyed by the menu label each should get.
    # Looking them up one at a time keeps every label paired with its own
    # file; system.file() on a vector drops missing entries, which would
    # mis-align the labels with the surviving paths.
    candidates <- c(Tutorial = "doc/Tutorial.pdf", Functions = "doc/Galgo.pdf")
    vigs <- character(0)
    for (lab in names(candidates)) {
        path <- system.file(candidates[[lab]], package = pkgName)
        # system.file() returns "" when the file is not installed.
        if (nzchar(path)) {
            vigs[lab] <- path
        }
    }
    # No vignettes installed: do not create menu items pointing at "".
    if (length(vigs) == 0) {
        return(invisible(NULL))
    }
    if (!"Vignettes" %in% winMenuNames())
        winMenuAdd("Vignettes")
    pkgMenu <- paste("Vignettes", pkgName, sep = "/")
    winMenuAdd(pkgMenu)
    # seq_along() (not 1:length()) so an empty vector yields no iterations.
    for (v in seq_along(vigs)) {
        i <- vigs[v]
        item <- paste(names(vigs)[v], ": ", basename(i), sep = "")
        winMenuAddItem(pkgMenu, item, paste("shell.exec(\"",
            as.character(i), "\")", sep = ""))
    }
    invisible(NULL)
}
|
#The first function, makeCacheMatrix creates a special "vector", which is really a list of functions to
# set the values of a matrix
# get the values of a matrix
# set the value of the inverse of a matrix
# get the value of the inverse of a matrix
# Builds a matrix wrapper that can cache its inverse.
# Returns a list of four accessor closures: set/get for the matrix itself and
# setInverse/getInverse for the cached inverse (NULL until one is stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # no inverse cached yet
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  # Expose the four closures under the names callers expect.
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
#CacheSolve will check to see if the inverse of a matrix has been done already. If it has then it will
#not recompute the inverse but instead get it from cache. If the inverse has not been computed, the
#inverse will be computed and stored in inv.
# Computes (or retrieves) the inverse of the special matrix object produced by
# makeCacheMatrix(). Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
# A non-NULL cache means the inverse was already computed for this matrix.
if (!is.null(inv)) {
message("Not computing inverse, getting cached data instead")
return(inv)
}
# Cache miss: compute the inverse once and store it for future calls.
matrix <- x$get()
inv <- solve(matrix, ...)
x$setInverse(inv)
inv
} | /cachematrix.R | no_license | michelle33ward/ProgrammingAssignment2 | R | false | false | 1,323 | r | #The first function, makeCacheMatrix creates a special "vector", which is really a list of functions to
# set the values of a matrix
# get the values of a matrix
# set the value of the inverse of a matrix
# get the value of the inverse of a matrix
# Wraps a matrix together with a cache slot for its inverse.
# The returned list exposes set/get (the matrix) and setInverse/getInverse
# (the cached inverse, NULL until one has been stored).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    # Store a new matrix and drop the now-stale cached inverse.
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Retrieve the wrapped matrix.
    get = function() x,
    # Record a freshly computed inverse in the cache.
    setInverse = function(inverse) inv <<- inverse,
    # Return the cached inverse, or NULL when none has been stored.
    getInverse = function() inv
  )
}
#CacheSolve will check to see if the inverse of a matrix has been done already. If it has then it will
#not recompute the inverse but instead get it from cache. If the inverse has not been computed, the
#inverse will be computed and stored in inv.
# Computes (or retrieves) the inverse of the special matrix object produced by
# makeCacheMatrix(). Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
# A non-NULL cache means the inverse was already computed for this matrix.
if (!is.null(inv)) {
message("Not computing inverse, getting cached data instead")
return(inv)
}
# Cache miss: compute the inverse once and store it for future calls.
matrix <- x$get()
inv <- solve(matrix, ...)
x$setInverse(inv)
inv
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-frame.R
\name{testing}
\alias{testing}
\alias{test_register_src}
\alias{test_register_con}
\alias{src_test}
\alias{test_load}
\alias{test_frame}
\title{Infrastructure for testing dplyr}
\usage{
test_register_src(name, src)
test_register_con(name, ...)
src_test(name)
test_load(
df,
name = unique_table_name(),
srcs = test_srcs$get(),
ignore = character()
)
test_frame(..., srcs = test_srcs$get(), ignore = character())
}
\description{
Register testing sources, then use \code{test_load()} to load an existing
data frame into each source. To create a new table in each source,
use \code{test_frame()}.
}
\examples{
\dontrun{
test_register_src("df", src_df(env = new.env()))
test_register_src("sqlite", src_sqlite(":memory:", create = TRUE))
test_frame(x = 1:3, y = 3:1)
test_load(mtcars)
}
}
\keyword{internal}
| /man/testing.Rd | permissive | OssiLehtinen/dbplyr | R | false | true | 906 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-frame.R
\name{testing}
\alias{testing}
\alias{test_register_src}
\alias{test_register_con}
\alias{src_test}
\alias{test_load}
\alias{test_frame}
\title{Infrastructure for testing dplyr}
\usage{
test_register_src(name, src)
test_register_con(name, ...)
src_test(name)
test_load(
df,
name = unique_table_name(),
srcs = test_srcs$get(),
ignore = character()
)
test_frame(..., srcs = test_srcs$get(), ignore = character())
}
\description{
Register testing sources, then use \code{test_load()} to load an existing
data frame into each source. To create a new table in each source,
use \code{test_frame()}.
}
\examples{
\dontrun{
test_register_src("df", src_df(env = new.env()))
test_register_src("sqlite", src_sqlite(":memory:", create = TRUE))
test_frame(x = 1:3, y = 3:1)
test_load(mtcars)
}
}
\keyword{internal}
|
# Exercise 5: dplyr grouped operations
# Install the `nycflights13` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
#install.packages("nycflights13") # should be done already
library(nycflights13)
library(dplyr)
# What was the average departure delay in each month?
# Save this as a data frame `dep_delay_by_month`
# (group the flights by month, then summarize the grouped rows).
dep_delay_by_month <- summarize(
  group_by(flights, month),
  delay = mean(dep_delay, na.rm = TRUE)
)
# Which month had the greatest average departure delay?
dep_delay_by_month %>%
  filter(delay == max(delay)) %>%
  select(month)
# With exactly two columns (e.g. "month" then "delay", in that order), passing
# the data frame straight to plot() draws a scatterplot of delay against month.
plot(dep_delay_by_month)
# To which destinations were the average arrival delays the highest?
# (group by destination, summarize, then sort in decreasing order;
# head() shows just the first few rows).
avg_arrival_delays <- flights %>%
  group_by(dest) %>%
  summarize(arr_delay = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(desc(arr_delay))
head(avg_arrival_delays)
# These airport codes can be looked up in the `airports` data frame!
View(airports)
# Which city was flown to with the highest average speed?
View(flights)
# Average speed per flight is distance / air_time. The summarize() must
# average the `avg_speed` column created by mutate() (there is no `speed`
# column), and the result belongs in `highest_speed` only -- the original
# also had a dangling `flights <-` that clobbered the flights dataset.
highest_speed <- flights %>%
  mutate(avg_speed = distance/air_time) %>%
  group_by(dest) %>%
  summarize(avg_speed = mean(avg_speed, na.rm = TRUE)) %>%
filter(avg_speed == max(avg_speed, na.rm = TRUE)) | /exercise-5/exercise.R | permissive | sschow/ch10-dplyr | R | false | false | 1,622 | r | # Exercise 5: dplyr grouped operations
# Install the `nycflights13` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
#install.packages("nycflights13") # should be done already
library(nycflights13)
library(dplyr)
# What was the average departure delay in each month?
# Save this as a data frame `dep_delay_by_month`
# (group the flights by month, then summarize the grouped rows).
dep_delay_by_month <- summarize(
  group_by(flights, month),
  delay = mean(dep_delay, na.rm = TRUE)
)
# Which month had the greatest average departure delay?
dep_delay_by_month %>%
  filter(delay == max(delay)) %>%
  select(month)
# With exactly two columns (e.g. "month" then "delay", in that order), passing
# the data frame straight to plot() draws a scatterplot of delay against month.
plot(dep_delay_by_month)
# To which destinations were the average arrival delays the highest?
# (group by destination, summarize, then sort in decreasing order;
# head() shows just the first few rows).
avg_arrival_delays <- flights %>%
  group_by(dest) %>%
  summarize(arr_delay = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(desc(arr_delay))
head(avg_arrival_delays)
# These airport codes can be looked up in the `airports` data frame!
View(airports)
# Which city was flown to with the highest average speed?
View(flights)
# Compute per-flight speed, average it per destination, and keep the fastest.
# Fixes: average the mutated `avg_speed` column (there is no `speed` column)
# and assign only to `highest_speed` (the original dangling `flights <-` also
# clobbered the flights dataset).
highest_speed <- flights %>%
  mutate(avg_speed = distance/air_time) %>%
  group_by(dest) %>%
  summarize(avg_speed = mean(avg_speed, na.rm = TRUE)) %>%
filter(avg_speed == max(avg_speed, na.rm = TRUE)) |
###############################################################################
#Regression Analysis of Deer Capture Rate by Deer Estimated Detection Distance
###############################################################################
# Reads per-deployment deer effective-detection-distance (EDD) estimates,
# attaches deer capture rates, trims outliers, and fits/plots a simple linear
# regression of capture rate on EDD.
#create data frame from Deer EDD csv
setwd("C:/Users/josey/Documents/CT Grid/Summer2017")
Deer_EDD_Summer2017<-read.csv("Deer_EDD_S17.csv")
Deer_EDD_Summer2017<-as.data.frame(Deer_EDD_Summer2017)
# NOTE(review): CR3 is created earlier in the full script (not shown in this
# excerpt); this keeps only its white-tailed deer rows.
CR4<-subset(CR3, Species =="Odocoileus virginianus")
#Rename columns to match camdata data frame
colnames(Deer_EDD_Summer2017)[6]<-"Deployment"
colnames(Deer_EDD_Summer2017)[19]<-"Number_of_Detections"
colnames(Deer_EDD_Summer2017)[14]<-"ESW_EDR"
# NOTE(review): this copies Capture_Rate positionally, assuming the rows of
# Deer_EDD_Summer2017 and deercamdata_coords share the same deployment order
# -- verify, or merge by "Deployment" instead.
Deer_EDD_Summer2017$CapRate<-deercamdata_coords$Capture_Rate
#Remove deployments with <20 detections
# NOTE(review): the filter below is on CapRate, not Number_of_Detections --
# confirm this matches the intent stated in the comment above.
Deer_EDD<- Deer_EDD_Summer2017[which(Deer_EDD_Summer2017$CapRate >= 20),]
#Merge deer caprate and EDD by Deployment and Species
Deer_caprate_EDD<-merge(Deer_EDD, CR4, by = "Deployment")
#boxplot of merged data to identify outliers
par(mar=c(5,5,4,2))
boxplot(Deer_caprate_EDD$ESW_EDR)
boxplot.stats(Deer_caprate_EDD$ESW_EDR)$out
#remove outliers (EDD values above 10 m, chosen from the boxplot above)
Deer_caprate_EDD1<-Deer_caprate_EDD[which(Deer_caprate_EDD$ESW_EDR <=10),]
#correlate capture rate with EDD and show regression line
par(mar=c(5,6,4,2))
with(Deer_caprate_EDD1, plot(CapRate ~ ESW_EDR,
pch = 19,
xlab = expression(bold("Effective Detection Distance (m)")),
ylab = expression(bold("Deer Capture Rate")),
main = "Regression Analysis of Deer Capture Rate and Effective Detection Distance",
cex.main = 2,
cex.axis = 1.3,
cex.lab = 1.7))
lm.outdedd = lm(CapRate ~ ESW_EDR, data = Deer_caprate_EDD1)
abline(lm.outdedd, col="blue")
summary(lm.outdedd)
#Add Rsquared value expression
#Need to create an object with just rsquared value first
Rsquared<-summary(lm.outdedd)$r.squared
# The coordinates below are hard-coded plot positions for the R^2 label.
text(5.866514,191.3044, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5, font = 2)
#########################################################################################################
#Regression Analysis of Deer Estimated Detection Distance with Bear, Gray Squirrel, and Racoon Capture Rate
#########################################################################################################
# For each of bear, gray squirrel, and raccoon: join that species' capture
# rate per deployment to the deer EDD table, trim EDD outliers at 10 m, then
# plot and fit a linear regression of capture rate on deer EDD.
#subset capture rate per deployment of bear
bearcam_caprates<-bearcamdata_coords[,c(1,6,7)]
#rename column title
names(bearcam_caprates)[3]<-"Bear_Capture_Rate"
#Merge Deer EDD and bear capture rate info by Deployment
bear_dEDD<-merge(Deer_EDD, bearcam_caprates, by = "Deployment")
#plot results
# NOTE(review): the boxplot below shows the deer EDD values of deployments
# merged with bear data; the title may overstate what is plotted.
boxplot(bear_dEDD$ESW_EDR, main = "Effective Detection Distance for Black Bear",
cex.main = 1.7)
boxplot.stats(bear_dEDD$ESW_EDR)$out
#remove outliers (EDD above 10 m)
bear_dEDD1<-bear_dEDD[which(bear_dEDD$ESW_EDR <=10),]
#plot regression analysis of Bear Capture Rate by Deer EDD without outliers
par(mar=c(6,6,4,6))
with(bear_dEDD1, plot(Bear_Capture_Rate ~ ESW_EDR, main = "Regression Analysis of Bear Capture Rate and Deer Effective Detection Distance",
cex.main = 2.2,
xlab = expression(bold("Deer Effective Detection Distance")),
ylab = expression(bold("Bear Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.5))
lm.out_db = lm(Bear_Capture_Rate ~ ESW_EDR, data = bear_dEDD1)
abline(lm.out_db, col="blue")
summary(lm.out_db)
##################################################
#subset gray squirrel capture rate per deployment
grsqrlcam_caprates<-grsqrlcamdata_coords[,c(1,6,7)]
#rename column title
names(grsqrlcam_caprates)[3]<-"Gray_Squirrel_Capture_Rate"
#Merge gray squirrel info and Deer EDD
grsqrl_dEDD<-merge(Deer_EDD, grsqrlcam_caprates, by = "Deployment")
#plot results
boxplot(grsqrl_dEDD$ESW_EDR)
boxplot.stats(grsqrl_dEDD$ESW_EDR)$out
#remove outliers (EDD above 10 m)
grsqrl_dEDD1<-grsqrl_dEDD[which(grsqrl_dEDD$ESW_EDR <=10),]
#plot regression analysis of Gray Squirrel Capture Rate by Deer EDD without outliers
par(mar=c(11,6,4,6))
with(grsqrl_dEDD1, plot(Gray_Squirrel_Capture_Rate ~ ESW_EDR, main = "Regression Analysis of Gray Squirrel Capture Rate and Deer Effective Detection Distance",
cex.main = 1.9,
xlab = expression(bold("Deer Effective Detection Distance")),
ylab = expression(bold("Gray Squirrel Capture Rate")),
cex.axis = 1,
cex.lab = 1.5))
lm.out_gd = lm(Gray_Squirrel_Capture_Rate ~ ESW_EDR, data = grsqrl_dEDD1)
abline(lm.out_gd, col="blue")
summary(lm.out_gd)
############################################
#subset raccoon capture rates per deployment
raccooncam_caprates<-raccooncamdata_coords [,c(1,6,7)]
#rename column
names(raccooncam_caprates)[3]<-"Raccoon_Capture_Rate"
#merge raccoon capture rate and Deer EDD by Deployment
raccoon_dEDD<-merge(Deer_EDD, raccooncam_caprates, by = "Deployment")
#plot results
boxplot(raccoon_dEDD$ESW_EDR)
boxplot.stats(raccoon_dEDD$ESW_EDR)$out
#remove outliers (EDD above 10 m)
raccoon_dEDD1<-raccoon_dEDD[which(raccoon_dEDD$ESW_EDR <=10),]
#plot regression analysis of Raccoon Capture Rate by Deer EDD without outliers
par(mar=c(11,6,4,6))
with(raccoon_dEDD1, plot(Raccoon_Capture_Rate ~ ESW_EDR, main = "Regression Analysis of Raccoon Capture Rate and Deer Effective Detection Distance",
cex.main = 1.9,
xlab = expression(bold("Deer Effective Detection Distance")),
ylab = expression(bold("Raccoon Capture Rate")),
cex.axis = 1,
cex.lab = 1.5))
lm.out_rd = lm(Raccoon_Capture_Rate ~ ESW_EDR, data = raccoon_dEDD1)
abline(lm.out_rd, col="blue")
summary(lm.out_rd)
##################################################################################################
#Regression Analysis of the Deer Capture Rate to Capture Rate of Bear, Gray Squirrel, and Raccoon
#################################################################################################
# Pairwise capture-rate correlations: deer vs each other species, with
# outlier thresholds read off the boxplots, then a plot + linear fit.
# DEER AND BEAR CAPTURE RATE
#Subset Species, Deployment and caprate from deer and bear data frames
deercam_caprate<-deercamdata_coords[,c(1,6,7)]
names(deercam_caprate)[3]<-"Deer_Capture_Rate"
bearcam_caprate<-bearcamdata_coords[,c(1,6,7)]
names(bearcam_caprate)[3]<-"Bear_Capture_Rate"
deerbearcam<-merge(bearcam_caprate, deercam_caprate, by = "Deployment")
#boxplot to identify outliers
boxplot(deercam_caprate$Deer_Capture_Rate,bearcam_caprate$Bear_Capture_Rate)
boxplot.stats(deercam_caprate$Deer_Capture_Rate)$out
boxplot.stats(bearcam_caprate$Bear_Capture_Rate)$out
#remove outliers (thresholds 166 for deer, 17 for bear, per the stats above)
deercam_caprate1<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
bearcam_caprate1<-bearcam_caprate[which(bearcam_caprate$Bear_Capture_Rate <=17),]
#Plot Regression Analysis of capture rate of deer and bear
# deerbearcam is re-built here from the filtered tables, replacing the
# unfiltered merge made above.
deerbearcam<-merge(bearcam_caprate1, deercam_caprate1, by = "Deployment")
with(deerbearcam, plot(Deer_Capture_Rate ~ Bear_Capture_Rate, main = " Capture Rate Correlation Between Deer and Black Bear",
cex.main = 2.7,
font.lab = 2.3,
cex.axis = 1.5,
cex.lab = 1.7))
lm.outbd = lm(Deer_Capture_Rate ~ Bear_Capture_Rate, data = deerbearcam)
abline(lm.outbd, col="blue")
summary(lm.outbd)
#################################################
#DEER AND GRAY SQUIRREL CAPTURE RATE
#Subset Species, Deployment and caprate from individual species data frames
deercam_caprate<-deercamdata_coords[,c(1,6,7)]
names(deercam_caprate)[3]<-"Deer_Capture_Rate"
grsqrlcam_caprate<-grsqrlcamdata_coords[,c(1,6,7)]
names(grsqrlcam_caprate)[3]<-"GrSqrl_Capture_Rate"
deergrsqrlcam<-merge(grsqrlcam_caprate, deercam_caprate, by = "Deployment")
#boxplot to identify outliers
boxplot(deercam_caprate$Deer_Capture_Rate,grsqrlcam_caprate$GrSqrl_Capture_Rate)
boxplot.stats(deercam_caprate$Deer_Capture_Rate)$out
boxplot.stats(grsqrlcam_caprate$GrSqrl_Capture_Rate )$out
#remove outliers
# NOTE(review): here the *_caprate tables themselves are overwritten with
# their filtered versions (unlike the bear section, which made *_caprate1).
deercam_caprate<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
grsqrlcam_caprate<-grsqrlcam_caprate[which(grsqrlcam_caprate$GrSqrl_Capture_Rate <=53),]
#Plot Regression Analysis of capture rate of Deer and Gray Squirrel
deersqrlcam<-merge(grsqrlcam_caprate, deercam_caprate, by = "Deployment")
with(deersqrlcam, plot(Deer_Capture_Rate ~ GrSqrl_Capture_Rate, main = " Capture Rate Correlation Between Deer and Gray Squirrel",
cex.main = 2.6,
font.lab = 2.3,
cex.axis = 1.5,
cex.lab = 1.7))
lm.outgd = lm(Deer_Capture_Rate ~ GrSqrl_Capture_Rate, data = deersqrlcam)
abline(lm.outgd, col="blue")
summary(lm.outgd)
####################
#DEER AND RACOON CAPTURE RATE
#Subset Species, Deployment and caprate from individual species data frames
deercam_caprate<-deercamdata_coords[,c(1,6,7)]
names(deercam_caprate)[3]<-"Deer_Capture_Rate"
raccooncam_caprate<-raccooncamdata_coords[,c(1,6,7)]
names(raccooncam_caprate)[3]<-"Raccoon_Capture_Rate"
deerraccooncam<-merge(raccooncam_caprate, deercam_caprate, by = "Deployment")
#boxplot to identify outliers
boxplot(deercam_caprate$Deer_Capture_Rate,raccooncam_caprate$Raccoon_Capture_Rate)
boxplot.stats(deercam_caprate$Deer_Capture_Rate)$out
boxplot.stats(raccooncam_caprate$Raccoon_Capture_Rate)$out
#remove outliers
deercam_caprate<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
raccooncam_caprate<-raccooncam_caprate[which(raccooncam_caprate$Raccoon_Capture_Rate <=12),]
#Plot Regression Analysis of capture rate of Deer and Raccoon
deerraccooncam<-merge(raccooncam_caprate, deercam_caprate, by = "Deployment")
with(deerraccooncam, plot(Deer_Capture_Rate ~ Raccoon_Capture_Rate, main = " Capture Rate Correlation Between Deer and Raccoon",
cex.main = 2.6,
font.lab = 2.3,
cex.axis = 1.5,
cex.lab = 1.7))
lm.outrd = lm(Deer_Capture_Rate ~ Raccoon_Capture_Rate, data = deerraccooncam)
abline(lm.outrd, col="blue")
summary(lm.outrd)
#############################################################################################
#Correlation between Camera Height and Capture Rate of Deer, Bear, Gray Squirrel, and Raccoon
#############################################################################################
# Reads camera mounting heights, merges them with each species' per-deployment
# capture rate, and fits/plots a regression of capture rate on camera height.
#Bring in camera height data
Cam_heights<-read.csv("Camera_Heights.csv")
Cam_heights<-as.data.frame(Cam_heights)
#Rename columns to match for merge
names(Cam_heights)[1]<- "Deployment"
names(Cam_heights)[3]<- "Camera_Height"
#CAMERA HEIGHT is the distance in centimeters from the ground to the camera lens#
#Check variable types in data frame and change the camera height to a number variable
# NOTE(review): str() only prints the structure; no type conversion actually
# happens here -- confirm Camera_Height is already numeric after read.csv().
str(Cam_heights)
#Merge the species' capture rate per deployment and each deployment's camera height, by "Deployment" column
Deer_Cr_Ch<-merge(deercamdata_coords, Cam_heights, by = "Deployment")
Grsqrl_Cr_Ch<-merge(grsqrlcamdata_coords, Cam_heights, by = "Deployment")
Bear_Cr_Ch<-merge(bearcamdata_coords, Cam_heights, by = "Deployment")
Raccoon_Cr_Ch<-merge(raccooncamdata_coords, Cam_heights, by = "Deployment")
############################################################
#Regression Analysis of Deer capture rate by camera's height
par(mar=c(5,5,4,2))
with(Deer_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Deer Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outdcrch = lm(Capture_Rate ~ Camera_Height, data = Deer_Cr_Ch)
abline(lm.outdcrch, col="blue")
summary(lm.outdcrch)
###################################################################
#Regression Analysis of Black Bear capture rate by camera's height
par(mar=c(5,5,4,2))
with(Bear_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Bear Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outbcrch = lm(Capture_Rate ~ Camera_Height, data = Bear_Cr_Ch)
abline(lm.outbcrch, col="blue")
summary(lm.outbcrch)
#######################################################################
#Regression Analysis of Gray Squirrel capture rate by camera's height
par(mar=c(5,5,4,2))
with(Grsqrl_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Gray Squirrel Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outgscrch = lm(Capture_Rate ~ Camera_Height, data = Grsqrl_Cr_Ch)
abline(lm.outgscrch, col="blue")
summary(lm.outgscrch)
###################################
#Boxplot Raccoon capture rate and remove outliers (threshold 12 from the stats)
boxplot(Raccoon_Cr_Ch$Capture_Rate)
boxplot.stats(Raccoon_Cr_Ch$Capture_Rate)$out
Raccoon_Cr_Ch<-Raccoon_Cr_Ch[which(Raccoon_Cr_Ch$Capture_Rate <=12),]
#Regression Analysis of Raccoon capture rate by camera's height
par(mar=c(5,5,4,2))
with(Raccoon_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Raccoon Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outrcrch = lm(Capture_Rate ~ Camera_Height, data = Raccoon_Cr_Ch)
abline(lm.outrcrch, col="blue")
summary(lm.outrcrch)
################################################
#Correlation Between Camera Height and Deer EDD
################################################
#Merge Deer EDD with Camera Heights, by Deployment Column.
#Deer EDD data frame does not include deployments with <20 detections of deer
DEDD_CM<-merge(Deer_EDD, Cam_heights, by = "Deployment")
#Regression Analysis of Camera height by Deer EDD
boxplot(DEDD_CM$ESW_EDR)
boxplot.stats(DEDD_CM$ESW_EDR)$out
# Remove EDD outliers from the merged data. (The original line here filtered
# Raccoon_Cr_Ch by capture rate -- a copy-paste slip; the boxplot above is of
# DEDD_CM$ESW_EDR, and the 10 m cutoff matches the EDD filters used elsewhere.)
DEDD_CM<-DEDD_CM[which(DEDD_CM$ESW_EDR <=10),]
par(mar=c(5,5,4,2))
with(DEDD_CM, plot(ESW_EDR ~ Camera_Height , main = "Regression Analysis of Camera Height to Deer EDD",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Deer EDD")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outcmedd = lm(ESW_EDR ~ Camera_Height, data = DEDD_CM)
abline(lm.outcmedd, col="blue")
summary(lm.outcmedd)
##############################################################################################
#Working with SIGEO tree grid information to try to associate with capture rates from our
#high resolution camera grid. Grid established summer 2017, running through summer 2018.
#Coordinates should be UTM Zone 17S
library(rgeos)
library(rgdal)
library(sp)
library(maptools)
library(raster)
library(grid)
#Bring in geo-reference tree data from entire SIGEO grid
setwd("C:/Users/josey/Documents/CT Grid")
list.files()
SIGEOtrees<-read.csv("scbi.full2_UTM_lat_long_12012017.csv")
#Change data frame into a Spatial Points Data frame
head(SIGEOtrees)
# Promotes SIGEOtrees to a SpatialPointsDataFrame using the two UTM columns.
coordinates(SIGEOtrees)<- c("NAD83_X", "NAD83_Y")
class(SIGEOtrees)
#plot(SIGEOtrees)
#Bring in csv with correct coordinates
setwd("C:/Users/josey/Documents/CT Grid/Summer2017")
camdata_coordinates <- read.csv("Grid_Coordinates.csv")
camdata_coordinates <- as.data.frame(camdata_coordinates)
#plot the coordinates
plot(camdata_coordinates$NAD83_X,
camdata_coordinates$NAD83_Y,
xlim = c(747420, 747560),
ylim = c(4308900,4309040))
#Convert this trap coordinate information into a spatialpoints object
#First need to have the xy coordinates as a separate matrix
trapxy <- camdata_coordinates[, c(2,3)]
trapxy_sp <- SpatialPointsDataFrame(coords = trapxy, data = camdata_coordinates,
proj4string = CRS(proj4string(SIGEOtrees)))
plot(trapxy)
#Create a clipping polygon to reduce the size of the SIGEO grid to just the area of interest
#I'm setting the extent as 50m around the extreme trap coordinates
# NOTE(review): `c` shadows the base function c() from here on in this
# script's environment; consider renaming to something like `buf`.
c<-50
CP <- as(extent(min(trapxy$NAD83_X)-c,
max(trapxy$NAD83_X)+c,
min(trapxy$NAD83_Y)-c,
max(trapxy$NAD83_Y)+c),
"SpatialPolygons")
#Assign the coordinate reference system of SIGEOtrees to the new clipping polygon
proj4string(CP) <- CRS(proj4string(SIGEOtrees))
plot(CP)
#You could also use gIntersect below but it does not preserve the original attribute data
SIGEOsmall <- intersect(SIGEOtrees, CP)
#plot grid with tree and cameras
plot(SIGEOsmall, col = "darkgreen", pch = 3,cex.main = 4)
plot(trapxy_sp, pch = 19, col = "red", add = T)
#Add a legend
par(font = 2)
legend(747300,4308970, legend = c("Tree", "Camera"), col = c("darkgreen", "red"),
pch = c(3,19), cex =1.5, bty = "n")
#Add scale (a 20 m bar drawn at hard-coded map coordinates)
scale.len <- 20
x <- c(747308.5,747308.5+scale.len)
y<- c(4308890, 4308890)
lines(x,y,lwd = 2)
text(747347.9, 4308890, '20m', cex = 1.5)
#Add Deployment label to each camera
#pointLabel(coordinates(trapxy_sp),labels=trapxy_sp@data$Deployment, cex = 0.7, allowSmallOverlap = T)
##################################################################
#10m Buffer zones and related code
##################################################################
# Buffers every camera point by 10 m, counts the SIGEO trees inside each
# buffer, and joins the counts to the deer capture rates per deployment.
#buffer each camera by 10m. Maybe use actual max detection distance for each camera instead?
cams10m <- gBuffer(trapxy_sp, width=10, byid=TRUE, quadsegs = 4)
plot(cams10m, add = T)
#Cut out tree data from within the 10m buffers
trees10m <- intersect(SIGEOtrees, cams10m)
plot(trees10m)
gridtrees10m<-as.data.frame(trees10m)
#Check if trees are listed twice in buffer zone overlaps
# NOTE(review): columns are selected by hard-coded position (2 and 32) --
# fragile if the SIGEO CSV layout changes; confirm which columns these are.
doubletree<-gridtrees10m[,c(2,32)]
#Pull and total the # of trees per deployment and change column names
treecount<-gridtrees10m[,c(4,32)]
treecount1<-aggregate(treecount[,1],by=list(treecount$Deployment), sum)
colnames(treecount1)[2]<-"Number_of_Trees"
colnames(treecount1)[1]<-"Deployment"
#Merge number of trees per deployment with deer capture rate per deployment by Deployment
Trees_Per_Dcr<-merge(deercamdata_coords, treecount1, by = "Deployment")
#Boxplot of Number of Trees and remove outliers (threshold 34 from the stats)
boxplot(Trees_Per_Dcr$Number_of_Trees)
boxplot.stats(Trees_Per_Dcr$Number_of_Trees)$out
Trees_pDCR<-Trees_Per_Dcr[which(Trees_Per_Dcr$Number_of_Trees <=34),]
boxplot(deercamdata_coords$Capture_Rate)
boxplot.stats(deercamdata_coords$Capture_Rate)$out
deercam_caprate<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
#Plot Regression Analysis of # of trees to deer capture rate
par(mar=c(5,5,4,2))
with(Trees_pDCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Deer Capture Rate and Number of Trees per Deployment",
cex.main = 2.2,
xlab = expression(bold("Number of Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
# Fit on the same outlier-filtered data that is plotted (Trees_pDCR); the
# original fit used the unfiltered Trees_Per_Dcr, so the abline did not
# correspond to the plotted points.
lm.outdct = lm(Capture_Rate ~ Number_of_Trees, data = Trees_pDCR)
abline(lm.outdct, col="blue")
summary(lm.outdct)
#########################################################
#Create 4 point polygon to represent camera view
###########################################################
# Builds a 4-vertex polygon per camera (origin plus three points ~20 m out,
# offsets 6.84/18.79/20 hard-coded), converts them to SpatialPolygons, and
# counts the SIGEO trees falling inside each camera's view polygon.
#Create data frame of the 4 points per camera
camview <- camdata_coordinates[, c(2,3,5)]
camview$X1<-(camview$NAD83_X + 6.84)
camview$Y1<-(camview$NAD83_Y + 18.79)
camview$X2<-(camview$NAD83_X)
camview$Y2<-(camview$NAD83_Y + 20)
camview$X3<-(camview$NAD83_X - 6.84)
camview$Y3<-(camview$NAD83_Y + 18.79)
# NOTE(review): the row indices 28:54/55:81/82:108 assume exactly 27 cameras;
# stacking the three offset points under the originals, one row per vertex.
camview1<- camdata_coordinates [,c(2,3,5)]
camview1[28:54,]<-(camview[1:27, c(4:5,3)])
camview1[55:81,]<-(camview[1:27, c(6:7,3)])
camview1[82:108,]<-(camview[1:27, c(8:9,3)])
camview_list<-split(camview1, camview1$Deployment)
camview_list<-lapply(camview_list, function(x) {x["Deployment"]<- NULL; x})
#create one sp Polygon per camera from its vertex coordinates
cvpp <- lapply(camview_list, Polygon)
#add id variable
cvp<-lapply(seq_along(cvpp), function(i) Polygons(list(cvpp[[i]]),ID = names(camview_list)[i]))
#create sp object
camview_spo<-SpatialPolygons(cvp, proj4string = CRS(proj4string(SIGEOtrees)))
#Create spdf with IDs (one unique ID per poly) and plot polygons
camview_spo.df<-SpatialPolygonsDataFrame(camview_spo,data.frame(id = unique(camview1$Deployment),row.names = unique(camview1$Deployment)))
plot(camview_spo.df, add = T)
#Cut out tree data from within polygons
clip_polys<-intersect(SIGEOsmall,camview_spo.df)
plot(clip_polys)
cvtrees<-as.data.frame(clip_polys)
#Pull and total the # of trees per deployment and change column names
cvtreecount<-cvtrees[,c(4,28)]
# NOTE(review): `cvtreecount$d` relies on partial name matching (presumably
# resolving to a Deployment-like column) -- spell the column name out fully.
cvtreecount1<-aggregate(cvtreecount[,1], by = list(cvtreecount$d),sum)
colnames(cvtreecount1)[2]<-"Number_of_Trees"
colnames(cvtreecount1)[1]<-"Deployment"
###################################################################################
#Analyse relationship between # of Trees in cameras view with Species Capture Rate
###################################################################################
#Tree count vs Deer cr
#Merge tree count per deployment with deer capture rate per deployment by Deployment
cvTrees_per_Dcr<-merge(deercamdata_coords, cvtreecount1, by = "Deployment")
#Boxplot and remove outliers
boxplot.stats(cvTrees_per_Dcr$Number_of_Trees)$out
boxplot.stats(cvTrees_per_Dcr$Capture_Rate)$out
#Remove outliers, filtering tree count and capture rate separately
cvTrees_T<-cvTrees_per_Dcr[which(cvTrees_per_Dcr$Number_of_Trees <=38),]
cvTrees_Dcr<-cvTrees_per_Dcr[which(cvTrees_per_Dcr$Capture_Rate <=166),]
# Self-merge of the two filtered subsets keeps only deployments passing both
# filters; merge() suffixes the duplicated columns with .x/.y.
cvTrees_pDCR<-merge(cvTrees_Dcr, cvTrees_T, by = "Deployment")
#Plot Regression Analysis of # of trees to deer capture rate
par(mar=c(5,5,4,2))
with(cvTrees_pDCR, plot(Capture_Rate.x ~ Number_of_Trees.x, main = "Regression Analysis of Deer Capture Rate and Number of Trees per Deployment",
cex.main = 2.1,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
# cvTrees_pDCR comes from merging two subsets of the same table, so merge()
# suffixed the shared columns with .x/.y; the model must use the suffixed
# names plotted above (the unsuffixed names do not exist and would error).
lm.outdctc = lm(Capture_Rate.x ~ Number_of_Trees.x, data = cvTrees_pDCR)
abline(lm.outdctc, col="blue")
summary(lm.outdctc)
#Post Rsqrd value on plot
Rsquared<-summary(lm.outdctc)$r.squared
text(13.30551,126.1788, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
#######################################
#Tree count vs Bear cr
# Same analysis as above for bear, squirrel, and raccoon: trim tree-count
# outliers at 38, merge with the species' capture rates, plot, fit, label R^2.
#plot boxplot to identify outliers
boxplot(cvtreecount1$Number_of_Trees)
boxplot.stats(cvtreecount1$Number_of_Trees)$out
#Remove outliers
# NOTE(review): cvTrees_T is re-used here but rebuilt from cvtreecount1 (not
# the merged deer table used in the previous section).
cvTrees_T<-cvtreecount1[which(cvtreecount1$Number_of_Trees <=38),]
cvTrees_pBCR<-merge(cvTrees_T, bearcamdata_coords, by = "Deployment")
#Plot # of trees to bear cap rate
par(mar=c(5,5,4,2))
with(cvTrees_pBCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Bear Capture Rate and Number of Trees per Deployment",
cex.main = 2.1,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outbctc = lm(Capture_Rate ~ Number_of_Trees, data = cvTrees_pBCR)
abline(lm.outbctc, col="blue")
summary(lm.outbctc)
#Post Rsqrd value on plot
Rsquared<-summary(lm.outbctc)$r.squared
text(12.37023,16.05302, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
########################################
#Tree count vs Squirrel CR
#Merge # of trees and Squirrel CR
cvTrees_per_Scr<-merge(grsqrlcamdata_coords, cvtreecount1, by = "Deployment")
#Boxplot and remove outliers
boxplot(cvTrees_per_Scr$Number_of_Trees)
boxplot.stats(cvTrees_per_Scr$Number_of_Trees)$out
#Remove outliers
cvTrees_pSCR<-cvTrees_per_Scr[which(cvTrees_per_Scr$Number_of_Trees <=38),]
#Plot # of trees to squirrel cap rate
par(mar=c(5,5,4,2))
with(cvTrees_pSCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Squirrel Capture Rate and Number of Trees per Deployment",
cex.main = 2,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outsctc = lm(Capture_Rate ~ Number_of_Trees, data = cvTrees_pSCR)
abline(lm.outsctc, col="blue")
summary(lm.outsctc)
#Post Rsqrd value on plot
Rsquared<-summary(lm.outsctc)$r.squared
text(12.37023,97.61948, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
########################################
#Tree count vs Raccoon cr
#Merge # of trees and Raccoon CR
cvTrees_per_Rcr<-merge(raccooncamdata_coords, cvtreecount1, by = "Deployment")
#Boxplot and remove outliers
boxplot(cvTrees_per_Rcr$Number_of_Trees)
boxplot.stats(cvTrees_per_Rcr$Number_of_Trees)$out
cvTrees_pRCR<-cvTrees_per_Rcr[which(cvTrees_per_Rcr$Number_of_Trees <=38),]
#Plot # of trees to raccoon cap rate
par(mar=c(5,5,4,2))
with(cvTrees_pRCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Raccoon Capture Rate and Number of Trees per Deployment",
cex.main = 2,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outrctc = lm(Capture_Rate ~ Number_of_Trees, data = cvTrees_pRCR)
abline(lm.outrctc, col="blue")
summary(lm.outrctc)
#Post Rsqrd value on plot
Rsquared<-summary(lm.outrctc)$r.squared
text(12.37023,11.21435, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
##########################################
#Deer EDD vs Tree Count
#Merge Deer EDD (effective detection distance) with per-deployment tree count
DEDD_TC<-merge(Deer_EDD, cvtreecount1, by = "Deployment")
#Boxplot to identify tree-count outliers; <=38 cut-off matches earlier sections
boxplot(DEDD_TC$Number_of_Trees)
boxplot.stats(DEDD_TC$Number_of_Trees)$out
DEDD_TC1<-DEDD_TC[which(DEDD_TC$Number_of_Trees <=38),]
#Plot # of trees to Deer Estimated Detection Distance
par(mar=c(5,5,4,2))
with(DEDD_TC1, plot(ESW_EDR ~ Number_of_Trees, main = "Analysis of Trees per Deployment on Deer Detection Distance",
cex.main = 2,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Estimated Detection Distance")),
cex.axis = 1.3,
cex.lab = 1.6))
#Fit detection distance on tree count (same outlier-removed data as the plot)
lm.outdeddt = lm(ESW_EDR ~ Number_of_Trees, data = DEDD_TC1)
abline(lm.outdeddt, col="blue")
summary(lm.outdeddt)
#Post R-squared value on plot (hand-placed at data coordinates)
Rsquared<-summary(lm.outdeddt)$r.squared
text(12.37023,12.21435, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
#######################################
#Oak Tree Data
#######################################
#Pull oak tree records (Quercus species codes) from the clipped study-site grid
Oak_Trees<-subset(SIGEOsmall,sp %in% c('qual','quru','quco','qufa','qupr','quve','qusp','qumi'))
plot(Oak_Trees, pch = 19)
#plot camera locations in red
plot(trapxy_sp, pch = 22, col = "red", add = T)
#Add a column that bins trees into 6 color-coded size categories by dbh.
#Legend below labels 150 as 15cm, so dbh appears to be in mm -- TODO confirm units.
#BUG FIX: the first bin used a strict "< 150" while the next used "> 150",
#leaving trees with dbh exactly 150 unassigned (NA colour); ">= 150" closes
#the gap. Later assignments deliberately overwrite earlier ones for larger trees.
Oak_Trees$Size_Category[Oak_Trees$dbh < 150] <- '461'   #turqoise
Oak_Trees$Size_Category[Oak_Trees$dbh >= 150] <- '68'   #dark blue
Oak_Trees$Size_Category[Oak_Trees$dbh > 300] <- '47'    #yellow
Oak_Trees$Size_Category[Oak_Trees$dbh > 600] <- '139'   #green
Oak_Trees$Size_Category[Oak_Trees$dbh > 900] <- '8'     #gray
Oak_Trees$Size_Category[Oak_Trees$dbh > 1200] <- '550'  #pink
#plot Oak Tree sizes by color
par(mar=c(5,17,4,2))
plot(Oak_Trees ,pch = 19, col = Oak_Trees$Size_Category, add = T)
#Legend matching color to size
legend(747285,4309044, legend = c("< 15cm","> 15cm","> 30cm","> 60cm","> 90cm","> 120cm"), col = c("461", "68", "47","139", "8", "550"), pch = 19, title = "DBH of Oak Trees", bty = 'n')
########################################################################
#Regression Analysis of Oak Trees per Deployment and Deer Capture Rate
########################################################################
#Cut out oak tree data from within the camera-view cones
polyoaktrees<- intersect(Oak_Trees, clip_polys)
plot(polyoaktrees)
polyoaktreesdf<-as.data.frame(polyoaktrees)
#Pull # of oaks out of each deployment and rename columns to prepare for merge
oakcount<-polyoaktreesdf[,c(4,33)]
oakcount1<-aggregate(oakcount[,1],by=list(oakcount$Deployment), sum)
colnames(oakcount1)[2]<-"Num_Oak_Trees"
colnames(oakcount1)[1]<-"Deployment"
#Merge number of oak trees within the cones with deer capture rate.
#all.x = TRUE keeps cameras with no oaks; their NA counts become 0 below.
Oaks_Per_Dcr<-merge(deercamdata_coords, oakcount1, by = "Deployment", all.x = TRUE)
Oaks_Per_Dcr$Num_Oak_Trees[is.na (Oaks_Per_Dcr$Num_Oak_Trees)] = 0
#Boxplot of deer capture rate and remove outliers (cut-off < 166)
boxplot(Oaks_Per_Dcr$Capture_Rate)
boxplot.stats(Oaks_Per_Dcr$Capture_Rate)$out
Oaks_Per_Dcr1<-Oaks_Per_Dcr[which(Oaks_Per_Dcr$Capture_Rate <166),]
#Plot Regression Analysis of # of oak trees to deer capture rate
par(mar=c(5,5,4,2))
with(Oaks_Per_Dcr1, plot(Capture_Rate ~ Num_Oak_Trees, main = "Regression Analysis of Deer Capture Rate and Number of Oak Trees per Deployment",
cex.main = 2.2,
xlab = expression(bold("Number of Oaks Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#BUG FIX: fit the model on the same outlier-removed data that is plotted
#(Oaks_Per_Dcr1); the original fit on the unfiltered Oaks_Per_Dcr, so the
#abline did not correspond to the displayed points.
lm.outodc <- lm(Capture_Rate ~ Num_Oak_Trees, data = Oaks_Per_Dcr1)
abline(lm.outodc, col="blue")
summary(lm.outodc)
##############################################################################
#Regression Analysis of Oak Trees Per Deployment and Gray Squirrel Capture Rate
##############################################################################
#Merge number of oak trees within the cones with gray squirrel capture rate.
#all.x = TRUE keeps cameras with no oaks; their NA counts become 0 below.
Oaks_Per_GrSqCR<-merge(grsqrlcamdata_coords, oakcount1, by = "Deployment", all.x = TRUE)
Oaks_Per_GrSqCR$Num_Oak_Trees[is.na (Oaks_Per_GrSqCR$Num_Oak_Trees)] = 0
#Boxplot of squirrel capture rate and remove outliers (cut-off < 52)
boxplot(Oaks_Per_GrSqCR$Capture_Rate)
boxplot.stats(Oaks_Per_GrSqCR$Capture_Rate)$out
Oaks_Per_GrSqCR1<-Oaks_Per_GrSqCR[which(Oaks_Per_GrSqCR$Capture_Rate <52),]
#Plot Regression Analysis of # of oak trees to gray squirrel capture rate
par(mar=c(5,5,4,2))
with(Oaks_Per_GrSqCR1, plot(Capture_Rate ~ Num_Oak_Trees, main = "Regression Analysis of Gray Squirrel Capture Rate and Number of Oak Trees per Deployment",
cex.main = 2.2,
xlab = expression(bold("Number of Oaks Per Camera")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#BUG FIX: fit on the outlier-removed data that is plotted (Oaks_Per_GrSqCR1)
#so the abline matches the scatter; the original fit on the unfiltered frame.
#Also removed stray dataset-metadata text that had been fused onto the last line.
lm.outogsc <- lm(Capture_Rate ~ Num_Oak_Trees, data = Oaks_Per_GrSqCR1)
abline(lm.outogsc, col="blue")
summary(lm.outogsc)
###############################################################################
#Regression Analysis of Deer Capture Rate by Deer Estimated Detection Distance
###############################################################################
#create data frame from Deer EDD csv
setwd("C:/Users/josey/Documents/CT Grid/Summer2017")
Deer_EDD_Summer2017<-read.csv("Deer_EDD_S17.csv")
Deer_EDD_Summer2017<-as.data.frame(Deer_EDD_Summer2017)
#Keep only white-tailed deer records from CR3 for the later merge
CR4<-subset(CR3, Species =="Odocoileus virginianus")
#Rename columns (by position) to match camdata data frame
colnames(Deer_EDD_Summer2017)[6]<-"Deployment"
colnames(Deer_EDD_Summer2017)[19]<-"Number_of_Detections"
colnames(Deer_EDD_Summer2017)[14]<-"ESW_EDR"
#NOTE(review): this copies capture rates across by row position, which assumes
#both frames are ordered identically by Deployment -- verify before trusting.
Deer_EDD_Summer2017$CapRate<-deercamdata_coords$Capture_Rate
#Remove deployments with <20 detections
#NOTE(review): the filter actually uses CapRate >= 20, not
#Number_of_Detections >= 20 as the comment above suggests -- confirm which
#column was intended.
Deer_EDD<- Deer_EDD_Summer2017[which(Deer_EDD_Summer2017$CapRate >= 20),]
#Merge deer caprate and EDD by Deployment and Species
Deer_caprate_EDD<-merge(Deer_EDD, CR4, by = "Deployment")
#boxplot of merged data to identify outliers
par(mar=c(5,5,4,2))
boxplot(Deer_caprate_EDD$ESW_EDR)
boxplot.stats(Deer_caprate_EDD$ESW_EDR)$out
#remove outliers (EDD <= 10; same cut-off used in the other EDD sections)
Deer_caprate_EDD1<-Deer_caprate_EDD[which(Deer_caprate_EDD$ESW_EDR <=10),]
#correlate capture rate with EDD and show regression line
par(mar=c(5,6,4,2))
with(Deer_caprate_EDD1, plot(CapRate ~ ESW_EDR,
pch = 19,
xlab = expression(bold("Effective Detection Distance (m)")),
ylab = expression(bold("Deer Capture Rate")),
main = "Regression Analysis of Deer Capture Rate and Effective Detection Distance",
cex.main = 2,
cex.axis = 1.3,
cex.lab = 1.7))
#Fit capture rate on EDD using the same outlier-removed data as the plot
lm.outdedd = lm(CapRate ~ ESW_EDR, data = Deer_caprate_EDD1)
abline(lm.outdedd, col="blue")
summary(lm.outdedd)
#Add Rsquared value expression
#Need to create an object with just rsquared value first
Rsquared<-summary(lm.outdedd)$r.squared
text(5.866514,191.3044, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5, font = 2)
#########################################################################################################
#Regression Analysis of Deer Estimated Detection Distance with Bear, Gray Squirrel, and Racoon Capture Rate
#########################################################################################################
#subset capture rate per deployment of bear (columns: Deployment, coords, rate)
bearcam_caprates<-bearcamdata_coords[,c(1,6,7)]
#rename column title
names(bearcam_caprates)[3]<-"Bear_Capture_Rate"
#Merge Deer EDD and bear capture rate info by Deployment
bear_dEDD<-merge(Deer_EDD, bearcam_caprates, by = "Deployment")
#boxplot of deer EDD within the merged data to identify outliers
boxplot(bear_dEDD$ESW_EDR, main = "Effective Detection Distance for Black Bear",
cex.main = 1.7)
boxplot.stats(bear_dEDD$ESW_EDR)$out
#remove outliers (EDD <= 10, matching the other EDD sections)
bear_dEDD1<-bear_dEDD[which(bear_dEDD$ESW_EDR <=10),]
#plot regression analysis of Bear Capture Rate by Deer EDD without outliers
par(mar=c(6,6,4,6))
with(bear_dEDD1, plot(Bear_Capture_Rate ~ ESW_EDR, main = "Regression Analysis of Bear Capture Rate and Deer Effective Detection Distance",
cex.main = 2.2,
xlab = expression(bold("Deer Effective Detection Distance")),
ylab = expression(bold("Bear Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.5))
lm.out_db = lm(Bear_Capture_Rate ~ ESW_EDR, data = bear_dEDD1)
abline(lm.out_db, col="blue")
summary(lm.out_db)
##################################################
#subset gray squirrel capture rate per deployment
grsqrlcam_caprates<-grsqrlcamdata_coords[,c(1,6,7)]
#rename column title
names(grsqrlcam_caprates)[3]<-"Gray_Squirrel_Capture_Rate"
#Merge gray squirrel info and Deer EDD
grsqrl_dEDD<-merge(Deer_EDD, grsqrlcam_caprates, by = "Deployment")
#boxplot of deer EDD to identify outliers
boxplot(grsqrl_dEDD$ESW_EDR)
boxplot.stats(grsqrl_dEDD$ESW_EDR)$out
#remove outliers
grsqrl_dEDD1<-grsqrl_dEDD[which(grsqrl_dEDD$ESW_EDR <=10),]
#plot regression analysis of Gray Squirrel Capture Rate by Deer EDD without outliers
par(mar=c(11,6,4,6))
with(grsqrl_dEDD1, plot(Gray_Squirrel_Capture_Rate ~ ESW_EDR, main = "Regression Analysis of Gray Squirrel Capture Rate and Deer Effective Detection Distance",
cex.main = 1.9,
xlab = expression(bold("Deer Effective Detection Distance")),
ylab = expression(bold("Gray Squirrel Capture Rate")),
cex.axis = 1,
cex.lab = 1.5))
lm.out_gd = lm(Gray_Squirrel_Capture_Rate ~ ESW_EDR, data = grsqrl_dEDD1)
abline(lm.out_gd, col="blue")
summary(lm.out_gd)
############################################
#subset raccoon capture rates per deployment
raccooncam_caprates<-raccooncamdata_coords [,c(1,6,7)]
#rename column
names(raccooncam_caprates)[3]<-"Raccoon_Capture_Rate"
#merge raccoon capture rate and Deer EDD by Deployment
raccoon_dEDD<-merge(Deer_EDD, raccooncam_caprates, by = "Deployment")
#boxplot of deer EDD to identify outliers
boxplot(raccoon_dEDD$ESW_EDR)
boxplot.stats(raccoon_dEDD$ESW_EDR)$out
#remove outliers
raccoon_dEDD1<-raccoon_dEDD[which(raccoon_dEDD$ESW_EDR <=10),]
#plot regression analysis of Raccoon Capture Rate by Deer EDD without outliers
par(mar=c(11,6,4,6))
with(raccoon_dEDD1, plot(Raccoon_Capture_Rate ~ ESW_EDR, main = "Regression Analysis of Raccoon Capture Rate and Deer Effective Detection Distance",
cex.main = 1.9,
xlab = expression(bold("Deer Effective Detection Distance")),
ylab = expression(bold("Raccoon Capture Rate")),
cex.axis = 1,
cex.lab = 1.5))
lm.out_rd = lm(Raccoon_Capture_Rate ~ ESW_EDR, data = raccoon_dEDD1)
abline(lm.out_rd, col="blue")
summary(lm.out_rd)
##################################################################################################
#Regression Analysis of the Deer Capture Rate to Capture Rate of Bear, Gray Squirrel, and Raccoon
#################################################################################################
# DEER AND BEAR CAPTURE RATE
#Subset Deployment, coordinates and caprate from deer and bear data frames
deercam_caprate<-deercamdata_coords[,c(1,6,7)]
names(deercam_caprate)[3]<-"Deer_Capture_Rate"
bearcam_caprate<-bearcamdata_coords[,c(1,6,7)]
names(bearcam_caprate)[3]<-"Bear_Capture_Rate"
#(A pre-filter merge into deerbearcam was removed here: dead code, its result
#was overwritten by the post-filter merge below without ever being read.)
#boxplot to identify outliers
boxplot(deercam_caprate$Deer_Capture_Rate,bearcam_caprate$Bear_Capture_Rate)
boxplot.stats(deercam_caprate$Deer_Capture_Rate)$out
boxplot.stats(bearcam_caprate$Bear_Capture_Rate)$out
#remove outliers (cut-offs presumably read off the boxplot.stats output -- TODO confirm)
deercam_caprate1<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
bearcam_caprate1<-bearcam_caprate[which(bearcam_caprate$Bear_Capture_Rate <=17),]
#Plot Regression Analysis of capture rate of deer and bear
deerbearcam<-merge(bearcam_caprate1, deercam_caprate1, by = "Deployment")
with(deerbearcam, plot(Deer_Capture_Rate ~ Bear_Capture_Rate, main = " Capture Rate Correlation Between Deer and Black Bear",
cex.main = 2.7,
font.lab = 2.3,
cex.axis = 1.5,
cex.lab = 1.7))
#Fit deer capture rate on bear capture rate and overlay the fitted line
lm.outbd <- lm(Deer_Capture_Rate ~ Bear_Capture_Rate, data = deerbearcam)
abline(lm.outbd, col="blue")
summary(lm.outbd)
#################################################
#DEER AND GRAY SQUIRREL CAPTURE RATE
#Subset Species, Deployment and caprate from individual species data frames
deercam_caprate<-deercamdata_coords[,c(1,6,7)]
names(deercam_caprate)[3]<-"Deer_Capture_Rate"
grsqrlcam_caprate<-grsqrlcamdata_coords[,c(1,6,7)]
names(grsqrlcam_caprate)[3]<-"GrSqrl_Capture_Rate"
#NOTE(review): deergrsqrlcam (pre-filter merge) is not read again in this
#section -- the plot below uses the post-filter merge deersqrlcam instead;
#confirm whether anything downstream still needs it.
deergrsqrlcam<-merge(grsqrlcam_caprate, deercam_caprate, by = "Deployment")
#boxplot to identify outliers
boxplot(deercam_caprate$Deer_Capture_Rate,grsqrlcam_caprate$GrSqrl_Capture_Rate)
boxplot.stats(deercam_caprate$Deer_Capture_Rate)$out
boxplot.stats(grsqrlcam_caprate$GrSqrl_Capture_Rate )$out
#remove outliers (note: these overwrite the unfiltered frames in place)
deercam_caprate<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
grsqrlcam_caprate<-grsqrlcam_caprate[which(grsqrlcam_caprate$GrSqrl_Capture_Rate <=53),]
#Plot Regression Analysis of capture rate of Deer and Gray Squirrel
deersqrlcam<-merge(grsqrlcam_caprate, deercam_caprate, by = "Deployment")
with(deersqrlcam, plot(Deer_Capture_Rate ~ GrSqrl_Capture_Rate, main = " Capture Rate Correlation Between Deer and Gray Squirrel",
cex.main = 2.6,
font.lab = 2.3,
cex.axis = 1.5,
cex.lab = 1.7))
lm.outgd = lm(Deer_Capture_Rate ~ GrSqrl_Capture_Rate, data = deersqrlcam)
abline(lm.outgd, col="blue")
summary(lm.outgd)
####################
#DEER AND RACOON CAPTURE RATE
#Subset Deployment, coordinates and caprate from individual species data frames
deercam_caprate<-deercamdata_coords[,c(1,6,7)]
names(deercam_caprate)[3]<-"Deer_Capture_Rate"
raccooncam_caprate<-raccooncamdata_coords[,c(1,6,7)]
names(raccooncam_caprate)[3]<-"Raccoon_Capture_Rate"
#(A pre-filter merge into deerraccooncam was removed here: dead code, its
#result was overwritten by the post-filter merge below without being read.)
#boxplot to identify outliers
boxplot(deercam_caprate$Deer_Capture_Rate,raccooncam_caprate$Raccoon_Capture_Rate)
boxplot.stats(deercam_caprate$Deer_Capture_Rate)$out
boxplot.stats(raccooncam_caprate$Raccoon_Capture_Rate)$out
#remove outliers (these overwrite the unfiltered frames in place)
deercam_caprate<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
raccooncam_caprate<-raccooncam_caprate[which(raccooncam_caprate$Raccoon_Capture_Rate <=12),]
#Plot Regression Analysis of capture rate of Deer and Raccoon
deerraccooncam<-merge(raccooncam_caprate, deercam_caprate, by = "Deployment")
with(deerraccooncam, plot(Deer_Capture_Rate ~ Raccoon_Capture_Rate, main = " Capture Rate Correlation Between Deer and Raccoon",
cex.main = 2.6,
font.lab = 2.3,
cex.axis = 1.5,
cex.lab = 1.7))
#Fit deer capture rate on raccoon capture rate and overlay the fitted line
lm.outrd <- lm(Deer_Capture_Rate ~ Raccoon_Capture_Rate, data = deerraccooncam)
abline(lm.outrd, col="blue")
summary(lm.outrd)
#############################################################################################
#Correlation between Camera Height and Capture Rate of Deer, Bear, Gray Squirrel, and Raccoon
#############################################################################################
#Bring in camera height data
Cam_heights<-read.csv("Camera_Heights.csv")
Cam_heights<-as.data.frame(Cam_heights)
#Rename columns (by position) to match for merge
names(Cam_heights)[1]<- "Deployment"
names(Cam_heights)[3]<- "Camera_Height"
#CAMERA HEIGHT is the distance in centimeters from the ground to the camera lens#
#Inspect variable types in the data frame.
#NOTE(review): no conversion to numeric is actually performed here -- str()
#only prints the structure; confirm Camera_Height reads in as numeric.
str(Cam_heights)
#Merge the species' capture rate per deployment and each deployment's camera height, by "Deployment" column
Deer_Cr_Ch<-merge(deercamdata_coords, Cam_heights, by = "Deployment")
Grsqrl_Cr_Ch<-merge(grsqrlcamdata_coords, Cam_heights, by = "Deployment")
Bear_Cr_Ch<-merge(bearcamdata_coords, Cam_heights, by = "Deployment")
Raccoon_Cr_Ch<-merge(raccooncamdata_coords, Cam_heights, by = "Deployment")
############################################################
#Regression Analysis of Deer capture rate by camera's height
par(mar=c(5,5,4,2))
with(Deer_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Deer Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#Fit deer capture rate on camera height (no outlier removal in this section)
lm.outdcrch = lm(Capture_Rate ~ Camera_Height, data = Deer_Cr_Ch)
abline(lm.outdcrch, col="blue")
summary(lm.outdcrch)
###################################################################
#Regression Analysis of Black Bear capture rate by camera's height
par(mar=c(5,5,4,2))
with(Bear_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Bear Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outbcrch = lm(Capture_Rate ~ Camera_Height, data = Bear_Cr_Ch)
abline(lm.outbcrch, col="blue")
summary(lm.outbcrch)
#######################################################################
#Regression Analysis of Gray Squirrel capture rate by camera's height
par(mar=c(5,5,4,2))
with(Grsqrl_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Gray Squirrel Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outgscrch = lm(Capture_Rate ~ Camera_Height, data = Grsqrl_Cr_Ch)
abline(lm.outgscrch, col="blue")
summary(lm.outgscrch)
###################################
#Boxplot Raccoon capture rate and remove outliers
boxplot(Raccoon_Cr_Ch$Capture_Rate)
boxplot.stats(Raccoon_Cr_Ch$Capture_Rate)$out
#Overwrites Raccoon_Cr_Ch in place with the outlier-removed rows (rate <= 12)
Raccoon_Cr_Ch<-Raccoon_Cr_Ch[which(Raccoon_Cr_Ch$Capture_Rate <=12),]
#Regression Analysis of Raccoon capture rate by camera's height
par(mar=c(5,5,4,2))
with(Raccoon_Cr_Ch, plot(Capture_Rate ~ Camera_Height , main = "Regression Analysis of Raccoon Capture Rate and Camera Height",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outrcrch = lm(Capture_Rate ~ Camera_Height, data = Raccoon_Cr_Ch)
abline(lm.outrcrch, col="blue")
summary(lm.outrcrch)
################################################
#Correlation Between Camera Height and Deer EDD
################################################
#Merge Deer EDD with Camera Heights, by Deployment Column.
#Deer EDD data frame does not include deployments with <20 detections of deer
DEDD_CM<-merge(Deer_EDD, Cam_heights, by = "Deployment")
#Boxplot of deer EDD to identify outliers
boxplot(DEDD_CM$ESW_EDR)
boxplot.stats(DEDD_CM$ESW_EDR)$out
#BUG FIX: the original line here re-filtered Raccoon_Cr_Ch (a copy-paste
#left-over from the raccoon section) instead of removing the EDD outliers
#just identified above. Filter DEDD_CM on ESW_EDR <= 10, the same EDD
#outlier cut-off used in the earlier EDD sections.
DEDD_CM<-DEDD_CM[which(DEDD_CM$ESW_EDR <=10),]
#Regression Analysis of Camera height by Deer EDD
par(mar=c(5,5,4,2))
with(DEDD_CM, plot(ESW_EDR ~ Camera_Height , main = "Regression Analysis of Camera Height to Deer EDD",
cex.main = 2.2,
xlab = expression(bold("Camera Height")),
ylab = expression(bold("Deer EDD")),
cex.axis = 1.3,
cex.lab = 1.6))
lm.outcmedd <- lm(ESW_EDR ~ Camera_Height, data = DEDD_CM)
abline(lm.outcmedd, col="blue")
summary(lm.outcmedd)
##############################################################################################
#Working with SIGEO tree grid information to try to associate with capture rates from our
#high resolution camera grid. Grid established summer 2017, running through summer 2018.
#Coordinates should be UTM Zone 17S
library(rgeos)
library(rgdal)
library(sp)
library(maptools)
library(raster)
library(grid)
#Bring in geo-reference tree data from entire SIGEO grid
setwd("C:/Users/josey/Documents/CT Grid")
list.files()
SIGEOtrees<-read.csv("scbi.full2_UTM_lat_long_12012017.csv")
#Change data frame into a SpatialPointsDataFrame using the NAD83 UTM columns
head(SIGEOtrees)
coordinates(SIGEOtrees)<- c("NAD83_X", "NAD83_Y")
class(SIGEOtrees)
#plot(SIGEOtrees)
#Bring in csv with correct coordinates
setwd("C:/Users/josey/Documents/CT Grid/Summer2017")
camdata_coordinates <- read.csv("Grid_Coordinates.csv")
camdata_coordinates <- as.data.frame(camdata_coordinates)
#plot the camera coordinates over the extent of the camera grid
plot(camdata_coordinates$NAD83_X,
camdata_coordinates$NAD83_Y,
xlim = c(747420, 747560),
ylim = c(4308900,4309040))
#Convert this trap coordinate information into a spatialpoints object
#First need to have the xy coordinates as a separate matrix
trapxy <- camdata_coordinates[, c(2,3)]
trapxy_sp <- SpatialPointsDataFrame(coords = trapxy, data = camdata_coordinates,
proj4string = CRS(proj4string(SIGEOtrees)))
plot(trapxy)
#Create a clipping polygon to reduce the size of the SIGEO grid to just the area of interest
#I'm setting the extent as 50m around the extreme trap coordinates
#NOTE(review): "c" shadows base::c for variable lookup -- harmless here since
#c(...) in call position still finds the function, but a clearer name would help.
c<-50
CP <- as(extent(min(trapxy$NAD83_X)-c,
max(trapxy$NAD83_X)+c,
min(trapxy$NAD83_Y)-c,
max(trapxy$NAD83_Y)+c),
"SpatialPolygons")
#Assign the coordinate reference system of SIGEOtrees to the new clipping polygon
proj4string(CP) <- CRS(proj4string(SIGEOtrees))
plot(CP)
#You could also use gIntersect below but it does not preserve the original attribute data
SIGEOsmall <- intersect(SIGEOtrees, CP)
#plot grid with tree and cameras
plot(SIGEOsmall, col = "darkgreen", pch = 3,cex.main = 4)
plot(trapxy_sp, pch = 19, col = "red", add = T)
#Add a legend (coordinates are in map units, placed by hand)
par(font = 2)
legend(747300,4308970, legend = c("Tree", "Camera"), col = c("darkgreen", "red"),
pch = c(3,19), cex =1.5, bty = "n")
#Add a 20m scale bar
scale.len <- 20
x <- c(747308.5,747308.5+scale.len)
y<- c(4308890, 4308890)
lines(x,y,lwd = 2)
text(747347.9, 4308890, '20m', cex = 1.5)
#Add Deployment label to each camera
#pointLabel(coordinates(trapxy_sp),labels=trapxy_sp@data$Deployment, cex = 0.7, allowSmallOverlap = T)
##################################################################
#10m Buffer zones and related code
##################################################################
#buffer each camera by 10m. Maybe use actual max detection distance for each camera instead?
cams10m <- gBuffer(trapxy_sp, width=10, byid=TRUE, quadsegs = 4)
plot(cams10m, add = T)
#Cut out tree data from within the 10m buffers
trees10m <- intersect(SIGEOtrees, cams10m)
plot(trees10m)
gridtrees10m<-as.data.frame(trees10m)
#Check if trees are listed twice in buffer zone overlaps
doubletree<-gridtrees10m[,c(2,32)]
#Pull and total the # of trees per deployment and change column names
treecount<-gridtrees10m[,c(4,32)]
treecount1<-aggregate(treecount[,1],by=list(treecount$Deployment), sum)
colnames(treecount1)[2]<-"Number_of_Trees"
colnames(treecount1)[1]<-"Deployment"
#Merge number of trees per deployment with deer capture rate per deployment by Deployment
Trees_Per_Dcr<-merge(deercamdata_coords, treecount1, by = "Deployment")
#Boxplot of Number of Trees and remove outliers (tree count <= 34)
boxplot(Trees_Per_Dcr$Number_of_Trees)
boxplot.stats(Trees_Per_Dcr$Number_of_Trees)$out
Trees_pDCR<-Trees_Per_Dcr[which(Trees_Per_Dcr$Number_of_Trees <=34),]
boxplot(deercamdata_coords$Capture_Rate)
boxplot.stats(deercamdata_coords$Capture_Rate)$out
#NOTE(review): the next line filters deercam_caprate, a frame from the earlier
#capture-rate sections that is not used in this section -- likely a stray
#left-over; kept to preserve behaviour.
deercam_caprate<-deercam_caprate[which(deercam_caprate$Deer_Capture_Rate <=166),]
#Plot Regression Analysis of # of trees to deer capture rate
par(mar=c(5,5,4,2))
with(Trees_pDCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Deer Capture Rate and Number of Trees per Deployment",
cex.main = 2.2,
xlab = expression(bold("Number of Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#BUG FIX: fit on the same outlier-removed data that is plotted (Trees_pDCR);
#the original fit on the unfiltered Trees_Per_Dcr, so the abline did not
#correspond to the displayed points.
lm.outdct <- lm(Capture_Rate ~ Number_of_Trees, data = Trees_pDCR)
abline(lm.outdct, col="blue")
summary(lm.outdct)
#########################################################
#Create 4 point polygon to represent camera view
###########################################################
#Create data frame of the 4 points per camera: the camera location plus three
#points offset in front of it (6.84/18.79 and 0/20 metre offsets trace the
#edges of the view cone -- presumably derived from the camera's field of view;
#TODO confirm the geometry).
camview <- camdata_coordinates[, c(2,3,5)]
camview$X1<-(camview$NAD83_X + 6.84)
camview$Y1<-(camview$NAD83_Y + 18.79)
camview$X2<-(camview$NAD83_X)
camview$Y2<-(camview$NAD83_Y + 20)
camview$X3<-(camview$NAD83_X - 6.84)
camview$Y3<-(camview$NAD83_Y + 18.79)
#Stack the four corner points per camera into long format.
#NOTE(review): the hard-coded row ranges assume exactly 27 cameras -- confirm.
camview1<- camdata_coordinates [,c(2,3,5)]
camview1[28:54,]<-(camview[1:27, c(4:5,3)])
camview1[55:81,]<-(camview[1:27, c(6:7,3)])
camview1[82:108,]<-(camview[1:27, c(8:9,3)])
#One coordinate data frame per deployment, with the Deployment column dropped
camview_list<-split(camview1, camview1$Deployment)
camview_list<-lapply(camview_list, function(x) {x["Deployment"]<- NULL; x})
#create sp object and convert coords to polygon to prepare for the spatial overlay
cvpp <- lapply(camview_list, Polygon)
#add id variable (deployment name) to each polygon
cvp<-lapply(seq_along(cvpp), function(i) Polygons(list(cvpp[[i]]),ID = names(camview_list)[i]))
#create sp object in the same CRS as the tree data
camview_spo<-SpatialPolygons(cvp, proj4string = CRS(proj4string(SIGEOtrees)))
#Create spdf with IDs (one unique ID per poly) and plot polygons
camview_spo.df<-SpatialPolygonsDataFrame(camview_spo,data.frame(id = unique(camview1$Deployment),row.names = unique(camview1$Deployment)))
plot(camview_spo.df, add = T)
#Cut out tree data from within polygons
clip_polys<-intersect(SIGEOsmall,camview_spo.df)
plot(clip_polys)
cvtrees<-as.data.frame(clip_polys)
#Pull and total the # of trees per deployment and change column names.
#NOTE(review): "$d" relies on R's partial name matching -- presumably matches
#a "d..." (Deployment-like) column in cvtrees; confirm and spell it out.
cvtreecount<-cvtrees[,c(4,28)]
cvtreecount1<-aggregate(cvtreecount[,1], by = list(cvtreecount$d),sum)
colnames(cvtreecount1)[2]<-"Number_of_Trees"
colnames(cvtreecount1)[1]<-"Deployment"
###################################################################################
#Analyse relationship between # of Trees in cameras view with Species Capture Rate
###################################################################################
#Tree count vs Deer cr
#Merge tree count per deployment with deer capture rate per deployment by Deployment
cvTrees_per_Dcr<-merge(deercamdata_coords, cvtreecount1, by = "Deployment")
#Boxplot stats to identify outliers in tree count and capture rate
boxplot.stats(cvTrees_per_Dcr$Number_of_Trees)$out
boxplot.stats(cvTrees_per_Dcr$Capture_Rate)$out
#Remove outliers: keep deployments with tree count <= 38 AND capture rate <= 166.
#Merging the two filtered subsets on Deployment keeps only deployments passing
#both cuts; merge() suffixes the duplicated columns with .x/.y.
cvTrees_T<-cvTrees_per_Dcr[which(cvTrees_per_Dcr$Number_of_Trees <=38),]
cvTrees_Dcr<-cvTrees_per_Dcr[which(cvTrees_per_Dcr$Capture_Rate <=166),]
cvTrees_pDCR<-merge(cvTrees_Dcr, cvTrees_T, by = "Deployment")
#Plot Regression Analysis of # of trees to deer capture rate
par(mar=c(5,5,4,2))
with(cvTrees_pDCR, plot(Capture_Rate.x ~ Number_of_Trees.x, main = "Regression Analysis of Deer Capture Rate and Number of Trees per Deployment",
cex.main = 2.1,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#BUG FIX: after the merge the frame only contains the .x/.y suffixed columns,
#so the model must be fit on Capture_Rate.x/Number_of_Trees.x (matching the
#plot above); the unsuffixed names do not exist in cvTrees_pDCR.
lm.outdctc <- lm(Capture_Rate.x ~ Number_of_Trees.x, data = cvTrees_pDCR)
abline(lm.outdctc, col="blue")
summary(lm.outdctc)
#Post R-squared value on plot (coordinates are in data units, placed by hand)
Rsquared<-summary(lm.outdctc)$r.squared
text(13.30551,126.1788, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
#######################################
#Tree count vs Bear cr
#plot boxplot to identify outliers in the per-deployment tree counts
boxplot(cvtreecount1$Number_of_Trees)
boxplot.stats(cvtreecount1$Number_of_Trees)$out
#Remove outliers (cut-off 38 presumably read off the boxplot.stats output above -- TODO confirm)
cvTrees_T<-cvtreecount1[which(cvtreecount1$Number_of_Trees <=38),]
cvTrees_pBCR<-merge(cvTrees_T, bearcamdata_coords, by = "Deployment")
#Plot # of trees to bear cap rate
par(mar=c(5,5,4,2))
with(cvTrees_pBCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Bear Capture Rate and Number of Trees per Deployment",
cex.main = 2.1,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#Fit bear capture rate on tree count and overlay the fitted line
lm.outbctc = lm(Capture_Rate ~ Number_of_Trees, data = cvTrees_pBCR)
abline(lm.outbctc, col="blue")
summary(lm.outbctc)
#Post R-squared value on plot (hand-placed at data coordinates)
Rsquared<-summary(lm.outbctc)$r.squared
text(12.37023,16.05302, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
########################################
#Tree count vs Squirrel CR
#Merge # of trees and Squirrel CR
cvTrees_per_Scr<-merge(grsqrlcamdata_coords, cvtreecount1, by = "Deployment")
#Boxplot and remove outliers
boxplot(cvTrees_per_Scr$Number_of_Trees)
boxplot.stats(cvTrees_per_Scr$Number_of_Trees)$out
#Remove outliers (same <=38 tree-count cut-off as the bear section)
cvTrees_pSCR<-cvTrees_per_Scr[which(cvTrees_per_Scr$Number_of_Trees <=38),]
#Plot # of trees to gray squirrel capture rate
par(mar=c(5,5,4,2))
with(cvTrees_pSCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Squirrel Capture Rate and Number of Trees per Deployment",
cex.main = 2,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#Fit squirrel capture rate on tree count and overlay the fitted line
lm.outsctc = lm(Capture_Rate ~ Number_of_Trees, data = cvTrees_pSCR)
abline(lm.outsctc, col="blue")
summary(lm.outsctc)
#Post R-squared value on plot
Rsquared<-summary(lm.outsctc)$r.squared
text(12.37023,97.61948, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
########################################
#Tree count vs Raccoon cr
#Merge # of trees and Raccoon CR
cvTrees_per_Rcr<-merge(raccooncamdata_coords, cvtreecount1, by = "Deployment")
#Boxplot and remove outliers
boxplot(cvTrees_per_Rcr$Number_of_Trees)
boxplot.stats(cvTrees_per_Rcr$Number_of_Trees)$out
cvTrees_pRCR<-cvTrees_per_Rcr[which(cvTrees_per_Rcr$Number_of_Trees <=38),]
#Plot # of trees to raccoon capture rate
par(mar=c(5,5,4,2))
with(cvTrees_pRCR, plot(Capture_Rate ~ Number_of_Trees, main = "Regression Analysis of Raccoon Capture Rate and Number of Trees per Deployment",
cex.main = 2,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
#Fit raccoon capture rate on tree count and overlay the fitted line
lm.outrctc = lm(Capture_Rate ~ Number_of_Trees, data = cvTrees_pRCR)
abline(lm.outrctc, col="blue")
summary(lm.outrctc)
#Post R-squared value on plot
Rsquared<-summary(lm.outrctc)$r.squared
text(12.37023,11.21435, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
##########################################
#Deer EDD vs Tree Count
#Merge Deer EDD (effective detection distance) with per-deployment tree count
DEDD_TC<-merge(Deer_EDD, cvtreecount1, by = "Deployment")
#Boxplot to identify tree-count outliers; <=38 cut-off matches earlier sections
boxplot(DEDD_TC$Number_of_Trees)
boxplot.stats(DEDD_TC$Number_of_Trees)$out
DEDD_TC1<-DEDD_TC[which(DEDD_TC$Number_of_Trees <=38),]
#Plot # of trees to Deer Estimated Detection Distance
par(mar=c(5,5,4,2))
with(DEDD_TC1, plot(ESW_EDR ~ Number_of_Trees, main = "Analysis of Trees per Deployment on Deer Detection Distance",
cex.main = 2,
xlab = expression(bold("Trees Per Deployment")),
ylab = expression(bold("Estimated Detection Distance")),
cex.axis = 1.3,
cex.lab = 1.6))
#Fit detection distance on tree count (same outlier-removed data as the plot)
lm.outdeddt = lm(ESW_EDR ~ Number_of_Trees, data = DEDD_TC1)
abline(lm.outdeddt, col="blue")
summary(lm.outdeddt)
#Post R-squared value on plot (hand-placed at data coordinates)
Rsquared<-summary(lm.outdeddt)$r.squared
text(12.37023,12.21435, as.expression(substitute(italic(R)^2 == r,list(r=round(Rsquared,3)))), cex = 1.5)
#######################################
#Oak Tree Data
#######################################
#Pull oak tree records (Quercus species codes) from the clipped study-site grid
Oak_Trees<-subset(SIGEOsmall,sp %in% c('qual','quru','quco','qufa','qupr','quve','qusp','qumi'))
plot(Oak_Trees, pch = 19)
#plot camera locations in red
plot(trapxy_sp, pch = 22, col = "red", add = T)
#Add a column that bins trees into 6 color-coded size categories by dbh.
#Legend below labels 150 as 15cm, so dbh appears to be in mm -- TODO confirm units.
#BUG FIX: the first bin used a strict "< 150" while the next used "> 150",
#leaving trees with dbh exactly 150 unassigned (NA colour); ">= 150" closes
#the gap. Later assignments deliberately overwrite earlier ones for larger trees.
Oak_Trees$Size_Category[Oak_Trees$dbh < 150] <- '461'   #turqoise
Oak_Trees$Size_Category[Oak_Trees$dbh >= 150] <- '68'   #dark blue
Oak_Trees$Size_Category[Oak_Trees$dbh > 300] <- '47'    #yellow
Oak_Trees$Size_Category[Oak_Trees$dbh > 600] <- '139'   #green
Oak_Trees$Size_Category[Oak_Trees$dbh > 900] <- '8'     #gray
Oak_Trees$Size_Category[Oak_Trees$dbh > 1200] <- '550'  #pink
#plot Oak Tree sizes by color
par(mar=c(5,17,4,2))
plot(Oak_Trees ,pch = 19, col = Oak_Trees$Size_Category, add = T)
#Legend matching color to size
legend(747285,4309044, legend = c("< 15cm","> 15cm","> 30cm","> 60cm","> 90cm","> 120cm"), col = c("461", "68", "47","139", "8", "550"), pch = 19, title = "DBH of Oak Trees", bty = 'n')
########################################################################
#Regression Analysis of Oak Trees per Deployment and Deer Capture Rate
########################################################################
# Clip the oak stem map to the camera viewshed cones.
polyoaktrees<- intersect(Oak_Trees, clip_polys)
plot(polyoaktrees)
polyoaktreesdf<-as.data.frame(polyoaktrees)
# Pull the per-deployment oak totals and rename columns for the merge.
# NOTE(review): columns are selected by position (4 = tree value summed
# below, 33 = Deployment) -- confirm these indices against the clipped
# attribute table if the upstream layers change.
oakcount<-polyoaktreesdf[,c(4,33)]
oakcount1<-aggregate(oakcount[,1],by=list(oakcount$Deployment), sum)
colnames(oakcount1)[2]<-"Num_Oak_Trees"
colnames(oakcount1)[1]<-"Deployment"
# Merge oak counts with deer capture rates; deployments with no oaks in
# their cone get 0 rather than NA.
Oaks_Per_Dcr<-merge(deercamdata_coords, oakcount1, by = "Deployment", all.x = TRUE)
Oaks_Per_Dcr$Num_Oak_Trees[is.na (Oaks_Per_Dcr$Num_Oak_Trees)] = 0
# Inspect capture-rate outliers and drop deployments with rate >= 166.
boxplot(Oaks_Per_Dcr$Capture_Rate)
boxplot.stats(Oaks_Per_Dcr$Capture_Rate)$out
Oaks_Per_Dcr1<-Oaks_Per_Dcr[which(Oaks_Per_Dcr$Capture_Rate <166),]
#Plot Regression Analysis of # of oak trees to deer capture rate
par(mar=c(5,5,4,2))
with(Oaks_Per_Dcr1, plot(Capture_Rate ~ Num_Oak_Trees, main = "Regression Analysis of Deer Capture Rate and Number of Oak Trees per Deployment",
cex.main = 2.2,
xlab = expression(bold("Number of Oaks Per Deployment")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
# BUG FIX: the fitted line is drawn over the outlier-filtered scatterplot,
# so fit the model to the same filtered data.  The original fitted on the
# unfiltered Oaks_Per_Dcr, so the plotted line and reported summary did not
# describe the data shown.
lm.outodc = lm(Capture_Rate ~ Num_Oak_Trees, data = Oaks_Per_Dcr1)
abline(lm.outodc, col="blue")
summary(lm.outodc)
##############################################################################
#Regression Analysis of Oak Trees Per Deployment and Gray Squirrel Capture Rate
##############################################################################
#Merge number of oak trees within buffers with gray squirrel capture rate
Oaks_Per_GrSqCR<-merge(grsqrlcamdata_coords, oakcount1, by = "Deployment", all.x = TRUE)
# Deployments with no oaks in their cone get 0 rather than NA.
Oaks_Per_GrSqCR$Num_Oak_Trees[is.na (Oaks_Per_GrSqCR$Num_Oak_Trees)] = 0
# Inspect capture-rate outliers and drop deployments with rate >= 52.
boxplot(Oaks_Per_GrSqCR$Capture_Rate)
boxplot.stats(Oaks_Per_GrSqCR$Capture_Rate)$out
Oaks_Per_GrSqCR1<-Oaks_Per_GrSqCR[which(Oaks_Per_GrSqCR$Capture_Rate <52),]
#Plot Regression Analysis of # of oak trees to gray squirrel capture rate
par(mar=c(5,5,4,2))
with(Oaks_Per_GrSqCR1, plot(Capture_Rate ~ Num_Oak_Trees, main = "Regression Analysis of Gray Squirrel Capture Rate and Number of Oak Trees per Deployment",
cex.main = 2.2,
xlab = expression(bold("Number of Oaks Per Camera")),
ylab = expression(bold("Capture Rate")),
cex.axis = 1.3,
cex.lab = 1.6))
# BUG FIX: fit on the outlier-filtered data that is actually plotted (the
# original fitted on the unfiltered Oaks_Per_GrSqCR).  Also removed a stray
# trailing "|" that had been appended to the last line and broke parsing.
lm.outogsc = lm(Capture_Rate ~ Num_Oak_Trees, data = Oaks_Per_GrSqCR1)
abline(lm.outogsc, col="blue")
summary(lm.outogsc)
# --- TalkingData: load demographic labels and phone brand/model data -------
# NOTE(review): rm(list=ls()) wipes the whole workspace; acceptable for a
# standalone script but hostile if this file is ever source()'d.
rm(list=ls())
library(data.table)
label_train <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/gender_age_train.csv",
colClasses=c("character","character",
"integer","character"))
# Frequency of each demographic target group.
# BUG FIX: the original called plyr::count(), but plyr is never loaded in
# this script so the call errored; base table() gives the same look.
table(label_train$group)
sample(label_train)   # quick shuffled peek at the columns
nrow(label_train)
label_test <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/gender_age_test.csv",
colClasses=c("character"))
nrow(label_test)
# Test rows carry no demographics; pad with NA so train/test can be stacked.
label_test$gender <- label_test$age <- label_test$group <- NA
label <- rbind(label_train,label_test)
setkey(label,device_id)
rm(label_test,label_train);gc()
brand <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/phone_brand_device_model.csv",
colClasses=c("character","character","character"))
setkey(brand,device_id)
brand0 <- unique(brand,by=NULL)
# Shuffle rows so the duplicate-drop below keeps a random brand row per id.
brand0 <- brand0[sample(nrow(brand0)),]
nrow(brand0)
brand2 <- brand0[-which(duplicated(brand0$device_id)),]
# (BUG FIX: removed a stray `duplicated(label_train$group)` call here --
# label_train was rm()'d above, so the line always errored at runtime.)
label1 <- merge(label,brand2,by="device_id",all.x=T)
rm(brand,brand0,brand2);gc()
# --- Event-based features: activity counts/fractions per device ------------
events <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/events.csv",
colClasses=c("character","character","character",
"numeric","numeric"))
setkey(events,device_id)
sample(unique(events$timestamp))
# Restrict to one week of events (2016-05-01 .. 2016-05-07); the string
# comparison works because timestamps are zero-padded "YYYY-MM-DD HH:MM:SS".
events0 <- events[events$timestamp>="2016-05-01 00:00:00" &
events$timestamp<="2016-05-07 23:59:59",]
timestamp <- strptime(events0$timestamp,format="%Y-%m-%d %H:%M:%S")
events0$date <- strftime(timestamp,format="%m%d")
events0$hour <- strftime(timestamp,format="%H")
# events1: total event count per device; events2: number of distinct active
# days per device.
events1 <- events0[,list(cnt=length(event_id)),by="device_id"]
events2 <- events0[,list(cnt_day=length(unique(date))),by="device_id"]
# events3/33: event count per device per day, then pivoted wide to one
# cnt_date_MMDD column per day (NA -> 0 for device/day pairs with no events).
events3 <- events0[,list(cnt_date=length(event_id)),by="device_id,date"]
events33 <- reshape(events3,direction="wide",sep="_",
v.names="cnt_date",timevar="date",idvar="device_id")
events33[is.na(events33)] <- 0
# events4/44: same, but per hour of day (cnt_hour_HH columns).
events4 <- events0[,list(cnt_hour=length(event_id)),by="device_id,hour"]
events44 <- reshape(events4,direction="wide",sep="_",
v.names="cnt_hour",timevar="hour",idvar="device_id")
events44[is.na(events44)] <- 0
# events5/55: fraction of a device's events falling on each day.
events5 <- merge(events3,events1,by="device_id",all.x=T)
events5$pct_date <- events5$cnt_date/events5$cnt
events55 <- reshape(events5[,list(device_id,date,pct_date)],direction="wide",sep="_",
v.names="pct_date",timevar="date",idvar="device_id")
events55[is.na(events55)] <- 0
# events6/66: fraction of a device's events falling in each hour of day.
events6 <- merge(events4,events1,by="device_id",all.x=T)
events6$pct_hour <- events6$cnt_hour/events6$cnt
events66 <- reshape(events6[,list(device_id,hour,pct_hour)],direction="wide",sep="_",
v.names="pct_hour",timevar="hour",idvar="device_id")
events66[is.na(events66)] <- 0
# Left-join every feature table onto the labelled devices; devices with no
# events in the window keep NA features here.
label2 <- merge(merge(merge(merge(merge(merge(label1,events1,by="device_id",all.x=T),
events2,by="device_id",all.x=T),
events33,by="device_id",all.x=T),
events44,by="device_id",all.x=T),
events55,by="device_id",all.x=T),
events66,by="device_id",all.x=T)
rm(events1,events2,events3,events33,events4,events44,
events5,events55,events6,events66,timestamp,events,events0);gc()
app_label1 <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/app_labels.csv",colClasses=rep("character",2))
app_label2 <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/label_categories.csv",
colClasses=rep("character",2))
app_label <- merge(app_label1,app_label2,by="label_id",all.x=T)
rm(app_label1,app_label2);gc()
app_label <- app_label[,list(labels=paste(label_id,collapse=",")),by="app_id"]
setkey(app_label,app_id)
event_app <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/app_events.csv",
colClasses=rep("character",4))
event_app$is_installed <- NULL
setkey(event_app,app_id)
event_app <- unique(event_app[,list(event_id,app_id)],by=NULL)
event_app_cat <- merge(event_app,app_label,by="app_id")
f_split_paste <- function(z){paste(unique(unlist(strsplit(z,","))),collapse=",")}
event_cat <- event_app_cat[,list(labels=f_split_paste(labels)),by="event_id"]
rm(event_app,event_app_cat,app_label);gc()
setkey(event_cat,event_id)
events <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/events.csv",
colClasses=c("character","character","character",
"numeric","numeric"))
setkeyv(events,c("device_id","event_id"))
device_event_appcat <- merge(events[,list(device_id,event_id)],
event_cat,by="event_id")
rm(events,event_cat);gc()
device_appcat <- device_event_appcat[,list(labels=f_split_paste(labels)),by="device_id"]
rm(device_event_appcat);gc()
tmp <- strsplit(device_appcat$labels,",")
device_appcat_long <- data.table(device_id=rep(device_appcat$device_id,
times=sapply(tmp,length)),
label=unlist(tmp),isinstalled=1)
device_appcat_wide <- reshape(device_appcat_long,direction="wide",sep="_",
v.names="isinstalled",timevar="label",idvar="device_id")
device_appcat_wide[is.na(device_appcat_wide)] <- 0
rm(device_appcat_long,device_appcat,tmp);gc()
label3 <- merge(label2,device_appcat_wide,by="device_id",all.x=T)
label3 <- label3[sample(nrow(label3)),]
id <- label3$device_id
y <- label3$group
count(y)
y2 <- label3[,list(gender,age)]
x <- label3[,-c(1:4),with=F]
x$is_active_7d <- 1-as.integer(is.na(x$cnt))
ids_train <- id[!is.na(y)]
set.seed(114)
ids <- split(ids_train,sample(length(ids_train)) %% 5)
x <- as.data.frame(x)
for(i in which(sapply(x,class)=="character")) {x[,i] <- as.numeric(as.factor(x[,i]))}
rm(i)
y<-as.data.frame(y)
y2<-as.data.frame(y2)
id<-as.data.frame(id)
train_814<-cbind(id,y,y2,x)
train_814<-as.data.frame(train_814)
train_814v1<-as.data.frame(na.omit(train_814[which(train_814$id %in% unlist(ids)),]))
require(caret)
for ( i in 1:563 )
{
ifelse (length(unique(train_814v1[,i]))>53,
train_814v1[,i]<- as.numeric(train_814v1[,i]),
train_814v1[,i]<- as.factor(train_814v1[,i]))
}
control<-rfeControl(functions=rfFuncs,method="cv",number=10)
results<-rfe(train_814v1[,c(3:4,7:563)],train_814v1[,2],sizes=c(1:30),rfeControl = control)
require(caret)
x<-filterVarImp(train_814v1,factor(train_814v1$y))
write.csv(x,file="varimp.csv")
train_814v2<-train_814v1[,c("y","gender","age","isinstalled_1014","isinstalled_317","isinstalled_316")]
train_814v3<-na.omit(train_814v1[,c("y",
"isinstalled_783",
"isinstalled_757",
"isinstalled_779",
"isinstalled_959",
"isinstalled_960",
"isinstalled_1007",
"isinstalled_256",
"isinstalled_777",
"isinstalled_209",
"isinstalled_782",
"isinstalled_706",
"isinstalled_787",
"isinstalled_406",
"isinstalled_407",
"isinstalled_761",
"isinstalled_252",
"isinstalled_263",
"isinstalled_774",
"isinstalled_253",
"isinstalled_781",
"isinstalled_1014",
"isinstalled_751",
"isinstalled_1012",
"isinstalled_775",
"isinstalled_778",
"isinstalled_1015",
"isinstalled_254",
"isinstalled_562",
"isinstalled_691",
"isinstalled_758",
"isinstalled_752",
# "phone_brand",
"isinstalled_166",
"isinstalled_731",
"isinstalled_732",
"cnt_day",
"isinstalled_755",
"isinstalled_788",
"isinstalled_564",
#"device_model",
"isinstalled_168",
"cnt_hour_06",
"isinstalled_183",
"cnt",
"pct_hour_06",
"isinstalled_737",
"isinstalled_738",
"isinstalled_1011",
"cnt_hour_07",
"isinstalled_1005",
"isinstalled_1019",
"isinstalled_709",
"isinstalled_1020",
"cnt_date_0504",
"isinstalled_163",
"cnt_date_0503"
)])
# --- Build the modelling formula and fit a small caret/gbm grid ------------
n<-colnames(train_814v3)
# (BUG FIX: removed `train_814v3[is.na(train_814v3)] <- NA`, a no-op that
# reassigned NA over NA.)
form<-as.formula(paste("y~",paste(n[!n %in% c("y") ],collapse="+")))
# BUG FIX: is.nan() has no data.frame method, so the original
# `train_814v3[!is.nan(train_814v3),]` errored at runtime.  Drop rows with
# NaN in any numeric column instead (factor columns cannot hold NaN).
# NOTE(review): assumes train_814v3 has at least one row, which holds here
# because it was built with na.omit() on a non-empty selection.
row_has_nan <- rowSums(sapply(train_814v3, function(col) {
if (is.numeric(col)) is.nan(col) else rep(FALSE, length(col))
})) > 0
train_814v3 <- train_814v3[!row_has_nan, ]
# Small GBM hyperparameter grid; 5-fold CV repeated twice.
myTuneGrid <- expand.grid(n.trees = 1:5,interaction.depth = 2:5,shrinkage = 0.5,n.minobsinnode=2)
fitControl <- trainControl(method = "repeatedcv", number = 5,repeats = 2, verboseIter = FALSE,returnResamp = "all")
myModel <- train(form,data = train_814v3,method = "gbm",distribution="multinomial",trControl = fitControl,tuneGrid = myTuneGrid)
lapply(train_814v3,function(x) class(x))
train_814v3$pct_hour_06
is.factor(train_814v3$y)
#idx_test <- which(!id %in% unlist(ids))
#test_data <- x[idx_test,]
result<-predict(myModel,train_814v3,type="prob")
train_814v3<-cbind(train_814v3,result)
library(neuralnet)
n<-names(train_814v3)
n
require(dplyr)
train_814v3[]
colnames(train_814v3)[56]<-"F23"
colnames(train_814v3)
form<-as.formula(paste("y~",paste(n[!n %in% c("y") ],collapse="+")))
form
lapply(train_814v3,function(x) levels(x))
for (i in 3:67)
{
if (is.factor(train_814v3[,i])==TRUE)
{ train_814v3[,i]<- mapvalues(train_814v3[,i], from = c("1", "0"), to = c("1", "-1"))
}
}
unique(train_814v3$isinstalled_706)
class(train_814v3[,2])
train_814v3[,2] <- mapvalues(train_814v3[,2], from = c("1", "0"), to = c("1", "-1"))
train_814v3[1,66]
colnames(train_814v3)
colnames(train_814v3)[56]<-"F_23"
colnames(train_814v3)[57]<-"F_24_26"
colnames(train_814v3)[58]<-"F_27_28"
colnames(train_814v3)[59]<-"F_29_32"
colnames(train_814v3)[60]<-"F_33_42"
colnames(train_814v3)[61]<-"F_43"
colnames(train_814v3)[62]<-"M_22"
colnames(train_814v3)[63]<-"M_23_26"
colnames(train_814v3)[64]<-"M_27_28"
colnames(train_814v3)[65]<-"M_29_31"
colnames(train_814v3)[66]<-"M_32_38"
colnames(train_814v3)[67]<-"M_39"
colnames(train_814v3[,1])
class(train_814v3$isinstalled_783)
unique(train_814v3$isinstalled_777)
train_814v4<-train_814v3
for(i in which(sapply(train_814v4,class)=="factor")) {train_814v4[,i] <- as.numeric(as.factor(train_814v4[,i]))}
require(neuralnet)
f<-neuralnet(form,data=train_814v4,hidden=c(10,10,10),linear.output = F)
neur<-prediction(f,train_814v4,type="raw")
# --- xgboost multiclass model and Kaggle submission ------------------------
require(xgboost)
depth <- 10
shrk <- 0.2
ntree <- 100
# Unique target classes (outer parens print the value).
# NOTE(review): y and id were converted to one-column data.frames earlier
# (as.data.frame), so unique(y), y[idx_train] and id %in% ... operate on a
# data.frame rather than a vector here -- verify the indexing does what is
# intended before reusing this section.
(group_name <- na.omit(unique(y)))
idx_train <- which(id %in% unlist(ids))
idx_test <- which(!id %in% unlist(ids))
train_data <- as.matrix(x[idx_train,])
test_data <- as.matrix(x[idx_test,])
# 0-based class indices, as required by multi:softprob.
train_label <- match(y[idx_train],group_name)-1
test_label <- match(y[idx_test],group_name)-1
dtrain <- xgb.DMatrix(train_data,label=train_label,missing=NA)
dtest <- xgb.DMatrix(test_data,label=test_label,missing=NA)
param <- list(booster="gbtree",
num_class=length(group_name),
objective="multi:softprob",
eval_metric="mlogloss",
eta=shrk,
max.depth=depth,
subsample=0.7,
colsample_bytree=0.7,
num_parallel_tree=1)
watchlist <- list(train=dtrain)
# set.seed(114)
# fit_cv <- xgb.cv(params=param,
# data=dtrain,
# nrounds=ntree*100000,
# watchlist=watchlist,
# nfold=5,
# early.stop.round=3,
# verbose=1)
# ntree should be 1100 to get 2.29934
ntree <- 50
set.seed(114)
fit_xgb <- xgb.train(params=param,
data=dtrain,
nrounds=ntree,
watchlist=watchlist,
verbose=1)
pred <- predict(fit_xgb,dtest,ntreelimit=ntree)
# Predictions come back as one flat vector of class probabilities; reshape
# to one row per device, one column per class.
pred_detail <- t(matrix(pred,nrow=length(group_name)))
res_submit <- cbind(id=id[idx_test],as.data.frame(pred_detail))
colnames(res_submit) <- c("device_id",group_name)
write.csv(res_submit,file="submit_v0_2.csv",row.names=F,quote=F)
sapply(train_814v1,class)
| /talkingdatascriptv1.R | no_license | akhilghorpade/talkingdata_kaggle | R | false | false | 13,365 | r | rm(list=ls())
require(data.table)
label_train <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/gender_age_train.csv",
colClasses=c("character","character",
"integer","character"))
count(label_train$group)
sample(label_train)
nrow(label_train)
label_test <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/gender_age_test.csv",
colClasses=c("character"))
nrow(label_test)
label_test$gender <- label_test$age <- label_test$group <- NA
label <- rbind(label_train,label_test)
setkey(label,device_id)
rm(label_test,label_train);gc()
brand <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/phone_brand_device_model.csv",
colClasses=c("character","character","character"))
setkey(brand,device_id)
brand0 <- unique(brand,by=NULL)
brand0 <- brand0[sample(nrow(brand0)),]
nrow(brand0)
brand2 <- brand0[-which(duplicated(brand0$device_id)),]
duplicated(label_train$group)
label1 <- merge(label,brand2,by="device_id",all.x=T)
rm(brand,brand0,brand2);gc()
events <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/events.csv",
colClasses=c("character","character","character",
"numeric","numeric"))
setkey(events,device_id)
sample(unique(events$timestamp))
events0 <- events[events$timestamp>="2016-05-01 00:00:00" &
events$timestamp<="2016-05-07 23:59:59",]
timestamp <- strptime(events0$timestamp,format="%Y-%m-%d %H:%M:%S")
events0$date <- strftime(timestamp,format="%m%d")
events0$hour <- strftime(timestamp,format="%H")
events1 <- events0[,list(cnt=length(event_id)),by="device_id"]
events2 <- events0[,list(cnt_day=length(unique(date))),by="device_id"]
events3 <- events0[,list(cnt_date=length(event_id)),by="device_id,date"]
events33 <- reshape(events3,direction="wide",sep="_",
v.names="cnt_date",timevar="date",idvar="device_id")
events33[is.na(events33)] <- 0
events4 <- events0[,list(cnt_hour=length(event_id)),by="device_id,hour"]
events44 <- reshape(events4,direction="wide",sep="_",
v.names="cnt_hour",timevar="hour",idvar="device_id")
events44[is.na(events44)] <- 0
events5 <- merge(events3,events1,by="device_id",all.x=T)
events5$pct_date <- events5$cnt_date/events5$cnt
events55 <- reshape(events5[,list(device_id,date,pct_date)],direction="wide",sep="_",
v.names="pct_date",timevar="date",idvar="device_id")
events55[is.na(events55)] <- 0
events6 <- merge(events4,events1,by="device_id",all.x=T)
events6$pct_hour <- events6$cnt_hour/events6$cnt
events66 <- reshape(events6[,list(device_id,hour,pct_hour)],direction="wide",sep="_",
v.names="pct_hour",timevar="hour",idvar="device_id")
events66[is.na(events66)] <- 0
label2 <- merge(merge(merge(merge(merge(merge(label1,events1,by="device_id",all.x=T),
events2,by="device_id",all.x=T),
events33,by="device_id",all.x=T),
events44,by="device_id",all.x=T),
events55,by="device_id",all.x=T),
events66,by="device_id",all.x=T)
rm(events1,events2,events3,events33,events4,events44,
events5,events55,events6,events66,timestamp,events,events0);gc()
app_label1 <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/app_labels.csv",colClasses=rep("character",2))
app_label2 <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/label_categories.csv",
colClasses=rep("character",2))
app_label <- merge(app_label1,app_label2,by="label_id",all.x=T)
rm(app_label1,app_label2);gc()
app_label <- app_label[,list(labels=paste(label_id,collapse=",")),by="app_id"]
setkey(app_label,app_id)
event_app <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/app_events.csv",
colClasses=rep("character",4))
event_app$is_installed <- NULL
setkey(event_app,app_id)
event_app <- unique(event_app[,list(event_id,app_id)],by=NULL)
event_app_cat <- merge(event_app,app_label,by="app_id")
f_split_paste <- function(z){paste(unique(unlist(strsplit(z,","))),collapse=",")}
event_cat <- event_app_cat[,list(labels=f_split_paste(labels)),by="event_id"]
rm(event_app,event_app_cat,app_label);gc()
setkey(event_cat,event_id)
events <- fread("C:/Users/Akhil/Documents/talkingdata_kaggle/events.csv",
colClasses=c("character","character","character",
"numeric","numeric"))
setkeyv(events,c("device_id","event_id"))
device_event_appcat <- merge(events[,list(device_id,event_id)],
event_cat,by="event_id")
rm(events,event_cat);gc()
device_appcat <- device_event_appcat[,list(labels=f_split_paste(labels)),by="device_id"]
rm(device_event_appcat);gc()
tmp <- strsplit(device_appcat$labels,",")
device_appcat_long <- data.table(device_id=rep(device_appcat$device_id,
times=sapply(tmp,length)),
label=unlist(tmp),isinstalled=1)
device_appcat_wide <- reshape(device_appcat_long,direction="wide",sep="_",
v.names="isinstalled",timevar="label",idvar="device_id")
device_appcat_wide[is.na(device_appcat_wide)] <- 0
rm(device_appcat_long,device_appcat,tmp);gc()
label3 <- merge(label2,device_appcat_wide,by="device_id",all.x=T)
label3 <- label3[sample(nrow(label3)),]
id <- label3$device_id
y <- label3$group
count(y)
y2 <- label3[,list(gender,age)]
x <- label3[,-c(1:4),with=F]
x$is_active_7d <- 1-as.integer(is.na(x$cnt))
ids_train <- id[!is.na(y)]
set.seed(114)
ids <- split(ids_train,sample(length(ids_train)) %% 5)
x <- as.data.frame(x)
for(i in which(sapply(x,class)=="character")) {x[,i] <- as.numeric(as.factor(x[,i]))}
rm(i)
y<-as.data.frame(y)
y2<-as.data.frame(y2)
id<-as.data.frame(id)
train_814<-cbind(id,y,y2,x)
train_814<-as.data.frame(train_814)
train_814v1<-as.data.frame(na.omit(train_814[which(train_814$id %in% unlist(ids)),]))
require(caret)
for ( i in 1:563 )
{
ifelse (length(unique(train_814v1[,i]))>53,
train_814v1[,i]<- as.numeric(train_814v1[,i]),
train_814v1[,i]<- as.factor(train_814v1[,i]))
}
control<-rfeControl(functions=rfFuncs,method="cv",number=10)
results<-rfe(train_814v1[,c(3:4,7:563)],train_814v1[,2],sizes=c(1:30),rfeControl = control)
require(caret)
x<-filterVarImp(train_814v1,factor(train_814v1$y))
write.csv(x,file="varimp.csv")
train_814v2<-train_814v1[,c("y","gender","age","isinstalled_1014","isinstalled_317","isinstalled_316")]
train_814v3<-na.omit(train_814v1[,c("y",
"isinstalled_783",
"isinstalled_757",
"isinstalled_779",
"isinstalled_959",
"isinstalled_960",
"isinstalled_1007",
"isinstalled_256",
"isinstalled_777",
"isinstalled_209",
"isinstalled_782",
"isinstalled_706",
"isinstalled_787",
"isinstalled_406",
"isinstalled_407",
"isinstalled_761",
"isinstalled_252",
"isinstalled_263",
"isinstalled_774",
"isinstalled_253",
"isinstalled_781",
"isinstalled_1014",
"isinstalled_751",
"isinstalled_1012",
"isinstalled_775",
"isinstalled_778",
"isinstalled_1015",
"isinstalled_254",
"isinstalled_562",
"isinstalled_691",
"isinstalled_758",
"isinstalled_752",
# "phone_brand",
"isinstalled_166",
"isinstalled_731",
"isinstalled_732",
"cnt_day",
"isinstalled_755",
"isinstalled_788",
"isinstalled_564",
#"device_model",
"isinstalled_168",
"cnt_hour_06",
"isinstalled_183",
"cnt",
"pct_hour_06",
"isinstalled_737",
"isinstalled_738",
"isinstalled_1011",
"cnt_hour_07",
"isinstalled_1005",
"isinstalled_1019",
"isinstalled_709",
"isinstalled_1020",
"cnt_date_0504",
"isinstalled_163",
"cnt_date_0503"
)])
n<-colnames(train_814v3)
train_814v3[is.na(train_814v3)]<-NA
form<-as.formula(paste("y~",paste(n[!n %in% c("y") ],collapse="+")))
train_814v3<-train_814v3[!is.nan(train_814v3),]
myTuneGrid <- expand.grid(n.trees = 1:5,interaction.depth = 2:5,shrinkage = 0.5,n.minobsinnode=2)
fitControl <- trainControl(method = "repeatedcv", number = 5,repeats = 2, verboseIter = FALSE,returnResamp = "all")
myModel <- train(form,data = train_814v3,method = "gbm",distribution="multinomial",trControl = fitControl,tuneGrid = myTuneGrid)
lapply(train_814v3,function(x) class(x))
train_814v3$pct_hour_06
is.factor(train_814v3$y)
#idx_test <- which(!id %in% unlist(ids))
#test_data <- x[idx_test,]
result<-predict(myModel,train_814v3,type="prob")
train_814v3<-cbind(train_814v3,result)
library(neuralnet)
n<-names(train_814v3)
n
require(dplyr)
train_814v3[]
colnames(train_814v3)[56]<-"F23"
colnames(train_814v3)
form<-as.formula(paste("y~",paste(n[!n %in% c("y") ],collapse="+")))
form
lapply(train_814v3,function(x) levels(x))
for (i in 3:67)
{
if (is.factor(train_814v3[,i])==TRUE)
{ train_814v3[,i]<- mapvalues(train_814v3[,i], from = c("1", "0"), to = c("1", "-1"))
}
}
unique(train_814v3$isinstalled_706)
class(train_814v3[,2])
train_814v3[,2] <- mapvalues(train_814v3[,2], from = c("1", "0"), to = c("1", "-1"))
train_814v3[1,66]
colnames(train_814v3)
colnames(train_814v3)[56]<-"F_23"
colnames(train_814v3)[57]<-"F_24_26"
colnames(train_814v3)[58]<-"F_27_28"
colnames(train_814v3)[59]<-"F_29_32"
colnames(train_814v3)[60]<-"F_33_42"
colnames(train_814v3)[61]<-"F_43"
colnames(train_814v3)[62]<-"M_22"
colnames(train_814v3)[63]<-"M_23_26"
colnames(train_814v3)[64]<-"M_27_28"
colnames(train_814v3)[65]<-"M_29_31"
colnames(train_814v3)[66]<-"M_32_38"
colnames(train_814v3)[67]<-"M_39"
colnames(train_814v3[,1])
class(train_814v3$isinstalled_783)
unique(train_814v3$isinstalled_777)
train_814v4<-train_814v3
for(i in which(sapply(train_814v4,class)=="factor")) {train_814v4[,i] <- as.numeric(as.factor(train_814v4[,i]))}
require(neuralnet)
f<-neuralnet(form,data=train_814v4,hidden=c(10,10,10),linear.output = F)
neur<-prediction(f,train_814v4,type="raw")
require(xgboost)
depth <- 10
shrk <- 0.2
ntree <- 100
(group_name <- na.omit(unique(y)))
idx_train <- which(id %in% unlist(ids))
idx_test <- which(!id %in% unlist(ids))
train_data <- as.matrix(x[idx_train,])
test_data <- as.matrix(x[idx_test,])
train_label <- match(y[idx_train],group_name)-1
test_label <- match(y[idx_test],group_name)-1
dtrain <- xgb.DMatrix(train_data,label=train_label,missing=NA)
dtest <- xgb.DMatrix(test_data,label=test_label,missing=NA)
param <- list(booster="gbtree",
num_class=length(group_name),
objective="multi:softprob",
eval_metric="mlogloss",
eta=shrk,
max.depth=depth,
subsample=0.7,
colsample_bytree=0.7,
num_parallel_tree=1)
watchlist <- list(train=dtrain)
# set.seed(114)
# fit_cv <- xgb.cv(params=param,
# data=dtrain,
# nrounds=ntree*100000,
# watchlist=watchlist,
# nfold=5,
# early.stop.round=3,
# verbose=1)
# ntree should be 1100 to get 2.29934
ntree <- 50
set.seed(114)
fit_xgb <- xgb.train(params=param,
data=dtrain,
nrounds=ntree,
watchlist=watchlist,
verbose=1)
pred <- predict(fit_xgb,dtest,ntreelimit=ntree)
pred_detail <- t(matrix(pred,nrow=length(group_name)))
res_submit <- cbind(id=id[idx_test],as.data.frame(pred_detail))
colnames(res_submit) <- c("device_id",group_name)
write.csv(res_submit,file="submit_v0_2.csv",row.names=F,quote=F)
sapply(train_814v1,class)
|
#########################################################################################
#
# Functions to extract summary and statistics of interest from the Cox regression output
# then format them into tables for exporting to LaTex and Excel
# and use in reports
#
# Nathan Green
# 11-2012
#
#########################################################################################
extractCox <- function(cox){
##
## Pull the hazard-ratio statistics for the LAST covariate in a fitted
## Cox model summary (the infection-status term is assumed to be listed
## last in the model formula).
## cox: a summary(coxph(Surv(start, stop, status) ~ age + inf, data))
## Returns a one-row matrix: beta, exp(beta), lower/upper 95% CI, p-value
## (all rounded to 3 decimal places).
dpl <- 3
coefTab <- coef(cox)
lastRow <- nrow(coefTab)
beta <- round(coefTab[lastRow, "coef"], dpl)      # log hazard ratio
se <- round(coefTab[lastRow, "se(coef)"], dpl)    # standard error of beta
p <- round(coefTab[lastRow, "Pr(>|z|)"], dpl)     # Wald p-value
CI <- round(cox$conf.int[lastRow, c("lower .95", "upper .95")], dpl)
# Keep this cbind() call as-is: the column names "beta" and "p" come from
# symbol deparse, and the CI columns are intentionally unnamed.
cbind(beta, "exp(beta)"=round(exp(beta),3), CI[1], CI[2], p)
}
table.HR <- function(output.HR){
#
# Each organism group & Cox PH method alternative format of output data
# call: res <- table.HR(output.HR)
#
# Flattens the nested list of Cox model summaries
# output.HR[[organism]][[method]][[event]] (each leaf a summary(coxph(...)))
# into one long data.frame with a row per organism/method/event combination
# and the extractCox() statistics as the remaining columns.
#
namesOrganisms <- names(output.HR)
namesMethods <- names(output.HR[[1]])
colNames <- c("organism", "method", "type", "beta", "exp(beta)", "Lower CI", "Upper CI", "p")
# Seed with a single all-NA row; it is stripped again before returning.
table.HR <- data.frame(matrix(ncol = length(colNames)))
for (org in namesOrganisms){
for (meth in namesMethods){
namesEvent <- names(output.HR[[org]][[meth]]) # different length for different methods
for (event in namesEvent){
# NOTE(review): growing with rbind() in a loop is O(n^2), and c() on
# the mixed row coerces the numeric statistics to character; fine for
# the small tables this is used on, but worth knowing downstream.
table.HR <- rbind(table.HR,
c(org, meth, event,
extractCox(output.HR[[org]][[meth]][[event]])))
}
}
}
colnames(table.HR) <- colNames
table.HR <- table.HR[!is.na(table.HR[,1]),] # remove empty rows
table.HR
}
#write.table(res, "HCAItable_output.txt", sep="\t")
## Print results in a LaTeX-ready form
#xtable(res)
table2.HR <- function(res, model){
##
## rearrange table.HR in a report style
## used in box plotting via HRboxplot.batch()
## model: subdistn, cause-specific
##
## hr=exp(beta) & upper CI & lower CI
## disch time only, disch full, death time only, death full
##
## NOTE(review): this assumes that, for the chosen model, every organism
## contributes exactly 4 CONSECUTIVE rows in res, in the fixed order
## atime/afull/dtime/dfull -- the rows are picked positionally via
## firstrow+i-1 below.  Confirm that ordering holds for the input table.
namesGroup <- unique(res$organism)
numGroup <- length(namesGroup)
# Keep only the requested model's rows and the HR + CI columns.
res.sub <- res[res$method==model, c("organism","exp(beta)","Lower CI","Upper CI")]
subHeads <- c("HR","LCI","UCI")
colHeads <- c("organism", paste("atime",subHeads), paste("afull",subHeads), paste("dtime",subHeads), paste("dfull",subHeads))
res.new <- data.frame(matrix(ncol = length(colHeads), nrow = numGroup), check.rows=FALSE)
names(res.new) <- colHeads
for (j in 1:numGroup){
# res.temp starts as NA so the cbind() below lines up with the leading
# "organism" column; that slot is overwritten after the loop.
res.temp <- NA
firstrow <- min(which(res.sub$organism==namesGroup[j]))
for (i in 1:4){
res.temp <- cbind(res.temp, res.sub[firstrow+i-1,-1])
}
res.new[j,] <- res.temp
}
res.new[,1] <- namesGroup
res.new
}
## FUNCTION END ##
table3.HR <- function(res, hrtype, organism=FALSE){
## table format used in the JPIDS paper
##
## res:      long table as produced by table.HR() (columns group, method,
##           exp(beta), Lower CI, Upper CI)
## hrtype:   which method's rows to report; one of "naive",
##           "timedeptsubdistn" or "timedeptcausespec"
##           (docstring previously said "timedependentcausespec", which the
##           code never matched)
## organism: TRUE to relabel/reorder rows as the fixed 12 organism groups
##           used in the paper (res must then contain exactly those 12
##           groups in the original order); FALSE (default) keeps the group
##           names found in res.
##           BUG FIX: `organism` was an undeclared free variable, so the
##           function only worked if a global of that name happened to
##           exist; it is now an explicit optional parameter.
## call: table3.HR(res, hrtype="naive")
##       table3.HR(res, hrtype="timedeptsubdistn")
##       table3.HR(res, hrtype="timedeptcausespec", organism=TRUE)
if(hrtype=="timedeptsubdistn"){
colNames <- c("Group","Disch Time-adjusted","Disch Fully adjusted","Death Time-adjusted","Death Fully adjusted")
}else if(hrtype=="naive"){
colNames <- c("Group","Disch", "Death", "Both")
}else if(hrtype=="timedeptcausespec"){
colNames <- c("Group","Disch Time-adjusted","Disch Fully adjusted","Death Time-adjusted","Death Fully adjusted","Both Time-adjusted","Both Fully adjusted")
}
else {stop("Model type unidentified")}
# Seed with a single all-NA row; it is stripped again before returning.
res.new <- data.frame(matrix(ncol=length(colNames)))
colnames(res.new) <- colNames
groupnames <- unique(res$group)
for (name in groupnames){
## find rows for given group and HR method
whichrows <- which(res$group==name & res$method==hrtype) # & !is.na(res[,"exp(beta)"]))
rowTotal <- NULL
## one "HR (LCI, UCI)" cell per matching row
## (seq_along replaces 1:length(whichrows), which misbehaved when a group
## had no rows for the requested method)
for (j in seq_along(whichrows)){
temp <- paste(res[whichrows[j],"exp(beta)"], " (", res[whichrows[j],"Lower CI"], ", ", res[whichrows[j],"Upper CI"], ")", sep="")
rowTotal <- c(rowTotal, temp)
}
res.new <- rbind(res.new, c(name,rowTotal))
}
# Drop the NA seed row and the leading name column (names become rownames).
res.new <- res.new[!is.na(res.new[,1]),-1]
if (organism==TRUE){
## when group by organism
## reformat names and reorder into the paper's presentation order
rownames(res.new) <- c("All",
"Gram-positive",
"Gram-negative",
"CoNS",
"Enterococcus spp.",
"S. aureus",
"Other (Gram-positive)",
"Other (Gram-negative)",
"E. Coli",
"Non-p. Streptococci",
"Klebsiella spp.",
"Enterobacter spp.")
res.new <- res.new[c(1,2,3,4,9,12,5,11,10,6,7,8),]
}else{rownames(res.new) <- groupnames}
res.new
}
## END FUNCTION ##
###########################
# Data set summary tables #
###########################
## get interquartile range
## Format the 1st and 3rd quartile of x as "[q1,q3]" (rounded to 2 dp).
## Uses summary() so quartiles match summary()'s default rounding.
iqr <- function(x){
qs <- summary(x)
sprintf("[%s,%s]", round(qs[["1st Qu."]], 2), round(qs[["3rd Qu."]], 2))
}
## summary statistics excluding NAs
na.sd <- function(x) sd(x, na.rm = TRUE)         # standard deviation, NAs dropped
na.mean <- function(x) mean(x, na.rm = TRUE)     # arithmetic mean, NAs dropped
na.median <- function(x) median(x, na.rm = TRUE) # median, NAs dropped
summaryTableAll <- function(survData){
#
# summary table of dataset descriptive stats by inf/non-inf
# mean/median (sd or IQR)
# out <- summaryTableAll(survDataByGroup[["all"]])
#
# survData: one row per patient with columns infstatus (0/1), age, time,
#           spectime, event (0/1) and gender -- assumed coding; confirm
#           against the data build upstream.
# Returns a pandoc grid table (requires the pander package for
# pandoc.table()).  The earlier mean/median (sd) variants are kept below
# as commented-out alternatives.
survData.mix <- survData[survData$infstatus==0,] # non-infected patients (controls) only
survData.inf <- survData[survData$infstatus==1,] # infected patients (cases) only
dp <- 2
# One rbind() row per statistic; each row is
# (cases value, cases prop/IQR, controls value, controls prop/IQR, all, all).
out <- rbind(
## sample sizes
c(format(nrow(survData.inf),nsmall=1), round(nrow(survData.inf)/nrow(survData),dp),
round(nrow(survData.mix)), round(nrow(survData.mix)/nrow(survData),dp),
round(nrow(survData)), 1),
## ages
#c(paste(round(mean(survData.inf$age)),"/",median(survData.inf$age)),round(sd(survData.inf$age)),
c(paste(round(mean(survData.inf$age)),"/",median(survData.inf$age)),iqr(survData.inf$age),
#paste(round(mean(survData.mix$age)),"/",median(survData.mix$age)),round(sd(survData.mix$age)),
paste(round(mean(survData.mix$age)),"/",median(survData.mix$age)),iqr(survData.mix$age),
#paste(round(mean(survData$age)),"/",median(survData$age)),round(sd(survData$age))),
paste(round(mean(survData$age)),"/",median(survData$age)), iqr(survData$age) ),
## length of stays
#c(paste(round(mean(survData.inf$time)),"/",median(survData.inf$time)),round(sd(survData.inf$time)),
# paste(round(mean(survData.mix$time, na.rm=T)),"/",median(survData.mix$time, na.rm=T)),round(sd(survData.mix$time, na.rm=T)),
# paste(round(mean(survData$time, na.rm=T)),"/",median(survData$time, na.rm=T)),round(sd(survData$time, na.rm=T))),
c(paste(round(mean(survData.inf$time),dp),"/",median(survData.inf$time)),iqr(survData.inf$time),
paste(round(mean(survData.mix$time, na.rm=T),dp),"/",median(survData.mix$time, na.rm=T)),iqr(survData.mix$time),
paste(round(mean(survData$time, na.rm=T),dp),"/",median(survData$time, na.rm=T)),iqr(survData$time) ),
## infection times
#c(paste(round(mean(survData.inf$spectime)),"/",median(survData.inf$spectime)),round(sd(survData.inf$spectime)),
# paste(round(mean(survData.mix$spectime)),"/",median(survData.mix$spectime)),round(sd(survData.mix$spectime)),
#paste(round(mean(survData$spectime)),"/",median(survData$spectime)),round(sd(survData$spectime))),
c(paste(round(mean(survData.inf$spectime),dp),"/",median(survData.inf$spectime)),iqr(survData.inf$spectime),
paste(round(mean(survData.mix$spectime),dp),"/",median(survData.mix$spectime)),iqr(survData.mix$spectime),
paste(round(mean(survData$spectime),dp),"/",median(survData$spectime)),iqr(survData$spectime) ),
## in-hospital deaths
# NOTE(review): table(...)[2] assumes event has both levels present; a
# subgroup with no deaths would yield NA here.
c(round(table(survData.inf$event)[2]),round(table(survData.inf$event)[2]/(table(survData.inf$event)[2]+table(survData.inf$event)[1]),dp),
round(table(survData.mix$event)[2]),round(table(survData.mix$event)[2]/(table(survData.mix$event)[2]+table(survData.mix$event)[1]),dp),
round(table(survData$event)[2]),round(table(survData$event)[2]/(table(survData$event)[2]+table(survData$event)[1]),dp)),
## sex (female)
c(round(table(survData.inf$gender)[1]),round(table(survData.inf$gender)[1]/(table(survData.inf$gender)[1]+table(survData.inf$gender)[2]),dp),
round(table(survData.mix$gender)[1]),round(table(survData.mix$gender)[1]/(table(survData.mix$gender)[1]+table(survData.mix$gender)[2]),dp),
round(table(survData$gender)[1]),round(table(survData$gender)[1]/(table(survData$gender)[1]+table(survData$gender)[2]),dp)))
rownames(out) <- c("Patient sample size","Age (years)","LoS (days)","Time from admission to infection (days)","Deaths (frequency)","Sex (F) (frequency)")
colnames(out) <- c("HA-BSI", "Prop", "Non-HA-BSI", "Prop", "All", "Prop")
## rearrange columns
out <- out[,c(3,4,1,2,5,6)]
# write.table(out, ".\\output\\summaryTableAll.txt")
return(pandoc.table(out, caption="Table: Dataset summary statistics including risk factors, comorbidites and patient movements summary statistics.
For count dat the subset size if given; for continuous values mean/median is given.
Note that a patient can be in more than one risk factor group.", style = "grid", split.tables=Inf, justify="left"))
}
## END FUNCTION ##
summaryTableGroup <- function(survDataByGroup){
##
## summary table of descriptive statistics by (organism) group
## sd or IQR
## call: out <- summaryTableOrg(survDataByGroup)
##
## survDataByGroup: named list of data frames, one per organism group, each
##                  with columns infstatus, age, gender, time, spectime,
##                  event and the comorbidity indicator columns summed below.
## Only infected cases (infstatus == 1) contribute to each row's statistics.
## Returns a character matrix with one row per group; NOTE the hard-coded
## row reordering at the end requires exactly the group names listed there.
out <- NA
dp <- 2
for (group in names(survDataByGroup)){
out <- rbind(out,
c(group,
##sample size
nrow(survDataByGroup[[group]][survDataByGroup[[group]]$infstatus==1,]),
## age: "mean/median" then "[Q1,Q3]" from the file-level iqr() helper
paste(round(na.mean(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1]),dp),"/",
na.median(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1]),
# " (",round(na.sd(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1])),")",sep=""),
iqr(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1]), sep=""),
## gender: count of the FIRST gender level -- presumably F; TODO confirm
round(table(survDataByGroup[[group]]$gender[survDataByGroup[[group]]$infstatus==1])[1],dp),
## LoS
paste(round(na.mean(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1]),dp),"/",
na.median(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1]),
# " (",round(na.sd(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1])),")", sep=""),
iqr(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1]), sep=""),
## infection time
paste(round(na.mean(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1]),dp),"/",
na.median(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1]),
# " (",round(na.sd(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1])),")",sep=""),
iqr(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1]), sep=""),
## deaths: second level of table(event) -- presumably event == 1
round(table(survDataByGroup[[group]]$event[survDataByGroup[[group]]$infstatus==1])[2]),
## comorbidity / risk factor counts among infected cases
sum(survDataByGroup[[group]]$cancer[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$prem[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$cong[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$surgical[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$cath[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$Tai[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$highRisk[survDataByGroup[[group]]$infstatus==1])
))
}
colnames(out) <- c("Organism","Sample size","Age (years)","Sex (F)","LoS (days)","Time from admission to infection (days)","Deaths",
"Cancer","Premature birth","Congenital disease","Surgical","In-dwelling catheter","Tai","At least one risk factor")
rownames(out) <- out[,"Organism"]
out <- out[!is.na(out[,1]),-1] # remove empty rows
## rearrange rows
#out <- out[c("all", "1", "-1", "COAGULASE NEGATIVE STAPHYLOCOCCUS", "E. COLI", "ENTEROBACTER", "ENTEROCOCCUS", "KLEBSIELLA", "NON-PYOGENIC STREPTOCOCCUS",
# "STAPHYLOCOCCUS AUREUS", "other", "P. AERUGINOSA", "MICROCOCCUS", "STREP B", "SALMONELLA", "STREPTOCOCCUS PNEUMONIAE", "N. MENINGITIDIS", "STREP A", "ACINETOBACTER"),]
out <- out[c("all", "1", "-1", "COAGULASE NEGATIVE STAPHYLOCOCCUS", "E. COLI", "ENTEROBACTER", "ENTEROCOCCUS", "KLEBSIELLA", "NON-PYOGENIC STREPTOCOCCUS",
"STAPHYLOCOCCUS AUREUS", "other (Gram-positive)", "other (Gram-negative)", "P. AERUGINOSA", "MICROCOCCUS", "STREP B", "SALMONELLA", "STREPTOCOCCUS PNEUMONIAE", "N. MENINGITIDIS", "STREP A", "ACINETOBACTER"),]
#write.table(out, ".\\output\\summaryTableOrg.txt")
return(out)
# pandoc.table(out, caption="caption:...", style = "grid")
}
## END FUNCTION ##
summaryTableRF <- function(survData){
##
## output summary table of the patients
## comorbidities and risk factors
## split by infected and non-infected cases
##
## call: out <- summaryTableRF(survDataByGroup$all)
##
## survData: data frame with logical/0-1 comorbidity columns (cancer, prem,
##           cong, surgical, cath, Tai, highRisk), infstatus (0/1), and the
##           HES description columns used below (hes_admimethdescription,
##           hes_neocaredescription, hes_admisorcdescription) -- assumed
##           codings; TODO confirm against the HES data build.
## Returns a matrix with one row per risk factor / admission category:
## counts and proportions for non-infected, infected, and all patients.
out <- NULL
emptyRow <- c(NA, NA, NA, NA, NA, NA)
dp <- 2
ncase <- nrow(survData)
## risk factor and comorbidities
## record true (present) cases only
## empty rows for extra row labels
out <- rbind(out, emptyRow)
## helper: append one row of counts (split by infstatus) and row-wise
## proportions for the logical column named rf, plus overall count/proportion
freqRow <- function(survData, out, rf){
x <- as.data.frame(table(survData$infstatus, survData[,rf]))
y <- as.data.frame(prop.table(table(survData$infstatus, survData[,rf]),1))
all <- c(sum(survData[,rf]), round(sum(survData[,rf])/length(survData[,rf]), dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
out
}
out <- freqRow(survData, out, "cancer")
out <- freqRow(survData, out, "prem")
out <- freqRow(survData, out, "cong")
out <- freqRow(survData, out, "surgical")
out <- freqRow(survData, out, "cath")
out <- freqRow(survData, out, "Tai")
out <- freqRow(survData, out, "highRisk")
## type of admission (admission method)
out <- rbind(out, emptyRow)
### Elective
elective <- c("Elective - booked", "Elective - planned", "Elective - from waiting list")
x <- as.data.frame(with(survData,
table(infstatus, hes_admimethdescription%in%elective)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admimethdescription%in%elective)),1))
all <- c(sum(survData$hes_admimethdescription%in%elective), round(sum(survData$hes_admimethdescription%in%elective)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
### Emergency
emergency <- c("Emergency - other means, including patients who arrive via A&E department of another HC provider",
"Emergency - via A&E services, including casualty department of provider",
"Emergency - via General Practitioner (GP)",
"Emergency - via Bed Bureau, including Central Bureau",
"Emergency - via consultant out-patient clinic")
x <- as.data.frame(with(survData,
table(infstatus, hes_admimethdescription%in%emergency)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admimethdescription%in%emergency)),1))
all <- c(sum(survData$hes_admimethdescription%in%emergency), round(sum(survData$hes_admimethdescription%in%emergency)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
## total: column-wise sums over the previous two rows (elective + emergency)
out <- rbind(out, aggregate(as.numeric(tail(out,2)), by=list(c(1,1,2,2,3,3,4,4,5,5,6,6)), sum)$x)
## intensive neocare
neocare <- c("Level 1 intensive care", "Level 2 intensive care")
x <- as.data.frame(with(survData,
table(infstatus, hes_neocaredescription%in%neocare)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_neocaredescription%in%neocare)),1))
all <- c(sum(survData$hes_neocaredescription%in%neocare), round(sum(survData$hes_neocaredescription%in%neocare)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
## origin of patient (admission description)
out <- rbind(out, emptyRow)
### another hospital
transfer.txt <- "Transfer of any admitted patient from another hospital provider"
x <- as.data.frame(with(survData,
table(infstatus, hes_admimethdescription==transfer.txt)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admimethdescription==transfer.txt)),1))
all <- c(sum(survData$hes_admimethdescription==transfer.txt), round(sum(survData$hes_admimethdescription==transfer.txt)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
### residence
residence <- c("The usual place of residence, including no fixed abode",
"Temporary place of residence when usually resident elsewhere")
x <- as.data.frame(with(survData,
table(infstatus, hes_admisorcdescription%in%residence)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admisorcdescription%in%residence)),1))
all <- c(sum(survData$hes_admisorcdescription%in%residence), round(sum(survData$hes_admisorcdescription%in%residence)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
## total: column-wise sums over the previous two rows (hospital + residence)
out <- rbind(out, aggregate(as.numeric(tail(out,2)), by=list(c(1,1,2,2,3,3,4,4,5,5,6,6)), sum)$x)
## rearange columns
if (ncol(out)==6){
colnames(out) <- c("Non-HA-BSI", "HA-BSI", "Prop", "Prop", "All", "Prop")
out <- out[,c(1,3,2,4,5,6)]}
else {colnames(out) <- c("Non-HA-BSI", "Prop")}
rownames(out) <- c("Risk Factors", "Cancer", "Premature birth", "Congenital disorder", "Surgical", "In-dwelling catheter", "Tai", "At least one",
"Type of Admission", "Elective", "Emergency", "TOTAL",
"Intensive neonatal care",
"Origin of patient", "Another hospital", "Residence", "TOTAL")
#write.table(out, ".\\output\\summaryTableRF.txt")
return(out)
#return(pandoc.table(out, caption="caption:...", style = "grid"))
}
## END FUNCTION ##
LOStable <- function(output.LOS, se=TRUE, orgs=TRUE){
  ## Excess length-of-stay summary table, one row per group.
  ##
  ## output.LOS: named list; each element carries $clos.data$e.phi (excess LOS
  ##             estimate) and $se (its standard error)
  ## se:   TRUE  -> report "estimate (standard error)"
  ##       FALSE -> report "estimate (lower95, upper95)" (normal approximation)
  ## orgs: TRUE  -> groups are the fixed organism list; rows renamed/reordered
  ## call: LOS <- LOStable(output.LOS)
  dp <- 2
  format_group <- function(grp){
    ## a group with no estimate is flagged by NA in its first component
    if (is.na(grp[[1]][1])) {
      return(NA)
    }
    est <- round(grp$clos.data$e.phi, dp)
    if (se) {
      paste0(est, " (", round(grp$se, dp), ")")
    } else {
      lci <- round(grp$clos.data$e.phi - 1.96 * grp$se, dp)
      uci <- round(grp$clos.data$e.phi + 1.96 * grp$se, dp)
      paste0(est, " (", lci, ", ", uci, ")")
    }
  }
  LOS <- unlist(lapply(output.LOS, format_group), use.names = FALSE)
  #res <- data.frame(Group=capwords(names(output.LOS), strict = TRUE), LOS)
  res <- data.frame(Group=names(output.LOS), "Excess LOS"=LOS)
  if (orgs == TRUE) {
    ## fixed organism grouping: pretty names, then report ordering
    rownames(res) <- c("All",
                       "Gram-positive",
                       "Gram-negative",
                       "CoNS",
                       "Enterococcus spp.",
                       "S. aureus",
                       "Other (Gram-positive)",
                       "Other (Gram-negative)",
                       "E. Coli",
                       "Non-p. Streptococci",
                       "Klebsiella spp.",
                       "Enterobacter spp.")
    neworder <- c(1, 2, 3, 4, 9, 12, 5, 11, 10, 6, 7, 8)
  } else {
    ## general, generic default grouping
    rownames(res) <- names(output.LOS)
    neworder <- seq_len(NROW(res))
  }
  #write.table(res, ".\\output\\tables\\LOStable.txt", sep="\t")
  res[neworder, -1, drop = FALSE]
}
## END FUNCTION ##
commonlist <- function(group, cut=20, plot=FALSE){
  #
  # Ordered list of the values in `group` by frequency, most common first.
  #
  # group: vector/factor of labels (e.g. organism names)
  # cut:   frequency cut-off; only values occurring more than `cut` times kept
  # plot:  if TRUE, also plot the retained frequencies
  # Returns a data frame: the value, its Freq, and its prop(ortion) of total,
  # ordered by decreasing frequency.
  x <- as.data.frame(table(group))             # table of frequencies
  x <- data.frame(x, prop=x$Freq/sum(x$Freq))  # percentage of total
  keep <- x$Freq > cut
  if (plot){
    # BUG FIX: as.data.frame(table(group)) names the level column after the
    # argument ("group"), not "Var1", so x$Var1 was NULL and plot() failed;
    # index the first column by position instead.
    plot(x[[1]][keep], x$Freq[keep], las=3)}
  x[keep,] [order(x$Freq[keep], decreasing=TRUE),] # cutoff and reorder
}
## END FUNCTION ##
HCAIsummary <- function(survData){
##
## Function to aggregate the operations for summary statistics for age, length of stay & BSI time
## output to console
##
## survData: data frame with columns age, time (length of stay), spectime
##           (admission-to-specimen/BSI time), event (death indicator) and
##           gender -- codings assumed; TODO confirm with the data build step.
## Returns NULL (explicit empty return()); called purely for its printed
## console output, in the fixed order below.
trimPerc <- 0.0 # trimmed/Winsorising mean cut-off percentage
# NOTE(review): with trimPerc = 0 the "trimmed mean" below equals the plain mean
print(c("age", summary(survData$age))) # quartiles
print(sd(survData$age, na.rm=TRUE))
print(table(survData$age)) # frequencies
print(c("LOS", summary(survData$time))) # hospital length of stay
print(c("sd", sd(survData$time, na.rm=TRUE)))
print(c("trimmed mean", mean(survData$time, trim=trimPerc))) # trimmmed mean
#winsor.means(survData$time, trim = trimPerc) # winsorised mean comparison
print(c("spectime", summary(survData$spectime)))
print(c("sd", sd(survData$spectime, na.rm=TRUE)))
print(c("deaths", table(survData$event))) # proportion of death-discharge
print(c("gender", table(survData$gender))) # proportion of male-female
return()
}
## END FUNCTION ##
catcodes <- function(codes){
  #
  # Rearrange the reference look-up table so that all codes belonging to a
  # group sit in one comma-separated column, then write the result to disk.
  # `codes` needs columns Code and Group; clean.ref() is a project helper.
  #
  # example inputs:
  # codes <- read.csv(".\\Reference_tables\\explicitSMEcodes.csv")
  # codes <- read.csv(".\\Reference_tables\\SSI_OPCS_codes.csv")
  # codes <- read.table(".\\Reference_tables\\ICDGroup_match.txt")
  codes$Code <- clean.ref(codes$Code)
  collapsed <- aggregate(codes, by=list(codes$Group), paste, collapse=",")
  write.table(collapsed[, 1:2], file=".\\Reference_tables\\matchcodesGrouped.txt", sep="\t")
}
## END FUNCTION ##
#
# Functions to extract summary and statistics of interest from the Cox regression output
# then format them into tables for exporting to LaTex and Excel
# and use in reports
#
# Nathan Green
# 11-2012
#
#########################################################################################
extractCox <- function(cox){
  ##
  ## Extract a subset of the Cox PH output values for the infection covariate.
  ## The infection status variable is assumed to be the LAST covariate in the
  ## model, i.e. the last row of the coefficient matrix.
  ##
  ## cox: summary(coxph(...)) object -- anything with a coefficient matrix
  ##      (columns "coef", "se(coef)", "Pr(>|z|)") and a $conf.int matrix
  ##      (columns "lower .95", "upper .95"), e.g.
  ##      summary(coxph(Surv(start, stop, status)~age+inf, data))
  ## Returns a 1-row matrix: beta, exp(beta), lower/upper 95% CI, p.
  dpl <- 3 # decimal places for all reported values
  lastRow <- nrow(coef(cox))
  beta <- round(coef(cox)[lastRow,"coef"], dpl) # log hazard ratio
  p <- round(coef(cox)[lastRow,"Pr(>|z|)"], dpl) # p-value
  CI <- round(cox$conf.int[lastRow,c("lower .95","upper .95")], dpl) # lower and upper 95% confidence interval
  ## FIX: exp(beta) is now rounded with dpl rather than a hard-coded 3 so the
  ## precision stays in sync; CI columns are explicitly named (previously
  ## unnamed); the dead `se` extraction (computed but never returned) removed.
  cbind(beta,
        "exp(beta)" = round(exp(beta), dpl),
        "Lower CI" = CI[1],
        "Upper CI" = CI[2],
        p)
}
table.HR <- function(output.HR){
#
# Each organism group & Cox PH method alternative format of output data
# call: res <- table.HR(output.HR)
#
# output.HR: nested list output.HR[[organism]][[method]][[event type]] of
#            fitted Cox model summaries, flattened here into one long table
#            via extractCox() (one row per organism x method x event type).
# Returns a data frame whose columns are all character: c() below coerces
# the numeric extractCox() values to character alongside the label strings.
# NOTE(review): the local variable deliberately(?) shadows the function name
# table.HR -- legal in R but confusing; consider renaming.
namesOrganisms <- names(output.HR)
# methods are taken from the FIRST organism's entry and assumed identical
# for every organism -- TODO confirm the upstream builder guarantees this
namesMethods <- names(output.HR[[1]])
colNames <- c("organism", "method", "type", "beta", "exp(beta)", "Lower CI", "Upper CI", "p")
# seed a one-row all-NA frame; that NA row is stripped again at the end
table.HR <- data.frame(matrix(ncol = length(colNames)))
for (org in namesOrganisms){
for (meth in namesMethods){
namesEvent <- names(output.HR[[org]][[meth]]) # different length for different methods
for (event in namesEvent){
table.HR <- rbind(table.HR,
c(org, meth, event,
extractCox(output.HR[[org]][[meth]][[event]])))
}
}
}
colnames(table.HR) <- colNames
table.HR <- table.HR[!is.na(table.HR[,1]),] # remove empty rows
table.HR
}
#write.table(res, "HCAItable_output.txt", sep="\t")
## Print results in a LaTeX-ready form
#xtable(res)
table2.HR <- function(res, model){
##
## rearrange table.HR in a report style
## used in box plotting HRboxplot.batch()
## model: subdistn, cause-specific
##
## hr=exp(beta) & upper CI & lower CI
## disch time only, disch full, death time only, death full
##
## res:   long table with columns organism, method, exp(beta), Lower CI,
##        Upper CI (as produced upstream)
## model: value of res$method to keep
## Returns one row per organism with 12 value columns (4 outcome variants
## x HR/LCI/UCI).
## NOTE(review): the loop below assumes each organism has EXACTLY 4
## consecutive rows in res.sub, in the fixed order atime/afull/dtime/dfull
## -- it indexes firstrow..firstrow+3 positionally; confirm upstream order.
namesGroup <- unique(res$organism)
numGroup <- length(namesGroup)
res.sub <- res[res$method==model, c("organism","exp(beta)","Lower CI","Upper CI")]
subHeads <- c("HR","LCI","UCI")
colHeads <- c("organism", paste("atime",subHeads), paste("afull",subHeads), paste("dtime",subHeads), paste("dfull",subHeads))
res.new <- data.frame(matrix(ncol = length(colHeads), nrow = numGroup), check.rows=FALSE)
names(res.new) <- colHeads
for (j in 1:numGroup){
res.temp <- NA # leading NA fills the organism column, overwritten below
firstrow <- min(which(res.sub$organism==namesGroup[j]))
for (i in 1:4){
res.temp <- cbind(res.temp, res.sub[firstrow+i-1,-1])
}
res.new[j,] <- res.temp
}
res.new[,1] <- namesGroup
res.new
}
## FUNCTION END ##
table3.HR <- function(res, hrtype, organism = TRUE){
  ## Table format used in the JPIDS paper: one row per group, one
  ## "HR (LCI, UCI)" cell per outcome type.
  ##
  ## res:      long-format HR table with columns group, method, exp(beta),
  ##           Lower CI, Upper CI -- one row per outcome type, in the fixed
  ##           column order implied by colNames below; TODO confirm whether
  ##           the grouping column is named "group" or "organism" upstream.
  ## hrtype:   which model's rows to extract; one of "naive",
  ##           "timedeptsubdistn", "timedeptcausespec"
  ## organism: is the grouping by organism type (fixed 12-row layout) or not.
  ##           FIX: this was previously read from an undefined free variable
  ##           `organism` (i.e. a global); it is now an explicit argument
  ##           defaulting to TRUE, the original organism-grouped use case.
  ##
  ## call: table3.HR(res, hrtype="naive")
  ##       table3.HR(res, hrtype="timedeptsubdistn")
  ##       table3.HR(res, hrtype="timedeptcausespec")
  if(hrtype=="timedeptsubdistn"){
    colNames <- c("Group","Disch Time-adjusted","Disch Fully adjusted","Death Time-adjusted","Death Fully adjusted")
  }else if(hrtype=="naive"){
    colNames <- c("Group","Disch", "Death", "Both")
  }else if(hrtype=="timedeptcausespec"){
    colNames <- c("Group","Disch Time-adjusted","Disch Fully adjusted","Death Time-adjusted","Death Fully adjusted","Both Time-adjusted","Both Fully adjusted")
  }else{
    stop("Model type unidentified")
  }
  res.new <- data.frame(matrix(ncol=length(colNames)))
  colnames(res.new) <- colNames
  groupnames <- unique(res$group)
  for (name in groupnames){
    ## rows for this group fitted with the requested HR method
    whichrows <- which(res$group==name & res$method==hrtype)
    rowTotal <- NULL
    for (j in seq_along(whichrows)){ # seq_along: safe when whichrows is empty
      ## "HR (LCI, UCI)" cell text
      temp <- paste(res[whichrows[j],"exp(beta)"], " (", res[whichrows[j],"Lower CI"], ", ", res[whichrows[j],"Upper CI"], ")", sep="")
      rowTotal <- c(rowTotal, temp)
    }
    res.new <- rbind(res.new, c(name,rowTotal))
  }
  res.new <- res.new[!is.na(res.new[,1]),-1] # drop the NA seed row + Group col
  if (organism==TRUE){
    ## when group by organism: reformat names and reorder for the report
    rownames(res.new) <- c("All",
                           "Gram-positive",
                           "Gram-negative",
                           "CoNS",
                           "Enterococcus spp.",
                           "S. aureus",
                           "Other (Gram-positive)",
                           "Other (Gram-negative)",
                           "E. Coli",
                           "Non-p. Streptococci",
                           "Klebsiella spp.",
                           "Enterobacter spp.")
    res.new <- res.new[c(1,2,3,4,9,12,5,11,10,6,7,8),]
  }else{rownames(res.new) <- groupnames}
  res.new
}
## END FUNCTION ##
###########################
# Data set summary tables #
###########################
## get interquartile range
iqr <- function(x) {
  ## Format the interquartile range of x as "[Q1,Q3]" (2 dp), using the
  ## quartiles reported by summary() (which are significance-rounded).
  qs <- summary(x)
  paste0("[", round(qs[["1st Qu."]], 2), ",", round(qs[["3rd Qu."]], 2), "]")
}
## summary statistics excluding NAs
na.sd <- function(x) sd(x, na.rm = TRUE)         # standard deviation, NAs dropped
na.mean <- function(x) mean(x, na.rm = TRUE)     # mean, NAs dropped
na.median <- function(x) median(x, na.rm = TRUE) # median, NAs dropped
summaryTableAll <- function(survData){
#
# summary table of dataset descriptive stats by inf/non-inf
# mean/median (sd or IQR)
# out <- summaryTableAll(survDataByGroup[["all"]])
#
# survData: data frame with columns infstatus (1 = infected/HA-BSI case,
#           0 = non-infected control), age, time (length of stay), spectime
#           (admission-to-infection time), event (death indicator) and gender
#           -- codings inferred from usage below; TODO confirm upstream.
# Returns the pandoc.table() rendering (pander package) of a 6-row character
# matrix (sample size, age, LoS, infection time, deaths, sex), with columns
# Non-HA-BSI / HA-BSI / All, each followed by a proportion column.
# Continuous cells are "mean/median" strings followed by an "[Q1,Q3]" cell
# from the file-level iqr() helper (sd variants kept commented out).
survData.mix <- survData[survData$infstatus==0,] # non-infected patients (controls) only
survData.inf <- survData[survData$infstatus==1,] # infected patients (cases) only
dp <- 2 # decimal places for rounded means/proportions
out <- rbind(
## sample sizes
c(format(nrow(survData.inf),nsmall=1), round(nrow(survData.inf)/nrow(survData),dp),
round(nrow(survData.mix)), round(nrow(survData.mix)/nrow(survData),dp),
round(nrow(survData)), 1),
## ages
#c(paste(round(mean(survData.inf$age)),"/",median(survData.inf$age)),round(sd(survData.inf$age)),
c(paste(round(mean(survData.inf$age)),"/",median(survData.inf$age)),iqr(survData.inf$age),
#paste(round(mean(survData.mix$age)),"/",median(survData.mix$age)),round(sd(survData.mix$age)),
paste(round(mean(survData.mix$age)),"/",median(survData.mix$age)),iqr(survData.mix$age),
#paste(round(mean(survData$age)),"/",median(survData$age)),round(sd(survData$age))),
paste(round(mean(survData$age)),"/",median(survData$age)), iqr(survData$age) ),
## length of stays
## NOTE(review): na.rm=T is applied to the mix/all means but not to the
## infected group -- presumably infected cases always have a recorded time;
## confirm, otherwise the infected-row mean becomes NA.
#c(paste(round(mean(survData.inf$time)),"/",median(survData.inf$time)),round(sd(survData.inf$time)),
# paste(round(mean(survData.mix$time, na.rm=T)),"/",median(survData.mix$time, na.rm=T)),round(sd(survData.mix$time, na.rm=T)),
# paste(round(mean(survData$time, na.rm=T)),"/",median(survData$time, na.rm=T)),round(sd(survData$time, na.rm=T))),
c(paste(round(mean(survData.inf$time),dp),"/",median(survData.inf$time)),iqr(survData.inf$time),
paste(round(mean(survData.mix$time, na.rm=T),dp),"/",median(survData.mix$time, na.rm=T)),iqr(survData.mix$time),
paste(round(mean(survData$time, na.rm=T),dp),"/",median(survData$time, na.rm=T)),iqr(survData$time) ),
## infection times
#c(paste(round(mean(survData.inf$spectime)),"/",median(survData.inf$spectime)),round(sd(survData.inf$spectime)),
# paste(round(mean(survData.mix$spectime)),"/",median(survData.mix$spectime)),round(sd(survData.mix$spectime)),
#paste(round(mean(survData$spectime)),"/",median(survData$spectime)),round(sd(survData$spectime))),
c(paste(round(mean(survData.inf$spectime),dp),"/",median(survData.inf$spectime)),iqr(survData.inf$spectime),
paste(round(mean(survData.mix$spectime),dp),"/",median(survData.mix$spectime)),iqr(survData.mix$spectime),
paste(round(mean(survData$spectime),dp),"/",median(survData$spectime)),iqr(survData$spectime) ),
## in-hospital deaths: second level of table(event) -- presumably event == 1
c(round(table(survData.inf$event)[2]),round(table(survData.inf$event)[2]/(table(survData.inf$event)[2]+table(survData.inf$event)[1]),dp),
round(table(survData.mix$event)[2]),round(table(survData.mix$event)[2]/(table(survData.mix$event)[2]+table(survData.mix$event)[1]),dp),
round(table(survData$event)[2]),round(table(survData$event)[2]/(table(survData$event)[2]+table(survData$event)[1]),dp)),
## sex (female): first level of table(gender) assumed to be F -- TODO confirm
c(round(table(survData.inf$gender)[1]),round(table(survData.inf$gender)[1]/(table(survData.inf$gender)[1]+table(survData.inf$gender)[2]),dp),
round(table(survData.mix$gender)[1]),round(table(survData.mix$gender)[1]/(table(survData.mix$gender)[1]+table(survData.mix$gender)[2]),dp),
round(table(survData$gender)[1]),round(table(survData$gender)[1]/(table(survData$gender)[1]+table(survData$gender)[2]),dp)))
rownames(out) <- c("Patient sample size","Age (years)","LoS (days)","Time from admission to infection (days)","Deaths (frequency)","Sex (F) (frequency)")
colnames(out) <- c("HA-BSI", "Prop", "Non-HA-BSI", "Prop", "All", "Prop")
## rearrange columns
out <- out[,c(3,4,1,2,5,6)]
# write.table(out, ".\\output\\summaryTableAll.txt")
return(pandoc.table(out, caption="Table: Dataset summary statistics including risk factors, comorbidites and patient movements summary statistics.
For count dat the subset size if given; for continuous values mean/median is given.
Note that a patient can be in more than one risk factor group.", style = "grid", split.tables=Inf, justify="left"))
}
## END FUNCTION ##
summaryTableGroup <- function(survDataByGroup){
##
## summary table of descriptive statistics by (organism) group
## sd or IQR
## call: out <- summaryTableOrg(survDataByGroup)
##
## survDataByGroup: named list of data frames, one per organism group, each
##                  with columns infstatus, age, gender, time, spectime,
##                  event and the comorbidity indicator columns summed below.
## Only infected cases (infstatus == 1) contribute to each row's statistics.
## Returns a character matrix with one row per group; NOTE the hard-coded
## row reordering at the end requires exactly the group names listed there.
out <- NA
dp <- 2
for (group in names(survDataByGroup)){
out <- rbind(out,
c(group,
##sample size
nrow(survDataByGroup[[group]][survDataByGroup[[group]]$infstatus==1,]),
## age: "mean/median" then "[Q1,Q3]" from the file-level iqr() helper
paste(round(na.mean(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1]),dp),"/",
na.median(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1]),
# " (",round(na.sd(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1])),")",sep=""),
iqr(survDataByGroup[[group]]$age[survDataByGroup[[group]]$infstatus==1]), sep=""),
## gender: count of the FIRST gender level -- presumably F; TODO confirm
round(table(survDataByGroup[[group]]$gender[survDataByGroup[[group]]$infstatus==1])[1],dp),
## LoS
paste(round(na.mean(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1]),dp),"/",
na.median(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1]),
# " (",round(na.sd(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1])),")", sep=""),
iqr(survDataByGroup[[group]]$time[survDataByGroup[[group]]$infstatus==1]), sep=""),
## infection time
paste(round(na.mean(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1]),dp),"/",
na.median(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1]),
# " (",round(na.sd(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1])),")",sep=""),
iqr(survDataByGroup[[group]]$spectime[survDataByGroup[[group]]$infstatus==1]), sep=""),
## deaths: second level of table(event) -- presumably event == 1
round(table(survDataByGroup[[group]]$event[survDataByGroup[[group]]$infstatus==1])[2]),
## comorbidity / risk factor counts among infected cases
sum(survDataByGroup[[group]]$cancer[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$prem[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$cong[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$surgical[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$cath[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$Tai[survDataByGroup[[group]]$infstatus==1]),
sum(survDataByGroup[[group]]$highRisk[survDataByGroup[[group]]$infstatus==1])
))
}
colnames(out) <- c("Organism","Sample size","Age (years)","Sex (F)","LoS (days)","Time from admission to infection (days)","Deaths",
"Cancer","Premature birth","Congenital disease","Surgical","In-dwelling catheter","Tai","At least one risk factor")
rownames(out) <- out[,"Organism"]
out <- out[!is.na(out[,1]),-1] # remove empty rows
## rearrange rows
#out <- out[c("all", "1", "-1", "COAGULASE NEGATIVE STAPHYLOCOCCUS", "E. COLI", "ENTEROBACTER", "ENTEROCOCCUS", "KLEBSIELLA", "NON-PYOGENIC STREPTOCOCCUS",
# "STAPHYLOCOCCUS AUREUS", "other", "P. AERUGINOSA", "MICROCOCCUS", "STREP B", "SALMONELLA", "STREPTOCOCCUS PNEUMONIAE", "N. MENINGITIDIS", "STREP A", "ACINETOBACTER"),]
out <- out[c("all", "1", "-1", "COAGULASE NEGATIVE STAPHYLOCOCCUS", "E. COLI", "ENTEROBACTER", "ENTEROCOCCUS", "KLEBSIELLA", "NON-PYOGENIC STREPTOCOCCUS",
"STAPHYLOCOCCUS AUREUS", "other (Gram-positive)", "other (Gram-negative)", "P. AERUGINOSA", "MICROCOCCUS", "STREP B", "SALMONELLA", "STREPTOCOCCUS PNEUMONIAE", "N. MENINGITIDIS", "STREP A", "ACINETOBACTER"),]
#write.table(out, ".\\output\\summaryTableOrg.txt")
return(out)
# pandoc.table(out, caption="caption:...", style = "grid")
}
## END FUNCTION ##
summaryTableRF <- function(survData){
##
## output summary table of the patients
## comorbidities and risk factors
## split by infected and non-infected cases
##
## call: out <- summaryTableRF(survDataByGroup$all)
##
## survData: data frame with logical/0-1 comorbidity columns (cancer, prem,
##           cong, surgical, cath, Tai, highRisk), infstatus (0/1), and the
##           HES description columns used below (hes_admimethdescription,
##           hes_neocaredescription, hes_admisorcdescription) -- assumed
##           codings; TODO confirm against the HES data build.
## Returns a matrix with one row per risk factor / admission category:
## counts and proportions for non-infected, infected, and all patients.
out <- NULL
emptyRow <- c(NA, NA, NA, NA, NA, NA)
dp <- 2
ncase <- nrow(survData)
## risk factor and comorbidities
## record true (present) cases only
## empty rows for extra row labels
out <- rbind(out, emptyRow)
## helper: append one row of counts (split by infstatus) and row-wise
## proportions for the logical column named rf, plus overall count/proportion
freqRow <- function(survData, out, rf){
x <- as.data.frame(table(survData$infstatus, survData[,rf]))
y <- as.data.frame(prop.table(table(survData$infstatus, survData[,rf]),1))
all <- c(sum(survData[,rf]), round(sum(survData[,rf])/length(survData[,rf]), dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
out
}
out <- freqRow(survData, out, "cancer")
out <- freqRow(survData, out, "prem")
out <- freqRow(survData, out, "cong")
out <- freqRow(survData, out, "surgical")
out <- freqRow(survData, out, "cath")
out <- freqRow(survData, out, "Tai")
out <- freqRow(survData, out, "highRisk")
## type of admission (admission method)
out <- rbind(out, emptyRow)
### Elective
elective <- c("Elective - booked", "Elective - planned", "Elective - from waiting list")
x <- as.data.frame(with(survData,
table(infstatus, hes_admimethdescription%in%elective)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admimethdescription%in%elective)),1))
all <- c(sum(survData$hes_admimethdescription%in%elective), round(sum(survData$hes_admimethdescription%in%elective)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
### Emergency
emergency <- c("Emergency - other means, including patients who arrive via A&E department of another HC provider",
"Emergency - via A&E services, including casualty department of provider",
"Emergency - via General Practitioner (GP)",
"Emergency - via Bed Bureau, including Central Bureau",
"Emergency - via consultant out-patient clinic")
x <- as.data.frame(with(survData,
table(infstatus, hes_admimethdescription%in%emergency)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admimethdescription%in%emergency)),1))
all <- c(sum(survData$hes_admimethdescription%in%emergency), round(sum(survData$hes_admimethdescription%in%emergency)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
## total: column-wise sums over the previous two rows (elective + emergency)
out <- rbind(out, aggregate(as.numeric(tail(out,2)), by=list(c(1,1,2,2,3,3,4,4,5,5,6,6)), sum)$x)
## intensive neocare
neocare <- c("Level 1 intensive care", "Level 2 intensive care")
x <- as.data.frame(with(survData,
table(infstatus, hes_neocaredescription%in%neocare)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_neocaredescription%in%neocare)),1))
all <- c(sum(survData$hes_neocaredescription%in%neocare), round(sum(survData$hes_neocaredescription%in%neocare)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
## origin of patient (admission description)
out <- rbind(out, emptyRow)
### another hospital
transfer.txt <- "Transfer of any admitted patient from another hospital provider"
x <- as.data.frame(with(survData,
table(infstatus, hes_admimethdescription==transfer.txt)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admimethdescription==transfer.txt)),1))
all <- c(sum(survData$hes_admimethdescription==transfer.txt), round(sum(survData$hes_admimethdescription==transfer.txt)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
### residence
residence <- c("The usual place of residence, including no fixed abode",
"Temporary place of residence when usually resident elsewhere")
x <- as.data.frame(with(survData,
table(infstatus, hes_admisorcdescription%in%residence)))
y <- as.data.frame(prop.table(with(survData,
table(infstatus, hes_admisorcdescription%in%residence)),1))
all <- c(sum(survData$hes_admisorcdescription%in%residence), round(sum(survData$hes_admisorcdescription%in%residence)/ncase, dp))
out <- rbind(out, c(x[x$Var2==T,"Freq"], round(y[y$Var2==T,"Freq"],dp), all))
## total: column-wise sums over the previous two rows (hospital + residence)
out <- rbind(out, aggregate(as.numeric(tail(out,2)), by=list(c(1,1,2,2,3,3,4,4,5,5,6,6)), sum)$x)
## rearange columns
if (ncol(out)==6){
colnames(out) <- c("Non-HA-BSI", "HA-BSI", "Prop", "Prop", "All", "Prop")
out <- out[,c(1,3,2,4,5,6)]}
else {colnames(out) <- c("Non-HA-BSI", "Prop")}
rownames(out) <- c("Risk Factors", "Cancer", "Premature birth", "Congenital disorder", "Surgical", "In-dwelling catheter", "Tai", "At least one",
"Type of Admission", "Elective", "Emergency", "TOTAL",
"Intensive neonatal care",
"Origin of patient", "Another hospital", "Residence", "TOTAL")
#write.table(out, ".\\output\\summaryTableRF.txt")
return(out)
#return(pandoc.table(out, caption="caption:...", style = "grid"))
}
## END FUNCTION ##
LOStable <- function(output.LOS, se=TRUE, orgs=TRUE){
  ## Excess length-of-stay summary table, one row per group.
  ##
  ## output.LOS: named list; each element carries $clos.data$e.phi (excess LOS
  ##             estimate) and $se (its standard error)
  ## se:   TRUE  -> report "estimate (standard error)"
  ##       FALSE -> report "estimate (lower95, upper95)" (normal approximation)
  ## orgs: TRUE  -> groups are the fixed organism list; rows renamed/reordered
  ## call: LOS <- LOStable(output.LOS)
  dp <- 2
  format_group <- function(grp){
    ## a group with no estimate is flagged by NA in its first component
    if (is.na(grp[[1]][1])) {
      return(NA)
    }
    est <- round(grp$clos.data$e.phi, dp)
    if (se) {
      paste0(est, " (", round(grp$se, dp), ")")
    } else {
      lci <- round(grp$clos.data$e.phi - 1.96 * grp$se, dp)
      uci <- round(grp$clos.data$e.phi + 1.96 * grp$se, dp)
      paste0(est, " (", lci, ", ", uci, ")")
    }
  }
  LOS <- unlist(lapply(output.LOS, format_group), use.names = FALSE)
  #res <- data.frame(Group=capwords(names(output.LOS), strict = TRUE), LOS)
  res <- data.frame(Group=names(output.LOS), "Excess LOS"=LOS)
  if (orgs == TRUE) {
    ## fixed organism grouping: pretty names, then report ordering
    rownames(res) <- c("All",
                       "Gram-positive",
                       "Gram-negative",
                       "CoNS",
                       "Enterococcus spp.",
                       "S. aureus",
                       "Other (Gram-positive)",
                       "Other (Gram-negative)",
                       "E. Coli",
                       "Non-p. Streptococci",
                       "Klebsiella spp.",
                       "Enterobacter spp.")
    neworder <- c(1, 2, 3, 4, 9, 12, 5, 11, 10, 6, 7, 8)
  } else {
    ## general, generic default grouping
    rownames(res) <- names(output.LOS)
    neworder <- seq_len(NROW(res))
  }
  #write.table(res, ".\\output\\tables\\LOStable.txt", sep="\t")
  res[neworder, -1, drop = FALSE]
}
## END FUNCTION ##
commonlist <- function(group, cut=20, plot=FALSE){
  #
  # Frequency table of `group`, restricted to values occurring more than
  # `cut` times and ordered by decreasing frequency.
  #
  # group: vector/factor of group labels
  # cut:   frequency cut-off value (keep entries with Freq > cut)
  # plot:  if TRUE, also plot the kept frequencies
  # Returns a data frame with columns group, Freq and prop.
  #
  x <- as.data.frame(table(group))               # table of frequencies (column is named "group")
  x <- data.frame(x, prop=x$Freq/sum(x$Freq))    # percentage of total
  keep <- x$Freq > cut                           # hoisted: mask reused three times below
  if (plot){
    # BUG FIX: the first column is named "group" (after the argument symbol),
    # not "Var1"; x$Var1 was NULL, which broke the plot call
    plot(x$group[keep], x$Freq[keep], las=3)
  }
  x[keep, ][order(x$Freq[keep], decreasing=TRUE), ]  # cutoff and reorder
}
## END FUNCTION ##
HCAIsummary <- function(survData){
  ##
  ## Print summary statistics for age, hospital length of stay (time),
  ## specimen/BSI time (spectime), outcome (event) and gender to the console.
  ## survData: data frame with columns age, time, spectime, event, gender.
  ## Returns NULL (console output only).
  trimPerc <- 0.0 # trimmed/Winsorising mean cut-off percentage (0 = plain mean)
  print(c("age", summary(survData$age))) # quartiles
  print(sd(survData$age, na.rm=TRUE))
  print(table(survData$age)) # frequencies
  print(c("LOS", summary(survData$time))) # hospital length of stay
  print(c("sd", sd(survData$time, na.rm=TRUE)))
  # BUG FIX: na.rm added so the trimmed mean matches the other statistics,
  # which all ignore missing values (previously printed NA if time had NAs)
  print(c("trimmed mean", mean(survData$time, trim=trimPerc, na.rm=TRUE)))
  #winsor.means(survData$time, trim = trimPerc) # winsorised mean comparison
  print(c("spectime", summary(survData$spectime)))
  print(c("sd", sd(survData$spectime, na.rm=TRUE)))
  print(c("deaths", table(survData$event))) # proportion of death-discharge
  print(c("gender", table(survData$gender))) # proportion of male-female
  return()
}
## END FUNCTION ##
catcodes <- function(codes){
#
# Rearrange a reference look-up table so that all codes belonging to one
# group are collapsed into a single comma-separated cell (one row per group).
#
# codes: data frame with (at least) the columns `Code` and `Group`.
# Side effect: writes the grouped table to
# .\Reference_tables\matchcodesGrouped.txt (tab separated); the (invisible)
# return value is that of write.table, i.e. NULL.
#
# Example sources for `codes` (kept from the original author):
# codes <- read.csv(".\\Reference_tables\\explicitSMEcodes.csv")
# codes <- read.csv(".\\Reference_tables\\SSI_OPCS_codes.csv")
# codes <- read.table(".\\Reference_tables\\ICDGroup_match.txt")
#
# clean.ref() is a project-local helper; presumably it normalises the code
# strings before grouping -- TODO confirm its contract.
codes$Code <- clean.ref(codes$Code)
# one row per Group with codes collapsed to "c1,c2,..."; keep only the first
# two columns (grouping key and, presumably, the collapsed Code column --
# NOTE(review): confirm column order of the aggregate result)
x <- aggregate(codes, by=list(codes$Group), paste, collapse=",")[,1:2]
write.table(x, file=".\\Reference_tables\\matchcodesGrouped.txt", sep="\t")
}
## END FUNCTION ## |
library(tidyverse)
library(phyloseq)
load("mothur_phyloseq.RData")
load("qiime2_phyloseq.RData")
| /Project_01/load_phyloseq.R | no_license | louryan/MICB425_portfolio | R | false | false | 98 | r | library(tidyverse)
library(phyloseq)
load("mothur_phyloseq.RData")
load("qiime2_phyloseq.RData")
|
#' A class to store the important information of a model.
#'
#' The slots are used to store the important information of a model. The class is used to create objects for the
#' two algorithms implemented in seeds. Methods are implemented to easily calculate the nominal solution of the model and
#' change the details of the saved model.
#' The numerical solutions are calculated using the \pkg{deSolve} - package.
#'
#' @slot func A function containing the ode-equations of the model. For syntax look at the given examples of the \pkg{deSolve} package.
#' @slot times timesteps at which the model should be evaluated
#' @slot parms the parameters of the model
#' @slot input matrix containing the inputs with the time points
#' @slot measFunc function that converts the output of the ode solution
#' @slot y initial (state) values of the ODE system, has to be a vector
#' @slot meas matrix with the (experimental) measurements of the system
#' @slot sd optional standard deviations of the measurements, used by the algorithms as weights in the cost function
#' @slot custom customized link function
#' @slot nnStates bit vector that indicates if states should be observed by the root function
#' @slot nnTollerance tolerance at which a function is seen as zero
#' @slot resetValue value a state should be set to by an event
#'
#' @return an object of class odeModel which defines the model
#'
#' @export odeModel
#' @exportClass odeModel
#'
#' @import methods
#'
odeModel <- setClass(
#name of Class
"odeModel",
# slot types; the empty prototypes below keep a bare odeModel() call valid
slots = c(
func = "function",
times = "numeric",
parms = "numeric",
input = "data.frame",
measFunc = "function",
y = "numeric",
meas = "data.frame",
sd = "data.frame",
custom = 'logical',
nnStates = 'numeric',
nnTollerance = 'numeric',
resetValue = "numeric"
),
prototype = list(
func = function(x) { },
times = numeric(),
parms = numeric(),
input = data.frame(matrix(numeric(0), ncol = 0)),
measFunc = function(x) { },
y = numeric(0),
meas = data.frame(matrix(numeric(0), ncol = 0)),
sd = data.frame(matrix(numeric(0), ncol = 0)),
custom = FALSE,
nnStates = numeric(),
nnTollerance = numeric(),
resetValue = numeric()
),
# validity runs on construction and via validObject() in the setter methods
validity = function(object) {
# check inputs of matrix slot
# a model without evaluation times cannot be solved
if (sum(object@times) == 0) {
return("You have to specify the times on which the equation should be evaluated. A solution can only be calculated if the a intervall or specific timesteps are given. Set the 'times'' parameter.")
}
# when initial values and measurements are both given (and no custom link
# function is used), check that the measurement function maps a state
# vector onto the same number of observables as the measurement table
# (minus its time column)
if (length(object@y) != 0 && object@custom == FALSE && sum(colSums(object@meas)) != 0) {
m <- matrix(rep(0, length(object@y)), ncol = length(object@y))
if (is.null(object@measFunc(m)) == FALSE) {
testMeas <- object@measFunc(m)
if (ncol(testMeas) != (ncol(object@meas) - 1)) {
return("The returned results of the measurement function does not have the same
dimensions as the given measurements")
}
}
}
return(TRUE)
}
)
# Default S4 initializer: simply delegate to the parent initialize() so that
# prototype defaults are filled in and the validity function above is run.
setMethod('initialize', "odeModel", function(.Object, ...) {
.Object <- callNextMethod()
return(.Object)
})
checkMatrix <- function(argMatrix) {
  ## Return an error message string when the supplied matrix sums to zero;
  ## otherwise return NULL (invisibly), signalling "nothing to report".
  if (sum(argMatrix) != 0) {
    return(invisible(NULL))
  }
  caller_expr <- toString(deparse(substitute(argMatrix)))
  paste0(caller_expr, ' has to contain values not equal to 0.')
}
#' Set the model equation
#'
#' Set the model equation of the system in an odeModel object. Has to be a function that can be used with the deSolve package.
#'
#' @param odeModel an object of the class odeModel
#' @param func function describing the ode equation of the model
#'
#' @return an object of odeModel
#'
#' @examples
#' data("uvbModel")
#'
#' uvbModelEq <- function(t,x,parameters) {
#' with (as.list(parameters),{
#'
#' dx1 = ((-2) * ((ka1 * (x[1]^2) * (x[4]^2)) - (kd1 * x[5])) +
#' (-2) * ((ka2 * (x[1]^2) * x[2]) - (kd2 * x[3])) +
#' ((ks1 *((1) + (uv * n3 * (x[11] + fhy3_s)))) -
#' (kdr1 * ((1) + (n1 * uv)) * x[1])))
#' dx2 = ((-1) * ((ka2*(x[1]^2) * x[2]) - (kd2 * x[3])) +
#' (-1) * ((ka4 * x[2] * x[12]) - (kd4 * x[13])))
#' dx3 = (((ka2 * (x[1]^2) * x[2]) - (kd2* x[3])))
#' dx4 = ((-2) * (k1*(x[4]^2)) + (2) * (k2 * x[6]) +
#' (-2) * ((ka1 * (x[1]^2)* (x[4]^2)) - (kd1 * x[5])) +
#' (-1)* (ka3 * x[4] *x[7]))
#' dx5 = (((ka1 * (x[1]^2) * (x[4]^2)) -(kd1 * x[5])))
#' dx6 = ((-1) * (k2 * x[6]) + (k1 * (x[4]^2)) +(kd3 * (x[8]^2)))
#' dx7 = ((-1) * (ka3 * x[4] * x[7]) + ((ks2 * ((1) + (uv * x[5]))) -
#' (kdr2 * x[7])) + (2) * (kd3 * (x[8]^2)))
#' dx8 = ((-2) * (kd3 * x[8]^2) + (ka3 * x[4] * x[7]))
#' dx9 = 0
#' dx10 = 0
#' dx11 = (((ks3 * ((1) + (n2 * uv))) -(kdr3 * (((x[3] / (kdr3a + x[3])) +
#' (x[13] / (kdr3b + x[13]))) -(x[5] / (ksr + x[5]))) * x[11])))
#' dx12 = ((-1) * (ka4 * x[2] * x[12]) + (kd4 * x[13]))
#' dx13 =((ka4 * x[2] * x[12]) - (kd4 * x[13]))
#'
#' list(c(dx1,dx2,dx3,dx4,dx5,dx6,dx7,dx8,dx9,dx10,dx11,dx12,dx13))
#' })
#' }
#'
#' setModelEquation(uvbModel,uvbModelEq)
#'
#' @export
# Generic: replace the ODE right-hand-side function of an odeModel object.
setGeneric(name = "setModelEquation",
def = function(odeModel, func) {
standardGeneric("setModelEquation")
}
)
#' @rdname setModelEquation
setMethod(f = "setModelEquation",
signature = "odeModel",
definition = function(odeModel, func) {
# store the new ODE function, then re-run the S4 validity checks
odeModel@func <- func
validObject(odeModel)
return(odeModel)
}
)
#' Set the model parameters
#'
#' A method to set the model parameters of an odeModel object.
#'
#' @param odeModel an object of the class odeModel
#' @param parms a vector containing the parameters of the model
#'
#' @examples
#' data("uvbModel")
#'
#' newParas <- c( ks1=0.23,
#' ks2=4.0526,
#' kdr1=0.1,
#' kdr2=0.2118,
#' k1=0.0043,
#' k2=161.62,
#' ka1=0.0372,
#' ka2=0.0611,
#' ka3=4.7207,
#' kd1=94.3524,
#' kd2=50.6973,
#' kd3=0.5508,
#' ks3=0.4397,
#' kdr3=1.246,
#' uv=1,
#' ka4=10.1285,
#' kd4=1.1999,
#' n1=3,
#' n2=2,
#' n3=3.5,
#' kdr3a=0.9735,
#' kdr3b=0.406,
#' ksr=0.7537,
#' fhy3_s=5)
#'
#' newModel <- setParms(odeModel = uvbModel, parms = newParas)
#'
#' @return an object of odeModel
#'
#' @export
# Generic: replace the parameter vector of an odeModel object.
setGeneric(name = "setParms",
def = function(odeModel, parms) {
standardGeneric("setParms")
}
)
#' @rdname setParms
setMethod(f = "setParms",
signature = c("odeModel", 'numeric'),
definition = function(odeModel, parms) {
# store the new parameters, then re-run the S4 validity checks
odeModel@parms <- parms
validObject(odeModel)
return(odeModel)
}
)
#' Set the inputs of the model.
#'
#' If the model has an input it can be set with this function. The inputs
#' should be a dataframe, where the first column is the timesteps of the
#' inputs in the second column.
#'
#' @param odeModel an object of the class modelClass
#' @param input function describing the ode equation of the model
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data("uvbModel")
#'
#' model_times <- uvbModel@times
#' input <- rep(0,length(model_times))
#'
#' input_Dataframe <- data.frame(t = model_times, u = input)
#'
#' newModel <- setInput(odeModel = uvbModel,input = input_Dataframe)
#'
#' @export
# Generic: replace the input data frame (time points + input values).
setGeneric(name = "setInput",
def = function(odeModel, input) {
standardGeneric("setInput")
}
)
#' @rdname setInput
setMethod(f = "setInput",
signature = "odeModel",
definition = function(odeModel, input) {
# store the new input table, then re-run the S4 validity checks
odeModel@input <- input
validObject(odeModel)
return(odeModel)
}
)
#' Set the measurement equation for the model
#'
#' For a given model a measurement equation can be set. If no measurement function is set the
#' states become the output of the system. The function should be defined as in the example below.
#'
#' @param odeModel an object of the class odeModel
#' @param measFunc measurement function of the model. Has to be a R functions.
#' @param custom custom indexing for the measurement function (used by the Bayesian method)
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data("uvbModel")
#'
#' uvbMeasure <- function(x) {
#'
#' y1 = 2*x[,5] + x[,4] + x[,8]
#' y2 = 2*x[,5] + 2* x[,3] + x[,1]
#' y3 = x[,6]
#' y4 = x[,11]
#' y5 = x[,4]
#'
#' return(cbind(y1,y2,y3,y4,y5))
#' }
#'
#' newModel <- setMeasFunc(odeModel = uvbModel, measFunc = uvbMeasure)
#'
#' @export
# Generic: replace the measurement (link) function; `custom` is an optional
# flag for custom indexing (used by the Bayesian method).
setGeneric(name = "setMeasFunc",
def = function(odeModel, measFunc, custom) {
standardGeneric("setMeasFunc")
}
)
#' @rdname setMeasFunc
setMethod(f = "setMeasFunc",
signature = c('odeModel', 'function', 'missing'),
definition = function(odeModel, measFunc, custom) {
# variant without the custom flag: just store the measurement function
odeModel@measFunc <- measFunc
validObject(odeModel)
return(odeModel)
}
)
#' @rdname setMeasFunc
setMethod(f = "setMeasFunc",
          signature = c('odeModel', 'function', 'logical'),
          definition = function(odeModel, measFunc, custom) {
            ## Variant with an explicit `custom` flag (custom indexing used by
            ## the Bayesian method): store the function and the flag.
            # BUG FIX: the function was assigned to the data.frame slot `meas`
            # instead of `measFunc` (cf. the 'missing' variant above), which
            # corrupted the measurement slot and broke validity checking.
            odeModel@measFunc <- measFunc
            odeModel@custom <- custom
            validObject(odeModel)
            return(odeModel)
          }
)
#' Set the vector with the initial (state) values
#'
#' @param odeModel an object of the class odeModel
#' @param y vector with the initial values
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data("uvbModel")
#'
#' x0 = c(0.2,10,2,0,0,20,0,0,0,4.2,0.25,20,0)
#'
#' newModel <- setInitState(uvbModel, y = x0)
#'
#' @export
# Generic: replace the vector of initial state values.
setGeneric(name = "setInitState",
def = function(odeModel, y) {
standardGeneric("setInitState")
}
)
#' @rdname setInitState
setMethod(f = "setInitState",
signature = "odeModel",
definition = function(odeModel, y) {
# store the new initial state, then re-run the S4 validity checks
odeModel@y <- y
validObject(odeModel)
return(odeModel)
}
)
#' set measurements of the model
#'
#' The odeModel object stores all important information. Measurements of the objects can be set
#' directly by addressing the slot, or with this function.
#'
#' @param odeModel an object of the class odeModel
#' @param meas measurements of the model, a matrix with measurements of the model
#' and the corresponding time values
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data(uvbData)
#' data(uvbModel)
#'
#' measurements <- uvbData[,1:6]
#'
#' newModel <- setMeas(odeModel = uvbModel, meas = measurements)
#'
#' @export
# Generic: replace the table of (experimental) measurements.
setGeneric(name = "setMeas",
def = function(odeModel, meas) {
standardGeneric("setMeas")
}
)
#' @rdname setMeas
setMethod(f = "setMeas",
signature = 'odeModel',
definition = function(odeModel, meas) {
# store the new measurements, then re-run the S4 validity checks
odeModel@meas <- meas
validObject(odeModel)
return(odeModel)
}
)
#' Set the standard deviation of the measurements
#'
#' With multiple measurements a standard deviation can be calculated for every point of
#' measurement. The standard deviation is used to weigh the estimated data points in the
#' cost function.
#'
#' @param odeModel an object of the class odeModel
#' @param sd a matrix with the standard deviations of the measurements
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data(uvbData)
#' data(uvbModel)
#'
#' sd_uvb <- uvbData[,7:11]
#'
#' newModel <- setSd(odeModel = uvbModel, sd = sd_uvb)
#'
#' @export
# Generic: replace the standard deviations of the measurements (used as
# weights in the cost function).
setGeneric(name = "setSd",
def = function(odeModel, sd) {
standardGeneric("setSd")
}
)
#' @rdname setSd
setMethod(f = "setSd",
signature = "odeModel",
definition = function(odeModel, sd) {
# store the new standard deviations, then re-run the S4 validity checks
odeModel@sd <- sd
validObject(odeModel)
return(odeModel)
}
)
#### generate C code (internal function)
# Dispatches on which of `bden` (flag, presumably "Bayesian density" -- TODO
# confirm) and `nnStates` (non-negative state mask) are supplied, and forwards
# them to the project-local createCompModel(). Returns the model unchanged;
# the C file generation is a side effect of createCompModel().
setGeneric(name = 'genCCode',
def = function(odeModel, bden, nnStates) {
standardGeneric('genCCode')
}
)
setMethod(f = 'genCCode',
signature = c('odeModel', 'logical', 'missing'),
definition = function(odeModel, bden, nnStates) {
# bden given, no state constraints
createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, bden = bden)
return(odeModel)
}
)
setMethod(f = 'genCCode',
signature = c('odeModel', 'logical', 'numeric'),
definition = function(odeModel, bden, nnStates) {
# both bden and state constraints given
createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, bden = bden, nnStates = nnStates)
return(odeModel)
}
)
setMethod(f = 'genCCode',
signature = c('odeModel', 'missing', 'numeric'),
definition = function(odeModel, bden, nnStates) {
# only state constraints given
createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, nnStates = nnStates)
return(odeModel)
}
)
# nominal solution
#' Calculate the nominal solution of the model
#'
#' After an model is defined it can be evaluated. This returns the numerical solution
#' for the state equation before hidden inputs are calculated.
#'
#' @param odeModel a object of the class ode model describing the experiment
#'
#' @return a matrix with the numeric solution to the nominal ode equation
#'
#' @examples
#'
#' lotka_voltera <- function (t, x, parameters) {
#' with(as.list(c(x,parameters)), {
#' dx1 = x[1]*(alpha - beta*x[2])
#' dx2 = -x[2]*(gamma - delta*x[1])
#' return(list(c(dx1, dx2)))
#' })
#' }
#'
#' pars <- c(alpha = 2, beta = .5, gamma = .2, delta = .6)
#' init_state <- c(x1 = 10, x2 = 10)
#' time <- seq(0, 100, by = 1)
#' lotVolModel = odeModel(func = lotka_voltera, parms = pars, times = time, y = init_state)
#' nominalSol(lotVolModel)
#'
#' @export
setGeneric(name = 'nominalSol',
def = function(odeModel) {
# dispatch to the odeModel method defined below
standardGeneric('nominalSol')
}
)
#' @rdname nominalSol
setMethod(f = 'nominalSol',
          signature = c('odeModel'),
          definition = function(odeModel) {
            ## Solve the nominal ODE system (all hidden inputs set to zero).
            x0 <- odeModel@y
            ### get the times from the measurements
            # add case for missing input
            times <- odeModel@times
            if (sum(colSums(odeModel@input)) == 0) {
              # no external input given: use a zero forcing on the whole grid
              input <- rep(0, length(times))
              uList = list(cbind(times, input))
            } else {
              # interpolate the given input onto the evaluation grid
              input <- odeModel@input
              u <- apply(X = input[, -1, drop = F], MARGIN = 2, FUN = function(x) stats::approx(x = input[, 1], y = x, xout = times, rule = 2))
              uList = list(cbind(times, u[[1]]$y))
            }
            # hidden inputs w are identically zero for the nominal solution
            w <- matrix(rep(0, length(x0) * length(times)), ncol = length(x0))
            if (grepl("Rtools", Sys.getenv('PATH')) || (.Platform$OS.type != "windows")) {
              # compiled path: generate + compile the C representation, then
              # integrate via the shared library
              createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, nnStates = odeModel@nnStates)
              temp_compiled_model <- compileModel()
              wSplit <- split(w, rep(1:ncol(w), each = nrow(w)))
              wList <- lapply(wSplit, FUN = function(x) cbind(times, x))
              forcings <- c(uList, wList)
              if (sum(odeModel@nnStates) == 0) {
                resOde <- deSolve::ode(y = odeModel@y, times = times, func = "derivsc",
                                       parms = odeModel@parms, dllname = "model", initforc = "forcc",
                                       forcings = forcings, initfunc = "parmsc")
              } else {
                eventTol <- 0.0
                resetValue <- 0.0001
                # NOTE(review): only myEvent is used on this path -- the root
                # is the compiled C symbol "myroot"; myRoot is kept in case
                # createRoot() has side effects -- confirm.
                myRoot <- eval(parse(text = createRoot(rootStates = odeModel@nnStates)))
                myEvent <- eval(parse(text = createEvent(tolerance = eventTol, value = resetValue)))
                resOde <- deSolve::lsoda(y = odeModel@y, times = times, func = "derivsc",
                                         parms = odeModel@parms, dllname = "model", initforc = "forcc",
                                         forcings = forcings, initfunc = "parmsc", nroot = sum(odeModel@nnStates),
                                         rootfunc = "myroot", events = list(func = myEvent, root = TRUE))
              }
              dyn.unload(temp_compiled_model)
            } else {
              # pure-R fallback (Windows without Rtools): generate the R model
              # files and integrate the uncompiled state equation
              odeEq <- new("odeEquations")
              odeEq <- createModelEqClass(odeEq, odeModel@func)
              # !!!!!! check if the non rtools variant runs
              odeEq <- isDynElaNet(odeEq)
              odeEq <- calculateCostate(odeEq)
              createFunctions(odeEq)
              if (.Platform$OS.type != "windows"){
                temp_hidden_input_path <- paste0(tempdir(),'/','stateHiddenInput.R')
              } else {
                temp_hidden_input_path <- paste0(tempdir(),'\\','stateHiddenInput.R')
              }
              e <- new.env()
              source(temp_hidden_input_path, local = e)
              hiddenInputState <- get('hiddenInputState', envir = e)
              # NOTE(review): when no input was supplied above, `input` is an
              # atomic vector here and `input$w <-` will fail -- confirm this
              # path is only reached with a data.frame input.
              input$w <- apply(X = w, MARGIN = 2, FUN = function(x) stats::approxfun(x = times, y = x, method = 'linear', rule = 2))
              # NOTE(review): u is rebuilt from the zero matrix w, which looks
              # like it discards a user-supplied input -- confirm intended.
              input$u <- apply(X = w[,1:2], MARGIN = 2, FUN = function(x) stats::approxfun(x = times, y = x, method = 'linear', rule = 2))
              input$optW = rep(1,length(odeModel@y))
              if (sum(odeModel@nnStates) == 0) {
                resOde <- deSolve::ode(y = odeModel@y,
                                       func = hiddenInputState,
                                       times = times,
                                       parms = odeModel@parms,
                                       input = input)
              } else {
                eventTol <- 0.0
                resetValue <- 0.0001
                myRoot <- eval(parse(text = createRoot(rootStates = odeModel@nnStates)))
                myEvent <- eval(parse(text = createEvent(tolerance = eventTol, value = resetValue)))
                resOde <- deSolve::ode(y = odeModel@y,
                                       times = times,
                                       func = hiddenInputState,
                                       # BUG FIX: the slot is `parms`; `@params`
                                       # raised 'no slot of name "params"'
                                       parms = odeModel@parms,
                                       input = input,
                                       events = list(func = myEvent, root = TRUE),
                                       # BUG FIX: deSolve's argument is
                                       # `rootfunc`, not `rootfun`
                                       rootfunc = myRoot)
              }
            }
            return(resOde)
          }
)
| /R/modelClass.R | no_license | Newmi1988/seeds | R | false | false | 18,797 | r | #' A class to store the important information of an model.
#'
#' The slots are used to store the important information of an model. The class is used to create object for the
#' two algorithms implemented in seeds. Methods are implemented to easily calculate the nominal solution of the model and
#' change the details of the saved model.
#' The numerical solutions are calculated using the \pkg{deSolve} - package.
#'
#' @slot func A funtion containing the ode-equations of the model. For syntax look at the given examples of the \pkg{deSolve} package.
#' @slot times timesteps at which the model should be evaluated
#' @slot parms the parameters of the model
#' @slot input matrix containing the inputs with the time points
#' @slot measFunc function that converts the output of the ode solution
#' @slot y initial (state) values of the ODE system, has to be a vector
#' @slot meas matrix with the (experimental) measurements of the system
#' @slot sd optional standard deviations of the measurements, is used by the algorithms as weights in the costfunction
#' @slot custom customized link function
#' @slot nnStates bit vector that indicates if states should be observed by the root function
#' @slot nnTollerance tolerance at which a function is seen as zero
#' @slot resetValue value a state should be set to by an event
#'
#' @return an object of class odeModel which defines the model
#'
#' @export odeModel
#' @exportClass odeModel
#'
#' @import methods
#'
odeModel <- setClass(
#name of Class
"odeModel",
slots = c(
func = "function",
times = "numeric",
parms = "numeric",
input = "data.frame",
measFunc = "function",
y = "numeric",
meas = "data.frame",
sd = "data.frame",
custom = 'logical',
nnStates = 'numeric',
nnTollerance = 'numeric',
resetValue = "numeric"
),
prototype = list(
func = function(x) { },
times = numeric(),
parms = numeric(),
input = data.frame(matrix(numeric(0), ncol = 0)),
measFunc = function(x) { },
y = numeric(0),
meas = data.frame(matrix(numeric(0), ncol = 0)),
sd = data.frame(matrix(numeric(0), ncol = 0)),
custom = FALSE,
nnStates = numeric(),
nnTollerance = numeric(),
resetValue = numeric()
),
validity = function(object) {
# check inputs of matrix slot
if (sum(object@times) == 0) {
return("You have to specify the times on which the equation should be evaluated. A solution can only be calculated if the a intervall or specific timesteps are given. Set the 'times'' parameter.")
}
if (length(object@y) != 0 && object@custom == FALSE && sum(colSums(object@meas)) != 0) {
m <- matrix(rep(0, length(object@y)), ncol = length(object@y))
if (is.null(object@measFunc(m)) == FALSE) {
testMeas <- object@measFunc(m)
if (ncol(testMeas) != (ncol(object@meas) - 1)) {
return("The returned results of the measurement function does not have the same
dimensions as the given measurements")
}
}
}
return(TRUE)
}
)
setMethod('initialize', "odeModel", function(.Object, ...) {
.Object <- callNextMethod()
return(.Object)
})
checkMatrix <- function(argMatrix) {
if (sum(argMatrix) == 0) {
argName <- toString(deparse(substitute(argMatrix)))
errortext <- ' has to contain values not equal to 0.'
return(paste0(argName, errortext))
}
}
#' Set the model equation
#'
#' Set the model equation of the system in an odeModel object. Has to be a function that can be used with the deSolve package.
#'
#' @param odeModel an object of the class odeModel
#' @param func function describing the ode equation of the model
#'
#' @return an object of odeModel
#'
#' @examples
#' data("uvbModel")
#'
#' uvbModelEq <- function(t,x,parameters) {
#' with (as.list(parameters),{
#'
#' dx1 = ((-2) * ((ka1 * (x[1]^2) * (x[4]^2)) - (kd1 * x[5])) +
#' (-2) * ((ka2 * (x[1]^2) * x[2]) - (kd2 * x[3])) +
#' ((ks1 *((1) + (uv * n3 * (x[11] + fhy3_s)))) -
#' (kdr1 * ((1) + (n1 * uv)) * x[1])))
#' dx2 = ((-1) * ((ka2*(x[1]^2) * x[2]) - (kd2 * x[3])) +
#' (-1) * ((ka4 * x[2] * x[12]) - (kd4 * x[13])))
#' dx3 = (((ka2 * (x[1]^2) * x[2]) - (kd2* x[3])))
#' dx4 = ((-2) * (k1*(x[4]^2)) + (2) * (k2 * x[6]) +
#' (-2) * ((ka1 * (x[1]^2)* (x[4]^2)) - (kd1 * x[5])) +
#' (-1)* (ka3 * x[4] *x[7]))
#' dx5 = (((ka1 * (x[1]^2) * (x[4]^2)) -(kd1 * x[5])))
#' dx6 = ((-1) * (k2 * x[6]) + (k1 * (x[4]^2)) +(kd3 * (x[8]^2)))
#' dx7 = ((-1) * (ka3 * x[4] * x[7]) + ((ks2 * ((1) + (uv * x[5]))) -
#' (kdr2 * x[7])) + (2) * (kd3 * (x[8]^2)))
#' dx8 = ((-2) * (kd3 * x[8]^2) + (ka3 * x[4] * x[7]))
#' dx9 = 0
#' dx10 = 0
#' dx11 = (((ks3 * ((1) + (n2 * uv))) -(kdr3 * (((x[3] / (kdr3a + x[3])) +
#' (x[13] / (kdr3b + x[13]))) -(x[5] / (ksr + x[5]))) * x[11])))
#' dx12 = ((-1) * (ka4 * x[2] * x[12]) + (kd4 * x[13]))
#' dx13 =((ka4 * x[2] * x[12]) - (kd4 * x[13]))
#'
#' list(c(dx1,dx2,dx3,dx4,dx5,dx6,dx7,dx8,dx9,dx10,dx11,dx12,dx13))
#' })
#' }
#'
#' setModelEquation(uvbModel,uvbModelEq)
#'
#' @export
setGeneric(name = "setModelEquation",
def = function(odeModel, func) {
standardGeneric("setModelEquation")
}
)
#' @rdname setModelEquation
setMethod(f = "setModelEquation",
signature = "odeModel",
definition = function(odeModel, func) {
odeModel@func <- func
validObject(odeModel)
return(odeModel)
}
)
#' Set the model parameters
#'
#' A method to set the model parameters of an odeModel object.
#'
#' @param odeModel an object of the class odeModel
#' @param parms a vector containing the parameters of the model
#'
#' @examples
#' data("uvbModel")
#'
#' newParas <- c( ks1=0.23,
#' ks2=4.0526,
#' kdr1=0.1,
#' kdr2=0.2118,
#' k1=0.0043,
#' k2=161.62,
#' ka1=0.0372,
#' ka2=0.0611,
#' ka3=4.7207,
#' kd1=94.3524,
#' kd2=50.6973,
#' kd3=0.5508,
#' ks3=0.4397,
#' kdr3=1.246,
#' uv=1,
#' ka4=10.1285,
#' kd4=1.1999,
#' n1=3,
#' n2=2,
#' n3=3.5,
#' kdr3a=0.9735,
#' kdr3b=0.406,
#' ksr=0.7537,
#' fhy3_s=5)
#'
#' newModel <- setParms(odeModel = uvbModel, parms = newParas)
#'
#' @return an object of odeModel
#'
#' @export
setGeneric(name = "setParms",
def = function(odeModel, parms) {
standardGeneric("setParms")
}
)
#' @rdname setParms
setMethod(f = "setParms",
signature = c("odeModel", 'numeric'),
definition = function(odeModel, parms) {
odeModel@parms <- parms
validObject(odeModel)
return(odeModel)
}
)
#' Set the inputs of the model.
#'
#' It the model has an input it can be set with this function. The inputs
#' should be a dataframe, where the first column is the timesteps of the
#' inputs in the second column.
#'
#' @param odeModel an object of the class modelClass
#' @param input function describing the ode equation of the model
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data("uvbModel")
#'
#' model_times <- uvbModel@times
#' input <- rep(0,length(model_times))
#'
#' input_Dataframe <- data.frame(t = model_times, u = input)
#'
#' newModel <- setInput(odeModel = uvbModel,input = input_Dataframe)
#'
#' @export
setGeneric(name = "setInput",
def = function(odeModel, input) {
standardGeneric("setInput")
}
)
#' @rdname setInput
setMethod(f = "setInput",
signature = "odeModel",
definition = function(odeModel, input) {
odeModel@input <- input
validObject(odeModel)
return(odeModel)
}
)
#' Set the measurement equation for the model
#'
#' For a given model a measurement equation can be set. If no measurement function is set the
#' states become the output of the system. The function should be defined as in the example below.
#'
#' @param odeModel an object of the class odeModel
#' @param measFunc measurement function of the model. Has to be a R functions.
#' @param custom custom indexing for the measurement function (used by the baysian method)
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data("uvbModel")
#'
#' uvbMeasure <- function(x) {
#'
#' y1 = 2*x[,5] + x[,4] + x[,8]
#' y2 = 2*x[,5] + 2* x[,3] + x[,1]
#' y3 = x[,6]
#' y4 = x[,11]
#' y5 = x[,4]
#'
#' return(cbind(y1,y2,y3,y4,y5))
#' }
#'
#' newModel <- setMeasFunc(odeModel = uvbModel, measFunc = uvbMeasure)
#'
#' @export
setGeneric(name = "setMeasFunc",
def = function(odeModel, measFunc, custom) {
standardGeneric("setMeasFunc")
}
)
#' @rdname setMeasFunc
setMethod(f = "setMeasFunc",
signature = c('odeModel', 'function', 'missing'),
definition = function(odeModel, measFunc, custom) {
odeModel@measFunc <- measFunc
validObject(odeModel)
return(odeModel)
}
)
#' @rdname setMeasFunc
setMethod(f = "setMeasFunc",
          signature = c('odeModel', 'function', 'logical'),
          definition = function(odeModel, measFunc, custom) {
            ## Variant with an explicit `custom` flag (custom indexing used by
            ## the Bayesian method): store the function and the flag.
            # BUG FIX: the function was assigned to the data.frame slot `meas`
            # instead of `measFunc` (cf. the 'missing' variant above).
            odeModel@measFunc <- measFunc
            odeModel@custom <- custom
            validObject(odeModel)
            return(odeModel)
          }
)
#' Set the vector with the initial (state) values
#'
#' @param odeModel an object of the class odeModel
#' @param y vector with the initial values
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data("uvbModel")
#'
#' x0 = c(0.2,10,2,0,0,20,0,0,0,4.2,0.25,20,0)
#'
#' newModel <- setInitState(uvbModel, y = x0)
#'
#' @export
setGeneric(name = "setInitState",
def = function(odeModel, y) {
standardGeneric("setInitState")
}
)
#' @rdname setInitState
setMethod(f = "setInitState",
signature = "odeModel",
definition = function(odeModel, y) {
odeModel@y <- y
validObject(odeModel)
return(odeModel)
}
)
#' set measurements of the model
#'
#' The odeModel object stores all important information. Measurements of the objects can be set
#' directly by adressing the slot, or with this function.
#'
#' @param odeModel an object of the class odeModel
#' @param meas measurements of the model, a matrix with measurements of the model
#' and the corresponding time values
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data(uvbData)
#' data(uvbModel)
#'
#' measurements <- uvbData[,1:6]
#'
#' newModel <- setMeas(odeModel = uvbModel, meas = measurements)
#'
#' @export
setGeneric(name = "setMeas",
def = function(odeModel, meas) {
standardGeneric("setMeas")
}
)
#' @rdname setMeas
setMethod(f = "setMeas",
signature = 'odeModel',
definition = function(odeModel, meas) {
odeModel@meas <- meas
validObject(odeModel)
return(odeModel)
}
)
#' Set the standard deviation of the measurements
#'
#' With multiple measurements a standard deviation can be calculated for every point of
#' measurement. The standard deviation is used to weigh the estimated data points in the
#' cost function.
#'
#' @param odeModel an object of the class odeModel
#' @param sd a matrix with the standard deviations of the measurements
#'
#' @return an object of odeModel
#'
#' @examples
#'
#' data(uvbData)
#' data(uvbModel)
#'
#' sd_uvb <- uvbData[,7:11]
#'
#' newModel <- setSd(odeModel = uvbModel, sd = sd_uvb)
#'
#' @export
setGeneric(name = "setSd",
def = function(odeModel, sd) {
standardGeneric("setSd")
}
)
#' @rdname setSd
setMethod(f = "setSd",
signature = "odeModel",
definition = function(odeModel, sd) {
odeModel@sd <- sd
validObject(odeModel)
return(odeModel)
}
)
#### generate c code (interal function)
setGeneric(name = 'genCCode',
def = function(odeModel, bden, nnStates) {
standardGeneric('genCCode')
}
)
setMethod(f = 'genCCode',
signature = c('odeModel', 'logical', 'missing'),
definition = function(odeModel, bden, nnStates) {
createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, bden = bden)
return(odeModel)
}
)
setMethod(f = 'genCCode',
signature = c('odeModel', 'logical', 'numeric'),
definition = function(odeModel, bden, nnStates) {
createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, bden = bden, nnStates = nnStates)
return(odeModel)
}
)
setMethod(f = 'genCCode',
signature = c('odeModel', 'missing', 'numeric'),
definition = function(odeModel, bden, nnStates) {
createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, nnStates = nnStates)
return(odeModel)
}
)
# nominal solution
#' Calculate the nominal solution of the model
#'
#' After an model is defined it can be evaluated. This returns the numerical solution
#' for the state equation before hidden inputs are calculated.
#'
#' @param odeModel a object of the class ode model describing the experiment
#'
#' @return a matrix with the numeric solution to the nominal ode equation
#'
#' @examples
#'
#' lotka_voltera <- function (t, x, parameters) {
#' with(as.list(c(x,parameters)), {
#' dx1 = x[1]*(alpha - beta*x[2])
#' dx2 = -x[2]*(gamma - delta*x[1])
#' return(list(c(dx1, dx2)))
#' })
#' }
#'
#' pars <- c(alpha = 2, beta = .5, gamma = .2, delta = .6)
#' init_state <- c(x1 = 10, x2 = 10)
#' time <- seq(0, 100, by = 1)
#' lotVolModel = odeModel(func = lotka_voltera, parms = pars, times = time, y = init_state)
#' nominalSol(lotVolModel)
#'
#' @export
setGeneric(name = 'nominalSol',
def = function(odeModel) {
standardGeneric('nominalSol')
}
)
#' @rdname nominalSol
setMethod(f = 'nominalSol',
          signature = c('odeModel'),
          definition = function(odeModel) {
            ## Solve the nominal ODE system (all hidden inputs set to zero).
            x0 <- odeModel@y
            ### get the times from the measurements
            # add case for missing input
            times <- odeModel@times
            if (sum(colSums(odeModel@input)) == 0) {
              # no external input given: use a zero forcing on the whole grid
              input <- rep(0, length(times))
              uList = list(cbind(times, input))
            } else {
              # interpolate the given input onto the evaluation grid
              input <- odeModel@input
              u <- apply(X = input[, -1, drop = F], MARGIN = 2, FUN = function(x) stats::approx(x = input[, 1], y = x, xout = times, rule = 2))
              uList = list(cbind(times, u[[1]]$y))
            }
            # hidden inputs w are identically zero for the nominal solution
            w <- matrix(rep(0, length(x0) * length(times)), ncol = length(x0))
            if (grepl("Rtools", Sys.getenv('PATH')) || (.Platform$OS.type != "windows")) {
              # compiled path: generate + compile the C representation, then
              # integrate via the shared library
              createCompModel(modelFunc = odeModel@func, parameters = odeModel@parms, nnStates = odeModel@nnStates)
              temp_compiled_model <- compileModel()
              wSplit <- split(w, rep(1:ncol(w), each = nrow(w)))
              wList <- lapply(wSplit, FUN = function(x) cbind(times, x))
              forcings <- c(uList, wList)
              if (sum(odeModel@nnStates) == 0) {
                resOde <- deSolve::ode(y = odeModel@y, times = times, func = "derivsc",
                                       parms = odeModel@parms, dllname = "model", initforc = "forcc",
                                       forcings = forcings, initfunc = "parmsc")
              } else {
                eventTol <- 0.0
                resetValue <- 0.0001
                # NOTE(review): only myEvent is used on this path -- the root
                # is the compiled C symbol "myroot"; myRoot is kept in case
                # createRoot() has side effects -- confirm.
                myRoot <- eval(parse(text = createRoot(rootStates = odeModel@nnStates)))
                myEvent <- eval(parse(text = createEvent(tolerance = eventTol, value = resetValue)))
                resOde <- deSolve::lsoda(y = odeModel@y, times = times, func = "derivsc",
                                         parms = odeModel@parms, dllname = "model", initforc = "forcc",
                                         forcings = forcings, initfunc = "parmsc", nroot = sum(odeModel@nnStates),
                                         rootfunc = "myroot", events = list(func = myEvent, root = TRUE))
              }
              dyn.unload(temp_compiled_model)
            } else {
              # pure-R fallback (Windows without Rtools): generate the R model
              # files and integrate the uncompiled state equation
              odeEq <- new("odeEquations")
              odeEq <- createModelEqClass(odeEq, odeModel@func)
              # !!!!!! check if the non rtools variant runs
              odeEq <- isDynElaNet(odeEq)
              odeEq <- calculateCostate(odeEq)
              createFunctions(odeEq)
              if (.Platform$OS.type != "windows"){
                temp_hidden_input_path <- paste0(tempdir(),'/','stateHiddenInput.R')
              } else {
                temp_hidden_input_path <- paste0(tempdir(),'\\','stateHiddenInput.R')
              }
              e <- new.env()
              source(temp_hidden_input_path, local = e)
              hiddenInputState <- get('hiddenInputState', envir = e)
              # NOTE(review): when no input was supplied above, `input` is an
              # atomic vector here and `input$w <-` will fail -- confirm this
              # path is only reached with a data.frame input.
              input$w <- apply(X = w, MARGIN = 2, FUN = function(x) stats::approxfun(x = times, y = x, method = 'linear', rule = 2))
              # NOTE(review): u is rebuilt from the zero matrix w, which looks
              # like it discards a user-supplied input -- confirm intended.
              input$u <- apply(X = w[,1:2], MARGIN = 2, FUN = function(x) stats::approxfun(x = times, y = x, method = 'linear', rule = 2))
              input$optW = rep(1,length(odeModel@y))
              if (sum(odeModel@nnStates) == 0) {
                resOde <- deSolve::ode(y = odeModel@y,
                                       func = hiddenInputState,
                                       times = times,
                                       parms = odeModel@parms,
                                       input = input)
              } else {
                eventTol <- 0.0
                resetValue <- 0.0001
                myRoot <- eval(parse(text = createRoot(rootStates = odeModel@nnStates)))
                myEvent <- eval(parse(text = createEvent(tolerance = eventTol, value = resetValue)))
                resOde <- deSolve::ode(y = odeModel@y,
                                       times = times,
                                       func = hiddenInputState,
                                       # BUG FIX: the slot is `parms`; `@params`
                                       # raised 'no slot of name "params"'
                                       parms = odeModel@parms,
                                       input = input,
                                       events = list(func = myEvent, root = TRUE),
                                       # BUG FIX: deSolve's argument is
                                       # `rootfunc`, not `rootfun`
                                       rootfunc = myRoot)
              }
            }
            return(resOde)
          }
)
|
mydat <- read.csv('boxplottest.csv')
# Density-jittered group scatter plot (a "beeswarm"-style alternative to a
# boxplot): draws one column of points per group, jittering each point
# horizontally in proportion to how crowded its histogram bin is.
#
# Args:
#   x      numeric vector of observations.
#   groups grouping variable; coerced to factor.
#   scaler bin/jitter scale factor; 0 (the default) selects
#          5 * nlevels(groups) automatically.
#
# Called for its plotting side effect on the current graphics device.
boxPplot <- function(x, groups, scaler = 0) {
  df <- data.frame(x = x, groups = factor(groups))
  if (scaler == 0) scaler <- nlevels(df$groups) * 5
  # Empty frame: one x-axis slot per group, labelled with the level names.
  plot(seq_len(nlevels(df$groups)), type = "n",
       ylim = c(min(df$x), max(df$x)),
       xlim = c(0.5, nlevels(df$groups) + 0.5),
       xlab = "Group", ylab = "y",
       xaxt = "n")
  axis(1, at = seq_len(nlevels(df$groups)), labels = levels(df$groups))
  for (i in seq_len(nlevels(df$groups))) {
    y <- df$x[df$groups == levels(df$groups)[i]]
    freq <- hist(y, plot = FALSE, breaks = length(y) * 2 * scaler)
    # Number of *other* points sharing each point's histogram bin.
    # rightmost.closed = TRUE keeps a value equal to the last break inside the
    # final bin instead of indexing one past the end of freq$counts (NA).
    nbpb <- freq$counts[findInterval(y, freq$breaks, rightmost.closed = TRUE)] - 1
    # BUG FIX: the original divided by the base *function* `scale` (a closure),
    # a run-time type error; the intended divisor is the `scaler` argument.
    jit_x <- rnorm(length(nbpb), 0, nbpb / scaler)
    points(jit_x + i, y)
  }
} | /working.R | no_license | jejoenje/RSimExamples | R | false | false | 711 | r | mydat <- read.csv('boxplottest.csv')
# Density-jittered group scatter plot (a "beeswarm"-style alternative to a
# boxplot): draws one column of points per group, jittering each point
# horizontally in proportion to how crowded its histogram bin is.
#
# Args:
#   x      numeric vector of observations.
#   groups grouping variable; coerced to factor.
#   scaler bin/jitter scale factor; 0 (the default) selects
#          5 * nlevels(groups) automatically.
#
# Called for its plotting side effect on the current graphics device.
boxPplot <- function(x, groups, scaler = 0) {
  df <- data.frame(x = x, groups = factor(groups))
  if (scaler == 0) scaler <- nlevels(df$groups) * 5
  # Empty frame: one x-axis slot per group, labelled with the level names.
  plot(seq_len(nlevels(df$groups)), type = "n",
       ylim = c(min(df$x), max(df$x)),
       xlim = c(0.5, nlevels(df$groups) + 0.5),
       xlab = "Group", ylab = "y",
       xaxt = "n")
  axis(1, at = seq_len(nlevels(df$groups)), labels = levels(df$groups))
  for (i in seq_len(nlevels(df$groups))) {
    y <- df$x[df$groups == levels(df$groups)[i]]
    freq <- hist(y, plot = FALSE, breaks = length(y) * 2 * scaler)
    # Number of *other* points sharing each point's histogram bin.
    # rightmost.closed = TRUE keeps a value equal to the last break inside the
    # final bin instead of indexing one past the end of freq$counts (NA).
    nbpb <- freq$counts[findInterval(y, freq$breaks, rightmost.closed = TRUE)] - 1
    # BUG FIX: the original divided by the base *function* `scale` (a closure),
    # a run-time type error; the intended divisor is the `scaler` argument.
    jit_x <- rnorm(length(nbpb), 0, nbpb / scaler)
    points(jit_x + i, y)
  }
}
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests for arrow::Table casting and column/field accessors.
# Helpers such as example_data are presumably defined in the package's
# testthat helper files (not visible here) — TODO confirm.
test_that("Table cast (ARROW-3741)", {
tab <- Table$create(x = 1:10, y = 1:10)
# Casting must fail when the target schema drops fields or renames them.
expect_error(tab$cast(schema(x = int32())))
expect_error(tab$cast(schema(x = int32(), z = int32())))
# A same-name schema with different integer widths is accepted per column.
s2 <- schema(x = int16(), y = int64())
tab2 <- tab$cast(s2)
expect_equal(tab2$schema, s2)
expect_equal(tab2$column(0L)$type, int16())
expect_equal(tab2$column(1L)$type, int64())
})
test_that("Table S3 methods", {
tab <- Table$create(example_data)
# Each base S3 generic should give the same answer on the Table as on the
# underlying data frame.
for (f in c("dim", "nrow", "ncol", "dimnames", "colnames", "row.names", "as.list")) {
fun <- get(f)
expect_identical(fun(tab), fun(example_data), info = f)
}
})
test_that("Table $column and $field", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_equal(tab$field(0), field("x", int32()))
# input validation
expect_error(tab$column(NA), "'i' cannot be NA")
expect_error(tab$column(-1), "subscript out of bounds")
expect_error(tab$column(1000), "subscript out of bounds")
expect_error(tab$column(1:2))
expect_error(tab$column("one"))
expect_error(tab$field(NA), "'i' cannot be NA")
expect_error(tab$field(-1), "subscript out of bounds")
expect_error(tab$field(1000), "subscript out of bounds")
expect_error(tab$field(1:2))
expect_error(tab$field("one"))
})
# Common fixtures used in some of the following tests
# NOTE(review): tbl/tab are defined at script top level and shared by several
# tests below; tests that assign into `tab` do so in their own test_that
# environment — presumably leaving this copy untouched (verify).
tbl <- tibble::tibble(
int = 1:10,
dbl = as.numeric(1:10),
lgl = sample(c(TRUE, FALSE, NA), 10, replace = TRUE),
chr = letters[1:10],
fct = factor(letters[1:10])
)
tab <- Table$create(tbl)
test_that("[, [[, $ for Table", {
expect_identical(names(tab), names(tbl))
# Row/column subsetting mirrors data.frame semantics.
expect_data_frame(tab[6:7, ], tbl[6:7, ])
expect_data_frame(tab[6:7, 2:4], tbl[6:7, 2:4])
expect_data_frame(tab[, c("dbl", "fct")], tbl[, c(2, 5)])
expect_as_vector(tab[, "chr", drop = TRUE], tbl$chr)
# Take within a single chunk
expect_data_frame(tab[c(7, 3, 5), 2:4], tbl[c(7, 3, 5), 2:4])
expect_data_frame(tab[rep(c(FALSE, TRUE), 5), ], tbl[c(2, 4, 6, 8, 10), ])
# bool ChunkedArray (with one chunk)
expect_data_frame(tab[tab$lgl, ], tbl[tbl$lgl, ])
# ChunkedArray with multiple chunks
c1 <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
c2 <- c(FALSE, FALSE, TRUE, TRUE, FALSE)
ca <- ChunkedArray$create(c1, c2)
expect_data_frame(tab[ca, ], tbl[c(1, 3, 4, 8, 9), ])
# int Array
expect_data_frame(tab[Array$create(5:6), 2:4], tbl[6:7, 2:4])
# ChunkedArray
expect_data_frame(tab[ChunkedArray$create(5L, 6L), 2:4], tbl[6:7, 2:4])
# Expression
expect_data_frame(tab[tab$int > 6, ], tbl[tbl$int > 6, ])
# [[ and $ extract single columns as R vectors; unknown names give NULL.
expect_as_vector(tab[["int"]], tbl$int)
expect_as_vector(tab$int, tbl$int)
expect_as_vector(tab[[4]], tbl$chr)
expect_null(tab$qwerty)
expect_null(tab[["asdf"]])
# List-like column slicing
expect_data_frame(tab[2:4], tbl[2:4])
expect_data_frame(tab[c(2, 1)], tbl[c(2, 1)])
expect_data_frame(tab[-3], tbl[-3])
expect_error(tab[[c(4, 3)]])
expect_error(tab[[NA]], "'i' must be character or numeric, not logical")
expect_error(tab[[NULL]], "'i' must be character or numeric, not NULL")
expect_error(tab[[c("asdf", "jkl;")]], "length(name) not equal to 1", fixed = TRUE)
expect_error(tab[-3:3], "Invalid column index")
expect_error(tab[1000], "Invalid column index")
expect_error(tab[1:1000], "Invalid column index")
# input validation
expect_error(tab[, c("dbl", "NOTACOLUMN")], 'Column not found: "NOTACOLUMN"')
expect_error(tab[, c(6, NA)], "Column indices cannot be NA")
# Everything after skip() is intentionally disabled pending upstream support.
skip("Table with 0 cols doesn't know how many rows it should have")
expect_data_frame(tab[0], tbl[0])
})
# Tests for in-place-style column assignment, Slice, and head/tail.
test_that("[[<- assignment", {
# can remove a column
tab[["chr"]] <- NULL
expect_data_frame(tab, tbl[-4])
# can remove a column by index
tab[[4]] <- NULL
expect_data_frame(tab, tbl[1:3])
# can add a named column
tab[["new"]] <- letters[10:1]
expect_data_frame(tab, dplyr::bind_cols(tbl[1:3], new = letters[10:1]))
# can replace a column by index
tab[[2]] <- as.numeric(10:1)
expect_as_vector(tab[[2]], as.numeric(10:1))
# can add a column by index
tab[[5]] <- as.numeric(10:1)
expect_as_vector(tab[[5]], as.numeric(10:1))
# a column added at a numeric index past the end gets the stringified index
# as its name, hence tab[["5"]] below.
expect_as_vector(tab[["5"]], as.numeric(10:1))
# can replace a column
tab[["int"]] <- 10:1
expect_as_vector(tab[["int"]], 10:1)
# can use $
tab$new <- NULL
expect_null(as.vector(tab$new))
expect_identical(dim(tab), c(10L, 4L))
tab$int <- 1:10
expect_as_vector(tab$int, 1:10)
# recycling
tab[["atom"]] <- 1L
expect_as_vector(tab[["atom"]], rep(1L, 10))
expect_error(
tab[["atom"]] <- 1:6,
"Can't recycle input of size 6 to size 10."
)
# assign Arrow array and chunked_array
array <- Array$create(c(10:1))
tab$array <- array
expect_as_vector(tab$array, 10:1)
tab$chunked <- chunked_array(1:10)
expect_as_vector(tab$chunked, 1:10)
# nonsense indexes
expect_error(tab[[NA]] <- letters[10:1], "'i' must be character or numeric, not logical")
expect_error(tab[[NULL]] <- letters[10:1], "'i' must be character or numeric, not NULL")
expect_error(tab[[NA_integer_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[NA_real_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[NA_character_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[c(1, 4)]] <- letters[10:1], "length(i) not equal to 1", fixed = TRUE)
})
test_that("Table$Slice", {
# Slice(offset) takes everything after `offset` rows; Slice(offset, length)
# takes `length` rows. Offsets are zero-based.
tab2 <- tab$Slice(5)
expect_data_frame(tab2, tbl[6:10, ])
tab3 <- tab$Slice(5, 2)
expect_data_frame(tab3, tbl[6:7, ])
# Input validation
expect_error(tab$Slice("ten"))
expect_error(tab$Slice(NA_integer_), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(NA), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(10, "ten"))
expect_error(tab$Slice(10, NA_integer_), "Slice 'length' cannot be NA")
expect_error(tab$Slice(NA_integer_, NA_integer_), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(c(10, 10)))
expect_error(tab$Slice(10, c(10, 10)))
expect_error(tab$Slice(1000), "Slice 'offset' greater than array length")
expect_error(tab$Slice(-1), "Slice 'offset' cannot be negative")
expect_error(tab3$Slice(10, 10), "Slice 'offset' greater than array length")
expect_error(tab$Slice(10, -1), "Slice 'length' cannot be negative")
expect_error(tab$Slice(-1, 10), "Slice 'offset' cannot be negative")
})
test_that("head and tail on Table", {
# Includes n larger than nrow and negative n (drop from the other end),
# matching base R head/tail semantics.
expect_data_frame(head(tab), head(tbl))
expect_data_frame(head(tab, 4), head(tbl, 4))
expect_data_frame(head(tab, 40), head(tbl, 40))
expect_data_frame(head(tab, -4), head(tbl, -4))
expect_data_frame(head(tab, -40), head(tbl, -40))
expect_data_frame(tail(tab), tail(tbl))
expect_data_frame(tail(tab, 4), tail(tbl, 4))
expect_data_frame(tail(tab, 40), tail(tbl, 40))
expect_data_frame(tail(tab, -4), tail(tbl, -4))
expect_data_frame(tail(tab, -40), tail(tbl, -40))
})
# Tests for the print method, active bindings, and Table$create() inputs.
test_that("Table print method", {
# Pins the exact printed schema summary for the shared fixture table.
expect_output(
print(tab),
paste(
"Table",
"10 rows x 5 columns",
"$int <int32>",
"$dbl <double>",
"$lgl <bool>",
"$chr <string>",
"$fct <dictionary<values=string, indices=int8>>",
sep = "\n"
),
fixed = TRUE
)
})
test_that("table active bindings", {
expect_identical(dim(tbl), dim(tab))
expect_type(tab$columns, "list")
expect_equal(tab$columns[[1]], tab[[1]])
})
test_that("table() handles record batches with splicing", {
# Multiple record batches with the same schema are concatenated row-wise.
batch <- record_batch(x = 1:2, y = letters[1:2])
tab <- Table$create(batch, batch, batch)
expect_equal(tab$schema, batch$schema)
expect_equal(tab$num_rows, 6L)
expect_equal(
as.data.frame(tab),
vctrs::vec_rbind(as.data.frame(batch), as.data.frame(batch), as.data.frame(batch))
)
# The same works when a list of batches is spliced in with !!!.
batches <- list(batch, batch, batch)
tab <- Table$create(!!!batches)
expect_equal(tab$schema, batch$schema)
expect_equal(tab$num_rows, 6L)
expect_equal(
as.data.frame(tab),
vctrs::vec_rbind(!!!purrr::map(batches, as.data.frame))
)
})
test_that("table() handles ... of arrays, chunked arrays, vectors", {
# Mixed column inputs (Array, ChunkedArray, plain vector, spliced tibble)
# all become columns with the expected types and order.
a <- Array$create(1:10)
ca <- chunked_array(1:5, 6:10)
v <- rnorm(10)
tbl <- tibble::tibble(x = 1:10, y = letters[1:10])
tab <- Table$create(a = a, b = ca, c = v, !!!tbl)
expect_equal(
tab$schema,
schema(a = int32(), b = int32(), c = float64(), x = int32(), y = utf8())
)
res <- as.data.frame(tab)
expect_equal(names(res), c("a", "b", "c", "x", "y"))
expect_equal(
res,
tibble::tibble(a = 1:10, b = 1:10, c = v, x = 1:10, y = letters[1:10])
)
})
test_that("table() auto splices (ARROW-5718)", {
# Passing a data frame directly is equivalent to splicing its columns.
df <- tibble::tibble(x = 1:10, y = letters[1:10])
tab1 <- Table$create(df)
tab2 <- Table$create(!!!df)
expect_equal(tab1, tab2)
expect_equal(tab1$schema, schema(x = int32(), y = utf8()))
expect_equal(as.data.frame(tab1), df)
# An explicit schema overrides the inferred column types in both forms.
s <- schema(x = float64(), y = utf8())
tab3 <- Table$create(df, schema = s)
tab4 <- Table$create(!!!df, schema = s)
expect_equal(tab3, tab4)
expect_equal(tab3$schema, s)
expect_equal(as.data.frame(tab3), df)
})
# Tests for schema validation, equality semantics, and dictionary handling.
test_that("Validation when creating table with schema (ARROW-10953)", {
# Schema/column mismatches must error with informative messages.
expect_error(
Table$create(data.frame(), schema = schema(a = int32())),
"incompatible. schema has 1 fields, and 0 columns are supplied",
fixed = TRUE
)
expect_error(
Table$create(data.frame(b = 1), schema = schema(a = int32())),
"field at index 1 has name 'a' != 'b'",
fixed = TRUE
)
expect_error(
Table$create(data.frame(b = 2, c = 3), schema = schema(a = int32())),
"incompatible. schema has 1 fields, and 2 columns are supplied",
fixed = TRUE
)
})
test_that("==.Table", {
tab1 <- Table$create(x = 1:2, y = c("a", "b"))
tab2 <- Table$create(x = 1:2, y = c("a", "b"))
tab3 <- Table$create(x = 1:2)
tab4 <- Table$create(x = 1:2, y = c("a", "b"), z = 3:4)
# == is symmetric and sensitive to missing/extra columns.
expect_true(tab1 == tab2)
expect_true(tab2 == tab1)
expect_false(tab1 == tab3)
expect_false(tab3 == tab1)
expect_false(tab1 == tab4)
expect_false(tab4 == tab1)
expect_true(all.equal(tab1, tab2))
expect_equal(tab1, tab2)
})
test_that("Table$Equals(check_metadata)", {
tab1 <- Table$create(x = 1:2, y = c("a", "b"))
tab2 <- Table$create(
x = 1:2, y = c("a", "b"),
schema = tab1$schema$WithMetadata(list(some = "metadata"))
)
expect_r6_class(tab1, "Table")
expect_r6_class(tab2, "Table")
expect_false(tab1$schema$HasMetadata)
expect_true(tab2$schema$HasMetadata)
expect_identical(tab2$schema$metadata, list(some = "metadata"))
# Data-equal tables compare equal unless metadata is explicitly checked.
expect_true(tab1 == tab2)
expect_true(tab1$Equals(tab2))
expect_false(tab1$Equals(tab2, check_metadata = TRUE))
expect_failure(expect_equal(tab1, tab2)) # expect_equal has check_metadata=TRUE
expect_equal(tab1, tab2, ignore_attr = TRUE) # this sets check_metadata=FALSE
expect_false(tab1$Equals(24)) # Not a Table
})
test_that("Table handles null type (ARROW-7064)", {
# vctrs::unspecified() maps to the Arrow null type.
tab <- Table$create(a = 1:10, n = vctrs::unspecified(10))
expect_equal(tab$schema, schema(a = int32(), n = null()), ignore_attr = TRUE)
})
test_that("Can create table with specific dictionary types", {
fact <- example_data[, "fct"]
int_types <- c(int8(), int16(), int32(), int64())
# TODO: test uint types when format allows
# uint_types <- c(uint8(), uint16(), uint32(), uint64()) # nolint
for (i in int_types) {
sch <- schema(fct = dictionary(i, utf8()))
tab <- Table$create(fact, schema = sch)
expect_equal(sch, tab$schema)
if (i != int64()) {
# TODO: same downcast to int32 as we do for int64() type elsewhere
expect_identical(as.data.frame(tab), fact)
}
}
})
test_that("Table unifies dictionary on conversion back to R (ARROW-8374)", {
# Batches with different factor levels (and an empty factor) convert back
# to a single factor with the union of levels.
b1 <- record_batch(f = factor(c("a"), levels = c("a", "b")))
b2 <- record_batch(f = factor(c("c"), levels = c("c", "d")))
b3 <- record_batch(f = factor(NA, levels = "a"))
b4 <- record_batch(f = factor())
res <- tibble::tibble(f = factor(c("a", "c", NA), levels = c("a", "b", "c", "d")))
tab <- Table$create(b1, b2, b3, b4)
expect_identical(as.data.frame(tab), res)
})
test_that("Table$SelectColumns()", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_equal(tab$SelectColumns(0L), Table$create(x = 1:10))
expect_error(tab$SelectColumns(2:4))
expect_error(tab$SelectColumns(""))
})
test_that("Table name assignment", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_identical(names(tab), c("x", "y"))
names(tab) <- c("a", "b")
expect_identical(names(tab), c("a", "b"))
# Replacement names must be a character vector of the right length.
expect_error(names(tab) <- "f")
expect_error(names(tab) <- letters)
expect_error(names(tab) <- character(0))
expect_error(names(tab) <- NULL)
expect_error(names(tab) <- c(TRUE, FALSE))
})
test_that("Table$create() with different length columns", {
msg <- "All columns must have the same length"
expect_error(Table$create(a = 1:5, b = 1:6), msg)
})
# Tests for scalar recycling, concat_tables(), and rbind().
test_that("Table$create() scalar recycling with vectors", {
expect_data_frame(
Table$create(a = 1:10, b = 5),
tibble::tibble(a = 1:10, b = 5)
)
})
test_that("Table$create() scalar recycling with Scalars, Arrays, and ChunkedArrays", {
# Length-1 inputs recycle regardless of the Arrow container they arrive in.
expect_data_frame(
Table$create(a = Array$create(1:10), b = Scalar$create(5)),
tibble::tibble(a = 1:10, b = 5)
)
expect_data_frame(
Table$create(a = Array$create(1:10), b = Array$create(5)),
tibble::tibble(a = 1:10, b = 5)
)
expect_data_frame(
Table$create(a = Array$create(1:10), b = ChunkedArray$create(5)),
tibble::tibble(a = 1:10, b = 5)
)
})
test_that("Table$create() no recycling with tibbles", {
# Whole data frames are never recycled against each other, even 1-row ones.
expect_error(
Table$create(
tibble::tibble(a = 1:10, b = 5),
tibble::tibble(a = 1, b = 5)
),
regexp = "All input tibbles or data.frames must have the same number of rows"
)
expect_error(
Table$create(
tibble::tibble(a = 1:10, b = 5),
tibble::tibble(a = 1)
),
regexp = "All input tibbles or data.frames must have the same number of rows"
)
})
test_that("Tables can be combined with concat_tables()", {
expect_error(
concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = FALSE),
regexp = "Schema at index 2 does not match the first schema"
)
expect_error(
concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = TRUE),
regexp = "Unable to merge: Field a has incompatible types: int32 vs string"
)
expect_error(
concat_tables(),
regexp = "Must pass at least one Table"
)
# Schema unification fills columns missing from an input with nulls.
expect_equal(
concat_tables(
arrow_table(a = 1:5),
arrow_table(a = 6:7, b = c("d", "e"))
),
arrow_table(a = 1:7, b = c(rep(NA, 5), "d", "e"))
)
# concat_tables() with one argument returns identical table
expected <- arrow_table(a = 1:10)
expect_equal(expected, concat_tables(expected))
})
test_that("Table supports rbind", {
expect_error(
rbind(arrow_table(a = 1:10), arrow_table(a = c("a", "b"))),
regexp = "Schema at index 2 does not match the first schema"
)
tables <- list(
arrow_table(a = 1:10, b = Scalar$create("x")),
arrow_table(a = 2:42, b = Scalar$create("y")),
arrow_table(a = 8:10, b = Scalar$create("z"))
)
# rbind on Tables must match rbind on the equivalent data frames.
expected <- Table$create(do.call(rbind, lapply(tables, as.data.frame)))
actual <- do.call(rbind, tables)
expect_equal(actual, expected, ignore_attr = TRUE)
# rbind with empty table produces identical table
expected <- arrow_table(a = 1:10, b = Scalar$create("x"))
expect_equal(
rbind(expected, arrow_table(a = integer(0), b = character(0))),
expected
)
# rbind() with one argument returns identical table
expect_equal(rbind(expected), expected)
})
# Tests for cbind() on Tables, including mixed-input and R-version edge cases.
test_that("Table supports cbind", {
# Duplicate column names: error message pinned via snapshot.
expect_snapshot_error(
cbind(
arrow_table(a = 1:10),
arrow_table(a = c("a", "b"))
)
)
expect_error(
cbind(arrow_table(a = 1:10), arrow_table(b = character(0))),
regexp = "Non-scalar inputs must have an equal number of rows"
)
actual <- cbind(
arrow_table(a = 1:10, b = Scalar$create("x")),
arrow_table(a = 11:20, b = Scalar$create("y")),
arrow_table(c = 1:10)
)
expected <- arrow_table(cbind(
tibble::tibble(a = 1:10, b = "x"),
tibble::tibble(a = 11:20, b = "y"),
tibble::tibble(c = 1:10)
))
expect_equal(actual, expected, ignore_attr = TRUE)
# cbind() with one argument returns identical table
expected <- arrow_table(a = 1:10)
expect_equal(expected, cbind(expected))
# Handles Arrow arrays and chunked arrays
expect_equal(
cbind(arrow_table(a = 1:2), b = Array$create(4:5)),
arrow_table(a = 1:2, b = 4:5)
)
expect_equal(
cbind(arrow_table(a = 1:2), b = chunked_array(4, 5)),
arrow_table(a = 1:2, b = chunked_array(4, 5))
)
# Handles data.frame
if (getRversion() >= "4.0.0") {
# Prior to R 4.0, cbind would short-circuit to the data.frame implementation
# if **any** of the arguments are a data.frame.
expect_equal(
cbind(arrow_table(a = 1:2), data.frame(b = 4:5)),
arrow_table(a = 1:2, b = 4:5)
)
}
# Handles factors
expect_equal(
cbind(arrow_table(a = 1:2), b = factor(c("a", "b"))),
arrow_table(a = 1:2, b = factor(c("a", "b")))
)
# Handles scalar values
expect_equal(
cbind(arrow_table(a = 1:2), b = "x"),
arrow_table(a = 1:2, b = c("x", "x"))
)
# Handles zero rows
expect_equal(
cbind(arrow_table(a = character(0)), b = Array$create(numeric(0)), c = integer(0)),
arrow_table(a = character(0), b = numeric(0), c = integer(0)),
)
# Rejects unnamed arrays, even in cases where no named arguments are passed
expect_error(
cbind(arrow_table(a = 1:2), b = 3:4, 5:6),
regexp = "Vector and array arguments must have names"
)
expect_error(
cbind(arrow_table(a = 1:2), 3:4, 5:6),
regexp = "Vector and array arguments must have names"
)
})
test_that("cbind.Table handles record batches and tables", {
# R 3.6 cbind dispatch rules cause cbind to fall back to default impl if
# there are multiple arguments with distinct cbind implementations
skip_if(getRversion() < "4.0.0", "R 3.6 cbind dispatch rules prevent this behavior")
expect_equal(
cbind(arrow_table(a = 1L:2L), record_batch(b = 4:5)),
arrow_table(a = 1L:2L, b = 4:5)
)
})
# Tests for dplyr grouping metadata, length(), as_arrow_table(), and
# large-memory integer-overflow behavior.
test_that("ARROW-11769/ARROW-17085 - grouping preserved in table creation", {
skip_if_not_available("dataset")
tbl <- tibble::tibble(
int = 1:10,
fct = factor(rep(c("A", "B"), 5)),
fct2 = factor(rep(c("C", "D"), each = 5)),
)
# Ungrouped and grouped inputs both round-trip their group_vars().
expect_identical(
tbl %>%
Table$create() %>%
dplyr::group_vars(),
dplyr::group_vars(tbl)
)
expect_identical(
tbl %>%
dplyr::group_by(fct, fct2) %>%
Table$create() %>%
dplyr::group_vars(),
c("fct", "fct2")
)
})
test_that("ARROW-12729 - length returns number of columns in Table", {
tbl <- tibble::tibble(
int = 1:10,
fct = factor(rep(c("A", "B"), 5)),
fct2 = factor(rep(c("C", "D"), each = 5)),
)
tab <- Table$create(!!!tbl)
expect_identical(length(tab), 3L)
})
test_that("as_arrow_table() works for Table", {
# Identity on a Table; an explicit schema triggers a cast.
table <- arrow_table(col1 = 1L, col2 = "two")
expect_identical(as_arrow_table(table), table)
expect_equal(
as_arrow_table(table, schema = schema(col1 = float64(), col2 = string())),
arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
)
})
test_that("as_arrow_table() works for RecordBatch", {
table <- arrow_table(col1 = 1L, col2 = "two")
batch <- record_batch(col1 = 1L, col2 = "two")
expect_equal(as_arrow_table(batch), table)
expect_equal(
as_arrow_table(batch, schema = schema(col1 = float64(), col2 = string())),
arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
)
})
test_that("as_arrow_table() works for data.frame()", {
table <- arrow_table(col1 = 1L, col2 = "two")
tbl <- tibble::tibble(col1 = 1L, col2 = "two")
expect_equal(as_arrow_table(tbl), table)
expect_equal(
as_arrow_table(
tbl,
schema = schema(col1 = float64(), col2 = string())
),
arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
)
})
test_that("as_arrow_table() errors for invalid input", {
expect_error(
as_arrow_table("no as_arrow_table() method"),
class = "arrow_no_method_as_arrow_table"
)
})
test_that("num_rows method not susceptible to integer overflow", {
# Only runs when large-memory tests are enabled; builds arrays past
# .Machine$integer.max so counts must come back as doubles, not integers.
skip_if_not_running_large_memory_tests()
small_array <- Array$create(raw(1))
big_array <- Array$create(raw(.Machine$integer.max))
big_chunked_array <- chunked_array(big_array, small_array)
# LargeString array with data buffer > MAX_INT32
big_string_array <- Array$create(make_big_string())
small_table <- Table$create(col = small_array)
big_table <- Table$create(col = big_chunked_array)
expect_type(big_array$nbytes(), "integer")
expect_type(big_chunked_array$nbytes(), "double")
expect_type(length(big_array), "integer")
expect_type(length(big_chunked_array), "double")
expect_type(small_table$num_rows, "integer")
expect_type(big_table$num_rows, "double")
expect_identical(big_string_array$data()$buffers[[3]]$size, 2148007936)
})
| /r/tests/testthat/test-Table.R | permissive | G-Research/arrow | R | false | false | 21,835 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
test_that("Table cast (ARROW-3741)", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_error(tab$cast(schema(x = int32())))
expect_error(tab$cast(schema(x = int32(), z = int32())))
s2 <- schema(x = int16(), y = int64())
tab2 <- tab$cast(s2)
expect_equal(tab2$schema, s2)
expect_equal(tab2$column(0L)$type, int16())
expect_equal(tab2$column(1L)$type, int64())
})
test_that("Table S3 methods", {
tab <- Table$create(example_data)
for (f in c("dim", "nrow", "ncol", "dimnames", "colnames", "row.names", "as.list")) {
fun <- get(f)
expect_identical(fun(tab), fun(example_data), info = f)
}
})
test_that("Table $column and $field", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_equal(tab$field(0), field("x", int32()))
# input validation
expect_error(tab$column(NA), "'i' cannot be NA")
expect_error(tab$column(-1), "subscript out of bounds")
expect_error(tab$column(1000), "subscript out of bounds")
expect_error(tab$column(1:2))
expect_error(tab$column("one"))
expect_error(tab$field(NA), "'i' cannot be NA")
expect_error(tab$field(-1), "subscript out of bounds")
expect_error(tab$field(1000), "subscript out of bounds")
expect_error(tab$field(1:2))
expect_error(tab$field("one"))
})
# Common fixtures used in some of the following tests
tbl <- tibble::tibble(
int = 1:10,
dbl = as.numeric(1:10),
lgl = sample(c(TRUE, FALSE, NA), 10, replace = TRUE),
chr = letters[1:10],
fct = factor(letters[1:10])
)
tab <- Table$create(tbl)
test_that("[, [[, $ for Table", {
expect_identical(names(tab), names(tbl))
expect_data_frame(tab[6:7, ], tbl[6:7, ])
expect_data_frame(tab[6:7, 2:4], tbl[6:7, 2:4])
expect_data_frame(tab[, c("dbl", "fct")], tbl[, c(2, 5)])
expect_as_vector(tab[, "chr", drop = TRUE], tbl$chr)
# Take within a single chunk
expect_data_frame(tab[c(7, 3, 5), 2:4], tbl[c(7, 3, 5), 2:4])
expect_data_frame(tab[rep(c(FALSE, TRUE), 5), ], tbl[c(2, 4, 6, 8, 10), ])
# bool ChunkedArray (with one chunk)
expect_data_frame(tab[tab$lgl, ], tbl[tbl$lgl, ])
# ChunkedArray with multiple chunks
c1 <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
c2 <- c(FALSE, FALSE, TRUE, TRUE, FALSE)
ca <- ChunkedArray$create(c1, c2)
expect_data_frame(tab[ca, ], tbl[c(1, 3, 4, 8, 9), ])
# int Array
expect_data_frame(tab[Array$create(5:6), 2:4], tbl[6:7, 2:4])
# ChunkedArray
expect_data_frame(tab[ChunkedArray$create(5L, 6L), 2:4], tbl[6:7, 2:4])
# Expression
expect_data_frame(tab[tab$int > 6, ], tbl[tbl$int > 6, ])
expect_as_vector(tab[["int"]], tbl$int)
expect_as_vector(tab$int, tbl$int)
expect_as_vector(tab[[4]], tbl$chr)
expect_null(tab$qwerty)
expect_null(tab[["asdf"]])
# List-like column slicing
expect_data_frame(tab[2:4], tbl[2:4])
expect_data_frame(tab[c(2, 1)], tbl[c(2, 1)])
expect_data_frame(tab[-3], tbl[-3])
expect_error(tab[[c(4, 3)]])
expect_error(tab[[NA]], "'i' must be character or numeric, not logical")
expect_error(tab[[NULL]], "'i' must be character or numeric, not NULL")
expect_error(tab[[c("asdf", "jkl;")]], "length(name) not equal to 1", fixed = TRUE)
expect_error(tab[-3:3], "Invalid column index")
expect_error(tab[1000], "Invalid column index")
expect_error(tab[1:1000], "Invalid column index")
# input validation
expect_error(tab[, c("dbl", "NOTACOLUMN")], 'Column not found: "NOTACOLUMN"')
expect_error(tab[, c(6, NA)], "Column indices cannot be NA")
skip("Table with 0 cols doesn't know how many rows it should have")
expect_data_frame(tab[0], tbl[0])
})
test_that("[[<- assignment", {
# can remove a column
tab[["chr"]] <- NULL
expect_data_frame(tab, tbl[-4])
# can remove a column by index
tab[[4]] <- NULL
expect_data_frame(tab, tbl[1:3])
# can add a named column
tab[["new"]] <- letters[10:1]
expect_data_frame(tab, dplyr::bind_cols(tbl[1:3], new = letters[10:1]))
# can replace a column by index
tab[[2]] <- as.numeric(10:1)
expect_as_vector(tab[[2]], as.numeric(10:1))
# can add a column by index
tab[[5]] <- as.numeric(10:1)
expect_as_vector(tab[[5]], as.numeric(10:1))
expect_as_vector(tab[["5"]], as.numeric(10:1))
# can replace a column
tab[["int"]] <- 10:1
expect_as_vector(tab[["int"]], 10:1)
# can use $
tab$new <- NULL
expect_null(as.vector(tab$new))
expect_identical(dim(tab), c(10L, 4L))
tab$int <- 1:10
expect_as_vector(tab$int, 1:10)
# recycling
tab[["atom"]] <- 1L
expect_as_vector(tab[["atom"]], rep(1L, 10))
expect_error(
tab[["atom"]] <- 1:6,
"Can't recycle input of size 6 to size 10."
)
# assign Arrow array and chunked_array
array <- Array$create(c(10:1))
tab$array <- array
expect_as_vector(tab$array, 10:1)
tab$chunked <- chunked_array(1:10)
expect_as_vector(tab$chunked, 1:10)
# nonsense indexes
expect_error(tab[[NA]] <- letters[10:1], "'i' must be character or numeric, not logical")
expect_error(tab[[NULL]] <- letters[10:1], "'i' must be character or numeric, not NULL")
expect_error(tab[[NA_integer_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[NA_real_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[NA_character_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[c(1, 4)]] <- letters[10:1], "length(i) not equal to 1", fixed = TRUE)
})
test_that("Table$Slice", {
tab2 <- tab$Slice(5)
expect_data_frame(tab2, tbl[6:10, ])
tab3 <- tab$Slice(5, 2)
expect_data_frame(tab3, tbl[6:7, ])
# Input validation
expect_error(tab$Slice("ten"))
expect_error(tab$Slice(NA_integer_), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(NA), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(10, "ten"))
expect_error(tab$Slice(10, NA_integer_), "Slice 'length' cannot be NA")
expect_error(tab$Slice(NA_integer_, NA_integer_), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(c(10, 10)))
expect_error(tab$Slice(10, c(10, 10)))
expect_error(tab$Slice(1000), "Slice 'offset' greater than array length")
expect_error(tab$Slice(-1), "Slice 'offset' cannot be negative")
expect_error(tab3$Slice(10, 10), "Slice 'offset' greater than array length")
expect_error(tab$Slice(10, -1), "Slice 'length' cannot be negative")
expect_error(tab$Slice(-1, 10), "Slice 'offset' cannot be negative")
})
test_that("head and tail on Table", {
expect_data_frame(head(tab), head(tbl))
expect_data_frame(head(tab, 4), head(tbl, 4))
expect_data_frame(head(tab, 40), head(tbl, 40))
expect_data_frame(head(tab, -4), head(tbl, -4))
expect_data_frame(head(tab, -40), head(tbl, -40))
expect_data_frame(tail(tab), tail(tbl))
expect_data_frame(tail(tab, 4), tail(tbl, 4))
expect_data_frame(tail(tab, 40), tail(tbl, 40))
expect_data_frame(tail(tab, -4), tail(tbl, -4))
expect_data_frame(tail(tab, -40), tail(tbl, -40))
})
test_that("Table print method", {
expect_output(
print(tab),
paste(
"Table",
"10 rows x 5 columns",
"$int <int32>",
"$dbl <double>",
"$lgl <bool>",
"$chr <string>",
"$fct <dictionary<values=string, indices=int8>>",
sep = "\n"
),
fixed = TRUE
)
})
test_that("table active bindings", {
expect_identical(dim(tbl), dim(tab))
expect_type(tab$columns, "list")
expect_equal(tab$columns[[1]], tab[[1]])
})
test_that("table() handles record batches with splicing", {
batch <- record_batch(x = 1:2, y = letters[1:2])
tab <- Table$create(batch, batch, batch)
expect_equal(tab$schema, batch$schema)
expect_equal(tab$num_rows, 6L)
expect_equal(
as.data.frame(tab),
vctrs::vec_rbind(as.data.frame(batch), as.data.frame(batch), as.data.frame(batch))
)
batches <- list(batch, batch, batch)
tab <- Table$create(!!!batches)
expect_equal(tab$schema, batch$schema)
expect_equal(tab$num_rows, 6L)
expect_equal(
as.data.frame(tab),
vctrs::vec_rbind(!!!purrr::map(batches, as.data.frame))
)
})
test_that("table() handles ... of arrays, chunked arrays, vectors", {
a <- Array$create(1:10)
ca <- chunked_array(1:5, 6:10)
v <- rnorm(10)
tbl <- tibble::tibble(x = 1:10, y = letters[1:10])
tab <- Table$create(a = a, b = ca, c = v, !!!tbl)
expect_equal(
tab$schema,
schema(a = int32(), b = int32(), c = float64(), x = int32(), y = utf8())
)
res <- as.data.frame(tab)
expect_equal(names(res), c("a", "b", "c", "x", "y"))
expect_equal(
res,
tibble::tibble(a = 1:10, b = 1:10, c = v, x = 1:10, y = letters[1:10])
)
})
test_that("table() auto splices (ARROW-5718)", {
df <- tibble::tibble(x = 1:10, y = letters[1:10])
tab1 <- Table$create(df)
tab2 <- Table$create(!!!df)
expect_equal(tab1, tab2)
expect_equal(tab1$schema, schema(x = int32(), y = utf8()))
expect_equal(as.data.frame(tab1), df)
s <- schema(x = float64(), y = utf8())
tab3 <- Table$create(df, schema = s)
tab4 <- Table$create(!!!df, schema = s)
expect_equal(tab3, tab4)
expect_equal(tab3$schema, s)
expect_equal(as.data.frame(tab3), df)
})
# Schema/data mismatches at construction time are reported with clear errors
# (ARROW-10953): wrong column count or wrong column name.
test_that("Validation when creating table with schema (ARROW-10953)", {
expect_error(
Table$create(data.frame(), schema = schema(a = int32())),
"incompatible. schema has 1 fields, and 0 columns are supplied",
fixed = TRUE
)
expect_error(
Table$create(data.frame(b = 1), schema = schema(a = int32())),
"field at index 1 has name 'a' != 'b'",
fixed = TRUE
)
expect_error(
Table$create(data.frame(b = 2, c = 3), schema = schema(a = int32())),
"incompatible. schema has 1 fields, and 2 columns are supplied",
fixed = TRUE
)
})
# == on Tables is symmetric value equality; all.equal/expect_equal agree with it.
test_that("==.Table", {
tab1 <- Table$create(x = 1:2, y = c("a", "b"))
tab2 <- Table$create(x = 1:2, y = c("a", "b"))
tab3 <- Table$create(x = 1:2)
tab4 <- Table$create(x = 1:2, y = c("a", "b"), z = 3:4)
expect_true(tab1 == tab2)
expect_true(tab2 == tab1)
expect_false(tab1 == tab3)
expect_false(tab3 == tab1)
expect_false(tab1 == tab4)
expect_false(tab4 == tab1)
expect_true(all.equal(tab1, tab2))
expect_equal(tab1, tab2)
})
# Schema metadata is ignored by == / Equals() unless check_metadata = TRUE.
test_that("Table$Equals(check_metadata)", {
tab1 <- Table$create(x = 1:2, y = c("a", "b"))
tab2 <- Table$create(
x = 1:2, y = c("a", "b"),
schema = tab1$schema$WithMetadata(list(some = "metadata"))
)
expect_r6_class(tab1, "Table")
expect_r6_class(tab2, "Table")
expect_false(tab1$schema$HasMetadata)
expect_true(tab2$schema$HasMetadata)
expect_identical(tab2$schema$metadata, list(some = "metadata"))
expect_true(tab1 == tab2)
expect_true(tab1$Equals(tab2))
expect_false(tab1$Equals(tab2, check_metadata = TRUE))
expect_failure(expect_equal(tab1, tab2)) # expect_equal has check_metadata=TRUE
expect_equal(tab1, tab2, ignore_attr = TRUE) # this sets check_metadata=FALSE
expect_false(tab1$Equals(24)) # Not a Table
})
# vctrs::unspecified columns map onto the Arrow null() type (ARROW-7064).
test_that("Table handles null type (ARROW-7064)", {
tab <- Table$create(a = 1:10, n = vctrs::unspecified(10))
expect_equal(tab$schema, schema(a = int32(), n = null()), ignore_attr = TRUE)
})
# Factors round-trip through dictionary types with any signed integer index type.
test_that("Can create table with specific dictionary types", {
fact <- example_data[, "fct"]
int_types <- c(int8(), int16(), int32(), int64())
# TODO: test uint types when format allows
# uint_types <- c(uint8(), uint16(), uint32(), uint64()) # nolint
for (i in int_types) {
sch <- schema(fct = dictionary(i, utf8()))
tab <- Table$create(fact, schema = sch)
expect_equal(sch, tab$schema)
if (i != int64()) {
# TODO: same downcast to int32 as we do for int64() type elsewhere
expect_identical(as.data.frame(tab), fact)
}
}
})
# Chunks with differing factor levels unify into one combined level set on
# conversion back to R (ARROW-8374).
test_that("Table unifies dictionary on conversion back to R (ARROW-8374)", {
b1 <- record_batch(f = factor(c("a"), levels = c("a", "b")))
b2 <- record_batch(f = factor(c("c"), levels = c("c", "d")))
b3 <- record_batch(f = factor(NA, levels = "a"))
b4 <- record_batch(f = factor())
res <- tibble::tibble(f = factor(c("a", "c", NA), levels = c("a", "b", "c", "d")))
tab <- Table$create(b1, b2, b3, b4)
expect_identical(as.data.frame(tab), res)
})
# SelectColumns takes 0-based indices; out-of-range or non-integer input errors.
test_that("Table$SelectColumns()", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_equal(tab$SelectColumns(0L), Table$create(x = 1:10))
expect_error(tab$SelectColumns(2:4))
expect_error(tab$SelectColumns(""))
})
# names<- must supply exactly one character name per column.
test_that("Table name assignment", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_identical(names(tab), c("x", "y"))
names(tab) <- c("a", "b")
expect_identical(names(tab), c("a", "b"))
expect_error(names(tab) <- "f")
expect_error(names(tab) <- letters)
expect_error(names(tab) <- character(0))
expect_error(names(tab) <- NULL)
expect_error(names(tab) <- c(TRUE, FALSE))
})
# Only length-1 values recycle; other length mismatches are an error.
test_that("Table$create() with different length columns", {
msg <- "All columns must have the same length"
expect_error(Table$create(a = 1:5, b = 1:6), msg)
})
test_that("Table$create() scalar recycling with vectors", {
expect_data_frame(
Table$create(a = 1:10, b = 5),
tibble::tibble(a = 1:10, b = 5)
)
})
# Recycling also applies to length-1 Scalar, Array, and ChunkedArray inputs.
test_that("Table$create() scalar recycling with Scalars, Arrays, and ChunkedArrays", {
expect_data_frame(
Table$create(a = Array$create(1:10), b = Scalar$create(5)),
tibble::tibble(a = 1:10, b = 5)
)
expect_data_frame(
Table$create(a = Array$create(1:10), b = Array$create(5)),
tibble::tibble(a = 1:10, b = 5)
)
expect_data_frame(
Table$create(a = Array$create(1:10), b = ChunkedArray$create(5)),
tibble::tibble(a = 1:10, b = 5)
)
})
# Data-frame inputs never recycle: row counts must match exactly.
test_that("Table$create() no recycling with tibbles", {
expect_error(
Table$create(
tibble::tibble(a = 1:10, b = 5),
tibble::tibble(a = 1, b = 5)
),
regexp = "All input tibbles or data.frames must have the same number of rows"
)
expect_error(
Table$create(
tibble::tibble(a = 1:10, b = 5),
tibble::tibble(a = 1)
),
regexp = "All input tibbles or data.frames must have the same number of rows"
)
})
# concat_tables(): strict schema match unless unify_schemas = TRUE, in which
# case compatible schemas merge and missing columns fill with NA.
test_that("Tables can be combined with concat_tables()", {
expect_error(
concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = FALSE),
regexp = "Schema at index 2 does not match the first schema"
)
expect_error(
concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = TRUE),
regexp = "Unable to merge: Field a has incompatible types: int32 vs string"
)
expect_error(
concat_tables(),
regexp = "Must pass at least one Table"
)
expect_equal(
concat_tables(
arrow_table(a = 1:5),
arrow_table(a = 6:7, b = c("d", "e"))
),
arrow_table(a = 1:7, b = c(rep(NA, 5), "d", "e"))
)
# concat_tables() with one argument returns identical table
expected <- arrow_table(a = 1:10)
expect_equal(expected, concat_tables(expected))
})
# rbind on Tables requires identical schemas and stacks rows like the
# data.frame equivalent; empty or single arguments are identity operations.
test_that("Table supports rbind", {
expect_error(
rbind(arrow_table(a = 1:10), arrow_table(a = c("a", "b"))),
regexp = "Schema at index 2 does not match the first schema"
)
tables <- list(
arrow_table(a = 1:10, b = Scalar$create("x")),
arrow_table(a = 2:42, b = Scalar$create("y")),
arrow_table(a = 8:10, b = Scalar$create("z"))
)
expected <- Table$create(do.call(rbind, lapply(tables, as.data.frame)))
actual <- do.call(rbind, tables)
expect_equal(actual, expected, ignore_attr = TRUE)
# rbind with empty table produces identical table
expected <- arrow_table(a = 1:10, b = Scalar$create("x"))
expect_equal(
rbind(expected, arrow_table(a = integer(0), b = character(0))),
expected
)
# rbind() with one argument returns identical table
expect_equal(rbind(expected), expected)
})
# cbind on Tables appends columns from tables, arrays, chunked arrays,
# data.frames, factors, and scalars; non-scalar inputs must match in rows and
# bare vector/array arguments must be named.
test_that("Table supports cbind", {
expect_snapshot_error(
cbind(
arrow_table(a = 1:10),
arrow_table(a = c("a", "b"))
)
)
expect_error(
cbind(arrow_table(a = 1:10), arrow_table(b = character(0))),
regexp = "Non-scalar inputs must have an equal number of rows"
)
actual <- cbind(
arrow_table(a = 1:10, b = Scalar$create("x")),
arrow_table(a = 11:20, b = Scalar$create("y")),
arrow_table(c = 1:10)
)
expected <- arrow_table(cbind(
tibble::tibble(a = 1:10, b = "x"),
tibble::tibble(a = 11:20, b = "y"),
tibble::tibble(c = 1:10)
))
expect_equal(actual, expected, ignore_attr = TRUE)
# cbind() with one argument returns identical table
expected <- arrow_table(a = 1:10)
expect_equal(expected, cbind(expected))
# Handles Arrow arrays and chunked arrays
expect_equal(
cbind(arrow_table(a = 1:2), b = Array$create(4:5)),
arrow_table(a = 1:2, b = 4:5)
)
expect_equal(
cbind(arrow_table(a = 1:2), b = chunked_array(4, 5)),
arrow_table(a = 1:2, b = chunked_array(4, 5))
)
# Handles data.frame
if (getRversion() >= "4.0.0") {
# Prior to R 4.0, cbind would short-circuit to the data.frame implementation
# if **any** of the arguments are a data.frame.
expect_equal(
cbind(arrow_table(a = 1:2), data.frame(b = 4:5)),
arrow_table(a = 1:2, b = 4:5)
)
}
# Handles factors
expect_equal(
cbind(arrow_table(a = 1:2), b = factor(c("a", "b"))),
arrow_table(a = 1:2, b = factor(c("a", "b")))
)
# Handles scalar values
expect_equal(
cbind(arrow_table(a = 1:2), b = "x"),
arrow_table(a = 1:2, b = c("x", "x"))
)
# Handles zero rows
expect_equal(
cbind(arrow_table(a = character(0)), b = Array$create(numeric(0)), c = integer(0)),
arrow_table(a = character(0), b = numeric(0), c = integer(0)),
)
# Rejects unnamed arrays, even in cases where no named arguments are passed
expect_error(
cbind(arrow_table(a = 1:2), b = 3:4, 5:6),
regexp = "Vector and array arguments must have names"
)
expect_error(
cbind(arrow_table(a = 1:2), 3:4, 5:6),
regexp = "Vector and array arguments must have names"
)
})
# cbind may mix Tables and RecordBatches, but only under R >= 4.0 dispatch.
test_that("cbind.Table handles record batches and tables", {
# R 3.6 cbind dispatch rules cause cbind to fall back to default impl if
# there are multiple arguments with distinct cbind implementations
skip_if(getRversion() < "4.0.0", "R 3.6 cbind dispatch rules prevent this behavior")
expect_equal(
cbind(arrow_table(a = 1L:2L), record_batch(b = 4:5)),
arrow_table(a = 1L:2L, b = 4:5)
)
})
# dplyr grouping metadata survives the round trip into a Table
# (ARROW-11769/ARROW-17085).
test_that("ARROW-11769/ARROW-17085 - grouping preserved in table creation", {
skip_if_not_available("dataset")
tbl <- tibble::tibble(
int = 1:10,
fct = factor(rep(c("A", "B"), 5)),
fct2 = factor(rep(c("C", "D"), each = 5)),
)
expect_identical(
tbl %>%
Table$create() %>%
dplyr::group_vars(),
dplyr::group_vars(tbl)
)
expect_identical(
tbl %>%
dplyr::group_by(fct, fct2) %>%
Table$create() %>%
dplyr::group_vars(),
c("fct", "fct2")
)
})
# length() of a Table counts columns, matching data.frame semantics (ARROW-12729).
test_that("ARROW-12729 - length returns number of columns in Table", {
tbl <- tibble::tibble(
int = 1:10,
fct = factor(rep(c("A", "B"), 5)),
fct2 = factor(rep(c("C", "D"), each = 5)),
)
tab <- Table$create(!!!tbl)
expect_identical(length(tab), 3L)
})
# as_arrow_table() is identity on Tables; schema= triggers a cast.
test_that("as_arrow_table() works for Table", {
table <- arrow_table(col1 = 1L, col2 = "two")
expect_identical(as_arrow_table(table), table)
expect_equal(
as_arrow_table(table, schema = schema(col1 = float64(), col2 = string())),
arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
)
})
# as_arrow_table() converts a RecordBatch to an equivalent Table.
test_that("as_arrow_table() works for RecordBatch", {
table <- arrow_table(col1 = 1L, col2 = "two")
batch <- record_batch(col1 = 1L, col2 = "two")
expect_equal(as_arrow_table(batch), table)
expect_equal(
as_arrow_table(batch, schema = schema(col1 = float64(), col2 = string())),
arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
)
})
# as_arrow_table() converts data frames, honoring an explicit schema.
test_that("as_arrow_table() works for data.frame()", {
table <- arrow_table(col1 = 1L, col2 = "two")
tbl <- tibble::tibble(col1 = 1L, col2 = "two")
expect_equal(as_arrow_table(tbl), table)
expect_equal(
as_arrow_table(
tbl,
schema = schema(col1 = float64(), col2 = string())
),
arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
)
})
# Inputs without a method raise a classed error callers can catch.
test_that("as_arrow_table() errors for invalid input", {
expect_error(
as_arrow_table("no as_arrow_table() method"),
class = "arrow_no_method_as_arrow_table"
)
})
# Counts exceeding .Machine$integer.max are returned as doubles rather than
# overflowing; gated behind a large-memory test flag.
test_that("num_rows method not susceptible to integer overflow", {
skip_if_not_running_large_memory_tests()
small_array <- Array$create(raw(1))
big_array <- Array$create(raw(.Machine$integer.max))
big_chunked_array <- chunked_array(big_array, small_array)
# LargeString array with data buffer > MAX_INT32
big_string_array <- Array$create(make_big_string())
small_table <- Table$create(col = small_array)
big_table <- Table$create(col = big_chunked_array)
expect_type(big_array$nbytes(), "integer")
expect_type(big_chunked_array$nbytes(), "double")
expect_type(length(big_array), "integer")
expect_type(length(big_chunked_array), "double")
expect_type(small_table$num_rows, "integer")
expect_type(big_table$num_rows, "double")
expect_identical(big_string_array$data()$buffers[[3]]$size, 2148007936)
})
|
#THIS SCRIPT SAVES .RDATA OF EACH NETWORK
#' Build, cluster, and plot an undirected igraph network (MG variant).
#'
#' @param nodes data.frame of vertices; must have a `Description` column
#'   (transliterated to ASCII for plotting) with the vertex id first.
#' @param edges data.frame whose first three columns are the two endpoints
#'   and a `used` weight; edges with `used < 1` are dropped.
#' @return The igraph object with size/color/label/layout attributes set
#'   (also plotted twice as a side effect, before and after styling).
create_network_mg <- function(nodes, edges) {
  library(stringi)
  library(igraph)
  # Normalize accented characters so vertex labels render consistently.
  nodes$Description <- stri_trans_general(nodes$Description, "latin-ascii")
  colnames(edges) <- c("c1", "c2", "used")
  g <- graph_from_data_frame(edges, directed = FALSE, vertices = nodes)
  # Keep only edges that were actually used at least once.
  g <- delete.edges(g, which(E(g)$used < 1))
  plot(g)
  str(g)
  # from my paper on diversity
  # V(g)$size = log(totals[match(V(g)$name, names(totals))], base = 2) - 9
  # BUG FIX: the original read V(g)$degree, a vertex attribute never created
  # here (NULL unless `nodes` happens to carry a "degree" column); size
  # vertices by their computed degree instead.
  V(g)$size <- degree(g)
  # Color each fast-greedy community distinctly.
  fc <- fastgreedy.community(g)
  colors <- rainbow(max(membership(fc)))
  V(g)$color <- colors[membership(fc)]
  set.seed(67) # reproducible layout
  V(g)$label <- V(g)$Description
  g$layout <- layout.fruchterman.reingold(g)
  plot.igraph(g,
              vertex.label.cex = 0.5, vertex.label.font = 1,
              vertex.label.family = "Helvetica",
              vertex.label.color = "black", asp = FALSE)
  return(g)
}
#' Build the Chilean HES course network with gender/STEM/score/community
#' vertex attributes.
#' NOTE(review): depends on get_demre()/get_stem() defined elsewhere and on a
#' fixed column layout of `nodes` (see `ix` below) -- verify before reuse.
create_network <- function(nodes, edges){
library(stringi)
library(igraph)
library(RColorBrewer)
#working with nodes
chile_1217<-nodes
demre<-get_demre()
stem<- get_stem(demre)
# Attach STEM classification by DEMRE programme code; unmatched rows keep NA.
chile_1217<-merge(chile_1217,stem,by.x ='ID',by.y = 'DEMRE.Code',all.x=T)
data.ed<-chile_1217
ix <- 3:31 #This depends on the dataframe, so be careful
data.ed[ix] <- lapply(data.ed[ix], as.numeric)
# Strip dots from column names (e.g. "Gender.2017.Std" -> "Gender2017Std"-style).
names(data.ed)<-gsub('\\.',"",names(data.ed))
# Log-betweenness; a betweenness of 0 produces -Inf, which is recoded to NA.
data.ed$lbetw<-log(data.ed$betweeness)
data.ed$lbetw[is.infinite(data.ed$lbetw)]<-NA
#working with edges
# Keep only edges flagged y == 1, then drop the flag column.
chile_1217<-subset(edges,y==1,select = -y)
rownames(chile_1217)<-1:length(chile_1217$i)
chile_1217$i<-as.factor(chile_1217$i);chile_1217$j<-as.factor(chile_1217$j)
g<-graph_from_edgelist(as.matrix(chile_1217),directed = F)
#V(g)$name<-data.ed$Description[match(V(g)$name,data.ed$ID)]
# check factors!
# Replace vertex ids with human-readable descriptions.
V(g)$name<-as.character(data.ed$Description[match(V(g)$name,as.character(data.ed$ID))])
# Look up per-vertex covariates by description (assumes descriptions are
# unique keys -- TODO confirm).
V(g)$gender<-data.ed$Gender2017_Std[match(V(g)$name,data.ed$Description)]
V(g)$stem<-data.ed$STEM[match(V(g)$name,data.ed$Description)]
# Label size grows with sqrt(degree); near-invisible below the 0.75 cutoff.
V(g)$label.cex=(sqrt(degree(g)))/4;V(g)$label.cex[V(g)$label.cex<0.75]=0.00001
V(g)$score<-data.ed$Scores2017_Std[match(V(g)$name,data.ed$Description)]
V(g)$community<-data.ed$Community[match(V(g)$name,data.ed$Description)]
components = V(g)$gender
#my.col <- colorRampPalette(rev(brewer.pal(11, "RdBu")))(diff(range(V(g)$gender)*2))
# NOTE(review): this palette function is created but never applied; the
# gender coloring below is commented out.
my.col <- colorRampPalette(rev(brewer.pal(11, "RdBu")))
#V(g)$color_gender <- my.col[(components+ abs(min ( components ,na.rm = T))+1)*2]#COLOUR
# Two-color scheme for STEM ('No' -> first color, otherwise second).
components = V(g)$stem
my.col <- colorRampPalette(brewer.pal(3, "RdBu"))(length(unique(range(V(g)$stem))))
V(g)$color_stem <- my.col[ifelse(components=='No',1,2)]#COLOUR
# Shape encodes STEM as well: circle for 'Yes', square otherwise.
components = V(g)$stem
my.col <- c('circle','square')
V(g)$shape_stem <- my.col[ifelse(components=='Yes',1,2)]#COLOUR
#components = V(g)$score
#my.col <- colorRampPalette(rev(brewer.pal(6, "RdBu")))(diff(range(V(g)$score)))
#V(g)$color_score <- my.col[(components+ abs(min ( components ,na.rm = T))+1)]#COLOUR
# One palette entry per community; indexes assume integer community ids.
components = V(g)$community
my.col <- colorRampPalette(brewer.pal(11, "RdBu"))(length(unique(V(g)$community)))
V(g)$color_comunity <- my.col[components]#COLOUR
return(g)
}
#CHILE 2006-2011
# Build and save the 2006-2011 network. Paths are machine-specific absolute
# Dropbox locations -- adjust before running elsewhere.
# BUG FIX: the originals contained "\ " inside the string literals, which is
# an unrecognized escape sequence in R and fails to parse; R strings take
# spaces literally, so the backslashes are removed.
edges <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 1/Adjancency_Chile_0611_Jan2020.csv")
nodes <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 1/ChileData_0611_Jan2020.csv", header=TRUE)
ches0611 <- create_network(nodes,edges)
save(ches0611, file = "data/ches0611.RData")
#CHILE 2012-2017################
edges <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 2/Adjancency_Chile_1217_Jan2020.csv")
nodes <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 2/ChileData_1217_Jan2020.csv", header=TRUE)
ches1217 <- create_network(nodes,edges)
save(ches1217, file = "data/ches1217.RData")
#testing visNetwork
#from notebook
| /utils/SAVE_NETWORKS.R | no_license | mguevara/HES | R | false | false | 4,149 | r | #THIS SCRIPT SAVES .RDATA OF EACH NETWORK
#' Build, cluster, and plot an undirected igraph network (MG variant).
#'
#' @param nodes data.frame of vertices; must have a `Description` column
#'   (transliterated to ASCII for plotting) with the vertex id first.
#' @param edges data.frame whose first three columns are the two endpoints
#'   and a `used` weight; edges with `used < 1` are dropped.
#' @return The igraph object with size/color/label/layout attributes set
#'   (also plotted twice as a side effect, before and after styling).
create_network_mg <- function(nodes, edges) {
  library(stringi)
  library(igraph)
  # Normalize accented characters so vertex labels render consistently.
  nodes$Description <- stri_trans_general(nodes$Description, "latin-ascii")
  colnames(edges) <- c("c1", "c2", "used")
  g <- graph_from_data_frame(edges, directed = FALSE, vertices = nodes)
  # Keep only edges that were actually used at least once.
  g <- delete.edges(g, which(E(g)$used < 1))
  plot(g)
  str(g)
  # from my paper on diversity
  # V(g)$size = log(totals[match(V(g)$name, names(totals))], base = 2) - 9
  # BUG FIX: the original read V(g)$degree, a vertex attribute never created
  # here (NULL unless `nodes` happens to carry a "degree" column); size
  # vertices by their computed degree instead.
  V(g)$size <- degree(g)
  # Color each fast-greedy community distinctly.
  fc <- fastgreedy.community(g)
  colors <- rainbow(max(membership(fc)))
  V(g)$color <- colors[membership(fc)]
  set.seed(67) # reproducible layout
  V(g)$label <- V(g)$Description
  g$layout <- layout.fruchterman.reingold(g)
  plot.igraph(g,
              vertex.label.cex = 0.5, vertex.label.font = 1,
              vertex.label.family = "Helvetica",
              vertex.label.color = "black", asp = FALSE)
  return(g)
}
#' Build the Chilean HES course network with gender/STEM/score/community
#' vertex attributes.
#' NOTE(review): depends on get_demre()/get_stem() defined elsewhere and on a
#' fixed column layout of `nodes` (see `ix` below) -- verify before reuse.
create_network <- function(nodes, edges){
library(stringi)
library(igraph)
library(RColorBrewer)
#working with nodes
chile_1217<-nodes
demre<-get_demre()
stem<- get_stem(demre)
# Attach STEM classification by DEMRE programme code; unmatched rows keep NA.
chile_1217<-merge(chile_1217,stem,by.x ='ID',by.y = 'DEMRE.Code',all.x=T)
data.ed<-chile_1217
ix <- 3:31 #This depends on the dataframe, so be careful
data.ed[ix] <- lapply(data.ed[ix], as.numeric)
# Strip dots from column names.
names(data.ed)<-gsub('\\.',"",names(data.ed))
# Log-betweenness; a betweenness of 0 produces -Inf, which is recoded to NA.
data.ed$lbetw<-log(data.ed$betweeness)
data.ed$lbetw[is.infinite(data.ed$lbetw)]<-NA
#working with edges
# Keep only edges flagged y == 1, then drop the flag column.
chile_1217<-subset(edges,y==1,select = -y)
rownames(chile_1217)<-1:length(chile_1217$i)
chile_1217$i<-as.factor(chile_1217$i);chile_1217$j<-as.factor(chile_1217$j)
g<-graph_from_edgelist(as.matrix(chile_1217),directed = F)
#V(g)$name<-data.ed$Description[match(V(g)$name,data.ed$ID)]
# check factors!
# Replace vertex ids with human-readable descriptions.
V(g)$name<-as.character(data.ed$Description[match(V(g)$name,as.character(data.ed$ID))])
# Look up per-vertex covariates by description (assumes descriptions are
# unique keys -- TODO confirm).
V(g)$gender<-data.ed$Gender2017_Std[match(V(g)$name,data.ed$Description)]
V(g)$stem<-data.ed$STEM[match(V(g)$name,data.ed$Description)]
# Label size grows with sqrt(degree); near-invisible below the 0.75 cutoff.
V(g)$label.cex=(sqrt(degree(g)))/4;V(g)$label.cex[V(g)$label.cex<0.75]=0.00001
V(g)$score<-data.ed$Scores2017_Std[match(V(g)$name,data.ed$Description)]
V(g)$community<-data.ed$Community[match(V(g)$name,data.ed$Description)]
components = V(g)$gender
#my.col <- colorRampPalette(rev(brewer.pal(11, "RdBu")))(diff(range(V(g)$gender)*2))
# NOTE(review): this palette function is created but never applied; the
# gender coloring below is commented out.
my.col <- colorRampPalette(rev(brewer.pal(11, "RdBu")))
#V(g)$color_gender <- my.col[(components+ abs(min ( components ,na.rm = T))+1)*2]#COLOUR
# Two-color scheme for STEM ('No' -> first color, otherwise second).
components = V(g)$stem
my.col <- colorRampPalette(brewer.pal(3, "RdBu"))(length(unique(range(V(g)$stem))))
V(g)$color_stem <- my.col[ifelse(components=='No',1,2)]#COLOUR
# Shape encodes STEM as well: circle for 'Yes', square otherwise.
components = V(g)$stem
my.col <- c('circle','square')
V(g)$shape_stem <- my.col[ifelse(components=='Yes',1,2)]#COLOUR
#components = V(g)$score
#my.col <- colorRampPalette(rev(brewer.pal(6, "RdBu")))(diff(range(V(g)$score)))
#V(g)$color_score <- my.col[(components+ abs(min ( components ,na.rm = T))+1)]#COLOUR
# One palette entry per community; indexes assume integer community ids.
components = V(g)$community
my.col <- colorRampPalette(brewer.pal(11, "RdBu"))(length(unique(V(g)$community)))
V(g)$color_comunity <- my.col[components]#COLOUR
return(g)
}
#CHILE 2006-2011
# Build and save the 2006-2011 network. Paths are machine-specific absolute
# Dropbox locations -- adjust before running elsewhere.
# BUG FIX: the originals contained "\ " inside the string literals, which is
# an unrecognized escape sequence in R and fails to parse; R strings take
# spaces literally, so the backslashes are removed.
edges <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 1/Adjancency_Chile_0611_Jan2020.csv")
nodes <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 1/ChileData_0611_Jan2020.csv", header=TRUE)
ches0611 <- create_network(nodes,edges)
save(ches0611, file = "data/ches0611.RData")
#CHILE 2012-2017################
edges <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 2/Adjancency_Chile_1217_Jan2020.csv")
nodes <- read.csv("~/Dropbox/UPLA/INVESTIGACION/PROYECTOS/FONDEF CHES/Chilean Education Projects/Dropouts/Data/Chilean Network 2/ChileData_1217_Jan2020.csv", header=TRUE)
ches1217 <- create_network(nodes,edges)
save(ches1217, file = "data/ches1217.RData")
#testing visNetwork
#from notebook
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/computeSpillmat.R
\docType{methods}
\name{computeSpillmat}
\alias{computeSpillmat}
\alias{computeSpillmat,dbFrame-method}
\title{Compute spillover matrix}
\usage{
computeSpillmat(x, ...)
\S4method{computeSpillmat}{dbFrame}(x, method = "default",
interactions = "default", trim = 0.5, th = 1e-05)
}
\arguments{
\item{x}{a \code{\link{dbFrame}}.}
\item{...}{optional arguments.}
\item{method}{\code{"default"} or \code{"classic"}. Specifies the function
to be used for spillover estimation (see below for details).}
\item{interactions}{\code{"default"} or \code{"all"}. Specifies which interactions spillover
should be estimated for. The default exclusively takes into consideration
interactions that are sensible from a chemical and physical point of view
(see below for more details).}
\item{trim}{numeric. Specifies the trim value used for estimation of spill values.
Note that \code{trim = 0.5} is equivalent to using medians.}
\item{th}{single non-negative numeric. Specifies the threshold value
below which spill estimates will be set to 0.}
}
\value{
Returns a square compensation matrix with dimensions and dimension names
matching those of the input flowFrame. Spillover is assumed to be linear,
and, on the basis of their additive nature, spillover values are computed
independently for each interacting pair of channels.
}
\description{
Computes a spillover matrix from a priori
identified single-positive populations.
}
\details{
The \code{default} method estimates the spillover as the median ratio
between the unstained spillover receiving and the stained spillover
emitting channel in the corresponding single stained populations.
\code{method = "classic"} will compute the slope of a line through
the medians (or trimmed means) of stained and unstained populations.
The medians (or trimmed means) computed from events that are i) negative
in the respective channels; and, ii) not assigned to interacting channels;
and, iii) not unassigned are subtracted as to account for background.
\code{interactions="default"} considers only expected interactions, that is,
M+/-1 (detection sensitivity), M+16 (oxide formation) and channels measuring
metals that are potentially contaminated by isotopic impurities
(see reference below and \code{\link{isotope_list}}).
\code{interactions="all"} will estimate spill for all n x n - n
interactions, where n denotes the number of single-color controls
(= \code{nrow(bc_key(re))}).
}
\examples{
# get single-stained control samples
data(ss_exp)
# specify mass channels stained for
bc_ms <- c(139, 141:156, 158:176)
# debarcode single-positive populations
re <- assignPrelim(x = ss_exp, y = bc_ms)
re <- estCutoffs(x = re)
re <- applyCutoffs(x = re)
head(computeSpillmat(x = re))
}
\references{
Coursey, J.S., Schwab, D.J., Tsai, J.J., Dragoset, R.A. (2015).
Atomic weights and isotopic compositions,
(available at http://physics.nist.gov/Comp).
}
\author{
Helena Lucia Crowell \email{crowellh@student.ethz.ch}
}
| /man/computeSpillmat.Rd | no_license | lmweber/CATALYST | R | false | true | 3,084 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/computeSpillmat.R
\docType{methods}
\name{computeSpillmat}
\alias{computeSpillmat}
\alias{computeSpillmat,dbFrame-method}
\title{Compute spillover matrix}
\usage{
computeSpillmat(x, ...)
\S4method{computeSpillmat}{dbFrame}(x, method = "default",
interactions = "default", trim = 0.5, th = 1e-05)
}
\arguments{
\item{x}{a \code{\link{dbFrame}}.}
\item{...}{optional arguments.}
\item{method}{\code{"default"} or \code{"classic"}. Specifies the function
to be used for spillover estimation (see below for details).}
\item{interactions}{\code{"default"} or \code{"all"}. Specifies which interactions spillover
should be estimated for. The default exclusively takes into consideration
interactions that are sensible from a chemical and physical point of view
(see below for more details).}
\item{trim}{numeric. Specifies the trim value used for estimation of spill values.
Note that \code{trim = 0.5} is equivalent to using medians.}
\item{th}{single non-negative numeric. Specifies the threshold value
below which spill estimates will be set to 0.}
}
\value{
Returns a square compensation matrix with dimensions and dimension names
matching those of the input flowFrame. Spillover is assumed to be linear,
and, on the basis of their additive nature, spillover values are computed
independently for each interacting pair of channels.
}
\description{
Computes a spillover matrix from a priori
identified single-positive populations.
}
\details{
The \code{default} method estimates the spillover as the median ratio
between the unstained spillover receiving and the stained spillover
emitting channel in the corresponding single stained populations.
\code{method = "classic"} will compute the slope of a line through
the medians (or trimmed means) of stained and unstained populations.
The medians (or trimmed means) computed from events that are i) negative
in the respective channels; and, ii) not assigned to interacting channels;
and, iii) not unassigned are subtracted as to account for background.
\code{interactions="default"} considers only expected interactions, that is,
M+/-1 (detection sensitivity), M+16 (oxide formation) and channels measuring
metals that are potentially contaminated by isotopic impurities
(see reference below and \code{\link{isotope_list}}).
\code{interactions="all"} will estimate spill for all n x n - n
interactions, where n denotes the number of single-color controls
(= \code{nrow(bc_key(re))}).
}
\examples{
# get single-stained control samples
data(ss_exp)
# specify mass channels stained for
bc_ms <- c(139, 141:156, 158:176)
# debarcode single-positive populations
re <- assignPrelim(x = ss_exp, y = bc_ms)
re <- estCutoffs(x = re)
re <- applyCutoffs(x = re)
head(computeSpillmat(x = re))
}
\references{
Coursey, J.S., Schwab, D.J., Tsai, J.J., Dragoset, R.A. (2015).
Atomic weights and isotopic compositions,
(available at http://physics.nist.gov/Comp).
}
\author{
Helena Lucia Crowell \email{crowellh@student.ethz.ch}
}
|
# layer_markers() with make = TRUE and a grouping column should yield a
# ggplot ("gg") object.
test_that("layer_markers works", {
  shp_path <- system.file("shape/nc.shp", package = "sf")
  counties <- sf::st_read(shp_path)
  counties <- dplyr::mutate(
    counties,
    category = dplyr::case_when(
      AREA > 0.15 ~ "larger",
      AREA <= 0.15 ~ "smaller"
    )
  )
  marker_plot <- ggplot() +
    layer_markers(data = counties, make = TRUE, groupname_col = "category")
  expect_s3_class(marker_plot, "gg")
  # expect_snapshot(
  #   ggplot2::summarise_layout(ggplot2::ggplot_build(marker_plot))
  # )
})
| /tests/testthat/test-layer_markers.R | permissive | elipousson/maplayer | R | false | false | 507 | r | test_that("layer_markers works", {
nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"))
nc <-
dplyr::mutate(
nc,
category = dplyr::case_when(
AREA > 0.15 ~ "larger",
AREA <= 0.15 ~ "smaller"
)
)
plot <-
ggplot() +
layer_markers(
data = nc,
make = TRUE,
groupname_col = "category"
)
expect_s3_class(
plot,
"gg"
)
# expect_snapshot(
# ggplot2::summarise_layout(ggplot2::ggplot_build(plot))
# )
})
|
#' Partition Train And Predict
#'
#' Partitions the study sample, fits a SuperLearner ensemble on the training
#' partition, gridsearches optimal probability breaks, refits on the pooled
#' training + validation data, and returns continuous and binned predictions
#' for every partition.
#' @param study.sample Data frame. The study sample. No default.
#' @param outcome.variable.name Character vector of length 1. The name of the outcome variable of interest. Defaults to "s30d".
#' @param model.names Character vector. The model names to stack in SuperLearner. Defaults to c("SL.randomForest", "SL.nnet").
#' @param cvControl List. cvControl parameter for SuperLearner::SuperLearner. Defaults to list().
#' @param sample Logical vector of length 1. If TRUE only 10% of breaks are gridsearched. Defaults to FALSE.
#' @param n.partitions Numeric vector of length 1. The number of partitions to create with PartitionSample. Accepted values are 2 or 3. If 2, a train and test set is created. If 3, train, validation, and test sets are created - the model is fitted on the training set, optimal breaks are gridsearched on the validation set, and the model is tested on the test set. Defaults to 3.
#' @param save.sample.predictions Logical. If TRUE SuperLearner predictions, outcome and tc in each partition are saved to the results list. Defaults to TRUE.
#' @param boot.sample Logical vector of length 1. If TRUE the run is treated as a bootstrap sample, meaning e.g. that the SuperLearner object is not saved to disk. Defaults to FALSE.
#' @param verbose Logical. If TRUE the modelling process is printed to console. Defaults to FALSE.
#' @param return.partitions Logical vector of length 1. If TRUE the list of feature sets partitioned from the study.sample is returned. Defaults to FALSE.
#' @param use.fitted.sl Logical vector of length 1. If TRUE a previously fitted SuperLearner saved to disk would be reused. Currently unused (see commented-out code below). Defaults to FALSE.
#' @export
PartitionTrainAndPredict <- function(study.sample,
                                     outcome.variable.name = "s30d",
                                     model.names = c("SL.randomForest", "SL.nnet"),
                                     cvControl = list(),
                                     sample = FALSE,
                                     n.partitions = 3,
                                     save.sample.predictions = TRUE,
                                     boot.sample = FALSE,
                                     verbose = FALSE,
                                     return.partitions = FALSE,
                                     use.fitted.sl = FALSE) {
    ## Error handling (messages fixed to name the right argument).
    if (!is.data.frame(study.sample))
        stop("study.sample must be a data frame")
    if (!bengaltiger::IsLength1(outcome.variable.name) || !is.character(outcome.variable.name))
        stop("outcome.variable.name must be a character vector of length 1")
    if (!(n.partitions %in% c(2, 3)))
        stop("Argument n.partitions must be either 2 or 3.")
    ## Partition the sample; each partition holds features (x), the outcome
    ## (y), and the clinicians' triage category (tc).
    partitions <- PartitionSample(study.sample = study.sample,
                                  outcome.variable.name = outcome.variable.name,
                                  n.partitions = n.partitions)
    ## Fit the ensemble to the training partition only.
    message("Fitting SuperLearner to training set...")
    fitted.sl <- with(partitions, SuperLearner::SuperLearner(Y = train$y, X = train$x,
                                                             family = binomial(),
                                                             SL.library = model.names,
                                                             method = "method.AUC",
                                                             cvControl = cvControl,
                                                             verbose = verbose))
    ## Everything except the held-out test set.
    train.validation <- partitions[-grep("test", names(partitions))]
    con.list.labels <- paste0("con.model.", names(train.validation))
    ## Continuous predictions on the non-test partitions.
    predictions <- lapply(setNames(train.validation, nm = con.list.labels),
                          function(partition.list) predict(object = fitted.sl,
                                                           newdata = partition.list$x,
                                                           onlySL = TRUE)$pred)
    ## With two partitions the breaks are searched on the training set itself;
    ## with three, on the separate validation set. (Scalar condition, so plain
    ## if/else rather than ifelse().)
    label <- if (n.partitions == 2) "train" else "validation"
    message(paste("Finding optimal breaks for continuous probabilities on the", label, "set..."))
    optimal.breaks <- GridsearchBreaks(predictions = predictions[grepl(label, con.list.labels)][[1]],
                                       outcome.vector = partitions[[label]]$y, sample = sample)
    if (!boot.sample)
        suppressMessages({
            bengaltiger::SaveToResults(optimal.breaks, paste0(outcome.variable.name, ".optimal.breaks"))
        })
    ## Pool train (+ validation) into one set and refit before testing.
    full.training.list <- list(y = unlist(lapply(train.validation, "[[", "y")),
                               x = do.call(rbind, lapply(train.validation, "[[", "x")))
    message("Re-fitting SuperLearner on full training + validation set...")
    fitted.sl <- with(full.training.list, SuperLearner::SuperLearner(Y = y, X = x,
                                                                     family = binomial(),
                                                                     SL.library = model.names,
                                                                     method = "method.AUC",
                                                                     cvControl = cvControl,
                                                                     verbose = verbose))
    sl.object.file <- paste0("SuperLearner_", outcome.variable.name, ".rds")
    ## Persist the fitted ensemble unless this is a bootstrap run.
    if (!boot.sample) {
        saveRDS(fitted.sl, file = sl.object.file)
        if (verbose)
            message(paste0("SuperLearner object saved to disk as ", sl.object.file, "..."))
    }
    ## Predict on the held-out test set with the refitted ensemble.
    predictions$con.model.test <- predict(object = fitted.sl, newdata = partitions$test$x, onlySL = TRUE)$pred
    ## Bin continuous predictions into four triage categories using the
    ## gridsearched breaks.
    ## BUG FIX: derive the cut labels from the prediction names instead of
    ## hard-coding three labels, which made setNames() error when
    ## n.partitions == 2 (only train + test predictions exist then).
    cut.list.labels <- sub("^con", "cut", names(predictions))
    binned.predictions <- lapply(setNames(predictions, nm = cut.list.labels), function(preds) {
        as.numeric(
            cut(x = preds,
                breaks = c(-Inf, optimal.breaks, Inf),
                labels = c("Green", "Yellow", "Orange", "Red"),
                include.lowest = TRUE)
        )
    })
    ## Helper: pull `label` ("y" or "tc") out of every partition, prefix the
    ## names with the partition name, and coerce to numeric.
    NewLabelsAndNumeric <- function(label) {
        new.labels <- paste0(label, ".", names(partitions))
        lapply(setNames(partitions, nm = new.labels),
               function(partition.list) as.numeric(partition.list[[label]]))
    }
    return.object <- list()
    return.object$predictions.list <- c(predictions, binned.predictions,
                                        NewLabelsAndNumeric("y"),
                                        NewLabelsAndNumeric("tc"))
    if (return.partitions) {
        return.object$samples <- lapply(partitions, "[[", "x")
    }
    ## Save predictions, outcome and clinicians' tc per partition to results.
    if (save.sample.predictions) {
        suppressMessages({
            bengaltiger::SaveToResults(return.object, paste0(outcome.variable.name, ".results"))
        })
    }
    return(return.object)
}
## fitted.sl <- ""
## sl.object.file <- paste0("SuperLearner_", outcome.variable.name, ".rds")
## ## Fit the model to the training data
## if (use.fitted.sl) {
## if (file.exists(sl.object.file)) {
## message(paste0("Argument use.fitted.sl is TRUE and SuperLearner_", outcome.variable.name, " exists.", "\nSkipping initial fitting and using ", sl.object.file, "..."))
## fitted.sl <- readRDS(sl.object.file)
## } else {
## if (verbose) {
## message(paste("No", sl.object.file, "object have been saved to disk. Ignoring use.fitted.sl."))
## message("Fitting SuperLearner to training set...")
## }
## fitted.sl <- with(partitions, SuperLearner::SuperLearner(Y = train$y, X = train$x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl=cvControl,
## verbose = verbose))
## }
## } else {
## message("Fitting SuperLearner on the training set...")
## fitted.sl <- with(partitions, SuperLearner::SuperLearner(Y = train$y, X = train$x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl=cvControl,
## verbose = verbose))
## }
## ## Extract training sets
## train.validation <- partitions[-grep("test", names(partitions))]
## con.list.labels <- paste0("con.model.", names(train.validation))
## ## Make predictions on the validation set
## predictions <- lapply(setNames(train.validation, nm = con.list.labels),
## function (partition.list) predict(object = fitted.sl,
## newdata = partition.list$x,
## onlySL = TRUE)$pred)
## label <- ifelse(n.partitions == 2, "train", "validation")
## ## Gridsearch the optimal cut-points for the predicted probabilities on
## ## the appropriate partition
## optimal.breaks <- ""
## if (use.fitted.sl) {
## results.breaks <- readRDS("results.Rds")[[paste0(outcome.variable.name, ".optimal.breaks")]]
## if (is.null(results.breaks)) {
## if (verbose)
## message(paste("Finding optimal breaks for continuous probabilities on the", label, "set..."))
## optimal.breaks <- GridsearchBreaks(predictions = predictions[grepl(label, con.list.labels)][[1]],
## outcome.vector = partitions[[label]]$y, sample=sample)
## suppressMessages({
## bengaltiger::SaveToResults(optimal.breaks, paste0(outcome.variable.name, ".optimal.breaks"))
## })
## } else {
## message("\nParameter use.fitted.sl is set to True, results file contain optimal.breaks element. \nUsing those as breaks for binning continous predictions...")
## optimal.breaks <- results.breaks
## }
## } else {
## if (verbose)
## message(paste("Finding optimal breaks for continuous probabilities on the", label, "set..."))
## optimal.breaks <- GridsearchBreaks(predictions = predictions[grepl(label, con.list.labels)][[1]],
## outcome.vector = partitions[[label]]$y, sample=sample)
## if (!boot.sample)
## suppressMessages({
## bengaltiger::SaveToResults(optimal.breaks, paste0(outcome.variable.name, ".optimal.breaks"))
## })
## }
## ## Merge train-validation partition to one
## full.training.list <- list(y = unlist(lapply(train.validation, "[[", "y")),
## x = do.call(rbind, lapply(train.validation, "[[", "x")))
## if (n.partitions == 3) {
## if (use.fitted.sl) {
## if (file.exists(sl.object.file)) {
## message(paste0("\nArgument use.fitted.sl is TRUE and SuperLearner_", outcome.variable.name, " exists.", "\nSkipping refitting to training + validation set and using ", sl.object.file, "..."))
## fitted.sl <- readRDS(sl.object.file)
## } else {
## message(paste0("Argument use.fitted.sl is TRUE, but SuperLearner_", outcome.variable.name, " doe not exist.", " Re-fitting SuperLearner to training + validation set..."))
## fitted.sl <- with(full.training.list, SuperLearner::SuperLearner(Y = y, X = x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl = cvControl,
## verbose = verbose))
## saveRDS(fitted.sl, file = sl.object.file)
## if (verbose)
## message(paste0("SuperLearner object saved to disk as ", sl.object.file, "..."))
## }
## } else {
## if (verbose)
## message("Re-fitting SuperLearner on full training + validation set...")
## fitted.sl <- with(full.training.list, SuperLearner::SuperLearner(Y = y, X = x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl = cvControl,
## verbose = verbose))
## if (!boot.sample) {
## saveRDS(fitted.sl, file = sl.object.file)
## if (verbose)
## message(paste0("SuperLearner object saved to disk as ", sl.object.file, "..."))
## }
## }
## }
## ## Make predictions on the test set
## predictions$con.model.test <- predict(object = fitted.sl, newdata = partitions$test$x, onlySL = TRUE)$pred
## ## Bin predictions made on the test set using the optimal cut-points
## cut.list.labels <- paste0("cut.model.", c("train", "validation", "test"))
## binned.predictions <- lapply(setNames(predictions, nm = cut.list.labels), function (preds) {
## as.numeric(
## cut(x = preds,
## breaks = c(-Inf, optimal.breaks, Inf),
## labels = c("Green", "Yellow", "Orange", "Red"),
## include.lowest = TRUE)
## )
## })
## NewLabelsAndNumeric <- function(label) {
## new.labels <- paste0(label, ".", names(partitions))
## new.list <- lapply(setNames(partitions, nm = new.labels),
## function (partition.list) as.numeric(partition.list[[label]]))
## return (new.list)
## }
## return.object <- list()
## return.object$predictions.list <- c(predictions, binned.predictions,
## NewLabelsAndNumeric("y"),
## NewLabelsAndNumeric("tc"))
## if (return.partitions) {
## return.object$samples <- lapply(partitions, "[[", "x")
## }
## ## Save the predictions, outcome and clinicians tc in each partition to the results list
## if (save.sample.predictions) {
## suppressMessages({
## bengaltiger::SaveToResults(return.object, paste0(outcome.variable.name, ".results"))
## })
## }
##
## return (return.object)
| /R/PartitionTrainAndPredict.R | permissive | warnbergg/emett | R | false | false | 15,372 | r | #' Partition Train And Predict
#' Partition Train And Predict
#'
#' Partitions the study sample, fits a SuperLearner ensemble on the training
#' partition, gridsearches optimal cut-points for the predicted probabilities,
#' re-fits the ensemble on the merged training (+ validation) data, and makes
#' continuous and binned predictions on the test partition.
#' @param study.sample Data frame. The study sample. No default.
#' @param outcome.variable.name Character vector of length 1. The name of the outcome variable of interest. Defaults to "s30d".
#' @param model.names Character vector. The model names to stack in SuperLearner. Defaults to c("SL.randomForest", "SL.nnet").
#' @param cvControl List. cvControl parameter for SuperLearner::SuperLearner. Defaults to list().
#' @param sample Logical vector of length 1. Passed on to GridsearchBreaks; if TRUE only a subset of breaks is gridsearched. Defaults to FALSE.
#' @param n.partitions Numeric vector of length 1. The number of partitions to create with PartitionSample. Accepted values are 2 or 3. If 2, a train and test set is created. If 3, train, validation, and test sets are created - the model is fitted on the training set, optimal breaks are gridsearched on the validation set, and the model is tested on the test set. Defaults to 3.
#' @param save.sample.predictions Logical. If TRUE SuperLearner predictions, outcome and tc in each partition are saved to the results list. Defaults to TRUE.
#' @param boot.sample Logical vector of length 1. If TRUE the run is treated as a bootstrap sample, meaning e.g. that the SuperLearner object is not saved to disk. Defaults to FALSE.
#' @param verbose Logical. If TRUE the modelling process is printed to console. Defaults to FALSE.
#' @param return.partitions Logical vector of length 1. If TRUE the list of feature sets partitioned from the study.sample is returned. Defaults to FALSE.
#' @param use.fitted.sl Logical vector of length 1. Currently not referenced in the function body; kept for backwards compatibility with earlier versions. Defaults to FALSE.
#' @export
PartitionTrainAndPredict <- function(study.sample,
                                     outcome.variable.name = "s30d",
                                     model.names = c("SL.randomForest", "SL.nnet"),
                                     cvControl = list(),
                                     sample = FALSE,
                                     n.partitions = 3,
                                     save.sample.predictions = TRUE,
                                     boot.sample = FALSE,
                                     verbose = FALSE,
                                     return.partitions = FALSE,
                                     use.fitted.sl = FALSE) {
    ## Error handling
    if (!is.data.frame(study.sample))
        stop ("data must be of type data frame")
    if (!bengaltiger::IsLength1(outcome.variable.name) || !is.character(outcome.variable.name))
        stop ("outcome.variable.name must be a character vector of length 1")
    if (!(n.partitions %in% c(2, 3)))
        stop ("Argument n.partitions must be either 2 or 3.")
    ## Partition the sample, and return the separate partitions, the
    ## corresponding outcome (y) and clinicians' triage category (tc) for each
    partitions <- PartitionSample(study.sample = study.sample,
                                  outcome.variable.name = outcome.variable.name,
                                  n.partitions = n.partitions)
    ## Fit the ensemble to the training partition only
    message("Fitting SuperLearner to training set...")
    fitted.sl <- with(partitions, SuperLearner::SuperLearner(Y = train$y, X = train$x,
                                                             family = binomial(),
                                                             SL.library = model.names,
                                                             method = "method.AUC",
                                                             cvControl = cvControl,
                                                             verbose = verbose))
    ## Every partition except test, i.e. train (and validation when
    ## n.partitions == 3)
    train.validation <- partitions[-grep("test", names(partitions))]
    con.list.labels <- paste0("con.model.", names(train.validation))
    ## Continuous predictions on the non-test partitions
    predictions <- lapply(setNames(train.validation, nm = con.list.labels),
                          function (partition.list) predict(object = fitted.sl,
                                                            newdata = partition.list$x,
                                                            onlySL = TRUE)$pred)
    ## Gridsearch the probability cut-points on the validation partition when
    ## there is one, otherwise on the training partition
    label <- if (n.partitions == 2) "train" else "validation"
    message(paste("Finding optimal breaks for continuous probabilities on the", label, "set..."))
    optimal.breaks <- GridsearchBreaks(predictions = predictions[grepl(label, con.list.labels)][[1]],
                                       outcome.vector = partitions[[label]]$y, sample = sample)
    if (!boot.sample)
        suppressMessages({
            bengaltiger::SaveToResults(optimal.breaks, paste0(outcome.variable.name, ".optimal.breaks"))
        })
    ## Merge the train (and validation) partitions to one full training set
    full.training.list <- list(y = unlist(lapply(train.validation, "[[", "y")),
                               x = do.call(rbind, lapply(train.validation, "[[", "x")))
    message("Re-fitting SuperLearner on full training + validation set...")
    fitted.sl <- with(full.training.list, SuperLearner::SuperLearner(Y = y, X = x,
                                                                     family = binomial(),
                                                                     SL.library = model.names,
                                                                     method = "method.AUC",
                                                                     cvControl = cvControl,
                                                                     verbose = verbose))
    sl.object.file <- paste0("SuperLearner_", outcome.variable.name, ".rds")
    if (!boot.sample) {
        saveRDS(fitted.sl, file = sl.object.file)
        if (verbose)
            message(paste0("SuperLearner object saved to disk as ", sl.object.file, "..."))
    }
    ## Make continuous predictions on the test set
    predictions$con.model.test <- predict(object = fitted.sl, newdata = partitions$test$x, onlySL = TRUE)$pred
    ## Bin all continuous predictions using the optimal cut-points. Derive the
    ## labels from names(predictions) rather than hardcoding
    ## c("train", "validation", "test"): the hardcoded version produced three
    ## names for a two-element list when n.partitions == 2, making setNames
    ## error out.
    cut.list.labels <- sub("^con\\.", "cut.", names(predictions))
    binned.predictions <- lapply(setNames(predictions, nm = cut.list.labels), function (preds) {
        as.numeric(
            cut(x = preds,
                breaks = c(-Inf, optimal.breaks, Inf),
                labels = c("Green", "Yellow", "Orange", "Red"),
                include.lowest = TRUE)
        )
    })
    ## Extract the element called `label` ("y" or "tc") from every partition,
    ## coerced to numeric and named e.g. y.train, y.validation, y.test
    NewLabelsAndNumeric <- function(label) {
        new.labels <- paste0(label, ".", names(partitions))
        new.list <- lapply(setNames(partitions, nm = new.labels),
                           function (partition.list) as.numeric(partition.list[[label]]))
        return (new.list)
    }
    return.object <- list()
    return.object$predictions.list <- c(predictions, binned.predictions,
                                        NewLabelsAndNumeric("y"),
                                        NewLabelsAndNumeric("tc"))
    if (return.partitions) {
        return.object$samples <- lapply(partitions, "[[", "x")
    }
    ## Save the predictions, outcome and clinicians tc in each partition to the results list
    if (save.sample.predictions) {
        suppressMessages({
            bengaltiger::SaveToResults(return.object, paste0(outcome.variable.name, ".results"))
        })
    }
    return (return.object)
}
## fitted.sl <- ""
## sl.object.file <- paste0("SuperLearner_", outcome.variable.name, ".rds")
## ## Fit the model to the training data
## if (use.fitted.sl) {
## if (file.exists(sl.object.file)) {
## message(paste0("Argument use.fitted.sl is TRUE and SuperLearner_", outcome.variable.name, " exists.", "\nSkipping initial fitting and using ", sl.object.file, "..."))
## fitted.sl <- readRDS(sl.object.file)
## } else {
## if (verbose) {
## message(paste("No", sl.object.file, "object have been saved to disk. Ignoring use.fitted.sl."))
## message("Fitting SuperLearner to training set...")
## }
## fitted.sl <- with(partitions, SuperLearner::SuperLearner(Y = train$y, X = train$x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl=cvControl,
## verbose = verbose))
## }
## } else {
## message("Fitting SuperLearner on the training set...")
## fitted.sl <- with(partitions, SuperLearner::SuperLearner(Y = train$y, X = train$x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl=cvControl,
## verbose = verbose))
## }
## ## Extract training sets
## train.validation <- partitions[-grep("test", names(partitions))]
## con.list.labels <- paste0("con.model.", names(train.validation))
## ## Make predictions on the validation set
## predictions <- lapply(setNames(train.validation, nm = con.list.labels),
## function (partition.list) predict(object = fitted.sl,
## newdata = partition.list$x,
## onlySL = TRUE)$pred)
## label <- ifelse(n.partitions == 2, "train", "validation")
## ## Gridsearch the optimal cut-points for the predicted probabilities on
## ## the appropriate partition
## optimal.breaks <- ""
## if (use.fitted.sl) {
## results.breaks <- readRDS("results.Rds")[[paste0(outcome.variable.name, ".optimal.breaks")]]
## if (is.null(results.breaks)) {
## if (verbose)
## message(paste("Finding optimal breaks for continuous probabilities on the", label, "set..."))
## optimal.breaks <- GridsearchBreaks(predictions = predictions[grepl(label, con.list.labels)][[1]],
## outcome.vector = partitions[[label]]$y, sample=sample)
## suppressMessages({
## bengaltiger::SaveToResults(optimal.breaks, paste0(outcome.variable.name, ".optimal.breaks"))
## })
## } else {
## message("\nParameter use.fitted.sl is set to True, results file contain optimal.breaks element. \nUsing those as breaks for binning continous predictions...")
## optimal.breaks <- results.breaks
## }
## } else {
## if (verbose)
## message(paste("Finding optimal breaks for continuous probabilities on the", label, "set..."))
## optimal.breaks <- GridsearchBreaks(predictions = predictions[grepl(label, con.list.labels)][[1]],
## outcome.vector = partitions[[label]]$y, sample=sample)
## if (!boot.sample)
## suppressMessages({
## bengaltiger::SaveToResults(optimal.breaks, paste0(outcome.variable.name, ".optimal.breaks"))
## })
## }
## ## Merge train-validation partition to one
## full.training.list <- list(y = unlist(lapply(train.validation, "[[", "y")),
## x = do.call(rbind, lapply(train.validation, "[[", "x")))
## if (n.partitions == 3) {
## if (use.fitted.sl) {
## if (file.exists(sl.object.file)) {
## message(paste0("\nArgument use.fitted.sl is TRUE and SuperLearner_", outcome.variable.name, " exists.", "\nSkipping refitting to training + validation set and using ", sl.object.file, "..."))
## fitted.sl <- readRDS(sl.object.file)
## } else {
## message(paste0("Argument use.fitted.sl is TRUE, but SuperLearner_", outcome.variable.name, " doe not exist.", " Re-fitting SuperLearner to training + validation set..."))
## fitted.sl <- with(full.training.list, SuperLearner::SuperLearner(Y = y, X = x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl = cvControl,
## verbose = verbose))
## saveRDS(fitted.sl, file = sl.object.file)
## if (verbose)
## message(paste0("SuperLearner object saved to disk as ", sl.object.file, "..."))
## }
## } else {
## if (verbose)
## message("Re-fitting SuperLearner on full training + validation set...")
## fitted.sl <- with(full.training.list, SuperLearner::SuperLearner(Y = y, X = x,
## family = binomial(),
## SL.library = model.names,
## method = "method.AUC",
## cvControl = cvControl,
## verbose = verbose))
## if (!boot.sample) {
## saveRDS(fitted.sl, file = sl.object.file)
## if (verbose)
## message(paste0("SuperLearner object saved to disk as ", sl.object.file, "..."))
## }
## }
## }
## ## Make predictions on the test set
## predictions$con.model.test <- predict(object = fitted.sl, newdata = partitions$test$x, onlySL = TRUE)$pred
## ## Bin predictions made on the test set using the optimal cut-points
## cut.list.labels <- paste0("cut.model.", c("train", "validation", "test"))
## binned.predictions <- lapply(setNames(predictions, nm = cut.list.labels), function (preds) {
## as.numeric(
## cut(x = preds,
## breaks = c(-Inf, optimal.breaks, Inf),
## labels = c("Green", "Yellow", "Orange", "Red"),
## include.lowest = TRUE)
## )
## })
## NewLabelsAndNumeric <- function(label) {
## new.labels <- paste0(label, ".", names(partitions))
## new.list <- lapply(setNames(partitions, nm = new.labels),
## function (partition.list) as.numeric(partition.list[[label]]))
## return (new.list)
## }
## return.object <- list()
## return.object$predictions.list <- c(predictions, binned.predictions,
## NewLabelsAndNumeric("y"),
## NewLabelsAndNumeric("tc"))
## if (return.partitions) {
## return.object$samples <- lapply(partitions, "[[", "x")
## }
## ## Save the predictions, outcome and clinicians tc in each partition to the results list
## if (save.sample.predictions) {
## suppressMessages({
## bengaltiger::SaveToResults(return.object, paste0(outcome.variable.name, ".results"))
## })
## }
##
## return (return.object)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peaklist_annotation.R
\name{.is_between_1range}
\alias{.is_between_1range}
\alias{.is_between}
\title{is_between}
\usage{
.is_between_1range(x, a, b)
.is_between(x, a, b)
}
\arguments{
\item{x}{numeric vector to check.}
\item{a}{lower limit of interval. Scalar for is_between_1range. Can be a vector for is_between.}
\item{b}{upper limit of interval. Scalar for is_between_1range. Can be a vector for is_between.}
}
\value{
Logical vector (is_between_1range) with the same length as x, or
logical matrix (is_between) with dimensions length(x) by length(a) (where length(a) == length(b)).
}
\description{
is_between
}
| /man/is_between.Rd | permissive | stanstrup/PeakABro | R | false | true | 665 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peaklist_annotation.R
\name{.is_between_1range}
\alias{.is_between_1range}
\alias{.is_between}
\title{is_between}
\usage{
.is_between_1range(x, a, b)
.is_between(x, a, b)
}
\arguments{
\item{x}{numeric vector to check.}
\item{a}{lower limit of interval. Scalar for is_between_1range. Can be a vector for is_between.}
\item{b}{upper limit of interval. Scalar for is_between_1range. Can be a vector for is_between.}
}
\value{
Logical vector (is_between_1range) with the same length as x, or
logical matrix (is_between) with dimensions length(x) by length(a) (where length(a) == length(b)).
}
\description{
is_between
}
|
# Functions for aiding resolving of external symbols.

# Load one or more dynamic shared objects (DSOs) permanently into the process
# so the JIT can resolve symbols from them.
#
# libs: character vector of paths to shared libraries; "~" is expanded.
# Stops with an error listing every missing file before attempting any load.
llvmLoadDLL =
function(libs)
{
  libs = path.expand(as.character(libs))
  e = file.exists(libs)
  if(!all(e))
     # collapse (not sep) joins the missing paths into one readable message;
     # with sep = ", " a multi-element vector was never actually joined.
     stop("DSO(s) don't exist: ", paste(libs[!e], collapse = ", "))
  .Call("R_DynamicLibrary_LoadLibraryPermanently", libs)
}
# Register named symbols with the dynamic library so they can be resolved
# at JIT time. Symbols may be given positionally (...) or as a list via
# .syms; unnamed entries get their names from the symbols themselves.
# Returns (invisibly) the result of the native registration call, or an
# empty list when no symbols were supplied.
llvmAddSymbol =
function(..., .syms = list(...))
{
    # Nothing to register.
    if(length(.syms) == 0)
        return(list())

    # Fill in any missing names by coercing the corresponding entries to
    # character (via the NativeSymbolInfo -> character coercion below).
    nms <- names(.syms)
    unnamed <- if(length(nms) == 0) rep(TRUE, length(.syms)) else nms == ""
    if(any(unnamed))
        names(.syms)[unnamed] <- lapply(.syms[unnamed], as, "character")

    # Coerce every entry to a NativeSymbol (i.e. its address).
    .syms <- lapply(.syms, as, "NativeSymbol")

    if(length(names(.syms)) == 0 || any(names(.syms) == ""))
        stop("need names for all symbols")

    invisible(.Call("R_DynamicLibrary_AddSymbol", .syms, names(.syms)))
}
# Register the informal S3 classes used above so they can participate in
# S4 dispatch and in as() coercion.
setOldClass("NativeSymbol")
setOldClass("NativeSymbolInfo")
# Coerce a symbol name (character) to its NativeSymbol address by looking
# it up among the loaded DLLs.
setAs("character", "NativeSymbol",
      function(from)
        getNativeSymbolInfo(from)$address)
# Extract the symbol's name from a NativeSymbolInfo object.
setAs("NativeSymbolInfo", "character",
      function(from)
        from$name)
# Extract the symbol's address from a NativeSymbolInfo object.
setAs("NativeSymbolInfo", "NativeSymbol",
      function(from)
        from$address)
| /R/dso.R | no_license | nick-ulle/Rllvm | R | false | false | 1,151 | r | # Functions for aiding resolving of external symbols.
# Load one or more dynamic shared objects (DSOs) permanently into the process
# so the JIT can resolve symbols from them.
#
# libs: character vector of paths to shared libraries; "~" is expanded.
# Stops with an error listing every missing file before attempting any load.
llvmLoadDLL =
function(libs)
{
  libs = path.expand(as.character(libs))
  e = file.exists(libs)
  if(!all(e))
     # collapse (not sep) joins the missing paths into one readable message;
     # with sep = ", " a multi-element vector was never actually joined.
     stop("DSO(s) don't exist: ", paste(libs[!e], collapse = ", "))
  .Call("R_DynamicLibrary_LoadLibraryPermanently", libs)
}
# Register named symbols with the dynamic library so they can be resolved
# at JIT time. Symbols may be given positionally (...) or as a list via
# .syms; unnamed entries get their names from the symbols themselves.
# Returns (invisibly) the result of the native registration call, or an
# empty list when no symbols were supplied.
llvmAddSymbol =
function(..., .syms = list(...))
{
    # Nothing to register.
    if(length(.syms) == 0)
        return(list())

    # Fill in any missing names by coercing the corresponding entries to
    # character (via the NativeSymbolInfo -> character coercion below).
    nms <- names(.syms)
    unnamed <- if(length(nms) == 0) rep(TRUE, length(.syms)) else nms == ""
    if(any(unnamed))
        names(.syms)[unnamed] <- lapply(.syms[unnamed], as, "character")

    # Coerce every entry to a NativeSymbol (i.e. its address).
    .syms <- lapply(.syms, as, "NativeSymbol")

    if(length(names(.syms)) == 0 || any(names(.syms) == ""))
        stop("need names for all symbols")

    invisible(.Call("R_DynamicLibrary_AddSymbol", .syms, names(.syms)))
}
# Register the informal S3 classes used above so they can participate in
# S4 dispatch and in as() coercion.
setOldClass("NativeSymbol")
setOldClass("NativeSymbolInfo")
# Coerce a symbol name (character) to its NativeSymbol address by looking
# it up among the loaded DLLs.
setAs("character", "NativeSymbol",
      function(from)
        getNativeSymbolInfo(from)$address)
# Extract the symbol's name from a NativeSymbolInfo object.
setAs("NativeSymbolInfo", "character",
      function(from)
        from$name)
# Extract the symbol's address from a NativeSymbolInfo object.
setAs("NativeSymbolInfo", "NativeSymbol",
      function(from)
        from$address)
|
# plots C-Q relationships for up to 4 variables
# specify start date and end date
# optionally specify which site to plot (otherwise, both will be included in each plot)
# Plot C-Q (concentration-discharge) relationships for up to 4 variables over
# a storm window. Specify a start date and an end date; optionally restrict to
# a single site (paths are then coloured by rising/falling limb, otherwise by
# site). Relies on the global data frame WQ_hourly_discharge and on
# y_axis_label() for axis titles.
multi_var_CQ <- function (var1, var2 = NA, var3 = NA, var4 = NA, startDate, endDate, site1 = NA){
  # subset the data between the given start and end dates
  storm <- WQ_hourly_discharge %>%
    filter(date >= ymd(startDate) & date <= ymd(endDate)) %>%
    arrange(dateTime)
  # subset to the given site if it's provided
  if (!is.na(site1)) {
    storm <- storm %>%
      filter(site == site1)
  }
  # find the peak discharge for the storm
  peak <- storm %>% filter(hourlyDischarge == max(hourlyDischarge))
  peakDateTime <- peak[["dateTime"]]
  # classify each observation as rising limb (RL) or falling limb (FL)
  storm <- storm %>%
    mutate(limb = case_when(dateTime <= peakDateTime ~ "RL", T ~ "FL"))
  # colour by limb for a single site, otherwise by site
  colour_var <- case_when(!is.na(site1) ~ "limb", T ~ "site")
  # build one C-Q panel for a single variable (this chunk was previously
  # duplicated four times)
  plot_one_var <- function(var) {
    storm %>%
      ggplot(aes_string(x = "hourlyDischarge", y = var, col = colour_var)) +
      geom_path() +
      # start points for each site
      geom_point(data = storm[storm$site == "MC", ][1, ],
                 aes_string("hourlyDischarge", y = var), col = "black") +
      geom_point(data = storm[storm$site == "SI", ][1, ],
                 aes_string("hourlyDischarge", y = var), col = "black") +
      xlab("Discharge (ft^3/s)") + ylab(y_axis_label(var)) +
      theme_bw() +
      theme(plot.background = element_rect(fill = "transparent", colour = NA),
            legend.position = "none")
  }
  # build a panel for every variable that was actually supplied; this also
  # fixes a bug where supplying e.g. var3 without var2 referenced an
  # undefined plot object
  plot_vars <- c(var1, var2, var3, var4)
  plot_vars <- plot_vars[!is.na(plot_vars)]
  panels <- lapply(plot_vars, plot_one_var)
  if (length(panels) == 1) {
    figure <- panels[[1]]
  } else {
    figure <- do.call(ggarrange, panels)
  }
  # show the figure
  figure
}
## generate y-axis labels for C-Q plots
## Maps a water-quality variable name to a human-readable axis title;
## returns NULL for unrecognised names (same as the original if-chain).
y_axis_label <- function(var){
  switch(var,
         Turb = "Turbidity (NTU)",
         NO3_mgL = "[Nitrate] (mg/L)",
         CHLugL = "[Chlorophyll] (ug/L)",
         FDOMqsu = "fDOM (QSU)",
         BGAugL = "Cyanobacteria")
}
## generate y-axis labels for normalized C_Q plots
## Same mapping as y_axis_label but for normalized series; returns NULL for
## unrecognised names (no BGAugL entry, matching the original).
y_axis_label_n <- function(var){
  switch(var,
         Turb = "Normalized turbidity (NTU)",
         NO3_mgL = "Normalized [nitrate] (mg/L)",
         CHLugL = "Normalized [chlorophyll] (ug/L)",
         FDOMqsu = "Normalized fDOM (QSU)")
}
| /C_Q_plotting_functions.R | no_license | trwaite/HDE-hysteresis | R | false | false | 4,626 | r | # plots C-Q relationships for up to 4 variables
# specify start date and end date
# optionally specify which site to plot (otherwise, both will be included in each plot)
# Plot C-Q (concentration-discharge) relationships for up to 4 variables over
# a storm window. Specify a start date and an end date; optionally restrict to
# a single site (paths are then coloured by rising/falling limb, otherwise by
# site). Relies on the global data frame WQ_hourly_discharge and on
# y_axis_label() for axis titles.
multi_var_CQ <- function (var1, var2 = NA, var3 = NA, var4 = NA, startDate, endDate, site1 = NA){
  # subset the data between the given start and end dates
  storm <- WQ_hourly_discharge %>%
    filter(date >= ymd(startDate) & date <= ymd(endDate)) %>%
    arrange(dateTime)
  # subset to the given site if it's provided
  if (!is.na(site1)) {
    storm <- storm %>%
      filter(site == site1)
  }
  # find the peak discharge for the storm
  peak <- storm %>% filter(hourlyDischarge == max(hourlyDischarge))
  peakDateTime <- peak[["dateTime"]]
  # classify each observation as rising limb (RL) or falling limb (FL)
  storm <- storm %>%
    mutate(limb = case_when(dateTime <= peakDateTime ~ "RL", T ~ "FL"))
  # colour by limb for a single site, otherwise by site
  colour_var <- case_when(!is.na(site1) ~ "limb", T ~ "site")
  # build one C-Q panel for a single variable (this chunk was previously
  # duplicated four times)
  plot_one_var <- function(var) {
    storm %>%
      ggplot(aes_string(x = "hourlyDischarge", y = var, col = colour_var)) +
      geom_path() +
      # start points for each site
      geom_point(data = storm[storm$site == "MC", ][1, ],
                 aes_string("hourlyDischarge", y = var), col = "black") +
      geom_point(data = storm[storm$site == "SI", ][1, ],
                 aes_string("hourlyDischarge", y = var), col = "black") +
      xlab("Discharge (ft^3/s)") + ylab(y_axis_label(var)) +
      theme_bw() +
      theme(plot.background = element_rect(fill = "transparent", colour = NA),
            legend.position = "none")
  }
  # build a panel for every variable that was actually supplied; this also
  # fixes a bug where supplying e.g. var3 without var2 referenced an
  # undefined plot object
  plot_vars <- c(var1, var2, var3, var4)
  plot_vars <- plot_vars[!is.na(plot_vars)]
  panels <- lapply(plot_vars, plot_one_var)
  if (length(panels) == 1) {
    figure <- panels[[1]]
  } else {
    figure <- do.call(ggarrange, panels)
  }
  # show the figure
  figure
}
## generate y-axis labels for C-Q plots
## Maps a water-quality variable name to a human-readable axis title;
## returns NULL for unrecognised names (same as the original if-chain).
y_axis_label <- function(var){
  switch(var,
         Turb = "Turbidity (NTU)",
         NO3_mgL = "[Nitrate] (mg/L)",
         CHLugL = "[Chlorophyll] (ug/L)",
         FDOMqsu = "fDOM (QSU)",
         BGAugL = "Cyanobacteria")
}
## generate y-axis labels for normalized C_Q plots
## Same mapping as y_axis_label but for normalized series; returns NULL for
## unrecognised names (no BGAugL entry, matching the original).
y_axis_label_n <- function(var){
  switch(var,
         Turb = "Normalized turbidity (NTU)",
         NO3_mgL = "Normalized [nitrate] (mg/L)",
         CHLugL = "Normalized [chlorophyll] (ug/L)",
         FDOMqsu = "Normalized fDOM (QSU)")
}
|
# Bayesian estimation of proportions of each feed component (soy, other crops, FMFOs, and other animal)
# NOTE(review): rm(list = ls()) wipes the caller's workspace as a side
# effect; running this script in a fresh R session is the safer pattern.
rm(list=ls())
library(tidyverse)
library(rstan)
library(taxize)
library(data.table)
library(countrycode) # part of clean.lca
library(bayesplot) # for mcmc_areas_ridges
library(shinystan)
# Input/output directories; uncomment the pair matching your platform.
# Mac
datadir <- "/Volumes/jgephart/BFA Environment 2/Data"
outdir <- "/Volumes/jgephart/BFA Environment 2/Outputs"
# Windows
# datadir <- "K:/BFA Environment 2/Data"
# outdir <- "K:BFA Environment 2/Outputs"
lca_dat <- read.csv(file.path(datadir, "LCA_compiled_20201109.csv"), fileEncoding="UTF-8-BOM") #fileEncoding needed when reading in file from windows computer (suppresses BOM hidden characters)
# Functions.R presumably defines clean.lca() used below -- confirm.
source("Functions.R")
# Remaining code below was for initial testing/model building:
######################################################################################################
# Set the FINAL value to be no less than 0.01
# Keep only the species name, the taxa group, and the columns whose names
# contain "new" (the cleaned feed-proportion columns).
lca_dat_no_zeroes <- clean.lca(LCA_data = lca_dat) %>%
  select(clean_sci_name, taxa_group_name, contains("new"))
######################################################################################################
# Model 1: Remove all NAs - estimate proportion feed for a set of studies of one species
# Remove NAs
# NOTE(review): this filters on Feed_soy_percent, while the Model 2 section
# below filters on feed_soy_new -- confirm which column clean.lca produces.
lca_dat_no_na <- lca_dat_no_zeroes %>%
  filter(is.na(Feed_soy_percent)==FALSE)
# Try to get dirichlet to work with just one set of studies: Oncorhynchus mykiss
# Set data for model:
k = 4 # number of feed types (matches the Stan data block below)
n = 3 # number of observations (rows) for this species
feed_weights <- lca_dat_no_na %>%
  filter(clean_sci_name == "Oncorhynchus mykiss") %>%
  select(contains("new")) %>%
  as.matrix()
# note: dirichlet_rng is just a random number generator:
# rep_vector(x, m) creates a column consisting of m copies of x
# generated quantities {
#   vector[k] theta = dirichlet_rng(rep_vector(alpha, k));
# }
# Estimate feed component proportions for a single species.
# Stan model: the n observed feed-weight simplexes share one Dirichlet with
# concentration vector alpha; theta is then drawn from that fitted Dirichlet.
stan_pooled <- 'data {
int<lower=0> n; // number of observations
int<lower=1> k; // number of feed types
simplex[k] feed_weights[n]; // array of feed weights simplexes
}
parameters {
vector<lower=0>[k] alpha;
simplex[k] theta;
}
model {
for (i in 1:n) {
feed_weights[i] ~ dirichlet(alpha); // estimate vector of alphas based on the data of feed weights
}
theta ~ dirichlet(alpha); // now, estimate feed weights based on the vector of alphas
}'
# Compile the Stan model (slow on first compile).
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
#   'C:/rtools40/usr/mingw_/bin/g++' not found
# Fit model:
fit_pooled <- sampling(object = no_missing_mod, data = list(n = n,
                                                            k = k,
                                                            feed_weights = feed_weights))
print(fit_pooled)
# Relabel the generic alpha[i]/theta[i] parameter names with the feed
# component names so the posterior plots are readable.
feeds <- c("soy", "crops", "fmfo", "animal")
feed_key <- data.frame(alpha_param = paste("alpha[", feeds, "]", sep = ""),
                       theta_param = paste("theta[", feeds, "]", sep = ""))
fit_pooled_clean <- fit_pooled
names(fit_pooled_clean)[grep(names(fit_pooled_clean), pattern = "alpha")] <- feed_key$alpha_param
names(fit_pooled_clean)[grep(names(fit_pooled_clean), pattern = "theta")] <- feed_key$theta_param
distribution_pooled <- as.matrix(fit_pooled_clean)
plot_theme <- theme(axis.text=element_text(size=14, color = "black"))
# Posterior ridge plot of the Dirichlet concentration parameters (alpha).
p_alpha <- mcmc_areas_ridges(distribution_pooled,
                             pars = vars(contains("alpha")),
                             prob = 0.8) +
  ggtitle("Oncorhynchus mykiss feed proportion model", "with 80% credible intervals") +
  plot_theme
p_alpha
ggsave(filename = file.path(outdir, "bayes-example_trout_feed-proportion_alphas.png"), width = 11, height = 8.5)
# Posterior ridge plot of the estimated feed proportions (theta).
p_theta <- mcmc_areas_ridges(distribution_pooled,
                             pars = vars(contains("theta")),
                             prob = 0.8) +
  ggtitle("Oncorhynchus mykiss feed proportion model", "with 80% credible intervals") +
  plot_theme
p_theta
ggsave(filename = file.path(outdir, "bayes-example_trout_feed-proportion_thetas.png"), width = 11, height = 8.5)
######################################################################################################
# Model 2: Remove all NAs - estimate proportion feed for groups of scientific names in the dataset (but no hierarchies)
# Drop rows with missing feed data (filtering on feed_soy_new only; assumes the
# four feed columns share the same NA pattern -- TODO confirm)
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# NOTE(review): this first lca_groups assignment is dead code -- it is overwritten
# by the second lca_groups assignment further below before ever being used. Kept
# here (with its commented-out alternatives) as a record of subsets that were tried.
lca_groups <- lca_dat_no_na %>%
filter(clean_sci_name %in% c("Oncorhynchus mykiss")) %>%
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar")) %>% # converges
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Macrobrachium amazonicum")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Oreochromis niloticus")) %>% # converges
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Pangasius")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Penaeus monodon")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Penaeus monodon", "Salmo salar")) %>% # creates divergent transitions
# # Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name))
# lca_groups <- lca_dat_no_na %>%
# filter(clean_sci_name %in% c("Macrobrachium amazonicum", "Penaeus monodon")) %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Now that alpha and theta are vectorized, can include all groups
# lca_groups <- lca_dat_no_na %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Try including groups with only n>1; also remove Thunnus orientalis since both data points are identical (effectively n = 1)
# This assignment is the one actually used below; sci is an integer group index
# derived from the factor levels of clean_sci_name.
lca_groups <- lca_dat_no_na %>%
group_by(clean_sci_name) %>%
mutate(n_sci = n()) %>%
ungroup() %>%
filter(n_sci > 1) %>%
filter(clean_sci_name != "Thunnus orientalis") %>%
# Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name))
feed_vars <- c("feed_soy_new", "feed_crops_new", "feed_fmfo_new", "feed_animal_new")
# One boxplot per feed component, by scientific name.
# Fixes: iterate over the vector directly (the 1:length() form breaks on empty
# vectors), and add the feed variable as a subtitle -- previously all four plots
# carried the same title and were indistinguishable.
for (feed_var in feed_vars) {
p <- ggplot(lca_groups, aes(x = clean_sci_name, y = !!sym(feed_var))) +
geom_boxplot() +
theme_classic() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 16)) +
labs(title = "Boxplots of feed proportion by scientific name",
subtitle = feed_var)
print(p)
}
# Assemble the data objects passed to Stan
feed_weights <- as.matrix(select(lca_groups, contains("new")))
k <- 4                                  # number of feed types
n <- nrow(feed_weights)                 # number of observations
n_sci <- length(unique(lca_groups$sci)) # number of sci-name groups
sci <- lca_groups$sci                   # sci-name index per observation
# SIMULATE FAKE DATA TO TEST MODEL
# library(MCMCpack)
# samp_1 <- rdirichlet(n = 10, alpha = c(1,1,1,1))
# samp_2 <- rdirichlet(n = 10, alpha = c(10, 1, 1, 1))
# feed_weights <- rbind(samp_1, samp_2)
# k = 4
# n = nrow(feed_weights)
# n_sci = 2
# sci = c(rep(1, n/2), rep (2, n/2))
# Vectorize over alpha and theta
# Stan model: each observation's feed-weight simplex is Dirichlet-distributed with
# a group-specific shape vector alpha[sci]; theta[j] ~ Dirichlet(alpha[j]) gives
# the estimated group-level feed proportions.
stan_pooled <- 'data {
int n; // number of observations
int k; // number of feed types
int n_sci;
simplex[k] feed_weights[n]; // array of observed feed weights simplexes
int sci[n]; // sci-name indices
}
parameters {
vector<lower=0>[k] alpha[n_sci]; // vector of dirichlet priors, one for each sci name
simplex[k] theta[n_sci]; // vector of estimated sci-level feed weight simplexes;
}
model {
// priors on alpha
//for (m in 1:k){
// alpha[n_sci][m] ~ uniform(0.1, 10);
//}
for (i in 1:n) {
feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
}
// now, estimate feed weights based on the vector of alphas
for (j in 1:n_sci) {
theta[j] ~ dirichlet(to_vector(alpha[j]));
}
}'
# # Translated and scaled simplex:
# From: https://mc-stan.org/docs/2_21/stan-users-guide/parameterizing-centered-vectors.html
# stan_pooled <- 'data {
# int n; // number of observations
# int k; // number of feed types
# int n_sci;
# simplex[k] feed_weights[n]; // array of observed feed weights simplexes
# int sci[n]; // sci-name indices
# }
# parameters {
# vector<lower=0>[k] alpha[n_sci]; // vector of dirichlet priors, one for each sci name
# simplex[k] theta_raw[n_sci]; // vector of estimated sci-level feed weight simplexes;
# real theta_scale[n_sci];
# }
# transformed parameters {
# vector[k] theta;
# for (j in 1:n_sci) {
# theta = theta_scale[j] * (theta_raw[j] - inv(k));
# }
#
# }
# model {
# for (i in 1:n) {
# feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
# }
# // now, estimate feed weights based on the vector of alphas
# for (j in 1:n_sci) {
# theta_raw[j] ~ dirichlet(to_vector(alpha[j]));
# }
# }'
# Compile the grouped (per-sci-name) model
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# Fit model:
# (rstan accepts the seed as a character string of digits; it is cast to integer)
fit_grouped <- sampling(object = no_missing_mod, data = list(n = n,
k = k,
feed_weights = feed_weights,
n_sci = n_sci,
sci = sci),
cores = 4, seed = "11729")
#cores = 4, iter = 10000) # iter = 10000
# control = list(adapt_delta = 0.99)) # address divergent transitions by increasing delta, i.e., take smaller steps
print(fit_grouped)
# Format of parameters is: theta[sci_name, feed]
# Lookup table mapping each (sci-name, feed) pair to readable replacement names
# for the alpha/theta parameters of the stanfit object.
sci_feed_key <- lca_groups %>%
select(contains(c("clean_sci_name", "new", "sci"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(param_name = paste("[", sci, ",", feed_index, "]", sep = "")) %>%
mutate(alpha_param_name = paste("alpha", clean_sci_name, feed, sep = "-")) %>%
mutate(theta_param_name = paste("theta", clean_sci_name, feed, sep = "-")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN SCIENTIFIC NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
# (i.e. the first array index appears to vary fastest in the stanfit names -- verify against print(fit_grouped))
arrange(feed_index, sci)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[")] <- sci_feed_key$alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[")] <- sci_feed_key$theta_param_name
# Posterior draws as a matrix, now with the human-readable parameter names
distribution_grouped <- as.matrix(fit_grouped_clean)
# Posterior area plots (80% intervals), one per parameter family; empty titles
p_alpha <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("alpha")),
                      area_method = "scaled height",
                      prob = 0.8) +
  ggtitle("")
p_alpha
p_theta <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("theta")),
                      area_method = "scaled height",
                      prob = 0.8) +
  ggtitle("")
p_theta
######################################################################################################
# Model 2.1: Same as model 2 but with informative priors:
# Remove all NAs - estimate proportion feed for just two scientific names in the dataset
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Subset to two species and derive an integer group index (sci) from factor levels
lca_groups <- lca_dat_no_na %>%
filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar")) %>%
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Macrobrachium amazonicum")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Oreochromis niloticus")) %>% # converges
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Pangasius")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Penaeus monodon")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Penaeus monodon", "Salmo salar")) %>% # creates divergent transitions
# Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name))
# lca_groups <- lca_dat_no_na %>%
# filter(clean_sci_name %in% c("Macrobrachium amazonicum", "Penaeus monodon")) %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Now that alpha and theta are vectorized, can include all groups
# lca_groups <- lca_dat_no_na %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Data objects handed to Stan for Model 2.1
feed_weights <- as.matrix(select(lca_groups, contains("new")))
k <- 4                                  # number of feed types
n <- nrow(feed_weights)                 # number of observations
n_sci <- length(unique(lca_groups$sci)) # number of sci-name groups
sci <- lca_groups$sci                   # sci-name index per observation
# Per-sci-name means of the numeric columns, plus observation counts
phi_mean <- lca_groups %>%
  group_by(sci) %>%
  summarise(across(where(is.numeric), mean),
            n_obs = n()) %>%
  ungroup() %>%
  select(contains(c("new", "sci", "obs"))) %>%
  arrange(sci)
# phi: matrix of mean feed proportions, one row per sci-name
phi <- as.matrix(select(phi_mean, contains("new")))
# kappa: prior strength per sci-name. Written without the pipe so the intent is
# unambiguous: (observation count) + k, since %>% binds more tightly than +.
kappa <- pull(phi_mean, n_obs) + k
# This code vectorizes over alpha and theta, allowing all groups to be estiamted
# this stan_data list passes phi in as data
# stan_data = list(n = n,
# k = k,
# feed_weights = feed_weights,
# n_sci = n_sci,
# sci = sci,
# phi = phi,
# kappa = kappa)
# Code that passes priors in as data
# stan_pooled <- 'data {
# int n; // number of observations
# int k; // number of feed types
# int n_sci; // number of sci names
# simplex[k] feed_weights[n]; // array of observed feed weights simplexes
# int sci[n]; // sci-name indices
# simplex[k] phi[n_sci];
# int kappa[n_sci];
# }
# parameters {
# // alpha parameter now moved into transformed parameter section
# simplex[k] theta[n_sci]; // vectors of estimated sci-level feed weight simplexes;
# }
# transformed parameters {
# // reparameterize alpha distributions as a vector of means and counts
# // phi is expected value of theta (mean feed weights)
# // kappa is strength of the prior measured in number of prior observations (minus K)
# vector<lower=0>[k] alpha[n_sci];
# for (m in 1:n) {
# alpha[sci[m]] = kappa[sci[m]] * phi[sci[m]];
# }
# }
# model {
#
# for (i in 1:n) {
# feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
# // theta[sci[i]] ~ dirichlet(to_vector(alpha[sci[i]])); // this has problems converging here
# }
# // now, estimate feed weights based on the vector of alphas
# for (j in 1:n_sci) {
# theta[j] ~ dirichlet(to_vector(alpha[j]));
# }
# }'
# NEW CODE: instead of passing phi in as data, pass it as a parameter with a distribution
# Appears like model is only valid when only one element in the phi simplex (per scientific name) is given a prior
# This stan_data list only defines kappa (not phi) as data
stan_data <- list(
  n = n,                       # number of observations
  k = k,                       # number of feed types
  feed_weights = feed_weights, # observed feed-proportion simplexes
  n_sci = n_sci,               # number of sci-name groups
  sci = sci,                   # sci-name index per observation
  kappa = kappa                # prior strength per sci-name
)
# Stan model 2.1: alpha is reparameterized as kappa * phi (count times mean), with
# phi itself a parameter; one element of phi (group 1, soy) is given a normal prior.
stan_pooled <- 'data {
int n; // number of observations
int k; // number of feed types
int n_sci; // number of sci names
simplex[k] feed_weights[n]; // array of observed feed weights simplexes
int sci[n]; // sci-name indices
int kappa[n_sci];
}
parameters {
// alpha parameter now moved into transformed parameter section
simplex[k] phi[n_sci];
simplex[k] theta[n_sci]; // vectors of estimated sci-level feed weight simplexes
// sigma parameters for mean priors
real<lower=0> sigma_1;
// real<lower=0> sigma_2;
}
transformed parameters {
// reparameterize alpha distributions as a vector of means and counts
// phi is expected value of theta (mean feed weights)
// kappa is strength of the prior measured in number of prior observations (minus K)
vector<lower=0>[k] alpha[n_sci];
for (m in 1:n) {
alpha[sci[m]] = kappa[sci[m]] * phi[sci[m]];
}
}
model {
// priors on specific phi
// phi defined as phi[sci][k]
// option 1: define feed proportion priors as lower upper bounds (but can only give a prior for one element per simplex - i.e., priors on phi[6][1] and phi[6][2] causes error probably because elements within a simplex are constrained?)
// phi[sci][k] ~ uniform(0.1, 0.2); // example prior on lower and upper bounds
// option 2: define feed proportions as means (need to define sigmas in parameters block: real<lower=0> sigma_1, sigma_2 etc; etc;)
// phi[sci][k] ~ normal(0.13, sigma_1); // example prior on mean
sigma_1 ~ uniform(0, 10);
phi[1][1] ~ normal(0.13, sigma_1);
for (i in 1:n) {
feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
// theta[sci[i]] ~ dirichlet(to_vector(alpha[sci[i]])); // this has problems converging here
}
// now, estimate feed weights based on the vector of alphas
for (j in 1:n_sci) {
theta[j] ~ dirichlet(to_vector(alpha[j]));
}
}'
# Compile the informative-prior model
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# RUNS but gives warning about divergent transitions
# Fit model:
fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11729")
#,cores = 4, iter = 10000,
#control = list(adapt_delta = 0.99)) # address divergent transitions by increasing delta, i.e., take smaller steps
print(fit_grouped)
# Interactive diagnostics (divergences, trace plots) in the browser
launch_shinystan(fit_grouped)
# Format of parameters is: theta[sci_name, feed]
# Same relabeling procedure as for Model 2: build a (sci-name, feed) lookup table
# and substitute readable names into a copy of the stanfit object.
sci_feed_key <- lca_groups %>%
select(contains(c("clean_sci_name", "new", "sci"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(param_name = paste("[", sci, ",", feed_index, "]", sep = "")) %>%
mutate(alpha_param_name = paste("alpha", clean_sci_name, feed, sep = "-")) %>%
mutate(theta_param_name = paste("theta", clean_sci_name, feed, sep = "-")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN SCIENTIFIC NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, sci)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[")] <- sci_feed_key$alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[")] <- sci_feed_key$theta_param_name
# Posterior draws with readable names
distribution_grouped <- as.matrix(fit_grouped_clean)
# Posterior area plots (80% intervals) for the informative-prior model
p_alpha <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("alpha")),
                      area_method = "scaled height",
                      prob = 0.8)
p_alpha
p_theta <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("theta")),
                      area_method = "scaled height",
                      prob = 0.8)
p_theta
######################################################################################################
# Model 3 Add hierarchies (two to three levels)
######################################################################################################
# Model 3.1 Two-level model with no priors
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Keep all data, but Add indices
# tx is an integer taxa-group index derived from the factor levels of taxa_group_name
lca_groups <- lca_dat_no_na %>%
# Add indices for each sci-name
mutate(taxa_group_name = as.factor(taxa_group_name),
tx = as.numeric(taxa_group_name)) %>%
arrange(tx)
# Test a smaller dataset (just salmon/char)
# lca_groups <- lca_dat_no_na %>%
#filter(taxa_group_name %in% c("salmon/char")) %>%
#filter(clean_sci_name %in% c("Oncorhynchus mykiss")) %>%
# filter(taxa_group_name %in% c("salmon/char", "marine shrimp")) %>%
# mutate(taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(tx)
# Try analyzing only groups with n>1; also remove Thunnus orientalis since both data points are identical (effectively n = 1)
# lca_groups <- lca_dat_no_na %>%
# group_by(clean_sci_name) %>%
# mutate(n_sci = n()) %>%
# ungroup() %>%
# filter(n_sci > 1) %>%
# filter(clean_sci_name != "Thunnus orientalis") %>%
# # Add indices for each sci-name
# mutate(taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(tx)
# Data objects for the two-level (taxa-group) model
feed_weights <- as.matrix(select(lca_groups, contains("new")))
K <- 4                                # number of feed types
N <- nrow(feed_weights)               # number of observations
N_TX <- length(unique(lca_groups$tx)) # number of taxa groups
tx <- lca_groups$tx                   # taxa-group index per observation
# Observation counts per taxa group (used as the prior strength kappa)
tx_kappa <- lca_groups %>%
  count(tx, name = "n_obs") %>%
  arrange(tx) %>%
  pull(n_obs)
# Mean observed feed proportions per taxa group
tx_phi_mean <- lca_groups %>%
  group_by(tx) %>%
  summarise(across(contains("new"), mean)) %>%
  ungroup() %>%
  arrange(tx)
# Two-level model, reparameterize alpha (dirichlet shape parameter) as the expected (mean) feed proportions
stan_data = list(N = N,
K = K,
feed_weights = feed_weights,
N_TX = N_TX,
tx = tx,
tx_kappa = tx_kappa)
# Stan model 3.1: observations ~ Dirichlet(tx_alpha) within taxa groups; the
# taxa-level means tx_theta are themselves pooled toward an overall theta.
# Both shape vectors are reparameterized as count * mean.
stan_pooled <- 'data {
int N; // number of total observations
int K; // number of feed types
int N_TX; // number of taxa groups
simplex[K] feed_weights[N]; // array of observed feed weights simplexes
int tx[N]; // taxa-group indices
int tx_kappa[N_TX]; // number of observations per taxa group
}
parameters {
simplex[K] tx_theta[N_TX]; // vectors of estimated taxa-level feed weight simplexes
simplex[K] theta;
}
transformed parameters {
// define params
vector<lower=0>[K] tx_alpha[N_TX];
vector<lower=0>[K] alpha;
// reparameterize alphas as a vector of means (theta) and counts (kappas)
// theta is expected value of mean feed weights
// kappa is strength of the prior measured in number of prior observations (minus K)
alpha = N * theta;
for (n_tx in 1:N_TX) {
tx_alpha[n_tx] = tx_kappa[n_tx] * tx_theta[n_tx];
}
}
model {
// likelihood
for (n in 1:N) {
feed_weights[n] ~ dirichlet(to_vector(tx_alpha[tx[n]]));
}
for (n_tx in 1:N_TX) {
tx_theta[n_tx] ~ dirichlet(alpha);
}
}'
# Compile the two-level model
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# RUNS but gives warning about divergent transitions
# Fit model:
fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11729")
# Increasing adapt_delta decreases the divergences but doesn't get rid of them
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.9))
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.99))
#launch_shinystan(fit_grouped)
#,cores = 4, iter = 10000,
#control = list(adapt_delta = 0.99)) # address divergent transitions by increasing delta, i.e., take smaller steps
print(fit_grouped)
# Create formatted names for taxa-group level
# Format of parameters is: theta[sci_name, feed]
# Lookup table: one row per (taxa group, feed) with readable replacement names
# for the tx_alpha / tx_theta parameters.
tx_feed_key <- lca_groups %>%
select(contains(c("taxa_group_name", "new", "tx"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(index = paste("[", tx, ",", feed_index, "]", sep = "")) %>%
mutate(tx_theta_param_name = paste("theta[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
mutate(tx_alpha_param_name = paste("alpha[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN TAXA NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, tx)
# Same lookup for the overall (global) level: one row per feed type
overall_feed_key <- lca_groups %>%
select(contains("new")) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(overall_theta_param_name = paste("theta[overall, ", feed, "]", sep = "")) %>%
mutate(overall_alpha_param_name = paste("alpha[overall, ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
# Taxa-level
# NOTE: order matters here -- the tx_* replacements must run before the global
# "alpha\\[[1-4]" / "theta\\[[1-4]" patterns below, which would otherwise also
# match the digit-indexed tx_alpha[...]/tx_theta[...] names.
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_alpha")] <- tx_feed_key$tx_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_theta")] <- tx_feed_key$tx_theta_param_name
# Global-level
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[[1-4]")] <- overall_feed_key$overall_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[[1-4]")] <- overall_feed_key$overall_theta_param_name
# Posterior draws with readable names
distribution_grouped <- as.matrix(fit_grouped_clean)
# Posterior area plots (80% intervals) for the two-level model
p_alpha <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("alpha")),
                      area_method = "scaled height",
                      prob = 0.8)
p_alpha
p_theta <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("theta")),
                      area_method = "scaled height",
                      prob = 0.8)
p_theta
######################################################################################################
# Model 3.2: Three-level model
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Keep all data, but Add indices
# sci and tx are integer indices derived from the factor levels of clean_sci_name
# and taxa_group_name respectively; rows are arranged by sci for the index maps below
lca_groups <- lca_dat_no_na %>%
# Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name),
taxa_group_name = as.factor(taxa_group_name),
tx = as.numeric(taxa_group_name)) %>%
arrange(sci)
# Test a smaller dataset (just salmon/char and marine shrimp - i.e., two taxa levels + overall level)
# lca_groups <- lca_dat_no_na %>%
# filter(taxa_group_name %in% c("salmon/char", "marine shrimp")) %>%
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name),
# taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(sci)
# # Try analyzing only groups with n>1; also remove Thunnus orientalis since both data points are identical (effectively n = 1)
# lca_groups <- lca_dat_no_na %>%
# group_by(clean_sci_name) %>%
# mutate(n_sci = n()) %>%
# ungroup() %>%
# filter(n_sci > 1) %>%
# filter(clean_sci_name != "Thunnus orientalis") %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name),
# taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(sci)
# Data objects for the three-level model
feed_weights <- as.matrix(select(lca_groups, contains("new")))
K <- 4                                  # number of feed types
N <- nrow(feed_weights)                 # number of observations
N_SCI <- length(unique(lca_groups$sci)) # number of sci-name groups
N_TX <- length(unique(lca_groups$tx))   # number of taxa groups
n_to_sci <- lca_groups$sci              # observation -> sci-name index
# sci-name -> taxa-group index: one row per distinct (sci, tx) pair, relying on
# lca_groups being arranged by sci so the rows come out in sci order
sci_to_tx <- lca_groups %>%
  distinct(sci, tx) %>%
  pull(tx)
# Get counts per sci name and counts per taxa group (also included as data in the model):
# these act as the prior strengths (kappas) in the reparameterized alphas
sci_kappa <- lca_groups %>%
  count(sci, name = "n_obs") %>%
  arrange(sci) %>%
  pull(n_obs)
tx_kappa <- lca_groups %>%
  count(tx, name = "n_obs") %>%
  arrange(tx) %>%
  pull(n_obs)
# For priors, get the mean of observations per sci-name
# (summarise() with two grouping variables leaves the result grouped by the first;
# ungroup() clears that before arranging)
# NOTE: these means are computed here but not included in stan_data below
sci_mean <- lca_groups %>%
select(contains(c("new", "clean_sci_name", "sci"))) %>%
group_by(clean_sci_name, sci) %>%
summarise(across(contains("new"), mean),
n_sci = n()) %>%
ungroup() %>%
arrange(sci)
# Get mean observations per taxa group
tx_mean <- lca_groups %>%
select(contains(c("new", "taxa_group_name", "tx"))) %>%
group_by(taxa_group_name, tx) %>%
summarise(across(contains("new"), mean),
n_tx = n()) %>%
ungroup() %>%
arrange(tx)
# Grand mean of the feed proportions across all observations
overall_mean <- lca_groups %>%
select(contains(c("new"))) %>%
summarise(across(contains("new"), mean))
# Data list for the three-level model (note: sci_mean/tx_mean/overall_mean are not passed in)
stan_data = list(N = N,
K = K,
feed_weights = feed_weights,
N_SCI = N_SCI,
N_TX = N_TX,
n_to_sci = n_to_sci,
sci_to_tx = sci_to_tx,
sci_kappa = sci_kappa,
tx_kappa = tx_kappa)
# Stan model 3.2: observations ~ Dirichlet(sci_alpha); sci-level means pooled toward
# taxa-level means, which are pooled toward an overall theta. All shape vectors are
# reparameterized as count * mean.
stan_pooled <- 'data {
int N; // number of total observations
int K; // number of feed types
int N_SCI; // number of sci names
int N_TX; // number of taxa groups
simplex[K] feed_weights[N]; // array of observed feed weights simplexes
int n_to_sci[N]; // sci-name indices
int sci_to_tx[N_SCI]; // taxa-group indices
int sci_kappa[N_SCI]; // number of observations per sci-name
int tx_kappa[N_TX]; // number of observations per taxa group
}
parameters {
simplex[K] sci_theta[N_SCI]; // vectors of estimated sci-level feed weight simplexes
simplex[K] tx_theta[N_TX]; // vectors of estimated taxa-level feed weight simplexes
simplex[K] theta;
// if needed, define sigma params for mean priors:
// real<lower=0> sigma_1;
}
transformed parameters {
// define params
vector<lower=0>[K] sci_alpha[N_SCI];
vector<lower=0>[K] tx_alpha[N_TX];
vector<lower=0>[K] alpha;
// reparameterize alphas as a vector of means (theta) and counts (kappas)
// theta is expected value of mean feed weights
// kappa is strength of the prior measured in number of prior observations (minus K)
alpha = N * theta;
for (n_tx in 1:N_TX) {
tx_alpha[n_tx] = tx_kappa[n_tx] * tx_theta[n_tx];
}
for (n_sci in 1:N_SCI) {
sci_alpha[n_sci] = sci_kappa[n_sci] * sci_theta[n_sci];
}
}
model {
// priors on specific theta
// sci_theta defined as sci_theta[sci][K]
// option 1: define feed proportion priors as lower upper bounds
//sci_theta[24][1] ~ uniform(0.001, 0.05); // hypothetical lower and upper bounds
// option 2: define feed proportions as means (also need to define sigmas in parameters block: real<lower=0> sigma_1 etc; etc;)
// sci_theta[24][1] ~ normal(0.9, sigma_1); // hypothetical mean prior
// likelihood
for (n in 1:N) {
feed_weights[n] ~ dirichlet(to_vector(sci_alpha[n_to_sci[n]]));
}
for (n_sci in 1:N_SCI){
sci_theta[n_sci] ~ dirichlet(tx_alpha[sci_to_tx[n_sci]]);
}
for (n_tx in 1:N_TX){
tx_theta[n_tx] ~ dirichlet(alpha);
}
}'
# Three level model (no priors), but to help with convergence, try offsetting and scaling simplex:
# From: https://mc-stan.org/docs/2_21/stan-users-guide/parameterizing-centered-vectors.html
# stan_data = list(N = N,
# K = K,
# feed_weights = feed_weights,
# N_SCI = N_SCI,
# N_TX = N_TX,
# sci = sci,
# tx = tx)
#
# stan_pooled <- 'data {
# int N; // number of total observations
# int K; // number of feed types
# int N_SCI; // number of sci names
# int N_TX; // number of taxa groups
# simplex[K] feed_weights[N]; // array of observed feed weights simplexes
# int sci[N]; // sci-name indices
# int tx[N]; // taxa-group indices
# }
# parameters {
# vector<lower=0>[K] sci_alpha[N_SCI]; // vector of dirichlet priors, one for each sci name (alpha is not a simplex)
# simplex[K] sci_theta_raw[N_SCI]; // vectors of estimated sci-level feed weight simplexes
# vector<lower=0>[K] tx_alpha[N_TX];
# simplex[K] tx_theta_raw[N_TX];
# vector<lower=0>[K] alpha;
# simplex[K] theta_raw;
#
# // scaling parameters
# real sci_theta_scale[N_SCI]; // vectors of estimated sci-level feed weight simplexes
# real tx_theta_scale[N_TX];
# real theta_scale;
# }
# transformed parameters {
# vector[K] sci_theta[N_SCI];
# vector[K] tx_theta[N_TX];
# vector[K] theta;
#
# for (n_sci in 1:N_SCI){
# sci_theta[N_SCI] = sci_theta_scale[N_SCI] * (sci_theta_raw[N_SCI] - inv(K));
# }
#
# for (n_tx in 1:N_TX) {
# tx_theta[N_TX] = tx_theta_scale[N_TX] * (tx_theta_raw[N_TX] - inv(K));
# }
# theta = theta_scale * (theta_raw - inv(K));
#
# }
# model {
#
# // likelihood
# for (n in 1:N) {
# tx_theta_raw[tx[n]] ~ dirichlet(alpha);
# sci_theta_raw[sci[n]] ~ dirichlet(to_vector(tx_alpha[tx[n]]));
# feed_weights[n] ~ dirichlet(to_vector(sci_alpha[sci[n]]));
# }
# // now, estimate feed weights based on the vector of alphas
# theta_raw ~ dirichlet(to_vector(alpha));
# for (n_tx in 1:N_TX) {
# tx_theta_raw[n_tx] ~ dirichlet(to_vector(tx_alpha[n_tx]));
# }
# for (n_sci in 1:N_SCI) {
# sci_theta_raw[n_sci] ~ dirichlet(to_vector(sci_alpha[n_sci]));
# }
# }'
# Compile the three-level model
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# RUNS but gives warning about divergent transitions
# Fit model:
fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11729")
# Increasing adapt_delta decreases the divergences but doesn't get rid of them
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.9))
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.99))
#launch_shinystan(fit_grouped)
print(fit_grouped)
# Raw (un-renamed) posterior draws
distribution_grouped <- as.matrix(fit_grouped)
# Plot all taxa-level parameters in one plot each
p_alpha <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("tx_alpha")),
                      area_method = "scaled height",
                      prob = 0.8)
p_alpha
p_theta <- mcmc_areas(x = distribution_grouped,
                      pars = vars(contains("tx_theta")),
                      area_method = "scaled height",
                      prob = 0.8)
p_theta
# Create formatted names for sci-name level
# Format of parameters is: theta[sci_name, feed]
# Lookup tables mapping each group/feed combination to readable parameter names,
# used below to relabel the stanfit output at all three levels.
sci_feed_key <- lca_groups %>%
select(contains(c("clean_sci_name", "new", "sci"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(index = paste("[", sci, ",", feed_index, "]", sep = "")) %>%
mutate(sci_theta_param_name = paste("theta[", clean_sci_name, ", ", feed, "]", sep = "")) %>%
mutate(sci_alpha_param_name = paste("alpha[", clean_sci_name, ", ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN SCIENTIFIC NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, sci)
# Create formatted names for taxa-group level
# Format of parameters is: theta[sci_name, feed]
tx_feed_key <- lca_groups %>%
select(contains(c("taxa_group_name", "new", "tx"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(index = paste("[", tx, ",", feed_index, "]", sep = "")) %>%
mutate(tx_theta_param_name = paste("theta[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
mutate(tx_alpha_param_name = paste("alpha[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN TAXA NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, tx)
# Overall (global) level: one row per feed type
overall_feed_key <- lca_groups %>%
select(contains("new")) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(overall_theta_param_name = paste("theta[overall, ", feed, "]", sep = "")) %>%
mutate(overall_alpha_param_name = paste("alpha[overall, ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
# Sci-Level
# NOTE: order matters -- the sci_*/tx_* replacements must run before the global
# "alpha\\[[1-4]" / "theta\\[[1-4]" patterns, which would otherwise also match
# the digit-indexed sci_alpha[...]/tx_alpha[...] (etc.) names.
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "sci_alpha")] <- sci_feed_key$sci_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "sci_theta")] <- sci_feed_key$sci_theta_param_name
# Taxa-level
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_alpha")] <- tx_feed_key$tx_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_theta")] <- tx_feed_key$tx_theta_param_name
# Global-level
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[[1-4]")] <- overall_feed_key$overall_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[[1-4]")] <- overall_feed_key$overall_theta_param_name
# Posterior draws with readable names
distribution_grouped_clean <- as.matrix(fit_grouped_clean)
# Choose a specific plot: single-species view (Thunnus thynnus only)
p_alpha <- mcmc_areas(x = distribution_grouped_clean,
                      pars = vars(contains("alpha") & contains("Thunnus thynnus")),
                      area_method = "scaled height",
                      prob = 0.8)
p_alpha
p_theta <- mcmc_areas(x = distribution_grouped_clean,
                      pars = vars(contains("theta") & contains("Thunnus thynnus")),
                      area_method = "scaled height",
                      prob = 0.8)
p_theta
# Plot per sci-name: one alpha and one theta area plot per scientific name.
# Fixes: hoist unique() out of the loop (it was recomputed every iteration) and
# iterate over the names directly instead of the fragile 1:length(...) index form.
sci_names <- as.character(unique(lca_groups$clean_sci_name))
for (name_i in sci_names) {
p_alpha <- mcmc_areas(distribution_grouped_clean,
pars = vars(contains("alpha") & contains(name_i)),
prob = 0.8,
area_method = "scaled height")
print(p_alpha)
p_theta <- mcmc_areas(distribution_grouped_clean,
pars = vars(contains("theta") & contains(name_i)),
prob = 0.8,
area_method = "scaled height")
print(p_theta)
}
# Plot per taxa-name: one alpha and one theta area plot per taxa group.
# Fixes: hoist unique() out of the loop and iterate over the names directly
# instead of the fragile 1:length(...) index form.
taxa_names <- as.character(unique(lca_groups$taxa_group_name))
for (name_i in taxa_names) {
p_alpha <- mcmc_areas(distribution_grouped_clean,
pars = vars(contains("alpha") & contains(name_i)),
prob = 0.8,
area_method = "scaled height")
print(p_alpha)
p_theta <- mcmc_areas(distribution_grouped_clean,
pars = vars(contains("theta") & contains(name_i)),
prob = 0.8,
area_method = "scaled height")
print(p_theta)
}
# Posterior density plots for the overall (global-level) parameters.
p_alpha <- mcmc_areas(
distribution_grouped_clean,
pars = vars(contains("alpha") & contains("overall")),
area_method = "scaled height",
prob = 0.8
)
print(p_alpha)
p_theta <- mcmc_areas(
distribution_grouped_clean,
pars = vars(contains("theta") & contains("overall")),
area_method = "scaled height",
prob = 0.8
)
print(p_theta)
| /Archive/bayes_hierarchies_dirichlet_prop_feed.R | no_license | liulikshun/FishPrint | R | false | false | 42,983 | r | # Bayesian estimation of proportions of each feed component (soy, other crops, FMFOs, and other animal)
# Bayesian estimation of the proportions of each feed component
# (soy, other crops, FMFO, other animal) — environment setup and data load.
# NOTE(review): rm(list = ls()) wipes the user workspace and is discouraged
# in scripts — prefer running in a fresh R session.
rm(list=ls())
library(tidyverse)
library(rstan)
library(taxize)
library(data.table)
library(countrycode) # part of clean.lca
library(bayesplot) # for mcmc_areas_ridges
library(shinystan)
# Data locations (uncomment the pair matching your platform)
# Mac
datadir <- "/Volumes/jgephart/BFA Environment 2/Data"
outdir <- "/Volumes/jgephart/BFA Environment 2/Outputs"
# Windows
# datadir <- "K:/BFA Environment 2/Data"
# outdir <- "K:/BFA Environment 2/Outputs"
lca_dat <- read.csv(file.path(datadir, "LCA_compiled_20201109.csv"), fileEncoding="UTF-8-BOM") #fileEncoding needed when reading in file from windows computer (suppresses BOM hidden characters)
# clean.lca() is defined in the project-local Functions.R
source("Functions.R")
# Remaining code below was for initial testing/model building:
######################################################################################################
# Set the FINAL value to be no less than 0.01
# Keep only the identifier columns and the cleaned "*_new" feed-proportion columns.
lca_dat_no_zeroes <- clean.lca(LCA_data = lca_dat) %>%
select(clean_sci_name, taxa_group_name, contains("new"))
######################################################################################################
# Model 1: Remove all NAs - estimate proportion feed for a set of studies of one species
# Remove rows with missing feed data.
# Fix: the original filtered on `Feed_soy_percent`, a column that cannot exist
# here — the select() above kept only clean_sci_name, taxa_group_name and the
# "*_new" columns. Use `feed_soy_new`, consistent with every later section.
# Also replace `is.na(x)==FALSE` with the idiomatic `!is.na(x)`.
lca_dat_no_na <- lca_dat_no_zeroes %>%
  filter(!is.na(feed_soy_new))
# Try to get dirichlet to work with just one set of studies: Oncorhynchus mykiss
# Set data for the model.
# Fixes: use `<-` for top-level assignment (was `=`) and derive n from the
# data instead of the hard-coded 3 — the later models all use
# n = nrow(feed_weights), and a hard-coded count silently breaks if the
# underlying data change.
feed_weights <- lca_dat_no_na %>%
  filter(clean_sci_name == "Oncorhynchus mykiss") %>%
  select(contains("new")) %>%
  as.matrix()
k <- 4                   # number of feed component types
n <- nrow(feed_weights)  # number of observations (studies)
# note: dirichlet_rng is just a random number generator:
# rep_vector(x, m) creates a column consisting of m copies of x
# generated quantities {
# vector[k] theta = dirichlet_rng(rep_vector(alpha, k));
# }
# Estimate feed component proportions for a single species
# Completely-pooled Stan model: all observed feed-weight simplexes share a
# single dirichlet shape vector alpha; theta is a posterior draw of the
# expected feed mix under that alpha.
stan_pooled <- 'data {
int<lower=0> n; // number of observations
int<lower=1> k; // number of feed types
simplex[k] feed_weights[n]; // array of feed weights simplexes
}
parameters {
vector<lower=0>[k] alpha;
simplex[k] theta;
}
model {
for (i in 1:n) {
feed_weights[i] ~ dirichlet(alpha); // estimate vector of alphas based on the data of feed weights
}
theta ~ dirichlet(alpha); // now, estimate feed weights based on the vector of alphas
}'
# Compile the pooled model (verbose output helps diagnose C++ toolchain issues).
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# Draw posterior samples with the default chain settings.
pooled_stan_data <- list(n = n, k = k, feed_weights = feed_weights)
fit_pooled <- sampling(object = no_missing_mod, data = pooled_stan_data)
print(fit_pooled)
# Human-readable labels for the 4 feed components. The order soy, crops,
# fmfo, animal MUST match Stan's alpha[1]..alpha[4] / theta[1]..theta[4].
feeds <- c("soy", "crops", "fmfo", "animal")
feed_key <- data.frame(alpha_param = paste("alpha[", feeds, "]", sep = ""),
theta_param = paste("theta[", feeds, "]", sep = ""))
# Rename on a copy so a mistake does not force re-running the sampler
fit_pooled_clean <- fit_pooled
names(fit_pooled_clean)[grep(names(fit_pooled_clean), pattern = "alpha")] <- feed_key$alpha_param
names(fit_pooled_clean)[grep(names(fit_pooled_clean), pattern = "theta")] <- feed_key$theta_param
# Ridge plots of the pooled posteriors; each is displayed and then saved
# (ggsave with no plot argument saves the last displayed plot).
distribution_pooled <- as.matrix(fit_pooled_clean)
plot_theme <- theme(axis.text=element_text(size=14, color = "black"))
p_alpha <- mcmc_areas_ridges(distribution_pooled, pars = vars(contains("alpha")), prob = 0.8) +
  ggtitle("Oncorhynchus mykiss feed proportion model", "with 80% credible intervals") +
  plot_theme
print(p_alpha)
ggsave(filename = file.path(outdir, "bayes-example_trout_feed-proportion_alphas.png"), width = 11, height = 8.5)
p_theta <- mcmc_areas_ridges(distribution_pooled, pars = vars(contains("theta")), prob = 0.8) +
  ggtitle("Oncorhynchus mykiss feed proportion model", "with 80% credible intervals") +
  plot_theme
print(p_theta)
ggsave(filename = file.path(outdir, "bayes-example_trout_feed-proportion_thetas.png"), width = 11, height = 8.5)
######################################################################################################
# Model 2: Remove all NAs - estimate proportion feed for groups of scientific names in the dataset (but no hierarchies)
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Exploratory subset: single species (commented filters record which
# combinations converged vs. produced divergent transitions).
lca_groups <- lca_dat_no_na %>%
filter(clean_sci_name %in% c("Oncorhynchus mykiss")) %>%
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar")) %>% # converges
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Macrobrachium amazonicum")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Oreochromis niloticus")) %>% # converges
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Pangasius")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Penaeus monodon")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Penaeus monodon", "Salmo salar")) %>% # creates divergent transitions
# # Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name))
# lca_groups <- lca_dat_no_na %>%
# filter(clean_sci_name %in% c("Macrobrachium amazonicum", "Penaeus monodon")) %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Now that alpha and theta are vectorized, can include all groups
# lca_groups <- lca_dat_no_na %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Try including groups with only n>1; also remove Thunnus orientalis since both data points are identical (effectively n = 1)
# NOTE(review): this assignment OVERWRITES lca_groups built above — the
# mykiss-only subset is dead code retained from earlier experimentation.
lca_groups <- lca_dat_no_na %>%
group_by(clean_sci_name) %>%
mutate(n_sci = n()) %>%
ungroup() %>%
filter(n_sci > 1) %>%
filter(clean_sci_name != "Thunnus orientalis") %>%
# Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name))
# Quick EDA: boxplot of each feed-proportion column by scientific name.
# Fix: iterate over the column names directly instead of `1:length()`
# (which yields c(1, 0) on an empty vector).
feed_vars <- c("feed_soy_new", "feed_crops_new", "feed_fmfo_new", "feed_animal_new")
for (fv in feed_vars) {
  p <- ggplot(lca_groups, aes(x = clean_sci_name, y = !!sym(fv))) +
    geom_boxplot() +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 16)) +
    labs(title = "Boxplots of feed proportion by scientific name")
  print(p)
}
# Set data for the model.
# Fix: use `<-` for top-level assignment (was `=`).
feed_weights <- lca_groups %>%
  select(contains("new")) %>%
  as.matrix()
k <- 4                                   # number of feed types
n <- nrow(feed_weights)                  # number of observations
n_sci <- length(unique(lca_groups$sci))  # number of scientific names
sci <- lca_groups$sci                    # sci-name index per observation
# SIMULATE FAKE DATA TO TEST MODEL
# library(MCMCpack)
# samp_1 <- rdirichlet(n = 10, alpha = c(1,1,1,1))
# samp_2 <- rdirichlet(n = 10, alpha = c(10, 1, 1, 1))
# feed_weights <- rbind(samp_1, samp_2)
# k = 4
# n = nrow(feed_weights)
# n_sci = 2
# sci = c(rep(1, n/2), rep (2, n/2))
# Vectorize over alpha and theta
# Per-sci-name Stan model: each scientific name gets its own dirichlet shape
# vector alpha (no pooling across species); theta draws the implied feed mix.
stan_pooled <- 'data {
int n; // number of observations
int k; // number of feed types
int n_sci;
simplex[k] feed_weights[n]; // array of observed feed weights simplexes
int sci[n]; // sci-name indices
}
parameters {
vector<lower=0>[k] alpha[n_sci]; // vector of dirichlet priors, one for each sci name
simplex[k] theta[n_sci]; // vector of estimated sci-level feed weight simplexes;
}
model {
// priors on alpha
//for (m in 1:k){
// alpha[n_sci][m] ~ uniform(0.1, 10);
//}
for (i in 1:n) {
feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
}
// now, estimate feed weights based on the vector of alphas
for (j in 1:n_sci) {
theta[j] ~ dirichlet(to_vector(alpha[j]));
}
}'
# # Translated and scaled simplex:
# From: https://mc-stan.org/docs/2_21/stan-users-guide/parameterizing-centered-vectors.html
# stan_pooled <- 'data {
# int n; // number of observations
# int k; // number of feed types
# int n_sci;
# simplex[k] feed_weights[n]; // array of observed feed weights simplexes
# int sci[n]; // sci-name indices
# }
# parameters {
# vector<lower=0>[k] alpha[n_sci]; // vector of dirichlet priors, one for each sci name
# simplex[k] theta_raw[n_sci]; // vector of estimated sci-level feed weight simplexes;
# real theta_scale[n_sci];
# }
# transformed parameters {
# vector[k] theta;
# for (j in 1:n_sci) {
# theta = theta_scale[j] * (theta_raw[j] - inv(k));
# }
#
# }
# model {
# for (i in 1:n) {
# feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
# }
# // now, estimate feed weights based on the vector of alphas
# for (j in 1:n_sci) {
# theta_raw[j] ~ dirichlet(to_vector(alpha[j]));
# }
# }'
# Compile the grouped (per-species) model.
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# Sample the posterior on 4 parallel cores with a fixed seed for reproducibility.
grouped_stan_data <- list(n = n, k = k, feed_weights = feed_weights, n_sci = n_sci, sci = sci)
fit_grouped <- sampling(object = no_missing_mod, data = grouped_stan_data, cores = 4, seed = "11729")
#cores = 4, iter = 10000) # iter = 10000
# control = list(adapt_delta = 0.99)) # address divergent transitions by increasing delta, i.e., take smaller steps
print(fit_grouped)
# Format of parameters is: theta[sci_name, feed]
# Build a lookup of readable parameter names, one row per (sci name, feed).
sci_feed_key <- lca_groups %>%
select(contains(c("clean_sci_name", "new", "sci"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(param_name = paste("[", sci, ",", feed_index, "]", sep = "")) %>%
mutate(alpha_param_name = paste("alpha", clean_sci_name, feed, sep = "-")) %>%
mutate(theta_param_name = paste("theta", clean_sci_name, feed, sep = "-")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN SCIENTIFIC NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, sci)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[")] <- sci_feed_key$alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[")] <- sci_feed_key$theta_param_name
# Flatten posterior draws to a matrix for plotting
distribution_grouped <- as.matrix(fit_grouped_clean)
# Density-area plots of all alpha and theta posteriors (empty title).
p_alpha <- mcmc_areas(distribution_grouped,
                      pars = vars(contains("alpha")),
                      area_method = "scaled height",
                      prob = 0.8) + ggtitle("")
print(p_alpha)
p_theta <- mcmc_areas(distribution_grouped,
                      pars = vars(contains("theta")),
                      area_method = "scaled height",
                      prob = 0.8) + ggtitle("")
print(p_theta)
######################################################################################################
# Model 2.1: Same as model 2 but with informative priors:
# Remove all NAs - estimate proportion feed for just two scientific names in the dataset
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Two-species subset; commented filters record convergence behavior of
# other combinations tried during development.
lca_groups <- lca_dat_no_na %>%
filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar")) %>%
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Macrobrachium amazonicum")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Oreochromis niloticus")) %>% # converges
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Pangasius")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Oncorhynchus mykiss", "Salmo salar", "Penaeus monodon")) %>% # creates divergent transitions
#filter(clean_sci_name %in% c("Penaeus monodon", "Salmo salar")) %>% # creates divergent transitions
# Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name))
# lca_groups <- lca_dat_no_na %>%
# filter(clean_sci_name %in% c("Macrobrachium amazonicum", "Penaeus monodon")) %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Now that alpha and theta are vectorized, can include all groups
# lca_groups <- lca_dat_no_na %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name))
# Set data for the model.
# Fix: use `<-` for top-level assignment (was `=`) and make the pipe/+
# precedence on the kappa line explicit with parentheses.
feed_weights <- lca_groups %>%
  select(contains("new")) %>%
  as.matrix()
k <- 4                                   # number of feed types
n <- nrow(feed_weights)                  # number of observations
n_sci <- length(unique(lca_groups$sci))  # number of scientific names
sci <- lca_groups$sci                    # sci-name index per observation
# Get the mean observations across all sci-names
phi_mean <- lca_groups %>%
  group_by(sci) %>%
  summarise(across(where(is.numeric), mean),
            n_obs = n()) %>%
  ungroup() %>%
  select(contains(c("new", "sci", "obs"))) %>%
  arrange(sci)
# phi: mean feed-weight simplex per sci name (row order aligned with sci index)
phi <- phi_mean %>%
  select(contains("new")) %>%
  as.matrix()
# kappa: prior strength = observation count per sci name plus k
kappa <- (phi_mean %>% pull(n_obs)) + k
# ---- Retired variants kept for reference (all commented out) ----
# This code vectorizes over alpha and theta, allowing all groups to be estimated
# this stan_data list passes phi in as data
# stan_data = list(n = n,
# k = k,
# feed_weights = feed_weights,
# n_sci = n_sci,
# sci = sci,
# phi = phi,
# kappa = kappa)
# Code that passes priors in as data
# stan_pooled <- 'data {
# int n; // number of observations
# int k; // number of feed types
# int n_sci; // number of sci names
# simplex[k] feed_weights[n]; // array of observed feed weights simplexes
# int sci[n]; // sci-name indices
# simplex[k] phi[n_sci];
# int kappa[n_sci];
# }
# parameters {
# // alpha parameter now moved into transformed parameter section
# simplex[k] theta[n_sci]; // vectors of estimated sci-level feed weight simplexes;
# }
# transformed parameters {
# // reparameterize alpha distributions as a vector of means and counts
# // phi is expected value of theta (mean feed weights)
# // kappa is strength of the prior measured in number of prior observations (minus K)
# vector<lower=0>[k] alpha[n_sci];
# for (m in 1:n) {
# alpha[sci[m]] = kappa[sci[m]] * phi[sci[m]];
# }
# }
# model {
#
# for (i in 1:n) {
# feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
# // theta[sci[i]] ~ dirichlet(to_vector(alpha[sci[i]])); // this has problems converging here
# }
# // now, estimate feed weights based on the vector of alphas
# for (j in 1:n_sci) {
# theta[j] ~ dirichlet(to_vector(alpha[j]));
# }
# }'
# NEW CODE: instead of passing phi in as data, pass it as a parameter with a distribution
# Appears like model is only valid when only one element in the phi simplex (per scientific name) is given a prior
# this stan_data list only defines kappa (not phi) as data
# Data for the informative-prior model (kappa passed as data; phi estimated).
# Fix: use `<-` for top-level assignment (was `=`).
stan_data <- list(n = n,
                  k = k,
                  feed_weights = feed_weights,
                  n_sci = n_sci,
                  sci = sci,
                  kappa = kappa)
# Model 2.1 Stan code: alpha reparameterized as kappa (count) * phi (mean);
# an informative prior is placed on one element of phi only (phi[1][1]).
stan_pooled <- 'data {
int n; // number of observations
int k; // number of feed types
int n_sci; // number of sci names
simplex[k] feed_weights[n]; // array of observed feed weights simplexes
int sci[n]; // sci-name indices
int kappa[n_sci];
}
parameters {
// alpha parameter now moved into transformed parameter section
simplex[k] phi[n_sci];
simplex[k] theta[n_sci]; // vectors of estimated sci-level feed weight simplexes
// sigma parameters for mean priors
real<lower=0> sigma_1;
// real<lower=0> sigma_2;
}
transformed parameters {
// reparameterize alpha distributions as a vector of means and counts
// phi is expected value of theta (mean feed weights)
// kappa is strength of the prior measured in number of prior observations (minus K)
vector<lower=0>[k] alpha[n_sci];
for (m in 1:n) {
alpha[sci[m]] = kappa[sci[m]] * phi[sci[m]];
}
}
model {
// priors on specific phi
// phi defined as phi[sci][k]
// option 1: define feed proportion priors as lower upper bounds (but can only give a prior for one element per simplex - i.e., priors on phi[6][1] and phi[6][2] causes error probably because elements within a simplex are constrained?)
// phi[sci][k] ~ uniform(0.1, 0.2); // example prior on lower and upper bounds
// option 2: define feed proportions as means (need to define sigmas in parameters block: real<lower=0> sigma_1, sigma_2 etc; etc;)
// phi[sci][k] ~ normal(0.13, sigma_1); // example prior on mean
sigma_1 ~ uniform(0, 10);
phi[1][1] ~ normal(0.13, sigma_1);
for (i in 1:n) {
feed_weights[i] ~ dirichlet(to_vector(alpha[sci[i]]));
// theta[sci[i]] ~ dirichlet(to_vector(alpha[sci[i]])); // this has problems converging here
}
// now, estimate feed weights based on the vector of alphas
for (j in 1:n_sci) {
theta[j] ~ dirichlet(to_vector(alpha[j]));
}
}'
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# RUNS but gives warning about divergent transitions
# Fit model:
fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11729")
#,cores = 4, iter = 10000,
#control = list(adapt_delta = 0.99)) # address divergent transitions by increasing delta, i.e., take smaller steps
print(fit_grouped)
# NOTE(review): launch_shinystan() opens an interactive browser app and blocks
# the script until it is closed — comment out for non-interactive runs.
launch_shinystan(fit_grouped)
# Format of parameters is: theta[sci_name, feed]
sci_feed_key <- lca_groups %>%
select(contains(c("clean_sci_name", "new", "sci"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(param_name = paste("[", sci, ",", feed_index, "]", sep = "")) %>%
mutate(alpha_param_name = paste("alpha", clean_sci_name, feed, sep = "-")) %>%
mutate(theta_param_name = paste("theta", clean_sci_name, feed, sep = "-")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN SCIENTIFIC NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, sci)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[")] <- sci_feed_key$alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[")] <- sci_feed_key$theta_param_name
# Flatten posterior draws and plot all alpha/theta densities.
distribution_grouped <- as.matrix(fit_grouped_clean)
p_alpha <- mcmc_areas(distribution_grouped,
                      pars = vars(contains("alpha")),
                      area_method = "scaled height",
                      prob = 0.8)
print(p_alpha)
p_theta <- mcmc_areas(distribution_grouped,
                      pars = vars(contains("theta")),
                      area_method = "scaled height",
                      prob = 0.8)
print(p_theta)
######################################################################################################
# Model 3 Add hierarchies (two to three levels)
######################################################################################################
# Model 3.1 Two-level model with no priors
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Keep all data, but Add indices
# tx is a numeric index derived from the taxa-group factor levels.
lca_groups <- lca_dat_no_na %>%
# Add indices for each sci-name
mutate(taxa_group_name = as.factor(taxa_group_name),
tx = as.numeric(taxa_group_name)) %>%
arrange(tx)
# Test a smaller dataset (just salmon/char)
# lca_groups <- lca_dat_no_na %>%
#filter(taxa_group_name %in% c("salmon/char")) %>%
#filter(clean_sci_name %in% c("Oncorhynchus mykiss")) %>%
# filter(taxa_group_name %in% c("salmon/char", "marine shrimp")) %>%
# mutate(taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(tx)
# Try analyzing only groups with n>1; also remove Thunnus orientalis since both data points are identical (effectively n = 1)
# lca_groups <- lca_dat_no_na %>%
# group_by(clean_sci_name) %>%
# mutate(n_sci = n()) %>%
# ungroup() %>%
# filter(n_sci > 1) %>%
# filter(clean_sci_name != "Thunnus orientalis") %>%
# # Add indices for each sci-name
# mutate(taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(tx)
# Set data for the two-level model.
# Fix: use `<-` for top-level assignment (was `=`).
feed_weights <- lca_groups %>%
  select(contains("new")) %>%
  as.matrix()
K <- 4                                 # number of feed types
N <- nrow(feed_weights)                # total observations
N_TX <- length(unique(lca_groups$tx))  # number of taxa groups
tx <- lca_groups$tx                    # taxa-group index per observation
# Get counts per taxa group:
tx_kappa <- lca_groups %>%
  select(contains(c("new", "tx"))) %>%
  group_by(tx) %>%
  summarise(n_obs = n()) %>%
  ungroup() %>%
  arrange(tx) %>%
  pull(n_obs)
# Get mean observations per taxa group
# NOTE(review): tx_phi_mean is not referenced by the model below — kept for
# reference when building informative priors; confirm before deleting.
tx_phi_mean <- lca_groups %>%
  select(contains(c("new", "tx"))) %>%
  group_by(tx) %>%
  summarise(across(contains("new"), mean)) %>%
  ungroup() %>%
  arrange(tx)
# Two-level model, reparameterize alpha (dirichlet shape parameter) as the expected (mean) feed proportions
# Fix: use `<-` for top-level assignment (was `=`).
stan_data <- list(N = N,
                  K = K,
                  feed_weights = feed_weights,
                  N_TX = N_TX,
                  tx = tx,
                  tx_kappa = tx_kappa)
# Two-level hierarchical Stan model: observation -> taxa group -> global.
# Alphas are reparameterized as count (kappa) * mean simplex (theta).
stan_pooled <- 'data {
int N; // number of total observations
int K; // number of feed types
int N_TX; // number of taxa groups
simplex[K] feed_weights[N]; // array of observed feed weights simplexes
int tx[N]; // taxa-group indices
int tx_kappa[N_TX]; // number of observations per taxa group
}
parameters {
simplex[K] tx_theta[N_TX]; // vectors of estimated taxa-level feed weight simplexes
simplex[K] theta;
}
transformed parameters {
// define params
vector<lower=0>[K] tx_alpha[N_TX];
vector<lower=0>[K] alpha;
// reparameterize alphas as a vector of means (theta) and counts (kappas)
// theta is expected value of mean feed weights
// kappa is strength of the prior measured in number of prior observations (minus K)
alpha = N * theta;
for (n_tx in 1:N_TX) {
tx_alpha[n_tx] = tx_kappa[n_tx] * tx_theta[n_tx];
}
}
model {
// likelihood
for (n in 1:N) {
feed_weights[n] ~ dirichlet(to_vector(tx_alpha[tx[n]]));
}
for (n_tx in 1:N_TX) {
tx_theta[n_tx] ~ dirichlet(alpha);
}
}'
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# RUNS but gives warning about divergent transitions
# Fit model:
fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11729")
# Increasing adapt_delta decreases the divergences but doesn't get rid of them
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.9))
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.99))
#launch_shinystan(fit_grouped)
#,cores = 4, iter = 10000,
#control = list(adapt_delta = 0.99)) # address divergent transitions by increasing delta, i.e., take smaller steps
print(fit_grouped)
# Create formatted names for taxa-group level
# Format of parameters is: theta[sci_name, feed]
tx_feed_key <- lca_groups %>%
select(contains(c("taxa_group_name", "new", "tx"))) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(index = paste("[", tx, ",", feed_index, "]", sep = "")) %>%
mutate(tx_theta_param_name = paste("theta[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
mutate(tx_alpha_param_name = paste("alpha[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED, THEN TAXA NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index, tx)
# One row per feed for the 4 global-level parameters
overall_feed_key <- lca_groups %>%
select(contains("new")) %>%
pivot_longer(cols = contains("new"), names_to = "feed") %>%
select(-value) %>%
unique() %>%
mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
str_detect(feed, "crops") ~ 2,
str_detect(feed, "fmfo") ~ 3,
str_detect(feed, "animal") ~ 4)) %>%
# Clean feed names
mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
mutate(overall_theta_param_name = paste("theta[overall, ", feed, "]", sep = "")) %>%
mutate(overall_alpha_param_name = paste("alpha[overall, ", feed, "]", sep = "")) %>%
# IMPORTANT before replacing param names: ARRANGE BY FEED TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
arrange(feed_index)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
# Taxa-level
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_alpha")] <- tx_feed_key$tx_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_theta")] <- tx_feed_key$tx_theta_param_name
# Global-level (tx-level names were replaced above, so these patterns now
# match only the 4 remaining overall parameters)
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[[1-4]")] <- overall_feed_key$overall_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[[1-4]")] <- overall_feed_key$overall_theta_param_name
distribution_grouped <- as.matrix(fit_grouped_clean)
p_alpha <- mcmc_areas(distribution_grouped,
pars = vars(contains("alpha")),
prob = 0.8,
area_method = "scaled height")
p_alpha
p_theta <- mcmc_areas(distribution_grouped,
pars = vars(contains("theta")),
prob = 0.8,
area_method = "scaled height")
p_theta
######################################################################################################
# Model 3.2: Three-level model
lca_dat_no_na <- lca_dat_no_zeroes %>%
filter(is.na(feed_soy_new)==FALSE)
# Keep all data, but Add indices
# sci and tx are numeric indices derived from the factor levels; the
# arrange(sci) matters downstream (sci_to_tx relies on sci order).
lca_groups <- lca_dat_no_na %>%
# Add indices for each sci-name
mutate(clean_sci_name = as.factor(clean_sci_name),
sci = as.numeric(clean_sci_name),
taxa_group_name = as.factor(taxa_group_name),
tx = as.numeric(taxa_group_name)) %>%
arrange(sci)
# Test a smaller dataset (just salmon/char and marine shrimp - i.e., two taxa levels + overall level)
# lca_groups <- lca_dat_no_na %>%
# filter(taxa_group_name %in% c("salmon/char", "marine shrimp")) %>%
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name),
# taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(sci)
# # Try analyzing only groups with n>1; also remove Thunnus orientalis since both data points are identical (effectively n = 1)
# lca_groups <- lca_dat_no_na %>%
# group_by(clean_sci_name) %>%
# mutate(n_sci = n()) %>%
# ungroup() %>%
# filter(n_sci > 1) %>%
# filter(clean_sci_name != "Thunnus orientalis") %>%
# # Add indices for each sci-name
# mutate(clean_sci_name = as.factor(clean_sci_name),
# sci = as.numeric(clean_sci_name),
# taxa_group_name = as.factor(taxa_group_name),
# tx = as.numeric(taxa_group_name)) %>%
# arrange(sci)
# Set data for the three-level model.
# Fix: use `<-` for top-level assignment (was `=`).
feed_weights <- lca_groups %>%
  select(contains("new")) %>%
  as.matrix()
K <- 4                                   # number of feed types
N <- nrow(feed_weights)                  # total observations
N_SCI <- length(unique(lca_groups$sci))  # number of scientific names
N_TX <- length(unique(lca_groups$tx))    # number of taxa groups
n_to_sci <- lca_groups$sci               # observation -> sci-name index
# sci-name -> taxa-group index; relies on lca_groups being arranged by sci
# above so unique() yields the (sci, tx) pairs in sci order.
sci_to_tx <- lca_groups %>%
  select(sci, tx) %>%
  unique() %>%
  pull(tx)
# Get counts per sci name and counts per taxa group (also included as data in the model):
sci_kappa <- lca_groups %>%
  select(contains(c("new", "sci", "obs"))) %>%
  group_by(sci) %>%
  summarise(n_obs = n()) %>%
  ungroup() %>%
  arrange(sci) %>%
  pull(n_obs)
tx_kappa <- lca_groups %>%
  select(contains(c("new", "tx", "obs"))) %>%
  group_by(tx) %>%
  summarise(n_obs = n()) %>%
  ungroup() %>%
  arrange(tx) %>%
  pull(n_obs)
# For priors, get the mean of observations per sci-name
# NOTE(review): sci_mean, tx_mean and overall_mean are not passed to the model
# below — they appear to be reference material for choosing informative
# priors; confirm before removing.
sci_mean <- lca_groups %>%
select(contains(c("new", "clean_sci_name", "sci"))) %>%
group_by(clean_sci_name, sci) %>%
summarise(across(contains("new"), mean),
n_sci = n()) %>%
ungroup() %>%
arrange(sci)
# Get mean observations per taxa group
tx_mean <- lca_groups %>%
select(contains(c("new", "taxa_group_name", "tx"))) %>%
group_by(taxa_group_name, tx) %>%
summarise(across(contains("new"), mean),
n_tx = n()) %>%
ungroup() %>%
arrange(tx)
# Grand mean of each feed proportion across all observations
overall_mean <- lca_groups %>%
select(contains(c("new"))) %>%
summarise(across(contains("new"), mean))
# Data for the three-level model.
# Fix: use `<-` for top-level assignment (was `=`).
stan_data <- list(N = N,
                  K = K,
                  feed_weights = feed_weights,
                  N_SCI = N_SCI,
                  N_TX = N_TX,
                  n_to_sci = n_to_sci,
                  sci_to_tx = sci_to_tx,
                  sci_kappa = sci_kappa,
                  tx_kappa = tx_kappa)
# Three-level hierarchical Stan model:
# observation -> scientific name -> taxa group -> global.
# Alphas at every level are reparameterized as count (kappa) * mean simplex.
stan_pooled <- 'data {
int N; // number of total observations
int K; // number of feed types
int N_SCI; // number of sci names
int N_TX; // number of taxa groups
simplex[K] feed_weights[N]; // array of observed feed weights simplexes
int n_to_sci[N]; // sci-name indices
int sci_to_tx[N_SCI]; // taxa-group indices
int sci_kappa[N_SCI]; // number of observations per sci-name
int tx_kappa[N_TX]; // number of observations per taxa group
}
parameters {
simplex[K] sci_theta[N_SCI]; // vectors of estimated sci-level feed weight simplexes
simplex[K] tx_theta[N_TX]; // vectors of estimated taxa-level feed weight simplexes
simplex[K] theta;
// if needed, define sigma params for mean priors:
// real<lower=0> sigma_1;
}
transformed parameters {
// define params
vector<lower=0>[K] sci_alpha[N_SCI];
vector<lower=0>[K] tx_alpha[N_TX];
vector<lower=0>[K] alpha;
// reparameterize alphas as a vector of means (theta) and counts (kappas)
// theta is expected value of mean feed weights
// kappa is strength of the prior measured in number of prior observations (minus K)
alpha = N * theta;
for (n_tx in 1:N_TX) {
tx_alpha[n_tx] = tx_kappa[n_tx] * tx_theta[n_tx];
}
for (n_sci in 1:N_SCI) {
sci_alpha[n_sci] = sci_kappa[n_sci] * sci_theta[n_sci];
}
}
model {
// priors on specific theta
// sci_theta defined as sci_theta[sci][K]
// option 1: define feed proportion priors as lower upper bounds
//sci_theta[24][1] ~ uniform(0.001, 0.05); // hypothetical lower and upper bounds
// option 2: define feed proportions as means (also need to define sigmas in parameters block: real<lower=0> sigma_1 etc; etc;)
// sci_theta[24][1] ~ normal(0.9, sigma_1); // hypothetical mean prior
// likelihood
for (n in 1:N) {
feed_weights[n] ~ dirichlet(to_vector(sci_alpha[n_to_sci[n]]));
}
for (n_sci in 1:N_SCI){
sci_theta[n_sci] ~ dirichlet(tx_alpha[sci_to_tx[n_sci]]);
}
for (n_tx in 1:N_TX){
tx_theta[n_tx] ~ dirichlet(alpha);
}
}'
# Three level model (no priors), but to help with convergence, try offsetting and scaling simplex:
# From: https://mc-stan.org/docs/2_21/stan-users-guide/parameterizing-centered-vectors.html
# stan_data = list(N = N,
# K = K,
# feed_weights = feed_weights,
# N_SCI = N_SCI,
# N_TX = N_TX,
# sci = sci,
# tx = tx)
#
# stan_pooled <- 'data {
# int N; // number of total observations
# int K; // number of feed types
# int N_SCI; // number of sci names
# int N_TX; // number of taxa groups
# simplex[K] feed_weights[N]; // array of observed feed weights simplexes
# int sci[N]; // sci-name indices
# int tx[N]; // taxa-group indices
# }
# parameters {
# vector<lower=0>[K] sci_alpha[N_SCI]; // vector of dirichlet priors, one for each sci name (alpha is not a simplex)
# simplex[K] sci_theta_raw[N_SCI]; // vectors of estimated sci-level feed weight simplexes
# vector<lower=0>[K] tx_alpha[N_TX];
# simplex[K] tx_theta_raw[N_TX];
# vector<lower=0>[K] alpha;
# simplex[K] theta_raw;
#
# // scaling parameters
# real sci_theta_scale[N_SCI]; // vectors of estimated sci-level feed weight simplexes
# real tx_theta_scale[N_TX];
# real theta_scale;
# }
# transformed parameters {
# vector[K] sci_theta[N_SCI];
# vector[K] tx_theta[N_TX];
# vector[K] theta;
#
# for (n_sci in 1:N_SCI){
# sci_theta[N_SCI] = sci_theta_scale[N_SCI] * (sci_theta_raw[N_SCI] - inv(K));
# }
#
# for (n_tx in 1:N_TX) {
# tx_theta[N_TX] = tx_theta_scale[N_TX] * (tx_theta_raw[N_TX] - inv(K));
# }
# theta = theta_scale * (theta_raw - inv(K));
#
# }
# model {
#
# // likelihood
# for (n in 1:N) {
# tx_theta_raw[tx[n]] ~ dirichlet(alpha);
# sci_theta_raw[sci[n]] ~ dirichlet(to_vector(tx_alpha[tx[n]]));
# feed_weights[n] ~ dirichlet(to_vector(sci_alpha[sci[n]]));
# }
# // now, estimate feed weights based on the vector of alphas
# theta_raw ~ dirichlet(to_vector(alpha));
# for (n_tx in 1:N_TX) {
# tx_theta_raw[n_tx] ~ dirichlet(to_vector(tx_alpha[n_tx]));
# }
# for (n_sci in 1:N_SCI) {
# sci_theta_raw[n_sci] ~ dirichlet(to_vector(sci_alpha[n_sci]));
# }
# }'
no_missing_mod <- stan_model(model_code = stan_pooled, verbose = TRUE)
# Note: For Windows, apparently OK to ignore this warning message:
# Warning message:
# In system(paste(CXX, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) :
# 'C:/rtools40/usr/mingw_/bin/g++' not found
# RUNS but gives warning about divergent transitions
# Fit model:
fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11729")
# Increasing adapt_delta decreases the divergences but doesn't get rid of them
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.9))
# fit_grouped <- sampling(object = no_missing_mod, data = stan_data, cores = 4, seed = "11720", control = list(adapt_delta = 0.99))
#launch_shinystan(fit_grouped)
print(fit_grouped)
distribution_grouped <- as.matrix(fit_grouped)
# Plot all in one plot
p_alpha <- mcmc_areas(distribution_grouped,
pars = vars(contains("tx_alpha")),
prob = 0.8,
area_method = "scaled height")
p_alpha
p_theta <- mcmc_areas(distribution_grouped,
pars = vars(contains("tx_theta")),
prob = 0.8,
area_method = "scaled height")
p_theta
# Create formatted names for sci-name level
# Format of parameters is: theta[sci_name, feed]
# Lookup table mapping each (scientific name, feed) pair to the readable
# parameter names that will replace the raw "sci_theta[i,j]"-style names in
# the stanfit object further below.
sci_feed_key <- lca_groups %>%
  select(contains(c("clean_sci_name", "new", "sci"))) %>%
  pivot_longer(cols = contains("new"), names_to = "feed") %>%
  select(-value) %>%
  unique() %>%
  # Feed order must match the order of the K feed components in the model.
  mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
                                str_detect(feed, "crops") ~ 2,
                                str_detect(feed, "fmfo") ~ 3,
                                str_detect(feed, "animal") ~ 4)) %>%
  # Clean feed names
  mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
  mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
  mutate(index = paste("[", sci, ",", feed_index, "]", sep = "")) %>%
  mutate(sci_theta_param_name = paste("theta[", clean_sci_name, ", ", feed, "]", sep = "")) %>%
  mutate(sci_alpha_param_name = paste("alpha[", clean_sci_name, ", ", feed, "]", sep = "")) %>%
  # IMPORTANT before replacing param names: ARRANGE BY FEED, THEN SCIENTIFIC NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
  arrange(feed_index, sci)
# Create formatted names for taxa-group level
# Format of parameters is: theta[taxa_group_name, feed]
tx_feed_key <- lca_groups %>%
  select(contains(c("taxa_group_name", "new", "tx"))) %>%
  pivot_longer(cols = contains("new"), names_to = "feed") %>%
  select(-value) %>%
  unique() %>%
  mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
                                str_detect(feed, "crops") ~ 2,
                                str_detect(feed, "fmfo") ~ 3,
                                str_detect(feed, "animal") ~ 4)) %>%
  # Clean feed names
  mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
  mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
  mutate(index = paste("[", tx, ",", feed_index, "]", sep = "")) %>%
  mutate(tx_theta_param_name = paste("theta[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
  mutate(tx_alpha_param_name = paste("alpha[", taxa_group_name, ", ", feed, "]", sep = "")) %>%
  # IMPORTANT before replacing param names: ARRANGE BY FEED, THEN TAXA NAME TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
  arrange(feed_index, tx)
# Same mapping for the single overall (global) level: only feed varies.
overall_feed_key <- lca_groups %>%
  select(contains("new")) %>%
  pivot_longer(cols = contains("new"), names_to = "feed") %>%
  select(-value) %>%
  unique() %>%
  mutate(feed_index = case_when(str_detect(feed, "soy") ~ 1,
                                str_detect(feed, "crops") ~ 2,
                                str_detect(feed, "fmfo") ~ 3,
                                str_detect(feed, "animal") ~ 4)) %>%
  # Clean feed names
  mutate(feed = gsub(feed, pattern = "feed_", replacement = "")) %>%
  mutate(feed = gsub(feed, pattern = "_new", replacement = "")) %>%
  mutate(overall_theta_param_name = paste("theta[overall, ", feed, "]", sep = "")) %>%
  mutate(overall_alpha_param_name = paste("alpha[overall, ", feed, "]", sep = "")) %>%
  # IMPORTANT before replacing param names: ARRANGE BY FEED TO MATCH HOW NAMES ARE ARRANGED IN STANFIT OBJECT
  arrange(feed_index)
# Replace param names; first copy to fit_grouped_clean to avoid having to re-run sampling as a result of doing something wrong to fit_grouped
fit_grouped_clean <- fit_grouped
# The keys above were arranged to match the stanfit parameter ordering, so a
# positional assignment into the grep() hits is valid here.
# Sci-Level
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "sci_alpha")] <- sci_feed_key$sci_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "sci_theta")] <- sci_feed_key$sci_theta_param_name
# Taxa-level
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_alpha")] <- tx_feed_key$tx_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "tx_theta")] <- tx_feed_key$tx_theta_param_name
# Global-level
# NOTE: order matters -- "alpha\\[[1-4]" / "theta\\[[1-4]" would also match
# the raw sci_/tx_ parameter names, but those were already renamed above
# (their new names have a species/taxa string, not a digit, after the
# bracket), so only the overall-level parameters still match here.
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "alpha\\[[1-4]")] <- overall_feed_key$overall_alpha_param_name
names(fit_grouped_clean)[grep(names(fit_grouped_clean), pattern = "theta\\[[1-4]")] <- overall_feed_key$overall_theta_param_name
distribution_grouped_clean <- as.matrix(fit_grouped_clean)
# Choose specific plot
# Example: posterior areas for a single species (Atlantic bluefin tuna).
p_alpha <- mcmc_areas(distribution_grouped_clean,
                      pars = vars(contains("alpha") & contains("Thunnus thynnus")),
                      prob = 0.8,
                      area_method = "scaled height")
p_alpha
p_theta <- mcmc_areas(distribution_grouped_clean,
                      pars = vars(contains("theta") & contains("Thunnus thynnus")),
                      prob = 0.8,
                      area_method = "scaled height")
p_theta
# Plot per sci-name
# One alpha plot and one theta plot per scientific name, printed in sequence
# (print() is required inside for loops for ggplot objects to render).
for (i in 1:length(unique(lca_groups$clean_sci_name))){
  name_i <- as.character(unique(lca_groups$clean_sci_name)[i])
  p_alpha <- mcmc_areas(distribution_grouped_clean,
                        pars = vars(contains("alpha") & contains(name_i)),
                        prob = 0.8,
                        area_method = "scaled height")
  print(p_alpha)
  p_theta <- mcmc_areas(distribution_grouped_clean,
                        pars = vars(contains("theta") & contains(name_i)),
                        prob = 0.8,
                        area_method = "scaled height")
  print(p_theta)
}
# Plot per taxa-name
# Same pair of plots, one per taxa group.
for (i in 1:length(unique(lca_groups$taxa_group_name))){
  name_i <- as.character(unique(lca_groups$taxa_group_name)[i])
  p_alpha <- mcmc_areas(distribution_grouped_clean,
                        pars = vars(contains("alpha") & contains(name_i)),
                        prob = 0.8,
                        area_method = "scaled height")
  print(p_alpha)
  p_theta <- mcmc_areas(distribution_grouped_clean,
                        pars = vars(contains("theta") & contains(name_i)),
                        prob = 0.8,
                        area_method = "scaled height")
  print(p_theta)
}
#Plot overall
# Global-level (pooled) alpha and theta posteriors.
p_alpha <- mcmc_areas(distribution_grouped_clean,
                      pars = vars(contains("alpha") & contains("overall")),
                      prob = 0.8,
                      area_method = "scaled height")
print(p_alpha)
p_theta <- mcmc_areas(distribution_grouped_clean,
                      pars = vars(contains("theta") & contains("overall")),
                      prob = 0.8,
                      area_method = "scaled height")
print(p_theta)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_state_info.R
\name{decode_LOH}
\alias{decode_LOH}
\title{Decode the Titan State To Give Copy number and State Name.}
\usage{
decode_LOH(G, symmetric = TRUE)
}
\arguments{
\item{G}{titan state}
\item{symmetric}{Boolean flag to indicate whether "similar" states should be collapsed}
}
\description{
Decode the Titan State To Give Copy number and State Name.
}
\author{
Gavin Ha
\url{https://github.com/gavinha/TitanCNA/blob/master/R/utils.R}
}
| /man/decode_LOH.Rd | no_license | tinyheero/titanCNAutils | R | false | false | 534 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_state_info.R
\name{decode_LOH}
\alias{decode_LOH}
\title{Decode the Titan State To Give Copy number and State Name.}
\usage{
decode_LOH(G, symmetric = TRUE)
}
\arguments{
\item{G}{titan state}
\item{symmetric}{Boolean flag to indicate whether "similar" states should be collapsed}
}
\description{
Decode the Titan State To Give Copy number and State Name.
}
\author{
Gavin Ha
\url{https://github.com/gavinha/TitanCNA/blob/master/R/utils.R}
}
|
rankall <- function(outcome, num = "best") {
  # Rank hospitals within every state on 30-day mortality for an outcome.
  #
  # Args:
  #   outcome: one of "heart attack", "heart failure", "pneumonia".
  #   num:     rank to report per state -- "best", "worst", or a positive
  #            integer. A rank beyond a state's hospital count yields NA.
  #
  # Returns: a data frame with columns `hospital` and `state`, one row per
  #   state, ordered by the (abbreviated) state name.
  ## create static vars
  # path of outcomes csv file
  path <- "C:/Users/Zeuce/Documents/rprog_data_ProgAssignment3-data/outcome-of-care-measures.csv"
  # named list mapping each valid outcome to its column index in the file
  outcomes <- list("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  # other vars coded to column position
  hosp_name_pos <- 2
  state_pos <- 7
  ## Read outcome data
  # stringsAsFactors = FALSE guards against factor columns on R < 4.0, where
  # as.numeric(factor) would silently return level codes instead of values.
  df <- read.csv(path, stringsAsFactors = FALSE)
  ## Check that outcome is valid
  if (!(outcome %in% names(outcomes))) {
    stop("invalid outcome")
  }
  # overwrite outcome to int value for convenience
  outcome <- outcomes[[outcome]]
  # rename relevant columns for simplicity
  names(df)[c(hosp_name_pos, state_pos, outcome)] <- c("hospital", "state", "outcome")
  # remove "Not Available" rows and convert the measure to numeric
  df <- df[df$outcome != "Not Available", ]
  df$outcome <- as.numeric(df$outcome)
  # split on state, then rank hospitals within each state by outcome,
  # breaking ties alphabetically by hospital name
  state_split <- split(df, df$state)
  split_ranked <- lapply(state_split, function(sp) sp[order(sp$outcome, sp$hospital), ])
  # pick the requested rank per state; "best" is simply rank 1, so those
  # two cases collapse into one (removes the duplicated branch). vapply
  # instead of sapply guarantees a character result.
  if (identical(num, "worst")) {
    results <- vapply(split_ranked, function(sp) sp[nrow(sp), "hospital"], character(1))
  } else {
    rank <- if (identical(num, "best")) 1L else num
    results <- vapply(split_ranked, function(sp) sp[rank, "hospital"], character(1))
  }
  # assemble and return, ordered by state
  results <- data.frame(hospital = results, state = names(results), stringsAsFactors = FALSE)
  results[order(results$state), ]
}
| /hospital_quality/rankall.R | no_license | hookskl/r_prog_jhu | R | false | false | 1,833 | r | rankall <- function(outcome, num = "best") {
## create static vars
# path of outcomes csv file
path <- "C:/Users/Zeuce/Documents/rprog_data_ProgAssignment3-data/outcome-of-care-measures.csv"
# named list with possible outcomes and integer value
# representing the related column in the outcomes file
outcomes <- list("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
# other vars coded to column position
hosp_name_pos <- 2
state_pos <- 7
## Read outcome data
df <- read.csv(path)
## Check that outcome is valid
if(!(outcome %in% names(outcomes))) {
stop("invalid outcome")
}
# overwrite outcome to int value for convenience
outcome <- outcomes[[outcome]]
# rename relevant columns for simplicity
names(df)[c(hosp_name_pos, state_pos, outcome)] <- c("hospital", "state", "outcome")
## For each state, find the hospital of the given rank
## Return a data frame with the hospital names and the
## (abbreviated) state name
# remove "Not Available"
df <- df[df$outcome != "Not Available", ]
# convert outcome to numeric
df$outcome <- as.numeric(df$outcome)
# split on state
state_split <- split(df, df$state)
# get rankings withink each state
split_ranked <- lapply(state_split, function(sp) sp[order(sp$outcome, sp$hospital), ])
# get ranking results based on num
if(num == "best") {
results <- sapply(split_ranked, function(sp, num) sp[num, "hospital"], num=1)
} else if (num == "worst") {
results <- sapply(split_ranked, function(sp) sp[nrow(sp), "hospital"])
} else {
results <- sapply(split_ranked, function(sp, num) sp[num, "hospital"], num=num)
}
# convert to data frame
results <- data.frame(hospital = results, state = names(results))
# return result ordered by state
results[order(results$state), ]
}
|
# LAB 9 [27.11.2018] (originally: LABORATORIUM 9) -- simple neural networks
# ------------------------------------------------------------------------
# ___ EXERCISE 1 (ZADANIE 1) _____________________________________________
# Toy data set of people: wiek = age, waga = weight, wzrost = height,
# gra = whether the person plays (the target).
ludzie <- data.frame("wiek" = c(23, 25, 28, 22, 46, 50, 48),
                     "waga" = c(75, 67, 120, 65, 70, 68, 97),
                     "wzrost" = c(176, 180, 175, 165, 187, 180, 178),
                     "gra" = c(TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE))
ludzie
# Hand-given parameters of a 3-2-1 network: w = input->hidden weights
# (two hidden units, three inputs each), v = hidden->output weights,
# b = biases (hidden1, hidden2, output).
w <- c(-0.46122, 0.97314, -0.39203, 0.78548, 2.10584, -0.57847)
v <- c(-0.81546, 1.03775)
b <- c(0.80109, 0.43529, -0.2368)
# Logistic (sigmoid) activation: maps any real input to the interval (0, 1).
# Vectorized, so it works element-wise on whole vectors.
activate <- function(x) {
  1 / (1 + exp(-x))
}
# One forward pass through the 3-2-1 network using the global parameter
# vectors w (input->hidden weights), v (hidden->output weights) and b
# (biases) defined above. Both hidden units use the logistic activation;
# the output unit is linear.
# NOTE(review): the loop below passes 1x1 data.frame slices rather than
# plain numbers; the arithmetic here happens to work for both.
forwardPass <- function(wiek, waga, wzrost) {
  hidden1 <- activate((wiek * w[1]) + (waga * w[2]) + (wzrost * w[3]) + b[1])
  hidden2 <- activate((wiek * w[4]) + (waga * w[5]) + (wzrost * w[6]) + b[2])
  output <- (v[1] * hidden1) + (v[2] * hidden2) + b[3]
  return(output)
}
# Run the forward pass over every person at once: forwardPass() is built
# from vectorized arithmetic, so whole columns can be passed directly.
# This replaces the previous row-by-row loop that grew `forwarded` with
# c() (an O(n^2) anti-pattern) and then coerced it with as.numeric().
forwarded <- data.frame(
  "forwarded" = forwardPass(ludzie$wiek, ludzie$waga, ludzie$wzrost)
)
forwarded
# ___ ZADANIE 2 __________________________________________________________
iris.data <- iris
# Min-max normalisation: linearly rescale x onto [0, 1] (the minimum maps
# to 0 and the maximum to 1).
norm <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Min-max normalise the four numeric feature columns; keep Species as-is.
iris.norm <- data.frame(norm(iris.data[1]), norm(iris.data[2]),
                        norm(iris.data[3]), norm(iris.data[4]),
                        iris.data[5])
# Reproducible ~67/33 train/test split by row.
set.seed(1234)
ind <- sample(2, nrow(iris), replace=TRUE, prob=c(0.67, 0.33))
iris.train <- iris.norm[ind==1,1:5]
iris.test <- iris.norm[ind==2,1:5]
#install.packages("neuralnet")
#library(neuralnet)
# NOTE(review): library(neuralnet) is commented out above, yet neuralnet()
# and compute() are called below -- the package must be attached for this
# script to run.
# One-hot encode the Species label into three 0/1 indicator columns.
# Vectorized comparison replaces the previous row-by-row loop, producing
# identical numeric 0/1 values.
iris.train$Setosa <- as.numeric(iris.train$Species == "setosa")
iris.train$Versicolor <- as.numeric(iris.train$Species == "versicolor")
iris.train$Virginica <- as.numeric(iris.train$Species == "virginica")
# Drop the original factor label; the neuralnet formula uses the indicators.
iris.train <- subset(iris.train, select = -c(Species))
# Fit a single-hidden-layer (4 units) network predicting the three one-hot
# species indicators from the four normalised measurements.
iris.neuralnet <- neuralnet(Setosa + Versicolor + Virginica ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
                            iris.train, hidden=4)
# Network outputs for the held-out test rows (features only, no label).
iris.pred <- compute(iris.neuralnet, iris.test[,1:4])
# Visualise the fitted network topology and weights.
plot(iris.neuralnet)
# Predicted class = column with the highest network output in each row.
# which.max() reproduces the old match(max(...), ...) tie-breaking (first
# hit wins), and indexing into a label vector replaces the grow-by-c()
# loop over rows.
species_levels <- c("setosa", "versicolor", "virginica")
pred_col <- apply(iris.pred$net.result, 1, which.max)
iris.pred_species <- unname(species_levels[pred_col])
# Compare actual vs. predicted species label, row by row.
iris.comparison <- cbind("real" = as.character(iris.test["Species"][,1]), "predicted" = iris.pred_species)
# Vectorized comparison replaces the grow-by-c() loop over rows.
iris.result <- iris.comparison[, 1] == iris.comparison[, 2]
# Accuracy in percent. BUG FIX: the denominator was hard-coded to 40, which
# is only correct when the test split happens to contain exactly 40 rows;
# mean() over the logical vector uses the actual number of test cases.
accuracy <- mean(iris.result) * 100
accuracy
# ___ ZADANIE 3 __________________________________________________________
# ...
# ------------------------------------------------------------------------ | /lab09/lab9_rozwiazania.R | no_license | mmazepa/InteligencjaObliczeniowa | R | false | false | 3,242 | r | # LABORATORIUM 9 [27.11.2018]
# ------------------------------------------------------------------------
# ___ ZADANIE 1 __________________________________________________________
ludzie <- data.frame("wiek" = c(23, 25, 28, 22, 46, 50, 48),
"waga" = c(75, 67, 120, 65, 70, 68, 97),
"wzrost" = c(176, 180, 175, 165, 187, 180, 178),
"gra" = c(TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE))
ludzie
w <- c(-0.46122, 0.97314, -0.39203, 0.78548, 2.10584, -0.57847)
v <- c(-0.81546, 1.03775)
b <- c(0.80109, 0.43529, -0.2368)
activate <- function(x) {
return(1/(1+exp(-x)))
}
forwardPass <- function(wiek, waga, wzrost) {
hidden1 <- activate((wiek * w[1]) + (waga * w[2]) + (wzrost * w[3]) + b[1])
hidden2 <- activate((wiek * w[4]) + (waga * w[5]) + (wzrost * w[6]) + b[2])
output <- (v[1] * hidden1) + (v[2] * hidden2) + b[3]
return(output)
}
forwarded <- c()
for (row in 1:nrow(ludzie)) {
tmp <- forwardPass(ludzie[row,][1], ludzie[row,][2], ludzie[row,][3])
forwarded <- c(forwarded, tmp)
}
forwarded <- data.frame("forwarded" = as.numeric(forwarded))
forwarded
# ___ ZADANIE 2 __________________________________________________________
iris.data <- iris
norm <- function(x)
{
(x-min(x))/(max(x)-min(x))
}
iris.norm <- data.frame(norm(iris.data[1]), norm(iris.data[2]),
norm(iris.data[3]), norm(iris.data[4]),
iris.data[5])
set.seed(1234)
ind <- sample(2, nrow(iris), replace=TRUE, prob=c(0.67, 0.33))
iris.train <- iris.norm[ind==1,1:5]
iris.test <- iris.norm[ind==2,1:5]
#install.packages("neuralnet")
#library(neuralnet)
iris.train$Setosa <- 0
iris.train$Versicolor <- 0
iris.train$Virginica <- 0
for (row in 1:nrow(iris.train)) {
if (iris.train[row,]["Species"] == "setosa") iris.train[row,]["Setosa"] = 1
if (iris.train[row,]["Species"] == "versicolor") iris.train[row,]["Versicolor"] = 1
if (iris.train[row,]["Species"] == "virginica") iris.train[row,]["Virginica"] = 1
}
iris.train <- subset(iris.train, select = -c(Species))
iris.neuralnet <- neuralnet(Setosa + Versicolor + Virginica ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
iris.train, hidden=4)
iris.pred <- compute(iris.neuralnet, iris.test[,1:4])
plot(iris.neuralnet)
iris.pred_species <- c()
for (row in 1:nrow(iris.pred$net.result)) {
col_number <- match(max(iris.pred$net.result[row,]), iris.pred$net.result[row,])
if (col_number == 1) iris.pred_species <- c(iris.pred_species, "setosa")
if (col_number == 2) iris.pred_species <- c(iris.pred_species, "versicolor")
if (col_number == 3) iris.pred_species <- c(iris.pred_species, "virginica")
}
iris.comparison <- cbind("real" = as.character(iris.test["Species"][,1]), "predicted" = iris.pred_species)
iris.result <- c()
for (row in 1:nrow(iris.comparison)) {
if (iris.comparison[,1][row] == iris.comparison[,2][row]) iris.result <- c(iris.result, TRUE)
else iris.result <- c(iris.result, FALSE)
}
accuracy <- (as.numeric(table(iris.result)["TRUE"])/40)*100
accuracy
# ___ ZADANIE 3 __________________________________________________________
# ...
# ------------------------------------------------------------------------ |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/artefact_rejection.R
\name{faster_chans}
\alias{faster_chans}
\title{Perform global bad channel detection for FASTER}
\usage{
faster_chans(data, sds = 3, ...)
}
\arguments{
\item{data}{A matrix of EEG data signals}
\item{sds}{Standard deviation thresholds}
\item{...}{Further parameters (tbd)}
}
\description{
Perform global bad channel detection for FASTER
}
\keyword{internal}
| /man/faster_chans.Rd | permissive | dannydaniel/eegUtils | R | false | true | 459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/artefact_rejection.R
\name{faster_chans}
\alias{faster_chans}
\title{Perform global bad channel detection for FASTER}
\usage{
faster_chans(data, sds = 3, ...)
}
\arguments{
\item{data}{A matrix of EEG data signals}
\item{sds}{Standard deviation thresholds}
\item{...}{Further parameters (tbd)}
}
\description{
Perform global bad channel detection for FASTER
}
\keyword{internal}
|
# Created: april 29 2021
# last edited:
#
# purpose: see if 2009 means the same thing in each site
#
# notes:
# NOTE(review): rm(list = ls()) only clears the global environment (it does
# not detach packages or reset options); restarting R is the safer reset.
rm(list = ls())
library(tidysawyer2)
library(tidyverse)
library(saapsim)
# White-background ggplot theme for every plot in this script.
theme_set(theme_bw())
# Z-score a numeric vector: center to mean 0 and rescale to unit standard
# deviation, ignoring missing values (NAs stay NA in the output).
scale_this <- function(v) {
  ctr <- mean(v, na.rm = TRUE)
  (v - ctr) / sd(v, na.rm = TRUE)
}
#--the years I have yield data for
# Distinct site-years present in the experimental weather data; used below
# (via semi_join) to keep only site-years the experiment actually covers.
mysiteyears <-
  ilia_wea %>%
  select(state, site, year) %>%
  distinct()
# Shared ggplot theme: legend at the bottom, site labels hidden on the x-axis.
mytheme <- theme(legend.position = "bottom",
                 axis.text.x = element_blank())
# full season precip-------------------------------------------------------------
ilia_wealt %>%
group_by(state, site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
ggplot(aes(year, precip_tot)) +
geom_line() +
facet_wrap(~site) +
labs(title = "Total precip")
ilia_wealt %>%
group_by(site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
group_by(site) %>%
mutate(precip_sc = scale_this(precip_tot)) %>%
ggplot(aes(year, precip_sc)) +
geom_line() +
geom_hline(yintercept = 0) +
facet_wrap(~site) +
labs(title = "scaled precip")
#--scaled long-term precip
p_sc <-
ilia_wealt %>%
group_by(state, site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
group_by(state, site) %>%
mutate(precip_sc = scale_this(precip_tot)) %>%
semi_join(mysiteyears)
f_p <-
p_sc %>%
group_by(year) %>%
mutate(yearmn = mean(precip_sc)) %>%
ggplot(aes(site, precip_sc)) +
geom_point(aes(color = state), size = 4) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "Yearly total precipitation, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "Yearly precip") +
scale_color_manual(values = c("purple", "darkorange")) +
mytheme
f_p
# full season temp-------------------------------------------------------------
ilia_wealt %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
ggplot(aes(year, avgt_c)) +
geom_line() +
facet_wrap(~site) +
labs(title = "Average temp")
ilia_wealt %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
group_by(site) %>%
mutate(avgt_sc = scale_this(avgt_c)) %>%
ggplot(aes(year, avgt_sc)) +
geom_line() +
geom_hline(yintercept = 0) +
facet_wrap(~site) +
labs(title = "scaled avg temp")
#--scaled long-term precip
t_sc <-
ilia_wealt %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
group_by(state, site) %>%
mutate(avgt_sc = scale_this(avgt_c)) %>%
semi_join(mysiteyears)
f_t <-
t_sc %>%
group_by(year) %>%
mutate(yearmn = mean(avgt_sc)) %>%
ggplot(aes(site, avgt_sc)) +
geom_point(aes(color = state), size = 4) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "Yearly average temperature, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "Yearly temperature") +
scale_color_manual(values = c("blue", "red")) +
mytheme
f_t
# growing season precip-------------------------------------------------------------
gs_start <- saf_date_to_doy("2001-03-01")
gs_end <- saf_date_to_doy("2001-09-01")
#--scaled long-term precip
pgs_sc <-
ilia_wealt %>%
filter(day > gs_start, day < gs_end) %>%
group_by(state, site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
group_by(state, site) %>%
mutate(precip_sc = scale_this(precip_tot)) %>%
semi_join(mysiteyears)
f_pgs <-
pgs_sc %>%
group_by(year) %>%
mutate(yearmn = mean(precip_sc)) %>%
ggplot(aes(site, precip_sc)) +
geom_point(aes(color = state), size = 4, pch = 18) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "Growing season precipitation, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "Growing season precip") +
scale_color_manual(values = c("purple", "darkorange")) +
mytheme
f_pgs
# gs season temp-------------------------------------------------------------
tgs_sc <-
ilia_wealt %>%
filter(day > gs_start, day < gs_end) %>%
group_by(state, site, year) %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
group_by(state, site) %>%
mutate(avgt_sc = scale_this(avgt_c)) %>%
semi_join(mysiteyears)
f_tgs <-
tgs_sc %>%
group_by(year) %>%
mutate(yearmn = mean(avgt_sc)) %>%
ggplot(aes(site, avgt_sc)) +
geom_point(aes(color = state), size = 4, pch = 18) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "GS temperature, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "GS temperature") +
scale_color_manual(values = c("blue", "red")) +
mytheme
f_tgs
# together ----------------------------------------------------------------
library(patchwork)
# Combine yearly and growing-season panels side by side; ggsave() with no
# plot argument saves the most recently displayed plot (the patchwork above).
f_t + f_tgs
ggsave("00_exp-explore/fig_weather-year-temperature.png")
f_p + f_pgs
ggsave("00_exp-explore/fig_weather-year-precip.png")
gaps <-
read_csv("00_empirical-n-cont/dat_gap-components.csv") %>%
filter(!is.na(ngap_frac))
gaps %>%
left_join(pgs_sc) %>%
select(site, year, nonngap, ngap, precip_sc) %>%
pivot_longer(nonngap:ngap) %>%
ggplot(aes(value, precip_sc)) +
geom_point(size = 5, aes(color = name)) +
geom_hline(yintercept = 0) +
facet_wrap(~name)
gaps %>%
left_join(pgs_sc) %>%
select(site, year, nonngap, ngap, precip_sc) %>%
pivot_longer(nonngap:ngap) %>%
ggplot(aes(value, precip_sc)) +
geom_hex(bins = 5, color = "black") +
geom_point() +
geom_hline(yintercept = 0) +
geom_text(x = 4000, y = 2, label = "wet", check_overlap = T) +
geom_text(x = 4000, y = -1.5, label = "dry", check_overlap = T) +
facet_wrap(~name) +
scale_fill_viridis_c()
gaps %>%
left_join(tgs_sc) %>%
select(site, year, nonngap, ngap, avgt_sc) %>%
pivot_longer(nonngap:ngap) %>%
ggplot(aes(value, avgt_sc)) +
geom_hex(bins = 5, color = "black") +
geom_point() +
geom_hline(yintercept = 0) +
geom_text(x = 5000, y = 3, label = "hot", check_overlap = T) +
geom_text(x = 5000, y = -1, label = "cool", check_overlap = T) +
facet_wrap(~name) +
scale_fill_viridis_c() +
facet_wrap(~name)
gaps %>%
ggplot(aes(ngap, nonngap)) +
geom_hex(bins = 5, color = "black") +
geom_point() +
geom_abline() +
scale_fill_viridis_c() +
coord_cartesian(ylim = c(0, 6000),
xlim = c(0, 6000))
#--%N vs tot gap colored by weather
gaps %>%
left_join(tgs_sc) %>%
left_join(pgs_sc) %>%
mutate(tot_gap = ngap + nonngap) %>%
ggplot(aes(tot_gap, ngap_frac)) +
geom_point(aes(color = precip_sc), size = 4) +
scale_color_viridis_c()
# Total gap vs. N-gap fraction, points coloured by scaled GS temperature
# (diverging scale centered at the long-term mean, i.e. 0).
gaps %>%
  left_join(tgs_sc) %>%
  left_join(pgs_sc) %>%
  mutate(tot_gap = ngap + nonngap) %>%
  ggplot(aes(tot_gap, ngap_frac)) +
  geom_point(aes(color = avgt_sc), size = 4) +
  scale_color_gradient2(midpoint = 0)
# BUG FIX: the stray snippet below was left over from experimenting with
# colour scales. It was not attached to any plot (no trailing `+` above),
# the bare `...` is invalid outside a function, and the extra `)` made the
# whole file fail to source. Kept here, commented out, for reference:
# scale_colour_brewer(
#   type = "seq",
#   palette = 1,
#   direction = 1,
#   aesthetics = "colour"
# )
# scale_color_viridis_c()
| /00_exp-explore/code_explore-long-term-weather.R | no_license | vanichols/ghproj_ccgap | R | false | false | 7,703 | r | # Created: april 29 2021
# last edited:
#
# purpose: see if 2009 means the same thing in each site
#
# notes:
rm(list = ls())
library(tidysawyer2)
library(tidyverse)
library(saapsim)
theme_set(theme_bw())
scale_this <- function(x){
(x - mean(x, na.rm=TRUE)) / sd(x, na.rm=TRUE)
}
#--the years I have yield data for
mysiteyears <-
ilia_wea %>%
select(state, site, year) %>%
distinct()
mytheme <- theme(legend.position = "bottom",
axis.text.x = element_blank())
# full season precip-------------------------------------------------------------
ilia_wealt %>%
group_by(state, site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
ggplot(aes(year, precip_tot)) +
geom_line() +
facet_wrap(~site) +
labs(title = "Total precip")
ilia_wealt %>%
group_by(site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
group_by(site) %>%
mutate(precip_sc = scale_this(precip_tot)) %>%
ggplot(aes(year, precip_sc)) +
geom_line() +
geom_hline(yintercept = 0) +
facet_wrap(~site) +
labs(title = "scaled precip")
#--scaled long-term precip
p_sc <-
ilia_wealt %>%
group_by(state, site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
group_by(state, site) %>%
mutate(precip_sc = scale_this(precip_tot)) %>%
semi_join(mysiteyears)
f_p <-
p_sc %>%
group_by(year) %>%
mutate(yearmn = mean(precip_sc)) %>%
ggplot(aes(site, precip_sc)) +
geom_point(aes(color = state), size = 4) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "Yearly total precipitation, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "Yearly precip") +
scale_color_manual(values = c("purple", "darkorange")) +
mytheme
f_p
# full season temp-------------------------------------------------------------
ilia_wealt %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
ggplot(aes(year, avgt_c)) +
geom_line() +
facet_wrap(~site) +
labs(title = "Average temp")
ilia_wealt %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
group_by(site) %>%
mutate(avgt_sc = scale_this(avgt_c)) %>%
ggplot(aes(year, avgt_sc)) +
geom_line() +
geom_hline(yintercept = 0) +
facet_wrap(~site) +
labs(title = "scaled avg temp")
#--scaled long-term precip
t_sc <-
ilia_wealt %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
group_by(state, site) %>%
mutate(avgt_sc = scale_this(avgt_c)) %>%
semi_join(mysiteyears)
f_t <-
t_sc %>%
group_by(year) %>%
mutate(yearmn = mean(avgt_sc)) %>%
ggplot(aes(site, avgt_sc)) +
geom_point(aes(color = state), size = 4) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "Yearly average temperature, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "Yearly temperature") +
scale_color_manual(values = c("blue", "red")) +
mytheme
f_t
# growing season precip-------------------------------------------------------------
gs_start <- saf_date_to_doy("2001-03-01")
gs_end <- saf_date_to_doy("2001-09-01")
#--scaled long-term precip
pgs_sc <-
ilia_wealt %>%
filter(day > gs_start, day < gs_end) %>%
group_by(state, site, year) %>%
summarise(precip_tot = sum(precip_mm, na.rm = T)) %>%
group_by(state, site) %>%
mutate(precip_sc = scale_this(precip_tot)) %>%
semi_join(mysiteyears)
f_pgs <-
pgs_sc %>%
group_by(year) %>%
mutate(yearmn = mean(precip_sc)) %>%
ggplot(aes(site, precip_sc)) +
geom_point(aes(color = state), size = 4, pch = 18) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "Growing season precipitation, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "Growing season precip") +
scale_color_manual(values = c("purple", "darkorange")) +
mytheme
f_pgs
# gs season temp-------------------------------------------------------------
tgs_sc <-
ilia_wealt %>%
filter(day > gs_start, day < gs_end) %>%
group_by(state, site, year) %>%
mutate(avgt_c = (maxt_c + mint_c)/2) %>%
group_by(state, site, year) %>%
summarise(avgt_c = mean(avgt_c, na.rm = T)) %>%
group_by(state, site) %>%
mutate(avgt_sc = scale_this(avgt_c)) %>%
semi_join(mysiteyears)
f_tgs <-
tgs_sc %>%
group_by(year) %>%
mutate(yearmn = mean(avgt_sc)) %>%
ggplot(aes(site, avgt_sc)) +
geom_point(aes(color = state), size = 4, pch = 18) +
geom_hline(yintercept = 0) +
geom_hline(aes(yintercept = yearmn), linetype = "dashed") +
facet_wrap(~year) +
labs(x = "Site",
y = "GS temperature, scaled",
title = "Does 2006 mean the same thing at each site?",
subtitle = "GS temperature") +
scale_color_manual(values = c("blue", "red")) +
mytheme
f_tgs
# together ----------------------------------------------------------------
library(patchwork)
f_t + f_tgs
ggsave("00_exp-explore/fig_weather-year-temperature.png")
f_p + f_pgs
ggsave("00_exp-explore/fig_weather-year-precip.png")
# curiouis ----------------------------------------------------------------
gaps <-
read_csv("00_empirical-n-cont/dat_gap-components.csv") %>%
filter(!is.na(ngap_frac))
gaps %>%
left_join(pgs_sc) %>%
select(site, year, nonngap, ngap, precip_sc) %>%
pivot_longer(nonngap:ngap) %>%
ggplot(aes(value, precip_sc)) +
geom_point(size = 5, aes(color = name)) +
geom_hline(yintercept = 0) +
facet_wrap(~name)
gaps %>%
left_join(pgs_sc) %>%
select(site, year, nonngap, ngap, precip_sc) %>%
pivot_longer(nonngap:ngap) %>%
ggplot(aes(value, precip_sc)) +
geom_hex(bins = 5, color = "black") +
geom_point() +
geom_hline(yintercept = 0) +
geom_text(x = 4000, y = 2, label = "wet", check_overlap = T) +
geom_text(x = 4000, y = -1.5, label = "dry", check_overlap = T) +
facet_wrap(~name) +
scale_fill_viridis_c()
gaps %>%
left_join(tgs_sc) %>%
select(site, year, nonngap, ngap, avgt_sc) %>%
pivot_longer(nonngap:ngap) %>%
ggplot(aes(value, avgt_sc)) +
geom_hex(bins = 5, color = "black") +
geom_point() +
geom_hline(yintercept = 0) +
geom_text(x = 5000, y = 3, label = "hot", check_overlap = T) +
geom_text(x = 5000, y = -1, label = "cool", check_overlap = T) +
facet_wrap(~name) +
scale_fill_viridis_c() +
facet_wrap(~name)
gaps %>%
ggplot(aes(ngap, nonngap)) +
geom_hex(bins = 5, color = "black") +
geom_point() +
geom_abline() +
scale_fill_viridis_c() +
coord_cartesian(ylim = c(0, 6000),
xlim = c(0, 6000))
#--%N vs tot gap colored by weather
gaps %>%
left_join(tgs_sc) %>%
left_join(pgs_sc) %>%
mutate(tot_gap = ngap + nonngap) %>%
ggplot(aes(tot_gap, ngap_frac)) +
geom_point(aes(color = precip_sc), size = 4) +
scale_color_viridis_c()
gaps %>%
left_join(tgs_sc) %>%
left_join(pgs_sc) %>%
mutate(tot_gap = ngap + nonngap) %>%
ggplot(aes(tot_gap, ngap_frac)) +
geom_point(aes(color = avgt_sc), size = 4) +
scale_color_gradient2(midpoint = 0)
scale_colour_brewer(
...,
type = "seq",
palette = 1,
direction = 1,
aesthetics = "colour"
)
)
scale_color_viridis_c()
|
#______________________________________________________________________________
# FILE: r/vis/PathQuery-app/global.R
# DESC: Path Query App
# SRC :
# IN : Stardog triplestore CTDasRDFOnt (triples from Ontology instances)
# OUT :
# REQ : r/validation/Functions.R
# Stardog running on localhost with database CTDasRDFOnt populated
# SRC :
# NOTE:
# TODO:
#
#______________________________________________________________________________
library(plyr) # rename
library(reshape) # melt
library(SPARQL)
library(visNetwork)
# Set wd 3 levels up, to folder CTDasRDF. Navigate down from
# there to data/source/ to obtain TTL source data.
# NOTE(review): setwd() with a relative path depends on where the app is
# launched from and mutates global state; confirm the expected launch
# directory before reusing this script.
setwd("../../../")
currDir<-getwd()
source("r/validation/Functions.R") # IRI to prefix and other fun
# Endpoint
# SPARQL query endpoint of the local Stardog database used by the app.
endpoint <- "http://localhost:5820/CTDasRDFOnt/query"
#-- Legend Nodes Legend ----
# Node color palette used by the network legend:
# Yellow node: #FFBD09
# Blue node: #2C52DA
# Bright. Turq: #3DDAFD
# Green node: #008D00
# BlueGreen node: #1C5B64
# DK red node: #870922
# Br red node: #C71B5F
# Purp Node: #482C79
# Br. Or Node: #FE7900
# Legend node table: one row per node type shown in the visNetwork legend.
lnodes <- read.table(header = TRUE, text = "
label color.border color.background font.color
'Start Node' 'red' 'yellow' 'black'
cdiscpilot01 'black' '#2C52DA' 'white'
cdo1p 'black' '#008D00' 'white'
code 'black' '#1C5B64' 'white'
study 'black' '#FFBD09' 'white'
custom 'black' '#C71B5F' 'white'
Literal 'black' 'white' 'black'
")
# All legend entries render as boxes.
lnodes$shape <- "box"
lnodes$title <- "Legend" | /r/vis/PathyQuery-app/global.R | permissive | i-akiya/CTDasRDF | R | false | false | 1,592 | r | #______________________________________________________________________________
# FILE: r/vis/PathQuery-app/global.R
# DESC: Path Query App
# SRC :
# IN : Stardog triplestore CTDasRDFOnt (triples from Ontology instances)
# OUT :
# REQ : r/validation/Functions.R
# Stardog running on localhost with database CTDasRDFOnt populated
# SRC :
# NOTE:
# TODO:
#
#______________________________________________________________________________
library(plyr) # rename
library(reshape) # melt
library(SPARQL)
library(visNetwork)
# Set wd 3 levels up, to folder CTDasRDF. Navigate down from
# there to data/source/ to obtain TTL source data.
# NOTE(review): setwd() with a relative path depends on where the app is
# launched from and mutates global state; confirm the expected launch
# directory before reusing this script.
setwd("../../../")
currDir<-getwd()
source("r/validation/Functions.R") # IRI to prefix and other fun
# Endpoint
# SPARQL query endpoint of the local Stardog database used by the app.
endpoint <- "http://localhost:5820/CTDasRDFOnt/query"
#-- Legend Nodes Legend ----
# Node color palette used by the network legend:
# Yellow node: #FFBD09
# Blue node: #2C52DA
# Bright. Turq: #3DDAFD
# Green node: #008D00
# BlueGreen node: #1C5B64
# DK red node: #870922
# Br red node: #C71B5F
# Purp Node: #482C79
# Br. Or Node: #FE7900
# Legend node table: one row per node type shown in the visNetwork legend.
lnodes <- read.table(header = TRUE, text = "
label color.border color.background font.color
'Start Node' 'red' 'yellow' 'black'
cdiscpilot01 'black' '#2C52DA' 'white'
cdo1p 'black' '#008D00' 'white'
code 'black' '#1C5B64' 'white'
study 'black' '#FFBD09' 'white'
custom 'black' '#C71B5F' 'white'
Literal 'black' 'white' 'black'
")
# All legend entries render as boxes titled "Legend".
lnodes$shape <- "box"
lnodes$title <- "Legend"
##########
# FACTORS #
##########
# Demo: creating and manipulating factors in R.
x = c("tipo1", "tipo1", "tipo2", "tipo2", "tipo1", "tipo2", "tipo1")
factor(x) # Returns the vector together with its levels
as.factor(x) # Same result as factor(x)
# Levels and how to modify them
factor(x,levels = c("tipo1", "tipo2", "Otros")) # Fix the exact set of levels, even ones absent from the data
fx <- as.factor(x)
levels(fx)
levels(fx) = c("T1", "T2", "Otros") # Rename the levels
# Ordering factors and renaming levels
notas = c(5, 5, 3, 5, 2, 1, 5, 3, 1, 3, 5)
factor(notas) # levels: 1, 2, 3, 5
notas <- ordered(notas, levels = c(1, 2, 3, 5), labels = c("Sus", "Sus", "Sus", "Apr")) # Ordered factor; duplicate labels collapse levels 1-3 into "Sus"
| /0. Bรกsicos/0.4. Factores.R | no_license | luismor85/cursoR | R | false | false | 733 | r | ##########
#FACTORES#
##########
x = c("tipo1", "tipo1", "tipo2", "tipo2", "tipo1", "tipo2", "tipo1")
factor(x) #Arroja el vector con los niveles
as.factor(x) #Mismo resultado que factor(x)
#Niveles y modificaciรณn
factor(x,levels = c("tipo1", "tipo2", "Otros")) #Determino los niveles exactos, incluso aunque no exista en el factor
fx <- as.factor(x)
levels(fx)
levels(fx) = c("T1", "T2", "Otros") #Cambio los nombres de los niveles
#Ordenar los factores y renombrar niveles
notas = c(5, 5, 3, 5, 2, 1, 5, 3, 1, 3, 5)
factor(notas) #niveles: 1, 2, 3, 5
notas <- ordered(notas, levels = c(1, 2, 3, 5), labels = c("Sus", "Sus", "Sus", "Apr")) #Factores ordenados
|
๏ปฟ<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="AzureCloudService1" generation="1" functional="0" release="0" Id="cf6630e6-0fd4-4910-8cac-963ab892f6fa" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="AzureCloudService1Group" generation="1" functional="0" release="0">
<componentports>
<inPort name="WebRole1:Endpoint1" protocol="http">
<inToChannel>
<lBChannelMoniker name="/AzureCloudService1/AzureCloudService1Group/LB:WebRole1:Endpoint1" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="WebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="WebRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWebRole1Instances" />
</maps>
</aCS>
<aCS name="WorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="WorkerRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWorkerRole1Instances" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:WebRole1:Endpoint1">
<toPorts>
<inPortMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1/Endpoint1" />
</toPorts>
</lBChannel>
</channels>
<maps>
<map name="MapWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapWebRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1Instances" />
</setting>
</map>
<map name="MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapWorkerRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1Instances" />
</setting>
</map>
</maps>
<components>
<groupHascomponents>
<role name="WebRole1" generation="1" functional="0" release="0" software="E:\PA4\AzureCloudService1\AzureCloudService1\csx\Release\roles\WebRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="-1" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="Endpoint1" protocol="http" portRanges="80" />
</componentports>
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="WebRole1" xmlns="urn:azure:m:v1"><r name="WebRole1"><e name="Endpoint1" /></r><r name="WorkerRole1" /></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
<groupHascomponents>
<role name="WorkerRole1" generation="1" functional="0" release="0" software="E:\PA4\AzureCloudService1\AzureCloudService1\csx\Release\roles\WorkerRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaWorkerHost.exe " memIndex="-1" hostingEnvironment="consoleroleadmin" hostingEnvironmentVersion="2">
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="WorkerRole1" xmlns="urn:azure:m:v1"><r name="WebRole1"><e name="Endpoint1" /></r><r name="WorkerRole1" /></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="WebRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyUpdateDomain name="WorkerRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="WebRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyFaultDomain name="WorkerRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="WebRole1Instances" defaultPolicy="[1,1,1]" />
<sCSPolicyID name="WorkerRole1Instances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="b350d8ea-5a51-42ee-871a-735696c70d3c" ref="Microsoft.RedDog.Contract\ServiceContract\AzureCloudService1Contract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="9f6ea89d-7a2f-4f43-83a1-144f138aa506" ref="Microsoft.RedDog.Contract\Interface\WebRole1:Endpoint1@ServiceDefinition">
<inPort>
<inPortMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1:Endpoint1" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel> | /AzureCloudService1/csx/Release/ServiceDefinition.rd | no_license | kjneaville/PA4 | R | false | false | 7,644 | rd | ๏ปฟ<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="AzureCloudService1" generation="1" functional="0" release="0" Id="cf6630e6-0fd4-4910-8cac-963ab892f6fa" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="AzureCloudService1Group" generation="1" functional="0" release="0">
<componentports>
<inPort name="WebRole1:Endpoint1" protocol="http">
<inToChannel>
<lBChannelMoniker name="/AzureCloudService1/AzureCloudService1Group/LB:WebRole1:Endpoint1" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="WebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="WebRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWebRole1Instances" />
</maps>
</aCS>
<aCS name="WorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="WorkerRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/AzureCloudService1/AzureCloudService1Group/MapWorkerRole1Instances" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:WebRole1:Endpoint1">
<toPorts>
<inPortMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1/Endpoint1" />
</toPorts>
</lBChannel>
</channels>
<maps>
<map name="MapWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapWebRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1Instances" />
</setting>
</map>
<map name="MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapWorkerRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1Instances" />
</setting>
</map>
</maps>
<components>
<groupHascomponents>
<role name="WebRole1" generation="1" functional="0" release="0" software="E:\PA4\AzureCloudService1\AzureCloudService1\csx\Release\roles\WebRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="-1" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="Endpoint1" protocol="http" portRanges="80" />
</componentports>
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="WebRole1" xmlns="urn:azure:m:v1"><r name="WebRole1"><e name="Endpoint1" /></r><r name="WorkerRole1" /></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
<groupHascomponents>
<role name="WorkerRole1" generation="1" functional="0" release="0" software="E:\PA4\AzureCloudService1\AzureCloudService1\csx\Release\roles\WorkerRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaWorkerHost.exe " memIndex="-1" hostingEnvironment="consoleroleadmin" hostingEnvironmentVersion="2">
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="WorkerRole1" xmlns="urn:azure:m:v1"><r name="WebRole1"><e name="Endpoint1" /></r><r name="WorkerRole1" /></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/AzureCloudService1/AzureCloudService1Group/WorkerRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="WebRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyUpdateDomain name="WorkerRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="WebRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyFaultDomain name="WorkerRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="WebRole1Instances" defaultPolicy="[1,1,1]" />
<sCSPolicyID name="WorkerRole1Instances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="b350d8ea-5a51-42ee-871a-735696c70d3c" ref="Microsoft.RedDog.Contract\ServiceContract\AzureCloudService1Contract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="9f6ea89d-7a2f-4f43-83a1-144f138aa506" ref="Microsoft.RedDog.Contract\Interface\WebRole1:Endpoint1@ServiceDefinition">
<inPort>
<inPortMoniker name="/AzureCloudService1/AzureCloudService1Group/WebRole1:Endpoint1" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel> |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggside.R
\name{is.ggside}
\alias{is.ggside}
\alias{is.ggside_layer}
\alias{is.ggside_options}
\alias{is.ggside_scale}
\title{Check ggside objects}
\usage{
is.ggside(x)
is.ggside_layer(x)
is.ggside_options(x)
is.ggside_scale(x)
}
\arguments{
\item{x}{Object to test}
}
\value{
A logical value
}
\description{
Check ggside objects
}
| /man/is.ggside.Rd | permissive | seifudd/ggside | R | false | true | 412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggside.R
\name{is.ggside}
\alias{is.ggside}
\alias{is.ggside_layer}
\alias{is.ggside_options}
\alias{is.ggside_scale}
\title{Check ggside objects}
\usage{
is.ggside(x)
is.ggside_layer(x)
is.ggside_options(x)
is.ggside_scale(x)
}
\arguments{
\item{x}{Object to test}
}
\value{
A logical value
}
\description{
Check ggside objects
}
|
#######################################################################################
#######################################################################################
### AllComparisonRCodeFunctions.r
### (c) 2009 Alan Lenarcic
### Code written for Edoardo Airoldi Lab, Harvard
###
### This code is not usually used in future work. It was an attempt to print out
### formatted Latex Tables with proper formatting of key estimators as used
### in Lenarcic 2009 thesis.
###
#### This code is for making Latex demonstration table summaries of simulation output
###
###
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
#
# Note: the Comparison R Code functions are probably undesired in any library
# and are really only valuable as reference.
#########################################################################################
###
### These are Latex titles, row and column headers
###
## Human-readable names of the compared estimators.
## NOTE(review): only 7 names here versus 10 entries in EstimatorColNames
## below — presumably a stale list; verify before using it for labeling.
EstimatorNames<- c("Lasso Fixed", "Lars Cp", "Lasso Lin and Yuan", "Limit Ridge", "Quick Two Lasso",
   "Limit Lasso", "Marginal Median");
## LaTeX stub pasted into the second column of every table row: a stacked
## array naming the statistics reported in each cell (Type II count,
## Type I count, summed squared beta error, and run time).
FunctionPlot <- paste(" $ \\begin{array} {c} ", "\\mbox{\\footnotesize{\\# II}} \\\\ \\hline",
    "\\mbox{\\footnotesize{\\# I}} \\\\ \\hline",
    "\\mbox{\\footnotesize{$\\sum \\delta_{\\mbox{\\tiny{$\\beta$}}}^2$}} \\\\ \\hline",
    ##"\\mbox{\\footnotesize{\\% Perf}} \\\\ \\hline ",
    "\\mbox{\\footnotesize{Run}}",
    "\\end{array} $ ", sep="");
## LaTeX column headers, one per estimator, each a two-line stacked array.
## BUGFIX: entry 3 ("Lasso Lin & Yuan") used "\\foootnotesize" (typo — an
## undefined LaTeX command) and carried one extra unmatched "}"; both fixed
## so every header now has balanced braces and valid commands.
EstimatorColNames<- c(
    paste("\\begin{array}{c} \\mbox{LARS} \\\\",
          " \\mbox{Fixed $\\kappa_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
          " } \\end{array} ", sep=""),
    paste("\\begin{array}{c} \\mbox{LARS} \\\\",
          " \\mbox{$C_{\\mbox{\\tiny{p}}}$",
          "} \\end{array}", sep=""),
    paste("\\begin{array}{c} \\mbox{\\small{L}\\footnotesize{asso $w$=1}}",
          " \\\\ \\mbox{\\small{L}\\footnotesize{in \\& }\\small{Y}\\footnotesize{uan}} ",
          " \\end{array}", sep=""),
    paste( "\\begin{array}{c}",
           " \\mbox{\\small{L}\\footnotesize{im}",
           "\\small{R}\\footnotesize{idge}} \\\\",
           " \\mbox{$\\pi_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
           " \\small{K}\\footnotesize{nown}} \\end{array}", sep=""),
    paste("\\begin{array}{c} \\mbox{\\small{T}\\footnotesize{wo}",
          "\\small{L}\\footnotesize{asso}}\\\\",
          " \\mbox{$\\times$ 9} \\end{array}", sep=""),
    paste("\\begin{array}{c} ",
          "\\mbox{\\small{L}\\footnotesize{im}\\small{L}",
          "\\footnotesize{asso}} \\\\",
          " \\mbox{$\\pi_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
          " \\small{K}\\footnotesize{nown}}",
          " \\end{array}", sep=""),
    paste("\\begin{array}{c} ",
          "\\mbox{\\small{L}\\footnotesize{im}",
          "\\small{L}\\footnotesize{asso}} \\\\",
          " \\mbox{$\\pi_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
          " Est.} \\end{array}", sep=""),
    paste("\\begin{array}{c} ",
          "\\mbox{\\small{P}\\footnotesize{sd}-\\small{M}",
          "\\footnotesize{arg}} \\\\ \\mbox{\\small{M}\\footnotesize{edian}} ",
          "\\end{array}", sep=""),
    paste("\\begin{array}{c} \\mbox{Fermi-D} \\\\ ",
          "\\mbox{\\small{L}\\footnotesize{im}\\small{L}",
          "\\footnotesize{asso}} \\end{array}", sep=""),
    paste("\\begin{array}{c} \\mbox{\\small{M}\\footnotesize{arg}",
          "\\small{M}\\footnotesize{edian}} \\\\ ",
          "\\mbox{\\small{L}\\footnotesize{im}\\small{L}\\footnotesize{asso}} \\end{array}", sep="")
    );
## Column-header phrases describing each reported statistic, as LaTeX arrays.
TopPlot <- c(" \\begin{array}{c} \\mbox{Mean Type II} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
   " \\begin{array}{c} \\mbox{Mean Type I} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
   " \\begin{array}{c} \\mbox{\\% True Model} \\\\ \\end{array}",
   " \\begin{array}{c} \\mbox{SD Type II} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
   " \\begin{array}{c} \\mbox{SD Type I} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
   " \\begin{array}{c} \\sum \\left( \\hat{\\beta}_{j} - \\beta_{j-\\mbox{\\tiny{TRUE}}} \\right)^2 \\\\ \\end{array}",
   " \\begin{array}{c} \\mbox{Computation Time} \\\\ \\mbox{User (sec)} \\end{array} ",
   " \\begin{array}{c} \\mbox{Computation Time} \\\\ \\mbox{Computer (sec)} \\end{array} ",
   " \\begin{array}{c} \\mbox{Computation Time} \\\\ \\mbox{Total (sec)} \\end{array} "
   )
## Estimator headers wrapped in math delimiters for direct table insertion.
EstimatorColNames2 <- paste( "$ ", EstimatorColNames, " $", sep="");
#####################################################################################
### rd0 is a function for Latex formatting of numbers to reduce their space occupied in tables
###
###
#####################################################################################
### rd0(RoundNumber)
###
### Compact LaTeX formatting of a number (or numeric vector) so it occupies
### less horizontal space in tables.  Magnitude-dependent rules:
###   [0.01, 1)  -> ".dd"                       (leading zero dropped)
###   [1, 10)    -> "d.{\footnotesize d}"       (inside an \mbox)
###   [10, 100)  -> "dd.{\footnotesize d}"
###   >= 100     -> mantissa + "e<exp>", e.g. "1.{\footnotesize 5e2}"
###   (0, 0.01)  -> mantissa + negative exponent, same scientific style
###   0          -> "0.\footnotesize{0}"
###   otherwise  -> plain rounded number (e.g. negatives)
### Returns a character scalar, or a character vector for vector input.
rd0 <- function(RoundNumber) {
  if (length(RoundNumber) != 1) {
    ## Vectorized case: recurse element-wise.  (The original looped and then
    ## carried an unreachable, half-finished vectorized remnant after its
    ## return statement; that dead code has been removed.)
    return(vapply(RoundNumber, rd0, character(1)))
  }
  if (RoundNumber >= .01 && RoundNumber < 1) {
    ## Split "0.dd" on the decimal point and keep only the fraction digits.
    MSSplit <- unlist(strsplit(as.character(round(RoundNumber, 2)), "\\."))
    if (length(MSSplit) == 1) {
      MSSplit <- c(MSSplit, "0")
    }
    return(paste(".", (MSSplit)[2], sep = ""))
  } else if (RoundNumber >= 100) {
    ## Scientific style: one mantissa digit after the point plus "e<exp>".
    L2 <- floor(log(RoundNumber, 10))
    MSSplit <- unlist(strsplit(as.character(round(RoundNumber / 10^(L2), 1)), "\\."))
    if (length(MSSplit) == 1) {
      MSSplit <- c(MSSplit, "0")
    }
    return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2],
                 "e", L2, "", "\\normalsize}}", sep = ""))
  } else if (RoundNumber >= 10) {
    MSSplit <- unlist(strsplit(as.character(round(RoundNumber, 1)), "\\."))
    if (length(MSSplit) == 1) {
      MSSplit <- c(MSSplit, "0")
    }
    return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2],
                 "\\normalsize}}", sep = ""))
  } else if (RoundNumber >= 1 && RoundNumber < 10) {
    MSSplit <- unlist(strsplit(as.character(round(RoundNumber, 2)), "\\."))
    if (length(MSSplit) == 1) {
      MSSplit <- c(MSSplit, "0")
    }
    return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2],
                 "\\normalsize}}", sep = ""))
  } else if (RoundNumber > 0 && RoundNumber < .01) {
    ## Tiny positives: same scientific style, exponent is negative here.
    L2 <- floor(log(RoundNumber, 10))
    MSSplit <- unlist(strsplit(as.character(round(RoundNumber / 10^(L2), 1)), "\\."))
    if (length(MSSplit) == 1) {
      MSSplit <- c(MSSplit, "0")
    }
    return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2],
                 "e", L2, "", "\\normalsize}}", sep = ""))
  } else if (RoundNumber == 0) {
    return("\\mbox{0.\\footnotesize{0}}")
  } else {
    ## Fallback (negative values): plain rounded representation.
    return(as.character(round(RoundNumber, 2)))
  }
}
##############################
## BoxArangement :
## Mean Type II (sd Type II)
## Mean Type 1 (sd Type I)
## Mean sum ( hat beta j - beta j True )^2 (sd sum)
## Computer Time
###############################################################################
## MySaveFileName ()
##
## Based upon characteristics of table, picks a title for Latex file to save
##
##
##
##
###############################################################################
## MySaveFileName(OneVV, KPAm, NCount, PrMeVec, LL = FALSE)
##
## Builds the .tex output path for a simulation summary table, encoding the
## table's characteristics (active-set size, sample count, estimator set, and
## the min/max of n, kappa and sigma across OneVV's rows) into the file name.
##   LL : TRUE for the longtable variant (adds an "L" marker to the name).
## Relies on project helpers LoadSavedTableDirectory() and tSeq().
MySaveFileName <- function(OneVV, KPAm, NCount, PrMeVec, LL = FALSE) {
  STD <- LoadSavedTableDirectory()
  ## Idiom fix: was `if (LL== TRUE) { My = "L" } else { My = "" }`.
  My <- if (LL) "L" else ""
  name <- paste(STD, "/", "OutputTable", My, "KP", KPAm, "CNT", NCount,
                "TB", paste(PrMeVec, collapse = ""),
                "mNN", tSeq(min(OneVV[, 4])), "MNN", tSeq(max(OneVV[, 4])),
                "mKP", tSeq(min(OneVV[, 5])), "MKP", tSeq(max(OneVV[, 5])),
                "msig", tSeq(min(OneVV[, 6])), "Msig", tSeq(max(OneVV[, 6])),
                ".tex", sep = "")
  return(name)
}
#############################################################################
## DoAllTheSaving <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount)
##
## Saves the table created by these function. Call this function with
## GFMAA: simulation out put, OneVV matrix of columns requested
## KPAm is a statement of what the size of active set was before doing study
## PrMeVec: which of the 10 types of simulation estimators to use
## NCount: How many N was the sample size per parameter set.
#############################################################################
## DoAllTheSaving(GFMAA, OneVV, KPAm, PrMeVec, NCount, rndit = 2)
##
## Writes a LaTeX "tabular" summary of simulation output to disk.
##   GFMAA   : simulation results matrix (one row per run)
##   OneVV   : parameter configurations (one table row each)
##   KPAm    : true active-set size (4 or 6); selects the beta header/caption
##   PrMeVec : indices of the estimators (columns) to include
##   NCount  : sample count, used only in the output file name
##   rndit   : rounding digits for sigma in the caption string
## Side effect: creates/overwrites the .tex file named by MySaveFileName().
DoAllTheSaving <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount, rndit=2) {
   OnMyFileName <- MySaveFileName(OneVV, KPAm, NCount, PrMeVec)
   TableGot <- CreatePrintTable(GFMAA, OneVV, KPAm, PrMeVec)
   ## One extra header row plus two extra label columns around the cells.
   BiggerTable <- matrix(0, length(TableGot$MyPrintTB[,1]) +1,
                  length(TableGot$MyPrintTB[1,]) + 2)
   BiggerTable[1, 3:length(BiggerTable[1,])] <- EstimatorColNames2[PrMeVec]
   BiggerTable[2:length(BiggerTable[,1]),1] <- TableGot$RowsNames
   BiggerTable[2:length(BiggerTable[,1]),2] <- rep( FunctionPlot, length(TableGot$MyPrintTB[,1]))
   BiggerTable[2:length(BiggerTable[,1]), 3:length(BiggerTable[1,]) ] <- TableGot$MyPrintTB
   BiggerTable[1,1] <- ""; BiggerTable[1,2] <- ""
   if (KPAm == 6) {
     BiggerTable[1,1] <- "$ \\beta_{\\mbox{\\tiny{1:6}}} = \\begin{array}{c} ( 1,-1, 1, \\\\ -1, 1, -1 ) \\end{array}$"
     ## BUGFIX: the captions used "\right" (single backslash), which R parses
     ## as a literal carriage return (\r) followed by "ight"; the LaTeX
     ## command requires "\\right", as DoAllTheSavingL() already does.
     TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:6}}}",
            " = \\left( 1,-1,1,-1,1,-1 \\right)$",
            " and $\\sigma = ", round(max(OneVV[,6]),rndit),"$", sep="")
   } else if (KPAm == 4) {
     BiggerTable[1,1] <- "$ \\beta_{\\mbox{\\tiny{1:4}}} = \\left( 4,3,-2.5,1 \\right)$$\\mbox{ }$"
     TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:4}}}",
            " = \\left( 4,3,-2.5,1 \\right)$",
            " and $\\sigma = ", round(max(OneVV[,6]),rndit), "$", sep="")
   }
   ## NOTE(review): TDCaption is computed but never written in this (tabular)
   ## variant; only DoAllTheSavingL() emits a caption.
   ## Column spec: tightly-spaced, vertical-ruled, centered columns.
   ArrayColumns <- paste("{@{\\extracolsep{-1.25mm}}|c@{\\hspace{-.5mm}}|c@{\\hspace{-.5mm}}|",
        paste(rep( "@{\\hspace{-.5mm}}|c", length(BiggerTable[1,])-1), collapse=""),
        "@{\\hspace{-.5mm}}|}", sep="")
   StartF <- paste(" \\begin{tabular} ", ArrayColumns, " \\hline ", sep="")
   ## Write the opening line, then append the body (cells joined by "&",
   ## rows terminated with "\\ \hline \hline"), then close the environment.
   MyF <- file(OnMyFileName, open="wt", blocking=FALSE )
   writeLines(StartF, con=MyF)
   close(MyF)
   write.table(x=BiggerTable, file=OnMyFileName, append=TRUE,
     sep = " & \n", eol=" \\\\ \\hline \\hline \n", na="NA", quote=FALSE,
     row.names=FALSE, col.names=FALSE)
   MyF <- file(OnMyFileName, open="at", blocking=FALSE )
   writeLines(" \\end{tabular} \n", con=MyF)
   close(MyF)
}
#############################################################################
## DoAllTheSavingL <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount)
##
## Same as DoAllTheSaving() except, this makes a Latex "longtable"
## Saves the table created by these function. Call this function with
## GFMAA: simulation out put, OneVV matrix of columns requested
## KPAm is a statement of what the size of active set was before doing study
## PrMeVec: which of the 10 types of simulation estimators to use
## NCount: How many N was the sample size per parameter set.
## Longtable variant of DoAllTheSaving(): writes a LaTeX "longtable" summary
## of simulation output, including a caption and a \label derived from the
## file name.  Arguments mirror DoAllTheSaving(); side effect is the .tex
## file named by MySaveFileName(..., LL = TRUE).
DoAllTheSavingL <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount, rndit=2) {
   OnMyFileName <- MySaveFileName(OneVV, KPAm, NCount, PrMeVec, LL = TRUE);
   TableGot <- CreatePrintTable(GFMAA, OneVV, KPAm, PrMeVec);
   ## One extra header row plus two extra label columns around the cells.
   BiggerTable <- matrix(0, length(TableGot$MyPrintTB[,1]) +1,
                  length(TableGot$MyPrintTB[1,]) + 2);
   BiggerTable[1, 3:length(BiggerTable[1,])] <- EstimatorColNames2[PrMeVec];
   BiggerTable[2:length(BiggerTable[,1]),1] <- TableGot$RowsNames;
   BiggerTable[2:length(BiggerTable[,1]),2] <- rep( FunctionPlot, length(TableGot$MyPrintTB[,1]));
   BiggerTable[2:length(BiggerTable[,1]), 3:length(BiggerTable[1,]) ] <- TableGot$MyPrintTB;
   BiggerTable[1,1] = ""; BiggerTable[1,2] = "";
   ## Top-left header cell and caption depend on the true active-set size.
   if (KPAm == 6) {
     BiggerTable[1,1] = "$ \\beta_{\\mbox{\\tiny{1:6}}} = \\begin{array}{c} ( 1,-1, 1, \\\\ -1, 1, -1 ) \\end{array}$";
     TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:6}}}",
            " = \\left( 1,-1,1,-1,1,-1 \\right)$",
            " and $\\sigma = ", round(max(OneVV[,6]),rndit), "$", sep="");
   } else if (KPAm == 4) {
     BiggerTable[1,1] = "$ \\beta_{\\mbox{\\tiny{1:4}}} = \\left( 4,3,-2.5,1 \\right)$";
     TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:4}}}",
            " = \\left( 4,3,-2.5,1 \\right)$$\\mbox{ }$",
            " and $\\sigma = ", round(max(OneVV[,6]),rndit), "$", sep="");
   }
   ## Column spec: tightly-spaced, vertical-ruled, centered columns.
   ArrayColumns <- paste("{@{\\extracolsep{-1.25mm}}|c@{\\hspace{-.5mm}}|c@{\\hspace{-.5mm}}|",
        paste(rep( "@{\\hspace{-.5mm}}|c", length(BiggerTable[1,])-1), collapse=""),
        "@{\\hspace{-.5mm}}|}", sep="");
   StartF <- paste(" \\begin{longtable} ", ArrayColumns, " \\hline ", sep="");
   ## Write the opening line, then append the body (cells joined by "&",
   ## rows terminated with "\\ \hline \hline").
   MyF <- file(OnMyFileName, open="wt", blocking=FALSE );
   writeLines(StartF, con=MyF);
   close(MyF);
   write.table(x=BiggerTable, file=OnMyFileName, append=TRUE,
     sep = " & \n", eol=" \\\\ \\hline \\hline \n", na="NA", quote=FALSE,
     row.names=FALSE, col.names=FALSE,);
   ## open(MyF, "at");
   MyF <- file(OnMyFileName, open="at", blocking=FALSE );
   writeLines(paste(" \\caption{", TDCaption, "}", sep=""), con=MyF);
   ## Derive the \label from the file's base name (path and ".tex" stripped).
   tabnameS <- unlist(strsplit(OnMyFileName, "/"));
   tabnameS <- tabnameS[length(tabnameS)];
   tabnameS <- unlist(strsplit(tabnameS, "\\."));
   tabnameS <- tabnameS[1];
   writeLines(paste(" \\label{tabl:", tabnameS, "}", sep=""), con=MyF);
   writeLines(" \\end{longtable} \n", con=MyF);
   close(MyF);
}
#############################################################################
## CreatePrintTable <- function(GFMAA, OneVV, KPAm, PrMeVec)
##
## Helper function to DoAllTheSaving, gets the numbers for the table
#############################################################################
## CreatePrintTable(GFMAA, OneVV, KPAm, PrMeVec)
##
## Assembles the cell matrix plus row/column labels for one summary table.
## Returns a list with MyPrintTB (cells wrapped in "$ ... $"), RowsNames
## (per-configuration LaTeX labels) and ColsNames (estimator headers).
CreatePrintTable <- function(GFMAA, OneVV, KPAm, PrMeVec) {
  n_cfg <- nrow(OneVV)
  raw_tb <- matrix(0, n_cfg, length(PrMeVec))
  ## One row of summary cells per parameter configuration in OneVV.
  for (row_i in seq_len(n_cfg)) {
    raw_tb[row_i, ] <- SubTable(GFMAA, OneVV, PrMeVec, row_i, rndit = 2)
  }
  ## Wrap every cell in math delimiters; paste() is vectorized over the
  ## matrix, and re-wrapping in matrix() restores the original dimensions.
  wrapped_tb <- matrix(paste(" $ ", raw_tb, " $ ", sep = ""), nrow = n_cfg)
  list(
    MyPrintTB = wrapped_tb,
    RowsNames = paste(" $ ", SubRows(OneVV, KPAm, rndit = 2), " $ ", sep = ""),
    ColsNames = EstimatorColNames2[PrMeVec]
  )
}
#############################################################################
## SubRows <- function(OneVV, KPAm, rndit = 2)
##
## Helper function to CreatePrintTable, creates Latex string explaining
## characteristics of individual sim.
#############################################################################
## SubRows(OneVV, KPAm, rndit = 2)
##
## Builds one LaTeX row label per configuration row of OneVV, showing the
## x-correlation (col 2), xi (col 3), active-set size KPAm, sigma (col 6),
## n (col 4) and kappa (col 5).  Returns a character vector, one label per
## row of OneVV.
SubRows <- function(OneVV, KPAm, rndit = 2) {
  ## Format a proportion as a compact decimal without its leading zero
  ## (0.25 -> ".25").  BUGFIX: the original used unlist(strsplit(...))[2],
  ## which flattens ALL rows' split pieces and then takes a single element,
  ## so every row silently displayed the first row's fractional digits (and
  ## whole numbers became ".NA").  sub() is properly vectorized.
  FracPart <- function(v) {
    sub("^0\\.", ".", as.character(round(v, rndit)))
  }
  rt <- paste(" \\begin{array}{c} P_{\\mbox{\\tiny{xcor}}} = ",
              FracPart(OneVV[, 2]),
              " \\mbox{ , } \\xi = ",
              FracPart(OneVV[, 3]),
              " \\\\",
              " \\kappa_{\\mbox{\\tiny{$\\mathcal{A}$}}} = ", KPAm,
              " \\mbox{ , } \\sigma = ", OneVV[, 6], "\\\\",
              " n = ", OneVV[, 4], "\\mbox{ , } ",
              " \\kappa = ", OneVV[, 5], "\\end{array} ", sep = "")
  return(rt)
}
#############################################################################
## SubTable <- function(GFMAA, OneVV, PrMeVec, cto, rndit =2, TMM = FALSE)
##
## Helper function to CreatePrintTable, creates rows for Latex table
##
## SubTable
##
## Extracts the simulation runs matching row `cto` of OneVV from the raw
## result matrix GFMAA and computes, per requested estimator (PrMeVec),
## mean/sd of Type II errors, Type I errors, squared beta error, the
## fraction of perfect model recoveries, and user/computer timings.  The
## summaries are then formatted into LaTeX cells by WhatGoesEachBoxAA.
##
## Args:
##   GFMAA:   result matrix; cols 1:6 are the settings, the rest are
##            blocks of estimator outputs (block width ALTA = 55, timing
##            blocks of width ALTB = 10 -- layout assumed from the
##            offsets below, TODO confirm against the simulation writer).
##   OneVV:   matrix of simulation settings (one row per setting).
##   PrMeVec: indices of the estimators to summarize.
##   cto:     row of OneVV to summarize.
##   rndit:   rounding digits forwarded to the formatter.
##   TMM:     if FALSE, invalid entries (NA or negative) are dropped;
##            if TRUE they are replaced by the column maximum instead.
##
## Returns: character vector of formatted LaTeX cells (one per
## estimator), or 0 if no row of GFMAA matches the requested setting.
##
## Fix: in the TMM = TRUE branch the original replaced invalid PrV2
## entries with max(PrV1[...]) (copy-paste error); CapCol() now uses each
## column's own maximum.
SubTable <- function(GFMAA, OneVV, PrMeVec, cto, rndit = 2, TMM = FALSE) {
  MeanTII <- PrMeVec * 0; SdTII <- PrMeVec * 0
  MeanTI <- PrMeVec * 0; SdTI <- PrMeVec * 0
  MeanBetaSq <- PrMeVec * 0; SdBetaSq <- PrMeVec * 0
  PercentPer <- PrMeVec * 0
  CompTU <- PrMeVec * 0; SdCompTU <- PrMeVec * 0
  CompTC <- PrMeVec * 0; SdCompTC <- PrMeVec * 0
  ## Column-block widths inside GFMAA.
  ALTA <- 10 * 11 / 2; ALTB <- 10
  ## Rows of GFMAA whose first six columns match the requested setting.
  SubSPlot <- GFMAA[ GFMAA[,1] == OneVV[cto,1] & GFMAA[,2] == OneVV[cto,2] &
    GFMAA[,3] == OneVV[cto,3] & GFMAA[,4] == OneVV[cto,4] &
    GFMAA[,5] == OneVV[cto,5] & GFMAA[,6] == OneVV[cto,6], ]
  if (length(SubSPlot) == 0) {
    print("SubTable, cannot get any for OneVV = ")
    print(OneVV[cto, ])
    return(0)
  }
  ## Keep only the valid (non-NA, non-negative) entries of column cc.
  CleanCol <- function(cc) {
    v <- SubSPlot[, cc]
    v[!is.na(v) & v >= 0]
  }
  ## Replace invalid entries of column cc by that column's maximum valid entry.
  CapCol <- function(cc) {
    v <- SubSPlot[, cc]
    v[is.na(v) | v < 0] <- max(v[!is.na(v) & v >= 0])
    v
  }
  for (tt in seq_along(PrMeVec)) {
    if (TMM == FALSE) {
      PrV1 <- CleanCol(7 + PrMeVec[tt])
      PrV2 <- CleanCol(7 + ALTA + PrMeVec[tt])
      PrV3 <- CleanCol(7 + ALTA * 2 + PrMeVec[tt])
      PrV4 <- CleanCol(7 + ALTA * 3 + PrMeVec[tt])
      PrV5 <- CleanCol(7 + ALTA * 3 + ALTB * 2 + PrMeVec[tt])
    } else {
      PrV1 <- CapCol(7 + PrMeVec[tt])
      PrV2 <- CapCol(7 + ALTA + PrMeVec[tt])
      PrV3 <- CapCol(7 + ALTA * 2 + PrMeVec[tt])
      PrV4 <- CapCol(7 + ALTA * 3 + PrMeVec[tt])
      PrV5 <- CapCol(7 + ALTA * 3 + ALTB * 2 + PrMeVec[tt])
    }
    MeanTII[tt] <- mean(PrV1); SdTII[tt] <- sd(PrV1)
    MeanTI[tt] <- mean(PrV2); SdTI[tt] <- sd(PrV2)
    MeanBetaSq[tt] <- mean(PrV3); SdBetaSq[tt] <- sd(PrV3)
    ## Fraction of runs with neither Type I nor Type II errors.
    PercentPer[tt] <- length(PrV1[PrV1 == 0 & PrV2 == 0]) /
      length(SubSPlot[, 1])
    CompTU[tt] <- mean(PrV4); CompTC[tt] <- mean(PrV5)
    SdCompTU[tt] <- sd(PrV4); SdCompTC[tt] <- sd(PrV5)
  }
  rt <- WhatGoesEachBoxAA(tt = 0, MeanTII, SdTII, MeanTI, SdTI, MeanBetaSq, SdBetaSq,
    PercentPer, CompTU, SdCompTU, CompTC, SdCompTC, rndit)
  return(rt)
}
#############################################################################
## WhatGoesEachBoxAA <- function(tt =0, MeanTII, SdTII,...
##
## Helper function to CreatePrintTable, use to input all sumary
## statistics one desires for simulation
##
## WhatGoesEachBoxAA
##
## Formats one table cell as a LaTeX array: mean (sd) of Type II errors,
## Type I errors, squared beta error, and the computer-time row.  With
## tt <= 0 the arguments are used as whole vectors (paste() vectorizes,
## producing one cell per estimator at once); with tt > 0 only element
## tt of each summary vector is used.
##
## Fixes: the tt > 0 branch formatted the whole CompTC vector where
## every other quantity was indexed by tt -- it now uses CompTC[tt];
## the unused SdComptTU alias (typo of SdCompTU) has been removed.
WhatGoesEachBoxAA <- function(tt =0, MeanTII, SdTII, MeanTI, SdTI, MeanBetaSq, SdBetaSq,
  PercentPer, CompTU, SdCompTU, CompTC, SdCompTC, rndit=2) {
  if (tt <= 0) {
    rt <- paste(" \\begin{array}{c} ",
      rd0(round(MeanTII,rndit)), "\\mbox{ (",
      rd0(round(SdTII,rndit)), ")} \\\\ \\hline ",
      rd0(round(MeanTI, rndit)), "\\mbox{ (", rd0(round(SdTI,rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq, rndit)), "\\mbox{ (", rd0(round(SdBetaSq, rndit)), ") } \\\\ \\hline ",
      rd0(round(CompTC, rndit)), " \\\\ ",
      " \\end{array} ", sep="")
  } else {
    rt <- paste(" \\begin{array}{c} ",
      rd0(round(MeanTII[tt],rndit)), "\\mbox{ (",
      rd0(round(SdTII[tt],rndit)), ")} \\\\ \\hline ",
      rd0(round(MeanTI[tt], rndit)), "\\mbox{ (", rd0(round(SdTI[tt],rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq[tt], rndit)), "\\mbox{ (", rd0(round(SdBetaSq[tt], rndit)), ") } \\\\ \\hline",
      rd0(round(CompTC[tt], rndit)), " \\\\ ",
      " \\end{array} ", sep="")
  }
  return(rt)
}
#############################################################################
## WhatGoesEachBoxBB <- function(tt =0, MeanTII, SdTII, MeanTI,,...
##
## Helper function to CreatePrintTable, use to input all sumary
## statistics one desires for simulation
##
## WhatGoesEachBoxBB
##
## Alternative two-column cell layout: Type II / Type I errors in the
## first row, squared beta error / percent-perfect in the second, and
## user / computer timings in the third.  tt <= 0 uses whole vectors
## (one cell per estimator); tt > 0 uses element tt only.
##
## Fix: the sd printed next to CompTC was SdCompTU in both branches
## (the SdCompTC argument was never used); it now prints SdCompTC.
WhatGoesEachBoxBB <- function(tt =0, MeanTII, SdTII, MeanTI, SdTI, MeanBetaSq, SdBetaSq,
  PercentPer, CompTU, SdCompTU, CompTC, SdCompTC, rndit=2) {
  if (tt <= 0) {
    rt <- paste(" \\begin{array}{c|c} \\hline ",
      rd0(round(MeanTII,rndit)), "\\mbox{ (",
      rd0(round(SdTII,rndit)), ")} & ",
      rd0(round(MeanTI, rndit)), "\\mbox{ (", rd0(round(SdTI,rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq, rndit)), "\\mbox{ (", rd0(round(SdBetaSq, rndit)), ") } & ",
      rd0(round(PercentPer, rndit)), "\\mbox{\\%} \\\\ \\hline ",
      rd0(round(CompTU, rndit)), "\\mbox{sc (", rd0(round(SdCompTU, rndit)), ") } & ",
      rd0(round(CompTC, rndit)), "\\mbox{sc (", rd0(round(SdCompTC, rndit)), ") } \\\\ \\hline",
      " \\end{array} ", sep="")
  } else {
    rt <- paste(" \\begin{array}{c|c} \\hline ",
      rd0(round(MeanTII[tt],rndit)), "\\mbox{ (",
      rd0(round(SdTII[tt],rndit)), ")} & ",
      rd0(round(MeanTI[tt], rndit)), "\\mbox{ (", rd0(round(SdTI[tt],rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq[tt], rndit)), "\\mbox{ (", rd0(round(SdBetaSq[tt], rndit)), ") } & ",
      rd0(round(PercentPer[tt]*100, rndit)), "\\mbox{\\%} \\\\ \\hline ",
      rd0(round(CompTU[tt], rndit)), "\\mbox{sc (", rd0(round(SdCompTU[tt], rndit)), ") } & ",
      rd0(round(CompTC[tt], rndit)), "\\mbox{sc (", rd0(round(SdCompTC[tt], rndit)), ") } \\\\ \\hline",
      " \\end{array} ", sep="")
  }
  return(rt)
}
#############################################################################
## LoadSavedTableDirectory <- function()
##
## Tries to identify directory to save Latex Tables into.
##
## LoadSavedTableDirectory
##
## Picks (and if necessary creates) the "PrintTables" directory that the
## LaTeX tables are written into; falls back to a hard-coded Windows
## development path when no "PrintTables" entry exists under .Library.
## NOTE(review): the first lookup scans .Library but the path actually
## used comes from MakePathMe() -- confirm this mixed check is intended.
LoadSavedTableDirectory <- function() {
  if (any(unlist(list.files(.Library)) == "PrintTables")) {
    BasePath <- MakePathMe()
    TargetPath <- paste(BasePath, "PrintTables", sep = "")
    ## Create the subdirectory only when it is not already present.
    if (!any(unlist(list.files(BasePath)) == "PrintTables")) {
      dir.create(TargetPath, showWarnings = FALSE, recursive = FALSE)
    }
  } else {
    TargetPath <- "c://Stat//2008Summer//LarsProject//code//PrintTables//"
  }
  return(TargetPath)
}
#######################################################################################
####################################################################################### | /TwoLassoCpp/R/AlllComparisonRCodeFunctions.r | no_license | lenarcica/SimulationStackForBayesSpike | R | false | false | 25,094 | r | #######################################################################################
#######################################################################################
### AllComparisonRCodeFunctions.r
### (c) 2009 Alan Lenarcic
### Code written for Edoardo Airoldi Lab, Harvard
###
### This code is not usually used in future work. It was an attempt to print out
### formatted Latex Tables with proper formatting of key estimators as used
### in Lenarcic 2009 thesis.
###
#### This code is for making Latex demonstration table summaries of simulation output
###
###
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
#
# Note: these Comparison R Code functions are probably undesired in any
# library and are really only valuable as reference.
#########################################################################################
###
### These are Latex titles, row and column headers
###
## Human-readable names of the estimators compared in the simulation
## tables (order matters: it matches the estimator indices in PrMeVec).
EstimatorNames<- c("Lasso Fixed", "Lars Cp", "Lasso Lin and Yuan", "Limit Ridge", "Quick Two Lasso",
"Limit Lasso", "Marginal Median");
## LaTeX stub printed in the second column of every table row: labels the
## stacked cell entries (# Type II, # Type I, sum of squared beta deltas,
## run time).  The "% Perf" row is currently commented out.
FunctionPlot <- paste(" $ \\begin{array} {c} ", "\\mbox{\\footnotesize{\\# II}} \\\\ \\hline",
"\\mbox{\\footnotesize{\\# I}} \\\\ \\hline",
"\\mbox{\\footnotesize{$\\sum \\delta_{\\mbox{\\tiny{$\\beta$}}}^2$}} \\\\ \\hline",
##"\\mbox{\\footnotesize{\\% Perf}} \\\\ \\hline ",
"\\mbox{\\footnotesize{Run}}",
"\\end{array} $ ", sep="");
## LaTeX column headers for the ten estimator columns (same order as the
## estimator blocks in the simulation output).
## Fix: entry 3 contained "\\foootnotesize" (an undefined LaTeX macro)
## and one unbalanced extra closing brace; both are corrected.
EstimatorColNames<- c(
paste("\\begin{array}{c} \\mbox{LARS} \\\\",
" \\mbox{Fixed $\\kappa_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
" } \\end{array} ", sep=""),
paste("\\begin{array}{c} \\mbox{LARS} \\\\",
" \\mbox{$C_{\\mbox{\\tiny{p}}}$",
"} \\end{array}", sep=""),
paste("\\begin{array}{c} \\mbox{\\small{L}\\footnotesize{asso $w$=1}}",
" \\\\ \\mbox{\\small{L}\\footnotesize{in \\& }\\small{Y}\\footnotesize{uan}} ",
" \\end{array}", sep=""),
paste( "\\begin{array}{c}",
" \\mbox{\\small{L}\\footnotesize{im}",
"\\small{R}\\footnotesize{idge}} \\\\",
" \\mbox{$\\pi_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
" \\small{K}\\footnotesize{nown}} \\end{array}", sep=""),
paste("\\begin{array}{c} \\mbox{\\small{T}\\footnotesize{wo}",
"\\small{L}\\footnotesize{asso}}\\\\",
" \\mbox{$\\times$ 9} \\end{array}", sep=""),
paste("\\begin{array}{c} ",
"\\mbox{\\small{L}\\footnotesize{im}\\small{L}",
"\\footnotesize{asso}} \\\\",
" \\mbox{$\\pi_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
" \\small{K}\\footnotesize{nown}}",
" \\end{array}", sep=""),
paste("\\begin{array}{c} ",
"\\mbox{\\small{L}\\footnotesize{im}",
"\\small{L}\\footnotesize{asso}} \\\\",
" \\mbox{$\\pi_{\\mbox{\\tiny{$\\mathcal{A}$}}}$",
" Est.} \\end{array}", sep=""),
paste("\\begin{array}{c} ",
"\\mbox{\\small{P}\\footnotesize{sd}-\\small{M}",
"\\footnotesize{arg}} \\\\ \\mbox{\\small{M}\\footnotesize{edian}} ",
"\\end{array}", sep=""),
paste("\\begin{array}{c} \\mbox{Fermi-D} \\\\ ",
"\\mbox{\\small{L}\\footnotesize{im}\\small{L}",
"\\footnotesize{asso}} \\end{array}", sep=""),
paste("\\begin{array}{c} \\mbox{\\small{M}\\footnotesize{arg}",
"\\small{M}\\footnotesize{edian}} \\\\ ",
"\\mbox{\\small{L}\\footnotesize{im}\\small{L}\\footnotesize{asso}} \\end{array}", sep="")
);
## Column headers used by the alternative ("BB") table layout; one entry
## per summary statistic (means, sds, beta error, timings).
TopPlot <- c(" \\begin{array}{c} \\mbox{Mean Type II} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
" \\begin{array}{c} \\mbox{Mean Type I} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
" \\begin{array}{c} \\mbox{\\% True Model} \\\\ \\end{array}",
" \\begin{array}{c} \\mbox{SD Type II} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
" \\begin{array}{c} \\mbox{SD Type I} \\\\ \\mbox{Real Factors Missed} \\\\ \\end{array} ",
" \\begin{array}{c} \\sum \\left( \\hat{\\beta}_{j} - \\beta_{j-\\mbox{\\tiny{TRUE}}} \\right)^2 \\\\ \\end{array}",
" \\begin{array}{c} \\mbox{Computation Time} \\\\ \\mbox{User (sec)} \\end{array} ",
" \\begin{array}{c} \\mbox{Computation Time} \\\\ \\mbox{Computer (sec)} \\end{array} ",
" \\begin{array}{c} \\mbox{Computation Time} \\\\ \\mbox{Total (sec)} \\end{array} "
)
## Math-mode wrapped versions of the estimator column headers, as placed
## directly into table cells.
EstimatorColNames2 <- paste( "$ ", EstimatorColNames, " $", sep="");
#####################################################################################
### rd0 is a function for Latex formatting of numbers to reduce their space occupied in tables
###
###
## rd0
##
## Formats a number compactly for LaTeX tables: fractions in [0.01, 1)
## lose the leading zero, values >= 100 or in (0, 0.01) use a
## footnotesize mantissa/exponent form, other positive values shrink the
## digits after the decimal point, zero has a fixed form, and negative
## numbers fall through to a plain rounded representation.  Vectors are
## handled by recursing elementwise.
##
## Args:
##   RoundNumber: numeric scalar or vector to format.
## Returns: character scalar/vector of LaTeX fragments.
##
## Fix: the original vector branch contained further vectorized code
## after its return(RTV) that could never execute; it has been removed.
rd0 <- function(RoundNumber) {
  if (length(RoundNumber) == 1) {
    if (RoundNumber >= .01 && RoundNumber < 1) {
      ## e.g. 0.5 -> ".5": drop the leading zero.
      MSSplit <- unlist(strsplit(as.character(round(RoundNumber, 2)), "\\."))
      if (length(MSSplit) == 1) {
        MSSplit <- c(MSSplit, "0")
      }
      return(paste(".", MSSplit[2], sep = ""))
    } else if (RoundNumber >= 100) {
      ## Scientific-style: mantissa with footnotesize digits + exponent.
      L2 <- floor(log(RoundNumber, 10))
      MSSplit <- unlist(strsplit(as.character(round(RoundNumber / 10^(L2), 1)), "\\."))
      if (length(MSSplit) == 1) {
        MSSplit <- c(MSSplit, "0")
      }
      return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2],
        "e", L2, "", "\\normalsize}}", sep = ""))
    } else if (RoundNumber >= 10) {
      MSSplit <- unlist(strsplit(as.character(round(RoundNumber, 1)), "\\."))
      if (length(MSSplit) == 1) {
        MSSplit <- c(MSSplit, "0")
      }
      return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2], "\\normalsize}}", sep = ""))
    } else if (RoundNumber >= 1 && RoundNumber < 10) {
      MSSplit <- unlist(strsplit(as.character(round(RoundNumber, 2)), "\\."))
      if (length(MSSplit) == 1) {
        MSSplit <- c(MSSplit, "0")
      }
      return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2], "\\normalsize}}", sep = ""))
    } else if (RoundNumber > 0 && RoundNumber < .01) {
      L2 <- floor(log(RoundNumber, 10))
      MSSplit <- unlist(strsplit(as.character(round(RoundNumber / 10^(L2), 1)), "\\."))
      if (length(MSSplit) == 1) {
        MSSplit <- c(MSSplit, "0")
      }
      return(paste("\\mbox{", MSSplit[1], ".", "{\\footnotesize", MSSplit[2],
        "e", L2, "", "\\normalsize}}", sep = ""))
    } else if (RoundNumber == 0) {
      return("\\mbox{0.\\footnotesize{0}}")
    } else {
      ## Negative numbers: plain rounded representation.
      return(as.character(round(RoundNumber, 2)))
    }
  } else {
    ## Vector input: format each element independently (recursion).
    RTV <- RoundNumber
    for (ii in seq_along(RoundNumber)) {
      RTV[ii] <- rd0(RoundNumber[ii])
    }
    return(RTV)
  }
}
##############################
## BoxArangement :
## Mean Type II (sd Type II)
## Mean Type 1 (sd Type I)
## Mean sum ( hat beta j - beta j True )^2 (sd sum)
## Computer Time
###############################################################################
## MySaveFileName ()
##
## Based upon characteristics of table, picks a title for Latex file to save
##
##
##
##
## MySaveFileName
##
## Assembles the .tex output file name from the table characteristics:
## kappa_A, repetition count, estimator indices, and the min/max of the
## n, kappa and sigma columns of OneVV (encoded via tSeq).  LL = TRUE
## marks the longtable variant with an extra "L" in the name.
MySaveFileName <- function(OneVV, KPAm, NCount, PrMeVec, LL = FALSE) {
  OutDir <- LoadSavedTableDirectory()
  LongMark <- if (LL == TRUE) "L" else ""
  FileName <- paste(OutDir, "/", "OutputTable", LongMark, "KP", KPAm,
    "CNT", NCount,
    "TB", paste(PrMeVec, collapse = ""),
    "mNN", tSeq(min(OneVV[, 4])), "MNN", tSeq(max(OneVV[, 4])),
    "mKP", tSeq(min(OneVV[, 5])), "MKP", tSeq(max(OneVV[, 5])),
    "msig", tSeq(min(OneVV[, 6])), "Msig", tSeq(max(OneVV[, 6])),
    ".tex", sep = "")
  return(FileName)
}
#############################################################################
## DoAllTheSaving <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount)
##
## Saves the table created by these function. Call this function with
## GFMAA: simulation out put, OneVV matrix of columns requested
## KPAm is a statement of what the size of active set was before doing study
## PrMeVec: which of the 10 types of simulation estimators to use
## NCount: How many N was the sample size per parameter set.
## DoAllTheSaving
##
## Writes one LaTeX "tabular" file summarizing the simulation output.
##   GFMAA:   raw simulation output matrix.
##   OneVV:   matrix of requested parameter settings (one row each).
##   KPAm:    true active-set size used in the study (4 or 6 supported;
##            other values leave the header cell empty).
##   PrMeVec: which estimators (columns) to include.
##   NCount:  repetition count (only used in the file name).
##   rndit:   rounding digits for sigma in the caption string.
##
## Fix: the caption strings contained "\right" inside a double-quoted
## string, which R parses as a carriage-return escape ("\r" + "ight") and
## thus emitted broken LaTeX; they now correctly write "\\right" (the
## longtable twin DoAllTheSavingL already did this).  The trailing empty
## argument in the write.table() call was also removed.
DoAllTheSaving <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount, rndit=2) {
  OnMyFileName <- MySaveFileName(OneVV, KPAm, NCount, PrMeVec)
  TableGot <- CreatePrintTable(GFMAA, OneVV, KPAm, PrMeVec)
  ## One extra header row, two extra label columns.
  BiggerTable <- matrix(0, length(TableGot$MyPrintTB[,1]) + 1,
    length(TableGot$MyPrintTB[1,]) + 2)
  BiggerTable[1, 3:length(BiggerTable[1,])] <- EstimatorColNames2[PrMeVec]
  BiggerTable[2:length(BiggerTable[,1]), 1] <- TableGot$RowsNames
  BiggerTable[2:length(BiggerTable[,1]), 2] <- rep(FunctionPlot, length(TableGot$MyPrintTB[,1]))
  BiggerTable[2:length(BiggerTable[,1]), 3:length(BiggerTable[1,])] <- TableGot$MyPrintTB
  BiggerTable[1,1] <- ""; BiggerTable[1,2] <- ""
  if (KPAm == 6) {
    BiggerTable[1,1] <- "$ \\beta_{\\mbox{\\tiny{1:6}}} = \\begin{array}{c} ( 1,-1, 1, \\\\ -1, 1, -1 ) \\end{array}$"
    TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:6}}}",
      " = \\left( 1,-1,1,-1,1,-1 \\right)$",
      " and $\\sigma = ", round(max(OneVV[,6]), rndit), "$", sep="")
  } else if (KPAm == 4) {
    BiggerTable[1,1] <- "$ \\beta_{\\mbox{\\tiny{1:4}}} = \\left( 4,3,-2.5,1 \\right)$$\\mbox{ }$"
    TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:4}}}",
      " = \\left( 4,3,-2.5,1 \\right)$",
      " and $\\sigma = ", round(max(OneVV[,6]), rndit), "$", sep="")
  }
  ## Column spec squeezes the inter-column space so wide tables fit.
  ArrayColumns <- paste("{@{\\extracolsep{-1.25mm}}|c@{\\hspace{-.5mm}}|c@{\\hspace{-.5mm}}|",
    paste(rep("@{\\hspace{-.5mm}}|c", length(BiggerTable[1,]) - 1), collapse=""),
    "@{\\hspace{-.5mm}}|}", sep="")
  StartF <- paste(" \\begin{tabular} ", ArrayColumns, " \\hline ", sep="")
  MyF <- file(OnMyFileName, open="wt", blocking=FALSE)
  writeLines(StartF, con=MyF)
  close(MyF)
  ## Body rows: "&" between cells, "\\ \hline \hline" between rows.
  write.table(x=BiggerTable, file=OnMyFileName, append=TRUE,
    sep = " & \n", eol=" \\\\ \\hline \\hline \n", na="NA", quote=FALSE,
    row.names=FALSE, col.names=FALSE)
  MyF <- file(OnMyFileName, open="at", blocking=FALSE)
  writeLines(" \\end{tabular} \n", con=MyF)
  close(MyF)
}
#############################################################################
## DoAllTheSavingL <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount)
##
## Same as DoAllTheSaving() except, this makes a Latex "longtable"
## Saves the table created by these function. Call this function with
## GFMAA: simulation out put, OneVV matrix of columns requested
## KPAm is a statement of what the size of active set was before doing study
## PrMeVec: which of the 10 types of simulation estimators to use
## NCount: How many N was the sample size per parameter set.
## DoAllTheSavingL: same as DoAllTheSaving but emits a LaTeX "longtable"
## with a caption and \label derived from the output file name.
## NOTE(review): if KPAm is neither 4 nor 6, TDCaption is never assigned
## and the writeLines() call below will error -- confirm callers only
## pass 4 or 6.
DoAllTheSavingL <- function(GFMAA, OneVV, KPAm, PrMeVec, NCount, rndit=2) {
## LL = TRUE adds the "L" (longtable) marker to the file name.
OnMyFileName <- MySaveFileName(OneVV, KPAm, NCount, PrMeVec, LL = TRUE);
TableGot <- CreatePrintTable(GFMAA, OneVV, KPAm, PrMeVec);
## One extra header row, two extra label columns.
BiggerTable <- matrix(0, length(TableGot$MyPrintTB[,1]) +1,
length(TableGot$MyPrintTB[1,]) + 2);
BiggerTable[1, 3:length(BiggerTable[1,])] <- EstimatorColNames2[PrMeVec];
BiggerTable[2:length(BiggerTable[,1]),1] <- TableGot$RowsNames;
BiggerTable[2:length(BiggerTable[,1]),2] <- rep( FunctionPlot, length(TableGot$MyPrintTB[,1]));
BiggerTable[2:length(BiggerTable[,1]), 3:length(BiggerTable[1,]) ] <- TableGot$MyPrintTB;
BiggerTable[1,1] = ""; BiggerTable[1,2] = "";
if (KPAm == 6) {
BiggerTable[1,1] = "$ \\beta_{\\mbox{\\tiny{1:6}}} = \\begin{array}{c} ( 1,-1, 1, \\\\ -1, 1, -1 ) \\end{array}$";
TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:6}}}",
" = \\left( 1,-1,1,-1,1,-1 \\right)$",
" and $\\sigma = ", round(max(OneVV[,6]),rndit), "$", sep="");
} else if (KPAm == 4) {
BiggerTable[1,1] = "$ \\beta_{\\mbox{\\tiny{1:4}}} = \\left( 4,3,-2.5,1 \\right)$";
TDCaption <- paste("$ \\beta_{\\mbox{\\tiny{1:4}}}",
" = \\left( 4,3,-2.5,1 \\right)$$\\mbox{ }$",
" and $\\sigma = ", round(max(OneVV[,6]),rndit), "$", sep="");
}
## Column spec squeezes the inter-column space so wide tables fit.
ArrayColumns <- paste("{@{\\extracolsep{-1.25mm}}|c@{\\hspace{-.5mm}}|c@{\\hspace{-.5mm}}|",
paste(rep( "@{\\hspace{-.5mm}}|c", length(BiggerTable[1,])-1), collapse=""),
"@{\\hspace{-.5mm}}|}", sep="");
StartF <- paste(" \\begin{longtable} ", ArrayColumns, " \\hline ", sep="");
## Write the environment opener, then append the body via write.table.
MyF <- file(OnMyFileName, open="wt", blocking=FALSE );
writeLines(StartF, con=MyF);
close(MyF);
## Body rows: "&" between cells, "\\ \hline \hline" between rows.
write.table(x=BiggerTable, file=OnMyFileName, append=TRUE,
sep = " & \n", eol=" \\\\ \\hline \\hline \n", na="NA", quote=FALSE,
row.names=FALSE, col.names=FALSE,);
## open(MyF, "at");
## Reopen in append mode for the caption, label and environment closer.
MyF <- file(OnMyFileName, open="at", blocking=FALSE );
writeLines(paste(" \\caption{", TDCaption, "}", sep=""), con=MyF);
## The \label is the file's base name (path and extension stripped).
tabnameS <- unlist(strsplit(OnMyFileName, "/"));
tabnameS <- tabnameS[length(tabnameS)];
tabnameS <- unlist(strsplit(tabnameS, "\\."));
tabnameS <- tabnameS[1];
writeLines(paste(" \\label{tabl:", tabnameS, "}", sep=""), con=MyF);
writeLines(" \\end{longtable} \n", con=MyF);
close(MyF);
}
#############################################################################
## CreatePrintTable <- function(GFMAA, OneVV, KPAm, PrMeVec)
##
## Helper function to DoAllTheSaving, gets the numbers for the table
## CreatePrintTable
##
## Collects the per-simulation summary cells (via SubTable), wraps every
## cell and every row label in LaTeX math delimiters ("$ ... $"), and
## returns the table body together with its row and column headers.
CreatePrintTable <- function(GFMAA, OneVV, KPAm, PrMeVec) {
  nSims <- length(OneVV[, 1])
  RawTB <- matrix(0, nSims, length(PrMeVec))
  for (simIdx in seq_len(nSims)) {
    RawTB[simIdx, ] <- SubTable(GFMAA, OneVV, PrMeVec, simIdx, rndit = 2)
  }
  ## Wrap each cell in math-mode delimiters; paste() vectorizes over the
  ## matrix elements, so rebuilding with matrix() keeps the same layout.
  WrappedTB <- matrix(paste(" $ ", RawTB, " $ ", sep = ""),
    nrow = nSims, ncol = length(PrMeVec))
  RowLabels <- paste(" $ ", SubRows(OneVV, KPAm, rndit = 2), " $ ", sep = "")
  return(list(MyPrintTB = WrappedTB,
              RowsNames = RowLabels,
              ColsNames = EstimatorColNames2[PrMeVec]))
}
#############################################################################
## SubRows <- function(OneVV, KPAm, rndit = 2)
##
## Helper function to CreatePrintTable, creates Latex string explaining
## characteristics of individual sim.
## SubRows
##
## Builds the LaTeX row label (a small array environment) describing the
## simulation setting of each row of OneVV: correlation, xi, kappa_A,
## sigma, n and kappa.
##
## Args:
##   OneVV: matrix of simulation settings; column 2 = P_xcor, 3 = xi,
##          4 = n, 5 = kappa, 6 = sigma (layout used throughout this file).
##   KPAm:  true active-set size kappa_A (scalar, shared by all rows).
##   rndit: rounding digits for the probability columns.
##
## Returns: character vector with one LaTeX string per row of OneVV.
##
## Fix: the original used unlist(strsplit(...))[2], which collapses the
## per-row split to a single decimal part and recycles it whenever OneVV
## has more than one row; DecPart() now extracts the fractional digits
## row-by-row.
SubRows <- function(OneVV, KPAm, rndit = 2) {
  ## Fractional digits of each rounded value ("0.5" -> "5"), per element.
  DecPart <- function(v) {
    vapply(strsplit(as.character(round(v, rndit)), "\\."),
           function(s) s[2], character(1))
  }
  rt <- paste(" \\begin{array}{c} P_{\\mbox{\\tiny{xcor}}} = ",
    ".", DecPart(OneVV[, 2]),
    " \\mbox{ , } \\xi = ",
    ".", DecPart(OneVV[, 3]),
    " \\\\",
    " \\kappa_{\\mbox{\\tiny{$\\mathcal{A}$}}} = ", KPAm,
    " \\mbox{ , } \\sigma = ", OneVV[, 6], "\\\\",
    " n = ", OneVV[, 4], "\\mbox{ , } ",
    " \\kappa = ", OneVV[, 5], "\\end{array} ", sep = "")
  return(rt)
}
#############################################################################
## SubTable <- function(GFMAA, OneVV, PrMeVec, cto, rndit =2, TMM = FALSE)
##
## Helper function to CreatePrintTable, creates rows for Latex table
##
## SubTable
##
## Extracts the simulation runs matching row `cto` of OneVV from the raw
## result matrix GFMAA and computes, per requested estimator (PrMeVec),
## mean/sd of Type II errors, Type I errors, squared beta error, the
## fraction of perfect model recoveries, and user/computer timings.  The
## summaries are then formatted into LaTeX cells by WhatGoesEachBoxAA.
##
## Args:
##   GFMAA:   result matrix; cols 1:6 are the settings, the rest are
##            blocks of estimator outputs (block width ALTA = 55, timing
##            blocks of width ALTB = 10 -- layout assumed from the
##            offsets below, TODO confirm against the simulation writer).
##   OneVV:   matrix of simulation settings (one row per setting).
##   PrMeVec: indices of the estimators to summarize.
##   cto:     row of OneVV to summarize.
##   rndit:   rounding digits forwarded to the formatter.
##   TMM:     if FALSE, invalid entries (NA or negative) are dropped;
##            if TRUE they are replaced by the column maximum instead.
##
## Returns: character vector of formatted LaTeX cells (one per
## estimator), or 0 if no row of GFMAA matches the requested setting.
##
## Fix: in the TMM = TRUE branch the original replaced invalid PrV2
## entries with max(PrV1[...]) (copy-paste error); CapCol() now uses each
## column's own maximum.
SubTable <- function(GFMAA, OneVV, PrMeVec, cto, rndit = 2, TMM = FALSE) {
  MeanTII <- PrMeVec * 0; SdTII <- PrMeVec * 0
  MeanTI <- PrMeVec * 0; SdTI <- PrMeVec * 0
  MeanBetaSq <- PrMeVec * 0; SdBetaSq <- PrMeVec * 0
  PercentPer <- PrMeVec * 0
  CompTU <- PrMeVec * 0; SdCompTU <- PrMeVec * 0
  CompTC <- PrMeVec * 0; SdCompTC <- PrMeVec * 0
  ## Column-block widths inside GFMAA.
  ALTA <- 10 * 11 / 2; ALTB <- 10
  ## Rows of GFMAA whose first six columns match the requested setting.
  SubSPlot <- GFMAA[ GFMAA[,1] == OneVV[cto,1] & GFMAA[,2] == OneVV[cto,2] &
    GFMAA[,3] == OneVV[cto,3] & GFMAA[,4] == OneVV[cto,4] &
    GFMAA[,5] == OneVV[cto,5] & GFMAA[,6] == OneVV[cto,6], ]
  if (length(SubSPlot) == 0) {
    print("SubTable, cannot get any for OneVV = ")
    print(OneVV[cto, ])
    return(0)
  }
  ## Keep only the valid (non-NA, non-negative) entries of column cc.
  CleanCol <- function(cc) {
    v <- SubSPlot[, cc]
    v[!is.na(v) & v >= 0]
  }
  ## Replace invalid entries of column cc by that column's maximum valid entry.
  CapCol <- function(cc) {
    v <- SubSPlot[, cc]
    v[is.na(v) | v < 0] <- max(v[!is.na(v) & v >= 0])
    v
  }
  for (tt in seq_along(PrMeVec)) {
    if (TMM == FALSE) {
      PrV1 <- CleanCol(7 + PrMeVec[tt])
      PrV2 <- CleanCol(7 + ALTA + PrMeVec[tt])
      PrV3 <- CleanCol(7 + ALTA * 2 + PrMeVec[tt])
      PrV4 <- CleanCol(7 + ALTA * 3 + PrMeVec[tt])
      PrV5 <- CleanCol(7 + ALTA * 3 + ALTB * 2 + PrMeVec[tt])
    } else {
      PrV1 <- CapCol(7 + PrMeVec[tt])
      PrV2 <- CapCol(7 + ALTA + PrMeVec[tt])
      PrV3 <- CapCol(7 + ALTA * 2 + PrMeVec[tt])
      PrV4 <- CapCol(7 + ALTA * 3 + PrMeVec[tt])
      PrV5 <- CapCol(7 + ALTA * 3 + ALTB * 2 + PrMeVec[tt])
    }
    MeanTII[tt] <- mean(PrV1); SdTII[tt] <- sd(PrV1)
    MeanTI[tt] <- mean(PrV2); SdTI[tt] <- sd(PrV2)
    MeanBetaSq[tt] <- mean(PrV3); SdBetaSq[tt] <- sd(PrV3)
    ## Fraction of runs with neither Type I nor Type II errors.
    PercentPer[tt] <- length(PrV1[PrV1 == 0 & PrV2 == 0]) /
      length(SubSPlot[, 1])
    CompTU[tt] <- mean(PrV4); CompTC[tt] <- mean(PrV5)
    SdCompTU[tt] <- sd(PrV4); SdCompTC[tt] <- sd(PrV5)
  }
  rt <- WhatGoesEachBoxAA(tt = 0, MeanTII, SdTII, MeanTI, SdTI, MeanBetaSq, SdBetaSq,
    PercentPer, CompTU, SdCompTU, CompTC, SdCompTC, rndit)
  return(rt)
}
#############################################################################
## WhatGoesEachBoxAA <- function(tt =0, MeanTII, SdTII,...
##
## Helper function to CreatePrintTable, use to input all sumary
## statistics one desires for simulation
##
## WhatGoesEachBoxAA
##
## Formats one table cell as a LaTeX array: mean (sd) of Type II errors,
## Type I errors, squared beta error, and the computer-time row.  With
## tt <= 0 the arguments are used as whole vectors (paste() vectorizes,
## producing one cell per estimator at once); with tt > 0 only element
## tt of each summary vector is used.
##
## Fixes: the tt > 0 branch formatted the whole CompTC vector where
## every other quantity was indexed by tt -- it now uses CompTC[tt];
## the unused SdComptTU alias (typo of SdCompTU) has been removed.
WhatGoesEachBoxAA <- function(tt =0, MeanTII, SdTII, MeanTI, SdTI, MeanBetaSq, SdBetaSq,
  PercentPer, CompTU, SdCompTU, CompTC, SdCompTC, rndit=2) {
  if (tt <= 0) {
    rt <- paste(" \\begin{array}{c} ",
      rd0(round(MeanTII,rndit)), "\\mbox{ (",
      rd0(round(SdTII,rndit)), ")} \\\\ \\hline ",
      rd0(round(MeanTI, rndit)), "\\mbox{ (", rd0(round(SdTI,rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq, rndit)), "\\mbox{ (", rd0(round(SdBetaSq, rndit)), ") } \\\\ \\hline ",
      rd0(round(CompTC, rndit)), " \\\\ ",
      " \\end{array} ", sep="")
  } else {
    rt <- paste(" \\begin{array}{c} ",
      rd0(round(MeanTII[tt],rndit)), "\\mbox{ (",
      rd0(round(SdTII[tt],rndit)), ")} \\\\ \\hline ",
      rd0(round(MeanTI[tt], rndit)), "\\mbox{ (", rd0(round(SdTI[tt],rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq[tt], rndit)), "\\mbox{ (", rd0(round(SdBetaSq[tt], rndit)), ") } \\\\ \\hline",
      rd0(round(CompTC[tt], rndit)), " \\\\ ",
      " \\end{array} ", sep="")
  }
  return(rt)
}
#############################################################################
## WhatGoesEachBoxBB <- function(tt =0, MeanTII, SdTII, MeanTI,,...
##
## Helper function to CreatePrintTable, use to input all sumary
## statistics one desires for simulation
##
## WhatGoesEachBoxBB
##
## Alternative two-column cell layout: Type II / Type I errors in the
## first row, squared beta error / percent-perfect in the second, and
## user / computer timings in the third.  tt <= 0 uses whole vectors
## (one cell per estimator); tt > 0 uses element tt only.
##
## Fix: the sd printed next to CompTC was SdCompTU in both branches
## (the SdCompTC argument was never used); it now prints SdCompTC.
WhatGoesEachBoxBB <- function(tt =0, MeanTII, SdTII, MeanTI, SdTI, MeanBetaSq, SdBetaSq,
  PercentPer, CompTU, SdCompTU, CompTC, SdCompTC, rndit=2) {
  if (tt <= 0) {
    rt <- paste(" \\begin{array}{c|c} \\hline ",
      rd0(round(MeanTII,rndit)), "\\mbox{ (",
      rd0(round(SdTII,rndit)), ")} & ",
      rd0(round(MeanTI, rndit)), "\\mbox{ (", rd0(round(SdTI,rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq, rndit)), "\\mbox{ (", rd0(round(SdBetaSq, rndit)), ") } & ",
      rd0(round(PercentPer, rndit)), "\\mbox{\\%} \\\\ \\hline ",
      rd0(round(CompTU, rndit)), "\\mbox{sc (", rd0(round(SdCompTU, rndit)), ") } & ",
      rd0(round(CompTC, rndit)), "\\mbox{sc (", rd0(round(SdCompTC, rndit)), ") } \\\\ \\hline",
      " \\end{array} ", sep="")
  } else {
    rt <- paste(" \\begin{array}{c|c} \\hline ",
      rd0(round(MeanTII[tt],rndit)), "\\mbox{ (",
      rd0(round(SdTII[tt],rndit)), ")} & ",
      rd0(round(MeanTI[tt], rndit)), "\\mbox{ (", rd0(round(SdTI[tt],rndit)), ") } \\\\ \\hline",
      rd0(round(MeanBetaSq[tt], rndit)), "\\mbox{ (", rd0(round(SdBetaSq[tt], rndit)), ") } & ",
      rd0(round(PercentPer[tt]*100, rndit)), "\\mbox{\\%} \\\\ \\hline ",
      rd0(round(CompTU[tt], rndit)), "\\mbox{sc (", rd0(round(SdCompTU[tt], rndit)), ") } & ",
      rd0(round(CompTC[tt], rndit)), "\\mbox{sc (", rd0(round(SdCompTC[tt], rndit)), ") } \\\\ \\hline",
      " \\end{array} ", sep="")
  }
  return(rt)
}
#############################################################################
## LoadSavedTableDirectory <- function()
##
## Tries to identify directory to save Latex Tables into.
##
LoadSavedTableDirectory <- function() {
## List the contents of the R library directory to see whether a
## "PrintTables" entry is known there.
FilesInDIR <- unlist(list.files(.Library));
if (any(FilesInDIR == "PrintTables")) {
## NOTE(review): the path actually used comes from MakePathMe(), not
## .Library -- confirm the .Library check above is intended.
PathMeR <- MakePathMe();
FilesInDIR <- unlist(list.files(PathMeR));
if (any(FilesInDIR == "PrintTables")) {
## Subdirectory already exists: just build its path.
PathMeR = paste(PathMeR, "PrintTables", sep="");
} else {
## Subdirectory missing: build the path and create it on disk.
PathMeR = paste(PathMeR, "PrintTables", sep="");
dir.create(PathMeR, showWarnings = FALSE, recursive = FALSE);
}
} else {
## Fallback: hard-coded Windows development path.
PathMeR = "c://Stat//2008Summer//LarsProject//code//PrintTables//"
}
SavedOutPutDirectory = PathMeR;
return(SavedOutPutDirectory);
}
#######################################################################################
####################################################################################### |
# by Valentina Galata
# 2013.09.30
# 2013.10.14: adding prior knowledge checks
# 2013.10.16: modified to use for HiWi job: miRNA, mRNA data
## Graph representation
# As sparse, upper/right triangular adjacency matrix:
# x---y: m[x,y] = 2
# x-->y: m[x,y] = 1
# x<--y: m[x,y] = -1
# Sparse: Since the resulting graph should be sparse
# Upper/right triangular:
# Less memory needed, edge(x,y) can be encoded by one number, no need in edge(y,x)
# Fast test for presence/absence/orientation of the edges
# Upper/right: m[x,y] with x < y can be non-zero; rest is zero and thus not saved
# Disadvantage: must check whether x < y
## J. Pearl "Causality", Ch. 2.5: IC Algorithm:
# 0) Initialization: Complete graph
# 1) for all x,y in V: if no Z s.t. x _|_ y | Z => x---y
# 2) for all non-adj. x, y with common neighbor w: if w not in Z => x -> w <- y
# 3) orient as many undirected edges as possible s.t.
# i) any alternative orientation would yield a new v-structure
# ii) any alternative orientation would yield a directed cycle
# There are four rules ensuring i) and ii) (see the implementation of step 3))
## Implementation/Extensions/Modifications of the IC algo.:
# Step 0): Prior knowledge: no complete graph as init. graph or see step 1)
# Step 1): PC algorithm; Prior knowledge: consistency, criteria for not applying the indep. test
# Step 2): Prior knowledge: consistency
# Step 3): 4 rules of Verma and Pearl (1992); Prior knowledge: consistency
## Independence test:
# As independent function, which can be varied
# Should have parameter p.value: result$p.value
##################################################################################################################################
## Implementation: Build the initial graph from data and prior knowledge
# Input:
# data: matrix/data frame containing numeric data, one column per variable
# Prior: prior knowledge, default: NULL
# Output: initial undirected graph G
## init_graph: build the initial (undirected) graph for the IC run.
##   data:  numeric matrix/data frame, one column per variable.
##   Prior: optional prior knowledge; when NULL a complete graph is used.
## Returns a sparse upper-triangular adjacency matrix (Matrix package):
## entry 2 codes an undirected edge x---y; only cells with x < y are
## stored (see the encoding notes in the file header).
init_graph <- function(data,Prior=NULL){
G <- NULL
# no prior knowledge: create a complete undirected graph (diagonal has still zeros)
if (is.null(Prior)){
# triu() and Matrix() come from the Matrix package (sparse storage)
G <- triu(Matrix(2, nrow = ncol(data), ncol = ncol(data), sparse = TRUE)) # upper triag. matrix filled with 2s
diag(G) <- 0 # set the diagonal entries to 0
}
# use prior knowledge to initialize the graph
else {
# TODO: prior-knowledge initialization not implemented; G stays NULL here
}
return(G)
}
##################################################################################################################################
## Implementation: IC
# Input:
# G: Initial graph, will be changed during the procedure (sparse upper triangular adj. matrix, class Matrix)
# data: numeric matrix containing observed data, one column per variable
# Prior: NULL if no prior knowledge; list of Ga, Ge, Go (same datatype as G) otherwise
# IT: Independence test, should return as result the p-value
# threshold: threshold for the p-values of the indep. test
# debug: default is 0 - print nothing, 1 - print the graph modifications, 2 - print the modifications and the graph
# steps: 3 perform all 3 steps, 2 perform the first 2 steps, 1 perform only the first step
# Notes: ...
# Output: G, as a PDAG
## IC: run the (modified) IC algorithm.
##   G:         initial graph; NOTE(review): immediately overwritten by
##              the skeleton_mod() result below, so the argument is
##              effectively unused -- confirm this is intended.
##   data:      numeric matrix of observations, one column per variable.
##   Prior:     NULL or list of prior-knowledge matrices (Ga, Ge, Go).
##   IT:        conditional-independence test (passed to skeleton_mod).
##   threshold: alpha level for the independence tests.
##   debug:     0 silent, 1 log modifications, 2 also print graphs.
##   steps:     how many IC steps to run (1, 2 or 3).
## Returns the PDAG after the requested steps.  NOTE(review): with
## steps = 1 the returned value is the two-element result of step 1
## (graph + separation sets), not a bare graph -- confirm callers.
IC <- function(G, data, Prior=NULL, IT, threshold=0.05, debug=0, steps=3){
# results of step 1): mod. graph and the sets Z for x,y with x_|_y | Z
G <- skeleton_mod(suffStat=list(C = cor(data), n = nrow(data)), indepTest=IT, p=ncol(data), alpha=threshold, verbose = (debug>0),
fixedGaps = NULL, fixedEdges = NULL, NAdelete = TRUE, m.max = Inf)
# G <- IC_stepI(G=G, data=data, Prior=Prior, IT=IT, threshold=threshold, debug=debug)
if (debug == 2){print(G[[1]])}
print('IC: step 1 finished')
if (steps>=2){
# mod. graph after step 2): orient v-structures x -> w <- y
G <- IC_stepII(G=G[[1]], Prior=Prior, Z_xy=G[[2]], debug=debug)
if (debug == 2){print(G)}
print('IC: step 2 finished')
}
if (steps==3){
# mod. graph after step 3): orient remaining edges (Verma/Pearl rules)
G <- IC_steppIII(G=G, Prior=Prior, debug=debug)
if (debug == 2){print(G)}
print('IC: step 3 finished')
}
return(G)
}
# Step 2) of the IC algorithm
# Input:
# G: mod. graph from step 1), sparse upper triangular adj. matrix, class Matrix
# Prior: list of Ga, Ge, Go (same data type as G)
# Z_xy: list, contains for each x,y set Z with x_|_y|Z if such Z exists
# debug: 0 - no info printing, 1-2: information is printed
# Output: PDAG G
IC_stepII <- function(G, Prior, Z_xy, debug){
nvar <- ncol(G) # number of variables
G_ <- G # copy of G, which stays unmodified during this step
if (debug>0) {print('Info: Starting step 2) of the IC algorithm')}
und.edges <- which(G_==2, arr.ind=TRUE) # all undirected edges (in the old copy of G)
for (e in 1:nrow(und.edges)){
x <- und.edges[e,1]; w <- und.edges[e,2]
ys <- get_adj(G_,w,'-') # all y with x-w-y (in the old copy of G)
for (y in ys){ # for each possible y check
Z <- Z_xy[[x]][[y]]; if (is.null(Z)){Z <- Z_xy[[y]][[x]]} # x _|_ y | Z: (empty) vector if Z_xy was saved, otherwise NULL
# if: x!=y, w not in Z, x,y not adj. (in the old copy of G)
if (x!=y & length(intersect(Z,w))==0 & length(intersect(get_adj(G_,x,'?'),y))==0) {
G <- mod_edge(G,x,w,'->')
G <- mod_edge(G,w,y,'<-')
if (debug>0) {print(paste('Build: ',x,'->',w,'<-',y,' because x=',x,', y=',y,', Z=',paste(Z,collapse=','),sep=''))}
}
}
# analogue to above: change w and x
w <- und.edges[e,1]; x <- und.edges[e,2]
ys <- get_adj(G_,w,'-') # all y with x-w-y (in the old copy of G)
for (y in ys){
Z <- Z_xy[[x]][[y]]; if (is.null(Z)){Z <- Z_xy[[y]][[x]]}
if (x!=y & length(intersect(Z,w))==0 & length(intersect(get_adj(G_,x,'?'),y))==0) {
G <- mod_edge(G,x,w,'->')
G <- mod_edge(G,w,y,'<-')
if (debug>0) {print(paste('Build: ',x,'->',w,'<-',y,' because x=',x,', y=',y,', Z=',paste(Z,collapse=','),sep=''))}
}
}
}
return(G)
}
# Step 3) of the IC algorithm
# Input:
# G: mod. graph from step 1), sparse upper triangular adj. matrix, class Matrix
# Prior: list of Ga, Ge, Go (same data type as G)
# debug: 0 - no info printing, 1-2: information is printed
# Output: (P)DAG G
IC_steppIII <- function(G, Prior, debug){
nvar <- ncol(G) # number of variables
if (debug>0) {print('Info: Starting step 3) of the IC algorithm')}
changed <- TRUE # TRUE if the orientation of an edge was changed, FALSE otherwise
while(changed){
changed <- FALSE
und.edges <- which(G==2, arr.ind=TRUE) # all undirected edges
if(nrow(und.edges)==0){break}
for (e in 1:nrow(und.edges)){
a <- und.edges[e,1]; b <- und.edges[e,2]
# check the rules for a->b
if (check_rules(G,a,b,debug=debug)){ # if a rule can be applied: create a->b, set changed, go to next edge
G <- mod_edge(G,a,b,'->')
changed <- TRUE
next
}
# check the rules for b->a
if (check_rules(G,b,a,debug=debug)){ # if a rule can be applied: create a<-b, set changed, go to next edge
G <- mod_edge(G,a,b,'<-')
changed <- TRUE
next
}
}
}
return(G)
}
# Help function for step 3)
# Input: Graph G, nodes a and b
# Output: true if any rule could be applied to a and b, otherwise - false
check_rules <- function(G,a,b,debug){
# rule 1: a-b into a->b if (c->a and c,b non-adj.)
adj_a <- get_adj(G,a,'<-') # all c with c->a
adj_b <- get_nadj(G,b) # all c non-adj. to b (for any kind of edges)
if (length(intersect(adj_a,adj_b))>0){
if (debug>0){print(paste('Rule 1: Build ',a,'->',b,' where c=',intersect(adj_a,adj_b)[1],sep=''))}
return(TRUE)
}
# rule 2: a-b into a->b if a->c->b
adj_a <- get_adj(G,a,'->') # all c with a->c
adj_b <- get_adj(G,b,'<-') # all c with c->b
if (length(intersect(adj_a,adj_b))>0) {
if (debug>0){print(paste('Rule 2: Build ',a,'->',b,' where c=',intersect(adj_a,adj_b)[1],sep=''))}
return(TRUE)
}
# rule 3: a-b into a->b if (a-c->b and a-d->b and c,d non-adj.)
adj_a <- get_adj(G,a,'-') # all c,d with a-c/d (undirected)
adj_b <- get_adj(G,b,'<-') # all c,d with c/d->b
cd <- intersect(adj_a,adj_b) # all c,d with a-c/d->b
if (length(cd)>=2) {
for (c in cd){
for (d in setdiff(cd,c)){
if (c!=d & length(intersect(get_adj(G,c,'?'),d))==0){ # if c!=d and c,d non-adj
if (debug>0){print(paste('Rule 3: Build ',a,'->',b,' where c=',c,' and d=',d,sep=''))}
return(TRUE)
}
}
}
}
# rule 4: a-b into a->b if (a-c->d and c->d->b and c,b non-adj.)
adj_a <- get_adj(G,a,'-') # all c with a-c (undirected)
adj_b <- get_adj(G,b,'<-') # all d with d->b
for (c in adj_a){
for (d in adj_b){
if ((G[c,d]==1 | G[d,c]==-1) & length(intersect(get_adj(G,c,'?'),b))==0) { # if c->d and c,b non-adj.
if (debug>0){print(paste('Rule 4: Build ',a,'->',b,' where c=',c,' and d=',d,sep=''))}
return(TRUE)
}
}
}
return(FALSE)
}
##################################################################################################################################
## Implementation: Help functions, setter/getter for a given graph (as sparce right triangular adj. matrix)
# get all combinations of size i from vector adj
combinations <- function(adj,i){
if (length(adj)==1 & i==1){return(matrix(adj))}
else {return(combn(x=adj, m=i))}
} | /code/Verhaak/HiWi_BN/Val_IC/IC.R | no_license | VGalata/miRNA_analysis | R | false | false | 9,933 | r | # by Valentina Galata
# 2013.09.30
# 2013.10.14: adding prior knowledge checks
# 2013.10.16: modified to use for HiWi job: miRNA, mRNA data
## Graph representation
# As sparse, upper/right triangular adjacency matrix:
# x---y: m[x,y] = 2
# x-->y: m[x,y] = 1
# x<--y: m[x,y] = -1
# Sparse: Since the resulting graph should be sparse
# Upper/right triangular:
# Less memory needed, edge(x,y) can be encoded by one number, no need in edge(y,x)
# Fast test for presence/absence/orientation of the edges
# Upper/right: m[x,y] with x < y can be non-zero; rest is zero and thus not saved
# Disadventage: check whether x<y
## J. Pearl "Causality", Ch. 2.5: IC Algorithmus:
# 0) Initialization: Complete graph
# 1) for all x,y in V: if no Z s.t. x _|_ y | Z => x---y
# 2) for all non-adj. x, y with commong neighbor w: if w not in Z => x -> w <- y
# 3) orient as many undirect. edges as possible s.t.
# i) any allternative orientation would yield a new v-structure
# ii) any allternative orientation would yield a direct. cycle
# There are four rules ensuring i) and ii) (see the implementation of step 3))
## Implementation/Extensions/Modifications of the IC algo.:
# Step 0): Prior knowledge: no complete graph as init. graph or see step 1)
# Step 1): PC algorithm; Prior knowledge: consistency, criteria for not applying the indep. test
# Step 2): Prior knowledge: consistency
# Step 3): 4 rules of Verma and Pearl (1992); Prior knowledge: consistency
## Independence test:
# As independent function, which can be varied
# Should have parameter p.value: result$p.value
##################################################################################################################################
## Implementation: Build the initial graph from data and prior knowledge
# Input:
# data: matrix/data frame containing numeric data, one column per variable
# Prior: prior knowledge, default: NULL
# Output: initial undirected graph G
init_graph <- function(data,Prior=NULL){
G <- NULL
# no prior knowledge: create a complete undirected graph (diagonal has still zeros)
if (is.null(Prior)){
G <- triu(Matrix(2, nrow = ncol(data), ncol = ncol(data), sparse = TRUE)) # upper triag. matrix filled with 2s
diag(G) <- 0 # set the diagonal entries to 0
}
# use prior knowledge to initialize the graph
else {
# TODO
}
return(G)
}
##################################################################################################################################
## Implementation: IC
# Input:
# G: Initial graph, will be changed during the procedure (sparse upper triangular adj. matrix, class Matrix)
# data: numeric matrix containing observed data, one column per variable
# Prior: NULL if no prior knowledge; list of Ga, Ge, Go (same datatype as G) otherwise
# IT: Independence test, should return as result the p-value
# threshold: threshold for the p-values of the indep. test
# debug: default is 0 - print nothing, 1 - print the graph modifications, 2 - print the modifications and the graph
# steps: 3 perform all 3 steps, 2 perform the first 2 steps, 1 perform only the first step
# Notes: ...
# Output: G, as a PDAG
IC <- function(G, data, Prior=NULL, IT, threshold=0.05, debug=0, steps=3){
# results of step 1): mod. graph and the sets Z for x,y with x_|_y | Z
G <- skeleton_mod(suffStat=list(C = cor(data), n = nrow(data)), indepTest=IT, p=ncol(data), alpha=threshold, verbose = (debug>0),
fixedGaps = NULL, fixedEdges = NULL, NAdelete = TRUE, m.max = Inf)
# G <- IC_stepI(G=G, data=data, Prior=Prior, IT=IT, threshold=threshold, debug=debug)
if (debug == 2){print(G[[1]])}
print('IC: step 1 finished')
if (steps>=2){
# mod. graph after step 2)
G <- IC_stepII(G=G[[1]], Prior=Prior, Z_xy=G[[2]], debug=debug)
if (debug == 2){print(G)}
print('IC: step 2 finished')
}
if (steps==3){
# mod. graph after step 3)
G <- IC_steppIII(G=G, Prior=Prior, debug=debug)
if (debug == 2){print(G)}
print('IC: step 3 finished')
}
return(G)
}
# Step 2) of the IC algorithm
# Input:
# G: mod. graph from step 1), sparse upper triangular adj. matrix, class Matrix
# Prior: list of Ga, Ge, Go (same data type as G)
# Z_xy: list, contains for each x,y set Z with x_|_y|Z if such Z exists
# debug: 0 - no info printing, 1-2: information is printed
# Output: PDAG G
IC_stepII <- function(G, Prior, Z_xy, debug){
nvar <- ncol(G) # number of variables
G_ <- G # copy of G, which stays unmodified during this step
if (debug>0) {print('Info: Starting step 2) of the IC algorithm')}
und.edges <- which(G_==2, arr.ind=TRUE) # all undirected edges (in the old copy of G)
for (e in 1:nrow(und.edges)){
x <- und.edges[e,1]; w <- und.edges[e,2]
ys <- get_adj(G_,w,'-') # all y with x-w-y (in the old copy of G)
for (y in ys){ # for each possible y check
Z <- Z_xy[[x]][[y]]; if (is.null(Z)){Z <- Z_xy[[y]][[x]]} # x _|_ y | Z: (empty) vector if Z_xy was saved, otherwise NULL
# if: x!=y, w not in Z, x,y not adj. (in the old copy of G)
if (x!=y & length(intersect(Z,w))==0 & length(intersect(get_adj(G_,x,'?'),y))==0) {
G <- mod_edge(G,x,w,'->')
G <- mod_edge(G,w,y,'<-')
if (debug>0) {print(paste('Build: ',x,'->',w,'<-',y,' because x=',x,', y=',y,', Z=',paste(Z,collapse=','),sep=''))}
}
}
# analogue to above: change w and x
w <- und.edges[e,1]; x <- und.edges[e,2]
ys <- get_adj(G_,w,'-') # all y with x-w-y (in the old copy of G)
for (y in ys){
Z <- Z_xy[[x]][[y]]; if (is.null(Z)){Z <- Z_xy[[y]][[x]]}
if (x!=y & length(intersect(Z,w))==0 & length(intersect(get_adj(G_,x,'?'),y))==0) {
G <- mod_edge(G,x,w,'->')
G <- mod_edge(G,w,y,'<-')
if (debug>0) {print(paste('Build: ',x,'->',w,'<-',y,' because x=',x,', y=',y,', Z=',paste(Z,collapse=','),sep=''))}
}
}
}
return(G)
}
# Step 3) of the IC algorithm
# Input:
# G: mod. graph from step 1), sparse upper triangular adj. matrix, class Matrix
# Prior: list of Ga, Ge, Go (same data type as G)
# debug: 0 - no info printing, 1-2: information is printed
# Output: (P)DAG G
IC_steppIII <- function(G, Prior, debug){
nvar <- ncol(G) # number of variables
if (debug>0) {print('Info: Starting step 3) of the IC algorithm')}
changed <- TRUE # TRUE if the orientation of an edge was changed, FALSE otherwise
while(changed){
changed <- FALSE
und.edges <- which(G==2, arr.ind=TRUE) # all undirected edges
if(nrow(und.edges)==0){break}
for (e in 1:nrow(und.edges)){
a <- und.edges[e,1]; b <- und.edges[e,2]
# check the rules for a->b
if (check_rules(G,a,b,debug=debug)){ # if a rule can be applied: create a->b, set changed, go to next edge
G <- mod_edge(G,a,b,'->')
changed <- TRUE
next
}
# check the rules for b->a
if (check_rules(G,b,a,debug=debug)){ # if a rule can be applied: create a<-b, set changed, go to next edge
G <- mod_edge(G,a,b,'<-')
changed <- TRUE
next
}
}
}
return(G)
}
# Help function for step 3)
# Input: Graph G, nodes a and b
# Output: true if any rule could be applied to a and b, otherwise - false
check_rules <- function(G,a,b,debug){
# rule 1: a-b into a->b if (c->a and c,b non-adj.)
adj_a <- get_adj(G,a,'<-') # all c with c->a
adj_b <- get_nadj(G,b) # all c non-adj. to b (for any kind of edges)
if (length(intersect(adj_a,adj_b))>0){
if (debug>0){print(paste('Rule 1: Build ',a,'->',b,' where c=',intersect(adj_a,adj_b)[1],sep=''))}
return(TRUE)
}
# rule 2: a-b into a->b if a->c->b
adj_a <- get_adj(G,a,'->') # all c with a->c
adj_b <- get_adj(G,b,'<-') # all c with c->b
if (length(intersect(adj_a,adj_b))>0) {
if (debug>0){print(paste('Rule 2: Build ',a,'->',b,' where c=',intersect(adj_a,adj_b)[1],sep=''))}
return(TRUE)
}
# rule 3: a-b into a->b if (a-c->b and a-d->b and c,d non-adj.)
adj_a <- get_adj(G,a,'-') # all c,d with a-c/d (undirected)
adj_b <- get_adj(G,b,'<-') # all c,d with c/d->b
cd <- intersect(adj_a,adj_b) # all c,d with a-c/d->b
if (length(cd)>=2) {
for (c in cd){
for (d in setdiff(cd,c)){
if (c!=d & length(intersect(get_adj(G,c,'?'),d))==0){ # if c!=d and c,d non-adj
if (debug>0){print(paste('Rule 3: Build ',a,'->',b,' where c=',c,' and d=',d,sep=''))}
return(TRUE)
}
}
}
}
# rule 4: a-b into a->b if (a-c->d and c->d->b and c,b non-adj.)
adj_a <- get_adj(G,a,'-') # all c with a-c (undirected)
adj_b <- get_adj(G,b,'<-') # all d with d->b
for (c in adj_a){
for (d in adj_b){
if ((G[c,d]==1 | G[d,c]==-1) & length(intersect(get_adj(G,c,'?'),b))==0) { # if c->d and c,b non-adj.
if (debug>0){print(paste('Rule 4: Build ',a,'->',b,' where c=',c,' and d=',d,sep=''))}
return(TRUE)
}
}
}
return(FALSE)
}
##################################################################################################################################
## Implementation: Help functions, setter/getter for a given graph (as sparce right triangular adj. matrix)
# get all combinations of size i from vector adj
combinations <- function(adj,i){
if (length(adj)==1 & i==1){return(matrix(adj))}
else {return(combn(x=adj, m=i))}
} |
\alias{pango-Fonts}
\alias{PangoFontDescription}
\alias{PangoFontMetrics}
\alias{PangoFont}
\alias{PangoFontFamily}
\alias{PangoFontFace}
\alias{PangoFontMap}
\alias{PangoFontset}
\alias{PangoFontsetSimple}
\alias{PangoFontsetForeachFunc}
\alias{PangoStyle}
\alias{PangoWeight}
\alias{PangoVariant}
\alias{PangoStretch}
\alias{PangoFontMask}
\name{pango-Fonts}
\title{Fonts}
\description{Structures representing abstract fonts}
\section{Methods and Functions}{
\code{\link{pangoFontDescriptionNew}()}\cr
\code{\link{pangoFontDescriptionCopy}(object)}\cr
\code{\link{pangoFontDescriptionCopyStatic}(object)}\cr
\code{\link{pangoFontDescriptionHash}(object)}\cr
\code{\link{pangoFontDescriptionEqual}(object, desc2)}\cr
\code{\link{pangoFontDescriptionSetFamily}(object, family)}\cr
\code{\link{pangoFontDescriptionSetFamilyStatic}(object, family)}\cr
\code{\link{pangoFontDescriptionGetFamily}(object)}\cr
\code{\link{pangoFontDescriptionSetStyle}(object, style)}\cr
\code{\link{pangoFontDescriptionGetStyle}(object)}\cr
\code{\link{pangoFontDescriptionSetVariant}(object, variant)}\cr
\code{\link{pangoFontDescriptionGetVariant}(object)}\cr
\code{\link{pangoFontDescriptionSetWeight}(object, weight)}\cr
\code{\link{pangoFontDescriptionGetWeight}(object)}\cr
\code{\link{pangoFontDescriptionSetStretch}(object, stretch)}\cr
\code{\link{pangoFontDescriptionGetStretch}(object)}\cr
\code{\link{pangoFontDescriptionSetSize}(object, size)}\cr
\code{\link{pangoFontDescriptionGetSize}(object)}\cr
\code{\link{pangoFontDescriptionSetAbsoluteSize}(object, size)}\cr
\code{\link{pangoFontDescriptionGetSizeIsAbsolute}(object)}\cr
\code{\link{pangoFontDescriptionGetSetFields}(object)}\cr
\code{\link{pangoFontDescriptionUnsetFields}(object, to.unset)}\cr
\code{\link{pangoFontDescriptionMerge}(object, desc.to.merge, replace.existing)}\cr
\code{\link{pangoFontDescriptionBetterMatch}(object, old.match = NULL, new.match)}\cr
\code{\link{pangoFontDescriptionFromString}(str)}\cr
\code{\link{pangoFontDescriptionToString}(object)}\cr
\code{\link{pangoFontDescriptionToFilename}(object)}\cr
\code{\link{pangoFontMetricsGetAscent}(object)}\cr
\code{\link{pangoFontMetricsGetDescent}(object)}\cr
\code{\link{pangoFontMetricsGetApproximateCharWidth}(object)}\cr
\code{\link{pangoFontMetricsGetApproximateDigitWidth}(object)}\cr
\code{\link{pangoFontMetricsGetUnderlineThickness}(object)}\cr
\code{\link{pangoFontMetricsGetUnderlinePosition}(object)}\cr
\code{\link{pangoFontMetricsGetStrikethroughThickness}(object)}\cr
\code{\link{pangoFontMetricsGetStrikethroughPosition}(object)}\cr
\code{\link{pangoFontDescribe}(object)}\cr
\code{\link{pangoFontDescribeWithAbsoluteSize}(object)}\cr
\code{\link{pangoFontGetCoverage}(object, language)}\cr
\code{\link{pangoFontGetGlyphExtents}(object, glyph)}\cr
\code{\link{pangoFontGetMetrics}(object, language = NULL)}\cr
\code{\link{pangoFontGetFontMap}(object)}\cr
\code{\link{pangoFontFamilyGetName}(object)}\cr
\code{\link{pangoFontFamilyIsMonospace}(object)}\cr
\code{\link{pangoFontFamilyListFaces}(object)}\cr
\code{\link{pangoFontFaceGetFaceName}(object)}\cr
\code{\link{pangoFontFaceListSizes}(object)}\cr
\code{\link{pangoFontFaceDescribe}(object)}\cr
\code{\link{pangoFontMapLoadFont}(object, context, desc)}\cr
\code{\link{pangoFontMapLoadFontset}(object, context, desc, language)}\cr
\code{\link{pangoFontMapListFamilies}(object)}\cr
\code{\link{pangoFontsetGetFont}(object, wc)}\cr
\code{\link{pangoFontsetGetMetrics}(object)}\cr
\code{\link{pangoFontsetForeach}(object, func, data)}\cr
}
\section{Hierarchy}{\preformatted{
\link{GObject}
+----PangoFont
+----PangoFcFont
\link{GObject}
+----PangoFontFamily
\link{GObject}
+----PangoFontFace
\link{GObject}
+----PangoFontMap
+----PangoFcFontMap
\link{GObject}
+----PangoFontset
+----\link{PangoFontsetSimple}
\link{GObject}
+----\link{PangoFontset}
+----PangoFontsetSimple
}}
\section{Interface Derivations}{PangoFontMap is required by
\code{\link{PangoCairoFontMap}}.}
\section{Detailed Description}{Pango supports a flexible architecture where a
particular rendering architecture can supply an
implementation of fonts. The \code{\link{PangoFont}} structure
represents an abstract rendering-system-indepent font.
Pango provides routines to list available fonts, and
to load a font of a given description.}
\section{Structures}{\describe{
\item{\code{PangoFontDescription}}{
The \code{\link{PangoFontDescription}} structure represents the description
of an ideal font. These structures are used both to list
what fonts are available on the system and also for specifying
the characteristics of a font to load.
}
\item{\code{PangoFontMetrics}}{
A \code{\link{PangoFontMetrics}} structure holds the overall metric information
for a font (possibly restricted to a script). The fields of this
structure are private to implementations of a font backend. See
the documentation of the corresponding getters for documentation
of their meaning.
\describe{
\item{\code{ref_count}}{[numeric] reference count. Used internally. See \code{pangoFontMetricsRef()}
and \code{pangoFontMetricsUnref()}.}
\item{\code{ascent}}{[integer] the distance from the baseline to the highest point of the glyphs of
the font. This is positive in practically all fonts.}
\item{\code{descent}}{[integer] the distance from the baseline to the lowest point of the glyphs of
the font. This is positive in practically all fonts.}
\item{\code{approximate_char_width}}{[integer] approximate average width of the regular glyphs of
the font.}
\item{\code{approximate_digit_width}}{[integer] approximate average width of the glyphs for digits
of the font.}
\item{\code{underline_position}}{[integer] position of the underline. This is normally negative.}
\item{\code{underline_thickness}}{[integer] thickness of the underline.}
\item{\code{strikethrough_position}}{[integer] position of the strikethrough line. This is
normally positive.}
\item{\code{strikethrough_thickness}}{[integer] thickness of the strikethrough line.}
}
}
\item{\code{PangoFont}}{
The \code{\link{PangoFont}} structure is used to represent
a font in a rendering-system-independent matter.
To create an implementation of a \code{\link{PangoFont}},
the rendering-system specific code should malloc
a larger structure that contains a nested
\code{\link{PangoFont}}, fill in the klass member of
the nested \code{\link{PangoFont}} with a pointer to
a appropriate \code{PangoFontClass}, then call
\code{pangoFontInit()} on the structure.
The \code{\link{PangoFont}} structure contains one member
which the implementation fills in.
}
\item{\code{PangoFontFamily}}{
The \code{\link{PangoFontFamily}} structure is used to represent a family of related
font faces. The faces in a family share a common design, but differ in
slant, weight, width and other aspects.
}
\item{\code{PangoFontFace}}{
The \code{\link{PangoFontFace}} structure is used to represent a group of fonts with
the same family, slant, weight, width, but varying sizes.
}
\item{\code{PangoFontMap}}{
The \code{\link{PangoFontMap}} represents the set of fonts available for a
particular rendering system. This is a virtual object with
implementations being specific to particular rendering systems. To
create an implementation of a \code{\link{PangoFontMap}}, the rendering-system
specific code should malloc a larger structure that contains a nested
\code{\link{PangoFontMap}}, fill in the klass member of the nested \code{\link{PangoFontMap}} with a
pointer to a appropriate \code{PangoFontMapClass}, then call
\code{pangoFontMapInit()} on the structure.
The \code{\link{PangoFontMap}} structure contains one member which the implementation
fills in.
}
\item{\code{PangoFontset}}{
A \code{\link{PangoFontset}} represents a set of \code{\link{PangoFont}} to use
when rendering text. It is the result of resolving a
\code{\link{PangoFontDescription}} against a particular \code{\link{PangoContext}}.
It has operations for finding the component font for
a particular Unicode character, and for finding a composite
set of metrics for the entire fontset.
}
\item{\code{PangoFontsetSimple}}{
\code{\link{PangoFontsetSimple}} is a implementation of the abstract
\code{\link{PangoFontset}} base class in terms of a list of fonts,
which the creator provides when constructing the
\code{\link{PangoFontsetSimple}}.
}
}}
\section{Enums and Flags}{\describe{
\item{\code{PangoStyle}}{
An enumeration specifying the various slant styles possible for a font.
\describe{
\item{\code{normal}}{ the font is upright.}
\item{\code{oblique}}{ the font is slanted, but in a roman style.}
\item{\code{italic}}{ the font is slanted in an italic style.}
}
}
\item{\code{PangoWeight}}{
An enumeration specifying the weight (boldness) of a font. This is a numerical
value ranging from 100 to 900, but there are some predefined values:
\describe{
\item{\code{ultralight}}{the ultralight weight (= 200)}
\item{\code{light}}{ the light weight (=300)}
\item{\code{normal}}{the default weight (= 400)}
\item{\code{semibold}}{a weight intermediate between normal and bold (=600)}
\item{\code{bold}}{the bold weight (= 700)}
\item{\code{ultrabold}}{the ultrabold weight (= 800)}
\item{\code{heavy}}{the heavy weight (= 900)}
}
}
\item{\code{PangoVariant}}{
An enumeration specifying capitalization variant of the font.
\describe{
\item{\code{normal}}{A normal font.}
\item{\code{small-caps}}{A font with the lower case characters
replaced by smaller variants of the capital characters.}
}
}
\item{\code{PangoStretch}}{
An enumeration specifying the width of the font relative to other designs
within a family.
\describe{
\item{\code{ultra-condensed}}{ultra condensed width}
\item{\code{extra-condensed}}{extra condensed width}
\item{\code{condensed}}{condensed width}
\item{\code{semi-condensed}}{semi condensed width}
\item{\code{normal}}{the normal width}
\item{\code{semi-expanded}}{semi expanded width}
\item{\code{expanded}}{expanded width}
\item{\code{extra-expanded}}{extra expanded width}
\item{\code{ultra-expanded}}{ultra expanded width}
}
}
\item{\code{PangoFontMask}}{
The bits in a \code{\link{PangoFontMask}} correspond to fields in a
\code{\link{PangoFontDescription}} that have been set.
\describe{
\item{\code{family}}{the font family is specified.}
\item{\code{style}}{the font style is specified.}
\item{\code{variant}}{the font variant is specified.}
\item{\code{weight}}{the font weight is specified.}
\item{\code{stretch}}{the font stretch is specified.}
\item{\code{size}}{the font size is specified.}
}
}
}}
\section{User Functions}{\describe{\item{\code{PangoFontsetForeachFunc(fontset, font, data)}}{
A callback function used by \code{\link{pangoFontsetForeach}} when enumerating
the fonts in a fontset.
Since 1.4
\describe{
\item{\code{fontset}}{[\code{\link{PangoFontset}}] a \code{\link{PangoFontset}}}
\item{\code{font}}{[\code{\link{PangoFont}}] a font from \code{fontset}}
\item{\code{data}}{[R object] callback data}
}
\emph{Returns:} [logical] if \code{TRUE}, stop iteration and return immediately.
}}}
\references{\url{http://developer.gnome.org/doc/API/2.0/pango/pango-Fonts.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/pango-Fonts.Rd | no_license | cran/RGtk2.10 | R | false | false | 11,290 | rd | \alias{pango-Fonts}
\alias{PangoFontDescription}
\alias{PangoFontMetrics}
\alias{PangoFont}
\alias{PangoFontFamily}
\alias{PangoFontFace}
\alias{PangoFontMap}
\alias{PangoFontset}
\alias{PangoFontsetSimple}
\alias{PangoFontsetForeachFunc}
\alias{PangoStyle}
\alias{PangoWeight}
\alias{PangoVariant}
\alias{PangoStretch}
\alias{PangoFontMask}
\name{pango-Fonts}
\title{Fonts}
\description{Structures representing abstract fonts}
\section{Methods and Functions}{
\code{\link{pangoFontDescriptionNew}()}\cr
\code{\link{pangoFontDescriptionCopy}(object)}\cr
\code{\link{pangoFontDescriptionCopyStatic}(object)}\cr
\code{\link{pangoFontDescriptionHash}(object)}\cr
\code{\link{pangoFontDescriptionEqual}(object, desc2)}\cr
\code{\link{pangoFontDescriptionSetFamily}(object, family)}\cr
\code{\link{pangoFontDescriptionSetFamilyStatic}(object, family)}\cr
\code{\link{pangoFontDescriptionGetFamily}(object)}\cr
\code{\link{pangoFontDescriptionSetStyle}(object, style)}\cr
\code{\link{pangoFontDescriptionGetStyle}(object)}\cr
\code{\link{pangoFontDescriptionSetVariant}(object, variant)}\cr
\code{\link{pangoFontDescriptionGetVariant}(object)}\cr
\code{\link{pangoFontDescriptionSetWeight}(object, weight)}\cr
\code{\link{pangoFontDescriptionGetWeight}(object)}\cr
\code{\link{pangoFontDescriptionSetStretch}(object, stretch)}\cr
\code{\link{pangoFontDescriptionGetStretch}(object)}\cr
\code{\link{pangoFontDescriptionSetSize}(object, size)}\cr
\code{\link{pangoFontDescriptionGetSize}(object)}\cr
\code{\link{pangoFontDescriptionSetAbsoluteSize}(object, size)}\cr
\code{\link{pangoFontDescriptionGetSizeIsAbsolute}(object)}\cr
\code{\link{pangoFontDescriptionGetSetFields}(object)}\cr
\code{\link{pangoFontDescriptionUnsetFields}(object, to.unset)}\cr
\code{\link{pangoFontDescriptionMerge}(object, desc.to.merge, replace.existing)}\cr
\code{\link{pangoFontDescriptionBetterMatch}(object, old.match = NULL, new.match)}\cr
\code{\link{pangoFontDescriptionFromString}(str)}\cr
\code{\link{pangoFontDescriptionToString}(object)}\cr
\code{\link{pangoFontDescriptionToFilename}(object)}\cr
\code{\link{pangoFontMetricsGetAscent}(object)}\cr
\code{\link{pangoFontMetricsGetDescent}(object)}\cr
\code{\link{pangoFontMetricsGetApproximateCharWidth}(object)}\cr
\code{\link{pangoFontMetricsGetApproximateDigitWidth}(object)}\cr
\code{\link{pangoFontMetricsGetUnderlineThickness}(object)}\cr
\code{\link{pangoFontMetricsGetUnderlinePosition}(object)}\cr
\code{\link{pangoFontMetricsGetStrikethroughThickness}(object)}\cr
\code{\link{pangoFontMetricsGetStrikethroughPosition}(object)}\cr
\code{\link{pangoFontDescribe}(object)}\cr
\code{\link{pangoFontDescribeWithAbsoluteSize}(object)}\cr
\code{\link{pangoFontGetCoverage}(object, language)}\cr
\code{\link{pangoFontGetGlyphExtents}(object, glyph)}\cr
\code{\link{pangoFontGetMetrics}(object, language = NULL)}\cr
\code{\link{pangoFontGetFontMap}(object)}\cr
\code{\link{pangoFontFamilyGetName}(object)}\cr
\code{\link{pangoFontFamilyIsMonospace}(object)}\cr
\code{\link{pangoFontFamilyListFaces}(object)}\cr
\code{\link{pangoFontFaceGetFaceName}(object)}\cr
\code{\link{pangoFontFaceListSizes}(object)}\cr
\code{\link{pangoFontFaceDescribe}(object)}\cr
\code{\link{pangoFontMapLoadFont}(object, context, desc)}\cr
\code{\link{pangoFontMapLoadFontset}(object, context, desc, language)}\cr
\code{\link{pangoFontMapListFamilies}(object)}\cr
\code{\link{pangoFontsetGetFont}(object, wc)}\cr
\code{\link{pangoFontsetGetMetrics}(object)}\cr
\code{\link{pangoFontsetForeach}(object, func, data)}\cr
}
\section{Hierarchy}{\preformatted{
\link{GObject}
+----PangoFont
+----PangoFcFont
\link{GObject}
+----PangoFontFamily
\link{GObject}
+----PangoFontFace
\link{GObject}
+----PangoFontMap
+----PangoFcFontMap
\link{GObject}
+----PangoFontset
+----\link{PangoFontsetSimple}
\link{GObject}
+----\link{PangoFontset}
+----PangoFontsetSimple
}}
\section{Interface Derivations}{PangoFontMap is required by
\code{\link{PangoCairoFontMap}}.}
\section{Detailed Description}{Pango supports a flexible architecture where a
particular rendering architecture can supply an
implementation of fonts. The \code{\link{PangoFont}} structure
represents an abstract rendering-system-indepent font.
Pango provides routines to list available fonts, and
to load a font of a given description.}
\section{Structures}{\describe{
\item{\code{PangoFontDescription}}{
The \code{\link{PangoFontDescription}} structure represents the description
of an ideal font. These structures are used both to list
what fonts are available on the system and also for specifying
the characteristics of a font to load.
}
\item{\code{PangoFontMetrics}}{
A \code{\link{PangoFontMetrics}} structure holds the overall metric information
for a font (possibly restricted to a script). The fields of this
structure are private to implementations of a font backend. See
the documentation of the corresponding getters for documentation
of their meaning.
\describe{
\item{\code{ref_count}}{[numeric] reference count. Used internally. See \code{pangoFontMetricsRef()}
and \code{pangoFontMetricsUnref()}.}
\item{\code{ascent}}{[integer] the distance from the baseline to the highest point of the glyphs of
the font. This is positive in practically all fonts.}
\item{\code{descent}}{[integer] the distance from the baseline to the lowest point of the glyphs of
the font. This is positive in practically all fonts.}
\item{\code{approximate_char_width}}{[integer] approximate average width of the regular glyphs of
the font.}
\item{\code{approximate_digit_width}}{[integer] approximate average width of the glyphs for digits
of the font.}
\item{\code{underline_position}}{[integer] position of the underline. This is normally negative.}
\item{\code{underline_thickness}}{[integer] thickness of the underline.}
\item{\code{strikethrough_position}}{[integer] position of the strikethrough line. This is
normally positive.}
\item{\code{strikethrough_thickness}}{[integer] thickness of the strikethrough line.}
}
}
\item{\code{PangoFont}}{
The \code{\link{PangoFont}} structure is used to represent
a font in a rendering-system-independent matter.
To create an implementation of a \code{\link{PangoFont}},
the rendering-system specific code should malloc
a larger structure that contains a nested
\code{\link{PangoFont}}, fill in the klass member of
the nested \code{\link{PangoFont}} with a pointer to
a appropriate \code{PangoFontClass}, then call
\code{pangoFontInit()} on the structure.
The \code{\link{PangoFont}} structure contains one member
which the implementation fills in.
}
\item{\code{PangoFontFamily}}{
The \code{\link{PangoFontFamily}} structure is used to represent a family of related
font faces. The faces in a family share a common design, but differ in
slant, weight, width and other aspects.
}
\item{\code{PangoFontFace}}{
The \code{\link{PangoFontFace}} structure is used to represent a group of fonts with
the same family, slant, weight, width, but varying sizes.
}
\item{\code{PangoFontMap}}{
The \code{\link{PangoFontMap}} represents the set of fonts available for a
particular rendering system. This is a virtual object with
implementations being specific to particular rendering systems. To
create an implementation of a \code{\link{PangoFontMap}}, the rendering-system
specific code should malloc a larger structure that contains a nested
\code{\link{PangoFontMap}}, fill in the klass member of the nested \code{\link{PangoFontMap}} with a
pointer to an appropriate \code{PangoFontMapClass}, then call
\code{pangoFontMapInit()} on the structure.
The \code{\link{PangoFontMap}} structure contains one member which the implementation
fills in.
}
\item{\code{PangoFontset}}{
A \code{\link{PangoFontset}} represents a set of \code{\link{PangoFont}} to use
when rendering text. It is the result of resolving a
\code{\link{PangoFontDescription}} against a particular \code{\link{PangoContext}}.
It has operations for finding the component font for
a particular Unicode character, and for finding a composite
set of metrics for the entire fontset.
}
\item{\code{PangoFontsetSimple}}{
\code{\link{PangoFontsetSimple}} is an implementation of the abstract
\code{\link{PangoFontset}} base class in terms of a list of fonts,
which the creator provides when constructing the
\code{\link{PangoFontsetSimple}}.
}
}}
\section{Enums and Flags}{\describe{
\item{\code{PangoStyle}}{
An enumeration specifying the various slant styles possible for a font.
\describe{
\item{\code{normal}}{ the font is upright.}
\item{\code{oblique}}{ the font is slanted, but in a roman style.}
\item{\code{italic}}{ the font is slanted in an italic style.}
}
}
\item{\code{PangoWeight}}{
An enumeration specifying the weight (boldness) of a font. This is a numerical
value ranging from 100 to 900, but there are some predefined values:
\describe{
\item{\code{ultralight}}{the ultralight weight (= 200)}
\item{\code{light}}{ the light weight (=300)}
\item{\code{normal}}{the default weight (= 400)}
\item{\code{semibold}}{a weight intermediate between normal and bold (=600)}
\item{\code{bold}}{the bold weight (= 700)}
\item{\code{ultrabold}}{the ultrabold weight (= 800)}
\item{\code{heavy}}{the heavy weight (= 900)}
}
}
\item{\code{PangoVariant}}{
An enumeration specifying capitalization variant of the font.
\describe{
\item{\code{normal}}{A normal font.}
\item{\code{small-caps}}{A font with the lower case characters
replaced by smaller variants of the capital characters.}
}
}
\item{\code{PangoStretch}}{
An enumeration specifying the width of the font relative to other designs
within a family.
\describe{
\item{\code{ultra-condensed}}{ultra condensed width}
\item{\code{extra-condensed}}{extra condensed width}
\item{\code{condensed}}{condensed width}
\item{\code{semi-condensed}}{semi condensed width}
\item{\code{normal}}{the normal width}
\item{\code{semi-expanded}}{semi expanded width}
\item{\code{expanded}}{expanded width}
\item{\code{extra-expanded}}{extra expanded width}
\item{\code{ultra-expanded}}{ultra expanded width}
}
}
\item{\code{PangoFontMask}}{
The bits in a \code{\link{PangoFontMask}} correspond to fields in a
\code{\link{PangoFontDescription}} that have been set.
\describe{
\item{\code{family}}{the font family is specified.}
\item{\code{style}}{the font style is specified.}
\item{\code{variant}}{the font variant is specified.}
\item{\code{weight}}{the font weight is specified.}
\item{\code{stretch}}{the font stretch is specified.}
\item{\code{size}}{the font size is specified.}
}
}
}}
\section{User Functions}{\describe{\item{\code{PangoFontsetForeachFunc(fontset, font, data)}}{
A callback function used by \code{\link{pangoFontsetForeach}} when enumerating
the fonts in a fontset.
Since 1.4
\describe{
\item{\code{fontset}}{[\code{\link{PangoFontset}}] a \code{\link{PangoFontset}}}
\item{\code{font}}{[\code{\link{PangoFont}}] a font from \code{fontset}}
\item{\code{data}}{[R object] callback data}
}
\emph{Returns:} [logical] if \code{TRUE}, stop iteration and return immediately.
}}}
\references{\url{http://developer.gnome.org/doc/API/2.0/pango/pango-Fonts.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Fuzzer-generated regression input for the internal C++ routine
# gjam:::tnormRcpp (truncated-normal sampler).  'hi' and 'lo' are
# subnormal doubles near zero and sig = 0 degenerates the distribution;
# the test only checks that the call returns without crashing.
testlist <- list(hi = 9.88131291682493e-324, lo = 4.94065645841247e-323, mu = 0, sig = 0)
# NOTE(review): hi < lo here (truncation bounds appear swapped) --
# presumably intentional for fuzzing; confirm against tnormRcpp's API.
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)   # print the structure of whatever the routine returned
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
## Daily stock-data pipeline: retrieve symbol data, cache it to .RData,
## then write one per-trading-day dataset into data/data_all/.
suppressMessages(library(quantmod))
suppressMessages(library(dplyr))
rm(list=ls())   # NOTE(review): wiping the workspace inside a script is risky
setwd("F:/Financial_Model")
# load("data/data_newest.RData")
## source files (project-local helpers: get_Symbols, data_retrieve, ...)
source("script/get_Symbols.R")
source("script/data_retrieve.R")
source("script/data_generating.R")
source("script/get_endpoint.R")
symbols = get_Symbols()
stock_list = lapply(symbols[1:50], function(x) data_retrieve_single(x))
# NOTE(review): 'test_date' is referenced here but is only defined later,
# inside the for-loop below -- this line errors when the script is run
# top-to-bottom.  Confirm the intended execution order.
stock_index = lapply(stock_list, function(x) data_generating_single(x, test_date))
dataset = data_retrieve(symbols)
save(file="data/data_newest.RData", symbols, dataset)
load("data/data_newest.RData")
# Hyper parameters
starting_day = as.Date("2018-01-25")
# ending_day = starting_day
ending_day = Sys.Date()
n=65   # window-length parameter forwarded to data_generating()
p=14   # second parameter forwarded to data_generating()
## Start creating Day-wise datasets
for (i in starting_day:ending_day){
# NOTE(review): as.Date() on a bare number needs an 'origin' on R < 4.3;
# presumably this ran on a version/setup where the default applied.
test_date = as.character(as.Date(i))
if (length(dataset[test_date,])>2000){
result = data_generating(symbols, dataset, test_date=test_date, n=n, p=p)
# Normalise raw prices by the last column.
# NOTE(review): raw_data_norm is computed but never saved or used below.
raw_data_norm = result$rawdata / result$rawdata[,ncol(result$rawdata)]
Data_Raw = cbind(result$rawdata, result$rawdata_index)
Data_Index = result$indexing
Data_label = result$label
output = paste("data/data_all/dataset_",test_date,".RData",sep="")
save(file=output,Data_Raw, Data_Index, Data_label, test_date, n, p)
cat(paste("\nFinished ", test_date, " Data Generation!\n",sep=""))
}else{
cat(paste(test_date, " is a holiday!\n", sep=""))
}
}
| /Main_Proc_Ver1.R | no_license | seldas/Stock_Model_Test | R | false | false | 1,440 | r | suppressMessages(library(quantmod))
suppressMessages(library(dplyr))
rm(list=ls())
setwd("F:/Financial_Model")
# load("data/data_newest.RData")
## source files
source("script/get_Symbols.R")
source("script/data_retrieve.R")
source("script/data_generating.R")
source("script/get_endpoint.R")
symbols = get_Symbols()
stock_list = lapply(symbols[1:50], function(x) data_retrieve_single(x))
stock_index = lapply(stock_list, function(x) data_generating_single(x, test_date))
dataset = data_retrieve(symbols)
save(file="data/data_newest.RData", symbols, dataset)
load("data/data_newest.RData")
# Hyper parameters
starting_day = as.Date("2018-01-25")
# ending_day = starting_day
ending_day = Sys.Date()
n=65
p=14
## Start creating Day-wise datasets
for (i in starting_day:ending_day){
test_date = as.character(as.Date(i))
if (length(dataset[test_date,])>2000){
result = data_generating(symbols, dataset, test_date=test_date, n=n, p=p)
raw_data_norm = result$rawdata / result$rawdata[,ncol(result$rawdata)]
Data_Raw = cbind(result$rawdata, result$rawdata_index)
Data_Index = result$indexing
Data_label = result$label
output = paste("data/data_all/dataset_",test_date,".RData",sep="")
save(file=output,Data_Raw, Data_Index, Data_label, test_date, n, p)
cat(paste("\nFinished ", test_date, " Data Generation!\n",sep=""))
}else{
cat(paste(test_date, " is a holiday!\n", sep=""))
}
}
|
## Bayesian Poisson-regression / proportional-hazards style model fitted
## via a matched-curvature normal approximation to the likelihood and an
## independence Metropolis-Hastings sampler with multivariate t(4)
## candidates.
##
## Arguments:
##   y          - vector of observed event counts.
##   t          - vector of exposure/follow-up times (same length as y).
##   x          - covariate vector or matrix (one column per covariate).
##   steps      - number of MCMC draws.
##   priorMean, priorVar - optional multivariate normal prior; when both
##                are NULL the posterior equals the matched-curvature
##                likelihood.
##   mleMean    - optional starting point for the iterative MLE search.
##   mleVar     - overwritten internally; the supplied value is unused.
##   startValue - optional first row of the candidate matrix.
##   randomSeed - optional RNG seed for reproducibility.
##   plots      - if TRUE, draw trace/ACF plots for each coefficient.
## Returns (invisibly) list(beta = data frame of posterior draws,
##                          mleMean, mleVar).
## NOTE(review): describe() is not base R (epicalc in this file's context).
BayesCPH = function(y, t, x, steps = 1000,
priorMean = NULL, priorVar = NULL,
mleMean = NULL, mleVar,
startValue = NULL, randomSeed = NULL,
plots = FALSE){
if(!is.null(randomSeed))
set.seed(randomSeed)
nObs = length(y)
## Promote a covariate vector to a one-column matrix.
if(is.vector(x))
x = as.matrix(x, ncol = 1)
nParameters = ncol(x) + 1 ## number of covariates + intercept
if(!is.null(startValue)){
if(length(startValue) < nParameters){
stop("You must have as many starting values as you have model parameters")
}
}
## initial mean of the matched curvature likelihood
if(is.null(mleMean))
mleMean = c(log(mean(y)), rep(0, nParameters - 1))
## Design matrix with an intercept column.
X = cbind(rep(1 , nObs) , x)
## t(X) still resolves to base::t even though the argument 't' shadows
## the name: R skips non-function bindings when a name is called.
Xt = t(X)
## IRLS-style iteration computing the matched-curvature normal
## approximation to the Poisson likelihood: working response Y,
## weights Mu, then the weighted-least-squares mean and variance.
calcMatchedCurvatureNormLike = function(){
betaX = X %*% mleMean
Mu = t * exp(betaX)
Vdiag = Mu
Y = betaX + (y - Mu) / Mu
## I have no idea why the diag command doesn't work as it should:
## e.g. Vyinv = diag(Vdiag, nrow = length(Vdiag))
## therefore this two-step procedure is needed
Vyinv = matrix(0, nrow = nObs, ncol = nObs)
diag(Vyinv) = Vdiag
XtV = Xt %*% Vyinv
VLinv = XtV %*% X
VL = solve(VLinv)
w1 = VL %*% XtV
mleMean = w1 %*% Y
## Loop iterations to converge to MLE (fixed 20 passes, no
## convergence test).
for(k in 1:20){
betaX = X %*% mleMean
Mu = t * exp(betaX)
Vdiag = Mu
Y = betaX + (y - Mu) / Mu
Vyinv = matrix(0, nrow = nObs, ncol = nObs)
diag(Vyinv) = Vdiag
XtV = Xt %*% Vyinv
VLinv = XtV %*% X
VL = solve(VLinv)
w1 = VL %*% XtV
mleMean = w1 %*% Y
}
return(list(mleMean = mleMean, mleVar = VL))
} ## calcMatchedCurvatureNormLike
## Combine the matched-curvature likelihood with the optional normal
## prior by precision-weighted updating.
normApproxPosterior = function(){
result = list(postMean = rep(0, nParameters),
postVar = matrix(0, ncol = nParameters, nrow = nParameters))
## if the prior mean and variance isn't specified then
## set it equal to the mle mean and variance
if(is.null(priorMean) & is.null(priorVar)){
result$postMean = mleMean
result$postVar = mleVar
}else{
mleVarInv = solve(mleVar)
priorVarInv = solve(priorVar)
postPrec = mleVarInv + priorVarInv
result$postVar = solve(postPrec)
## NOTE(review): 'postVar' is not defined at this point -- it is only
## assigned in the enclosing function AFTER this helper returns -- so
## this branch errors whenever a prior is supplied; it presumably
## should read result$postVar.  Also '*' below is elementwise; the
## precision-weighted mean would normally use %*%.  Left unchanged
## pending confirmation.
w2 = postVar %*% priorVarInv
w4 = w2 * priorMean
w3 = postVar %*% mleVarInv
w5 = w3 * mleMean
result$postMean = w4 + w5
}
return(result)
}
#debug(calcMatchedCurvatureNormLike)
mleParams = calcMatchedCurvatureNormLike()
mleMean = mleParams$mleMean
mleVar = mleParams$mleVar
posterior = normApproxPosterior()
postMean = posterior$postMean
postVar = posterior$postVar
## Candidate draws: multivariate t(4), centred at postMean, scaled by the
## Cholesky factor of postVar.
U = chol(postVar)
candBeta = matrix(rt(steps * nParameters, df = 4), ncol = nParameters)
if(!is.null(startValue))
candBeta[1,]=startValue
WM2 = candBeta %*% U
WM3 = matrix(rep(postMean , rep(steps,nParameters)),ncol = nParameters)
WM4 = WM2 + WM3
V2 = cov(WM4)   # NOTE(review): computed but never used below
ft0 = apply(dt(candBeta, df = 4), 1, prod)
ftn = apply(dnorm(candBeta), 1, prod)
## NOTE(review): dividing by 1 makes q1 just the t(4) density and leaves
## 'ftn' unused -- possibly 'ft0 / ftn' was intended.  Confirm.
q1 = ft0 / 1
## Metropolis-Hastings
## Poisson log-likelihood contribution of each candidate row, built
## column-by-column (one column per observation), then summed per row.
BetaXt = WM4 %*% Xt
BetaXt = exp(BetaXt)
for(j in 1:nObs)
BetaXt[ , j] = -t[j] * BetaXt[,j] + y[j] * log(t[j] * BetaXt[,j])
logg1 = rowSums(BetaXt)
logg1 = logg1 - max(logg1)   # rescale for numerical stability
#g1 = exp(logg1)
logq1 = log(q1)
u = runif(steps)
i1 = 1                       # index of the current (last accepted) state
betaSample = WM4
## Independence-chain accept/reject sweep over the pre-drawn candidates.
for(n in 2:steps){
alpha = exp(logq1[i1] + logg1[n] - logq1[n] - logg1[i1])
alpha = ifelse(alpha>1, 1, alpha)
if(u[n] >= alpha){ ## reject: repeat the current state
betaSample[n,] = WM4[i1,]
}else{
betaSample[n,] = WM4[n,]
i1 = n
}
}
beta.df = data.frame(betaSample)
names(beta.df) = paste("b",0:(ncol(beta.df) - 1),sep = "")
describe(beta.df)   # posterior summary (epicalc::describe, not base R)
Mean.beta = sapply(beta.df,mean)
StdDev.beta = sapply(beta.df,sd)
Z.beta = Mean.beta / StdDev.beta
print(data.frame(Mean.beta,StdDev.beta,Z.beta))
if(plots){
## nRows = ceiling(sqrt(nParameters))
nRows = nParameters
## nCols = floor(sqrt(nParameters))
nCols = 2
oldPar = par(mfrow = c(nRows, nCols))
nms = names(beta.df)
## One trace plot and one ACF plot per coefficient.
for(i in 1:nParameters){
plot(ts(beta.df[,i]),
main = paste("Time series plot of",nms[i]),
ylab = nms[i])
plot(acf(beta.df[,i], plot = FALSE),
main = paste("Autocorrelation plot of", nms[i]))
}
par(oldPar)
}
invisible(list(beta = beta.df, mleMean = mleMean, mleVar = mleVar))
}
| /Bolstad2/R/BayesCPH.r | no_license | ingted/R-Examples | R | false | false | 5,128 | r | BayesCPH = function(y, t, x, steps = 1000,
priorMean = NULL, priorVar = NULL,
mleMean = NULL, mleVar,
startValue = NULL, randomSeed = NULL,
plots = FALSE){
if(!is.null(randomSeed))
set.seed(randomSeed)
nObs = length(y)
if(is.vector(x))
x = as.matrix(x, ncol = 1)
nParameters = ncol(x) + 1 ## number of covariates + intercept
if(!is.null(startValue)){
if(length(startValue) < nParameters){
stop("You must have as many starting values as you have model parameters")
}
}
## inital mean of the matched curvature likelihood
if(is.null(mleMean))
mleMean = c(log(mean(y)), rep(0, nParameters - 1))
X = cbind(rep(1 , nObs) , x)
Xt = t(X)
calcMatchedCurvatureNormLike = function(){
betaX = X %*% mleMean
Mu = t * exp(betaX)
Vdiag = Mu
Y = betaX + (y - Mu) / Mu
## I have no idea why the diag command doesn't work as it should:
## e.g. Vyinv = diag(Vdiag, nrow = length(Vdiag))
## therefore this two-step procedure is needed
Vyinv = matrix(0, nrow = nObs, ncol = nObs)
diag(Vyinv) = Vdiag
XtV = Xt %*% Vyinv
VLinv = XtV %*% X
VL = solve(VLinv)
w1 = VL %*% XtV
mleMean = w1 %*% Y
## Loop iterations to converge to MLE
for(k in 1:20){
betaX = X %*% mleMean
Mu = t * exp(betaX)
Vdiag = Mu
Y = betaX + (y - Mu) / Mu
Vyinv = matrix(0, nrow = nObs, ncol = nObs)
diag(Vyinv) = Vdiag
XtV = Xt %*% Vyinv
VLinv = XtV %*% X
VL = solve(VLinv)
w1 = VL %*% XtV
mleMean = w1 %*% Y
}
return(list(mleMean = mleMean, mleVar = VL))
} ## calcMatchedCurvatureNormLike
normApproxPosterior = function(){
result = list(postMean = rep(0, nParameters),
postVar = matrix(0, ncol = nParameters, nrow = nParameters))
## if the prior mean and variance isn't specified then
## set it equal to the mle mean and variance
if(is.null(priorMean) & is.null(priorVar)){
result$postMean = mleMean
result$postVar = mleVar
}else{
mleVarInv = solve(mleVar)
priorVarInv = solve(priorVar)
postPrec = mleVarInv + priorVarInv
result$postVar = solve(postPrec)
w2 = postVar %*% priorVarInv
w4 = w2 * priorMean
w3 = postVar %*% mleVarInv
w5 = w3 * mleMean
result$postMean = w4 + w5
}
return(result)
}
#debug(calcMatchedCurvatureNormLike)
mleParams = calcMatchedCurvatureNormLike()
mleMean = mleParams$mleMean
mleVar = mleParams$mleVar
posterior = normApproxPosterior()
postMean = posterior$postMean
postVar = posterior$postVar
U = chol(postVar)
candBeta = matrix(rt(steps * nParameters, df = 4), ncol = nParameters)
if(!is.null(startValue))
candBeta[1,]=startValue
WM2 = candBeta %*% U
WM3 = matrix(rep(postMean , rep(steps,nParameters)),ncol = nParameters)
WM4 = WM2 + WM3
V2 = cov(WM4)
ft0 = apply(dt(candBeta, df = 4), 1, prod)
ftn = apply(dnorm(candBeta), 1, prod)
q1 = ft0 / 1
## Metropolis-Hastings
BetaXt = WM4 %*% Xt
BetaXt = exp(BetaXt)
for(j in 1:nObs)
BetaXt[ , j] = -t[j] * BetaXt[,j] + y[j] * log(t[j] * BetaXt[,j])
logg1 = rowSums(BetaXt)
logg1 = logg1 - max(logg1)
#g1 = exp(logg1)
logq1 = log(q1)
u = runif(steps)
i1 = 1
betaSample = WM4
for(n in 2:steps){
alpha = exp(logq1[i1] + logg1[n] - logq1[n] - logg1[i1])
alpha = ifelse(alpha>1, 1, alpha)
if(u[n] >= alpha){ ## reject
betaSample[n,] = WM4[i1,]
}else{
betaSample[n,] = WM4[n,]
i1 = n
}
}
beta.df = data.frame(betaSample)
names(beta.df) = paste("b",0:(ncol(beta.df) - 1),sep = "")
describe(beta.df)
Mean.beta = sapply(beta.df,mean)
StdDev.beta = sapply(beta.df,sd)
Z.beta = Mean.beta / StdDev.beta
print(data.frame(Mean.beta,StdDev.beta,Z.beta))
if(plots){
## nRows = ceiling(sqrt(nParameters))
nRows = nParameters
## nCols = floor(sqrt(nParamerts))
nCols = 2
oldPar = par(mfrow = c(nRows, nCols))
nms = names(beta.df)
for(i in 1:nParameters){
plot(ts(beta.df[,i]),
main = paste("Time series plot of",nms[i]),
ylab = nms[i])
plot(acf(beta.df[,i], plot = FALSE),
main = paste("Autocorrelation plot of", nms[i]))
}
par(oldPar)
}
invisible(list(beta = beta.df, mleMean = mleMean, mleVar = mleVar))
}
|
rm(list=ls())   # NOTE(review): clearing the workspace in a script is risky
#reading the survival data into R (tab-separated, with a header row)
Control.and.Treatment<-read.table("C:/Users/Owner/Documents/memphisclassesbooks/FALL2013/R PROGRAMMING/book2.txt",header=T,sep="\t")
Control.and.Treatment[,2]   # print the group column as a quick sanity check
#selecting only Control and Treatment group rows
# NOTE(review): "ControlGoup" is misspelled but must match the literal
# spelling used in the data file -- confirm before "fixing" it.
x<-Control.and.Treatment[Control.and.Treatment[,2]=="ControlGoup",]
y<-Control.and.Treatment[Control.and.Treatment[,2]=="TreatmentGroup",]
#x= Control Group
#y=Treatment Group
# Keep only the first column (the survival measurement) of each group.
Control.Group<-x[,1]
Treatment.Group<-y[,1]
## Draw stacked histograms of the two samples to eyeball normality.
##
## Args:
##   x: numeric vector, control-group survival values.
##   y: numeric vector, treatment-group survival values.
## Fixes three defects in the original: it ignored its arguments and
## plotted the globals Control.Group/Treatment.Group instead, it reserved
## three panel rows for only two histograms, and it never restored par().
Normal.plot<-function(x,y){
old.par <- par(mfrow = c(2, 1))
on.exit(par(old.par), add = TRUE)   # restore graphical state on exit
hist(x, breaks = "Sturges", main = "Histogram of control group")
hist(y, breaks = "Sturges", main = "Histogram of treatment group")
}
Normal.plot(Control.Group,Treatment.Group)
######function to compute t test#########################
x=x[,1]
y=y[,1]
## Two-sample pooled-variance t test.
##
## Args:
##   x, y        : numeric vectors, the two samples.
##   alternative : one of "Two.sided", "less", "greater" (partial
##                 matching allowed via match.arg).
##   df          : kept for backward compatibility; the original always
##                 recomputed df = n + m - 2 and so does this version.
## Returns list(pvalue, Test.statistic); also prints the hypotheses and
## the decision at the fixed 5% level.
## Fixes vs the original: the accept/reject decision was printed TWICE
## for the "Two.sided" and "greater" alternatives, the "less" branch
## contained a no-op comparison (alternative=="less"), and the vector
## default for 'alternative' made if(alternative=="Two.sided") fail on
## modern R; match.arg() resolves the choice up front.
my.t.test<-function(x,y,alternative = c("Two.sided", "less", "greater"),df){
alternative <- match.arg(alternative)
############finding mean of control and treatment#############
xbar <- mean(x)
ybar <- mean(y)
n <- length(x)
m <- length(y)
df <- n + m - 2            # pooled-test degrees of freedom (recomputed)
critical.value <- 0.05     # fixed significance level
##############finding pooled variance and test statistic##################
sp2 <- ((n - 1) * var(x) + (m - 1) * var(y)) / (n + m - 2)
T1 <- (xbar - ybar) / sqrt(sp2 * (1 / m + 1 / n))
## p-value according to the selected alternative.
if (alternative == "Two.sided") {
cat('H0:mean.x=mean.y','\n')
cat('Ha:mean.x != meany','\n')
p.value <- 2 * (1 - pt(abs(T1), df))
} else if (alternative == "greater") {
cat('H0:mean.x=mean.y','\n')
cat('Ha:mean.x > meany','\n')
p.value <- 1 - pt(T1, df)
} else {
cat('H0:mean.x=mean.y','\n')
cat('Ha:mean.x <meany','\n')
p.value <- pt(T1, df)
}
## Print the decision exactly once.
if (p.value <= critical.value) {
cat('Reject H0','\n')
} else {
cat('Fail to reject H0','\n')
}
return(list(pvalue = p.value, Test.statistic = T1))
}
############function of my t test###################
my.t.test(x,y,alternative="Two.sided",15)
####################2 sample t test in R##########################
t.test(x, y,alternative = "two.sided",mu = 0, paired = FALSE, var.equal = TRUE,conf.level = 0.95)
######################## two sample permutation test#######################
## Simulate the permutation null distribution of the difference in group
## means by randomly re-assigning the pooled observations to two groups.
##
## Args:
##   x, y : numeric vectors, the two samples.
## Returns a numeric vector of floor(choose(n+m, n)/2) simulated mean
## differences -- the same iteration count the original used.
## Fixes vs the original: the guard 'if (numperm > numperm)' was always
## FALSE (dead code) and is removed; 'as.numeric(num.iterations)' created
## a scalar rather than a pre-allocated vector, so the result was grown
## element by element -- it is now properly pre-allocated.
my.permutation.dist<-function(x,y){
n <- length(x)
m <- length(y)
N <- n + m
numperm <- choose(N, n)              # number of distinct group assignments
num.iterations <- floor(numperm / 2) # same effective count as 1:(numperm/2)
## Pooled sample; each iteration re-splits it into two groups.
xy <- c(x, y)
mean.difference <- numeric(num.iterations)   # pre-allocate the result
for (i in seq_len(num.iterations)) {
## Draw n positions for the "control" group; the rest are "treatment".
perm <- sample(seq_len(N), n, replace = FALSE)
control.perm <- xy[perm]
treatment.perm <- xy[-perm]
mean.difference[i] <- mean(control.perm) - mean(treatment.perm)
}
return(mean.difference)
}
my.permutation.dist(x,y)
############################## Plot of hitogram of difference in means###########################
hist(my.permutation.dist(x,y),breaks = "Sturges", xlab='Difference in Control and Traetment means', prob=T, main='')
#################Adding a line to indicate the observed value########
T<-(mean(x)-mean(y))
abline(v =T, untf = FALSE, col ='blue' , lty = 2, lwd = 2)
###########p-value#################
## Two-sided permutation-test p-value: the proportion of simulated
## absolute mean differences at least as large as the observed one.
permutation.p.value <- function(x, y) {
  observed_diff <- mean(x) - mean(y)
  perm_diffs <- my.permutation.dist(x, y)
  mean(abs(perm_diffs) >= abs(observed_diff))
}
permutation.p.value(x,y)
| /Fall2013/programming/hw2.R | no_license | NanaAkwasiAbayieBoateng/MemphisClasses | R | false | false | 4,092 | r | rm(list=ls())
#reading the survival data into R
Control.and.Treatment<-read.table("C:/Users/Owner/Documents/memphisclassesbooks/FALL2013/R PROGRAMMING/book2.txt",header=T,sep="\t")
Control.and.Treatment[,2]
#selecting only Control and Treatment group rows
x<-Control.and.Treatment[Control.and.Treatment[,2]=="ControlGoup",]
y<-Control.and.Treatment[Control.and.Treatment[,2]=="TreatmentGroup",]
#x= Control Group
#y=Treatment Group
Control.Group<-x[,1]
Treatment.Group<-y[,1]
Normal.plot<-function(x,y){
par(mfrow=c(3,1))
hist(Control.Group,breaks = "Sturges")
hist(Treatment.Group,breaks = "Sturges")
}
Normal.plot(Control.Group,Treatment.Group)
######function to compute t test#########################
x=x[,1]
y=y[,1]
my.t.test<-function(x,y,alternative = c("Two.sided", "less", "greater"),df){
############finding mean of control and treatment#############
xbar=mean(x)
ybar=mean(y)
n=length(x)
m=length(y)
df=m+n-2
critical.value=0.05
##############finding pooled variance and test statistic##################
sp2=((n-1)*var(x)+(m-1)*var(y))/(n+m-2)
T1=(xbar-ybar)/sqrt(sp2*(1/m+1/n))
if(alternative== "Two.sided"){
cat('H0:mean.x=mean.y','\n')
cat('Ha:mean.x != meany','\n')
####################finding p-value for two tailed test######################
p.value=2*(1-pt(abs(T1),df))
if (p.value<=critical.value){
cat('Reject H0','\n')
}else{
cat('Fail to reject H0','\n')
}
####################finding p-value for rigth tailed test######################
}else if(alternative=="greater"){
cat('H0:mean.x=mean.y','\n')
cat('Ha:mean.x > meany','\n')
p.value=1-pt(T1,df)
if (p.value<=critical.value){
cat('Reject H0','\n')
}else{
cat('Fail to reject H0','\n')
}
}else{
####################finding p-value for left tailed test######################
alternative=="less"
cat('H0:mean.x=mean.y','\n')
cat('Ha:mean.x <meany','\n')
p.value=pt(T1,df)}
if (p.value<=critical.value){
cat('Reject H0','\n')
}else{
cat('Fail to reject H0','\n')
}
return(list(pvalue=p.value,Test.statistic=T1 ))
}
############function of my t test###################
my.t.test(x,y,alternative="Two.sided",15)
####################2 sample t test in R##########################
t.test(x, y,alternative = "two.sided",mu = 0, paired = FALSE, var.equal = TRUE,conf.level = 0.95)
######################## two sample permutation test#######################
my.permutation.dist<-function(x,y){
n<-length(x)
m<-length(y)
N<-n+m
numperm<-choose(N,n)
num.iterations<-numperm/2
################# the observed mean difference###################
T<-mean(x)-mean(y)
##################combined sample of control and treatment################
xy=c(x,y)
if(numperm>numperm){
cat("Number of permutations is too large,compute a smaller number of permutations as a sample of the total number of permutations")
}else{
mean.difference <- as.numeric(num.iterations)
for(i in 1:num.iterations){
# Sample numbers 1-N ,n times and store in perm
perm<-sample(1:N, n, replace = FALSE, prob = NULL)
# Assign the sampled values to control.perm
control.perm <- xy[perm]
#Assign remainder to treatment.perm
treatment.perm <- xy[-perm]
mean.difference[i] <- mean(control.perm) - mean(treatment.perm)
}
}
return(mean.difference)
}
my.permutation.dist(x,y)
############################## Plot of hitogram of difference in means###########################
hist(my.permutation.dist(x,y),breaks = "Sturges", xlab='Difference in Control and Traetment means', prob=T, main='')
#################Adding a line to indicate the observed value########
T<-(mean(x)-mean(y))
abline(v =T, untf = FALSE, col ='blue' , lty = 2, lwd = 2)
###########p-value#################
permutation.p.value=function(x,y){
T<-(mean(x)-mean(y))
p.value=mean(abs(my.permutation.dist(x,y)) >= abs(T))
return(p.value)
}
permutation.p.value(x,y)
|
# Title     : Space Clusters
# Objective : k-means clustering (k = 6) of earthquake records, with an
#             elbow diagnostic and CSV export of centers and labels.
# Created by: NSora
# Created on: 2020/11/4
# Point install.packages() at a domestic CRAN mirror.
local({r <- getOption("repos")
r["CRAN"] <- "http://mirrors.tuna.tsinghua.edu.cn/CRAN/"
options(repos=r)})
# NOTE(review): installing packages on every run is wasteful; consider
# guarding with requireNamespace().
install.packages("fpc")
install.packages("factoextra")
library(sp)
library(maptools)
library(rgdal)
library(spatstat)
library(ggplot2)
library(stats)
library(fpc)
library(cluster)
library(factoextra)
csv_path <- "eqlist2.csv"
eq <- read.csv(csv_path, header = FALSE, sep = ",")
# Feature matrix: negated column V5 plus V6, V11, V12.
# NOTE(review): the meaning of these columns is not recoverable from this
# script -- document the eqlist2.csv schema.
pv1 <- data.frame(-1*eq$V5, eq$V6,eq$V11,eq$V12)
pv2 <- scale(pv1)   # standardise features before k-means
# Elbow (within-sum-of-squares) plot; the dashed line marks k = 6.
fviz_nbclust(pv2, kmeans, method = "wss") + geom_vline(xintercept = 6, linetype=2)
ek <- kmeans(pv2, centers = 6, iter.max = 100)
fviz_cluster(ek, data=pv2)
res <- ek$cluster
cen <- ek$centers
# Append the cluster label to the raw rows and persist the results.
d2 <- cbind(eq, type=ek$cluster)
write.csv(cen, "centers.csv")
write.csv(d2, "clusters.csv")
# Objective : TODO
# Created by: NSora
# Created on: 2020/11/4
local({r <- getOption("repos")
r["CRAN"] <- "http://mirrors.tuna.tsinghua.edu.cn/CRAN/"
options(repos=r)})
install.packages("fpc")
install.packages("factoextra")
library(sp)
library(maptools)
library(rgdal)
library(spatstat)
library(ggplot2)
library(stats)
library(fpc)
library(cluster)
library(factoextra)
csv_path <- "eqlist2.csv"
eq <- read.csv(csv_path, header = FALSE, sep = ",")
pv1 <- data.frame(-1*eq$V5, eq$V6,eq$V11,eq$V12)
pv2 <- scale(pv1)
fviz_nbclust(pv2, kmeans, method = "wss") + geom_vline(xintercept = 6, linetype=2)
ek <- kmeans(pv2, centers = 6, iter.max = 100)
fviz_cluster(ek, data=pv2)
res <- ek$cluster
cen <- ek$centers
d2 <- cbind(eq, type=ek$cluster)
write.csv(cen, "centers.csv")
write.csv(d2, "clusters.csv") |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a matrix wrapper that can cache its inverse.
## NOTE: the original file contained an unresolved git merge conflict
## (<<<<<<< / ======= / >>>>>>> markers) and did not parse; this version
## resolves it by keeping the implemented (HEAD) side.
##
## Args:
##   x: the matrix to wrap (assumed invertible when cacheSolve is used).
## Returns a list of four accessors:
##   set(y)          - replace the matrix and invalidate any cached inverse
##   get()           - return the stored matrix
##   setinverse(inv) - store a computed inverse in the cache
##   getinverse()    - return the cached inverse, or NULL if not computed
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL                 # cached inverse; reset whenever x changes
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inversemat) m <<- inversemat
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" made by makeCacheMatrix.
## The inverse is computed with solve() on first use and cached; later
## calls return the cached value (announced via message()).  Extra
## arguments are forwarded to solve().
## NOTE: the original file contained an unresolved git merge conflict and
## did not parse; this version keeps the implemented (HEAD) side.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")   # cache hit: skip recomputation
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)                    # populate the cache for next time
  m
}
| /cachematrix.R | no_license | xavisxavis/ProgrammingAssignment2 | R | false | false | 912 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
<<<<<<< HEAD
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inversemat) m <<- inversemat
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
=======
>>>>>>> 7f657dd22ac20d22698c53b23f0057e1a12c09b7
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
<<<<<<< HEAD
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
=======
}
>>>>>>> 7f657dd22ac20d22698c53b23f0057e1a12c09b7
|
#' Small expression matrix
#'
#' This dataset is a subset of a gene expression matrix that we will use to demonstrate functionality for finding genes associated with pseudotime.
#'
#' @format A data frame with 65 rows and 744 columns:
#' \describe{
#' \item{Prog_013}{cell expression profile}
#' \item{Prog_019}{cell expression profile}
#' ...
#' }
#' @source \url{https://pubmed.ncbi.nlm.nih.gov/27365425/}
"genes.small"
| /R/genes_small.R | permissive | arc85/circletime | R | false | false | 425 | r | #' Small expression matrix
#'
#' This dataset is subset of a gene expression matrix that we will use to demonstrate functionality of finding genes associated with pseudotime.
#'
#' @format A data frame with 65 rows and 744 columns:
#' \describe{
#' \item{Prog_013}{cell expression profile}
#' \item{Prog_019}{cell expression profile}
#' ...
#' }
#' @source \url{https://pubmed.ncbi.nlm.nih.gov/27365425/}
"genes.small"
|
## Reshape EEG N200 treatment-effect data from long to wide format:
## one row per Subject, one column per Session x Condition x Measure.
library(reshape2)
# Set path and data file name.
path <- "D:/Research/Dissertation/Results/Stop Signal/Treatment/"
dataFile <- "SST_TreatEffect_N200 -ForR.csv"
# Load (arranged) data
DF <- read.csv(paste(path,dataFile,sep=""), header=TRUE, check.names=FALSE)
colnames(DF)[[1]] = "Group" # Fix first col name.
# Exclude left ROI side (for N200)
DF = subset(DF,Side !="Left")
# Remove redundant columns
DF_short <- DF[c('Group','Subject','Session','Condition','Measure','Power')]
# Convert long to wide.
# NOTE(review): ALL six retained columns -- including Power -- are passed
# as id.vars, leaving melt() with no measure variables and dcast() with no
# 'value' column; Power probably should be excluded from id.vars.  Verify
# this step produces the intended output before reuse.
DF_wide <- dcast(melt(DF_short, id.vars=c("Group","Subject","Session","Condition","Measure","Power")),
Subject+Group~Session+Condition+Measure)
# Re-arrange column order (hard-coded for the expected 10 columns).
DF_wide <- DF_wide[c(1,2,5,9,3,7,6,10,4,8)]
# Save as a csv file
write.csv(DF_wide,paste(path,"SST_TreatEffect_N200 -ForFigs.csv",sep=""))
# Set path and data file name.
path <- "D:/Research/Dissertation/Results/Stop Signal/Treatment/"
dataFile <- "SST_TreatEffect_N200 -ForR.csv"
# Load (arranged) data
DF <- read.csv(paste(path,dataFile,sep=""), header=TRUE, check.names=FALSE)
colnames(DF)[[1]] = "Group" # Fix first col name.
# Exclude left ROI side (for N200)
DF = subset(DF,Side !="Left")
# Remove redundant columns
DF_short <- DF[c('Group','Subject','Session','Condition','Measure','Power')]
# Convert long to wide
DF_wide <- dcast(melt(DF_short, id.vars=c("Group","Subject","Session","Condition","Measure","Power")),
Subject+Group~Session+Condition+Measure)
# Re-aarange column order
DF_wide <- DF_wide[c(1,2,5,9,3,7,6,10,4,8)]
# Save as a csv file
write.csv(DF_wide,paste(path,"SST_TreatEffect_N200 -ForFigs.csv",sep="")) |
## File Name: tam2mirt.aux.R
## File Version: 0.04
## File Last Change: 2017-01-18 11:02:55
##################################################################
# return lavaan syntax with fixed parameters
## Build a lavaan model-syntax string in which every item parameter is
## FIXED at the values estimated by TAM: loadings from B, thresholds from
## AXsi, and latent means/(co)variances from mean.trait / cov.trait.
##
## Arguments:
##   D          - number of latent dimensions.
##   factors    - character vector of factor names (length D).
##   B          - loading array; B[item, 2, dim] supplies the slope.
##   dat        - item response data; only colnames(dat) are used here.
##   AXsi       - item-by-category intercept matrix (first column skipped).
##   mean.trait, cov.trait - latent mean vector and covariance matrix.
##   tamobj     - fitted TAM object (unused in this fixed-parameter variant).
## Returns a single character string of lavaan syntax.
tam2mirt_fix <- function( D , factors , B , dat , AXsi ,
mean.trait , cov.trait , tamobj ){
# create lavaan syntax with constraints
lavsyn <- NULL
for (dd in 1:D){
# dd <- 1
fac.dd <- factors[dd]
# create terms for loadings: only items with nonzero loadings on this
# dimension enter the '=~' line, fixed via 'value*item'.
B2.dd <- round( B[,2,dd] , 4)
syn0 <- paste0( paste0( B2.dd[ B2.dd!=0] , "*" , colnames(dat)[ B2.dd!=0] ) , collapse="+" )
syn0 <- paste0( fac.dd , " =~ " , syn0 , "\n")
lavsyn <- paste0( lavsyn , syn0 )
}
# create syntax for intercepts: one 't<k>' threshold block per category.
maxK <- ncol(AXsi) - 1
for (kk in 1:maxK){
t1 <- round( AXsi[,kk+1] , 4 )
string1 <- paste0("t" , kk )
syn0 <- paste0( colnames(dat) , " | " , t1 , "*" , string1)
syn0 <- paste0( syn0 , collapse="\n")
hh <- ""
if (kk != maxK){ hh <- "\n" }
lavsyn <- paste0( lavsyn , syn0 , hh)
}
# guessing and slipping parameters (fixed to 0).
# NOTE(review): 'maxK == 1' is a single logical used as an index, so this
# selects ALL item names when the data are dichotomous and none otherwise
# -- presumably intentional; confirm.
itemg <- colnames(dat)[ maxK == 1 ]
lavsyn <- paste0( lavsyn , "\n" ,
paste0( paste0( itemg , " ?= 0*g1" ) , collapse="\n") )
lavsyn <- paste0( lavsyn , "\n" ,
paste0( paste0( itemg , " ?= 0*s1" ) , collapse="\n") )
# syntax for means (fixed at the estimated values)
syn0 <- paste0( factors , " ~ " , round(as.vector(mean.trait),4) , "*1" )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
# syntax for variances (fixed at the diagonal of cov.trait)
syn0 <- paste0( factors , " ~~ " , round( as.vector(diag(cov.trait)),4) , "*" ,factors )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
# syntax for covariances (fixed, upper triangle only)
if (D>1){
for (dd in 1:(D-1)){
for (ee in (dd+1):(D)){
syn0 <- paste0( factors[dd] , " ~~ " ,
round( cov.trait[dd,ee] ,4) , "*" ,factors[ee] )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
}
}
}
# finalize lavaan syntax
lavsyn <- paste0( lavsyn , " \n")
return(lavsyn)
}
##################################################################
##################################################################
# return lavaan syntax with freed parameters
## Build a lavaan model-syntax string in which item parameters are FREED
## (given parameter labels) rather than fixed: 2PL slopes are labelled
## a<dim>_<i>, thresholds t<k>_<i>, and Rasch-model (co)variances
## Cov_<d><e>.  Arguments are as in tam2mirt_fix; tamobj$irtmodel decides
## whether slopes are freed ("2PL") or stay fixed (Rasch / "tam.mml").
tam2mirt_freed <- function( D , factors , B , dat , AXsi ,
mean.trait , cov.trait , tamobj ){
# create lavaan syntax with constraints
lavsyn <- NULL
## Re-class the object so the branches below can dispatch on class().
if ( tamobj$irtmodel == "2PL" ){
class(tamobj) <- "tam.mml.2pl" }
for (dd in 1:D){
# dd <- 1
fac.dd <- factors[dd]
# create terms for loadings
B2.dd <- round( B[,2,dd] , 4)
## Rasch-type model: loadings stay fixed at their estimated values.
if (class(tamobj)=="tam.mml"){
syn0 <- paste0( paste0( B2.dd[ B2.dd!=0] , "*" , colnames(dat)[ B2.dd!=0] ) , collapse="+" )
syn0 <- paste0( fac.dd , " =~ " , syn0 , "\n")
}
## 2PL model: loadings freed with labels a<dim>_<item index>.
if (class(tamobj)=="tam.mml.2pl"){
d4 <- paste0( B2.dd[ B2.dd!=0] )
d4 <- paste0( "a" , dd , "_" , seq(1,length(d4) ) )
syn0 <- paste0( paste0( d4 , "*" , colnames(dat)[ B2.dd!=0] ) ,
collapse="+" )
syn0 <- paste0( fac.dd , " =~ " , syn0 , "\n")
}
lavsyn <- paste0( lavsyn , syn0 )
}
# create syntax for intercepts: thresholds freed with labels t<k>_<i>.
maxK <- ncol(AXsi) - 1
for (kk in 1:maxK){
t1 <- round( AXsi[,kk+1] , 4 )
string1 <- paste0("t" , kk )
t1 <- paste0(string1, "_" , seq(1,length(t1) ) )
syn0 <- paste0( colnames(dat) , " | " , t1 , "*" , string1)
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , syn0)
}
# syntax for means (kept fixed at the estimated values)
syn0 <- paste0( factors , " ~ " , round(as.vector(mean.trait),4) , "*1" )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
# syntax for variances: freed (labelled Cov_<d><d>) for Rasch models,
# fixed at the estimated values for 2PL models.
if (class(tamobj)=="tam.mml"){
g1 <- paste0( "Cov_" , 1:D , 1:D )
syn0 <- paste0( factors , " ~~ " , g1 ,
"*" ,factors )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
}
if (class(tamobj)=="tam.mml.2pl"){
syn0 <- paste0( factors , " ~~ " , round( as.vector(diag(cov.trait)),4) , "*" ,factors )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
}
# syntax for covariances: freed with labels Cov_<d><e>.
if (D>1){
for (dd in 1:(D-1)){
for (ee in (dd+1):(D)){
syn0 <- paste0( factors[dd] , " ~~ " ,
paste0("Cov_" ,dd ,ee) , "*" ,factors[ee] )
syn0 <- paste0( syn0 , collapse="\n")
lavsyn <- paste0( lavsyn , "\n" , syn0 )
}
}
}
# finalize lavaan syntax
lavsyn <- paste0( lavsyn , " \n")
return(lavsyn)
}
##################################################################
| /R/tam2mirt.aux.R | no_license | SanVerhavert/sirt | R | false | false | 4,539 | r | ## File Name: tam2mirt.aux.R
## File Version: 0.04
## File Last Change: 2017-01-18 11:02:55
##################################################################
# return lavaan syntax with fixed parameters
#----------------------------------------------------------------
# Build lavaan model syntax in which all parameters are *fixed* at the
# values estimated by TAM.
#
# Arguments:
#   D          -- number of latent dimensions
#   factors    -- character vector with the D factor names
#   B          -- TAM loading array (items x categories x dimensions)
#   dat        -- item response data; only colnames(dat) (item names) are used
#   AXsi       -- item-category intercept matrix (items x (maxK+1))
#   mean.trait -- latent means (length D)
#   cov.trait  -- latent covariance matrix (D x D)
#   tamobj     -- fitted TAM object (unused here; kept for a uniform signature)
#
# Returns: a single character string with lavaan model syntax.
tam2mirt_fix <- function( D , factors , B , dat , AXsi ,
        mean.trait , cov.trait , tamobj ){
    item.names <- colnames(dat)
    #--- factor loadings: one "=~" statement per dimension, fixed values
    lavsyn <- NULL
    for (dim.idx in 1:D){
        load.vals <- round( B[ , 2, dim.idx] , 4 )
        keep <- load.vals != 0
        rhs <- paste( paste0( load.vals[keep] , "*" , item.names[keep] ),
                      collapse = "+" )
        lavsyn <- paste0( lavsyn , factors[dim.idx] , " =~ " , rhs , "\n" )
    }
    #--- item intercepts: "item | value*tk" lines, one block per category
    maxK <- ncol(AXsi) - 1
    for (kk in 1:maxK){
        thresh <- round( AXsi[ , kk + 1 ] , 4 )
        label <- paste0( "t" , kk )
        block <- paste( paste0( item.names , " | " , thresh , "*" , label ),
                        collapse = "\n" )
        # separate consecutive threshold blocks with a newline
        sep <- if ( kk < maxK ) "\n" else ""
        lavsyn <- paste0( lavsyn , block , sep )
    }
    #--- pseudo-guessing and slipping parameters, fixed at 0
    # NOTE(review): `maxK == 1` is a scalar condition, so itemg is either all
    # items (dichotomous data) or empty -- presumably intended; confirm.
    itemg <- item.names[ maxK == 1 ]
    guess.block <- paste( paste0( itemg , " ?= 0*g1" ) , collapse = "\n" )
    slip.block <- paste( paste0( itemg , " ?= 0*s1" ) , collapse = "\n" )
    lavsyn <- paste0( lavsyn , "\n" , guess.block )
    lavsyn <- paste0( lavsyn , "\n" , slip.block )
    #--- latent means, fixed at their estimated values
    mean.lines <- paste( paste0( factors , " ~ " ,
                      round( as.vector(mean.trait) , 4 ) , "*1" ),
                      collapse = "\n" )
    lavsyn <- paste0( lavsyn , "\n" , mean.lines )
    #--- latent variances, fixed at their estimated values
    var.lines <- paste( paste0( factors , " ~~ " ,
                      round( as.vector(diag(cov.trait)) , 4 ) , "*" , factors ),
                      collapse = "\n" )
    lavsyn <- paste0( lavsyn , "\n" , var.lines )
    #--- latent covariances, fixed at their estimated values
    if ( D > 1 ){
        for (dd in 1:(D-1)){
            for (ee in (dd+1):D){
                cov.line <- paste0( factors[dd] , " ~~ " ,
                                round( cov.trait[dd,ee] , 4 ) , "*" , factors[ee] )
                lavsyn <- paste0( lavsyn , "\n" , cov.line )
            }
        }
    }
    # finalize lavaan syntax
    paste0( lavsyn , " \n" )
}
##################################################################
##################################################################
# return lavaan syntax with freed parameters
#----------------------------------------------------------------
# Build lavaan model syntax that mirrors a fitted TAM model with
# *freed* (labelled) parameters.
#
# Arguments:
#   D          -- number of latent dimensions
#   factors    -- character vector with the D factor names
#   B          -- TAM loading array (items x categories x dimensions);
#                 only B[,2,dd] is used per dimension
#   dat        -- item response data; only colnames(dat) (item names) are used
#   AXsi       -- item-category intercept matrix (items x (maxK+1))
#   mean.trait -- latent means (length D)
#   cov.trait  -- latent covariance matrix (D x D)
#   tamobj     -- fitted TAM object; its class and $irtmodel select the
#                 Rasch-type ("tam.mml") vs. 2PL ("tam.mml.2pl") syntax
#
# Returns: a single character string with lavaan model syntax.
tam2mirt_freed <- function( D , factors , B , dat , AXsi ,
        mean.trait , cov.trait , tamobj ){
    lavsyn <- NULL
    # a model estimated with irtmodel="2PL" is handled as a tam.mml.2pl object
    if ( tamobj$irtmodel == "2PL" ){
        class(tamobj) <- "tam.mml.2pl"
    }
    # FIX: use inherits() instead of class(.) == "...": the latter yields a
    # logical vector for multi-class objects, which if() rejects from R 4.2 on.
    is.2pl <- inherits( tamobj , "tam.mml.2pl" )
    is.rasch <- ( ! is.2pl ) && inherits( tamobj , "tam.mml" )
    #--- factor loadings: one "=~" statement per dimension
    for (dd in 1:D){
        fac.dd <- factors[dd]
        B2.dd <- round( B[,2,dd] , 4)
        if ( is.rasch ){
            # Rasch-type model: loadings stay fixed at their estimated values
            syn0 <- paste0( paste0( B2.dd[ B2.dd!=0] , "*" ,
                        colnames(dat)[ B2.dd!=0] ) , collapse="+" )
            syn0 <- paste0( fac.dd , " =~ " , syn0 , "\n")
        }
        if ( is.2pl ){
            # 2PL model: loadings are freed and labelled a<dim>_<index>.
            # FIX: seq_len() instead of seq(1, length(.)) -- seq(1, 0) would
            # fabricate two labels when a dimension has no non-zero loading.
            d4 <- paste0( B2.dd[ B2.dd!=0] )
            d4 <- paste0( "a" , dd , "_" , seq_len( length(d4) ) )
            syn0 <- paste0( paste0( d4 , "*" , colnames(dat)[ B2.dd!=0] ) ,
                        collapse="+" )
            syn0 <- paste0( fac.dd , " =~ " , syn0 , "\n")
        }
        lavsyn <- paste0( lavsyn , syn0 )
    }
    #--- item intercepts, freed and labelled t<k>_<item index>
    maxK <- ncol(AXsi) - 1
    for (kk in 1:maxK){
        t1 <- round( AXsi[,kk+1] , 4 )
        string1 <- paste0("t" , kk )
        t1 <- paste0(string1, "_" , seq_len( length(t1) ) )
        syn0 <- paste0( colnames(dat) , " | " , t1 , "*" , string1)
        syn0 <- paste0( syn0 , collapse="\n")
        # FIX: separate consecutive threshold blocks with a newline (as in
        # tam2mirt_fix); the original glued them together when maxK > 1.
        hh <- if (kk != maxK) "\n" else ""
        lavsyn <- paste0( lavsyn , syn0 , hh)
    }
    #--- latent means, fixed at their estimated values
    syn0 <- paste0( factors , " ~ " , round(as.vector(mean.trait),4) , "*1" )
    syn0 <- paste0( syn0 , collapse="\n")
    lavsyn <- paste0( lavsyn , "\n" , syn0 )
    #--- latent variances
    if ( is.rasch ){
        # freed variances with labels Cov_dd
        g1 <- paste0( "Cov_" , 1:D , 1:D )
        syn0 <- paste0( factors , " ~~ " , g1 , "*" , factors )
        syn0 <- paste0( syn0 , collapse="\n")
        lavsyn <- paste0( lavsyn , "\n" , syn0 )
    }
    if ( is.2pl ){
        # 2PL: variances fixed at the estimated values
        syn0 <- paste0( factors , " ~~ " ,
                    round( as.vector(diag(cov.trait)),4) , "*" , factors )
        syn0 <- paste0( syn0 , collapse="\n")
        lavsyn <- paste0( lavsyn , "\n" , syn0 )
    }
    #--- latent covariances, freed with labels Cov_<d><e>
    if (D>1){
        for (dd in 1:(D-1)){
            for (ee in (dd+1):D){
                syn0 <- paste0( factors[dd] , " ~~ " ,
                            paste0("Cov_" , dd , ee) , "*" , factors[ee] )
                lavsyn <- paste0( lavsyn , "\n" , syn0 )
            }
        }
    }
    # finalize lavaan syntax
    lavsyn <- paste0( lavsyn , " \n")
    return(lavsyn)
}
##################################################################
|
## GVKey company codes
## include:
#   company name
#   SIC   - standard industry classification code
#   FYEAR - fiscal data year
#   SALE  - sales/turnover (net)
#   AT    - total assets

# Import the data into R as a data.table.
# BUG FIX: the path string below was missing its closing quote in the
# original, which made the entire script unparseable.
setwd("/Users/chloesegale/Desktop/econ 5529 - bayesian statistics/Final Project")
library(data.table)
data <- data.table(read.csv("400be901b8744372.csv", header = TRUE))

# Keep only the manufacturing industry (SIC 2000-3900) and complete cases;
# there should be no NAs after this step.
data <- data[sic >= 2000 & sic <= 3900, ]
data <- data[complete.cases(data), ]

# Years survived by each firm: number of fiscal years observed per gvkey.
data <- data[, surv := length(fyear), by = gvkey]
years.survived.var <- data$surv

# Keep only firms that lived the whole span of the data set
# (2016 - 1962 = 54 years).
data <- data[surv >= 54, ]
head(data)

# Unique numeric identifier for each firm.
data$id <- as.numeric(factor(data$gvkey))
J <- length(unique(data$id))  # number of firms (184 in the original data)

## Mean of sales by year, independent of firm.
data <- data[, rbar := mean(sale), by = fyear]
## Relative firm size: sale_{i,t} / mean(sale_t), stored as a column for
## visibility into the computation.
data <- data[, equation := data$sale / data$rbar]
## Log relative size is the growth measure used throughout.
data <- data[, growth := log(equation)]

### Model sketch:
#   g_{i,t} = alpha_i + p_i * g_{i,t-1} + epsilon_{i,t}
#   g_{i,t} = ln( s_{i,t} / mean(sale_t) )
## i indexes firms, t indexes time.
library(ggplot2)
library(StanHeaders)
library(rstan)
# Testing: hierarchical AR(1) model draft.
# BUG FIX: the model string was never closed in the original (missing final
# quote), so everything below it was swallowed into the string / became a
# parse error.
model = "
data {
  int<lower=0> N;               // number of observations
  int<lower=1> J;               // number of firms
  vector[N] g;                  // data for hierarchical AR(1)
  int<lower=1,upper=J> firm[N]; // firm index for each observation
}
parameters {
  vector[J] alpha;              // firm-level intercepts
  vector[J] rho;                // firm-level AR(1) coefficients
  real mu_alpha;
  real mu_rho;
  real<lower=0> sigma_alpha;    // FIX: scale parameters constrained >= 0
  real<lower=0> sigma_rho;
  real<lower=0> sigma_g;
}
transformed parameters {
  vector[N] g_hat;
  g_hat[1] = alpha[firm[1]];    // FIX: first element must be defined before
  for (i in 2:N)                // the recursion below can use g_hat[i-1]
    g_hat[i] = alpha[firm[i]] + rho[firm[i]] * g_hat[i-1];
}
model {
  mu_alpha ~ normal(0, 1);
  // FIX: the original used a negative scale (-sigma_alpha), which is an
  // invalid argument to normal()
  alpha ~ normal(0.0001*mu_alpha, sigma_alpha);
  mu_rho ~ normal(0, 1);
  rho ~ normal(0.0001*mu_rho, sigma_rho);
  g ~ normal(g_hat, sigma_g);   // likelihood
}
"
dat <- list(N = nrow(data), J = J, firm = data$id, g = log(data$equation))
stan.out <- stan(model_code = model, data = dat, iter = 2000, chains = 3, warmup = 500)
# install.packages("Rtools")  # removed: Rtools is not a CRAN package, and
# packages should not be installed from inside an analysis script
##############
# "sucks but only one that works" (original author note)
# NOTE(review): the Stan code below is kept byte-identical; visible problems
# inside the model string, to be confirmed against the Stan compiler:
#   - the `firm[N]` declaration is missing its trailing semicolon;
#   - alpha/beta are declared as scalars but indexed as alpha[firm[i]],
#     so the transformed parameters block presumably cannot compile;
#   - yhat is computed but never used in the model block.
model="
data {
int<lower=0> N; //total number of observations
int<lower=1> J; //number of firms
int<lower=1,upper=J> firm[N] // Sizes of observations across groups
vector[N] y;
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] yhat;
for (i in 1:N)
yhat[i]=alpha[firm[i]] + beta[firm[i]]*y[i]; //x[i] is growth of firm in prior time period
}
model {
for (t in 2:N)
y[t] ~ normal(alpha + beta * y[t-1], sigma);
}"
# Pooled AR(1) fit on the growth series (J is supplied but unused by the model).
dat<- list(N=nrow(data),J=J,y=data$growth)
stan.out<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
## doesn't work
# NOTE(review): this variant references firm[] and x[], which are declared
# nowhere in its data block -- it cannot compile; kept for the record.
model="
data {
int<lower=0> N; //number of observations
int<lower=1> J; //number of firms
vector[N] y; //data
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] yhat;
for (i in 1:N)
yhat[i]=alpha[firm[i]] + beta[firm[i]]*x[i]; //x[i] is growth of firm in prior time period
}
model {
for (t in 2:N)
y[t] ~ normal(alpha + beta * y[t-1], sigma);
}"
###### none of the models below work
model="
data {
int<lower=0> N;
int<lower=1> J;
int<lower=1,upper=J> firm[N];
int<lower=0,upper=1> x[N];
vector[N] y;
//vector[N] k;
}
parameters {
real<lower=0> sigma;
vector[J] alpha;
vector[J] beta;
//real alpha;
//real beta;
}
transformed parameters {
vector[N] yhat;
for (i in 1:N)
yhat[i]=alpha[firm[i]] + beta[firm[i]]*x[i]; //x[i] is growth of firm in prior time period
//for (t in 2:N)
//x[t]=y[t-1]
}
//k[i]= y[n-1]
//alpha and beta only depend on i.
//log(g_i,t)=log(alpha_i)+(p_i) log(g_i,t-1) (i=1,......,n)
model {
for (n in 2:N)
y[n] ~ normal(alpha + beta * yhat[n-1], sigma);
}"
# NOTE(review): the data block above declares x[N], but the list below
# supplies no `x` (and `k` is commented out in the model), so stan() should
# fail with a missing-data error here.
dat<- list(N=nrow(data),J=J,firm=data$id,y=log(data$equation), k=log(data$equation))
stan.out<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
print(stan.out)
## OR: hierarchical regression variant (borrowed "country" naming)
vis.model="data {
int<lower=0> N; //N is an integer value that has a lower bound of zero
int<lower=1> J; // number of countries
int<lower=1,upper=J> country[N];
vector[N] x; // x is a length, N vector of integers
vector[N] k; // k is a length, N vector of integers
}
parameters {
vector[J] A; //A is a length, J vector of integers
vector[J] a; //a is a length, J vector of integers
real mu_A;
real mu_a;
real<lower=0,upper=100> sigma_A;
real<lower=0,upper=100> sigma_a;
real<lower=0,upper=100> sigma_x;
}
transformed parameters {
vector[N] x_hat; //xhat is a length, N vector of integer
for (i in 1:N)
x_hat[i] = A[country[i]] + a[country[i]] * k[i];
}
model {
mu_A ~ normal(0, 1);
A ~ normal(mu_A, sigma_A);
mu_a ~ normal(0, 1);
a ~ normal(0.1*mu_a, sigma_a);
x ~ normal(x_hat, sigma_x); //specified priors
}"
dat <- list(N = nrow(data), J=J,country=data$id, x = data$growth, k=log(data$equation))
vis.stan <- stan(model_code = vis.model, data = dat,
iter = 2000, chains = 3,warmup = 500)
# NOTE(review): this prints the earlier `stan.out` fit, not `vis.stan`.
print(stan.out)
####### ignore jibberish models above.
print(stan.out)
## Does it appear Gibrat's law holds for long-lived manufacturing firms in
## the US? Explain in terms of the posterior density of p, i.e. how
## probable is Gibrat's law? (resource: lecture 8, posterior pred. checking)
library(shinystan)
## do i need to set seed? comparison to lecture 8 model-checks R example
launch_shinystan(stan.out)
## growth changes with
# Extract MCMC samples
params1<-extract(stan.out)
alpha<-params1$alpha
beta<-params1$beta
sigma<- params1$sigma
nsims <-length(params1$sigma)
# produce the replications from posterior and inspect
N<-nrow(data)
y<-data$growth
# NOTE(review): `i` here is the *simulation* index, so y[i-1] is y[0]
# (a zero-length vector) at i = 1 and otherwise pairs posterior draw i with
# data point i-1 -- presumably a lagged data vector was intended; confirm
# before relying on these replications.
yRep <- sapply(1:nsims, function(i) rnorm(N, alpha+beta*y[i-1], sigma))
# Check min, max, and mean of each replicated data set
min_rep <- apply(yRep, 2, min)
max_rep <- apply(yRep,2,max)
mean_rep <- apply(yRep,2,mean)
sd_rep <- apply(yRep,2,sd)
# Plot posterior mins against actual min
hist(min_rep, main='posterior min & actual min',breaks = 50)
abline(v=min(data$`growth`),lwd=3)
min(data$growth)
# centered near -1.5
# Plot posterior maxs against actual max
hist(max_rep, main='posterior max & actual max',breaks = 50)
abline(v=max(data$growth),lwd=3)
max(data$growth)
# centered
# Plot posterior sds against actual sd
# NOTE(review): the plot title says "max" but this plots standard deviations.
hist(sd_rep, main='posterior max & actual standard deviation',xlim=c(0.21,0.59), breaks = 50)
abline(v=sd(data$growth),lwd=3)
sd(data$growth)
# not even close
sd(data$`growth`)
# Plot observed data (NOTE(review): prob=T -- prefer TRUE over T in scripts)
hist(data$growth,breaks=50,prob=T,xlim=c(-2,1),col="red")
# Overlay densities of the replicated data sets
for(i in 2:N){
lines(density(yRep[,i]),col="blue")
}
## looks good
## Redefine growth using `at` (total assets) instead of sales.
## mean of total assets by year, independent of firm
data<-data[,rbar:=mean(at),by=fyear]
## relative size: at_{i,t} / mean(at_t)
equation<-data$at/data$rbar
## add result of the division into the table for visibility
data<-data[,equation:=equation, by=id]
## log of relative size gives growth
growth<-log(equation)
data<-data[,growth:=growth, by=id]
## the log-growth column is the column of interest throughout.
### final table ###
firmdata<-data.table("Company Code"=data$gvkey,"Fiscal Data Year"=data$fyear,
"Company Name"=data$conm, "Total Assets"= data$at,
"Sales/Turnover (net)"=data$sale, "Years survived"=data$surv,
"Growth of firm size"=data$growth,"numeric identifier"=data$id)
dat<- list(N=nrow(firmdata),J=J,y=firmdata$`Growth of firm size`)
stan.totalassets<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
print(stan.totalassets)
## looks good
## Redefine growth using gvkey (company code).
# NOTE(review): gvkey is a company *identifier*, not an economic quantity;
# "growth" of gvkey is presumably meaningless -- confirm this section is
# intentional before relying on the stan.totalassets fit below.
## mean of gvkey by year, independent of firm
data<-data[,rbar:=mean(gvkey),by=fyear]
## relative "size": gvkey_{i,t} / mean(gvkey_t)
equation<-data$gvkey/data$rbar
## add result of the division into the table for visibility
data<-data[,equation:=equation, by=id]
## log of relative size gives "growth"
growth<-log(equation)
data<-data[,growth:=growth, by=id]
## the log-growth column is the column of interest throughout.
### final table ###
firmdata<-data.table("Company Code"=data$gvkey,"Fiscal Data Year"=data$fyear,
"Company Name"=data$conm, "Total Assets"= data$at,
"Sales/Turnover (net)"=data$sale, "Years survived"=data$surv,
"Growth of firm size"=data$growth,"numeric identifier"=data$id)
dat<- list(N=nrow(firmdata),J=J,y=firmdata$`Growth of firm size`)
stan.totalassets<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
print(stan.totalassets)
| /old/lecutures/Final Project/843 am.R | no_license | CGreen4u/R-related | R | false | false | 9,490 | r |
## GVKey company codes
## include:
#   company name
#   SIC   - standard industry classification code
#   FYEAR - fiscal data year
#   SALE  - sales/turnover (net)
#   AT    - total assets

# Import the data into R as a data.table.
# BUG FIX: the path string below was missing its closing quote in the
# original, which made the entire script unparseable.
setwd("/Users/chloesegale/Desktop/econ 5529 - bayesian statistics/Final Project")
library(data.table)
data <- data.table(read.csv("400be901b8744372.csv", header = TRUE))

# Keep only the manufacturing industry (SIC 2000-3900) and complete cases;
# there should be no NAs after this step.
data <- data[sic >= 2000 & sic <= 3900, ]
data <- data[complete.cases(data), ]

# Years survived by each firm: number of fiscal years observed per gvkey.
data <- data[, surv := length(fyear), by = gvkey]
years.survived.var <- data$surv

# Keep only firms that lived the whole span of the data set
# (2016 - 1962 = 54 years).
data <- data[surv >= 54, ]
head(data)

# Unique numeric identifier for each firm.
data$id <- as.numeric(factor(data$gvkey))
J <- length(unique(data$id))  # number of firms (184 in the original data)

## Mean of sales by year, independent of firm.
data <- data[, rbar := mean(sale), by = fyear]
## Relative firm size: sale_{i,t} / mean(sale_t), stored as a column for
## visibility into the computation.
data <- data[, equation := data$sale / data$rbar]
## Log relative size is the growth measure used throughout.
data <- data[, growth := log(equation)]

### Model sketch:
#   g_{i,t} = alpha_i + p_i * g_{i,t-1} + epsilon_{i,t}
#   g_{i,t} = ln( s_{i,t} / mean(sale_t) )
## i indexes firms, t indexes time.
library(ggplot2)
library(StanHeaders)
library(rstan)
# Testing: hierarchical AR(1) model draft.
# BUG FIX: the model string was never closed in the original (missing final
# quote), so everything below it was swallowed into the string / became a
# parse error.
model = "
data {
  int<lower=0> N;               // number of observations
  int<lower=1> J;               // number of firms
  vector[N] g;                  // data for hierarchical AR(1)
  int<lower=1,upper=J> firm[N]; // firm index for each observation
}
parameters {
  vector[J] alpha;              // firm-level intercepts
  vector[J] rho;                // firm-level AR(1) coefficients
  real mu_alpha;
  real mu_rho;
  real<lower=0> sigma_alpha;    // FIX: scale parameters constrained >= 0
  real<lower=0> sigma_rho;
  real<lower=0> sigma_g;
}
transformed parameters {
  vector[N] g_hat;
  g_hat[1] = alpha[firm[1]];    // FIX: first element must be defined before
  for (i in 2:N)                // the recursion below can use g_hat[i-1]
    g_hat[i] = alpha[firm[i]] + rho[firm[i]] * g_hat[i-1];
}
model {
  mu_alpha ~ normal(0, 1);
  // FIX: the original used a negative scale (-sigma_alpha), which is an
  // invalid argument to normal()
  alpha ~ normal(0.0001*mu_alpha, sigma_alpha);
  mu_rho ~ normal(0, 1);
  rho ~ normal(0.0001*mu_rho, sigma_rho);
  g ~ normal(g_hat, sigma_g);   // likelihood
}
"
dat <- list(N = nrow(data), J = J, firm = data$id, g = log(data$equation))
stan.out <- stan(model_code = model, data = dat, iter = 2000, chains = 3, warmup = 500)
# install.packages("Rtools")  # removed: Rtools is not a CRAN package, and
# packages should not be installed from inside an analysis script
##############
# "sucks but only one that works" (original author note)
# NOTE(review): the Stan code below is kept byte-identical; visible problems
# inside the model string, to be confirmed against the Stan compiler:
#   - the `firm[N]` declaration is missing its trailing semicolon;
#   - alpha/beta are declared as scalars but indexed as alpha[firm[i]],
#     so the transformed parameters block presumably cannot compile;
#   - yhat is computed but never used in the model block.
model="
data {
int<lower=0> N; //total number of observations
int<lower=1> J; //number of firms
int<lower=1,upper=J> firm[N] // Sizes of observations across groups
vector[N] y;
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] yhat;
for (i in 1:N)
yhat[i]=alpha[firm[i]] + beta[firm[i]]*y[i]; //x[i] is growth of firm in prior time period
}
model {
for (t in 2:N)
y[t] ~ normal(alpha + beta * y[t-1], sigma);
}"
# Pooled AR(1) fit on the growth series (J is supplied but unused by the model).
dat<- list(N=nrow(data),J=J,y=data$growth)
stan.out<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
## doesn't work
# NOTE(review): this variant references firm[] and x[], which are declared
# nowhere in its data block -- it cannot compile; kept for the record.
model="
data {
int<lower=0> N; //number of observations
int<lower=1> J; //number of firms
vector[N] y; //data
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] yhat;
for (i in 1:N)
yhat[i]=alpha[firm[i]] + beta[firm[i]]*x[i]; //x[i] is growth of firm in prior time period
}
model {
for (t in 2:N)
y[t] ~ normal(alpha + beta * y[t-1], sigma);
}"
###### none of the models below work
model="
data {
int<lower=0> N;
int<lower=1> J;
int<lower=1,upper=J> firm[N];
int<lower=0,upper=1> x[N];
vector[N] y;
//vector[N] k;
}
parameters {
real<lower=0> sigma;
vector[J] alpha;
vector[J] beta;
//real alpha;
//real beta;
}
transformed parameters {
vector[N] yhat;
for (i in 1:N)
yhat[i]=alpha[firm[i]] + beta[firm[i]]*x[i]; //x[i] is growth of firm in prior time period
//for (t in 2:N)
//x[t]=y[t-1]
}
//k[i]= y[n-1]
//alpha and beta only depend on i.
//log(g_i,t)=log(alpha_i)+(p_i) log(g_i,t-1) (i=1,......,n)
model {
for (n in 2:N)
y[n] ~ normal(alpha + beta * yhat[n-1], sigma);
}"
# NOTE(review): the data block above declares x[N], but the list below
# supplies no `x` (and `k` is commented out in the model), so stan() should
# fail with a missing-data error here.
dat<- list(N=nrow(data),J=J,firm=data$id,y=log(data$equation), k=log(data$equation))
stan.out<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
print(stan.out)
## OR: hierarchical regression variant (borrowed "country" naming)
vis.model="data {
int<lower=0> N; //N is an integer value that has a lower bound of zero
int<lower=1> J; // number of countries
int<lower=1,upper=J> country[N];
vector[N] x; // x is a length, N vector of integers
vector[N] k; // k is a length, N vector of integers
}
parameters {
vector[J] A; //A is a length, J vector of integers
vector[J] a; //a is a length, J vector of integers
real mu_A;
real mu_a;
real<lower=0,upper=100> sigma_A;
real<lower=0,upper=100> sigma_a;
real<lower=0,upper=100> sigma_x;
}
transformed parameters {
vector[N] x_hat; //xhat is a length, N vector of integer
for (i in 1:N)
x_hat[i] = A[country[i]] + a[country[i]] * k[i];
}
model {
mu_A ~ normal(0, 1);
A ~ normal(mu_A, sigma_A);
mu_a ~ normal(0, 1);
a ~ normal(0.1*mu_a, sigma_a);
x ~ normal(x_hat, sigma_x); //specified priors
}"
dat <- list(N = nrow(data), J=J,country=data$id, x = data$growth, k=log(data$equation))
vis.stan <- stan(model_code = vis.model, data = dat,
iter = 2000, chains = 3,warmup = 500)
# NOTE(review): this prints the earlier `stan.out` fit, not `vis.stan`.
print(stan.out)
####### ignore jibberish models above.
print(stan.out)
## Does it appear Gibrat's law holds for long-lived manufacturing firms in
## the US? Explain in terms of the posterior density of p, i.e. how
## probable is Gibrat's law? (resource: lecture 8, posterior pred. checking)
library(shinystan)
## do i need to set seed? comparison to lecture 8 model-checks R example
launch_shinystan(stan.out)
## growth changes with
# Extract MCMC samples
params1<-extract(stan.out)
alpha<-params1$alpha
beta<-params1$beta
sigma<- params1$sigma
nsims <-length(params1$sigma)
# produce the replications from posterior and inspect
N<-nrow(data)
y<-data$growth
# NOTE(review): `i` here is the *simulation* index, so y[i-1] is y[0]
# (a zero-length vector) at i = 1 and otherwise pairs posterior draw i with
# data point i-1 -- presumably a lagged data vector was intended; confirm
# before relying on these replications.
yRep <- sapply(1:nsims, function(i) rnorm(N, alpha+beta*y[i-1], sigma))
# Check min, max, and mean of each replicated data set
min_rep <- apply(yRep, 2, min)
max_rep <- apply(yRep,2,max)
mean_rep <- apply(yRep,2,mean)
sd_rep <- apply(yRep,2,sd)
# Plot posterior mins against actual min
hist(min_rep, main='posterior min & actual min',breaks = 50)
abline(v=min(data$`growth`),lwd=3)
min(data$growth)
# centered near -1.5
# Plot posterior maxs against actual max
hist(max_rep, main='posterior max & actual max',breaks = 50)
abline(v=max(data$growth),lwd=3)
max(data$growth)
# centered
# Plot posterior sds against actual sd
# NOTE(review): the plot title says "max" but this plots standard deviations.
hist(sd_rep, main='posterior max & actual standard deviation',xlim=c(0.21,0.59), breaks = 50)
abline(v=sd(data$growth),lwd=3)
sd(data$growth)
# not even close
sd(data$`growth`)
# Plot observed data (NOTE(review): prob=T -- prefer TRUE over T in scripts)
hist(data$growth,breaks=50,prob=T,xlim=c(-2,1),col="red")
# Overlay densities of the replicated data sets
for(i in 2:N){
lines(density(yRep[,i]),col="blue")
}
## looks good
## Redefine growth using `at` (total assets) instead of sales.
## mean of total assets by year, independent of firm
data<-data[,rbar:=mean(at),by=fyear]
## relative size: at_{i,t} / mean(at_t)
equation<-data$at/data$rbar
## add result of the division into the table for visibility
data<-data[,equation:=equation, by=id]
## log of relative size gives growth
growth<-log(equation)
data<-data[,growth:=growth, by=id]
## the log-growth column is the column of interest throughout.
### final table ###
firmdata<-data.table("Company Code"=data$gvkey,"Fiscal Data Year"=data$fyear,
"Company Name"=data$conm, "Total Assets"= data$at,
"Sales/Turnover (net)"=data$sale, "Years survived"=data$surv,
"Growth of firm size"=data$growth,"numeric identifier"=data$id)
dat<- list(N=nrow(firmdata),J=J,y=firmdata$`Growth of firm size`)
stan.totalassets<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
print(stan.totalassets)
## looks good
## Redefine growth using gvkey (company code).
# NOTE(review): gvkey is a company *identifier*, not an economic quantity;
# "growth" of gvkey is presumably meaningless -- confirm this section is
# intentional before relying on the stan.totalassets fit below.
## mean of gvkey by year, independent of firm
data<-data[,rbar:=mean(gvkey),by=fyear]
## relative "size": gvkey_{i,t} / mean(gvkey_t)
equation<-data$gvkey/data$rbar
## add result of the division into the table for visibility
data<-data[,equation:=equation, by=id]
## log of relative size gives "growth"
growth<-log(equation)
data<-data[,growth:=growth, by=id]
## the log-growth column is the column of interest throughout.
### final table ###
firmdata<-data.table("Company Code"=data$gvkey,"Fiscal Data Year"=data$fyear,
"Company Name"=data$conm, "Total Assets"= data$at,
"Sales/Turnover (net)"=data$sale, "Years survived"=data$surv,
"Growth of firm size"=data$growth,"numeric identifier"=data$id)
dat<- list(N=nrow(firmdata),J=J,y=firmdata$`Growth of firm size`)
stan.totalassets<-stan(model_code=model,data=dat,iter=10000,chains=3,thin=2)
print(stan.totalassets)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variable_blur.R
\name{with_variable_blur}
\alias{with_variable_blur}
\title{Apply a variable blur to a layer}
\usage{
with_variable_blur(
x,
x_sigma,
y_sigma = x_sigma,
angle = NULL,
x_scale = 1,
y_scale = x_scale,
angle_range = 0,
...
)
}
\arguments{
\item{x}{A ggplot2 layer object, a ggplot, a grob, or a character string
naming a filter}
\item{x_sigma, y_sigma, angle}{The layers to use for looking up the sigma
values and angle defining the blur ellipse at every point. Can either be a
string identifying a registered filter, or a raster object. The maps will be
resized to match the dimensions of x. Only one channel will be used - see
\link[=Channels]{the docs on channels} for info on how to set them.}
\item{x_scale, y_scale}{Which sigma should a maximal channel value correspond
to? If a numeric it will be interpreted as pixel dimensions. If a unit object
it will be converted to pixel dimension when rendered.}
\item{angle_range}{The minimum and maximum angle that min and max in the
\code{angle} layer should correspond to. If \code{angle == NULL} or only a single value
is provided to \code{angle_range} the rotation will be constant across the whole
layer}
\item{...}{Arguments to be passed on to methods. See
\link[=object_support]{the documentation of supported object} for a description of
object specific arguments.}
}
\value{
A modified \code{Layer} object
}
\description{
This filter will blur a layer, but in contrast to \code{\link[=with_blur]{with_blur()}} the amount
and nature of the blur need not be constant across the layer. The blurring is
based on a weighted ellipsoid, with width and height based on the values in
the corresponding \code{x_sigma} and \code{y_sigma} layers. The angle of the ellipsoid
can also be controlled and further varied based on another layer.
}
\examples{
library(ggplot2)
cos_wave <- function(width, height) {
x <- matrix(0, ncol = width, nrow = height)
x <- cos(col(x)/100)
as.raster((x + 1) / 2)
}
ggplot() +
as_reference(
cos_wave,
id = "wave"
) +
with_variable_blur(
geom_point(aes(disp, mpg), mtcars, size = 4),
x_sigma = ch_red("wave"),
y_sigma = ch_alpha("wave"),
angle = ch_red("wave"),
x_scale = 15,
y_scale = 15,
angle_range = c(-45, 45)
)
}
\seealso{
Other blur filters:
\code{\link{with_blur}()},
\code{\link{with_motion_blur}()}
}
\concept{blur filters}
| /man/with_variable_blur.Rd | permissive | gejielin/ggfx | R | false | true | 2,473 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variable_blur.R
\name{with_variable_blur}
\alias{with_variable_blur}
\title{Apply a variable blur to a layer}
\usage{
with_variable_blur(
x,
x_sigma,
y_sigma = x_sigma,
angle = NULL,
x_scale = 1,
y_scale = x_scale,
angle_range = 0,
...
)
}
\arguments{
\item{x}{A ggplot2 layer object, a ggplot, a grob, or a character string
naming a filter}
\item{x_sigma, y_sigma, angle}{The layers to use for looking up the sigma
values and angle defining the blur ellipse at every point. Can either be a
string identifying a registered filter, or a raster object. The maps will be
resized to match the dimensions of x. Only one channel will be used - see
\link[=Channels]{the docs on channels} for info on how to set them.}
\item{x_scale, y_scale}{Which sigma should a maximal channel value correspond
to? If a numeric it will be interpreted as pixel dimensions. If a unit object
it will be converted to pixel dimension when rendered.}
\item{angle_range}{The minimum and maximum angle that min and max in the
\code{angle} layer should correspond to. If \code{angle == NULL} or only a single value
is provided to \code{angle_range} the rotation will be constant across the whole
layer}
\item{...}{Arguments to be passed on to methods. See
\link[=object_support]{the documentation of supported object} for a description of
object specific arguments.}
}
\value{
A modified \code{Layer} object
}
\description{
This filter will blur a layer, but in contrast to \code{\link[=with_blur]{with_blur()}} the amount
and nature of the blur need not be constant across the layer. The blurring is
based on a weighted ellipsoid, with width and height based on the values in
the corresponding \code{x_sigma} and \code{y_sigma} layers. The angle of the ellipsoid
can also be controlled and further varied based on another layer.
}
\examples{
library(ggplot2)
cos_wave <- function(width, height) {
x <- matrix(0, ncol = width, nrow = height)
x <- cos(col(x)/100)
as.raster((x + 1) / 2)
}
ggplot() +
as_reference(
cos_wave,
id = "wave"
) +
with_variable_blur(
geom_point(aes(disp, mpg), mtcars, size = 4),
x_sigma = ch_red("wave"),
y_sigma = ch_alpha("wave"),
angle = ch_red("wave"),
x_scale = 15,
y_scale = 15,
angle_range = c(-45, 45)
)
}
\seealso{
Other blur filters:
\code{\link{with_blur}()},
\code{\link{with_motion_blur}()}
}
\concept{blur filters}
|
# Load packages
library(tidyverse)  # provides readr::read_csv and the dplyr verbs used below
library(knitr)
library(readxl)
library(zoo)

# Question 1: county-level COVID case data from the NYT repository.
url <- "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
covid <- read_csv(url)
head(covid)

state.of.interest <- "California"
# BUG FIX: the pipeline result was never assigned, so the original
# `knitr::kable(x, format, )` referenced an undefined `x`; it also passed the
# bare function `format` and an empty trailing argument (a syntax error).
x <- covid %>%
  filter(state == state.of.interest) %>%
  group_by(county) %>%
  mutate(newCases = cases - lag(cases)) %>%  # daily new cases within county
  ungroup()
knitr::kable(head(x))
# ?knitr::kable  # interactive help lookup; not needed inside the script
| /LAB02/docs/lab-02.R | no_license | hayleed25/geog-13-labs | R | false | false | 445 | r | # Load packages
library(tidyverse)  # provides readr::read_csv and the dplyr verbs used below
library(knitr)
library(readxl)
library(zoo)

# Question 1: county-level COVID case data from the NYT repository.
url <- "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
covid <- read_csv(url)
head(covid)

state.of.interest <- "California"
# BUG FIX: the pipeline result was never assigned, so the original
# `knitr::kable(x, format, )` referenced an undefined `x`; it also passed the
# bare function `format` and an empty trailing argument (a syntax error).
x <- covid %>%
  filter(state == state.of.interest) %>%
  group_by(county) %>%
  mutate(newCases = cases - lag(cases)) %>%  # daily new cases within county
  ungroup()
knitr::kable(head(x))
# ?knitr::kable  # interactive help lookup; not needed inside the script
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kobo_map_cat.R
\name{kobo_map_cat}
\alias{kobo_map_cat}
\title{Generate Maps for categorical variables}
\usage{
kobo_map_cat(data, xmax, xmin, ymax, ymin, dico)
}
\arguments{
\item{data}{kobo dataset to use}
\item{xmax}{Bounding box for the map - max longitude - in decimal degree}
\item{xmin}{Bounding box for the map - min longitude - in decimal degree}
\item{ymax}{Bounding box for the map - max latitude - in decimal degree}
\item{ymin}{Bounding box for the map - min latitude - in decimal degree}
\item{dico}{( generated from kobo_dico)}
}
\description{
Automatically generate maps for all nominal & ordinal variables based on dates. ggplot2 is used.
}
\examples{
kobo_map_cat()
}
\author{
Edouard Legoupil
}
| /man/kobo_map_cat.Rd | no_license | luishernando/koboloadeR | R | false | true | 797 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kobo_map_cat.R
\name{kobo_map_cat}
\alias{kobo_map_cat}
\title{Generate Maps for categorical variables}
\usage{
kobo_map_cat(data, xmax, xmin, ymax, ymin, dico)
}
\arguments{
\item{data}{kobo dataset to use}
\item{xmax}{Bounding box for the map - max longitude - in decimal degree}
\item{xmin}{Bounding box for the map - min longitude - in decimal degree}
\item{ymax}{Bounding box for the map - max latitude - in decimal degree}
\item{ymin}{Bounding box for the map - min latitude - in decimal degree}
\item{dico}{( generated from kobo_dico)}
}
\description{
Automatically generate maps for all nominal & ordinal variables based on dates. ggplot2 is used.
}
\examples{
kobo_map_cat()
}
\author{
Edouard Legoupil
}
|
### Download plots and clip to tile extent
#devtools::install_github("Weecology/Neon-Utilities/neonUtilities",dependencies=F)
library(foreach)
library(doSNOW)

### Download RGB and LIDAR, HyperSpec tiles for every NEON site.
# BUG FIX: "HOPB" appeared twice in the original vector, which would have
# downloaded that site a second time for nothing.
sites <- c("ARIK","BARR","BART","BONA","CLBJ","CPER","CUPE","DEJU","DELA","DSNY","GRSM","GUAN",
  "GUIL","HARV","HEAL","HOPB","JERC","JORN","KONZ","LAJA","LENO","LIRO","MCDI","MLBS","MOAB",
  "NIWO","NOGP","OAES","OSBS","PRIN","REDB","RMNP","SCBI","SERC","SJER","SOAP","SRER","STEI",
  "STER","TALL","TEAK","TOOL","UKFS","UNDE","WLOU","WOOD","WREF")

# 5 parallel workers; outfile="" routes worker output to the console.
cl <- makeCluster(5, outfile = "")
registerDoSNOW(cl)
foreach(x = seq_along(sites),
    .packages = c("neonUtilities", "TreeSegmentation", "dplyr"),
    .errorhandling = "pass") %dopar% {
  # per-site destination directory on the shared filesystem
  fold <- paste("/orange/ewhite/NeonData/", sites[x], sep = "")
  byPointsAOP(dpID="DP3.30010.001", site=sites[x], year="2017", check.size=FALSE, savepath=fold)
  byPointsAOP(dpID="DP3.30010.001", site=sites[x], year="2018", check.size=FALSE, savepath=fold)
  #byPointsAOP(dpID="DP1.30003.001",site=sites[x],year="2018",check.size=F, savepath=fold)
  #byPointsAOP(dpID="DP1.30006.001",site=sites[x],year="2017",check.size=F, savepath=fold)
  ##Cut Tiles
  #crop_rgb_plots(sites[x])
  #crop_lidar_plots(sites[x])
}
# BUG FIX: the cluster was never shut down in the original, leaking the 5
# worker processes after the downloads finished.
stopCluster(cl)
| /analysis/Process_NEON_Plots.R | no_license | pySirin/TreeSegmentation | R | false | false | 1,214 | r | ### Download plots and clip to tile extent
#devtools::install_github("Weecology/Neon-Utilities/neonUtilities",dependencies=F)
library(foreach)
library(doSNOW)
###Download RGB and LIDAR, HyperSpec tiles
# NEON site codes to process. NOTE: "HOPB" was listed twice in the original
# vector, which made two workers download the same site's tiles concurrently
# into the same folder; deduplicated here.
sites<-c("ARIK","BARR","BART","BONA","CLBJ","CPER","CUPE","DEJU","DELA","DSNY","GRSM","GUAN",
         "GUIL","HARV","HEAL","HOPB","JERC","JORN","KONZ","LAJA","LENO","LIRO","MCDI","MLBS","MOAB","NIWO","NOGP","OAES","OSBS","PRIN","REDB","RMNP","SCBI","SERC","SJER","SOAP","SRER","STEI","STER","TALL","TEAK","TOOL","UKFS","UNDE","WLOU","WOOD","WREF")
cl<-makeCluster(5,outfile="")  # 5 worker processes; outfile="" streams worker logs to the console
registerDoSNOW(cl)
# Download 2017 and 2018 camera (DP3.30010.001) tiles, one site per parallel task.
# .errorhandling="pass" keeps the loop going when a single site fails.
foreach(x=seq_along(sites),.packages=c("neonUtilities","TreeSegmentation","dplyr"),.errorhandling = "pass") %dopar% {
  fold<-paste("/orange/ewhite/NeonData/",sites[x],sep="")  # per-site output folder
  byPointsAOP(dpID="DP3.30010.001",site=sites[x],year="2017",check.size=F, savepath=fold)
  byPointsAOP(dpID="DP3.30010.001",site=sites[x],year="2018",check.size=F, savepath=fold)
  #byPointsAOP(dpID="DP1.30003.001",site=sites[x],year="2018",check.size=F, savepath=fold)
  #byPointsAOP(dpID="DP1.30006.001",site=sites[x],year="2017",check.size=F, savepath=fold)
  ##Cut Tiles
  #crop_rgb_plots(sites[x])
  #crop_lidar_plots(sites[x])
}
stopCluster(cl)  # release the worker processes (missing in the original)
|
# Simulated data: 10 participants seen only in the familiar condition,
# 10 only in the novel condition, and 10 seen in both.
# (f0 is presumably fundamental frequency -- TODO confirm with the study docs.)
fam.only <- rnorm(10)
nov.only <- rnorm(10)
both <- rnorm(10)
cond <- factor(rep(c("fam", "nov", "fam", "nov"), each=10))
id.only <- c(1:20)
id.both <- rep(c(21:30), 2)
f0.df <- data.frame(id = c(id.only, id.both), f0 = c(fam.only, nov.only, both, both), cond = cond)
library(magrittr)
library(ggplot2)
# Violin plot per condition with one point per observation and a line
# connecting each participant's observations across conditions.
f0.df %>%
  ggplot(.) +
  aes(., x = cond, y = f0) +
  geom_violin() +
  geom_point() +
  geom_line(aes(group = id))
# NOTE(review): the chain below is left unfinished -- the trailing "+" makes the
# parser absorb the following assignment into this expression, so running the
# script top-to-bottom errors here and `f0.ang` is never assigned. Presumably
# more geoms were meant to follow; confirm the intended plot before running.
f0.df %>%
  ggplot(.) +
  aes(., x = cond, y = f0) +
f0.ang <- rnorm(40)
f0.neu <- f0.ang - 0.2
f0.hap <- f0.ang - 0.3
f0.sad <- f0.ang - 0.5
id <- c(1:40)
cond.fam <- rep("fam", 40)
cond.nov <- rep("nov", 40)
emo <- rep(c("ang", "neu", "hap", "sad"), each = 80)  # NOTE(review): built but never used below
# NOTE(review): `f0` is not defined anywhere above -- presumably it should be
# c(f0.ang, f0.neu, f0.hap, f0.sad); also verify the resulting column lengths
# match the 320 rows implied by rep(id, 8) before running.
f0.df <- data.frame(id = rep(id, 8), cond = rep(c(cond.fam, cond.nov), each = 4),
                    f0 = c(f0, sample(f0, size = 40, replace = FALSE)))
f0.df %>%
  ggplot(.) +
  aes(., x = cond, y = f0) +
  geom_violin() +
  geom_point() +
  geom_line(aes(group = id))
| /peep-plots.R | no_license | gilmore-lab/peep-II-ratings-analysis | R | false | false | 961 | r | fam.only <- rnorm(10)
# Simulated data: participants in familiar-only, novel-only, and both conditions.
# NOTE(review): the first line of this duplicated script, `fam.only <- rnorm(10)`,
# was swallowed by the dataset separator above, so `fam.only` is undefined here.
nov.only <- rnorm(10)
both <- rnorm(10)
cond <- factor(rep(c("fam", "nov", "fam", "nov"), each=10))
id.only <- c(1:20)
id.both <- rep(c(21:30), 2)
f0.df <- data.frame(id = c(id.only, id.both), f0 = c(fam.only, nov.only, both, both), cond = cond)
library(magrittr)
library(ggplot2)
# Violin plot per condition with per-participant connecting lines.
f0.df %>%
  ggplot(.) +
  aes(., x = cond, y = f0) +
  geom_violin() +
  geom_point() +
  geom_line(aes(group = id))
# NOTE(review): the chain below is left unfinished -- the trailing "+" makes the
# parser absorb the following assignment into this expression, so running the
# script top-to-bottom errors here and `f0.ang` is never assigned.
f0.df %>%
  ggplot(.) +
  aes(., x = cond, y = f0) +
f0.ang <- rnorm(40)
f0.neu <- f0.ang - 0.2
f0.hap <- f0.ang - 0.3
f0.sad <- f0.ang - 0.5
id <- c(1:40)
cond.fam <- rep("fam", 40)
cond.nov <- rep("nov", 40)
emo <- rep(c("ang", "neu", "hap", "sad"), each = 80)  # NOTE(review): built but never used below
# NOTE(review): `f0` is not defined anywhere above -- presumably
# c(f0.ang, f0.neu, f0.hap, f0.sad); verify before running.
f0.df <- data.frame(id = rep(id, 8), cond = rep(c(cond.fam, cond.nov), each = 4),
                    f0 = c(f0, sample(f0, size = 40, replace = FALSE)))
f0.df %>%
  ggplot(.) +
  aes(., x = cond, y = f0) +
  geom_violin() +
  geom_point() +
  geom_line(aes(group = id))
|
rm(list=ls()) #remove all variables from workspace
###########################################################################
#Given a file name, create a list variable that contains any necessary information
#Input: A file
#Output: A list
# Read a plain-text file and return a cleaned character vector of words.
# Input:  file - path to a plain-text file
# Output: character vector of lowercase, punctuation-free words
# Fixes vs original: the last statement was an assignment, so the value was
# returned invisibly; the local variable shadowed base::list; scan() printed
# a "Read N items" message (silenced with quiet = TRUE).
readPaper <- function(file){
  words <- unlist(scan(file, what = list(""), sep = "", quiet = TRUE)) # read whitespace-separated tokens
  words <- gsub("[[:punct:]]", "", words) # strip all punctuation ("don't" -> "dont")
  tolower(words) # lowercase so "As" and "as" count as the same word; returned visibly
}
#read about the scan function here:
#1. http://www.ats.ucla.edu/stat/r/modules/raw_data.htm
#2. R help
#3. for gsub: http://stackoverflow.com/questions/11498157/convert-punctuation-to-space
#########################
#Takes output from readPaper and a word (or a vector of words) and gives the frequency the frequency of the word
#Input: A filelist and a word
#Output: The frequency of the number
# Count the occurrences of `word` in the word vector `filelist`.
# Returns the number of exact matches.
wordCount <- function(filelist, word){
  hits <- filelist == word  # elementwise comparison against every word
  sum(hits)                 # each TRUE counts as 1
}
#Read about functions here:
#1. http://stackoverflow.com/questions/1923273/counting-the-number-of-elements-with-the-values-of-x-in-a-vector
#########################
#Takes a word and output from readPaper and gives the starting character position of that word indexed from the beginning of the paper.
#Input: A filelist and a word
#Output: A vector of the index of the beginning of each word placement
# Locate every occurrence of `word` in `filelist`.
# Returns an integer vector of positions (empty when the word never occurs).
wordPlacement <- function(filelist, word){
  hits <- filelist == word  # logical mask of exact matches
  which(hits)               # indices of the TRUE entries
}
#read about the which function here:
#1. R help
#########################
#Generates a frequency histogram of the 10 most frequent words in a file, can change the number of words most frequent words
#Input: A filelist and the top X words, default 10
#Output: An image of a histrogram
# Draw a barplot of the `top` most frequent words in `filelist`.
# filelist: character vector of words (output of readPaper).
# top:      how many of the most frequent words to show (default 10).
# Called for its side effect: draws the barplot with rotated word labels.
wordHist <- function(filelist, top = 10){
  pap <- as.data.frame(table(filelist)) #Tabulate every distinct word with its count.
  colnames(pap) <- c("word", "freq") #Rename the table() columns to something descriptive.
  arg <- order(pap$freq, decreasing = TRUE) #Row order: most to least frequent.
  pap <- pap[arg, ] #Sort the data frame by descending frequency.
  pap <- head(pap, n=top) #Keep only the `top` most frequent words.
  x <- barplot(pap$freq, names = pap$word, col = "royalblue", space = 1, #Bar heights = frequencies; x receives the bar midpoints.
               xaxt="n",xlab="", ylab = "Frequency", main = "Word Frequencies")
  labels <- pap$word #Word labels for the x axis.
  text(x, x=x-.5, y=-3.5, labels = labels, srt = 45, pos = 1, xpd = TRUE) #Draw labels rotated 45 degrees below the axis (xpd=TRUE allows drawing outside the plot region).
}
#Sources:
#1. http://www.dummies.com/how-to/content/how-to-sort-data-frames-in-r.html
#2. http://haotu.wordpress.com/2013/07/09/angle-axis-x-labels-on-r-plot/
#3. http://stackoverflow.com/questions/20241388/rotate-x-axis-labels-45-degrees-on-grouped-bar-plot-r
#########################
#Given a word, give the frequency of the words that follow it.
#Input: A filelist and a word
#Output: Vector of counts
# Tabulate the words that immediately follow `word` in `filelist`.
# Input:  filelist - character vector of words; word - word of interest
# Output: table of follower words, sorted by increasing count.
# An occurrence of `word` as the very last token has no follower and is dropped
# (the original grew the result one element at a time and relied on table()
# silently discarding the NA produced in that case; this is now vectorized
# and explicit).
nextWord <- function(filelist, word){
  idx <- which(filelist == word)             # positions of the word of interest
  followers <- filelist[idx + 1]             # word after each occurrence; NA past the end
  followers <- followers[!is.na(followers)]  # drop end-of-text occurrences
  sort(table(followers))                     # counts, ascending
}
#Sources:
#1. Help with graduate student: Daniel Katz in SNRE.
#########################
# Given a word, give the freqency of words that preceed it
#Input: A filelist and a word
#Output: Vector of Counts
# Tabulate the words that immediately precede `word` in `filelist`.
# Input:  filelist - character vector of words; word - word of interest
# Output: table of preceding words, sorted by increasing count.
# Bug fix vs original: when `word` occurred at position 1 the loop assigned the
# zero-length value filelist[0] to a single element and raised
# "replacement has length zero". Vectorized subsetting drops index 0 silently.
previousWord <- function(filelist, word){
  idx <- which(filelist == word)   # positions of the word of interest
  preceding <- filelist[idx - 1]   # word before each occurrence; index 0 drops out
  sort(table(preceding))           # counts, ascending
}
#Sources:
#1. Help from graduate student: Daniel Katz in SNRE.
#########################
# This function takes a readPaper output filelist and outputs a histogram of the frequency of each letter in the alphabet
#Input: A filelist
#Output: Histogram of letter frequency
# Draw a barplot of the frequency of each character (letter) in `filelist`.
# filelist: character vector of words (output of readPaper).
# Called for its side effect: draws the barplot.
surpriseMe <- function(filelist){
  letter_list <- toString(filelist) #Collapse the word vector into one comma-separated string.
  letter_list <- gsub("[[:punct:]]", "", letter_list) #Remove punctuation (incl. the commas toString added).
  letter_list <- gsub("[[:space:]]", "", letter_list) #Remove all whitespace.
  letter_list <- gsub("[[:digit:]]", "", letter_list) #Remove digits.
  letter_list <- tolower(letter_list) #Normalize to lowercase.
  oop <- strsplit(letter_list, split = "") #Split into single characters (a one-element list).
  toop <- as.data.frame(table(oop)) #table() treats each list component as a factor, giving per-letter counts.
  x <- barplot(toop$Freq, names.arg = toop$oop, col = "violetred", xaxt="n",
               xlab="Letter", ylab = "Frequency", main = "Letter Frequencies in filelist") #x receives the bar midpoints.
  labels <- toop$oop #Letter labels for the x axis.
  text(x, x=x, y=-3.5, labels = labels, srt = 0, pos = 1, xpd = TRUE) #Draw labels just below the axis (xpd=TRUE permits drawing outside the plot region).
}
#########################
###########################################################################
#END
| /marschmi.R | permissive | marschmi/assignment04 | R | false | false | 6,046 | r | rm(list=ls()) #remove all variables from workspace
###########################################################################
#Given a file name, create a list variable that contains any necessary information
#Input: A file
#Output: A list
# Read a plain-text file and return a cleaned character vector of words.
# Input:  file - path to a plain-text file
# Output: character vector of lowercase, punctuation-free words
# Fixes vs original: the last statement was an assignment, so the value was
# returned invisibly; the local variable shadowed base::list; scan() printed
# a "Read N items" message (silenced with quiet = TRUE).
readPaper <- function(file){
  words <- unlist(scan(file, what = list(""), sep = "", quiet = TRUE)) # read whitespace-separated tokens
  words <- gsub("[[:punct:]]", "", words) # strip all punctuation ("don't" -> "dont")
  tolower(words) # lowercase so "As" and "as" count as the same word; returned visibly
}
#read about the scan function here:
#1. http://www.ats.ucla.edu/stat/r/modules/raw_data.htm
#2. R help
#3. for gsub: http://stackoverflow.com/questions/11498157/convert-punctuation-to-space
#########################
#Takes output from readPaper and a word (or a vector of words) and gives the frequency the frequency of the word
#Input: A filelist and a word
#Output: The frequency of the number
# Count the occurrences of `word` in the word vector `filelist`.
# Returns the number of exact matches.
wordCount <- function(filelist, word){
  hits <- filelist == word  # elementwise comparison against every word
  sum(hits)                 # each TRUE counts as 1
}
#Read about functions here:
#1. http://stackoverflow.com/questions/1923273/counting-the-number-of-elements-with-the-values-of-x-in-a-vector
#########################
#Takes a word and output from readPaper and gives the starting character position of that word indexed from the beginning of the paper.
#Input: A filelist and a word
#Output: A vector of the index of the beginning of each word placement
# Locate every occurrence of `word` in `filelist`.
# Returns an integer vector of positions (empty when the word never occurs).
wordPlacement <- function(filelist, word){
  hits <- filelist == word  # logical mask of exact matches
  which(hits)               # indices of the TRUE entries
}
#read about the which function here:
#1. R help
#########################
#Generates a frequency histogram of the 10 most frequent words in a file, can change the number of words most frequent words
#Input: A filelist and the top X words, default 10
#Output: An image of a histrogram
# Draw a barplot of the `top` most frequent words in `filelist`.
# filelist: character vector of words (output of readPaper).
# top:      how many of the most frequent words to show (default 10).
# Called for its side effect: draws the barplot with rotated word labels.
wordHist <- function(filelist, top = 10){
  pap <- as.data.frame(table(filelist)) #Tabulate every distinct word with its count.
  colnames(pap) <- c("word", "freq") #Rename the table() columns to something descriptive.
  arg <- order(pap$freq, decreasing = TRUE) #Row order: most to least frequent.
  pap <- pap[arg, ] #Sort the data frame by descending frequency.
  pap <- head(pap, n=top) #Keep only the `top` most frequent words.
  x <- barplot(pap$freq, names = pap$word, col = "royalblue", space = 1, #Bar heights = frequencies; x receives the bar midpoints.
               xaxt="n",xlab="", ylab = "Frequency", main = "Word Frequencies")
  labels <- pap$word #Word labels for the x axis.
  text(x, x=x-.5, y=-3.5, labels = labels, srt = 45, pos = 1, xpd = TRUE) #Draw labels rotated 45 degrees below the axis (xpd=TRUE allows drawing outside the plot region).
}
#Sources:
#1. http://www.dummies.com/how-to/content/how-to-sort-data-frames-in-r.html
#2. http://haotu.wordpress.com/2013/07/09/angle-axis-x-labels-on-r-plot/
#3. http://stackoverflow.com/questions/20241388/rotate-x-axis-labels-45-degrees-on-grouped-bar-plot-r
#########################
#Given a word, give the frequency of the words that follow it.
#Input: A filelist and a word
#Output: Vector of counts
# Tabulate the words that immediately follow `word` in `filelist`.
# Input:  filelist - character vector of words; word - word of interest
# Output: table of follower words, sorted by increasing count.
# An occurrence of `word` as the very last token has no follower and is dropped
# (the original grew the result one element at a time and relied on table()
# silently discarding the NA produced in that case; now vectorized and explicit).
nextWord <- function(filelist, word){
  idx <- which(filelist == word)             # positions of the word of interest
  followers <- filelist[idx + 1]             # word after each occurrence; NA past the end
  followers <- followers[!is.na(followers)]  # drop end-of-text occurrences
  sort(table(followers))                     # counts, ascending
}
}
#Sources:
#1. Help with graduate student: Daniel Katz in SNRE.
#########################
# Given a word, give the freqency of words that preceed it
#Input: A filelist and a word
#Output: Vector of Counts
# Tabulate the words that immediately precede `word` in `filelist`.
# Input:  filelist - character vector of words; word - word of interest
# Output: table of preceding words, sorted by increasing count.
# Bug fix vs original: when `word` occurred at position 1 the loop assigned the
# zero-length value filelist[0] to a single element and raised
# "replacement has length zero". Vectorized subsetting drops index 0 silently.
previousWord <- function(filelist, word){
  idx <- which(filelist == word)   # positions of the word of interest
  preceding <- filelist[idx - 1]   # word before each occurrence; index 0 drops out
  sort(table(preceding))           # counts, ascending
}
}
#Sources:
#1. Help from graduate student: Daniel Katz in SNRE.
#########################
# This function takes a readPaper output filelist and outputs a histogram of the frequency of each letter in the alphabet
#Input: A filelist
#Output: Histogram of letter frequency
# Draw a barplot of the frequency of each character (letter) in `filelist`.
# filelist: character vector of words (output of readPaper).
# Called for its side effect: draws the barplot.
surpriseMe <- function(filelist){
  letter_list <- toString(filelist) #Collapse the word vector into one comma-separated string.
  letter_list <- gsub("[[:punct:]]", "", letter_list) #Remove punctuation (incl. the commas toString added).
  letter_list <- gsub("[[:space:]]", "", letter_list) #Remove all whitespace.
  letter_list <- gsub("[[:digit:]]", "", letter_list) #Remove digits.
  letter_list <- tolower(letter_list) #Normalize to lowercase.
  oop <- strsplit(letter_list, split = "") #Split into single characters (a one-element list).
  toop <- as.data.frame(table(oop)) #table() treats each list component as a factor, giving per-letter counts.
  x <- barplot(toop$Freq, names.arg = toop$oop, col = "violetred", xaxt="n",
               xlab="Letter", ylab = "Frequency", main = "Letter Frequencies in filelist") #x receives the bar midpoints.
  labels <- toop$oop #Letter labels for the x axis.
  text(x, x=x, y=-3.5, labels = labels, srt = 0, pos = 1, xpd = TRUE) #Draw labels just below the axis (xpd=TRUE permits drawing outside the plot region).
}
}
#########################
###########################################################################
#END
|
##############
##### UI #####
##############
# Defining Sidebar ---------------------------------
# Sidebar: static dataset description plus dataset/source references (no inputs).
sidebar <- dashboardSidebar(
  p("\"Improving health is central to the Millennium Development Goals,
    and the public sector is the main provider of health care in developing countries.
    To reduce inequities, many countries have emphasized primary health care, including
    immunization, sanitation, access to safe drinking water, and safe motherhood initiatives.
    Data here cover health systems, disease prevention, reproductive health, nutrition,
    and population dynamics. Data are from the United Nations Population Division,
    World Health Organization, United Nations Children's Fund, the Joint United Nations
    Programme on HIV/AIDS, and various other sources.\"",
    class = "form-group shiny-input-container"
  ),
  HTML("<p class = 'form-group shiny-input-container'><b> Dataset: </b>
       Health World Development Indicators</p>"),
  HTML("<p class = 'form-group shiny-input-container'><b> Source: </b>
       <a href = 'http://data.worldbank.org/' target='_blank'>World Bank</a></p>"),
  width = 300
)
# Defining the body ---------------------------------
body <- dashboardBody(
  # Style: inline CSS overriding the skin-yellow header logo colours (passed verbatim).
  tags$head(tags$style(HTML("
        .skin-yellow .main-header .logo {
        background-color: #f39c12;
        }
        .skin-yellow .main-header .logo:hover {
        background-color: #f39c12;
        }
        "))),
  # Plot Output: a single plotly plot rendered by the server under the id "plot".
  plotlyOutput("plot",
               height = "900px",
               width = "1100px")
)
# Constructing the UI: a tabset holding the dashboard app and a documentation tab.
fluidPage(
  tabsetPanel(
    # App Tab ---------
    tabPanel("App",
             dashboardPage(
               # Header
               dashboardHeader(title = paste0("World Development Indicators (Health)"),
                               titleWidth = 450),
               # Sidebar
               sidebar,
               # Body
               body,
               skin = "yellow"
             )
    ),
    # Documentation Tab (helper defined elsewhere in the project)
    documentation_tab()
  )
)
| /ui.R | no_license | aridhia/demo-world-development-indicators | R | false | false | 1,958 | r | ##############
##### UI #####
##############
# Defining Sidebar ---------------------------------
# Sidebar: static dataset description plus dataset/source references (no inputs).
sidebar <- dashboardSidebar(
  p("\"Improving health is central to the Millennium Development Goals,
    and the public sector is the main provider of health care in developing countries.
    To reduce inequities, many countries have emphasized primary health care, including
    immunization, sanitation, access to safe drinking water, and safe motherhood initiatives.
    Data here cover health systems, disease prevention, reproductive health, nutrition,
    and population dynamics. Data are from the United Nations Population Division,
    World Health Organization, United Nations Children's Fund, the Joint United Nations
    Programme on HIV/AIDS, and various other sources.\"",
    class = "form-group shiny-input-container"
  ),
  HTML("<p class = 'form-group shiny-input-container'><b> Dataset: </b>
       Health World Development Indicators</p>"),
  HTML("<p class = 'form-group shiny-input-container'><b> Source: </b>
       <a href = 'http://data.worldbank.org/' target='_blank'>World Bank</a></p>"),
  width = 300
)
# Defining the body ---------------------------------
body <- dashboardBody(
  # Style: inline CSS overriding the skin-yellow header logo colours (passed verbatim).
  tags$head(tags$style(HTML("
        .skin-yellow .main-header .logo {
        background-color: #f39c12;
        }
        .skin-yellow .main-header .logo:hover {
        background-color: #f39c12;
        }
        "))),
  # Plot Output: a single plotly plot rendered by the server under the id "plot".
  plotlyOutput("plot",
               height = "900px",
               width = "1100px")
)
# Constructing the UI: a tabset holding the dashboard app and a documentation tab.
fluidPage(
  tabsetPanel(
    # App Tab ---------
    tabPanel("App",
             dashboardPage(
               # Header
               dashboardHeader(title = paste0("World Development Indicators (Health)"),
                               titleWidth = 450),
               # Sidebar
               sidebar,
               # Body
               body,
               skin = "yellow"
             )
    ),
    # Documentation Tab (helper defined elsewhere in the project)
    documentation_tab()
  )
)
|
#' changes the elements of basic blocks used by rejustify API
#'
#' @description The purpose of the function is to provide a possibly seamless
#' way of adjusting blocks used in communication with rejustify API, in particular with the
#' \code{fill} endpoint. The blocks include: data structure (\code{structure}), default values
#' (\code{default}) and matching keys (\code{keys}). Items may only be deleted for specific matching
#' dimensions proposed by \code{keys}, for the two other blocks it is possible only to change the relevant
#' values.
#'
#' Upon changes in \code{structure}, the corresponding \code{p_class} or \code{p_data} will be set to -1.
#' This is the way to inform API that the original \code{structure} has changed and, if \code{learn}
#' option is enabled, the new values will be used to train the algorithms in the back end. If \code{learn}
#' is disabled, information will not be stored by the API but the changes will be recognized in the current API call.
#'
#' @param block A data structure to be changed. Currently supported structures include \code{structure},
#' \code{default} and \code{keys}.
#' @param column The data column (or raw in case of horizontal datasets) to be adjusted. Vector values are supported.
#' @param id The identifier of the specific element to be changed. Currently it should be only used in \code{structure}
#' with multi-line headers (see \code{analyze} for details).
#' @param items Specific items to be changed with the new values to be assigned. If the values are set to \code{NA}, \code{NULL}
#' or \code{""}, the specific item will be removed from the block (only for \code{keys}). Items may be multi-valued.
#'
#' @return adjusted structure of the \code{df} data set
#'
#' @examples
#' #API setup
#' setCurl()
#'
#' #register token/email
#' register(token = "YOUR_TOKEN", email = "YOUR_EMAIL")
#'
#' #sample data set
#' df <- data.frame(year = c("2009", "2010", "2011"),
#' country = c("Poland", "Poland", "Poland"),
#' `gross domestic product` = c(NA, NA, NA),
#' check.names = FALSE, stringsAsFactors = FALSE)
#'
#' #endpoint analyze
#' st <- analyze(df)
#'
#' #adjust structures
#' st <- adjust(st, id = 2, items = list('feature' = 'country'))
#' st <- adjust(st, column = 3, items = list('provider' = 'IMF', 'table' = 'WEO'))
#'
#' #endpoint fill
#' df1 <- fill(df, st)
#'
#' #adjust default values
#' default <- adjust(df1$default, column = 3, items = list('Time Dimension' = '2013') )
#'
#' #adjust keys
#' keys <- adjust(df1$keys, column = 3, items = list('id.x' = c(3,1,2) , 'id.y' = c(1,2,3) ) )
#' keys <- adjust(df1$keys, column = 3, items = list('id.x' = 3 , 'id.y' = NA ) )
#'
#' @export
adjust <- function(block, column = NULL, id = NULL, items = NULL) {
  index <- NULL
  type <- "undefined"

  ## ---- Detect the block type from its element names ----
  # `structure`: the data frame returned by analyze().
  if( all( names(block) %in% c("id", "column", "name", "empty", "class", "feature", "cleaner", "format", "p_class", "provider", "table", "p_data") ) ) {
    type <- "structure"
  }
  # `default`: per-column default dimension values.
  if( all( names(block) %in% c("column.id.x", "default") ) ) {
    type <- "default"
  }
  # `keys`: an (unnamed) list of per-column matching descriptors.
  if( (!is.null(names(block[[1]])) & all( names(block[[1]]) %in% c("id.x", "name.x", "id.y", "name.y", "class", "method", "column.id.x", "column.name.x") ) ) | is.null( names(block) ) ) {
    type <- "keys"
  }

  ## ---- Adjust a `structure` block ----
  if (type == "structure") {
    if (!is.null(items) & !is.null(id)) {
      index <- block$id %in% id
    }
    if (!is.null(items) & !is.null(column)) {
      if (is.numeric(column)) {           # column given by position
        index <- block$column %in% column
      } else {                            # column given by name
        index <- block$name %in% column
      }
    }
    tryCatch({
      block[index, names(items)] <- items
      # Flag user overrides so the API re-evaluates the affected dimensions.
      if (sum(names(items) %in% c('provider', 'table')) > 0) {
        block[index, 'p_data'] <- -1
      }
      if (sum(names(items) %in% c('class', 'feature', 'cleaner', 'format')) > 0) {
        block[index, 'p_class'] <- -1
      }
    }, error = function(e) {
      stop(
        paste0(
          "Couldn't change the values."   # typo fix: was "Coulnd't"
        ),
        call. = FALSE
      )
    })
  }

  ## ---- Adjust a `default` block ----
  if (type == "default") {
    if (!is.null(items) & !is.null(column)) {
      if (is.numeric(column)) {           # column given by position
        index <- which(unlist(block$column.id.x) %in% column)
      } else {                            # column given by name
        index <- which(unlist(block$column.name.x) %in% column)
      }
    }
    tryCatch({
      for (i in index) {
        # Dimension names come from the row names; fall back to row numbers.
        # seq_len() (not seq(1, n)) is safe for zero-row frames.
        if (is.null(rownames(block$default[[i]]))) {
          rnames <- seq_len(nrow(block$default[[i]]))
        } else {
          rnames <- rownames(block$default[[i]])
        }
        block$default[[i]][rnames %in% names(items), 'code_default'] <- unlist(items)
        block$default[[i]][rnames %in% names(items), 'label_default'] <- NA # blank label (re-filled by the API)
      }
    }, error = function(e) {
      stop(
        paste0(
          "Couldn't change the values."   # typo fix: was "Coulnd't"
        ),
        call. = FALSE
      )
    })
  }

  ## ---- Adjust a `keys` block ----
  if (type == "keys") {
    id_xy <- FALSE
    method_xy <- FALSE
    class_xy <- FALSE

    # Consistency checks: id.x/id.y (and optional method/class) must be parallel.
    if (!is.null(items$id.x) & !is.null(items$id.y)) {
      if (length(items$id.x) == length(items$id.y)) {
        id_xy <- TRUE
      } else {
        stop(
          paste0(
            "Item ids have different lengths."
          ) )
      }
    }
    if (!is.null(items$method)) {
      if (length(items$method) == length(items$id.y)) {
        method_xy <- TRUE
      } else {
        stop(
          paste0(
            "Methods have inconsistent length."
          ) )
      }
    }
    if (!is.null(items$class)) {
      if (length(items$class) == length(items$id.y)) {
        class_xy <- TRUE
      } else {
        stop(
          paste0(
            "Classes have inconsistent length."
          ) )
      }
    }

    tryCatch({
      block <- lapply(block, FUN = function(x) {
        if (x$column.id.x != column) {
          return(x)                       # not the requested column: pass through unchanged
        }
        if (id_xy) {
          for (i in seq_along(items$id.x)) {       # seq_along() is safe for empty input
            pos <- which(x$id.x == items$id.x[[i]])
            if (length(pos) > 0) {
              # id.x already defined: remove it (id.y missing) or update it.
              if (isMissing(items$id.y[[i]])) {
                x$id.y   <- x$id.y[-pos]
                x$name.y <- x$name.y[-pos]
                x$method <- x$method[-pos]
                x$class  <- x$class[-pos]
                x$name.x <- x$name.x[-pos]
                x$id.x   <- x$id.x[-pos]
                # (The original then ran a class assignment on an empty index,
                # a no-op; it is simply omitted here.)
              } else {
                x$id.y[pos]   <- items$id.y[[i]]
                x$name.y[pos] <- NA       # label will be re-filled by the API
                if (method_xy) { x$method[pos] <- items$method[[i]] }
                else           { x$method[pos] <- 'synonym-proximity-matching' }
                if (class_xy)  { x$class[pos] <- items$class[[i]] }
                else           { x$class[pos] <- 'general' }
              }
            } else {
              # id.x not present yet: append a new matching pair (unless removal requested).
              if (!isMissing(items$id.y[[i]])) {
                x$id.x   <- c(x$id.x, items$id.x[[i]])
                x$id.y   <- c(x$id.y, items$id.y[[i]])
                x$name.x <- c(x$name.x, NA)
                x$name.y <- c(x$name.y, NA)
                if (method_xy) { x$method <- c(x$method, items$method[[i]]) }
                else           { x$method <- c(x$method, 'synonym-proximity-matching') }
                if (class_xy)  { x$class <- c(x$class, items$class[[i]]) }
                else           { x$class <- c(x$class, 'general') }
              }
            }
          }
        }
        return(x)
      })
    }, error = function(e) {
      stop(
        paste0(
          "Couldn't change the values."   # typo fix: was "Coulnd't"
        ),
        call. = FALSE
      )
    })
  }

  return(block)
}
| /R/adjust.R | no_license | rejustify/r-package | R | false | false | 8,462 | r | #' changes the elements of basic blocks used by rejustify API
#'
#' @description The purpose of the function is to provide a possibly seamless
#' way of adjusting blocks used in communication with rejustify API, in particular with the
#' \code{fill} endpoint. The blocks include: data structure (\code{structure}), default values
#' (\code{default}) and matching keys (\code{keys}). Items may only be deleted for specific matching
#' dimensions proposed by \code{keys}, for the two other blocks it is possible only to change the relevant
#' values.
#'
#' Upon changes in \code{structure}, the corresponding \code{p_class} or \code{p_data} will be set to -1.
#' This is the way to inform API that the original \code{structure} has changed and, if \code{learn}
#' option is enabled, the new values will be used to train the algorithms in the back end. If \code{learn}
#' is disabled, information will not be stored by the API but the changes will be recognized in the current API call.
#'
#' @param block A data structure to be changed. Currently supported structures include \code{structure},
#' \code{default} and \code{keys}.
#' @param column The data column (or raw in case of horizontal datasets) to be adjusted. Vector values are supported.
#' @param id The identifier of the specific element to be changed. Currently it should be only used in \code{structure}
#' with multi-line headers (see \code{analyze} for details).
#' @param items Specific items to be changed with the new values to be assigned. If the values are set to \code{NA}, \code{NULL}
#' or \code{""}, the specific item will be removed from the block (only for \code{keys}). Items may be multi-valued.
#'
#' @return adjusted structure of the \code{df} data set
#'
#' @examples
#' #API setup
#' setCurl()
#'
#' #register token/email
#' register(token = "YOUR_TOKEN", email = "YOUR_EMAIL")
#'
#' #sample data set
#' df <- data.frame(year = c("2009", "2010", "2011"),
#' country = c("Poland", "Poland", "Poland"),
#' `gross domestic product` = c(NA, NA, NA),
#' check.names = FALSE, stringsAsFactors = FALSE)
#'
#' #endpoint analyze
#' st <- analyze(df)
#'
#' #adjust structures
#' st <- adjust(st, id = 2, items = list('feature' = 'country'))
#' st <- adjust(st, column = 3, items = list('provider' = 'IMF', 'table' = 'WEO'))
#'
#' #endpoint fill
#' df1 <- fill(df, st)
#'
#' #adjust default values
#' default <- adjust(df1$default, column = 3, items = list('Time Dimension' = '2013') )
#'
#' #adjust keys
#' keys <- adjust(df1$keys, column = 3, items = list('id.x' = c(3,1,2) , 'id.y' = c(1,2,3) ) )
#' keys <- adjust(df1$keys, column = 3, items = list('id.x' = 3 , 'id.y' = NA ) )
#'
#' @export
# Adjust values stored inside a metadata "block".
#
# The block's type is inferred from its element names, then `items` is
# written into the matching rows/entries:
#   * "structure": a data.frame-like table of column descriptors. Rows are
#     selected by `id` or by `column` (numeric -> column id, otherwise
#     column name) and the named `items` overwrite the matching cells.
#   * "default":   a list mapping column ids (`column.id.x`) to per-column
#     default tables. `items` is named by row (row name, or row number when
#     the table has no row names) and overwrites `code_default`; the
#     corresponding `label_default` is blanked (re-filled later by the API).
#   * "keys":      an unnamed list of key-matching records. For the record
#     whose `column.id.x` equals `column`, `items` (id.x/id.y plus optional
#     method/class vectors) updates, removes or appends individual mappings.
#
# Args:
#   block  - the block to adjust (data.frame or list, see above).
#   column - column selector: numeric column id or character column name.
#   id     - row-id selector (only used for "structure" blocks).
#   items  - named list of values to write; interpretation depends on type.
# Returns the adjusted block, or stops (no call in message) on failure.
adjust = function(block, column = NULL, id = NULL, items = NULL) {
  index <- NULL
  type <- "undefined"

  # --- Infer the block type from its element names -------------------------
  # "structure": every element name belongs to the column-descriptor schema.
  if (all(names(block) %in% c("id", "column", "name", "empty", "class",
                              "feature", "cleaner", "format", "p_class",
                              "provider", "table", "p_data"))) {
    type <- "structure"
  }
  # "default": only the column-id / default-table pair is present.
  if (all(names(block) %in% c("column.id.x", "default"))) {
    type <- "default"
  }
  # "keys": an unnamed list whose first record carries key-matching fields.
  if ((!is.null(names(block[[1]])) &&
       all(names(block[[1]]) %in% c("id.x", "name.x", "id.y", "name.y",
                                    "class", "method", "column.id.x",
                                    "column.name.x"))) ||
      is.null(names(block))) {
    type <- "keys"
  }

  # Shared error handler for all tryCatch blocks below.
  change_failed <- function(e) {
    stop("Couldn't change the values.", call. = FALSE)
  }

  # --- Adjust a "structure" block ------------------------------------------
  if (type == "structure") {
    if (!is.null(items) && !is.null(id)) {
      index <- block$id %in% id
    }
    if (!is.null(items) && !is.null(column)) {
      if (is.numeric(column)) {      # numeric selector -> match by column id
        index <- block$column %in% column
      } else {                       # otherwise match by column name
        index <- block$name %in% column
      }
    }
    tryCatch({
      block[index, names(items)] <- items
      # Changing the data source invalidates the cached data flag.
      if (any(names(items) %in% c("provider", "table"))) {
        block[index, "p_data"] <- -1
      }
      # Changing classification fields invalidates the cached class flag.
      if (any(names(items) %in% c("class", "feature", "cleaner", "format"))) {
        block[index, "p_class"] <- -1
      }
    }, error = change_failed)
  }

  # --- Adjust a "default" block (per-column default labels) ----------------
  if (type == "default") {
    if (!is.null(items) && !is.null(column)) {
      if (is.numeric(column)) {      # numeric selector -> match by column id
        index <- which(unlist(block$column.id.x) %in% column)
      } else {
        index <- which(unlist(block$column.name.x) %in% column)
      }
    }
    tryCatch({
      for (i in index) {
        # Fall back to row numbers when the default table has no row names.
        if (is.null(rownames(block$default[[i]]))) {
          rnames <- seq_len(nrow(block$default[[i]]))
        } else {
          rnames <- rownames(block$default[[i]])
        }
        block$default[[i]][rnames %in% names(items), "code_default"] <- unlist(items)
        # Blank the label; it will be filled in again by the API.
        block$default[[i]][rnames %in% names(items), "label_default"] <- NA
      }
    }, error = change_failed)
  }

  # --- Adjust a "keys" block (key mappings between two tables) -------------
  if (type == "keys") {
    id_xy <- FALSE
    method_xy <- FALSE
    class_xy <- FALSE
    # Consistency checks: paired vectors inside `items` must align in length.
    if (!is.null(items$id.x) && !is.null(items$id.y)) {
      if (length(items$id.x) == length(items$id.y)) {
        id_xy <- TRUE
      } else {
        stop("Item ids have different lengths.")
      }
    }
    if (!is.null(items$method)) {
      if (length(items$method) == length(items$id.y)) {
        method_xy <- TRUE
      } else {
        stop("Methods have inconsistent length.")
      }
    }
    if (!is.null(items$class)) {
      if (length(items$class) == length(items$id.y)) {
        class_xy <- TRUE
      } else {
        stop("Classes have inconsistent length.")
      }
    }
    tryCatch({
      block <- lapply(block, FUN = function(x) {
        if (x$column.id.x == column) {
          if (id_xy) {               # only rewrite mappings when ids align
            for (i in seq_along(items$id.x)) {
              if (sum(items$id.x[[i]] == x$id.x) > 0) {
                # id.x already mapped: update the mapping, or remove it when
                # the replacement id.y is missing.
                sel <- which(x$id.x == items$id.x[[i]])
                if (isMissing(items$id.y[[i]])) {
                  # id.x is dropped last so `sel` (computed on the original
                  # vector) stays valid for every field.
                  x$id.y   <- x$id.y[-sel]
                  x$name.y <- x$name.y[-sel]
                  x$method <- x$method[-sel]
                  x$class  <- x$class[-sel]
                  x$name.x <- x$name.x[-sel]
                  x$id.x   <- x$id.x[-sel]
                } else {
                  x$id.y[sel] <- items$id.y[[i]]
                  x$name.y[sel] <- NA  # blank name (re-resolved later)
                  x$method[sel] <- if (method_xy) items$method[[i]]
                                   else "synonym-proximity-matching"
                }
                # Recompute on the possibly shrunk vector: after a removal
                # this selects integer(0) and the assignment is a no-op.
                sel <- which(x$id.x == items$id.x[[i]])
                x$class[sel] <- if (class_xy) items$class[[i]] else "general"
              } else if (!isMissing(items$id.y[[i]])) {
                # id.x not mapped yet: append a brand-new mapping.
                x$id.x   <- c(x$id.x, items$id.x[[i]])
                x$id.y   <- c(x$id.y, items$id.y[[i]])
                x$name.x <- c(x$name.x, NA)
                x$name.y <- c(x$name.y, NA)
                x$method <- c(x$method, if (method_xy) items$method[[i]]
                                        else "synonym-proximity-matching")
                x$class  <- c(x$class, if (class_xy) items$class[[i]]
                                       else "general")
              }
            }
          }
        }
        x
      })
    }, error = change_failed)
  }

  return(block)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pylogger.R
\name{logger.fatal}
\alias{logger.fatal}
\title{Python-style logging statements}
\usage{
logger.fatal(msg, ...)
}
\arguments{
\item{msg}{Message with format strings applied to additional arguments.}
\item{\dots}{Additional arguments to be formatted.}
}
\value{
No return value.
}
\description{
After initializing the level-specific log files with \code{logger.setup(...)},
this function will generate \code{FATAL} level log statements.
}
\note{
All functionality is built on top of the excellent \pkg{futile.logger} package.
}
\examples{
\dontrun{
# Only save three log files
logger.setup(
debugLog = "debug.log",
infoLog = "info.log",
errorLog = "error.log"
)
# But allow log statements at all levels within the code
logger.trace("trace statement #\%d", 1)
logger.debug("debug statement")
logger.info("info statement \%s \%s", "with", "arguments")
logger.warn("warn statement \%s", "about to try something dumb")
result <- try(1/"a", silent=TRUE)
logger.error("error message: \%s", geterrmessage())
logger.fatal("fatal statement \%s", "THE END")
}
}
\seealso{
\code{\link{logger.setup}}
}
| /man/logger.fatal.Rd | no_license | cran/MazamaCoreUtils | R | false | true | 1,187 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pylogger.R
\name{logger.fatal}
\alias{logger.fatal}
\title{Python-style logging statements}
\usage{
logger.fatal(msg, ...)
}
\arguments{
\item{msg}{Message with format strings applied to additional arguments.}
\item{\dots}{Additional arguments to be formatted.}
}
\value{
No return value.
}
\description{
After initializing the level-specific log files with \code{logger.setup(...)},
this function will generate \code{FATAL} level log statements.
}
\note{
All functionality is built on top of the excellent \pkg{futile.logger} package.
}
\examples{
\dontrun{
# Only save three log files
logger.setup(
debugLog = "debug.log",
infoLog = "info.log",
errorLog = "error.log"
)
# But allow log statements at all levels within the code
logger.trace("trace statement #\%d", 1)
logger.debug("debug statement")
logger.info("info statement \%s \%s", "with", "arguments")
logger.warn("warn statement \%s", "about to try something dumb")
result <- try(1/"a", silent=TRUE)
logger.error("error message: \%s", geterrmessage())
logger.fatal("fatal statement \%s", "THE END")
}
}
\seealso{
\code{\link{logger.setup}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simBuyseTest.R
\name{Simulation function}
\alias{Simulation function}
\alias{simBuyseTest}
\title{Simulation of data for the BuyseTest}
\usage{
simBuyseTest(
n.T,
n.C = NULL,
argsBin = list(),
argsCont = list(),
argsTTE = list(),
n.strata = NULL,
names.strata = NULL,
format = "data.table",
latent = FALSE
)
}
\arguments{
\item{n.T}{[integer, >0] number of patients in the treatment arm}
\item{n.C}{[integer, >0] number of patients in the control arm}
\item{argsBin}{[list] arguments to be passed to \code{simBuyseTest_bin}. They specify the distribution parameters of the binary endpoints.}
\item{argsCont}{[list] arguments to be passed to \code{simBuyseTest_continuous}. They specify the distribution parameters of the continuous endpoints.}
\item{argsTTE}{[list] arguments to be passed to \code{simBuyseTest_TTE}. They specify the distribution parameters of the time to event endpoints.}
\item{n.strata}{[integer, >0] number of strata. \code{NULL} indicates no strata.}
\item{names.strata}{[character vector] name of the strata variables. Must have same length as \code{n.strata}.}
\item{format}{[character] the format of the output. Can be \code{"data.table"}, \code{"data.frame"} or \code{"matrix"}.}
\item{latent}{[logical] If \code{TRUE} also export the latent variables (e.g. censoring times or event times).}
}
\description{
Simulate binary, continuous or time to event data, possibly with strata.
Outcomes are simulated independently of each other and independently of the strata variable.
}
\details{
This function is built upon the \code{lvm} and \code{sim} functions from the lava package.
Arguments in the list \code{argsBin}:
\itemize{
\item\code{p.T} probability of event of each endpoint (binary endpoint, treatment group). \cr
\item\code{p.C} same as \code{p.T} but for the control group. \cr
\item\code{name} names of the binary variables. \cr
}
Arguments in the list \code{argsCont}:
\itemize{
\item\code{mu.T} expected value of each endpoint (continuous endpoint, treatment group). \cr
\item\code{mu.C} same as \code{mu.T} but for the control group. \cr
\item\code{sigma.T} standard deviation of the values of each endpoint (continuous endpoint, treatment group). \cr
\item\code{sigma.C} same as \code{sigma.T} but for the control group. \cr
\item\code{name} names of the continuous variables.
}
Arguments in the list \code{argsTTE}:
\itemize{
\item\code{CR} should competing risks be simulated? \cr
\item\code{rates.T} hazard corresponding to each endpoint (time to event endpoint, treatment group). \cr
\item\code{rates.C} same as \code{rates.T} but for the control group. \cr
\item\code{rates.CR} same as \code{rates.T} but for the competing event (same in both groups). \cr
\item\code{rates.Censoring.T} same as \code{rates.T} but for the censoring. \cr
\item\code{rates.Censoring.C} same as \code{rates.C} but for the censoring. \cr
\item\code{name} names of the time to event variables. \cr
\item\code{nameCensoring} names of the event type indicators. \cr
}
}
\examples{
library(data.table)
n <- 1e2
#### default option ####
simBuyseTest(n)
## with a strata variable having 5 levels
simBuyseTest(n, n.strata = 5)
## with a strata variable named grade
simBuyseTest(n, n.strata = 5, names.strata = "grade")
## several strata variables
simBuyseTest(1e3, n.strata = c(2,4), names.strata = c("Gender","AgeCategory"))
#### only binary endpoints ####
args <- list(p.T = c(3:5/10))
simBuyseTest(n, argsBin = args, argsCont = NULL, argsTTE = NULL)
#### only continuous endpoints ####
args <- list(mu.T = c(3:5/10), sigma.T = rep(1,3))
simBuyseTest(n, argsBin = NULL, argsCont = args, argsTTE = NULL)
#### only TTE endpoints ####
args <- list(rates.T = c(3:5/10), rates.Censoring.T = rep(1,3))
simBuyseTest(n, argsBin = NULL, argsCont = NULL, argsTTE = args)
}
\author{
Brice Ozenne
}
\keyword{function}
\keyword{simulations}
| /fuzzedpackages/BuyseTest/man/simulation.Rd | no_license | akhikolla/testpackages | R | false | true | 4,073 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simBuyseTest.R
\name{Simulation function}
\alias{Simulation function}
\alias{simBuyseTest}
\title{Simulation of data for the BuyseTest}
\usage{
simBuyseTest(
n.T,
n.C = NULL,
argsBin = list(),
argsCont = list(),
argsTTE = list(),
n.strata = NULL,
names.strata = NULL,
format = "data.table",
latent = FALSE
)
}
\arguments{
\item{n.T}{[integer, >0] number of patients in the treatment arm}
\item{n.C}{[integer, >0] number of patients in the control arm}
\item{argsBin}{[list] arguments to be passed to \code{simBuyseTest_bin}. They specify the distribution parameters of the binary endpoints.}
\item{argsCont}{[list] arguments to be passed to \code{simBuyseTest_continuous}. They specify the distribution parameters of the continuous endpoints.}
\item{argsTTE}{[list] arguments to be passed to \code{simBuyseTest_TTE}. They specify the distribution parameters of the time to event endpoints.}
\item{n.strata}{[integer, >0] number of strata. \code{NULL} indicates no strata.}
\item{names.strata}{[character vector] name of the strata variables. Must have same length as \code{n.strata}.}
\item{format}{[character] the format of the output. Can be \code{"data.table"}, \code{"data.frame"} or \code{"matrix"}.}
\item{latent}{[logical] If \code{TRUE} also export the latent variables (e.g. censoring times or event times).}
}
\description{
Simulate binary, continuous or time to event data, possibly with strata.
Outcomes are simulated independently of each other and independently of the strata variable.
}
\details{
This function is built upon the \code{lvm} and \code{sim} functions from the lava package.
Arguments in the list \code{argsBin}:
\itemize{
\item\code{p.T} probability of event of each endpoint (binary endpoint, treatment group). \cr
\item\code{p.C} same as \code{p.T} but for the control group. \cr
\item\code{name} names of the binary variables. \cr
}
Arguments in the list \code{argsCont}:
\itemize{
\item\code{mu.T} expected value of each endpoint (continuous endpoint, treatment group). \cr
\item\code{mu.C} same as \code{mu.T} but for the control group. \cr
\item\code{sigma.T} standard deviation of the values of each endpoint (continuous endpoint, treatment group). \cr
\item\code{sigma.C} same as \code{sigma.T} but for the control group. \cr
\item\code{name} names of the continuous variables.
}
Arguments in the list \code{argsTTE}:
\itemize{
\item\code{CR} should competing risks be simulated? \cr
\item\code{rates.T} hazard corresponding to each endpoint (time to event endpoint, treatment group). \cr
\item\code{rates.C} same as \code{rates.T} but for the control group. \cr
\item\code{rates.CR} same as \code{rates.T} but for the competing event (same in both groups). \cr
\item\code{rates.Censoring.T} same as \code{rates.T} but for the censoring. \cr
\item\code{rates.Censoring.C} same as \code{rates.C} but for the censoring. \cr
\item\code{name} names of the time to event variables. \cr
\item\code{nameCensoring} names of the event type indicators. \cr
}
}
\examples{
library(data.table)
n <- 1e2
#### default option ####
simBuyseTest(n)
## with a strata variable having 5 levels
simBuyseTest(n, n.strata = 5)
## with a strata variable named grade
simBuyseTest(n, n.strata = 5, names.strata = "grade")
## several strata variables
simBuyseTest(1e3, n.strata = c(2,4), names.strata = c("Gender","AgeCategory"))
#### only binary endpoints ####
args <- list(p.T = c(3:5/10))
simBuyseTest(n, argsBin = args, argsCont = NULL, argsTTE = NULL)
#### only continuous endpoints ####
args <- list(mu.T = c(3:5/10), sigma.T = rep(1,3))
simBuyseTest(n, argsBin = NULL, argsCont = args, argsTTE = NULL)
#### only TTE endpoints ####
args <- list(rates.T = c(3:5/10), rates.Censoring.T = rep(1,3))
simBuyseTest(n, argsBin = NULL, argsCont = NULL, argsTTE = args)
}
\author{
Brice Ozenne
}
\keyword{function}
\keyword{simulations}
|
# findTSS.R -- tally CAGE transcription start site (CTSS) tag counts from a
# BAM file and write them to a per-sample text file.
#
# Usage: Rscript findTSS.R <input.bam> <sampleName>

# The script uses the CAGEset S4 class and getCTSS()/CTSStagCount(); it
# cannot run without loading CAGEr (Bioconductor), which the original
# omitted.
library(CAGEr)

inputbam <- commandArgs(TRUE)[1]    # path to the input BAM file
sampleName <- commandArgs(TRUE)[2]  # sample label, also used for the output file name

# Build a CAGEset pointing at the BAM, annotated against hg19.
myCAGEset <- new("CAGEset",
                 genomeName = "BSgenome.Hsapiens.UCSC.hg19",
                 inputFiles = inputbam,
                 inputFilesType = "bam",
                 sampleLabels = c(sampleName))

# NOTE(review): getCTSS() is called for its effect on myCAGEset without
# reassignment -- presumably CAGEr updates the object in place; confirm
# against the installed CAGEr version.
getCTSS(myCAGEset)
ctss <- CTSStagCount(myCAGEset)

# Write the per-sample CTSS tag counts as whitespace-separated text.
outfile <- file.path("/home/si14w/gnearline/flu/txt",
                     paste0(sampleName, ".txt"))
write.table(ctss, file = outfile, quote = FALSE, row.names = FALSE)
| /flu/findTSS.R | no_license | sowmyaiyer/new_repo | R | false | false | 378 | r | inputbam <- commandArgs(TRUE)[1]
# Second command-line argument: sample label, also used to name the output
# file. (The first argument, the BAM path, is read into `inputbam` on the
# preceding line of this script.)
sampleName <- commandArgs(TRUE)[2]
# Build a CAGEset for the BAM annotated against hg19.
# NOTE(review): CAGEset/getCTSS/CTSStagCount come from the CAGEr package,
# which is never loaded here -- the script presumably relies on it being
# attached by the caller; confirm.
myCAGEset <- new("CAGEset", genomeName="BSgenome.Hsapiens.UCSC.hg19", inputFiles=inputbam, inputFilesType="bam",sampleLabels=c(sampleName))
# Called for its effect on myCAGEset (no reassignment) -- presumably CAGEr
# updates the object in place; TODO confirm against the installed version.
getCTSS(myCAGEset)
# Per-sample CTSS tag counts.
ctss <- CTSStagCount(myCAGEset)
# Write the counts as whitespace-separated text named after the sample.
write.table(ctss, file=paste("/home/si14w/gnearline/flu/txt/",sampleName,".txt",sep=""), quote=FALSE, row.names=FALSE)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{findOutliers}
\alias{findOutliers}
\title{Calculates potential outliers based on external studentized residuals}
\usage{
findOutliers(modelReturn, localDT, transformResponse = "lognormal")
}
\arguments{
\item{modelReturn}{list returned from censReg}
\item{localDT}{DTframe that includes all response and predictor variables}
\item{transformResponse}{string can be "normal" or "lognormal", perhaps try to generalize this more in future}
}
\value{
outlier vector of index numbers
}
\description{
Find index of outliers using external studentized residuals. Outliers are values that have
external studentized residuals greater than 3 or less than negative 3.
}
\examples{
DTComplete <- StLouisDT
UV <- StLouisUV
QWcodes <- StLouisQWcodes
siteINFO <- StLouisInfo
response <- QWcodes$colName[1]
DT <- DTComplete[c(response,getPredictVariables(names(UV)), "decYear","sinDY","cosDY","datetime")]
DT <- na.omit(DT)
kitchenSink <- createFullFormula(DT,response)
returnPrelim <- prelimModelDev(DT,response,kitchenSink)
modelReturn <- returnPrelim$DT.mod
outlierIndex <- findOutliers(modelReturn,DT)
}
\keyword{residuals}
\keyword{studentized}
| /man/findOutliers.Rd | permissive | sf99167/GSqwsr | R | false | false | 1,195 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{findOutliers}
\alias{findOutliers}
\title{Calculates potential outliers based on external studentized residuals}
\usage{
findOutliers(modelReturn, localDT, transformResponse = "lognormal")
}
\arguments{
\item{modelReturn}{list returned from censReg}
\item{localDT}{DTframe that includes all response and predictor variables}
\item{transformResponse}{string can be "normal" or "lognormal", perhaps try to generalize this more in future}
}
\value{
outlier vector of index numbers
}
\description{
Find index of outliers using external studentized residuals. Outliers are values that have
external studentized residuals greater than 3 or less than negative 3.
}
\examples{
DTComplete <- StLouisDT
UV <- StLouisUV
QWcodes <- StLouisQWcodes
siteINFO <- StLouisInfo
response <- QWcodes$colName[1]
DT <- DTComplete[c(response,getPredictVariables(names(UV)), "decYear","sinDY","cosDY","datetime")]
DT <- na.omit(DT)
kitchenSink <- createFullFormula(DT,response)
returnPrelim <- prelimModelDev(DT,response,kitchenSink)
modelReturn <- returnPrelim$DT.mod
outlierIndex <- findOutliers(modelReturn,DT)
}
\keyword{residuals}
\keyword{studentized}
|
\name{rmh.default}
\alias{rmh.default}
\title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.}
\description{
Generates a random point pattern, simulated from
a chosen point process model, using the Metropolis-Hastings
algorithm.
}
\usage{
\method{rmh}{default}(model, start=NULL,
control=default.rmhcontrol(model),
\dots,
nsim=1, drop=TRUE, saveinfo=TRUE,
verbose=TRUE, snoop=FALSE)
}
\arguments{
\item{model}{Data specifying the point process model
that is to be simulated.
}
\item{start}{Data determining the initial state of
the algorithm.
}
\item{control}{Data controlling the iterative behaviour
and termination of the algorithm.
}
\item{\dots}{
Further arguments passed to \code{\link{rmhcontrol}}
or to trend functions in \code{model}.
}
\item{nsim}{
Number of simulated point patterns that should be generated.
}
\item{drop}{
Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
result will be a point pattern, rather than a list
containing a single point pattern.
}
\item{saveinfo}{
Logical value indicating whether to save auxiliary information.
}
\item{verbose}{
Logical value indicating whether to print progress reports.
}
\item{snoop}{
Logical. If \code{TRUE}, activate the visual debugger.
}
}
\value{
A point pattern (an object of class \code{"ppp"}, see
\code{\link{ppp.object}}) or a list of point patterns.
The returned value has an attribute \code{info} containing
modified versions of the arguments
\code{model}, \code{start}, and \code{control} which together specify
the exact simulation procedure. The \code{info} attribute can be
printed (and is printed automatically by \code{\link{summary.ppp}}).
For computational efficiency, the \code{info} attribute can be omitted
by setting \code{saveinfo=FALSE}.
The value of \code{\link[base:Random]{.Random.seed}} at the start
of the simulations is also saved and returned as an attribute
\code{seed}.
If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}),
the transition history of the algorithm
is saved, and returned as an attribute \code{history}. The transition
history is a data frame containing a factor \code{proposaltype}
identifying the proposal type (Birth, Death or Shift) and
a logical vector \code{accepted} indicating whether the proposal was
accepted.
The data frame also has columns \code{numerator}, \code{denominator}
which give the numerator and denominator of the Hastings ratio for
the proposal.
If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}),
the return value has an attribute \code{saved} which is a list of
point patterns, containing the intermediate states of the algorithm.
}
\details{
This function generates simulated realisations from any of a range of
spatial point processes, using the Metropolis-Hastings algorithm.
It is the default method for the generic function \code{\link{rmh}}.
This function executes a Metropolis-Hastings algorithm
with birth, death and shift proposals as described in
Geyer and \ifelse{latex}{\out{M\o ller}}{Moller} (1994).
The argument \code{model} specifies the point process model to be
simulated. It is either a list, or an object of class
\code{"rmhmodel"}, with the following components:
\describe{
\item{cif}{A character string specifying the choice of
interpoint interaction for the point process.
}
\item{par}{
Parameter values for the conditional
intensity function.
}
\item{w}{
(Optional) window in which the pattern is
to be generated. An object of class \code{"owin"},
or data acceptable to \code{\link{as.owin}}.
}
\item{trend}{
Data specifying the spatial trend in the model, if it has a trend.
This may be a function, a pixel image (of class \code{"im"}),
(or a list of functions or images if the model
is multitype).
If the trend is a function or functions,
any auxiliary arguments \code{...} to \code{rmh.default}
will be passed to these functions, which
should be of the form \code{function(x, y, ...)}.
}
\item{types}{
List of possible types, for a multitype point process.
}
}
For full details of these parameters, see \code{\link{rmhmodel.default}}.
The argument \code{start} determines the initial state of the
Metropolis-Hastings algorithm. It is either \code{NULL},
or an object of class \code{"rmhstart"},
or a list with the following components:
\describe{
\item{n.start}{
Number of points in the initial point pattern.
A single integer, or a vector of integers giving the
numbers of points of each type in a multitype point pattern.
Incompatible with \code{x.start}.
}
\item{x.start}{
Initial point pattern configuration.
Incompatible with \code{n.start}.
\code{x.start} may be a point pattern (an
object of class \code{"ppp"}), or data which can be coerced
to this class by \code{\link{as.ppp}}, or an object with
components \code{x} and \code{y}, or a two-column matrix.
In the last two cases, the window for the pattern is determined
by \code{model$w}.
In the first two cases, if \code{model$w} is also present,
then the final simulated pattern will be clipped to
the window \code{model$w}.
}
}
For full details of these parameters, see \code{\link{rmhstart}}.
The third argument \code{control} controls the simulation
procedure (including \emph{conditional simulation}),
iterative behaviour, and termination of the
Metropolis-Hastings algorithm. It is either \code{NULL}, or
a list, or an object of class \code{"rmhcontrol"}, with components:
\describe{
\item{p}{The probability of proposing a ``shift''
(as opposed to a birth or death) in the Metropolis-Hastings
algorithm.
}
\item{q}{The conditional probability of proposing a death
(rather than a birth)
given that birth/death has been chosen over shift.
}
\item{nrep}{The number of repetitions or iterations
to be made by the Metropolis-Hastings algorithm. It should
be large.
}
\item{expand}{
Either a numerical expansion factor, or
a window (object of class \code{"owin"}). Indicates that
the process is to be simulated on a larger domain than the
original data window \code{w}, then clipped to \code{w}
when the algorithm has finished.
The default is to expand the simulation window
if the model is stationary and non-Poisson
(i.e. it has no trend and the interaction is not Poisson)
and not to expand in all other cases.
If the model has a trend, then in order for expansion to
be feasible, the trend must be given either as a function,
or an image whose bounding box is large enough to contain
the expanded window.
}
\item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE}
we simulate a process on the torus formed by identifying
opposite edges of a rectangular window.
}
\item{ptypes}{A vector of probabilities (summing to 1) to be used
in assigning a random type to a new point.
}
\item{fixall}{A logical scalar specifying whether to condition on
the number of points of each type.
}
\item{nverb}{An integer specifying how often ``progress reports''
(which consist simply of the number of repetitions completed)
should be printed out. If nverb is left at 0, the default,
the simulation proceeds silently.
}
\item{x.cond}{If this argument is present, then
\emph{conditional simulation} will be performed, and \code{x.cond}
specifies the conditioning points and the type of conditioning.
}
\item{nsave,nburn}{
If these values are specified, then
intermediate states of the simulation algorithm will be saved
every \code{nsave} iterations, after an initial burn-in period of
\code{nburn} iterations.
}
\item{track}{
Logical flag indicating whether to save the transition
history of the simulations.
}
}
For full details of these parameters, see \code{\link{rmhcontrol}}.
The control parameters can also be given in the \code{\dots} arguments.
}
\section{Conditional Simulation}{
There are several kinds of conditional simulation.
\itemize{
\item
Simulation \emph{conditional upon the number of points},
that is, holding the number of points fixed.
To do this, set \code{control$p} (the probability of a shift) equal to 1.
The number of points is then determined by the starting state, which
may be specified either by setting \code{start$n.start} to be a
scalar, or by setting the initial pattern \code{start$x.start}.
\item
In the case of multitype processes, it is possible to simulate the
model \emph{conditionally upon the number of points of each type},
i.e. holding the number of points of each type
to be fixed. To do this, set \code{control$p} equal to 1
and \code{control$fixall} to be \code{TRUE}.
The number of points is then determined by the starting state, which
may be specified either by setting \code{start$n.start} to be an
integer vector, or by setting the initial pattern \code{start$x.start}.
\item
Simulation
\emph{conditional on the configuration observed in a sub-window},
that is, requiring that, inside a specified sub-window \eqn{V},
the simulated pattern should agree with a specified point pattern
\eqn{y}.To do this, set \code{control$x.cond} to equal the
specified point pattern \eqn{y}, making sure that it is an object of class
\code{"ppp"} and that the window \code{Window(control$x.cond)}
is the conditioning window \eqn{V}.
\item
Simulation \emph{conditional on the presence of specified points},
that is, requiring that the simulated pattern should include a
specified set of points. This is simulation from the Palm
distribution of the point process given a pattern \eqn{y}.
To do this, set \code{control$x.cond} to be a
\code{data.frame} containing the coordinates (and marks,
if appropriate) of the specified points.
}
For further information, see \code{\link{rmhcontrol}}.
Note that, when we simulate conditionally on the number of points, or
conditionally on the number of points of each type,
no expansion of the window is possible.
}
\section{Visual Debugger}{
If \code{snoop = TRUE}, an interactive debugger is activated.
On the current plot device, the debugger displays the current
state of the Metropolis-Hastings algorithm together with
the proposed transition to the next state.
Clicking on this graphical display (using the left mouse button)
will re-centre the display at the clicked location.
Surrounding this graphical display is an array of boxes representing
different actions.
Clicking on one of the action boxes (using the left mouse button)
will cause the action to be performed.
Debugger actions include:
\itemize{
\item Zooming in or out
\item Panning (shifting the field of view) left, right, up or down
\item Jumping to the next iteration
\item Skipping 10, 100, 1000, 10000 or 100000 iterations
\item Jumping to the next Birth proposal (etc)
\item Changing the fate of the proposal (i.e. changing whether
the proposal is accepted or rejected)
\item Dumping the current state and proposal to a file
\item Printing detailed information at the terminal
\item Exiting the debugger (so that the simulation
algorithm continues without further interruption).
}
Right-clicking the mouse will also cause the debugger to exit.
}
\references{
Baddeley, A. and Turner, R. (2000) Practical maximum
pseudolikelihood for spatial point patterns.
\emph{Australian and New Zealand Journal of Statistics}
\bold{42}, 283 -- 322.
Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
Patterns} (2nd ed.) Arnold, London.
Diggle, P.J. and Gratton, R.J. (1984)
Monte Carlo methods of inference for implicit statistical models.
\emph{Journal of the Royal Statistical Society, series B}
\bold{46}, 193 -- 212.
Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
A nonparametric estimator for pairwise-interaction point processes.
Biometrika \bold{74}, 763 -- 770.
Geyer, C.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1994)
Simulation procedures and likelihood inference for spatial
point processes.
\emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
Geyer, C.J. (1999)
Likelihood Inference for Spatial Point
Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and
M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
Computation}, Chapman and Hall / CRC, Monographs on Statistics and
Applied Probability, number 80. Pages 79--140.
}
\section{Warnings}{
There is never a guarantee that the Metropolis-Hastings algorithm
has converged to its limiting distribution.
If \code{start$x.start} is specified then \code{expand} is set equal to 1
and simulation takes place in \code{Window(x.start)}. Any specified
value for \code{expand} is simply ignored.
The presence of both a component \code{w} of \code{model} and a
non-null value for \code{Window(x.start)} makes sense ONLY if \code{w}
is contained in \code{Window(x.start)}.
For multitype processes make sure that, even if there is to be no
trend corresponding to a particular type, there is still a component
(a NULL component) for that type, in the list.
}
\seealso{
\code{\link{rmh}},
\code{\link{rmh.ppm}},
\code{\link{rStrauss}},
\code{\link{ppp}},
\code{\link{ppm}},
\code{\link{AreaInter}},
\code{\link{BadGey}},
\code{\link{DiggleGatesStibbard}},
\code{\link{DiggleGratton}},
\code{\link{Fiksel}},
\code{\link{Geyer}},
\code{\link{Hardcore}},
\code{\link{LennardJones}},
\code{\link{MultiHard}},
\code{\link{MultiStrauss}},
\code{\link{MultiStraussHard}},
\code{\link{PairPiece}},
\code{\link{Poisson}},
\code{\link{Softcore}},
\code{\link{Strauss}},
\code{\link{StraussHard}},
\code{\link{Triplets}}
}
\section{Other models}{
In theory, any finite point process model can be simulated using
the Metropolis-Hastings algorithm, provided the conditional
intensity is uniformly bounded.
In practice, the list of point process models that can be simulated using
\code{rmh.default} is limited to those that have been implemented
in the package's internal C code. More options will be added in the future.
Note that the \code{lookup} conditional intensity function
permits the simulation (in theory, to any desired degree
of approximation) of any pairwise interaction process for
which the interaction depends only on the distance between
the pair of points.
}
\section{Reproducible simulations}{
If the user wants the simulation to be exactly reproducible
(e.g. for a figure in a journal article, where it is useful to
have the figure consistent from draft to draft) then the state of
the random number generator should be set before calling
\code{rmh.default}. This can be done either by calling
\code{\link[base:Random]{set.seed}} or by assigning a value to
\code{\link[base:Random]{.Random.seed}}. In the examples below, we use
\code{\link[base:Random]{set.seed}}.
If a simulation has been performed and the user now wants to
repeat it exactly, the random seed should be extracted from
the simulated point pattern \code{X} by \code{seed <- attr(X, "seed")},
then assigned to the system random number state by
\code{.Random.seed <- seed} before calling \code{rmh.default}.
}
\examples{
if(interactive()) {
nr <- 1e5
nv <- 5000
ns <- 200
} else {
nr <- 20
nv <- 5
ns <- 20
oldopt <- spatstat.options()
spatstat.options(expand=1.05)
}
set.seed(961018)
# Strauss process.
mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
w=c(0,10,0,10))
X1.strauss <- rmh(model=mod01,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X1.strauss)
# Strauss process, conditioning on n = 42:
X2.strauss <- rmh(model=mod01,start=list(n.start=42),
control=list(p=1,nrep=nr,nverb=nv))
# Tracking algorithm progress:
X <- rmh(model=mod01,start=list(n.start=ns),
control=list(nrep=nr, nsave=nr/5, nburn=nr/2, track=TRUE))
History <- attr(X, "history")
Saved <- attr(X, "saved")
head(History)
plot(Saved)
# Hard core process:
mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10))
X3.hardcore <- rmh(model=mod02,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X3.hardcore)
# Strauss process equal to pure hardcore:
mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10))
X3.strauss <- rmh(model=mod02s,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Strauss process in a polygonal window.
x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42)
y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33)
mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07),
w=owin(poly=list(x=x,y=y)))
X4.strauss <- rmh(model=mod03,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X4.strauss)
# Strauss process in a polygonal window, conditioning on n = 80.
X5.strauss <- rmh(model=mod03,start=list(n.start=ns),
control=list(p=1,nrep=nr,nverb=nv))
# Strauss process, starting off from X4.strauss, but with the
# polygonal window replaced by a rectangular one. At the end,
# the generated pattern is clipped to the original polygonal window.
xxx <- X4.strauss
Window(xxx) <- as.owin(c(0,1,0,1))
X6.strauss <- rmh(model=mod03,start=list(x.start=xxx),
control=list(nrep=nr,nverb=nv))
# Strauss with hardcore:
mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
w=c(0,10,0,10))
X1.straush <- rmh(model=mod04,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Another Strauss with hardcore (with a perhaps surprising result):
mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5),
w=c(0,250,0,250))
X2.straush <- rmh(model=mod05,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Pure hardcore (identical to X3.strauss).
mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7),
w=c(0,10,0,10))
X3.straush <- rmh(model=mod06,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Soft core:
w <- c(0,10,0,10)
mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5),
w=c(0,10,0,10))
X.sftcr <- rmh(model=mod07,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.sftcr)
# Area-interaction process:
mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7),
w=c(0,10,0,10))
X.area <- rmh(model=mod42,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.area)
# Triplets process
modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7),
w=c(0,10,0,10))
X.triplets <- rmh(model=modtrip,
start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.triplets)
# Multitype Strauss:
beta <- c(0.027,0.008)
gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
r <- matrix(c(45,45,45,45),2,2)
mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r),
w=c(0,250,0,250))
X1.straussm <- rmh(model=mod08,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
if(interactive()) plot(X1.straussm)
# Multitype Strauss conditioning upon the total number
# of points being 80:
X2.straussm <- rmh(model=mod08,start=list(n.start=ns),
control=list(p=1,ptypes=c(0.75,0.25),nrep=nr,
nverb=nv))
# Conditioning upon the number of points of type 1 being 60
# and the number of points of type 2 being 20:
X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)),
control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25),
nrep=nr,nverb=nv))
# Multitype Strauss hardcore:
rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2)
mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
iradii=r,hradii=rhc),w=c(0,250,0,250))
X.straushm <- rmh(model=mod09,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
# Multitype Strauss hardcore with trends for each type:
beta <- c(0.27,0.08)
tr3 <- function(x,y){x <- x/250; y <- y/250;
exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
}
# log quadratic trend
tr4 <- function(x,y){x <- x/250; y <- y/250;
exp(-0.6*x+0.5*y)}
# log linear trend
mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
iradii=r,hradii=rhc),w=c(0,250,0,250),
trend=list(tr3,tr4))
X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),
nrep=nr,nverb=nv))
if(interactive()) plot(X1.straushm.trend)
# Multitype Strauss hardcore with trends for each type, given as images:
bigwin <- square(250)
i1 <- as.im(tr3, bigwin)
i2 <- as.im(tr4, bigwin)
mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
iradii=r,hradii=rhc),w=bigwin,
trend=list(i1,i2))
X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),expand=1,
nrep=nr,nverb=nv))
# Diggle, Gates, and Stibbard:
mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1))
X.dgs <- rmh(model=mod12,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.dgs)
# Diggle-Gratton:
mod13 <- list(cif="diggra",
par=list(beta=1800,kappa=3,delta=0.02,rho=0.04),
w=square(1))
X.diggra <- rmh(model=mod13,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.diggra)
# Fiksel:
modFik <- list(cif="fiksel",
par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0),
w=square(1))
X.fiksel <- rmh(model=modFik,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.fiksel)
# Geyer:
mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5),
w=c(0,10,0,10))
X1.geyer <- rmh(model=mod14,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X1.geyer)
# Geyer; same as a Strauss process with parameters
# (beta=2.25,gamma=0.16,r=0.7):
mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000),
w=c(0,10,0,10))
X2.geyer <- rmh(model=mod15,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3))
data(redwood)
X3.geyer <- rmh(model=mod16,start=list(x.start=redwood),
control=list(periodic=TRUE,nrep=nr,nverb=nv))
# Geyer, starting from the redwood data set, simulating
# on a torus, and conditioning on n:
X4.geyer <- rmh(model=mod16,start=list(x.start=redwood),
control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv))
# Lookup (interaction function h_2 from page 76, Diggle (2003)):
r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0.
h <- 20*(r-0.05)
h[r<0.05] <- 0
h[r>0.10] <- 1
mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1))
X.lookup <- rmh(model=mod17,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.lookup)
# Strauss with trend
tr <- function(x,y){x <- x/250; y <- y/250;
exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
}
beta <- 0.3
gmma <- 0.5
r <- 45
modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),
w=square(250), trend=tr)
X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Baddeley-Geyer
r <- seq(0,0.2,length=8)[-1]
gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5)
mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5),
w=square(1))
X1.badgey <- rmh(model=mod18,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
mod19 <- list(cif="badgey",
par=list(beta=4000, gamma=gmma,r=r,sat=1e4),
w=square(1))
set.seed(1329)
X2.badgey <- rmh(model=mod19,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Check:
h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2
hs <- stepfun(r,c(h,1))
mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1))
set.seed(1329)
X.check <- rmh(model=mod20,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# X2.badgey and X.check will be identical.
mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1),
r=c(0.035,0.07,0.14),sat=5), w=square(1))
X3.badgey <- rmh(model=mod21,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Same result as Geyer model with beta=300, gamma=0.4, r=0.07,
# sat = 5 (if seeds and control parameters are the same)
# Or more simply:
mod22 <- list(cif="badgey",
par=list(beta=300,gamma=0.4,r=0.07, sat=5),
w=square(1))
X4.badgey <- rmh(model=mod22,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Same again --- i.e. the BadGey model includes the Geyer model.
# Illustrating scalability.
\dontrun{
M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin())
set.seed(496)
X1 <- rmh(model=M1,start=list(n.start=300))
M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4),
w=owin(c(0,10),c(0,10)))
set.seed(496)
X2 <- rmh(model=M2,start=list(n.start=300))
chk <- affine(X1,mat=diag(c(10,10)))
all.equal(chk,X2,check.attributes=FALSE)
# Under the default spatstat options the foregoing all.equal()
# will yield TRUE. Setting spatstat.options(scalable=FALSE) and
# re-running the code will reveal differences between X1 and X2.
}
if(!interactive()) spatstat.options(oldopt)
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{datagen}
| /man/rmh.default.Rd | no_license | kasselhingee/spatstat | R | false | false | 27,604 | rd | \name{rmh.default}
\alias{rmh.default}
\title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.}
\description{
Generates a random point pattern, simulated from
a chosen point process model, using the Metropolis-Hastings
algorithm.
}
\usage{
\method{rmh}{default}(model, start=NULL,
control=default.rmhcontrol(model),
\dots,
nsim=1, drop=TRUE, saveinfo=TRUE,
verbose=TRUE, snoop=FALSE)
}
\arguments{
\item{model}{Data specifying the point process model
that is to be simulated.
}
\item{start}{Data determining the initial state of
the algorithm.
}
\item{control}{Data controlling the iterative behaviour
and termination of the algorithm.
}
\item{\dots}{
Further arguments passed to \code{\link{rmhcontrol}}
or to trend functions in \code{model}.
}
\item{nsim}{
Number of simulated point patterns that should be generated.
}
\item{drop}{
Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
result will be a point pattern, rather than a list
containing a single point pattern.
}
\item{saveinfo}{
Logical value indicating whether to save auxiliary information.
}
\item{verbose}{
Logical value indicating whether to print progress reports.
}
\item{snoop}{
Logical. If \code{TRUE}, activate the visual debugger.
}
}
\value{
A point pattern (an object of class \code{"ppp"}, see
\code{\link{ppp.object}}) or a list of point patterns.
The returned value has an attribute \code{info} containing
modified versions of the arguments
\code{model}, \code{start}, and \code{control} which together specify
the exact simulation procedure. The \code{info} attribute can be
printed (and is printed automatically by \code{\link{summary.ppp}}).
For computational efficiency, the \code{info} attribute can be omitted
by setting \code{saveinfo=FALSE}.
The value of \code{\link[base:Random]{.Random.seed}} at the start
of the simulations is also saved and returned as an attribute
\code{seed}.
If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}),
the transition history of the algorithm
is saved, and returned as an attribute \code{history}. The transition
history is a data frame containing a factor \code{proposaltype}
identifying the proposal type (Birth, Death or Shift) and
a logical vector \code{accepted} indicating whether the proposal was
accepted.
The data frame also has columns \code{numerator}, \code{denominator}
which give the numerator and denominator of the Hastings ratio for
the proposal.
If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}),
the return value has an attribute \code{saved} which is a list of
point patterns, containing the intermediate states of the algorithm.
}
\details{
This function generates simulated realisations from any of a range of
spatial point processes, using the Metropolis-Hastings algorithm.
It is the default method for the generic function \code{\link{rmh}}.
This function executes a Metropolis-Hastings algorithm
with birth, death and shift proposals as described in
Geyer and \ifelse{latex}{\out{M\o ller}}{Moller} (1994).
The argument \code{model} specifies the point process model to be
simulated. It is either a list, or an object of class
\code{"rmhmodel"}, with the following components:
\describe{
\item{cif}{A character string specifying the choice of
interpoint interaction for the point process.
}
\item{par}{
Parameter values for the conditional
intensity function.
}
\item{w}{
(Optional) window in which the pattern is
to be generated. An object of class \code{"owin"},
or data acceptable to \code{\link{as.owin}}.
}
\item{trend}{
Data specifying the spatial trend in the model, if it has a trend.
This may be a function, a pixel image (of class \code{"im"}),
(or a list of functions or images if the model
is multitype).
If the trend is a function or functions,
any auxiliary arguments \code{...} to \code{rmh.default}
will be passed to these functions, which
should be of the form \code{function(x, y, ...)}.
}
\item{types}{
List of possible types, for a multitype point process.
}
}
For full details of these parameters, see \code{\link{rmhmodel.default}}.
The argument \code{start} determines the initial state of the
Metropolis-Hastings algorithm. It is either \code{NULL},
or an object of class \code{"rmhstart"},
or a list with the following components:
\describe{
\item{n.start}{
Number of points in the initial point pattern.
A single integer, or a vector of integers giving the
numbers of points of each type in a multitype point pattern.
Incompatible with \code{x.start}.
}
\item{x.start}{
Initial point pattern configuration.
Incompatible with \code{n.start}.
\code{x.start} may be a point pattern (an
object of class \code{"ppp"}), or data which can be coerced
to this class by \code{\link{as.ppp}}, or an object with
components \code{x} and \code{y}, or a two-column matrix.
In the last two cases, the window for the pattern is determined
by \code{model$w}.
In the first two cases, if \code{model$w} is also present,
then the final simulated pattern will be clipped to
the window \code{model$w}.
}
}
For full details of these parameters, see \code{\link{rmhstart}}.
The third argument \code{control} controls the simulation
procedure (including \emph{conditional simulation}),
iterative behaviour, and termination of the
Metropolis-Hastings algorithm. It is either \code{NULL}, or
a list, or an object of class \code{"rmhcontrol"}, with components:
\describe{
\item{p}{The probability of proposing a ``shift''
(as opposed to a birth or death) in the Metropolis-Hastings
algorithm.
}
\item{q}{The conditional probability of proposing a death
(rather than a birth)
given that birth/death has been chosen over shift.
}
\item{nrep}{The number of repetitions or iterations
to be made by the Metropolis-Hastings algorithm. It should
be large.
}
\item{expand}{
Either a numerical expansion factor, or
a window (object of class \code{"owin"}). Indicates that
the process is to be simulated on a larger domain than the
original data window \code{w}, then clipped to \code{w}
when the algorithm has finished.
The default is to expand the simulation window
if the model is stationary and non-Poisson
(i.e. it has no trend and the interaction is not Poisson)
and not to expand in all other cases.
If the model has a trend, then in order for expansion to
be feasible, the trend must be given either as a function,
or an image whose bounding box is large enough to contain
the expanded window.
}
\item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE}
we simulate a process on the torus formed by identifying
opposite edges of a rectangular window.
}
\item{ptypes}{A vector of probabilities (summing to 1) to be used
in assigning a random type to a new point.
}
\item{fixall}{A logical scalar specifying whether to condition on
the number of points of each type.
}
\item{nverb}{An integer specifying how often ``progress reports''
(which consist simply of the number of repetitions completed)
should be printed out. If nverb is left at 0, the default,
the simulation proceeds silently.
}
\item{x.cond}{If this argument is present, then
\emph{conditional simulation} will be performed, and \code{x.cond}
specifies the conditioning points and the type of conditioning.
}
\item{nsave,nburn}{
If these values are specified, then
intermediate states of the simulation algorithm will be saved
every \code{nsave} iterations, after an initial burn-in period of
\code{nburn} iterations.
}
\item{track}{
Logical flag indicating whether to save the transition
history of the simulations.
}
}
For full details of these parameters, see \code{\link{rmhcontrol}}.
The control parameters can also be given in the \code{\dots} arguments.
}
\section{Conditional Simulation}{
There are several kinds of conditional simulation.
\itemize{
\item
Simulation \emph{conditional upon the number of points},
that is, holding the number of points fixed.
To do this, set \code{control$p} (the probability of a shift) equal to 1.
The number of points is then determined by the starting state, which
may be specified either by setting \code{start$n.start} to be a
scalar, or by setting the initial pattern \code{start$x.start}.
\item
In the case of multitype processes, it is possible to simulate the
model \emph{conditionally upon the number of points of each type},
i.e. holding the number of points of each type
to be fixed. To do this, set \code{control$p} equal to 1
and \code{control$fixall} to be \code{TRUE}.
The number of points is then determined by the starting state, which
may be specified either by setting \code{start$n.start} to be an
integer vector, or by setting the initial pattern \code{start$x.start}.
\item
Simulation
\emph{conditional on the configuration observed in a sub-window},
that is, requiring that, inside a specified sub-window \eqn{V},
the simulated pattern should agree with a specified point pattern
\eqn{y}. To do this, set \code{control$x.cond} to equal the
specified point pattern \eqn{y}, making sure that it is an object of class
\code{"ppp"} and that the window \code{Window(control$x.cond)}
is the conditioning window \eqn{V}.
\item
Simulation \emph{conditional on the presence of specified points},
that is, requiring that the simulated pattern should include a
specified set of points. This is simulation from the Palm
distribution of the point process given a pattern \eqn{y}.
To do this, set \code{control$x.cond} to be a
\code{data.frame} containing the coordinates (and marks,
if appropriate) of the specified points.
}
For further information, see \code{\link{rmhcontrol}}.
Note that, when we simulate conditionally on the number of points, or
conditionally on the number of points of each type,
no expansion of the window is possible.
}
\section{Visual Debugger}{
If \code{snoop = TRUE}, an interactive debugger is activated.
On the current plot device, the debugger displays the current
state of the Metropolis-Hastings algorithm together with
the proposed transition to the next state.
Clicking on this graphical display (using the left mouse button)
will re-centre the display at the clicked location.
Surrounding this graphical display is an array of boxes representing
different actions.
Clicking on one of the action boxes (using the left mouse button)
will cause the action to be performed.
Debugger actions include:
\itemize{
\item Zooming in or out
\item Panning (shifting the field of view) left, right, up or down
\item Jumping to the next iteration
\item Skipping 10, 100, 1000, 10000 or 100000 iterations
\item Jumping to the next Birth proposal (etc)
\item Changing the fate of the proposal (i.e. changing whether
the proposal is accepted or rejected)
\item Dumping the current state and proposal to a file
\item Printing detailed information at the terminal
\item Exiting the debugger (so that the simulation
algorithm continues without further interruption).
}
Right-clicking the mouse will also cause the debugger to exit.
}
\references{
Baddeley, A. and Turner, R. (2000) Practical maximum
pseudolikelihood for spatial point patterns.
\emph{Australian and New Zealand Journal of Statistics}
\bold{42}, 283 -- 322.
Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
Patterns} (2nd ed.) Arnold, London.
Diggle, P.J. and Gratton, R.J. (1984)
Monte Carlo methods of inference for implicit statistical models.
\emph{Journal of the Royal Statistical Society, series B}
\bold{46}, 193 -- 212.
Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
A nonparametric estimator for pairwise-interaction point processes.
Biometrika \bold{74}, 763 -- 770.
Geyer, C.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1994)
Simulation procedures and likelihood inference for spatial
point processes.
\emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
Geyer, C.J. (1999)
Likelihood Inference for Spatial Point
Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and
M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
Computation}, Chapman and Hall / CRC, Monographs on Statistics and
Applied Probability, number 80. Pages 79--140.
}
\section{Warnings}{
There is never a guarantee that the Metropolis-Hastings algorithm
has converged to its limiting distribution.
If \code{start$x.start} is specified then \code{expand} is set equal to 1
and simulation takes place in \code{Window(x.start)}. Any specified
value for \code{expand} is simply ignored.
The presence of both a component \code{w} of \code{model} and a
non-null value for \code{Window(x.start)} makes sense ONLY if \code{w}
is contained in \code{Window(x.start)}.
For multitype processes make sure that, even if there is to be no
trend corresponding to a particular type, there is still a component
(a NULL component) for that type, in the list.
}
\seealso{
\code{\link{rmh}},
\code{\link{rmh.ppm}},
\code{\link{rStrauss}},
\code{\link{ppp}},
\code{\link{ppm}},
\code{\link{AreaInter}},
\code{\link{BadGey}},
\code{\link{DiggleGatesStibbard}},
\code{\link{DiggleGratton}},
\code{\link{Fiksel}},
\code{\link{Geyer}},
\code{\link{Hardcore}},
\code{\link{LennardJones}},
\code{\link{MultiHard}},
\code{\link{MultiStrauss}},
\code{\link{MultiStraussHard}},
\code{\link{PairPiece}},
\code{\link{Poisson}},
\code{\link{Softcore}},
\code{\link{Strauss}},
\code{\link{StraussHard}},
\code{\link{Triplets}}
}
\section{Other models}{
In theory, any finite point process model can be simulated using
the Metropolis-Hastings algorithm, provided the conditional
intensity is uniformly bounded.
In practice, the list of point process models that can be simulated using
\code{rmh.default} is limited to those that have been implemented
in the package's internal C code. More options will be added in the future.
Note that the \code{lookup} conditional intensity function
permits the simulation (in theory, to any desired degree
of approximation) of any pairwise interaction process for
which the interaction depends only on the distance between
the pair of points.
}
\section{Reproducible simulations}{
If the user wants the simulation to be exactly reproducible
(e.g. for a figure in a journal article, where it is useful to
have the figure consistent from draft to draft) then the state of
the random number generator should be set before calling
\code{rmh.default}. This can be done either by calling
\code{\link[base:Random]{set.seed}} or by assigning a value to
\code{\link[base:Random]{.Random.seed}}. In the examples below, we use
\code{\link[base:Random]{set.seed}}.
If a simulation has been performed and the user now wants to
repeat it exactly, the random seed should be extracted from
the simulated point pattern \code{X} by \code{seed <- attr(X, "seed")},
then assigned to the system random number state by
\code{.Random.seed <- seed} before calling \code{rmh.default}.
}
\examples{
if(interactive()) {
nr <- 1e5
nv <- 5000
ns <- 200
} else {
nr <- 20
nv <- 5
ns <- 20
oldopt <- spatstat.options()
spatstat.options(expand=1.05)
}
set.seed(961018)
# Strauss process.
mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
w=c(0,10,0,10))
X1.strauss <- rmh(model=mod01,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X1.strauss)
# Strauss process, conditioning on n = 42:
X2.strauss <- rmh(model=mod01,start=list(n.start=42),
control=list(p=1,nrep=nr,nverb=nv))
# Tracking algorithm progress:
X <- rmh(model=mod01,start=list(n.start=ns),
control=list(nrep=nr, nsave=nr/5, nburn=nr/2, track=TRUE))
History <- attr(X, "history")
Saved <- attr(X, "saved")
head(History)
plot(Saved)
# Hard core process:
mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10))
X3.hardcore <- rmh(model=mod02,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X3.hardcore)
# Strauss process equal to pure hardcore:
mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10))
X3.strauss <- rmh(model=mod02s,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Strauss process in a polygonal window.
x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42)
y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33)
mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07),
w=owin(poly=list(x=x,y=y)))
X4.strauss <- rmh(model=mod03,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X4.strauss)
# Strauss process in a polygonal window, conditioning on n = 80.
X5.strauss <- rmh(model=mod03,start=list(n.start=ns),
control=list(p=1,nrep=nr,nverb=nv))
# Strauss process, starting off from X4.strauss, but with the
# polygonal window replaced by a rectangular one. At the end,
# the generated pattern is clipped to the original polygonal window.
xxx <- X4.strauss
Window(xxx) <- as.owin(c(0,1,0,1))
X6.strauss <- rmh(model=mod03,start=list(x.start=xxx),
control=list(nrep=nr,nverb=nv))
# Strauss with hardcore:
mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
w=c(0,10,0,10))
X1.straush <- rmh(model=mod04,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Another Strauss with hardcore (with a perhaps surprising result):
mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5),
w=c(0,250,0,250))
X2.straush <- rmh(model=mod05,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Pure hardcore (identical to X3.strauss).
mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7),
w=c(0,10,0,10))
X3.straush <- rmh(model=mod06,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Soft core:
w <- c(0,10,0,10)
mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5),
w=c(0,10,0,10))
X.sftcr <- rmh(model=mod07,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.sftcr)
# Area-interaction process:
mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7),
w=c(0,10,0,10))
X.area <- rmh(model=mod42,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.area)
# Triplets process
modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7),
w=c(0,10,0,10))
X.triplets <- rmh(model=modtrip,
start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.triplets)
# Multitype Strauss:
beta <- c(0.027,0.008)
gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
r <- matrix(c(45,45,45,45),2,2)
mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r),
w=c(0,250,0,250))
X1.straussm <- rmh(model=mod08,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
if(interactive()) plot(X1.straussm)
# Multitype Strauss conditioning upon the total number
# of points being 80:
X2.straussm <- rmh(model=mod08,start=list(n.start=ns),
control=list(p=1,ptypes=c(0.75,0.25),nrep=nr,
nverb=nv))
# Conditioning upon the number of points of type 1 being 60
# and the number of points of type 2 being 20:
X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)),
control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25),
nrep=nr,nverb=nv))
# Multitype Strauss hardcore:
rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2)
mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
iradii=r,hradii=rhc),w=c(0,250,0,250))
X.straushm <- rmh(model=mod09,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
# Multitype Strauss hardcore with trends for each type:
beta <- c(0.27,0.08)
tr3 <- function(x,y){x <- x/250; y <- y/250;
exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
}
# log quadratic trend
tr4 <- function(x,y){x <- x/250; y <- y/250;
exp(-0.6*x+0.5*y)}
# log linear trend
mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
iradii=r,hradii=rhc),w=c(0,250,0,250),
trend=list(tr3,tr4))
X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),
nrep=nr,nverb=nv))
if(interactive()) plot(X1.straushm.trend)
# Multitype Strauss hardcore with trends for each type, given as images:
bigwin <- square(250)
i1 <- as.im(tr3, bigwin)
i2 <- as.im(tr4, bigwin)
mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
iradii=r,hradii=rhc),w=bigwin,
trend=list(i1,i2))
X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns),
control=list(ptypes=c(0.75,0.25),expand=1,
nrep=nr,nverb=nv))
# Diggle, Gates, and Stibbard:
mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1))
X.dgs <- rmh(model=mod12,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.dgs)
# Diggle-Gratton:
mod13 <- list(cif="diggra",
par=list(beta=1800,kappa=3,delta=0.02,rho=0.04),
w=square(1))
X.diggra <- rmh(model=mod13,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.diggra)
# Fiksel:
modFik <- list(cif="fiksel",
par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0),
w=square(1))
X.fiksel <- rmh(model=modFik,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.fiksel)
# Geyer:
mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5),
w=c(0,10,0,10))
X1.geyer <- rmh(model=mod14,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X1.geyer)
# Geyer; same as a Strauss process with parameters
# (beta=2.25,gamma=0.16,r=0.7):
mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000),
w=c(0,10,0,10))
X2.geyer <- rmh(model=mod15,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3))
data(redwood)
X3.geyer <- rmh(model=mod16,start=list(x.start=redwood),
control=list(periodic=TRUE,nrep=nr,nverb=nv))
# Geyer, starting from the redwood data set, simulating
# on a torus, and conditioning on n:
X4.geyer <- rmh(model=mod16,start=list(x.start=redwood),
control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv))
# Lookup (interaction function h_2 from page 76, Diggle (2003)):
r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0.
h <- 20*(r-0.05)
h[r<0.05] <- 0
h[r>0.10] <- 1
mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1))
X.lookup <- rmh(model=mod17,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
if(interactive()) plot(X.lookup)
# Strauss with trend
tr <- function(x,y){x <- x/250; y <- y/250;
exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
}
beta <- 0.3
gmma <- 0.5
r <- 45
modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),
w=square(250), trend=tr)
X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Baddeley-Geyer
r <- seq(0,0.2,length=8)[-1]
gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5)
mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5),
w=square(1))
X1.badgey <- rmh(model=mod18,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
mod19 <- list(cif="badgey",
par=list(beta=4000, gamma=gmma,r=r,sat=1e4),
w=square(1))
set.seed(1329)
X2.badgey <- rmh(model=mod19,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Check:
h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2
hs <- stepfun(r,c(h,1))
mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1))
set.seed(1329)
X.check <- rmh(model=mod20,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# X2.badgey and X.check will be identical.
mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1),
r=c(0.035,0.07,0.14),sat=5), w=square(1))
X3.badgey <- rmh(model=mod21,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Same result as Geyer model with beta=300, gamma=0.4, r=0.07,
# sat = 5 (if seeds and control parameters are the same)
# Or more simply:
mod22 <- list(cif="badgey",
par=list(beta=300,gamma=0.4,r=0.07, sat=5),
w=square(1))
X4.badgey <- rmh(model=mod22,start=list(n.start=ns),
control=list(nrep=nr,nverb=nv))
# Same again --- i.e. the BadGey model includes the Geyer model.
# Illustrating scalability.
\dontrun{
M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin())
set.seed(496)
X1 <- rmh(model=M1,start=list(n.start=300))
M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4),
w=owin(c(0,10),c(0,10)))
set.seed(496)
X2 <- rmh(model=M2,start=list(n.start=300))
chk <- affine(X1,mat=diag(c(10,10)))
all.equal(chk,X2,check.attributes=FALSE)
# Under the default spatstat options the foregoing all.equal()
# will yield TRUE. Setting spatstat.options(scalable=FALSE) and
# re-running the code will reveal differences between X1 and X2.
}
if(!interactive()) spatstat.options(oldopt)
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{datagen}
|
# Netatmo Spatial Analysis
# This file does the spatial analysis of the log and cws data.
# It compares the (inverse distance weighted)
# mean temperatures of the log/cws within multiple
# radii around every bicycle measurement.
# SET WORKING DIRECTORY
# NOTE(review): depends on the RStudio API, so this script can only be run
# from inside RStudio -- confirm before attempting to run it non-interactively.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Attach libraries (they must already be installed).
# NOTE(review): attach order appears deliberate -- raster is loaded after
# dplyr and masks some dplyr verbs (e.g. select); verify before reordering.
library("measurements") #for converting lat/lon in degrees,min,sec to decimal degrees
library("tidyverse") # for data manipulation
library("dplyr") # attached explicitly, although tidyverse already attaches it
library("raster") # for distance calculations
library("data.table") # for data table manipulations
library("Metrics") # for statistical calculations
# Create the output directory tree (dir.create warns if a directory exists).
dir.create("output_reworked/2_spatial_and_temporal_analysis_reworked/")
dir.create("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/")
dir.create("output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/")
# Load helper functions used later in the analysis.
source("r_scripts/functions/complete_cases_function.R")
source("r_scripts/functions/convert_df_cols_to_POSIX_tz_Bern_function.r")
# Read Files (from processing output) -------------------------------------

#' Read every CSV file in `dir_path` into the global environment.
#'
#' One global variable is created per file, named after the file with its
#' 4-character extension (".csv") stripped -- e.g. "bicycle_complete.csv"
#' becomes `bicycle_complete`. Replaces four previously copy-pasted loops.
#' `header = TRUE` (not `T`, which is reassignable) and `sep = ","` match
#' the original read.csv calls.
read_csv_dir <- function(dir_path) {
  for (f in list.files(path = dir_path)) {
    print(f)  # progress: show which file is currently being loaded
    name <- substr(f, 1, nchar(f) - 4)  # drop ".csv"
    assign(name,
           read.csv(file = paste0(dir_path, f), header = TRUE, sep = ","),
           envir = .GlobalEnv)  # assign into the global env, as the original loops did
  }
}

# bicycle
read_csv_dir("output_reworked/0_pre_processing_orig/bicycle/")
rm(bicycle_complete)  # loaded with the rest of the folder but not needed here

# log
read_csv_dir("output_reworked/0_pre_processing_orig/log/")

# cws
read_csv_dir("output_reworked/0_pre_processing_orig/cws_be_2019/")

# distance matrices
read_csv_dir("output_reworked/0_pre_processing_orig/distance/")
# Convert times to POSIX --------------------------------------------------
# The time columns come back from read.csv as character/factor; convert each
# column of the data frame to POSIXct in the Europe/Berlin time zone.
# seq_along() is used instead of 1:length(): it is empty-safe (1:0 would
# iterate over c(1, 0) if the data frame had no columns).
for (i in seq_along(cws_be_2019_bicycle_time_orig)) {
  cws_be_2019_bicycle_time_orig[, i] <- as.POSIXct(
    as.character(cws_be_2019_bicycle_time_orig[, i]),
    tz = "Europe/Berlin"
  )
}
# Mean CWS Analysis ----------------------------------------------------------------

# Define the parameter grid for the spatial/temporal matching.
rad <- c(100, 150, 200, 250, 300, 400, 500, 600,
         700, 800, 900, 1000, 1500, 2000, 3000) # search radii in meters
delta_t <- c(60 * 60, 30 * 60, 15 * 60, 10 * 60, 5 * 60) # temporal distances in seconds (60/30/15/10/5 min); `<-` instead of `=` for top-level assignment
p <- 1 # power parameter for the inverse distance weighting function

## parameters for testing the loop interactively
# i = 2451 # NA values within rad and dt here, so the script would give NaN or Inf as temperature
# i = 2450 # here everything is fine
# r <- rad
# dt <- delta_t
### CWS for loop ###
###
for (r in rad){
for (dt in delta_t){
# create empty vectors to save data in
cws_be_08_dist <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # distances of closest cws measurement
cws_be_08_dist_mean <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # mean of those distances
cws_be_08_dist_name <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # name of the cws within the radius (=column name)
cws_be_08_dt <- (c(rep(NA, nrow(cws_be_08_bicycle))))
cws_be_08_dt_mean <- (c(rep(NA, nrow(cws_be_08_bicycle))))
cws_be_08_temp <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # temperature values of those cws
cws_be_08_temp_weighted_mean <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # weighted mean of those values
cws_be_08_temp_min_T_filter <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # minimum T of cws within that radius
cws_be_08_number_of_cws <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # the ammount of cws within the radius
cws_be_08_temp_difference_weighted_mean <- (c(rep(NA, nrow(cws_be_08_bicycle))))
cws_be_08_temp_difference_min_T_filter <- (c(rep(NA, nrow(cws_be_08_bicycle))))
for (i in 1:nrow(cws_be_08_bicycle)){
print(paste("CWS:","Calulating row",i,"for radius",r,"meters","and time difference",dt/60,"minutes"))
if (is.na(min(dist_cws_be_08_bicycle[i,]) == TRUE)){
# if NA then remove
cws_be_08_dist[i] <- NA
cws_be_08_dist_mean[i] <- NA
cws_be_08_dist_name[i] <- NA
cws_be_08_dt[i] <- NA
cws_be_08_dt_mean[i] <- NA
cws_be_08_temp[i] <- NA
cws_be_08_temp_weighted_mean[i] <- NA
cws_be_08_temp_min_T_filter[i] <- NA
cws_be_08_number_of_cws[i] <- NA
cws_be_08_temp_difference_weighted_mean[i] <- NA
cws_be_08_temp_difference_min_T_filter[i] <- NA
}
# else if ((length(which((dist_cws_be_08_bicycle[i,] <= r) == TRUE)) == 0L)==TRUE){
# # if no distance is within the radius
# cws_be_08_dist[i] <- NA
# cws_be_08_dist_mean[i] <- NA
# cws_be_08_dist_name[i] <- NA
# cws_be_08_dt[i] <- NA
# cws_be_08_dt_mean[i] <- NA
# cws_be_08_temp[i] <- NA
# cws_be_08_temp_weighted_mean[i] <- NA
# cws_be_08_temp_min_T_filter[i] <- NA
# cws_be_08_number_of_cws[i] <- NA
# cws_be_08_temp_difference_weighted_mean[i] <- NA
# cws_be_08_temp_difference_min_T_filter[i] <- NA
# }
# else if ((length(which((abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt) == TRUE)) == 0L)==TRUE){
# # if no temporal distance is within delta t (then the length of the which() expression will be larger thatn 0)
# cws_be_08_dist[i] <- NA
# cws_be_08_dist_mean[i] <- NA
# cws_be_08_dist_name[i] <- NA
# cws_be_08_dt[i] <- NA
# cws_be_08_dt_mean[i] <- NA
# cws_be_08_temp[i] <- NA
# cws_be_08_temp_weighted_mean[i] <- NA
# cws_be_08_temp_min_T_filter[i] <- NA
# cws_be_08_number_of_cws[i] <- NA
# cws_be_08_temp_difference_weighted_mean[i] <- NA
# cws_be_08_temp_difference_min_T_filter[i] <- NA
# }
# # check whether any CWS is within BOTH dt AND rad
# temp_within_delta_t <- which(abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt)
# temp_within_rad <- which(dist_cws_be_08_bicycle[i,] <= r)
# temp_within_rad_within_delta_t <- temp_within_rad[temp_within_rad %in% temp_within_delta_t]
# a <- which(dist_cws_be_08_bicycle[i,] <= r)[which(dist_cws_be_08_bicycle[i,] <= r) %in% which(abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt)]
else if ((length(which(dist_cws_be_08_bicycle[i,] <= r)[which(dist_cws_be_08_bicycle[i,] <= r) %in% which(abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt)]) == 0)){
# If no value within rad and within dt, then write NA
cws_be_08_dist[i] <- NA
cws_be_08_dist_mean[i] <- NA
cws_be_08_dist_name[i] <- NA
cws_be_08_dt[i] <- NA
cws_be_08_dt_mean[i] <- NA
cws_be_08_temp[i] <- NA
cws_be_08_temp_weighted_mean[i] <- NA
cws_be_08_temp_min_T_filter[i] <- NA
cws_be_08_number_of_cws[i] <- NA
cws_be_08_temp_difference_weighted_mean[i] <- NA
cws_be_08_temp_difference_min_T_filter[i] <- NA
}
else {
## write to temporary variables
# spatial distance
temp_within_rad <- t(as.data.frame(which((dist_cws_be_08_bicycle[i,] <= r) == TRUE))) # indices of distance values within radius
temp_dist <- as.data.frame(dist_cws_be_08_bicycle[i,temp_within_rad]) # distances of those indices (temporarily stored)
# temporal distance
temp_within_delta_t <- t(as.data.frame(which((abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt) == TRUE))) # indices of distance values within radius
temp_delta_t <- as.data.frame(cws_be_08_bicycle_time_orig_dt[i,temp_within_delta_t]) # delta t of those indices (temporarily stored)
# cws within both dt and r
temp_within_rad_within_delta_t <- temp_within_rad[temp_within_rad %in% temp_within_delta_t]
# distance of only those cws
temp_dist_within_rad_within_delta_t <- as.data.frame(dist_cws_be_08_bicycle[i,temp_within_rad_within_delta_t]) # distances of those indices (temporarily stored)
# delta t of only those cws
temp_delta_t_within_rad_within_delta_t <- as.data.frame(cws_be_08_bicycle_time_orig_dt[i,temp_within_rad_within_delta_t]) # delta t of those indices (temporarily stored)
# T within dt and r
temp_temp <- data.table(cws_be_08_bicycle_ta_int_orig[i, temp_within_rad_within_delta_t]) # temperature of cws within radius
temp_temp <- as.data.frame(temp_temp) #convert back to df
# extract names of the cws
ifelse(names(temp_temp) == "V1", # V1 would be the name if only 1 CWS is within rad and dt. thats what I catch here
temp_name <- colnames(cws_be_08_bicycle_ta_int_orig[temp_within_rad_within_delta_t]),
temp_name <- names(temp_temp))
## write to actual vectors
# spatial distance
cws_be_08_dist_mean[i] <- mean(as.numeric(temp_dist_within_rad_within_delta_t[])) # mean of those distances
# temporal distance
cws_be_08_dt_mean[i] <- mean(as.numeric((temp_delta_t_within_rad_within_delta_t[])))
# names of cws within dt and r
cws_be_08_dist_name[i] <- paste(temp_name[],
collapse = ",") # collaps the names into one cell
# weighted mean should now be according to dt
cws_be_08_temp_weighted_mean[i] <- weighted.mean(temp_temp, (1/((temp_dist_within_rad_within_delta_t))^p), na.rm = TRUE) # mean of these temps
# convert to NA if value is NaN
# ifelse(cws_be_08_temp_weighted_mean[i]== "NaN",
# cws_be_08_temp_weighted_mean[i] <- NA,
# cws_be_08_temp_weighted_mean[i] <- cws_be_08_temp_weighted_mean[i])
# also document the CWS temperature which has the lowest absolute T (so minimum filter)
cws_be_08_temp_min_T_filter[i] <- min(temp_temp, na.rm=T)
# convert to NA if value is Inf
# ifelse(cws_be_08_temp_min_T_filter[i] == "Inf",
# cws_be_08_temp_min_T_filter[i] <- NA,
# cws_be_08_temp_min_T_filter[i] <- cws_be_08_temp_min_T_filter[i])
#
cws_be_08_number_of_cws[i] <- length(temp_within_rad_within_delta_t)
cws_be_08_dt[i] <- apply(temp_delta_t_within_rad_within_delta_t, 1,
function(x) paste(x[!is.na(x)],collapse = ", ")) # collaps distances into one cell
cws_be_08_dist[i] <- apply(temp_dist_within_rad_within_delta_t, 1,
function(x) paste(x[!is.na(x)],collapse = ", "))
cws_be_08_temp[i] <- paste(temp_temp[1:ncol(temp_temp)],collapse = ", ") # collaps temperatures into 1 cell
cws_be_08_temp_difference_weighted_mean[i] <- cws_be_08_temp_weighted_mean[i]- bicycle$Temp.C[i]
cws_be_08_temp_difference_min_T_filter[i] <- cws_be_08_temp_min_T_filter[i]- bicycle$Temp.C[i]
rm(temp_within_rad, temp_dist, temp_temp,temp_name, temp_within_rad, temp_delta_t,
temp_delta_t_within_rad_within_delta_t, temp_dist_within_rad_within_delta_t, temp_within_delta_t,
temp_within_rad_within_delta_t) # remove temporary variables
}
}
cws_analysis <- as.data.frame(cbind(cws_be_08_dist_mean,cws_be_08_dist, cws_be_08_dist_name,
cws_be_08_dt_mean, cws_be_08_dt, cws_be_08_temp,
cws_be_08_temp_weighted_mean, cws_be_08_temp_min_T_filter,
cws_be_08_number_of_cws, cws_be_08_temp_difference_weighted_mean,
cws_be_08_temp_difference_min_T_filter))
# replace NaN and Inf by NA
cws_analysis <- replace(cws_analysis, cws_analysis == "NaN" | cws_analysis == "Inf", NA)
#
write.csv2(cws_analysis, file = paste("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/cws_analysis_radius_", r, "_dt_" , dt ,".csv", sep = ""))
rm(cws_be_08_dist_mean,cws_be_08_dist, cws_be_08_dist_name,
cws_be_08_dt_mean, cws_be_08_dt, cws_be_08_temp,
cws_be_08_temp_weighted_mean, cws_be_08_temp_min_T_filter,
cws_be_08_number_of_cws, cws_be_08_temp_difference_weighted_mean,
cws_be_08_temp_difference_min_T_filter, cws_analysis)
}
}
# list the generated files
c <- list.files(path="output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/")
cws_list = lapply(paste0("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/",c), read.csv2)
names(cws_list) <- c(substr(c[1:length(c)],14,24))
# read the files from the list to single df
for (f in c){
print(f)
name <- f
assign(name,read.csv2(file=paste0("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/",f),
stringsAsFactors = F)[,-1])
rm(name)
}; rm(c, f)
### end cws ###
# plots to check ----------------------------------------------------------
# compare IDW temperature to minimum filter T
t = c(2430:2470)
q = cws_analysis_radius_300_dt_900.csv[t,]
q2 = cws_analysis_radius_300_dt_900_incl_inf_NaN.csv[t,]
t1 = c(2000: 2400)
plot(bicycle$RecNo[t1], as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_weighted_mean[t1]), type = "l")
lines(bicycle$RecNo[t1],as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_min_T_filter[t1]), col = "red")
plot(bicycle$RecNo[t1], as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_weighted_mean[t1]), type = "l")
lines(bicycle$RecNo[t1],as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_min_T_filter[t1]), col = "red")
mean(abs(as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_weighted_mean)), na.rm=T)
mean(abs(as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_min_T_filter )), na.rm=T)
var(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_weighted_mean, na.rm = T)
var(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_min_T_filter, na.rm = T)
# Mean Logger Analysis -------------------------------------------------------------
### Define vector with log temperature along the bicycle transect ###
# loop through every bicycle measurement (=timestep, i) and write the following to new columns:
#define
rad <- c(200,500) # search radii
p <- 1 # power parameter for the inverse spatial distance function
### log for loop ###
###
for (r in rad){
log_dist <- (c(rep(NA, nrow(log_bicycle)))) # distances of closest cws measurement
log_dist_mean <- (c(rep(NA, nrow(log_bicycle)))) # mean of those distances
log_dist_name <- (c(rep(NA, nrow(log_bicycle)))) # name of the cws within the radius (=column name)
log_temp <- (c(rep(NA, nrow(log_bicycle)))) # temperature values of those cws
log_temp_weighted_mean <- (c(rep(NA, nrow(log_bicycle)))) # weighted mean of those values
log_number_of_log <- (c(rep(NA, nrow(log_bicycle)))) # the ammount of cws within the radius
log_temp_difference <- (c(rep(NA, nrow(log_bicycle)))) # difference to bicycle temp
for (i in 1:nrow(log_bicycle)){
print(paste("Calulating row",i,"for radius",r,"meters"))
if (is.na(min(dist_log_bicycle[i,]) == TRUE)){
log_dist[i] <- NA
log_dist_mean[i] <- NA
log_dist_name[i] <- NA
log_temp[i] <- NA
log_temp_weighted_mean[i] <- NA
log_number_of_log[i] <- NA
log_temp_difference[i] <- NA
}
else if ((length(which((dist_log_bicycle[i,] <= r) == TRUE)) == 0L)==TRUE){
# if no distance is within the radius
log_dist[i] <- NA
log_dist_mean[i] <- NA
log_dist_name[i] <- NA
log_temp[i] <- NA
log_temp_weighted_mean[i] <- NA
log_number_of_log[i] <- NA
log_temp_difference[i] <- NA
}
else {
# write to temporary variables
temp_within_rad <- t(as.data.frame(which((dist_log_bicycle[i,] <= r) == TRUE))) # indices of distance values within radius
temp_dist <- as.data.frame(dist_log_bicycle[i,temp_within_rad]) # distances of those indices (temporarily stored)
temp_within_rad_1 <- temp_within_rad + 23 # +23 because log_bicycle is 23 columns longer than dist)
temp_temp <- data.table(log_bicycle[i, temp_within_rad_1]) # temperature of log within radius
temp_temp <- as.data.frame(temp_temp) #convert back to df
temp_name <- names(temp_temp) # names of the cws stations within the radius (stored temporarily)
# write to actual vectors
log_dist_mean[i] <- mean(as.numeric(temp_dist[])) # mean of those distances
log_dist_name[i] <- paste(temp_name[],
collapse = ",") # collaps the names into one cell
log_temp_weighted_mean[i] <- weighted.mean(temp_temp, (1/((temp_dist))^p), na.rm = TRUE) # mean of these temps
log_number_of_log[i] <- length(temp_within_rad)
log_dist[i] <- apply(temp_dist, 1,
function(x) paste(x[!is.na(x)],collapse = ", ")) # collaps distances into one cell
log_temp[i] <- paste(temp_temp[1:ncol(temp_temp)],collapse = ", ") # collaps temperatures into 1 cell
log_temp_difference[i] <- log_temp_weighted_mean[i]- bicycle$Temp.degC[i]
rm(temp_within_rad, temp_dist, temp_temp,temp_name, temp_within_rad_1) # remove temporary variables
}
}
log_analysis <- as.data.frame(log_dist_mean)
log_analysis$log_dist <- log_dist
log_analysis$log_dist_name <- log_dist_name
log_analysis$log_temp_weighted_mean <- log_temp_weighted_mean
log_analysis$log_temp <- log_temp
log_analysis$log_number_of_log <- log_number_of_log
log_analysis$log_temp_difference <- log_temp_difference
write.csv2(log_analysis,file = paste("output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/log_analysis_radius_", r, ".csv", sep = ""))
rm(log_dist, log_dist_mean,log_dist_name,log_temp,log_analysis,
log_temp_weighted_mean,log_number_of_log, log_temp_difference)
}
# read the .csv files
l <- list.files(path="output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/")
log_list = lapply(paste0("output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/",l), read.csv2)
names(log_list) <- c(substr(l[1:length(l)],14,24))
rm(rad,p)
# read the files from the list to single df
for (f in l){
print(f)
name <- f
assign(name,read.csv2(file=paste0("output_reworked/2_spatial_analysis_reworked/log_analysis/",f))[,-1])
rm(name)
}; rm(f,l,rad,p)
### end mean log ###
| /2a_Spatial_and_temporal_Analysis.R | no_license | Brian6330/RIG-HeatMap | R | false | false | 19,175 | r | # Netatmo Spatial Analysis
# this file does the spatial analysis of the log and cws data.
# It compares the (inverse distance weighted)
# mean temperatures of the log/cws within multiple
# radii around every bicycle measurement.
# SET WORKING DIRECTORY
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# install libraries
library("measurements") #for converting lat/lon in degrees,min,sec to decimal degrees
library("tidyverse") # for data manipulation
library("dplyr")
library("raster") # for distance calculations
library("data.table") # for data table manipulations
library("Metrics") # for statistical calculations
dir.create("output_reworked/2_spatial_and_temporal_analysis_reworked/")
dir.create("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/")
dir.create("output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/")
# load functions
source("r_scripts/functions/complete_cases_function.R")
source("r_scripts/functions/convert_df_cols_to_POSIX_tz_Bern_function.r")
# Read Files (from processing output) -------------------------------------
# bicycle
files <- list.files(path="output_reworked/0_pre_processing_orig/bicycle/")
for (f in files){
print(f)
name <- substr(f,1,nchar(f)-4)
assign(name,read.csv(file=paste0("output_reworked/0_pre_processing_orig/bicycle/",f),
header = T, sep = ","))
rm(name)
}; rm(f, files)
rm(bicycle_complete)
# log
files <- list.files(path="output_reworked/0_pre_processing_orig/log/")
for (f in files){
print(f)
name <- substr(f,1,nchar(f)-4)
assign(name,read.csv(file=paste0("output_reworked/0_pre_processing_orig/log/",f),
header = T, sep = ","))
rm(name)
}; rm(f, files)
# cws
files <- list.files(path="output_reworked/0_pre_processing_orig/cws_be_2019/")
for (f in files){
print(f)
name <- substr(f,1,nchar(f)-4)
assign(name,read.csv(file=paste0("output_reworked/0_pre_processing_orig/cws_be_2019/",f),
header = T, sep = ","))
rm(name)
}; rm(f, files)
# distance matrices
files <- list.files(path="output_reworked/0_pre_processing_orig/distance/")
for (f in files){
print(f)
name <- substr(f,1,nchar(f)-4)
assign(name,read.csv(file=paste0("output_reworked/0_pre_processing_orig/distance/",f),
header = T, sep = ","))
rm(name)
}; rm(f, files)
# Convert times to POSIX --------------------------------------------------
for (i in 1:length(cws_be_2019_bicycle_time_orig)){
cws_be_2019_bicycle_time_orig[,i] <- as.POSIXct(as.character(cws_be_2019_bicycle_time_orig[,i]),
tz = "Europe/Berlin")
}
# Mean CWS Analysis ----------------------------------------------------------------
#define variables and vectors
rad <- c(100,150,200,250,300,400,500,600,
700,800,900,1000,1500,2000,3000) # search radius in meters
delta_t = c(60*60, 30*60, 15*60, 10*60, 5*60) # temporal distance in seconds
p <- 1 # power parameter for the inverse distance function
## parameters for testing the loop
# i = 2451 # here there NA values within rad and dt and therefore the script writes give NaN or Inf as temperature...
# i = 2450 # here everything is fine
# r <- rad
# dt <- delta_t
### CWS for loop ###
###
for (r in rad){
for (dt in delta_t){
# create empty vectors to save data in
cws_be_08_dist <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # distances of closest cws measurement
cws_be_08_dist_mean <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # mean of those distances
cws_be_08_dist_name <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # name of the cws within the radius (=column name)
cws_be_08_dt <- (c(rep(NA, nrow(cws_be_08_bicycle))))
cws_be_08_dt_mean <- (c(rep(NA, nrow(cws_be_08_bicycle))))
cws_be_08_temp <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # temperature values of those cws
cws_be_08_temp_weighted_mean <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # weighted mean of those values
cws_be_08_temp_min_T_filter <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # minimum T of cws within that radius
cws_be_08_number_of_cws <- (c(rep(NA, nrow(cws_be_08_bicycle)))) # the ammount of cws within the radius
cws_be_08_temp_difference_weighted_mean <- (c(rep(NA, nrow(cws_be_08_bicycle))))
cws_be_08_temp_difference_min_T_filter <- (c(rep(NA, nrow(cws_be_08_bicycle))))
for (i in 1:nrow(cws_be_08_bicycle)){
print(paste("CWS:","Calulating row",i,"for radius",r,"meters","and time difference",dt/60,"minutes"))
if (is.na(min(dist_cws_be_08_bicycle[i,]) == TRUE)){
# if NA then remove
cws_be_08_dist[i] <- NA
cws_be_08_dist_mean[i] <- NA
cws_be_08_dist_name[i] <- NA
cws_be_08_dt[i] <- NA
cws_be_08_dt_mean[i] <- NA
cws_be_08_temp[i] <- NA
cws_be_08_temp_weighted_mean[i] <- NA
cws_be_08_temp_min_T_filter[i] <- NA
cws_be_08_number_of_cws[i] <- NA
cws_be_08_temp_difference_weighted_mean[i] <- NA
cws_be_08_temp_difference_min_T_filter[i] <- NA
}
# else if ((length(which((dist_cws_be_08_bicycle[i,] <= r) == TRUE)) == 0L)==TRUE){
# # if no distance is within the radius
# cws_be_08_dist[i] <- NA
# cws_be_08_dist_mean[i] <- NA
# cws_be_08_dist_name[i] <- NA
# cws_be_08_dt[i] <- NA
# cws_be_08_dt_mean[i] <- NA
# cws_be_08_temp[i] <- NA
# cws_be_08_temp_weighted_mean[i] <- NA
# cws_be_08_temp_min_T_filter[i] <- NA
# cws_be_08_number_of_cws[i] <- NA
# cws_be_08_temp_difference_weighted_mean[i] <- NA
# cws_be_08_temp_difference_min_T_filter[i] <- NA
# }
# else if ((length(which((abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt) == TRUE)) == 0L)==TRUE){
# # if no temporal distance is within delta t (then the length of the which() expression will be larger thatn 0)
# cws_be_08_dist[i] <- NA
# cws_be_08_dist_mean[i] <- NA
# cws_be_08_dist_name[i] <- NA
# cws_be_08_dt[i] <- NA
# cws_be_08_dt_mean[i] <- NA
# cws_be_08_temp[i] <- NA
# cws_be_08_temp_weighted_mean[i] <- NA
# cws_be_08_temp_min_T_filter[i] <- NA
# cws_be_08_number_of_cws[i] <- NA
# cws_be_08_temp_difference_weighted_mean[i] <- NA
# cws_be_08_temp_difference_min_T_filter[i] <- NA
# }
# # check whether any CWS is within BOTH dt AND rad
# temp_within_delta_t <- which(abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt)
# temp_within_rad <- which(dist_cws_be_08_bicycle[i,] <= r)
# temp_within_rad_within_delta_t <- temp_within_rad[temp_within_rad %in% temp_within_delta_t]
# a <- which(dist_cws_be_08_bicycle[i,] <= r)[which(dist_cws_be_08_bicycle[i,] <= r) %in% which(abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt)]
else if ((length(which(dist_cws_be_08_bicycle[i,] <= r)[which(dist_cws_be_08_bicycle[i,] <= r) %in% which(abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt)]) == 0)){
# If no value within rad and within dt, then write NA
cws_be_08_dist[i] <- NA
cws_be_08_dist_mean[i] <- NA
cws_be_08_dist_name[i] <- NA
cws_be_08_dt[i] <- NA
cws_be_08_dt_mean[i] <- NA
cws_be_08_temp[i] <- NA
cws_be_08_temp_weighted_mean[i] <- NA
cws_be_08_temp_min_T_filter[i] <- NA
cws_be_08_number_of_cws[i] <- NA
cws_be_08_temp_difference_weighted_mean[i] <- NA
cws_be_08_temp_difference_min_T_filter[i] <- NA
}
else {
## write to temporary variables
# spatial distance
temp_within_rad <- t(as.data.frame(which((dist_cws_be_08_bicycle[i,] <= r) == TRUE))) # indices of distance values within radius
temp_dist <- as.data.frame(dist_cws_be_08_bicycle[i,temp_within_rad]) # distances of those indices (temporarily stored)
# temporal distance
temp_within_delta_t <- t(as.data.frame(which((abs(as.numeric(cws_be_08_bicycle_time_orig_dt[i,])) <= dt) == TRUE))) # indices of distance values within radius
temp_delta_t <- as.data.frame(cws_be_08_bicycle_time_orig_dt[i,temp_within_delta_t]) # delta t of those indices (temporarily stored)
# cws within both dt and r
temp_within_rad_within_delta_t <- temp_within_rad[temp_within_rad %in% temp_within_delta_t]
# distance of only those cws
temp_dist_within_rad_within_delta_t <- as.data.frame(dist_cws_be_08_bicycle[i,temp_within_rad_within_delta_t]) # distances of those indices (temporarily stored)
# delta t of only those cws
temp_delta_t_within_rad_within_delta_t <- as.data.frame(cws_be_08_bicycle_time_orig_dt[i,temp_within_rad_within_delta_t]) # delta t of those indices (temporarily stored)
# T within dt and r
temp_temp <- data.table(cws_be_08_bicycle_ta_int_orig[i, temp_within_rad_within_delta_t]) # temperature of cws within radius
temp_temp <- as.data.frame(temp_temp) #convert back to df
# extract names of the cws
ifelse(names(temp_temp) == "V1", # V1 would be the name if only 1 CWS is within rad and dt. thats what I catch here
temp_name <- colnames(cws_be_08_bicycle_ta_int_orig[temp_within_rad_within_delta_t]),
temp_name <- names(temp_temp))
## write to actual vectors
# spatial distance
cws_be_08_dist_mean[i] <- mean(as.numeric(temp_dist_within_rad_within_delta_t[])) # mean of those distances
# temporal distance
cws_be_08_dt_mean[i] <- mean(as.numeric((temp_delta_t_within_rad_within_delta_t[])))
# names of cws within dt and r
cws_be_08_dist_name[i] <- paste(temp_name[],
collapse = ",") # collaps the names into one cell
# weighted mean should now be according to dt
cws_be_08_temp_weighted_mean[i] <- weighted.mean(temp_temp, (1/((temp_dist_within_rad_within_delta_t))^p), na.rm = TRUE) # mean of these temps
# convert to NA if value is NaN
# ifelse(cws_be_08_temp_weighted_mean[i]== "NaN",
# cws_be_08_temp_weighted_mean[i] <- NA,
# cws_be_08_temp_weighted_mean[i] <- cws_be_08_temp_weighted_mean[i])
# also document the CWS temperature which has the lowest absolute T (so minimum filter)
cws_be_08_temp_min_T_filter[i] <- min(temp_temp, na.rm=T)
# convert to NA if value is Inf
# ifelse(cws_be_08_temp_min_T_filter[i] == "Inf",
# cws_be_08_temp_min_T_filter[i] <- NA,
# cws_be_08_temp_min_T_filter[i] <- cws_be_08_temp_min_T_filter[i])
#
cws_be_08_number_of_cws[i] <- length(temp_within_rad_within_delta_t)
cws_be_08_dt[i] <- apply(temp_delta_t_within_rad_within_delta_t, 1,
function(x) paste(x[!is.na(x)],collapse = ", ")) # collaps distances into one cell
cws_be_08_dist[i] <- apply(temp_dist_within_rad_within_delta_t, 1,
function(x) paste(x[!is.na(x)],collapse = ", "))
cws_be_08_temp[i] <- paste(temp_temp[1:ncol(temp_temp)],collapse = ", ") # collaps temperatures into 1 cell
cws_be_08_temp_difference_weighted_mean[i] <- cws_be_08_temp_weighted_mean[i]- bicycle$Temp.C[i]
cws_be_08_temp_difference_min_T_filter[i] <- cws_be_08_temp_min_T_filter[i]- bicycle$Temp.C[i]
rm(temp_within_rad, temp_dist, temp_temp,temp_name, temp_within_rad, temp_delta_t,
temp_delta_t_within_rad_within_delta_t, temp_dist_within_rad_within_delta_t, temp_within_delta_t,
temp_within_rad_within_delta_t) # remove temporary variables
}
}
cws_analysis <- as.data.frame(cbind(cws_be_08_dist_mean,cws_be_08_dist, cws_be_08_dist_name,
cws_be_08_dt_mean, cws_be_08_dt, cws_be_08_temp,
cws_be_08_temp_weighted_mean, cws_be_08_temp_min_T_filter,
cws_be_08_number_of_cws, cws_be_08_temp_difference_weighted_mean,
cws_be_08_temp_difference_min_T_filter))
# replace NaN and Inf by NA
cws_analysis <- replace(cws_analysis, cws_analysis == "NaN" | cws_analysis == "Inf", NA)
#
write.csv2(cws_analysis, file = paste("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/cws_analysis_radius_", r, "_dt_" , dt ,".csv", sep = ""))
rm(cws_be_08_dist_mean,cws_be_08_dist, cws_be_08_dist_name,
cws_be_08_dt_mean, cws_be_08_dt, cws_be_08_temp,
cws_be_08_temp_weighted_mean, cws_be_08_temp_min_T_filter,
cws_be_08_number_of_cws, cws_be_08_temp_difference_weighted_mean,
cws_be_08_temp_difference_min_T_filter, cws_analysis)
}
}
# list the generated files
c <- list.files(path="output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/")
cws_list = lapply(paste0("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/",c), read.csv2)
names(cws_list) <- c(substr(c[1:length(c)],14,24))
# read the files from the list to single df
for (f in c){
print(f)
name <- f
assign(name,read.csv2(file=paste0("output_reworked/2_spatial_and_temporal_analysis_reworked/cws_analysis/",f),
stringsAsFactors = F)[,-1])
rm(name)
}; rm(c, f)
### end cws ###
# plots to check ----------------------------------------------------------
# compare IDW temperature to minimum filter T
t = c(2430:2470)
q = cws_analysis_radius_300_dt_900.csv[t,]
q2 = cws_analysis_radius_300_dt_900_incl_inf_NaN.csv[t,]
t1 = c(2000: 2400)
plot(bicycle$RecNo[t1], as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_weighted_mean[t1]), type = "l")
lines(bicycle$RecNo[t1],as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_min_T_filter[t1]), col = "red")
plot(bicycle$RecNo[t1], as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_weighted_mean[t1]), type = "l")
lines(bicycle$RecNo[t1],as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_min_T_filter[t1]), col = "red")
mean(abs(as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_weighted_mean)), na.rm=T)
mean(abs(as.numeric(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_min_T_filter )), na.rm=T)
var(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_weighted_mean, na.rm = T)
var(cws_analysis_radius_300_dt_900.csv$cws_be_08_temp_difference_min_T_filter, na.rm = T)
# Mean Logger Analysis -------------------------------------------------------------
### Define vector with log temperature along the bicycle transect ###
# loop through every bicycle measurement (=timestep, i) and write the following to new columns:
#define
rad <- c(200,500) # search radii
p <- 1 # power parameter for the inverse spatial distance function
### log for loop ###
###
for (r in rad){
log_dist <- (c(rep(NA, nrow(log_bicycle)))) # distances of closest cws measurement
log_dist_mean <- (c(rep(NA, nrow(log_bicycle)))) # mean of those distances
log_dist_name <- (c(rep(NA, nrow(log_bicycle)))) # name of the cws within the radius (=column name)
log_temp <- (c(rep(NA, nrow(log_bicycle)))) # temperature values of those cws
log_temp_weighted_mean <- (c(rep(NA, nrow(log_bicycle)))) # weighted mean of those values
log_number_of_log <- (c(rep(NA, nrow(log_bicycle)))) # the ammount of cws within the radius
log_temp_difference <- (c(rep(NA, nrow(log_bicycle)))) # difference to bicycle temp
for (i in 1:nrow(log_bicycle)){
print(paste("Calulating row",i,"for radius",r,"meters"))
if (is.na(min(dist_log_bicycle[i,]) == TRUE)){
log_dist[i] <- NA
log_dist_mean[i] <- NA
log_dist_name[i] <- NA
log_temp[i] <- NA
log_temp_weighted_mean[i] <- NA
log_number_of_log[i] <- NA
log_temp_difference[i] <- NA
}
else if ((length(which((dist_log_bicycle[i,] <= r) == TRUE)) == 0L)==TRUE){
# if no distance is within the radius
log_dist[i] <- NA
log_dist_mean[i] <- NA
log_dist_name[i] <- NA
log_temp[i] <- NA
log_temp_weighted_mean[i] <- NA
log_number_of_log[i] <- NA
log_temp_difference[i] <- NA
}
else {
# write to temporary variables
temp_within_rad <- t(as.data.frame(which((dist_log_bicycle[i,] <= r) == TRUE))) # indices of distance values within radius
temp_dist <- as.data.frame(dist_log_bicycle[i,temp_within_rad]) # distances of those indices (temporarily stored)
temp_within_rad_1 <- temp_within_rad + 23 # +23 because log_bicycle is 23 columns longer than dist)
temp_temp <- data.table(log_bicycle[i, temp_within_rad_1]) # temperature of log within radius
temp_temp <- as.data.frame(temp_temp) #convert back to df
temp_name <- names(temp_temp) # names of the cws stations within the radius (stored temporarily)
# write to actual vectors
log_dist_mean[i] <- mean(as.numeric(temp_dist[])) # mean of those distances
log_dist_name[i] <- paste(temp_name[],
collapse = ",") # collaps the names into one cell
log_temp_weighted_mean[i] <- weighted.mean(temp_temp, (1/((temp_dist))^p), na.rm = TRUE) # mean of these temps
log_number_of_log[i] <- length(temp_within_rad)
log_dist[i] <- apply(temp_dist, 1,
function(x) paste(x[!is.na(x)],collapse = ", ")) # collaps distances into one cell
log_temp[i] <- paste(temp_temp[1:ncol(temp_temp)],collapse = ", ") # collaps temperatures into 1 cell
log_temp_difference[i] <- log_temp_weighted_mean[i]- bicycle$Temp.degC[i]
rm(temp_within_rad, temp_dist, temp_temp,temp_name, temp_within_rad_1) # remove temporary variables
}
}
log_analysis <- as.data.frame(log_dist_mean)
log_analysis$log_dist <- log_dist
log_analysis$log_dist_name <- log_dist_name
log_analysis$log_temp_weighted_mean <- log_temp_weighted_mean
log_analysis$log_temp <- log_temp
log_analysis$log_number_of_log <- log_number_of_log
log_analysis$log_temp_difference <- log_temp_difference
write.csv2(log_analysis,file = paste("output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/log_analysis_radius_", r, ".csv", sep = ""))
rm(log_dist, log_dist_mean,log_dist_name,log_temp,log_analysis,
log_temp_weighted_mean,log_number_of_log, log_temp_difference)
}
# read the .csv files
l <- list.files(path="output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/")
log_list = lapply(paste0("output_reworked/2_spatial_and_temporal_analysis_reworked/log_analysis/",l), read.csv2)
names(log_list) <- c(substr(l[1:length(l)],14,24))
rm(rad,p)
# read the files from the list to single df
for (f in l){
print(f)
name <- f
assign(name,read.csv2(file=paste0("output_reworked/2_spatial_analysis_reworked/log_analysis/",f))[,-1])
rm(name)
}; rm(f,l,rad,p)
### end mean log ###
|
#analyze the heritability only on invasive cases and five intrinsic subtypes with complete data
rm(list=ls())
#install_github("andrewhaoyu/bc2", ref = "master",args = c('--library="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.4"'))
library(devtools)
#with_libpaths(new = "/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.6/", install_github('andrewhaoyu/bcutility'))
arg <- commandArgs(trailingOnly=T)
i1 <- as.numeric(arg[[1]])
i2 <- as.numeric(arg[[2]])
print(i1)
print(i2)
library(R.utils)
setwd("/data/zhangh24/breast_cancer_data_analysis/")
n <- 109713
snpvalue <- rep(0,n)
subject.file <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
library(data.table)
setwd("/data/zhangh24/breast_cancer_data_analysis/")
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
x.covar1 <- as.matrix(data1[,c(5:14)])
#use z.standard as tumor indicator matrix
#just for easy copy of previous code
z.standard <- y.pheno.mis1[,2:5]
idx.1 <- which(((z.standard[,1]==1|z.standard[,2]==1)
&z.standard[,3]==0
&(z.standard[,4]==1|z.standard[,4]==2))|y.pheno.mis1[,1]==0)
#for second subtype HR+_HER2+
idx.2 <- which(((z.standard[,1]==1|z.standard[,2]==1)
&z.standard[,3]==1)|y.pheno.mis1[,1]==0)
#for third subtype HR+_HER2-_highgrade
idx.3 <- which(((z.standard[,1]==1|z.standard[,2]==1)
&z.standard[,3]==0
&z.standard[,4]==3)|y.pheno.mis1[,1]==0)
#for third subtype HR-_HER2+
idx.4 <- which((z.standard[,1]==0&z.standard[,2]==0
&z.standard[,3]==1)|y.pheno.mis1[,1]==0)
#for third subtype HR-_HER2-
idx.5 <- which((z.standard[,1]==0&z.standard[,2]==0
&z.standard[,3]==0)|y.pheno.mis1[,1]==0)
idx.list <- list(c(1:nrow(y.pheno.mis1)),idx.1,idx.2,idx.3,idx.4,idx.5)
rm(data1)
gc()
idx.fil <- Icog.order[,1]%in%SG_ID
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#Icog.order.match <- Icog.order[idx.fil,1][idx.match]
#library(bcutility,lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.6/")
library(bc2,lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.6/")
Filesdir <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
Filesex <- dir(Filesdir,pattern="icogs_merged_b1_12.chr23",full.names=T)
idx.sex <- Files%in%Filesex
Files <- Files[!idx.sex]
library(gtools)
Files <- mixedsort(Files)
geno.file <- Files[i1]
# tryCatch(
# {
num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
# },
# error=function(cond){
# num <- countLines(geno.file)[1]
# }
# )
size = 3
start.end <- startend(num,size,i2)
start <- start.end[1]
end <- start.end[2]
file.num <- end-start+1
#num = 22349
#num <- countLines(geno.file)[1];
#num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
#rm(pheno)
num.of.tumor <- 7
n.sub <- nrow(y.pheno.mis1)
idx.control <- which(y.pheno.mis1[,1]==0)
n.control <- length(idx.control)
standard_analysis <- function(y,
                              gene1,
                              x.covar1){
  # Single-SNP logistic regression of case/control status on the genotype
  # dosage plus the covariate matrix.
  # Returns: coeff = log-odds coefficient for the genotype term,
  #          var   = its variance (squared standard error).
  fit <- glm(y ~ gene1 + x.covar1, family = binomial(link = 'logit'))
  fit.summary <- summary(fit)
  beta.snp <- unname(coef(fit)[2])
  var.snp <- fit.summary$coefficients[2, 2]^2
  list(coeff = beta.snp,
       var = var.snp)
}
y <- y.pheno.mis1[,1]
no.cores <- 2
library(foreach)
library(doParallel)
inner.size <- 2
registerDoParallel(no.cores)
# Split this job's [start, end] chunk of genotype-file lines across 2 parallel
# workers; each worker streams the gzipped file line by line and runs the
# per-SNP logistic regressions on its own sub-range.
result.list <- foreach(job.i = 1:2)%dopar%{
# Sub-range (relative to this job's chunk) handled by this worker.
inner.start.end <- startend(file.num,inner.size,job.i)
inner.start <- inner.start.end[1]
inner.end <- inner.start.end[2]
inner.file.num <- inner.end-inner.start+1
# Absolute line numbers inside the genotype file.
true.start <- start+inner.start-1
true.end <- start+inner.end-1
# Per-worker result containers, one row per SNP line processed.
score_result <- matrix(0,inner.file.num,num.of.tumor-1)
infor_result <- matrix(0,inner.file.num,(num.of.tumor-1))
snpid_result <- rep("c",inner.file.num)
freq.all <- rep(0,inner.file.num)
temp <- 0
con <- gzfile(geno.file)
open(con)
# Lines before true.start are skipped but must still be consumed to advance
# the connection; the loop exits once true.end has been handled.
for(i in 1:num){
if(i%%500==0){
print(i)
}
oneLine <- readLines(con,n=1)
if(i>=true.start){
if(temp%%100==0){
print(paste0("temp",temp))
}
#print(i)
temp = temp+1
#print(i)
# Field 2 is the SNP id; fields 6.. hold the genotype-probability triplets.
myVector <- strsplit(oneLine," ")
snpid <- as.character(myVector[[1]][2])
snpid_result[temp] <- snpid
snpvalue <- rep(0,n)  # NOTE(review): dead assignment, overwritten below
snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])])
# Malformed line (not 3 probabilities per subject): abort this worker's loop.
if(length(snppro)!=(3*n)){
break
}
snpvalue <- convert(snppro,n)
# Subset/reorder genotypes to match the phenotype file's subject order.
snpvalue <- snpvalue[idx.fil][idx.match]
snpvalue.control <- snpvalue[idx.control]
# Allele frequency estimated in controls only.
freq <- sum(snpvalue.control)/(2*n.control)
freq.all[temp] <- freq
#print(paste0("freq",freq))
# tryCatch(
# {
# Skip SNPs whose control allele frequency falls outside [0.006, 0.994];
# their result rows stay at 0.
if(freq<0.006|freq>0.994){
score_result[temp,] <- 0
infor_result[temp,] <- 0
}else{
# One logistic regression per subject subset (full sample + 5 subtypes,
# see idx.list built above).
for(j in 1:6){
jdx <- idx.list[[j]]
standard_analysis_result <- standard_analysis(y[jdx],
snpvalue[jdx],
x.covar1[jdx,])
score_result[temp,j] <- as.numeric(standard_analysis_result[[1]])
infor_result[temp,j] <- as.numeric(standard_analysis_result[[2]])
}
}
}
if(i==true.end){
break
}
}
close(con)
# Worker output: SNP ids, coefficients, variances, control allele frequencies.
result <- list(snpid_result,score_result,infor_result,freq.all)
return(result)
}
stopImplicitCluster()
# Stitch the two workers' partial results back into full-length containers.
# Placeholder values (0.1 / "c" / 0) mark slots that were never filled.
score_result <- matrix(0.1,file.num,num.of.tumor-1)
infor_result <- matrix(0.1,file.num,(num.of.tumor-1))
snpid_result <- rep("c",file.num)
freq.all <- rep(0,file.num)
total <- 0
for(i in 1:inner.size){
result.temp <- result.list[[i]]
# Number of rows this worker actually produced.
temp <- length(result.temp[[1]])
snpid_result[total+(1:temp)] <- result.temp[[1]]
score_result[total+(1:temp),] <- result.temp[[2]]
infor_result[total+(1:temp),] <- result.temp[[3]]
freq.all[total+(1:temp)] <- result.temp[[4]]
total <- temp+total
}
# NOTE(review): "snpid_reuslt" is a typo for "snpid_result"; kept as-is because
# downstream code that load()s these result files may reference the misspelled
# element name.
result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all)
save(result,file=paste0("./whole_genome_age/ICOG/standard_analysis/result/standard_analysis_s",i1,"_",i2))
| /whole_genome_age/ICOG/standard_analysis/code/standard_analysis_icog_s.R | no_license | andrewhaoyu/breast_cancer_data_analysis | R | false | false | 6,673 | r | #analyze the heritability only on invasive cases and five intrinsic subtypes with complete data
rm(list=ls())
#install_github("andrewhaoyu/bc2", ref = "master",args = c('--library="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.4"'))
library(devtools)
#with_libpaths(new = "/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.6/", install_github('andrewhaoyu/bcutility'))
arg <- commandArgs(trailingOnly=T)
i1 <- as.numeric(arg[[1]])
i2 <- as.numeric(arg[[2]])
print(i1)
print(i2)
library(R.utils)
setwd("/data/zhangh24/breast_cancer_data_analysis/")
n <- 109713
snpvalue <- rep(0,n)
subject.file <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
Icog.order <- read.table(gzfile(subject.file))
library(data.table)
setwd("/data/zhangh24/breast_cancer_data_analysis/")
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
x.covar1 <- as.matrix(data1[,c(5:14)])
#use z.standard as tumor indicator matrix
#just for easy copy of previous code
z.standard <- y.pheno.mis1[,2:5]
idx.1 <- which(((z.standard[,1]==1|z.standard[,2]==1)
&z.standard[,3]==0
&(z.standard[,4]==1|z.standard[,4]==2))|y.pheno.mis1[,1]==0)
#for second subtype HR+_HER2+
idx.2 <- which(((z.standard[,1]==1|z.standard[,2]==1)
&z.standard[,3]==1)|y.pheno.mis1[,1]==0)
#for third subtype HR+_HER2-_highgrade
idx.3 <- which(((z.standard[,1]==1|z.standard[,2]==1)
&z.standard[,3]==0
&z.standard[,4]==3)|y.pheno.mis1[,1]==0)
#for third subtype HR-_HER2+
idx.4 <- which((z.standard[,1]==0&z.standard[,2]==0
&z.standard[,3]==1)|y.pheno.mis1[,1]==0)
#for third subtype HR-_HER2-
idx.5 <- which((z.standard[,1]==0&z.standard[,2]==0
&z.standard[,3]==0)|y.pheno.mis1[,1]==0)
idx.list <- list(c(1:nrow(y.pheno.mis1)),idx.1,idx.2,idx.3,idx.4,idx.5)
rm(data1)
gc()
idx.fil <- Icog.order[,1]%in%SG_ID
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#Icog.order.match <- Icog.order[idx.fil,1][idx.match]
#library(bcutility,lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.6/")
library(bc2,lib.loc ="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.6/")
Filesdir <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
Filesex <- dir(Filesdir,pattern="icogs_merged_b1_12.chr23",full.names=T)
idx.sex <- Files%in%Filesex
Files <- Files[!idx.sex]
library(gtools)
Files <- mixedsort(Files)
geno.file <- Files[i1]
# tryCatch(
# {
num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
# },
# error=function(cond){
# num <- countLines(geno.file)[1]
# }
# )
size = 3
start.end <- startend(num,size,i2)
start <- start.end[1]
end <- start.end[2]
file.num <- end-start+1
#num = 22349
#num <- countLines(geno.file)[1];
#num <- as.integer(system(paste0("zcat ",geno.file,"| wc -l"),intern=T))
#rm(pheno)
num.of.tumor <- 7
n.sub <- nrow(y.pheno.mis1)
idx.control <- which(y.pheno.mis1[,1]==0)
n.control <- length(idx.control)
standard_analysis <- function(y,
                              gene1,
                              x.covar1){
  # Single-SNP logistic regression of case/control status on the genotype
  # dosage plus the covariate matrix.
  # Returns: coeff = log-odds coefficient for the genotype term,
  #          var   = its variance (squared standard error).
  fit <- glm(y ~ gene1 + x.covar1, family = binomial(link = 'logit'))
  fit.summary <- summary(fit)
  beta.snp <- unname(coef(fit)[2])
  var.snp <- fit.summary$coefficients[2, 2]^2
  list(coeff = beta.snp,
       var = var.snp)
}
y <- y.pheno.mis1[,1]
no.cores <- 2
library(foreach)
library(doParallel)
inner.size <- 2
registerDoParallel(no.cores)
result.list <- foreach(job.i = 1:2)%dopar%{
inner.start.end <- startend(file.num,inner.size,job.i)
inner.start <- inner.start.end[1]
inner.end <- inner.start.end[2]
inner.file.num <- inner.end-inner.start+1
true.start <- start+inner.start-1
true.end <- start+inner.end-1
score_result <- matrix(0,inner.file.num,num.of.tumor-1)
infor_result <- matrix(0,inner.file.num,(num.of.tumor-1))
snpid_result <- rep("c",inner.file.num)
freq.all <- rep(0,inner.file.num)
temp <- 0
con <- gzfile(geno.file)
open(con)
for(i in 1:num){
if(i%%500==0){
print(i)
}
oneLine <- readLines(con,n=1)
if(i>=true.start){
if(temp%%100==0){
print(paste0("temp",temp))
}
#print(i)
temp = temp+1
#print(i)
myVector <- strsplit(oneLine," ")
snpid <- as.character(myVector[[1]][2])
snpid_result[temp] <- snpid
snpvalue <- rep(0,n)
snppro <- as.numeric(unlist(myVector)[6:length(myVector[[1]])])
if(length(snppro)!=(3*n)){
break
}
snpvalue <- convert(snppro,n)
snpvalue <- snpvalue[idx.fil][idx.match]
snpvalue.control <- snpvalue[idx.control]
freq <- sum(snpvalue.control)/(2*n.control)
freq.all[temp] <- freq
#print(paste0("freq",freq))
# tryCatch(
# {
if(freq<0.006|freq>0.994){
score_result[temp,] <- 0
infor_result[temp,] <- 0
}else{
for(j in 1:6){
jdx <- idx.list[[j]]
standard_analysis_result <- standard_analysis(y[jdx],
snpvalue[jdx],
x.covar1[jdx,])
score_result[temp,j] <- as.numeric(standard_analysis_result[[1]])
infor_result[temp,j] <- as.numeric(standard_analysis_result[[2]])
}
}
}
if(i==true.end){
break
}
}
close(con)
result <- list(snpid_result,score_result,infor_result,freq.all)
return(result)
}
stopImplicitCluster()
score_result <- matrix(0.1,file.num,num.of.tumor-1)
infor_result <- matrix(0.1,file.num,(num.of.tumor-1))
snpid_result <- rep("c",file.num)
freq.all <- rep(0,file.num)
total <- 0
for(i in 1:inner.size){
result.temp <- result.list[[i]]
temp <- length(result.temp[[1]])
snpid_result[total+(1:temp)] <- result.temp[[1]]
score_result[total+(1:temp),] <- result.temp[[2]]
infor_result[total+(1:temp),] <- result.temp[[3]]
freq.all[total+(1:temp)] <- result.temp[[4]]
total <- temp+total
}
result <- list(snpid_reuslt=snpid_result,score_result=score_result,infor_result=infor_result,freq.all=freq.all)
save(result,file=paste0("./whole_genome_age/ICOG/standard_analysis/result/standard_analysis_s",i1,"_",i2))
|
# Defunct API of the R.rsp package: every entry point below has been retired
# and now signals an error via .Defunct(), pointing callers at its modern
# replacement. setMethodS3() comes from the R.methodsS3 package.
setMethodS3("compileRsp0", "default", function(..., envir=parent.frame(), force=FALSE, verbose=FALSE) {
.Defunct(new="compileRsp()")
}, deprecated=TRUE)
hexToInt <- function(hex, ...) .Defunct()
setMethodS3("importRsp", "default", function(...) {
.Defunct(msg="importRsp() is deprecated. Please use <%@include ...%> instead")
})
setMethodS3("parseRsp", "default", function(rspCode, rspLanguage=getOption("rspLanguage"), trimRsp=TRUE, validate=TRUE, verbose=FALSE, ...) {
.Defunct(new="rcompile()")
}, deprecated=TRUE)
setMethodS3("rsp", "default", function(filename=NULL, path=NULL, text=NULL, response=NULL, ..., envir=parent.frame(), outPath=".", postprocess=TRUE, verbose=FALSE) {
.Defunct(new="rfile()")
}, deprecated=TRUE)
rspCapture <- function(..., wrapAt=80, collapse="\n") {
.Defunct(new="R.utils::withCapture()")
} # rspCapture()
setMethodS3("rspToHtml", "default", function(file=NULL, path=NULL, outFile=NULL, outPath=NULL, extension="html", overwrite=TRUE, ...) {
.Defunct(new="rfile()")
}, deprecated=TRUE, private=TRUE)
setMethodS3("rsptex", "default", function(..., pdf=TRUE, force=FALSE, verbose=FALSE) {
.Defunct(new="rfile()")
}, deprecated=TRUE, private=TRUE)
# NOTE(review): the replacement hint below has unbalanced parentheses; it is a
# message string, so it is left untouched here.
setMethodS3("sourceAllRsp", "default", function(pattern="[.]rsp$", path=".", extension="html", outputPath=extension, overwrite=FALSE, ..., envir=parent.frame()) {
.Defunct(new="lapply(dir(pattern='[.]rsp$', FUN=rfile)")
}, deprecated=TRUE)
setMethodS3("sourceRsp", "default", function(..., response=FileRspResponse(file=stdout()), request=NULL, envir=parent.frame(), verbose=FALSE) {
.Defunct(new="rfile(), rcat(), or rstring()")
}, deprecated=TRUE)
setMethodS3("sourceRspV2", "default", function(..., response=FileRspResponse(file=stdout()), request=NULL, envir=parent.frame(), verbose=FALSE) {
.Defunct(new="rfile(), rcat(), or rstring()")
}, deprecated=TRUE, private=TRUE)
setMethodS3("translateRsp", "default", function(filename, path=NULL, ..., force=FALSE, verbose=FALSE) {
.Defunct(new="rcode()")
}, deprecated=TRUE)
setMethodS3("translateRspV1", "default", function(file="", text=NULL, path=getParent(file), rspLanguage=getOption("rspLanguage"), trimRsp=TRUE, verbose=FALSE, ...) {
.Defunct(new="rcode()")
}, deprecated=TRUE)
urlDecode <- function(url, ...) {
.Defunct(new="utils::URLdecode()")
}
setMethodS3("import", "RspResponse", function(response, ...) {
.Defunct(msg = "RSP construct <%@import file=\"url\"%> is defunct.")
}, protected=TRUE, deprecated=TRUE)
setMethodS3("rscript", "default", function(...) {
.Defunct(new="rcode()")
}, deprecated=TRUE)
| /R/DEFUNCT.R | no_license | sbfnk/R.rsp | R | false | false | 2,591 | r | setMethodS3("compileRsp0", "default", function(..., envir=parent.frame(), force=FALSE, verbose=FALSE) {
.Defunct(new="compileRsp()")
}, deprecated=TRUE)
hexToInt <- function(hex, ...) .Defunct()
setMethodS3("importRsp", "default", function(...) {
.Defunct(msg="importRsp() is deprecated. Please use <%@include ...%> instead")
})
setMethodS3("parseRsp", "default", function(rspCode, rspLanguage=getOption("rspLanguage"), trimRsp=TRUE, validate=TRUE, verbose=FALSE, ...) {
.Defunct(new="rcompile()")
}, deprecated=TRUE)
setMethodS3("rsp", "default", function(filename=NULL, path=NULL, text=NULL, response=NULL, ..., envir=parent.frame(), outPath=".", postprocess=TRUE, verbose=FALSE) {
.Defunct(new="rfile()")
}, deprecated=TRUE)
rspCapture <- function(..., wrapAt=80, collapse="\n") {
.Defunct(new="R.utils::withCapture()")
} # rspCapture()
setMethodS3("rspToHtml", "default", function(file=NULL, path=NULL, outFile=NULL, outPath=NULL, extension="html", overwrite=TRUE, ...) {
.Defunct(new="rfile()")
}, deprecated=TRUE, private=TRUE)
setMethodS3("rsptex", "default", function(..., pdf=TRUE, force=FALSE, verbose=FALSE) {
.Defunct(new="rfile()")
}, deprecated=TRUE, private=TRUE)
setMethodS3("sourceAllRsp", "default", function(pattern="[.]rsp$", path=".", extension="html", outputPath=extension, overwrite=FALSE, ..., envir=parent.frame()) {
.Defunct(new="lapply(dir(pattern='[.]rsp$', FUN=rfile)")
}, deprecated=TRUE)
setMethodS3("sourceRsp", "default", function(..., response=FileRspResponse(file=stdout()), request=NULL, envir=parent.frame(), verbose=FALSE) {
.Defunct(new="rfile(), rcat(), or rstring()")
}, deprecated=TRUE)
setMethodS3("sourceRspV2", "default", function(..., response=FileRspResponse(file=stdout()), request=NULL, envir=parent.frame(), verbose=FALSE) {
.Defunct(new="rfile(), rcat(), or rstring()")
}, deprecated=TRUE, private=TRUE)
setMethodS3("translateRsp", "default", function(filename, path=NULL, ..., force=FALSE, verbose=FALSE) {
.Defunct(new="rcode()")
}, deprecated=TRUE)
setMethodS3("translateRspV1", "default", function(file="", text=NULL, path=getParent(file), rspLanguage=getOption("rspLanguage"), trimRsp=TRUE, verbose=FALSE, ...) {
.Defunct(new="rcode()")
}, deprecated=TRUE)
urlDecode <- function(url, ...) {
.Defunct(new="utils::URLdecode()")
}
setMethodS3("import", "RspResponse", function(response, ...) {
.Defunct(msg = "RSP construct <%@import file=\"url\"%> is defunct.")
}, protected=TRUE, deprecated=TRUE)
setMethodS3("rscript", "default", function(...) {
.Defunct(new="rcode()")
}, deprecated=TRUE)
|
library(data.table)

# Load one OpenSWATH "aligned.tsv" result set and summarise it: split
# target/decoy rows and count unique target peptides and proteins.
# setwd() is retained (and called in the same order as the original flat
# script) so the working directory ends up exactly where it did before.
read_osw_summary <- function(dir) {
  setwd(dir)
  data <- fread("aligned.tsv")
  targets <- subset(data, decoy == 0)
  decoys <- subset(data, decoy == 1)
  peptides <- unique(targets$Sequence)
  proteins <- unique(targets$ProteinName)
  list(data = data, targets = targets, decoys = decoys,
       peptides = peptides, proteins = proteins,
       n_peptides = length(peptides), n_proteins = length(proteins))
}

# TripleTOF results
ttof <- read_osw_summary("C:/DIA_Course/Tutorial4_OpenSWATH/")
data_osw_ttof <- ttof$data
targets_osw_ttof <- ttof$targets
decoys_osw_ttof <- ttof$decoys
peptides_osw_ttof <- ttof$peptides
proteins_osw_ttof <- ttof$proteins
n_peptides_osw_ttof <- ttof$n_peptides
n_proteins_osw_ttof <- ttof$n_proteins

# Q Exactive results
qe <- read_osw_summary("C:/DIA_Course/Tutorial4_OpenSWATH/QE/")
data_osw_qe <- qe$data
targets_osw_qe <- qe$targets
decoys_osw_qe <- qe$decoys
peptides_osw_qe <- qe$peptides
proteins_osw_qe <- qe$proteins
n_peptides_osw_qe <- qe$n_peptides
n_proteins_osw_qe <- qe$n_proteins

rm(ttof, qe)
| /compareOSWoutput.R | no_license | DIA-SWATH-Course/Tutorials | R | false | false | 823 | r | library(data.table)
setwd("C:/DIA_Course/Tutorial4_OpenSWATH/")
data_osw_ttof <- fread("aligned.tsv")
targets_osw_ttof <- subset(data_osw_ttof, decoy==0)
decoys_osw_ttof <- subset(data_osw_ttof, decoy==1)
peptides_osw_ttof = unique(targets_osw_ttof$Sequence)
proteins_osw_ttof = unique(targets_osw_ttof$ProteinName)
n_peptides_osw_ttof = length(peptides_osw_ttof)
n_proteins_osw_ttof = length(proteins_osw_ttof)
#
setwd("C:/DIA_Course/Tutorial4_OpenSWATH/QE/")
data_osw_qe <- fread("aligned.tsv")
targets_osw_qe <- subset(data_osw_qe, decoy==0)
decoys_osw_qe <- subset(data_osw_qe, decoy==1)
peptides_osw_qe = unique(targets_osw_qe$Sequence)
proteins_osw_qe = unique(targets_osw_qe$ProteinName)
n_peptides_osw_qe = length(peptides_osw_qe)
n_proteins_osw_qe = length(proteins_osw_qe)
|
## makeCacheMatrix( x ) and cachesolve() work together to compute the inverse
## of the matrix x while preventing unnecessary recomputation of that inverse.
## The matrix x is assumed to be numeric, square and invertible.
##
## Examples of use:
##
## # create a 2 by 2, square, numeric matrix (which just happens to be
# invertable)
## nrows <- 2
## ncols <- 2
## x <- matrix( 1:(nrows*ncols), nrows, ncols )
## print( x )
##
## # create an intermediate list object to provide access to the
## # cached inverse value of x
## x_mMatrix <- makeCacheMatrix( x )
##
## # ask for and output the inverse of x (its inverse will be computed
## # and saved within x_mMatrix)
## inverse_1 <- cachesolve( x_mMatrix )
## print( inverse_1 )
##
## # again ask for and output the inverse of x (the inverse need not be
## # computed; it will be pulled from within x_mMatrix)
## inverse_2 <- cachesolve( x_mMatrix )
## print( inverse_2 )
## makeCacheMatrix() constructs an intermediate list which will hold functions
## that can be used to access (set/get) the matrix to be inverted and the
## cached value of its inverse.
## Build a cache-aware wrapper around the matrix `x`.
## Returns a list of accessors: set/get for the matrix itself and
## setInv/getInv for its (lazily cached) inverse.
makeCacheMatrix <- function(x = matrix()) {
    # NULL marks "inverse not computed yet"
    cachedVal <- NULL

    list(
        set = function(newMatrix) {
            # Replacing the matrix invalidates any previously cached inverse.
            x <<- newMatrix
            cachedVal <<- NULL
        },
        get = function() {
            x
        },
        setInv = function(inv) {
            cachedVal <<- inv
        },
        getInv = function() {
            cachedVal
        }
    )
}
## cachesolve() operates on the intermediate list returned by makeCacheMatrix(x)
## to return the inverse of the matrix x. If the inverse of x has not been
## computed, it is computed here and returned. If the inverse of x has been
## computed on a previous call to this function, the inverse value is returned
## and no extra computation of the inverse is performed.
## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix list),
## computing it at most once: subsequent calls reuse the cached value.
## Extra arguments in `...` are forwarded to solve().
cachesolve <- function(x, ...) {
    cachedVal <- x$getInv()
    if (is.null(cachedVal)) {
        # Nothing cached yet for the current matrix: solve once and stash it.
        data <- x$get()
        cachedVal <- solve(data, ...)
        # we keep the output statement below for debugging purposes
        message("Computing new solve(data): cachedVal=", cachedVal, ", data=", data)
        x$setInv(cachedVal)
        return(cachedVal)
    }
    # Cache hit: skip the (potentially expensive) solve() call entirely.
    # we keep the output statement below for debugging purposes
    message("Getting cached solve(data): cachedVal=", cachedVal, ", data=", x$get())
    cachedVal
}
## makeCacheMatrix( x ) and cachesolve() work together to compute the inverse
## of the matrix x while preventing unnecessary recomputation of that inverse.
## The matrix x is assumed to be numeric, square and invertable.
##
## Examples of use:
##
## # create a 2 by 2, square, numeric matrix (which just happens to be
# invertable)
## nrows <- 2
## ncols <- 2
## x <- matrix( 1:(nrows*ncols), nrows, ncols )
## print( x )
##
## # create an intermediate list object to provide access to the
## # cached inverse value of x
## x_mMatrix <- makeCacheMatrix( x )
##
## # ask for and output the inverse of x (its inverse will be computed
## # and saved within x_mMatrix)
## inverse_1 <- cachesolve( x_mMatrix )
## print( inverse_1 )
##
## # again ask for and output the inverse of x (the inverse need not be
## # computed; it will be pulled from within x_mMatrix)
## inverse_2 <- cachesolve( x_mMatrix )
## print( inverse_2 )
## makeCacheMatrix() constructs an intermediate list which will hold functions
## that can be used to access (set/get) the matrix to be inverted and the
## cached value of its inverse.
makeCacheMatrix <- function(x = matrix()) {
# Indicate with NULL that an inverse has not yet been computed and cached
cachedVal <- NULL
set <- function(newMatrix) {
x <<- newMatrix
# we have a new underlying matrix x, so mark its inverse as not having
# yet been computed
cachedVal <<- NULL
}
get <- function() x
setInv <- function(inv) cachedVal <<- inv
getInv <- function() cachedVal
# return set/get for the matrix to be inverted and setInv/getInv for the
# cached inverse value
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
## cachesolve() operates on the intermediate list returned by makeCacheMatric(x)
## to return the inverse of the matric x. If the inverse of x has not been
## computed, it is computed here and returned. If the inverse of x has been
## computed on a previous call to this function, the inverse value is returned
## and no extra computation of the inverse is preformed.
cachesolve <- function(x, ...) {
cachedVal <- x$getInv()
# cachedVal will be NULL iff we have not yet computed the inverse for the
# matrix value currently in x$get().
if(!is.null(cachedVal)) {
# we already have an inverse value cached for this matrix - return it
# we keep the output statement below for debugging purposes
message("Getting cached solve(data): cachedVal=", cachedVal, ", data=", x$get())
return(cachedVal)
}
# we don't yet have an inverse value cached for this matrix so compute it here
# and stash the value using x$setInv()
data <- x$get()
cachedVal <- solve(data, ...)
# we keep the output statement below for debugging purposes
message("Computing new solve(data): cachedVal=", cachedVal, ", data=", data)
x$setInv(cachedVal)
cachedVal
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regularizationutils.R
\name{getFamily}
\alias{getFamily}
\title{Figure out which family to use}
\usage{
getFamily(inputs, config)
}
\arguments{
\item{inputs}{input data streams passed to tool}
\item{config}{configuration passed to tool}
}
\value{
string family
}
\description{
Figure out which family to use
}
| /man/getFamily.Rd | no_license | dputler/AlteryxPredictive | R | false | true | 390 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regularizationutils.R
\name{getFamily}
\alias{getFamily}
\title{Figure out which family to use}
\usage{
getFamily(inputs, config)
}
\arguments{
\item{inputs}{input data streams passed to tool}
\item{config}{configuration passed to tool}
}
\value{
string family
}
\description{
Figure out which family to use
}
|
library(mvtnorm)
library(ggplot2)
sigma_func<-function(myX,mu,y,q,NK){
# Probabilistic-PCA-style covariance estimate for each mixture component.
#  myX : n x p data matrix.
#  mu  : K x p matrix of component means.
#  y   : n x K matrix of (soft or hard) component responsibilities.
#  q   : number of retained principal directions (q = 0 -> isotropic).
#  NK  : length-K vector of effective component sizes, colSums(y).
# Returns a p x p x K array of covariance matrices.
# BUG FIX: this function previously read K from the global environment and
# modified a GLOBAL array `sigma` in place; both are now local, with K derived
# from nrow(mu) (the same value at every call site).
p <- ncol(myX)
K <- nrow(mu)
sigma <- array(0, dim = c(p, p, K))
for(ii in seq_len(K)){
# Responsibility-weighted scatter around the component mean.
scatter <- matrix(0, p, p)
for(jj in seq_len(nrow(myX))){
dev <- myX[jj,] - mu[ii,]
scatter <- scatter + (dev %*% t(dev)) * y[jj, ii]
}
vari <- scatter / NK[ii]
svd1 <- svd(vari)
# Average of the discarded eigenvalues = isotropic noise level.
SQ <- 1/(p - q) * sum(svd1$d[(q + 1):p])
if(q == 0){
sigma[,,ii] <- SQ * diag(1, p, p)
}else{
# Leading q directions, loadings shrunk by the noise floor.
WQ <- svd1$v[, 1:q] %*% diag(sqrt(svd1$d[1:q] - SQ), q, q)
sigma[,,ii] <- WQ %*% t(WQ) + SQ * diag(1, p, p)
}
}
return(sigma)
}
# Total log-likelihood of the Gaussian mixture.
#  myX   : n x p data matrix.
#  mu    : K x p component means; sigma : p x p x K covariances;
#  pi    : K x 1 column of mixing proportions.
# NOTE(review): K is read from the global environment -- confirm it always
# equals nrow(mu) at the call sites. dmvnorm comes from the mvtnorm package.
likelihood_func<-function(myX,mu,sigma,pi){
temp = matrix(0,dim(myX)[1],K)
for(ii in 1:K){
# Per-observation density under component ii.
temp[,ii] = dmvnorm(myX, mean = mu[ii,], sigma = sigma[,,ii])
}
# Mix the component densities, then sum the per-observation log-likelihoods.
likelihood = sum(log(temp%*%pi))
return(likelihood)
}
misRate_func<-function(myX,myLabel,EMLabel,K){
# Per-class and overall misclassification rates (percent) for a hard cluster
# assignment: each true class is matched to its majority cluster.
#  myX     : data matrix; only nrow(myX) (total sample count) is used.
#  myLabel : true labels. NOTE(review): classes are assumed to be coded
#            0..K-1 here (`ii - 1` below) -- confirm against the caller,
#            which appears to build labels 1..K.
#  EMLabel : n x K 0/1 one-hot cluster-assignment matrix.
#  K       : number of classes/clusters.
# Returns a (K+1) x 1 matrix: rows 1..K per-class rates, row K+1 overall.
misRate = matrix(1,K+1,1)
temp1 = 0
for (ii in 1:K){
# drop = FALSE keeps the subset a matrix even when a class matches a single
# row (the previous apply() call crashed in that case).
temp = colSums(EMLabel[myLabel==(ii-1), , drop = FALSE])
# Everything outside the class's majority cluster counts as misclassified.
misRate[ii,] = 1-max(temp)/sum(temp)
temp1 = temp1+max(temp)
}
OverAllMisRate = 1-temp1/nrow(myX)
misRate = misRate*100
OverAllMisRate = OverAllMisRate*100
misRate[K+1,]=OverAllMisRate
cat("\n","misRate=",misRate[1:K]," ","OverAllMisRate=",misRate[K+1])
return(misRate)
}
# Fit a K-component Gaussian mixture with PPCA-structured rank-q covariances
# (via sigma_func) by EM, starting from the supplied means/mixing weights.
# NOTE(review): the first covariance estimate uses `y` (hard memberships) and
# `NK` (cluster sizes) read from the GLOBAL environment -- confirm they are
# set before calling. dmvnorm is from mvtnorm; ggplot/aes from ggplot2.
# Returns: cond_fun = final responsibilities, pt = ggplot of the
# log-likelihood trace, mu = fitted means, sigma = fitted covariance array.
EM_func<-function(myX,K,mu,pi,q){
sigma=sigma_func(myX,mu,y,q,NK)
old_likelihood=likelihood_func(myX,mu,sigma,pi)
mylikelihood=old_likelihood
continueLoop = TRUE
iter = 0
while(continueLoop){
# E step
# Unnormalized responsibilities: component density times mixing weight.
temp = matrix(0,dim(myX)[1],K)
for(ii in 1:K){
temp[,ii] = dmvnorm(myX, mean = mu[ii,], sigma = sigma[,,ii])*pi[ii]
}
# Normalize each row to sum to one.
cond = t(apply(temp,1,function(xx){
return(xx/sum(xx))
}))
# M step
# Effective number of points per component.
N = apply(cond,2,function(xx){
return(sum(xx))
})
pi=t(t(N)/dim(myX)[1])
# K x p matrix; element [ii, j] is divided by N[ii] (length-K vector
# recycling over rows).
mu= t(cond)%*%myX/N
sigma=sigma_func(myX,mu,cond,q,N)
loglikelihood=likelihood_func(myX,mu,sigma,pi)
# Stop once the relative log-likelihood change drops below 1e-5.
if(abs((loglikelihood-old_likelihood)/loglikelihood)<0.00001){
continueLoop = FALSE
}
old_likelihood=loglikelihood
mylikelihood = rbind(mylikelihood,old_likelihood)
iter=iter+1
cat("\n","iteration number=",iter," ","likelihood=",loglikelihood)
}
# Convergence trace plot, titled with the rank q.
df=data.frame(iteration=1:(iter+1),mylikelihood)
p1=ggplot(df,aes(x=iteration,y=mylikelihood))+ geom_point()+ ggtitle(bquote(paste("q=",.(q))))
# compute AIC
AIC = -2*old_likelihood+2*(dim(myX)[2]*q-q*(q-1)/2)
cat("\n","q=",q," ","AIC=",AIC)
return(list(cond_fun=cond,pt=p1,mu=mu,sigma=sigma))
}
#rep(1,51),rep(2,45),rep(3,48),rep(4,44),rep(5,44),rep(6,42),rep(7,47),
# Read data
myX=read.table("C:/users/dawei/downloads/test2.txt",header=FALSE,sep=',')
myLabel=c(rep(1,31),rep(2,42),rep(3,51),rep(4,51),rep(5,52),rep(6,49),rep(7,51))
#pcmp<-prcomp(myX, center = TRUE)
#tt=predict(pcmp)[,1:50]
#myX=scale(myX,pcmp$center,pcmp$scale)%*%pcmp$rotation[,1:50]
K=7
PreKmean<-kmeans(myX,K)
Clusters=PreKmean$cluster
ClusterMean=PreKmean$centers
y=matrix(0,dim(myX)[1],7)
for(i in 1:dim(myX)[1]){
y[i,Clusters[i]]=1
}
mu=ClusterMean
pi=apply(y,2,function(n){return(sum(n)/dim(myX)[1])})
sigma=array(0,dim=c(dim(myX)[2],dim(myX)[2],K))
NK=pi*dim(myX)[1]
#initial accuracy
misRate<-misRate_func(myX,myLabel,y,K)
#visualize accuracy
barplot(t(misRate[1:7,]),names.arg = c("0", "1", "2","3", "4", "5","6"),xlab="hand-written digits", ylab="miscategorization
rate %", main=paste("Overall mis-categorization rate = ", round(misRate[8,], digits = 2)))
box()
cond1=EM_func(myX,K,mu,pi,0)
cond2=EM_func(myX,K,mu,pi,4)
multiplot(cond1$pt, cond2$pt, cond3$pt, cond4$pt,cols=2)
dev.new(width=6, height=10)
par(mai=c(0,0,0,0),cex=0.8,mfrow=c(10,6))
myDraw = array(0,dim=c(6,dim(myX)[2],K))
clusterMean = cond2$mu
for(ii in 1:K){
myDraw[1,,ii] = clusterMean[ii,]
myDraw[2:6,,ii] = rmvnorm(n=5,mean=cond2$mu[ii,],sigma=cond2$sigma[,,ii])
}
for(ii in 1:K){
for(jj in 1:6){
image(t(matrix(myDraw[jj,,ii],byrow=TRUE,16,16)[16:1,]),col=gray(0:128/128),axes=FALSE)
box()
}
}
# calculate new Labels
EMLabel = matrix(0,dim(myX)[1],K)
for(ii in 1:dim(myX)[1]){
EMLabel[ii,which.max(cond2$cond_fun[ii,])] = 1
}
misRate<-misRate_func(myX,myLabel,EMLabel,K)
barplot(t(misRate[1:7,]),names.arg = c("0", "1", "2","3", "4", "5","6"),xlab="hand-written digits", ylab="miscategorization
rate%", main=paste("Overall mis-categorization rate = ", round(misRate[8], digits = 2), "% (q=",6,")"))
box()
myX1=read.table("C:/users/dawei/downloads/test2.txt",header=FALSE,sep=',')
mylabel1=c(rep(1,51),rep(2,45),rep(3,48),rep(4,44),rep(5,44),rep(6,42),rep(7,47))
write.csv(mu,row.names = FALSE,'E:/A+GTCLASS/mu.csv')
| /Cluster+EM.R | no_license | ryerrabelli/HandPictureAnalysis | R | false | false | 4,928 | r | library(mvtnorm)
library(ggplot2)
sigma_func<-function(myX,mu,y,q,NK){
vari=array(0,dim=c(dim(myX)[2],dim(myX)[2],K))
for(ii in 1:K){
temp = matrix(0,dim(myX)[2],dim(myX)[2])
for(jj in 1:dim(myX)[1]){
temp = temp+(myX[jj,]-mu[ii,])%*%t(myX[jj,]-mu[ii,])*y[jj,ii]
}
vari[,,ii]=temp/NK[ii]
svd1=svd(vari[,,ii])
SQ = 1/(dim(myX)[2]-q)*sum(svd1$d[(q+1):dim(myX)[2]])
if(q==0){
sigma[,,ii]=SQ*diag(1,dim(myX)[2],dim(myX)[2])
}else{
WQ = svd1$v[,1:q]%*%diag(apply(as.matrix(svd1$d[1:q]),1,function(xx){
return(sqrt(xx-SQ))}),q,q)
sigma[,,ii] = WQ%*%t(WQ)+SQ*diag(1,dim(myX)[2],dim(myX)[2])
}
}
return(sigma)
}
likelihood_func<-function(myX,mu,sigma,pi){
temp = matrix(0,dim(myX)[1],K)
for(ii in 1:K){
temp[,ii] = dmvnorm(myX, mean = mu[ii,], sigma = sigma[,,ii])
}
likelihood = sum(log(temp%*%pi))
return(likelihood)
}
misRate_func<-function(myX,myLabel,EMLabel,K){
misRate = matrix(1,K+1,1)
temp1 = 0
for (ii in 1:K){
temp = apply(EMLabel[myLabel==(ii-1),],2,function(xx){
return(sum(xx))})
misRate[ii,] = 1-max(temp)/sum(temp)
temp1 = temp1+max(temp)
}
OverAllMisRate = 1-temp1/dim(myX)[1]
misRate = misRate*100
OverAllMisRate = OverAllMisRate*100
misRate[ii+1,]=OverAllMisRate
cat("\n","misRate=",misRate[1:K]," ","OverAllMisRate=",misRate[K+1])
return(misRate)
}
EM_func<-function(myX,K,mu,pi,q){
sigma=sigma_func(myX,mu,y,q,NK)
old_likelihood=likelihood_func(myX,mu,sigma,pi)
mylikelihood=old_likelihood
continueLoop = TRUE
iter = 0
while(continueLoop){
# E step
temp = matrix(0,dim(myX)[1],K)
for(ii in 1:K){
temp[,ii] = dmvnorm(myX, mean = mu[ii,], sigma = sigma[,,ii])*pi[ii]
}
cond = t(apply(temp,1,function(xx){
return(xx/sum(xx))
}))
# M step
N = apply(cond,2,function(xx){
return(sum(xx))
})
pi=t(t(N)/dim(myX)[1])
mu= t(cond)%*%myX/N
sigma=sigma_func(myX,mu,cond,q,N)
loglikelihood=likelihood_func(myX,mu,sigma,pi)
if(abs((loglikelihood-old_likelihood)/loglikelihood)<0.00001){
continueLoop = FALSE
}
old_likelihood=loglikelihood
mylikelihood = rbind(mylikelihood,old_likelihood)
iter=iter+1
cat("\n","iteration number=",iter," ","likelihood=",loglikelihood)
}
df=data.frame(iteration=1:(iter+1),mylikelihood)
p1=ggplot(df,aes(x=iteration,y=mylikelihood))+ geom_point()+ ggtitle(bquote(paste("q=",.(q))))
# compute AIC
AIC = -2*old_likelihood+2*(dim(myX)[2]*q-q*(q-1)/2)
cat("\n","q=",q," ","AIC=",AIC)
return(list(cond_fun=cond,pt=p1,mu=mu,sigma=sigma))
}
#rep(1,51),rep(2,45),rep(3,48),rep(4,44),rep(5,44),rep(6,42),rep(7,47),
# Read data
# Driver script: cluster hand-written digit images (16x16 = 256 pixels per
# row) with k-means, refine with the EM functions above, and visualise.
myX=read.table("C:/users/dawei/downloads/test2.txt",header=FALSE,sep=',')
# NOTE(review): labels here are coded 1..7, but misRate_func() compares
# against (ii - 1), i.e. 0-based labels — one of the two looks wrong; verify.
myLabel=c(rep(1,31),rep(2,42),rep(3,51),rep(4,51),rep(5,52),rep(6,49),rep(7,51))
#pcmp<-prcomp(myX, center = TRUE)
#tt=predict(pcmp)[,1:50]
#myX=scale(myX,pcmp$center,pcmp$scale)%*%pcmp$rotation[,1:50]
K=7
# k-means initialisation for the EM run.
PreKmean<-kmeans(myX,K)
Clusters=PreKmean$cluster
ClusterMean=PreKmean$centers
# y: n x K one-hot matrix of k-means cluster assignments.
y=matrix(0,dim(myX)[1],7)
for(i in 1:dim(myX)[1]){
y[i,Clusters[i]]=1
}
mu=ClusterMean
pi=apply(y,2,function(n){return(sum(n)/dim(myX)[1])})
sigma=array(0,dim=c(dim(myX)[2],dim(myX)[2],K))
NK=pi*dim(myX)[1]
#initial accuracy
misRate<-misRate_func(myX,myLabel,y,K)
#visualize accuracy
barplot(t(misRate[1:7,]),names.arg = c("0", "1", "2","3", "4", "5","6"),xlab="hand-written digits", ylab="miscategorization
rate %", main=paste("Overall mis-categorization rate = ", round(misRate[8,], digits = 2)))
box()
# EM with q = 0 and q = 4 factors.
cond1=EM_func(myX,K,mu,pi,0)
cond2=EM_func(myX,K,mu,pi,4)
# NOTE(review): cond3 and cond4 are never created, and multiplot() is not
# defined in this file — this line fails as written.
multiplot(cond1$pt, cond2$pt, cond3$pt, cond4$pt,cols=2)
dev.new(width=6, height=10)
par(mai=c(0,0,0,0),cex=0.8,mfrow=c(10,6))
# For each component: its mean image plus 5 random draws from the fitted
# Gaussian (rmvnorm from mvtnorm, loaded elsewhere).
myDraw = array(0,dim=c(6,dim(myX)[2],K))
clusterMean = cond2$mu
for(ii in 1:K){
myDraw[1,,ii] = clusterMean[ii,]
myDraw[2:6,,ii] = rmvnorm(n=5,mean=cond2$mu[ii,],sigma=cond2$sigma[,,ii])
}
# Render each 256-vector as a 16x16 grey-scale image.
for(ii in 1:K){
for(jj in 1:6){
image(t(matrix(myDraw[jj,,ii],byrow=TRUE,16,16)[16:1,]),col=gray(0:128/128),axes=FALSE)
box()
}
}
# calculate new Labels
EMLabel = matrix(0,dim(myX)[1],K)
for(ii in 1:dim(myX)[1]){
EMLabel[ii,which.max(cond2$cond_fun[ii,])] = 1
}
misRate<-misRate_func(myX,myLabel,EMLabel,K)
barplot(t(misRate[1:7,]),names.arg = c("0", "1", "2","3", "4", "5","6"),xlab="hand-written digits", ylab="miscategorization
rate%", main=paste("Overall mis-categorization rate = ", round(misRate[8], digits = 2), "% (q=",6,")"))
box()
# Second copy of the data / an alternative label vector; appears unused below.
myX1=read.table("C:/users/dawei/downloads/test2.txt",header=FALSE,sep=',')
mylabel1=c(rep(1,51),rep(2,45),rep(3,48),rep(4,44),rep(5,44),rep(6,42),rep(7,47))
write.csv(mu,row.names = FALSE,'E:/A+GTCLASS/mu.csv')
|
# K-Means Clustering of mall customers.

# Import dataset; columns 4:5 are Annual Income and Spending Score.
dataset <- read.csv('Data\\Mall_Customers.csv')
X <- dataset[4:5]

# Use elbow method to find optimal number of clusters: plot the
# within-cluster sum of squares for k = 1..10.
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
plot(1:10, wcss, type = 'b', main = paste('Clusters of Clients'), xlab = 'Number of Clusters', ylab = 'WCSS')

# Applying k-means to the dataset using number of clusters found above.
# Fix: store the fit in `km` instead of `kmeans`, which shadowed
# base::kmeans() for the rest of the session.
set.seed(29)
km <- kmeans(X, centers = 5, iter.max = 300, nstart = 10)

# Visualizing the clusters
library(cluster)
clusplot(X,
         km$cluster,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = FALSE,
         span = TRUE,
         main = paste('Clusters of Clients'),
         xlab = 'Annual Income',
         ylab = 'Spending Score')
| /Machine Learning A-Z - Hands-On Python & R Data Science/Clustering/K-Means/kmeans.R | no_license | jacobskr/ML_Python | R | false | false | 830 | r | # K-Means Clustering
# Import dataset
# (Duplicate of the k-means script above: columns 4:5 are Annual Income
# and Spending Score of mall customers.)
dataset <- read.csv('Data\\Mall_Customers.csv')
X <- dataset[4:5]
# Use elbow method to find optimal number of clusters
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
plot(1:10, wcss, type = 'b', main = paste('Clusters of Clients'), xlab = 'Number of Clusters', ylab = 'WCSS')
# Applying k-means to the dataset using number of clusters found above
set.seed(29)
# NOTE(review): assigning the fit to `kmeans` masks base::kmeans() for the
# rest of the session; a different variable name would be safer.
kmeans = kmeans(X, centers = 5, iter.max = 300, nstart = 10)
# Visualizing the clusters
library(cluster)
clusplot(X,
kmeans$cluster,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste('Clusters of Clients'),
xlab = 'Annual Income',
ylab = 'Spending Score')
|
# Setting Mike's working directory
setwd("~/Dropbox/_Spring_2013/STAT_2025/Homework/Project1")
preg <- read.csv("./dapm5m6.csv")
# subsetting for only females who have ever been / gotten someone pregnant
fem_preg <- subset(preg, preg$GRSM5003==1)
# ever been or gotten somone pregnant
# Fix: this table() originally ran before fem_preg was created, which
# stopped the script with "object 'fem_preg' not found".
table(fem_preg$GRSM5003)
# ... and who have had sex
fem_preg_sex <- subset(fem_preg, fem_preg$SXBM5124==1)
# Interactive inspection leftovers, kept for reference but no longer evaluated:
# preg$GRSM5003==1
# fem_preg_sex$SXBM5124
#adding variables that we want to a new data frame
study_data <- as.data.frame(cbind(fem_preg_sex$CNBM5130,fem_preg_sex$CNCM5152, fem_preg_sex$CNCM5153, fem_preg_sex$CNCM5154, fem_preg_sex$CNCM5155, fem_preg_sex$CNCM5597, fem_preg_sex$CNCM5598,fem_preg_sex$CNCM5599,fem_preg_sex$CNCM5600,fem_preg_sex$AGHM5393, fem_preg_sex$RASM5004, fem_preg_sex$RLSM5005,fem_preg_sex$EDSM5017, fem_preg_sex$EDSM5410,fem_preg_sex$CBHM5875, fem_preg_sex$CBSM5105, fem_preg_sex$CBSM5535, fem_preg_sex$CNAM5171, fem_preg_sex$CNBM5429, fem_preg_sex$CNBM5571,fem_preg_sex$EDSM5008, fem_preg_sex$EDSM5010, fem_preg_sex$EDIM5013,fem_preg_sex$RCBM5018, fem_preg_sex$RCBM5412, fem_preg_sex$SABM5421, fem_preg_sex$SABM5423, fem_preg_sex$SABM5424, fem_preg_sex$SAIM5046))
colnames(study_data) <- c("sex_wo_contraception","know_about_pill", "know_about_condoms", "know_about_diaphram","know_about_withdrawl", "know_about_pill_b", "know_about_condoms_b", "know_about_diaphram_b","know_about_withdrawl_b", "age", "ethnicity", "religion", "highest_yr_school_completed", "school_enrollment", "scare_atleast1_prega","know_preg_peera", "know_preg_peerb", "birth_control_used_by_peers", "friends_use_contraception_a", "friends_use_contraception_b", "fathers_education", "mothers_education", "educational_aspiration", "dont_participate_in_school_activites_1", "dont_participate_in_school_activites_2", "smoking_freq", "drinking_freq", "drug_freq", "age_frist_drinking")
#exploring the distribution of some of the variables
# cbind(study_data$know_about_pill, study_data$know_about_pill_b)
# table(study_data$know_about_pill)
# table(study_data$know_about_pill_b)
# cbind(study_data$know_about_condoms, study_data$know_about_condoms_b)
# table(study_data$know_about_condoms)
# table(study_data$know_about_condoms_b)
# cbind(study_data$know_about_diaphram, study_data$know_about_diaphram_b)
# table(study_data$know_about_diaphram)
# table(study_data$know_about_diaphram_b)
#
# cbind(study_data$know_preg_peera, study_data$know_preg_peerb)
# table(study_data$know_preg_peera)
# table(study_data$know_preg_peerb)
# removing the features with lots of NAs because they were not all collected by each survey instrument.
study_data$know_about_pill_b <- NULL
study_data$know_about_condoms_b <- NULL
study_data$know_about_diaphram_b <- NULL
study_data$know_about_withdrawl_b <- NULL
study_data$school_enrollment <- NULL
study_data$scare_atleast1_prega <- NULL
study_data$know_preg_peerb <- NULL
study_data$friends_use_contraception_a <- NULL
study_data$friends_use_contraception_b <- NULL
study_data$dont_participate_in_school_activites_2 <- NULL
#coding variables as either numeric or a factor:
# The bare is.factor() calls after each conversion are interactive sanity
# checks; they have no effect when the script is sourced.
study_data$sex_wo_contraception <- as.factor(study_data$sex_wo_contraception)
is.factor(study_data$sex_wo_contraception)
study_data$ethnicity <- as.factor(study_data$ethnicity)
is.factor(study_data$ethnicity)
study_data$religion<- as.factor(study_data$religion)
is.factor(study_data$religion)
study_data$know_preg_peera <- as.factor(study_data$know_preg_peera)
is.factor(study_data$know_preg_peera)
#next one isn't working
# Workaround: convert column 11 by position, then drop the positional copy.
# NOTE(review): this assumes birth_control_used_by_peers sits at position 11
# after the removals above — verify the column order before relying on it.
study_data$birth_control_used_by_peers <- study_data[,11]
study_data$birth_control_used_by_peers <- as.factor(study_data$birth_control_used_by_peers)
is.factor(study_data$birth_control_used_by_peers)
study_data[11]<-NULL
study_data$fathers_education <- as.factor(study_data$fathers_education)
is.factor(study_data$fathers_education)
study_data$mothers_education <- as.factor(study_data$mothers_education)
is.factor(study_data$mothers_education)
study_data$educational_aspiration <- as.factor(study_data$educational_aspiration)
is.factor(study_data$educational_aspiration)
study_data$dont_participate_in_school_activites_1 <- as.factor(study_data$dont_participate_in_school_activites_1)
is.factor(study_data$dont_participate_in_school_activites_1)
# NOTE(review): educational_aspiration is converted a second time here
# (harmless, but redundant).
study_data$educational_aspiration <- as.factor(study_data$educational_aspiration)
is.factor(study_data$educational_aspiration)
study_data$drug_freq <- as.factor(study_data$drug_freq)
is.factor(study_data$drug_freq)
study_data$drinking_freq <- as.factor(study_data$drinking_freq)
is.factor(study_data$drinking_freq)
study_data$smoking_freq <- as.factor(study_data$smoking_freq)
is.factor(study_data$smoking_freq)
#also we decided to take out the the "know about" variables because tg
study_data$know_about_pill <- NULL
study_data$know_about_condoms <- NULL
study_data$know_about_diaphram <- NULL
study_data$know_about_withdrawl <- NULL
#change the response variable to 0, 1
# we want sex without contraception to be 1, with contraception to be 0
# right now sex wihtout contraception is 1
# Recode the survey response: 1 stays 1 (sex without contraception),
# 2 becomes 0 (sex with contraception). Any other code — including NA —
# maps to NA instead of silently returning NULL (the original returned an
# invisible NULL for unexpected codes and errored on NA input).
#
# Args:
#   response: a single survey code (expected 1 or 2).
# Returns: 1, 0, or NA. Intended for use with sapply() over a column.
convert_response <- function(response){
  if (is.na(response)) {
    return(NA_real_)
  }
  if (response == 2) {
    return(0)
  }
  if (response == 1) {
    return(1)
  }
  NA_real_
}
# Undo the earlier as.factor() on the response, then map codes 1/2 to 1/0.
study_data$sex_wo_contraception <- as.numeric(as.character(study_data$sex_wo_contraception))
study_data$sex_wo_contraception <- sapply(study_data$sex_wo_contraception, FUN=convert_response)
#study_data$sex_wo_contraception
#we also have to code age in the right way:
# Age appears to be stored as an offset from 84 — TODO confirm the coding
# against the survey codebook.
study_data$age <- 84-study_data$age
typeof(study_data$age) | /Project1/to turn in/src/constructing_dataset.R | no_license | mdiscenza/STAT_2025_homework | R | false | false | 5,437 | r | # Setting Mike's working directory
# (Duplicate of the survey set-up script above.)
setwd("~/Dropbox/_Spring_2013/STAT_2025/Homework/Project1")
preg <- read.csv("./dapm5m6.csv")
#ever been or gotten somone pregnant
# NOTE(review): fem_preg is used here before it is defined two lines below,
# so this table() call fails when the script is run top-to-bottom.
table(fem_preg$GRSM5003)
#subsetting for only female who have sex
fem_preg <- subset(preg, preg$GRSM5003==1)
preg$GRSM5003==1
fem_preg_sex <- subset(fem_preg, fem_preg$SXBM5124==1)
fem_preg_sex$SXBM5124
#adding variables that we want to a new data frame
study_data <- as.data.frame(cbind(fem_preg_sex$CNBM5130,fem_preg_sex$CNCM5152, fem_preg_sex$CNCM5153, fem_preg_sex$CNCM5154, fem_preg_sex$CNCM5155, fem_preg_sex$CNCM5597, fem_preg_sex$CNCM5598,fem_preg_sex$CNCM5599,fem_preg_sex$CNCM5600,fem_preg_sex$AGHM5393, fem_preg_sex$RASM5004, fem_preg_sex$RLSM5005,fem_preg_sex$EDSM5017, fem_preg_sex$EDSM5410,fem_preg_sex$CBHM5875, fem_preg_sex$CBSM5105, fem_preg_sex$CBSM5535, fem_preg_sex$CNAM5171, fem_preg_sex$CNBM5429, fem_preg_sex$CNBM5571,fem_preg_sex$EDSM5008, fem_preg_sex$EDSM5010, fem_preg_sex$EDIM5013,fem_preg_sex$RCBM5018, fem_preg_sex$RCBM5412, fem_preg_sex$SABM5421, fem_preg_sex$SABM5423, fem_preg_sex$SABM5424, fem_preg_sex$SAIM5046))
colnames(study_data) <- c("sex_wo_contraception","know_about_pill", "know_about_condoms", "know_about_diaphram","know_about_withdrawl", "know_about_pill_b", "know_about_condoms_b", "know_about_diaphram_b","know_about_withdrawl_b", "age", "ethnicity", "religion", "highest_yr_school_completed", "school_enrollment", "scare_atleast1_prega","know_preg_peera", "know_preg_peerb", "birth_control_used_by_peers", "friends_use_contraception_a", "friends_use_contraception_b", "fathers_education", "mothers_education", "educational_aspiration", "dont_participate_in_school_activites_1", "dont_participate_in_school_activites_2", "smoking_freq", "drinking_freq", "drug_freq", "age_frist_drinking")
#exploring the distribution of some of the variables
# cbind(study_data$know_about_pill, study_data$know_about_pill_b)
# table(study_data$know_about_pill)
# table(study_data$know_about_pill_b)
# cbind(study_data$know_about_condoms, study_data$know_about_condoms_b)
# table(study_data$know_about_condoms)
# table(study_data$know_about_condoms_b)
# cbind(study_data$know_about_diaphram, study_data$know_about_diaphram_b)
# table(study_data$know_about_diaphram)
# table(study_data$know_about_diaphram_b)
#
# cbind(study_data$know_preg_peera, study_data$know_preg_peerb)
# table(study_data$know_preg_peera)
# table(study_data$know_preg_peerb)
# removing the features with lots of NAs because they were not all collected by each survey instrument.
# (Duplicate of the column clean-up / factor coding section above.)
study_data$know_about_pill_b <- NULL
study_data$know_about_condoms_b <- NULL
study_data$know_about_diaphram_b <- NULL
study_data$know_about_withdrawl_b <- NULL
study_data$school_enrollment <- NULL
study_data$scare_atleast1_prega <- NULL
study_data$know_preg_peerb <- NULL
study_data$friends_use_contraception_a <- NULL
study_data$friends_use_contraception_b <- NULL
study_data$dont_participate_in_school_activites_2 <- NULL
#coding variables as either numeric or a factor:
study_data$sex_wo_contraception <- as.factor(study_data$sex_wo_contraception)
is.factor(study_data$sex_wo_contraception)
study_data$ethnicity <- as.factor(study_data$ethnicity)
is.factor(study_data$ethnicity)
study_data$religion<- as.factor(study_data$religion)
is.factor(study_data$religion)
study_data$know_preg_peera <- as.factor(study_data$know_preg_peera)
is.factor(study_data$know_preg_peera)
#next one isn't working
# Positional workaround; assumes the target column sits at position 11.
study_data$birth_control_used_by_peers <- study_data[,11]
study_data$birth_control_used_by_peers <- as.factor(study_data$birth_control_used_by_peers)
is.factor(study_data$birth_control_used_by_peers)
study_data[11]<-NULL
study_data$fathers_education <- as.factor(study_data$fathers_education)
is.factor(study_data$fathers_education)
study_data$mothers_education <- as.factor(study_data$mothers_education)
is.factor(study_data$mothers_education)
study_data$educational_aspiration <- as.factor(study_data$educational_aspiration)
is.factor(study_data$educational_aspiration)
study_data$dont_participate_in_school_activites_1 <- as.factor(study_data$dont_participate_in_school_activites_1)
is.factor(study_data$dont_participate_in_school_activites_1)
# NOTE(review): educational_aspiration is converted twice (redundant).
study_data$educational_aspiration <- as.factor(study_data$educational_aspiration)
is.factor(study_data$educational_aspiration)
study_data$drug_freq <- as.factor(study_data$drug_freq)
is.factor(study_data$drug_freq)
study_data$drinking_freq <- as.factor(study_data$drinking_freq)
is.factor(study_data$drinking_freq)
study_data$smoking_freq <- as.factor(study_data$smoking_freq)
is.factor(study_data$smoking_freq)
#also we decided to take out the the "know about" variables because tg
study_data$know_about_pill <- NULL
study_data$know_about_condoms <- NULL
study_data$know_about_diaphram <- NULL
study_data$know_about_withdrawl <- NULL
#change the response variable to 0, 1
# we want sex without contraception to be 1, with contraception to be 0
# right now sex wihtout contraception is 1
# Recode a single survey response: 1 -> 1 (sex without contraception),
# 2 -> 0 (sex with contraception). Any other code yields NULL, matching
# the original fall-through behaviour; used via sapply() below.
convert_response <- function(response){
  result <- NULL
  if (response == 2) {
    result <- 0
  } else if (response == 1) {
    result <- 1
  }
  result
}
# Undo the earlier as.factor() on the response, then map codes 1/2 to 1/0.
study_data$sex_wo_contraception <- as.numeric(as.character(study_data$sex_wo_contraception))
study_data$sex_wo_contraception <- sapply(study_data$sex_wo_contraception, FUN=convert_response)
#study_data$sex_wo_contraception
#we also have to code age in the right way:
# Age appears to be stored as an offset from 84 — TODO confirm the coding.
study_data$age <- 84-study_data$age
typeof(study_data$age) |
# Session 5: Data visualization with ggplot
##################
# TODAY'S TOPICS #
##################
# base layer & aesthetics
# geoms
# facets
# fitting patterns
# axes, scales & coordinates
# themes
#######################
# package & data used #
#######################
# install.packages("ggplot2")
library(ggplot2)
library(dplyr)
# built-in data
# Print the built-in example datasets used throughout the lesson
# (interactive preview of their structure).
mpg
economics
economics_long
###########################
# base layer & aesthetics #
###########################
# base layer
# map aesthetics
#########
# geoms #
#########
# see the many geom options at ??geom
# Lesson placeholders: each call constructs a ggplot2 layer object that
# would normally be added to a ggplot() base layer with `+`.
geom_histogram()
geom_freqpoly()
geom_density()
geom_bar()
geom_point()
geom_line()
geom_boxplot()
##################
# YOUR TURN - #1 #
##################
# 1. import ws_data.csv
# 2. create a histogram of flying hours for all aircraft ws systems
# 3. create a bar chart that plots total end strength for each system in 2014
# 4. create a scatter plot that assesses the relationship between TAI and
# maintenance consumables for aircraft weapon systems
# 5. create a line chart that plots total flying hours by year
######################
# back to aesthetics #
######################
# we can use additional aesthetics to plot more variable features
# color, size, shape, alpha
##########
# facets #
##########
# Faceting helpers: split one plot into small multiples by variable(s).
facet_wrap()
facet_grid()
##################
# YOUR TURN - #2 #
##################
# 1. import ws_data.csv
# 2. create a scatter plot that shows the relationship between end strength
# and total O&S costs
# 3. compare this same relationship between the different systems (aircraft,
# missiles, munitions, etc.)
# 4. visually assess the historical total flying hours by base. can you identify
# the bases with some significant flying hour changes?
####################
# fitting patterns #
####################
geom_smooth()
##################
# YOUR TURN - #3 #
##################
# continuing with our ws_data.csv data...
# plot manpower ops costs against flying hours for the F-16 weapon system and
# fit a smoother. which appears to fit better LOESS vs. linear?
##############################
# axes, scales & coordinates #
##############################
# Axis-label, limit, coordinate, and scale helpers covered in this section.
ylab()
xlab()
labs()
ylim()
xlim()
coord_cartesian()
coord_flip()
scale_x_continuous()
scale_y_continuous()
# there are several scale transformations that are useful
scale_y_log10()
scale_y_sqrt()
scale_y_reverse()
##################
# YOUR TURN - #4 #
##################
# continuing with our ws_data.csv data...
# plot manpower ops costs against flying hours for the F-16 weapon system and
# fit a LOESS smoother. Use scale_x_continous and scale_y_continuous to improve
# the axis formatting.
##########
# themes #
##########
# there are many built in theme options
# Complete-theme functions restyle the whole plot in one call.
theme_classic()
theme_minimal()
theme_dark()
# the ggthemes packages provides even more
# install.packages("ggthemes")
library(ggthemes)
theme_economist()
theme_fivethirtyeight()
theme_tufte()
# theme() also allows you to customize your graphics
theme()
# check out all the options at
?theme
##################
# YOUR TURN - #5 #
##################
# continuing with our ws_data.csv data...
# plot manpower ops costs against flying hours for the F-16 weapon system and
# fit a LOESS smoother. Use scale_x_continous and scale_y_continuous to improve
# the axis formatting and theme() to adjust the overall graphic formatting.
| /05-ggplot-student.R | no_license | bradleyboehmke/OPER-200 | R | false | false | 3,404 | r | # Session 5: Data visualization with ggplot
##################
# TODAY'S TOPICS #
##################
# base layer & aesthetics
# geoms
# facets
# fitting patterns
# axes, scales & coordinates
# themes
#######################
# package & data used #
#######################
# install.packages("ggplot2")
library(ggplot2)
library(dplyr)
# built-in data
# Print the built-in example datasets used throughout the lesson.
mpg
economics
economics_long
###########################
# base layer & aesthetics #
###########################
# base layer
# map aesthetics
#########
# geoms #
#########
# see the many geom options at ??geom
# Lesson placeholders: each call constructs a ggplot2 layer object.
geom_histogram()
geom_freqpoly()
geom_density()
geom_bar()
geom_point()
geom_line()
geom_boxplot()
##################
# YOUR TURN - #1 #
##################
# 1. import ws_data.csv
# 2. create a histogram of flying hours for all aircraft ws systems
# 3. create a bar chart that plots total end strength for each system in 2014
# 4. create a scatter plot that assesses the relationship between TAI and
# maintenance consumables for aircraft weapon systems
# 5. create a line chart that plots total flying hours by year
######################
# back to aesthetics #
######################
# we can use additional aesthetics to plot more variable features
# color, size, shape, alpha
##########
# facets #
##########
# Faceting helpers: split one plot into small multiples by variable(s).
facet_wrap()
facet_grid()
##################
# YOUR TURN - #2 #
##################
# 1. import ws_data.csv
# 2. create a scatter plot that shows the relationship between end strength
# and total O&S costs
# 3. compare this same relationship between the different systems (aircraft,
# missiles, munitions, etc.)
# 4. visually assess the historical total flying hours by base. can you identify
# the bases with some significant flying hour changes?
####################
# fitting patterns #
####################
geom_smooth()
##################
# YOUR TURN - #3 #
##################
# continuing with our ws_data.csv data...
# plot manpower ops costs against flying hours for the F-16 weapon system and
# fit a smoother. which appears to fit better LOESS vs. linear?
##############################
# axes, scales & coordinates #
##############################
# Axis-label, limit, coordinate, and scale helpers covered in this section.
ylab()
xlab()
labs()
ylim()
xlim()
coord_cartesian()
coord_flip()
scale_x_continuous()
scale_y_continuous()
# there are several scale transformations that are useful
scale_y_log10()
scale_y_sqrt()
scale_y_reverse()
##################
# YOUR TURN - #4 #
##################
# continuing with our ws_data.csv data...
# plot manpower ops costs against flying hours for the F-16 weapon system and
# fit a LOESS smoother. Use scale_x_continous and scale_y_continuous to improve
# the axis formatting.
##########
# themes #
##########
# there are many built in theme options
# Complete-theme functions restyle the whole plot in one call.
theme_classic()
theme_minimal()
theme_dark()
# the ggthemes packages provides even more
# install.packages("ggthemes")
library(ggthemes)
theme_economist()
theme_fivethirtyeight()
theme_tufte()
# theme() also allows you to customize your graphics
theme()
# check out all the options at
?theme
##################
# YOUR TURN - #5 #
##################
# continuing with our ws_data.csv data...
# plot manpower ops costs against flying hours for the F-16 weapon system and
# fit a LOESS smoother. Use scale_x_continous and scale_y_continuous to improve
# the axis formatting and theme() to adjust the overall graphic formatting.
|
# Interactive leaflet map of US farmers' markets: a state-level choropleth
# of market counts plus marker overlays for individual markets and state
# populations. Hard-coded setwd()/paths tie this to one machine.
setwd("/Users/minxiaocn/Desktop/Georgetown/ANLY503 Visualization/exam")
farm<-read.csv("Farmer'sMarket.csv")
head(farm)
library(leaflet)
library(sp)
library(rgdal)
library(maps)
library(dplyr)
library(noncensus)
data("counties")
#data("states")
cty=counties
#farm state name
# clean farm data: remove dc and some islands
# (the data file is re-read here; the first read above is redundant)
farm<-read.csv("Farmer'sMarket.csv")
sapply(farm,class)
farm$state<-as.character(farm$state)
# Keep only rows whose state is one of the 50 US state names.
farm<-farm[(farm[,"state"] %in% c(state.name)),]
n=dim(farm)[1]
farm["state_abb"]=0
# Map each full state name to its two-letter abbreviation.
for (i in c(1:n))
{
farm[i,"state_abb"]= state.abb[which(state.name ==farm[i,"state"])]
}
#combine farm and counties data
cty$state_abb<-cty$state
farm2=merge(farm,cty,by=c("state_abb"))
# pivot table to calculate the farm
# Per-state market count and mean coordinates (used to place state markers).
amount_state <- group_by(farm,state_abb) %>%
summarise(n_farms=length(fmid),x_mean=mean(x,na.rm=T),y_mean=mean(y,na.rm=T))
cty$no_farms<-0
cty<-merge(cty,amount_state,by=c("state_abb"))
colnames(cty)
state_<-unique(cty[c("state_abb", "state_fips")])
data("states")
#merge state and amount_state
state_farm<-merge(state_,amount_state,by=c("state_abb"))
colnames(state_farm)<-c("state","STATEFP","n_farms", "x","y" )
state_farm<-merge(state_farm,states,by=c("state"))
#import us county data
# NOTE(review): readOGR() is from the retired rgdal package and the shapefile
# path is machine-specific — consider sf::st_read() with a relative path.
us.map <- readOGR(dsn = "/Users/minxiaocn/Desktop/Georgetown/ANLY503 Visualization/exam/Code And Data for Leaflet_R Maps Example/.", layer = "cb_2016_us_county_20m", stringsAsFactors = FALSE)
head(us.map)
# Remove Alaska(2), Hawaii(15), Puerto Rico (72), Guam (66), Virgin Islands (78), American Samoa (60)
# Mariana Islands (69), Micronesia (64), Marshall Islands (68), Palau (70), Minor Islands (74)
us.map <- us.map[!us.map$STATEFP %in% c("02", "15", "72", "66", "78", "60", "69",
"64", "68", "70", "74"),]
#head(us.map)
# Make sure other outling islands are removed.
us.map <- us.map[!us.map$STATEFP %in% c("81", "84", "86", "87", "89", "71", "76",
"95", "79"),]
#merge farm map to us map
farm_map<-merge(us.map,state_farm,by=c("STATEFP"))
##Make pop up for the land use sites
# Format popup data for leaflet map.
popup_dat <- paste0("<strong>State: </strong>",
farm_map$state,
"<br><strong>No of farmers: </strong>",
farm_map$n_farms)
# Format popup data for leaflet map.
popup_LU2<- paste0("<strong>State: </strong>",
state_farm$state,
"<br><strong>Population: </strong>",
state_farm$population)
popup_LU<- paste0("<strong>County: </strong>",
farm$market_name,
"<br><strong>Website link</strong>",
farm$website)
# Quantile colour scale for the choropleth fill.
pal <- colorQuantile("YlOrRd", NULL, n = 9)
gmap <- leaflet(data = farm_map) %>%
# Base groups
addTiles() %>%
setView(lng = -106, lat = 40, zoom = 4) %>%
addPolygons(fillColor = ~pal(n_farms),
fillOpacity = 0.8,
color = "#BDBDC3",
weight = 1,
popup = popup_dat,
group="No. of farmer markets by states") %>%
# Overlay groups
addMarkers(data=farm,lat=~y, lng=~x, popup=popup_LU, group = "Farmer's market details") %>%
addMarkers(data=state_farm,lat=~y, lng=~x, popup=popup_LU2, group = "Population in each state") %>%
# Layers control
addLayersControl(
baseGroups = c("No. of farmer markets by states"),
overlayGroups = c("Farmer's market details","Population in each state"),
options = layersControlOptions(collapsed = FALSE)
)
gmap
saveWidget(gmap, 'leaflet.html', selfcontained = TRUE)
| /Codes/leaflet/leaflet.R | no_license | minfrdata/FarmersMarkets | R | false | false | 3,639 | r | setwd("/Users/minxiaocn/Desktop/Georgetown/ANLY503 Visualization/exam")
# (Duplicate of the farmers'-market leaflet script above.)
farm<-read.csv("Farmer'sMarket.csv")
head(farm)
library(leaflet)
library(sp)
library(rgdal)
library(maps)
library(dplyr)
library(noncensus)
data("counties")
#data("states")
cty=counties
#farm state name
# clean farm data: remove dc and some islands
farm<-read.csv("Farmer'sMarket.csv")
sapply(farm,class)
farm$state<-as.character(farm$state)
farm<-farm[(farm[,"state"] %in% c(state.name)),]
n=dim(farm)[1]
farm["state_abb"]=0
# Map each full state name to its two-letter abbreviation.
for (i in c(1:n))
{
farm[i,"state_abb"]= state.abb[which(state.name ==farm[i,"state"])]
}
#combine farm and counties data
cty$state_abb<-cty$state
farm2=merge(farm,cty,by=c("state_abb"))
# pivot table to calculate the farm
amount_state <- group_by(farm,state_abb) %>%
summarise(n_farms=length(fmid),x_mean=mean(x,na.rm=T),y_mean=mean(y,na.rm=T))
cty$no_farms<-0
cty<-merge(cty,amount_state,by=c("state_abb"))
colnames(cty)
state_<-unique(cty[c("state_abb", "state_fips")])
data("states")
#merge state and amount_state
state_farm<-merge(state_,amount_state,by=c("state_abb"))
colnames(state_farm)<-c("state","STATEFP","n_farms", "x","y" )
state_farm<-merge(state_farm,states,by=c("state"))
#import us county data
# NOTE(review): readOGR() (rgdal, now retired) with a machine-specific path.
us.map <- readOGR(dsn = "/Users/minxiaocn/Desktop/Georgetown/ANLY503 Visualization/exam/Code And Data for Leaflet_R Maps Example/.", layer = "cb_2016_us_county_20m", stringsAsFactors = FALSE)
head(us.map)
# Remove Alaska(2), Hawaii(15), Puerto Rico (72), Guam (66), Virgin Islands (78), American Samoa (60)
# Mariana Islands (69), Micronesia (64), Marshall Islands (68), Palau (70), Minor Islands (74)
us.map <- us.map[!us.map$STATEFP %in% c("02", "15", "72", "66", "78", "60", "69",
"64", "68", "70", "74"),]
#head(us.map)
# Make sure other outling islands are removed.
us.map <- us.map[!us.map$STATEFP %in% c("81", "84", "86", "87", "89", "71", "76",
"95", "79"),]
#merge farm map to us map
farm_map<-merge(us.map,state_farm,by=c("STATEFP"))
##Make pop up for the land use sites
# Format popup data for leaflet map.
popup_dat <- paste0("<strong>State: </strong>",
farm_map$state,
"<br><strong>No of farmers: </strong>",
farm_map$n_farms)
# Format popup data for leaflet map.
popup_LU2<- paste0("<strong>State: </strong>",
state_farm$state,
"<br><strong>Population: </strong>",
state_farm$population)
popup_LU<- paste0("<strong>County: </strong>",
farm$market_name,
"<br><strong>Website link</strong>",
farm$website)
pal <- colorQuantile("YlOrRd", NULL, n = 9)
gmap <- leaflet(data = farm_map) %>%
# Base groups
addTiles() %>%
setView(lng = -106, lat = 40, zoom = 4) %>%
addPolygons(fillColor = ~pal(n_farms),
fillOpacity = 0.8,
color = "#BDBDC3",
weight = 1,
popup = popup_dat,
group="No. of farmer markets by states") %>%
# Overlay groups
addMarkers(data=farm,lat=~y, lng=~x, popup=popup_LU, group = "Farmer's market details") %>%
addMarkers(data=state_farm,lat=~y, lng=~x, popup=popup_LU2, group = "Population in each state") %>%
# Layers control
addLayersControl(
baseGroups = c("No. of farmer markets by states"),
overlayGroups = c("Farmer's market details","Population in each state"),
options = layersControlOptions(collapsed = FALSE)
)
gmap
saveWidget(gmap, 'leaflet.html', selfcontained = TRUE)
|
# Sort out time problem etc.
# Time in file seems to be *start* of each interval, but 4_methyl. . . is *cumulative* emission value
# Derive the end of each interval by adding one interval width
# (time[3] - time[2], assumed constant within a group — TODO confirm) per
# experiment/treatment/tunnel group. data.table's `:=` updates `dat` by
# reference, so re-assigning the result back to `dat` was redundant.
dat[, time.end := time + (time[3] - time[2]), by = .(experiment, treatment, tunnel)]
# Short alias for the cumulative `4_` (4-methyl...) emission column.
dat$fmp <- dat$`4_`
# Factors (fixed treatment order for downstream plots/models)
dat$treatment <- factor(dat$treatment, levels = c('U-CM', 'D-CM', 'D-CM-CC'))
dat$experiment <- factor(dat$experiment)
# Get initial values only for ANOVA
d1 <- subset(dat, time == 0)
| /scripts-4mp/clean.R | no_license | AU-BCE-EE/Lemes-2023-digestate-NH3 | R | false | false | 441 | r | # Sort out time problem etc.
# Time in file seems to be *start* of each interval, but 4_methyl. . . is *cumulative* emission value
# (Duplicate of the clean-up script above.) `:=` modifies dat by reference,
# so the re-assignment to dat is redundant but harmless.
dat <- dat[, time.end := time + (time[3] - time[2]), by = .(experiment, treatment, tunnel)]
# Short alias for the cumulative `4_` emission column.
dat$fmp <- dat$`4_`
# Factors
dat$treatment <- factor(dat$treatment, levels = c('U-CM', 'D-CM', 'D-CM-CC'))
dat$experiment <- factor(dat$experiment)
# Get initial values only for ANOVA
d1 <- subset(dat, time == 0)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reviews.R
\docType{data}
\name{reviews}
\alias{reviews}
\title{Amazon.com Book Reviews}
\format{
A data frame with 243,269 observations on the following 5 variables.
\describe{
\item{\code{book}}{The book under review. Values along with
book-titles are as follows:
\itemize{
\item{\code{hunger: }}{"The Hunger Games"}
\item{\code{shades: }}{"Fifty Shades of Grey"}
\item{\code{fault: }}{"The Fault in Our Stars"}
\item{\code{martian: }}{"The Martian"}
\item{\code{unbroken: }}{"Unbroken"}
\item{\code{gonegirl: }}{"Gone Girl"}
\item{\code{traingirl: }}{"The Girl on the Train"}
\item{\code{goldfinch: }}{"The Goldfinch"}
}
}
\item{\code{rating}}{rating assigned (1-5)}
\item{\code{URL_fragment}}{Prepend "https://www.amazon.com/"
to get the full URL of the review.}
\item{\code{review_title}}{Title of the review; usually a
concise judgment of the book.}
\item{\code{content}}{HTML of the review text.}
}
}
\source{
This data frame is a compilation of the data sets in
"Amazon Book Reviews", in the UC-Irvine Machine Learning Repository.
See \url{https://archive.ics.uci.edu/ml/datasets/Amazon+book+reviews}
for more information.
}
\description{
Amazon.com reader-reviews of several popular books.
}
\keyword{datasets}
| /man/reviews.Rd | no_license | homerhanumat/tigerData | R | false | true | 1,307 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reviews.R
\docType{data}
\name{reviews}
\alias{reviews}
\title{Amazon.com Book Reviews}
\format{
A data frame with 243,269 observations on the following 5 variables.
\describe{
\item{\code{book}}{The book under review. Values along with
book-titles are as follows:
\itemize{
\item{\code{hunger: }}{"The Hunger Games"}
\item{\code{shades: }}{"Fifty Shades of Grey"}
\item{\code{fault: }}{"The Fault in Our Stars"}
\item{\code{martian: }}{"The Martian"}
\item{\code{unbroken: }}{"Unbroken"}
\item{\code{gonegirl: }}{"Gone Girl"}
\item{\code{traingirl: }}{"The Girl on the Train"}
\item{\code{goldfinch: }}{"The Goldfinch"}
}
}
\item{\code{rating}}{rating assigned (1-5)}
\item{\code{URL_fragment}}{Prepend "https://www.amazon.com/"
to get the full URL of the review.}
\item{\code{review_title}}{Title of the review; usually a
concise judgment of the book.}
\item{\code{content}}{HTML of the review text.}
}
}
\source{
This data frame is a compilation of the data sets in
"Amazon Book Reviews", in the UC-Irvine Machine Learning Repository.
See \url{https://archive.ics.uci.edu/ml/datasets/Amazon+book+reviews}
for more information.
}
\description{
Amazon.com reader-reviews of several popular books.
}
\keyword{datasets}
|
# Set parameter values
# Units are in micrograms/g soil (ug/g)
# Named parameter vector for the SHIMMER microbial community model; each
# entry is annotated with its meaning. Commented-out numbers are earlier
# calibration values kept for reference.
parms<-c(
# Tref = 25, # Reference temperature at which rates are given
NC = 0.141 , # C:N microbial (MASS)
PC = 0.083, # C:P microbial (MASS)
Q_10 = 2.91 , # Q10 temperature response for basically all growth and death processes
alphaA = 0.070, # 0.0340, #density dependent death rate on autotrophs
alphaH = 0.070, # 0.0340, #density dependent death rate on heterotrophs
g_Sub = 0.000000, # proportional leaching loss of substrate (carbon, nitrogen, phosphorus)
g_DIN = 0.000000, # proportional leaching loss of DIN
g_DIP = 0.000000, # proportional leaching loss of DIP
exA = 0.0140, # exudates from Autotrophs
exH = 0.0140, # exudates from Heterotrophs
p_sub = 0.20, # slow-down parameter for subglacial growth rate
K_sub = 0.80, # half-saturation parameter for subglacial growth
ImaxA = 0.55 , # Maximum growth rates of autotrophs
ImaxH = 0.55 , # Maximum growth rates of heterotrophs
K_L = 11.88 , # Light half saturation for autotrophs
K_S = 349, # 349 , # substrate half saturation for Heterotrophs
K_N = 49.209, # 49.209 , # (NC = 0.141) DIN half saturation
DINt = 0 , # (NC = 0.141) Nitrogen concentration threshold for N-fixation starting
K_N2 = 49.209, # 98.418, # 393.672 , # (8*K_N) shape of logistic function for n-fixation switch
K_P = 28.967, # 28.967 , # (PC = 0.083) DIP half saturation
n_f = 0.25 , # 0.50, # downscaling of efficiency and growth whilst n-fixers are fixing
JS1 = 0.68, # 0.68 , # heterotrophic use of S1
JS2 = 0.15 , #0.15, # heterotrophic use of S2
q = 0.30 , # proportion of losses that becomes labile
Y_A = 0.06 , #BGE of autotrophs (Yield)
Y_H = 0.06 , #BGE of heterotrophs (Yield)
v_Sub = (0.17/20)*30, # Proportion of substrate deposition available to microbes
v_DIN = 0.17/20, # Proportion of N-deposition available to microbes
v_DIP = 0.17/20, # Proportion of DIP-deposition available to microbes
dor = 0.285 # active fraction
)
#...............................
| /demo/demo_WIN/SHIMMER/library/SHIMMER_set_parameter_values.R | no_license | jbradley8365/2016_17_SHIMMER_demo | R | false | false | 2,095 | r | # Set parameter values
# Units are in micrograms/g soil (ug/g)
# (Duplicate of the SHIMMER parameter vector above; commented-out numbers
# are earlier calibration values kept for reference.)
parms<-c(
# Tref = 25, # Reference temperature at which rates are given
NC = 0.141 , # C:N microbial (MASS)
PC = 0.083, # C:P microbial (MASS)
Q_10 = 2.91 , # Q10 temperature response for basically all growth and death processes
alphaA = 0.070, # 0.0340, #density dependent death rate on autotrophs
alphaH = 0.070, # 0.0340, #density dependent death rate on heterotrophs
g_Sub = 0.000000, # proportional leaching loss of substrate (carbon, nitrogen, phosphorus)
g_DIN = 0.000000, # proportional leaching loss of DIN
g_DIP = 0.000000, # proportional leaching loss of DIP
exA = 0.0140, # exudates from Autotrophs
exH = 0.0140, # exudates from Heterotrophs
p_sub = 0.20, # slow-down parameter for subglacial growth rate
K_sub = 0.80, # half-saturation parameter for subglacial growth
ImaxA = 0.55 , # Maximum growth rates of autotrophs
ImaxH = 0.55 , # Maximum growth rates of heterotrophs
K_L = 11.88 , # Light half saturation for autotrophs
K_S = 349, # 349 , # substrate half saturation for Heterotrophs
K_N = 49.209, # 49.209 , # (NC = 0.141) DIN half saturation
DINt = 0 , # (NC = 0.141) Nitrogen concentration threshold for N-fixation starting
K_N2 = 49.209, # 98.418, # 393.672 , # (8*K_N) shape of logistic function for n-fixation switch
K_P = 28.967, # 28.967 , # (PC = 0.083) DIP half saturation
n_f = 0.25 , # 0.50, # downscaling of efficiency and growth whilst n-fixers are fixing
JS1 = 0.68, # 0.68 , # heterotrophic use of S1
JS2 = 0.15 , #0.15, # heterotrophic use of S2
q = 0.30 , # proportion of losses that becomes labile
Y_A = 0.06 , #BGE of autotrophs (Yield)
Y_H = 0.06 , #BGE of heterotrophs (Yield)
v_Sub = (0.17/20)*30, # Proportion of substrate deposition available to microbes
v_DIN = 0.17/20, # Proportion of N-deposition available to microbes
v_DIP = 0.17/20, # Proportion of DIP-deposition available to microbes
dor = 0.285 # active fraction
)
#...............................
|
# Dallas Animal Shelter (Tidy Tuesday week 18): stacked outcome proportions
# per animal type, plus the top-10 adopted dog breeds.
setwd("C:/Users/dsilv/Desktop/Learning/Data Science/Tidy-Tuesday-Projects/Dallas Animal Shelter - Week 18")
library(tidyverse)
library(readxl)
raw_data <- read_xlsx("week18_dallas_animals.xlsx", sheet = "simple")
#Visualizing Outcomes for all Animals
# position = "fill" turns counts into within-animal-type proportions.
raw_data %>%
group_by(animal_type, outcome_type) %>%
summarise(animal_count = n()) %>%
ggplot(aes(x = animal_type, y = animal_count, fill = outcome_type)) +
geom_bar(stat = "identity", position = "fill", color = "#303030") +
labs(title = "Outcome for all Animals",
x = "Animal",
y = "Outcome") +
theme_minimal()
ggsave("Week18_plot1.png")
#####################
#Top Dog Breeds that are Adopted
# top_n(10) keeps the 10 breeds with the highest adoption tallies
# (may return more rows on ties).
tidy_data <- raw_data %>%
filter (outcome_type == "ADOPTION") %>%
filter(animal_type == "DOG") %>%
group_by(animal_breed)%>%
tally() %>%
top_n(10) %>%
arrange(desc(n))
#To retain the order in the plot
tidy_data$animal_breed = factor(tidy_data$animal_breed, levels = tidy_data$animal_breed)
tidy_data %>% ggplot(aes(x = animal_breed, y=n)) +
geom_bar(stat="identity", width = 0.5, fill = "#0e668b") +
labs(title="Top 10 Dog Breed Adoptions",
y = "Number of Adoptions",
x = "")+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("Week18_plot2.png")
ggsave("Week18_plot2.png") | /Dallas Animal Shelter - Week 18/Animal_Shelter.R | no_license | Choke77/Tidy-Tuesday-Projects | R | false | false | 1,335 | r | setwd("C:/Users/dsilv/Desktop/Learning/Data Science/Tidy-Tuesday-Projects/Dallas Animal Shelter - Week 18")
library(tidyverse)
library(readxl)
# Raw intake/outcome records for the Dallas animal shelter (Tidy Tuesday week 18).
raw_data <- read_xlsx("week18_dallas_animals.xlsx", sheet = "simple")
# Visualizing outcomes for all animals: stacked bars rescaled to height 1
# (position = "fill"), so each bar shows outcome proportions per animal type.
raw_data %>%
group_by(animal_type, outcome_type) %>%
summarise(animal_count = n()) %>%
ggplot(aes(x = animal_type, y = animal_count, fill = outcome_type)) +
# geom_col() is the idiomatic replacement for geom_bar(stat = "identity")
geom_col(position = "fill", color = "#303030") +
labs(title = "Outcome for all Animals",
x = "Animal",
y = "Outcome") +
theme_minimal()
# ggsave() writes the most recently displayed plot to disk.
ggsave("Week18_plot1.png")
#####################
# Top 10 dog breeds by number of adoptions.
tidy_data <- raw_data %>%
filter(outcome_type == "ADOPTION") %>%
filter(animal_type == "DOG") %>%
group_by(animal_breed) %>%
tally() %>%
# Explicit wt avoids the "Selecting by n" console guess; behavior unchanged.
top_n(10, n) %>%
arrange(desc(n))
# Freeze the factor levels in descending-count order so ggplot keeps the order.
tidy_data$animal_breed <- factor(tidy_data$animal_breed, levels = tidy_data$animal_breed)
tidy_data %>% ggplot(aes(x = animal_breed, y = n)) +
geom_col(width = 0.5, fill = "#0e668b") +
labs(title = "Top 10 Dog Breed Adoptions",
y = "Number of Adoptions",
x = "") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("Week18_plot2.png") |
# Script to extract snow depth characteristics in each watershed upstream of
# stream crossings for all of 2015 (also extended to 2016-2018 below).
source("scripts/googledrive_read_write_functions.R")
# NOTE(review): library() is preferred over require() for hard dependencies;
# require() returns FALSE instead of erroring when a package is missing.
require(sf)
require(lubridate)
require(raster)
require(tidyverse)
# Watershed polygons upstream of stream crossings, pulled from Google Drive.
jmt_watersheds <- load_rgdal_from_googledrive("1yB7ww8YgWCAOHjeuCa4Xu6vIZPthO3aD")
# Google Drive file listings (name + id) for the daily SNODAS rasters.
snodas_gd_depth <- drive_ls(as_id("1_IxGme096iUx6JJQY0nhONKSzBWaiI3k"))
snodas_gd_swe <- drive_ls(as_id("1JPXf6Pq9Ki9zjTSctvO2UxfRn2KTUiq4"))
# Daily date sequence covering 2015.
dates_2015 <- seq(ymd("2015-01-01"), ymd("2015-12-31"), "days")
# Extract a watershed-level summary of one day of SNODAS data.
# date:        a Date; matched (via grep) against the raster file names on Drive
# variable:    "SWE" or "snowDepth" -- selects which Drive folder to read from
# summary_fun: function used to aggregate raster cells within each watershed
# Returns a 1 x n_watersheds matrix of summaries (one column per watershed).
get_snodas_day <- function(date, variable, summary_fun){
# Pick the file listing for the requested variable; fail loudly otherwise.
# (The original returned a character string on a bad `variable`, which
# sapply() would silently propagate into the results.)
if (variable == "SWE") {
gd_listing <- snodas_gd_swe
} else if (variable == "snowDepth") {
gd_listing <- snodas_gd_depth
} else {
stop("Variable must be SWE or snowDepth")
}
snodas_id <- gd_listing %>%
slice(grep(date, name)) %>%
pull(id)
snodas_data <- load_geotiff_from_googledrive(snodas_id)
# extract() returns one value per watershed; t() orients it as a row.
t(raster::extract(snodas_data, jmt_watersheds, fun = summary_fun, na.rm = T))
}
# Build a year-long table of daily watershed summaries: one row per date,
# one column per watershed (named by its crossing), plus a Date column.
# dates:  vector of Dates; varble: "SWE" or "snowDepth" (forwarded to get_snodas_day)
snodas_watershed_year <- function(dates, varble){
# Preallocate the output: length(dates) rows x (n watersheds + 1) columns.
jmt_fill <- as.data.frame(matrix(nrow = length(dates),
ncol = nrow(jmt_watersheds)+1))
jmt_fill[,1] <- as.character(dates)
# Each get_snodas_day() call yields a 1 x n_watersheds matrix; after
# sapply(), t() makes rows correspond to dates.
jmt_watersheds_swe <- t(sapply(dates,
get_snodas_day,
variable = varble, summary_fun = sum))
jmt_fill[,2:ncol(jmt_fill)] <- jmt_watersheds_swe
colnames(jmt_fill) <- c("Date", as.character(jmt_watersheds$crossing))
# Convert the Date column back from character to a proper Date.
jmt_fill <- jmt_fill %>% mutate(Date = ymd(Date))
return(jmt_fill)
}
# The four year-blocks below were identical copy-pastes; the shared save
# logic now lives in one helper.
#
# save_swe_year(): write a wide-format SWE table to Google Drive, then derive
# and save the long format (per-watershed SWE plus day-over-day melt).
# swe_wide:   data frame from snodas_watershed_year() (Date + one col/watershed)
# year_label: character year used in the output file names
# Returns the long-format data frame, invisibly.
save_swe_year <- function(swe_wide, year_label,
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV") {
# SWE in each watershed over time, wide format
write_csv_to_googledrive(swe_wide, paste0("jmt_watersheds_SWE_", year_label),
folder_id = folder_id)
# Long format with melt: SWE_melt is the day-over-day SWE decrease.
# BUG FIX: lag() is now ordered by Date; the original used
# order_by = watershed, which is constant within each group and only worked
# because the rows happened to already be date-sorted.
swe_long <- swe_wide %>%
gather("watershed", "SWE", -Date) %>%
group_by(watershed) %>%
mutate(last_swe = dplyr::lag(SWE, order_by = Date),
SWE_melt = -(SWE - last_swe)) %>%
select(-last_swe) %>%
ungroup()
write_csv_to_googledrive(swe_long, paste0("jmt_watersheds_SWE_", year_label, "_long"),
folder_id = folder_id)
invisible(swe_long)
}

#Get 2015 data (year we have survey data) ##############
jmt_swe_2015 <- snodas_watershed_year(dates_2015, "SWE")
jmt_swe_long <- save_swe_year(jmt_swe_2015, "2015")
#Get and save data from 2016 ###############
jmt_swe_2016 <- snodas_watershed_year(seq(ymd("2016-01-01"), ymd("2016-12-31"), "days"), "SWE")
jmt_swe_long <- save_swe_year(jmt_swe_2016, "2016")
#Get and save data from 2017 ##############
jmt_swe_2017 <- snodas_watershed_year(seq(ymd("2017-01-01"), ymd("2017-12-31"), "days"), "SWE")
jmt_swe_long <- save_swe_year(jmt_swe_2017, "2017")
#Get and save data from 2018 ###################
jmt_swe_2018 <- snodas_watershed_year(seq(ymd("2018-01-01"), ymd("2018-12-31"), "days"), "SWE")
jmt_swe_long <- save_swe_year(jmt_swe_2018, "2018")
| /scripts/snow/extract_snowdepth_to_watersheds.R | no_license | MiyabiIshihara/JMT-Stream-Crossing-Risk | R | false | false | 5,315 | r | # Script to extract snow depth characteristics in each watershed upstream of stream crossings for all of 2015
# Setup: helper functions plus spatial/tidyverse dependencies.
source("scripts/googledrive_read_write_functions.R")
# NOTE(review): library() is preferred over require() for hard dependencies;
# require() returns FALSE instead of erroring when a package is missing.
require(sf)
require(lubridate)
require(raster)
require(tidyverse)
# Watershed polygons upstream of stream crossings, pulled from Google Drive.
jmt_watersheds <- load_rgdal_from_googledrive("1yB7ww8YgWCAOHjeuCa4Xu6vIZPthO3aD")
# Google Drive file listings (name + id) for the daily SNODAS rasters.
snodas_gd_depth <- drive_ls(as_id("1_IxGme096iUx6JJQY0nhONKSzBWaiI3k"))
snodas_gd_swe <- drive_ls(as_id("1JPXf6Pq9Ki9zjTSctvO2UxfRn2KTUiq4"))
# Daily date sequence covering 2015.
dates_2015 <- seq(ymd("2015-01-01"), ymd("2015-12-31"), "days")
# Extract one day of SNODAS data and summarise it over every watershed.
# `date` is matched against the Drive file names, `variable` picks the folder
# ("SWE" or "snowDepth"), and `summary_fun` aggregates raster cells per
# watershed. Returns a 1 x n_watersheds matrix, or (unchanged from before)
# an explanatory character string when `variable` is not recognised.
get_snodas_day <- function(date, variable, summary_fun){
if (!variable %in% c("SWE", "snowDepth")) {
return("Variable must be SWE or snowDepth")
}
# Choose the Drive listing that matches the requested variable.
listing <- if (variable == "SWE") snodas_gd_swe else snodas_gd_depth
file_id <- listing %>%
slice(grep(date, name)) %>%
pull(id)
day_raster <- load_geotiff_from_googledrive(file_id)
# One summary value per watershed, oriented as a row via t().
t(raster::extract(day_raster, jmt_watersheds, fun = summary_fun, na.rm = T))
}
# Build a year-long table of daily watershed summaries: one row per date,
# one column per watershed (named by its crossing), plus a Date column.
# dates:  vector of Dates; varble: "SWE" or "snowDepth" (forwarded to get_snodas_day)
snodas_watershed_year <- function(dates, varble){
# Preallocate the output: length(dates) rows x (n watersheds + 1) columns.
jmt_fill <- as.data.frame(matrix(nrow = length(dates),
ncol = nrow(jmt_watersheds)+1))
jmt_fill[,1] <- as.character(dates)
# Each get_snodas_day() call yields a 1 x n_watersheds matrix; after
# sapply(), t() makes rows correspond to dates.
jmt_watersheds_swe <- t(sapply(dates,
get_snodas_day,
variable = varble, summary_fun = sum))
jmt_fill[,2:ncol(jmt_fill)] <- jmt_watersheds_swe
colnames(jmt_fill) <- c("Date", as.character(jmt_watersheds$crossing))
# Convert the Date column back from character to a proper Date.
jmt_fill <- jmt_fill %>% mutate(Date = ymd(Date))
return(jmt_fill)
}
#Get 2015 data (year we have survey data) ##############
jmt_swe_2015 <- snodas_watershed_year(dates_2015, "SWE")
#Save two datasets
#SWE in each watershed over time in wide format
write_csv_to_googledrive(jmt_swe_2015, "jmt_watersheds_SWE_2015",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#SWE and SWE melt in each watershed over time in long format
# NOTE(review): order_by = watershed is constant within each group_by(watershed)
# group, so lag() effectively relies on the rows already being date-sorted;
# order_by = Date would make the intent explicit. (Applies to all four years.)
jmt_swe_long <- jmt_swe_2015 %>%
gather("watershed", "SWE", -Date) %>%
group_by(watershed) %>%
mutate(last_swe = dplyr::lag(SWE, order_by = watershed),
SWE_melt = -(SWE - last_swe)) %>%
select(-last_swe)
write_csv_to_googledrive(jmt_swe_long, "jmt_watersheds_SWE_2015_long",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#Get and save data from 2016 ###############
jmt_swe_2016 <- snodas_watershed_year(seq(ymd("2016-01-01"), ymd("2016-12-31"), "days"), "SWE")
#Save
#SWE in each watershed over time in wide format
write_csv_to_googledrive(jmt_swe_2016, "jmt_watersheds_SWE_2016",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#SWE and SWE melt in each watershed over time in long format
jmt_swe_long <- jmt_swe_2016 %>%
gather("watershed", "SWE", -Date) %>%
group_by(watershed) %>%
mutate(last_swe = dplyr::lag(SWE, order_by = watershed),
SWE_melt = -(SWE - last_swe)) %>%
select(-last_swe)
write_csv_to_googledrive(jmt_swe_long, "jmt_watersheds_SWE_2016_long",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#Get and save data from 2017 ##############
jmt_swe_2017 <- snodas_watershed_year(seq(ymd("2017-01-01"), ymd("2017-12-31"), "days"), "SWE")
#Save
#SWE in each watershed over time in wide format
write_csv_to_googledrive(jmt_swe_2017, "jmt_watersheds_SWE_2017",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#SWE and SWE melt in each watershed over time in long format
jmt_swe_long <- jmt_swe_2017 %>%
gather("watershed", "SWE", -Date) %>%
group_by(watershed) %>%
mutate(last_swe = dplyr::lag(SWE, order_by = watershed),
SWE_melt = -(SWE - last_swe)) %>%
select(-last_swe)
write_csv_to_googledrive(jmt_swe_long, "jmt_watersheds_SWE_2017_long",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#Get and save data from 2018 ###################
jmt_swe_2018 <- snodas_watershed_year(seq(ymd("2018-01-01"), ymd("2018-12-31"), "days"), "SWE")
#Save
#SWE in each watershed over time in wide format
write_csv_to_googledrive(jmt_swe_2018, "jmt_watersheds_SWE_2018",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
#SWE and SWE melt in each watershed over time in long format
jmt_swe_long <- jmt_swe_2018 %>%
gather("watershed", "SWE", -Date) %>%
group_by(watershed) %>%
mutate(last_swe = dplyr::lag(SWE, order_by = watershed),
SWE_melt = -(SWE - last_swe)) %>%
select(-last_swe)
write_csv_to_googledrive(jmt_swe_long, "jmt_watersheds_SWE_2018_long",
folder_id = "1bvrY-Be43gJOSkNNGhVjGhHX8AXFahzV")
|
## Yacht-builder classification with k-nearest neighbours (class::knn).
## set working directory
setwd("~/DBS/AdvancedDataAnalytics/Assignments/CA3-Yachts/Submission")
## check working directory
getwd()
## read in dataset
yachts <- read.csv("yachtData.csv", header = TRUE)
## view dataset
View(yachts)
colnames(yachts)
colnames(yachts) <- c("YachtBuilder", "LOA", "Beam", "Draft", "Displacement")
## check
View(yachts)
## check summary
summary(yachts)
## scatterplot
## NOTE(review): install.packages() in a script re-installs on every run;
## consider guarding with requireNamespace().
install.packages("ggvis")
library(ggvis)
yachts %>% ggvis(~LOA, ~Beam, fill = ~YachtBuilder) %>% layer_points()
## Note: there appears to be quite a strong positive correlation between LOA and Beam
## for the Bavaria yachts and the Hanse yachts,
## There appears to be a moderate positive correlation between LOA and Beam
## for the Beneteau and Jeanneau yachts
## Now generate a scatterplot that maps the draft and the displacement:
yachts %>% ggvis(~Draft, ~Displacement, fill = ~YachtBuilder) %>% layer_points()
## Note: the scatterplot indicates a strong positive correlation between the draft and the displacement
## for all four (4) yacht builders.
## This yachts dataset can be used for classification (an example of predictive modeling).
## The first attribute of the dataset (i.e. the column labelled "YachtBuilder") will be the target variable
## (i.e. YachtBuilder is the variable that we want to predict in this instance).
## install and load the class package (provides the knn() classifier)
install.packages("class")
library(class)
## Normalise the dataset
summary(yachts)
## The yachts data set needs to be normalised:
## the LOA attribute has values that go from 9.02 to 22.24
## and Beam contains values from 2.45 to 6.20,
## while Draft values range from 0.93 to 8.57,
## yet Displacement ranges from 7088 to 160650.
## So, Displacement's influence will tend to overpower the influences of the other three (3) attributes.
## Thus, there is a need to normalise the dataset, i.e. adjust the ranges of all attributes,
## so that distances between attributes with larger ranges will not be over-emphasised.
## create normalise function:
## Rescale a numeric vector to [0, 1] (min-max normalisation).
## x:     numeric vector
## na.rm: drop NAs when computing the range. The default (FALSE) preserves the
##        original behaviour, where any NA makes the whole result NA.
## Note: a constant vector still yields NaN (0/0), as in the original.
normalise <- function(x, na.rm = FALSE)
{
num <- x - min(x, na.rm = na.rm)
denom <- max(x, na.rm = na.rm) - min(x, na.rm = na.rm)
return (num/denom)
} ## end function
## place results of normalisation in a data frame using as.data.frame()
## the function lapply() returns a list of the same length as the dataset,
## each element of that list is the result of the application of the normalise argument to the dataset
## For the yachts dataset, the normalise argument is applied to the four (4) numerical measurements
## of the yachts dataset (LOA, Beam, Draft, Displacement),
## the results are placed into a data frame:
yachtsNormalised <- as.data.frame(lapply(yachts[2:5], normalise))
## check normalised dataset
View(yachtsNormalised)
summary(yachtsNormalised)
## Now, values of all attributes are contained within the range of 0.0 to 1.0
set.seed(2345)
ind <- sample(2, nrow(yachtsNormalised), replace = TRUE, prob = c(0.75, 0.25))
## create test dataset & training dataset
## use 3/4 in training dataset & 1/4 in test dataset
yachtsTraining <- yachtsNormalised[ind == 1, 1:4]
yachtsTest <- yachtsNormalised[ind == 2, 1:4]
## check
View(yachtsTraining)
View(yachtsTest)
## Note: do NOT need to take into account ALL attributes to form the training set and test set.
## Only needed to consider LOA, Beam, Draft & Displacement.
## ... because want to predict the 1st attribute, YachtBuilder (this is the target variable).
## However, the YachtBuilder attribute must be incorporated into the KNN algorithm,
## ... otherwise there will never be any prediction for it.
## Therefore, need to store the class labels in factor vectors and divide them across the training and test sets.
## Store the class labels (column 1 of the original data) for the train/test rows:
yachtsTrainLabels <- yachts[ind == 1, 1]
yachtsTestLabels <- yachts[ind == 2, 1]
View(yachtsTrainLabels)
View(yachtsTestLabels)
## To build the classifier, take the KNN() function then add some arguments to it,
yachtsPrediction <- knn(train = yachtsTraining, test = yachtsTest, cl = yachtsTrainLabels, k = 3)
## Store into yachtsPrediction the KNN() function that takes as arguments the training set, the test set,
## the train labels and the amount of neighbours seeking to find with this algorithm.
## The result of this function is a factor vector with the predicted classes for each row of the test data.
## Note: do NOT insert the test labels:
## ... these will be used to see whether the model is good at predicting the actual classes of the instances!
## Retrieve the result of the KNN() function
## (or use write.csv to export to a csv file)
## prediction values
yachtsPrediction
View(yachtsPrediction)
## test labels
yachtsTestLabels
View(yachtsTestLabels)
## datapoints 39, 45, 55 & 65 have been mis-classified
## i.e. 4 out of 66 are mis-classified (or approximately 6%)
## EVALUATION OF THE MODEL
## An essential next step in machine learning is the evaluation of the model's performance.
## In other words, need to analyze the degree of correctness of the model's predictions.
## For a more abstract view, simply just compare the results of yachtsPrediction to the
## yachtsTestLabels defined above:
## This will give some indication of the model's performance, however,
## the statistical analysis should be investigated more thoroughly, as follows:
## import the package gmodels:
install.packages("gmodels")
library(gmodels)
## Now, make a cross tabulation or a contingency table.
## This type of table is often used to understand the relationship between 2 variables.
## The goal is to understand how the classes of the test data (stored in yachtsTestLabels)
## relate to the model that is stored in yachtsPrediction:
CrossTable(x = yachtsTestLabels, y = yachtsPrediction, prop.chisq = FALSE)
## Note that the last argument prop.chisq indicates whether or not the chi-square contribution
## of each cell is included.
## The chi-square statistic is the sum of the contributions from each of the individual cells
## and is used to decide whether the difference between the observed and the expected values
## is significant.
## From this table, you can derive the number of correct and incorrect predictions:
## 2 instances from the test set were labelled Bavaria by the model,
## when in actual fact these yachts were from the yacht builder Hanse, and
## 2 instances from the test set were labelled Beneteau by the model,
## when in actual fact these yachts were from the yacht builder Jeaneau.
## This can be seen by looking at the first row of the "Jeaneau" yacht-builder in the yachtsTestLabels column.
## In all other cases, correct predictions were made.
## Conclusion: the model's performance is very good and there is no need to improve the model.
| /yachtsKNeuralNetworks.R | no_license | wade12/AdvancedDataAnalyticsCA3Yachts | R | false | false | 6,667 | r | ## set working directory
## Yacht-builder classification with k-nearest neighbours (class::knn).
setwd("~/DBS/AdvancedDataAnalytics/Assignments/CA3-Yachts/Submission")
## check working directory
getwd()
## read in dataset
yachts <- read.csv("yachtData.csv", header = TRUE)
## view dataset
View(yachts)
colnames(yachts)
colnames(yachts) <- c("YachtBuilder", "LOA", "Beam", "Draft", "Displacement")
## check
View(yachts)
## check summary
summary(yachts)
## scatterplot
## NOTE(review): install.packages() in a script re-installs on every run;
## consider guarding with requireNamespace().
install.packages("ggvis")
library(ggvis)
yachts %>% ggvis(~LOA, ~Beam, fill = ~YachtBuilder) %>% layer_points()
## Note: there appears to be quite a strong positive correlation between LOA and Beam
## for the Bavaria yachts and the Hanse yachts,
## There appears to be a moderate positive correlation between LOA and Beam
## for the Beneteau and Jeanneau yachts
## Now generate a scatterplot that maps the draft and the displacement:
yachts %>% ggvis(~Draft, ~Displacement, fill = ~YachtBuilder) %>% layer_points()
## Note: the scatterplot indicates a strong positive correlation between the draft and the displacement
## for all four (4) yacht builders.
## This yachts dataset can be used for classification (an example of predictive modeling).
## The first attribute of the dataset (i.e. the column labelled "YachtBuilder") will be the target variable
## (i.e. YachtBuilder is the variable that we want to predict in this instance).
## install and load the class package (provides the knn() classifier)
install.packages("class")
library(class)
## Normalise the dataset
summary(yachts)
## The yachts data set needs to be normalised:
## the LOA attribute has values that go from 9.02 to 22.24
## and Beam contains values from 2.45 to 6.20,
## while Draft values range from 0.93 to 8.57,
## yet Displacement ranges from 7088 to 160650.
## So, Displacement's influence will tend to overpower the influences of the other three (3) attributes.
## Thus, there is a need to normalise the dataset, i.e. adjust the ranges of all attributes,
## so that distances between attributes with larger ranges will not be over-emphasised.
## create normalise function:
## Min-max rescale: map a numeric vector linearly onto [0, 1].
## NA values propagate (range() of a vector with NA is all-NA), exactly as
## in the subtraction-based formulation this replaces.
normalise <- function(x)
{
rng <- range(x)
return ((x - rng[1]) / (rng[2] - rng[1]))
} ## end function
## place results of normalisation in a data frame using as.data.frame()
## the function lapply() returns a list of the same length as the dataset,
## each element of that list is the result of the application of the normalise argument to the dataset
## For the yachts dataset, the normalise argument is applied to the four (4) numerical measurements
## of the yachts dataset (LOA, Beam, Draft, Displacement),
## the results are placed into a data frame:
yachtsNormalised <- as.data.frame(lapply(yachts[2:5], normalise))
## check normalised dataset
View(yachtsNormalised)
summary(yachtsNormalised)
## Now, values of all attributes are contained within the range of 0.0 to 1.0
set.seed(2345)
ind <- sample(2, nrow(yachtsNormalised), replace = TRUE, prob = c(0.75, 0.25))
## create test dataset & training dataset
## use 3/4 in training dataset & 1/4 in test dataset
yachtsTraining <- yachtsNormalised[ind == 1, 1:4]
yachtsTest <- yachtsNormalised[ind == 2, 1:4]
## check
View(yachtsTraining)
View(yachtsTest)
## Note: do NOT need to take into account ALL attributes to form the training set and test set.
## Only needed to consider LOA, Beam, Draft & Displacement.
## ... because want to predict the 1st attribute, YachtBuilder (this is the target variable).
## However, the YachtBuilder attribute must be incorporated into the KNN algorithm,
## ... otherwise there will never be any prediction for it.
## Therefore, need to store the class labels in factor vectors and divide them across the training and test sets.
## Store the class labels (column 1 of the original data) for the train/test rows:
yachtsTrainLabels <- yachts[ind == 1, 1]
yachtsTestLabels <- yachts[ind == 2, 1]
View(yachtsTrainLabels)
View(yachtsTestLabels)
## To build the classifier, take the KNN() function then add some arguments to it,
yachtsPrediction <- knn(train = yachtsTraining, test = yachtsTest, cl = yachtsTrainLabels, k = 3)
## Store into yachtsPrediction the KNN() function that takes as arguments the training set, the test set,
## the train labels and the amount of neighbours seeking to find with this algorithm.
## The result of this function is a factor vector with the predicted classes for each row of the test data.
## Note: do NOT insert the test labels:
## ... these will be used to see whether the model is good at predicting the actual classes of the instances!
## Retrieve the result of the KNN() function
## (or use write.csv to export to a csv file)
## prediction values
yachtsPrediction
View(yachtsPrediction)
## test labels
yachtsTestLabels
View(yachtsTestLabels)
## datapoints 39, 45, 55 & 65 have been mis-classified
## i.e. 4 out of 66 are mis-classified (or approximately 6%)
## EVALUATION OF THE MODEL
## An essential next step in machine learning is the evaluation of the model's performance.
## In other words, need to analyze the degree of correctness of the model's predictions.
## For a more abstract view, simply just compare the results of yachtsPrediction to the
## yachtsTestLabels defined above:
## This will give some indication of the model's performance, however,
## the statistical analysis should be investigated more thoroughly, as follows:
## import the package gmodels:
install.packages("gmodels")
library(gmodels)
## Now, make a cross tabulation or a contingency table.
## This type of table is often used to understand the relationship between 2 variables.
## The goal is to understand how the classes of the test data (stored in yachtsTestLabels)
## relate to the model that is stored in yachtsPrediction:
CrossTable(x = yachtsTestLabels, y = yachtsPrediction, prop.chisq = FALSE)
## Note that the last argument prop.chisq indicates whether or not the chi-square contribution
## of each cell is included.
## The chi-square statistic is the sum of the contributions from each of the individual cells
## and is used to decide whether the difference between the observed and the expected values
## is significant.
## From this table, you can derive the number of correct and incorrect predictions:
## 2 instances from the test set were labelled Bavaria by the model,
## when in actual fact these yachts were from the yacht builder Hanse, and
## 2 instances from the test set were labelled Beneteau by the model,
## when in actual fact these yachts were from the yacht builder Jeaneau.
## This can be seen by looking at the first row of the "Jeaneau" yacht-builder in the yachtsTestLabels column.
## In all other cases, correct predictions were made.
## Conclusion: the model's performance is very good and there is no need to improve the model.
|
# Amtrak monthly ridership plot, annotated with training / validation / future
# periods (Figure 3-1).
library(forecast)
Amtrak.data <- read.csv("Amtrak data.csv")
# Monthly time series, Jan 1991 through Mar 2004.
ridership.ts <- ts(Amtrak.data$Ridership,
start = c(1991, 1), end = c(2004, 3), freq = 12)
# Figure 3-1
plot(ridership.ts, ylim = c(1300, 2600), ylab = "Ridership", xlab = "Time", bty = "l", xaxt = "n", xlim = c(1991,2006.25))
# Custom yearly x-axis (xaxt = "n" above suppressed the default one).
axis(1, at = seq(1991, 2006, 1), labels = format(seq(1991, 2006, 1), digits = 2))
# Vertical lines marking the training/validation split and the end of the data.
lines(c(2004.25 - 3 , 2004.25 - 3), c(0, 3500))
lines(c(2004.25, 2004.25), c(0, 3500))
text(1996.25, 2500, "Training")
text(2002.75, 2500, "Validation")
text(2005.25, 2500, "Future")
# Double-headed arrows spanning each period.
arrows(2004 - 3,2400,1991.25,2400,code=3,length=0.1,lwd=1,angle=30)
arrows(2004.5 - 3,2400,2004,2400,code=3,length=0.1,lwd=1,angle=30)
arrows(2004.5,2400,2006,2400,code=3,length=0.1,lwd=1,angle=30)
| /Amtrak Fig 3-1.R | no_license | jonathan-marsan/PTS_Forecasting_w_R | R | false | false | 774 | r | library(forecast)
# Amtrak monthly ridership plot, annotated with training / validation / future
# periods (Figure 3-1).
Amtrak.data <- read.csv("Amtrak data.csv")
# Monthly time series, Jan 1991 through Mar 2004.
ridership.ts <- ts(Amtrak.data$Ridership,
start = c(1991, 1), end = c(2004, 3), freq = 12)
# Figure 3-1
plot(ridership.ts, ylim = c(1300, 2600), ylab = "Ridership", xlab = "Time", bty = "l", xaxt = "n", xlim = c(1991,2006.25))
# Custom yearly x-axis (xaxt = "n" above suppressed the default one).
axis(1, at = seq(1991, 2006, 1), labels = format(seq(1991, 2006, 1), digits = 2))
# Vertical lines marking the training/validation split and the end of the data.
lines(c(2004.25 - 3 , 2004.25 - 3), c(0, 3500))
lines(c(2004.25, 2004.25), c(0, 3500))
text(1996.25, 2500, "Training")
text(2002.75, 2500, "Validation")
text(2005.25, 2500, "Future")
# Double-headed arrows spanning each period.
arrows(2004 - 3,2400,1991.25,2400,code=3,length=0.1,lwd=1,angle=30)
arrows(2004.5 - 3,2400,2004,2400,code=3,length=0.1,lwd=1,angle=30)
arrows(2004.5,2400,2006,2400,code=3,length=0.1,lwd=1,angle=30)
|
#########################################################
#######              Raw data                 ###########
#########################################################
library(boot)
library(caret)
# Simulated classifier output: a 14-row truth/prediction pattern repeated
# 20 times (280 rows total); 1 = positive, 0 = negative.
value_tru<-rep(c(0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0), 20)
value_pre<-rep(c(1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0), 20)
rawdata<-data.frame(value_tru, value_pre)
### Convert 0/1 codes to Negative/Positive factors, as caret's functions expect.
rawdata$value_tru<-factor(ifelse(rawdata$value_tru >0, "Positive", "Negative"))
rawdata$value_pre<-factor(ifelse(rawdata$value_pre >0, "Positive", "Negative"))
# Confusion-matrix summary for the full (un-resampled) data.
confusionMatrix(data = rawdata$value_pre, reference = rawdata$value_tru)
#########################################################
####### Bootstrapping CI for balanced accuracy ##########
#########################################################
# Bootstrap statistic for boot(): balanced accuracy of the resampled rows.
# data: rawdata-style frame with factor columns value_tru / value_pre
# i:    row indices chosen by boot() for this replicate
bacc <- function(data, i) {
  resampled <- data[i, ]
  sens <- sensitivity(resampled$value_pre, resampled$value_tru)
  spec <- specificity(resampled$value_pre, resampled$value_tru)
  (sens + spec) / 2
}
# 10,000 bootstrap replicates of the balanced-accuracy statistic.
bacc.boot<-boot(rawdata, bacc, R = 10000)
# 95% CIs via four methods: BCa, normal approximation, basic, percentile.
boot.ci(bacc.boot, conf=0.95, type = c("bca", "norm", "basic", "perc"))
help(boot.ci)
################################################################
####### CI for balanced accuracy based on Chen Method###########
################################################################
# Wald-type confidence interval for balanced accuracy (Chen method).
# data:  two-column data frame; column 1 = reference labels, column 2 =
#        predictions (both factors with the same two levels, as built above).
# alpha: significance level (default 0.05 -> 95% CI).
# Prints the estimate, its standard error and the CI (same text as before)
# and now also returns them invisibly so callers can reuse the numbers
# (previously the function returned cat()'s NULL).
baccCI<-function(data, alpha=0.05){
# Balanced accuracy = mean of caret's sensitivity and specificity.
bacc <- (sensitivity(data[, 2], data[, 1]) + specificity(data[, 2], data[, 1])) / 2
# 2x2 contingency table: rows = predicted, columns = reference.
crosstable <- table(data[, 2], data[, 1])
n11 <- crosstable[1, 1]
n12 <- crosstable[1, 2]
n21 <- crosstable[2, 1]   # renamed from `c`, which shadowed base::c()
n22 <- crosstable[2, 2]
# Delta-method variance of the mean of two binomial proportions
# (identical to the original p1*p2 + p3*p4 with a = n11, b = n12,
# c = n21, d = n22).
varbacc <- (n22 / (n12 + n22))^2 * n11 * n21 / ((n11 + n21)^3) +
(n11 / (n11 + n21))^2 * n12 * n22 / ((n12 + n22)^3)
sebacc <- sqrt(varbacc)
z <- qnorm(1 - alpha / 2)
lowerCI <- bacc - z * sebacc
upperCI <- bacc + z * sebacc
cat(" Balanced accuracy =", bacc, "\n",
"standard error =", sebacc, "\n",
"CI for balanced accuracy = [", lowerCI, ",", upperCI, "]")
invisible(list(bacc = bacc, se = sebacc, lower = lowerCI, upper = upperCI))
}
baccCI(rawdata)
################################################################
####### meta analysis of balanced accuracy using micp###########
################################################################
library(micp)
# ks/ns: one row per class, one column per subject -- presumably correct
# counts (ks) out of trial counts (ns); TODO confirm against the micp docs.
ks<-rbind(rep(80, 2), rep(80, 2))
ns<-rbind(rep(120, 2), rep(160, 2))
micp.stats(ks, ns)
## References:
## https://stat.ethz.ch/pipermail/r-help/2012-February/303977.html
## https://pages.uoregon.edu/flournoy/bootstrapping/bootstrapexample.html
| /BCIt.R | no_license | kgmacau/SASgitR | R | false | false | 2,564 | r |
#########################################################
#######              Raw data                 ###########
#########################################################
library(boot)
library(caret)
# Simulated classifier output: a 14-row truth/prediction pattern repeated
# 20 times (280 rows total); 1 = positive, 0 = negative.
value_tru<-rep(c(0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0), 20)
value_pre<-rep(c(1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0), 20)
rawdata<-data.frame(value_tru, value_pre)
### Convert 0/1 codes to Negative/Positive factors, as caret's functions expect.
rawdata$value_tru<-factor(ifelse(rawdata$value_tru >0, "Positive", "Negative"))
rawdata$value_pre<-factor(ifelse(rawdata$value_pre >0, "Positive", "Negative"))
# Confusion-matrix summary for the full (un-resampled) data.
confusionMatrix(data = rawdata$value_pre, reference = rawdata$value_tru)
#########################################################
####### Bootstrapping CI for balanced accuracy ##########
#########################################################
# Bootstrap statistic for boot(): balanced accuracy of the resampled rows.
# data: rawdata-style frame with factor columns value_tru / value_pre
# i:    row indices chosen by boot() for this replicate
bacc<- function(data, i) {
d=data[i,]
# Balanced accuracy = (sensitivity + specificity) / 2 (caret functions).
bacc<-(sensitivity(d$value_pre, d$value_tru)+specificity(d$value_pre, d$value_tru))/2
c(bacc)
}
# 10,000 bootstrap replicates of the balanced-accuracy statistic.
bacc.boot<-boot(rawdata, bacc, R = 10000)
# 95% CIs via four methods: BCa, normal approximation, basic, percentile.
boot.ci(bacc.boot, conf=0.95, type = c("bca", "norm", "basic", "perc"))
help(boot.ci)
################################################################
####### CI for balanced accuracy based on Chen Method###########
################################################################
# Wald-type confidence interval for balanced accuracy (Chen method).
# data:  two-column data frame; column 1 = reference labels, column 2 = predictions.
# alpha: significance level (default 0.05 -> 95% CI).
# Prints the estimate, standard error and CI with cat(); returns NULL.
baccCI<-function(data, alpha=0.05){
# Balanced accuracy = mean of caret's sensitivity and specificity.
bacc<-(sensitivity(data[, 2], data[, 1])+specificity(data[, 2], data[, 1]))/2
# 2x2 contingency table: rows = predicted, columns = reference.
crosstable<-table(data[, 2], data[, 1])
a<-crosstable[1, 1]
b<-crosstable[1, 2]
# NOTE(review): this local `c` shadows base::c() inside the function body.
c<-crosstable[2, 1]
d<-crosstable[2, 2]
# Delta-method variance of the mean of two binomial proportions.
p1<-(d/(b+d))**2
p2<-a*c/((a+c)**3)
p3<-(a/(a+c))**2
p4<-b*d/((b+d)**3)
varbacc<-p1*p2+p3*p4
sebacc<-sqrt(varbacc)
lowerCI<-bacc-qnorm(1-alpha/2)*sebacc
upperCI<-bacc+qnorm(1-alpha/2)*sebacc
cat(" Balanced accuracy =", bacc, "\n",
"standard error =", sebacc, "\n",
"CI for balanced accuracy = [", lowerCI, ",", upperCI, "]")
}
baccCI(rawdata)
################################################################
####### meta analysis of balanced accuracy using micp###########
################################################################
library(micp)
# ks/ns: one row per class, one column per subject -- presumably correct
# counts (ks) out of trial counts (ns); TODO confirm against the micp docs.
ks<-rbind(rep(80, 2), rep(80, 2))
ns<-rbind(rep(120, 2), rep(160, 2))
micp.stats(ks, ns)
## References:
## https://stat.ethz.ch/pipermail/r-help/2012-February/303977.html
## https://pages.uoregon.edu/flournoy/bootstrapping/bootstrapexample.html
|
# Title     : COVIDdirectlyFromJH
# Objective : Pull COVID-19 time-series data directly from the Johns Hopkins
#             CSSE GitHub repository and reshape it to one long table.
# Created by: Jo
# Created on: 22-4-2020
library(knitr)
library(tidyverse)
library(lubridate)
library(rvest)
library(stringdist)
library(countrycode)
# Cleaning approach from Joachim Gassen. See: https://github.com/joachim-gassen
# Reshape one JHU CSSE wide time-series data frame into long format keyed by
# ISO3 country code and date. The value column is named after the caller's
# variable: deparse(substitute(df)) captures the argument's name and the last
# four characters ("_raw") are stripped, so clean_jhd_to_long(confirmed_raw)
# yields a column called "confirmed".
# NOTE(review): this naming trick only works when called with a variable whose
# name carries a 4-character suffix; calling it on an expression would misbehave.
clean_jhd_to_long <- function(df) {
df_str <- deparse(substitute(df))
var_str <- substr(df_str, 1, str_length(df_str) - 4)
df %>%
# Drop sub-national detail and coordinates; aggregate to country level.
select(-`Province/State`, -Lat, -Long) %>%
rename(country = `Country/Region`) %>%
mutate(iso3c = countrycode(country,
origin = "country.name",
destination = "iso3c")) %>%
# Rows whose country name cannot be mapped to an ISO3 code are dropped.
select(-country) %>%
filter(!is.na(iso3c)) %>%
group_by(iso3c) %>%
summarise_at(vars(-group_cols()), sum) %>%
# One row per country/date; dates arrive as column headers in m/d/y format.
pivot_longer(
-iso3c,
names_to = "date_str",
values_to = var_str
) %>%
ungroup() %>%
mutate(date = mdy(date_str)) %>%
select(iso3c, date, !! sym(var_str))
}
# Download the JHU CSSE global time series straight from GitHub.
confirmed_raw <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", col_types = cols())
deaths_raw <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv", col_types = cols())
# Recovered data I pull from the old deprecated dataset. This might generate issues going forward
recovered_raw <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv", col_types = cols())
# Merge the three measures into one long country/date panel.
jh_covid19_data <- clean_jhd_to_long(confirmed_raw) %>%
full_join(clean_jhd_to_long(deaths_raw), by = c("iso3c", "date")) %>%
full_join(clean_jhd_to_long(recovered_raw), by = c("iso3c", "date"))
# Country name <-> ISO3 lookup; unmappable names are dropped.
jhd_countries <- tibble(
country = unique(confirmed_raw$`Country/Region`),
iso3c = countrycode(country,
origin = "country.name",
destination = "iso3c")
) %>% filter(!is.na(iso3c))
# Countries present only in the legacy recovered file are appended.
old_jhd_countries <- tibble(
country = unique(recovered_raw$`Country/Region`),
iso3c = countrycode(country,
origin = "country.name",
destination = "iso3c")
) %>% filter(!is.na(iso3c),
! iso3c %in% jhd_countries$iso3c)
jhd_countries <- rbind(jhd_countries, old_jhd_countries)
# Attach readable country names and order the final columns.
jh_covid19_data %>%
left_join(jhd_countries, by = "iso3c") %>%
select(country, iso3c, date, confirmed, deaths, recovered) -> jh_covid19_data
| /R/COVID/COVIDJohnHopkins.R | no_license | JoZelis/learning | R | false | false | 2,615 | r | # Title : COVIDdirectlyFromJH
# Objective : Try to get the data directly from JH github
# Created by: Jo
# Created on: 22-4-2020
library(knitr)
library(tidyverse)
library(lubridate)
library(rvest)
library(stringdist)
library(countrycode)
#from Joachim Gassen See: https://github.com/joachim-gassen
# Reshape one JHU CSSE wide time-series data frame into long format keyed by
# ISO3 country code and date. The value column is named after the caller's
# variable: deparse(substitute(df)) captures the argument's name and the last
# four characters ("_raw") are stripped, so clean_jhd_to_long(confirmed_raw)
# yields a column called "confirmed".
# NOTE(review): this naming trick only works when called with a variable whose
# name carries a 4-character suffix; calling it on an expression would misbehave.
clean_jhd_to_long <- function(df) {
df_str <- deparse(substitute(df))
var_str <- substr(df_str, 1, str_length(df_str) - 4)
df %>%
# Drop sub-national detail and coordinates; aggregate to country level.
select(-`Province/State`, -Lat, -Long) %>%
rename(country = `Country/Region`) %>%
mutate(iso3c = countrycode(country,
origin = "country.name",
destination = "iso3c")) %>%
# Rows whose country name cannot be mapped to an ISO3 code are dropped.
select(-country) %>%
filter(!is.na(iso3c)) %>%
group_by(iso3c) %>%
summarise_at(vars(-group_cols()), sum) %>%
# One row per country/date; dates arrive as column headers in m/d/y format.
pivot_longer(
-iso3c,
names_to = "date_str",
values_to = var_str
) %>%
ungroup() %>%
mutate(date = mdy(date_str)) %>%
select(iso3c, date, !! sym(var_str))
}
# Download the JHU CSSE global time series straight from GitHub.
confirmed_raw <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", col_types = cols())
deaths_raw <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv", col_types = cols())
# Recovered data I pull from the old deprecated dataset. This might generate issues going forward
recovered_raw <- read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv", col_types = cols())
# Merge the three measures into one long country/date panel.
jh_covid19_data <- clean_jhd_to_long(confirmed_raw) %>%
full_join(clean_jhd_to_long(deaths_raw), by = c("iso3c", "date")) %>%
full_join(clean_jhd_to_long(recovered_raw), by = c("iso3c", "date"))
# Country name <-> ISO3 lookup; unmappable names are dropped.
jhd_countries <- tibble(
country = unique(confirmed_raw$`Country/Region`),
iso3c = countrycode(country,
origin = "country.name",
destination = "iso3c")
) %>% filter(!is.na(iso3c))
# Countries present only in the legacy recovered file are appended.
old_jhd_countries <- tibble(
country = unique(recovered_raw$`Country/Region`),
iso3c = countrycode(country,
origin = "country.name",
destination = "iso3c")
) %>% filter(!is.na(iso3c),
! iso3c %in% jhd_countries$iso3c)
jhd_countries <- rbind(jhd_countries, old_jhd_countries)
# Attach readable country names and order the final columns.
jh_covid19_data %>%
left_join(jhd_countries, by = "iso3c") %>%
select(country, iso3c, date, confirmed, deaths, recovered) -> jh_covid19_data
|
library(ggplot2)
library(tidyverse)
# Genetic distance of sequential HIV samples vs time, one curve per patient.
data <- read.table("time_distance_matrix.csv", header=T, dec=".", sep=",")
data$patient <- as.factor(data$patient)
# Scatter plus per-patient loess trend.
ggplot(data, aes(x=days, y=dist, color=patient)) +
geom_point() +
geom_smooth(method='loess', se=FALSE) +
# coord_cartesian zooms without dropping points (unlike xlim/ylim).
coord_cartesian(xlim=c(10,265), ylim=c(0.0002,0.007)) +
ggtitle("Genetic Distance in HIV Genome over Time", subtitle = "Measured from first sample") +
xlab("Days since seroconversion") +
ylab("Genetic distance from first sample (GGDC formula 2 distance)") +
theme(plot.title = element_text(hjust = 0.5,size = 14), plot.subtitle = element_text(hjust = 0.5,size=10),
axis.text.y = element_text(angle=50, hjust=0.5),
axis.title = element_text(size = 13))
| /time_analysis.R | no_license | drew-neely/HIV-Evolution-Analysis | R | false | false | 749 | r |
library(ggplot2)
library(tidyverse)
data <- read.table("time_distance_matrix.csv", header=T, dec=".", sep=",")
data$patient <- as.factor(data$patient)
ggplot(data, aes(x=days, y=dist, color=patient)) +
geom_point() +
geom_smooth(method='loess', se=FALSE) +
coord_cartesian(xlim=c(10,265), ylim=c(0.0002,0.007)) +
ggtitle("Genetic Distance in HIV Genome over Time", subtitle = "Measured from first sample") +
xlab("Days since seroconversion") +
ylab("Genetic distance from first sample (GGDC formula 2 distance)") +
theme(plot.title = element_text(hjust = 0.5,size = 14), plot.subtitle = element_text(hjust = 0.5,size=10),
axis.text.y = element_text(angle=50, hjust=0.5),
axis.title = element_text(size = 13))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printers.R
\name{save_as_pptx}
\alias{save_as_pptx}
\title{save flextable objects in a PowerPoint file}
\usage{
save_as_pptx(..., values = NULL, path)
}
\arguments{
\item{...}{flextable objects, objects, possibly named. If named objects, names are
used as slide titles.}
\item{values}{a list (possibly named), each element is a flextable object. If named objects, names are
used as slide titles. If provided, argument \code{...} will be ignored.}
\item{path}{PowerPoint file to be created}
}
\description{
sugar function to save flextable objects in a PowerPoint file.
}
\examples{
ft1 <- flextable( head( iris ) )
tf <- tempfile(fileext = ".pptx")
save_as_pptx(ft1, path = tf)
ft2 <- flextable( head( mtcars ) )
tf <- tempfile(fileext = ".pptx")
save_as_pptx(`iris table` = ft1, `mtcars table` = ft2, path = tf)
}
\seealso{
Other flextable print function:
\code{\link{as_raster}()},
\code{\link{docx_value}()},
\code{\link{htmltools_value}()},
\code{\link{knit_print.flextable}()},
\code{\link{plot.flextable}()},
\code{\link{print.flextable}()},
\code{\link{save_as_docx}()},
\code{\link{save_as_html}()},
\code{\link{save_as_image}()}
}
\concept{flextable print function}
| /man/save_as_pptx.Rd | no_license | travistdale/flextable | R | false | true | 1,259 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printers.R
\name{save_as_pptx}
\alias{save_as_pptx}
\title{save flextable objects in a PowerPoint file}
\usage{
save_as_pptx(..., values = NULL, path)
}
\arguments{
\item{...}{flextable objects, objects, possibly named. If named objects, names are
used as slide titles.}
\item{values}{a list (possibly named), each element is a flextable object. If named objects, names are
used as slide titles. If provided, argument \code{...} will be ignored.}
\item{path}{PowerPoint file to be created}
}
\description{
sugar function to save flextable objects in a PowerPoint file.
}
\examples{
ft1 <- flextable( head( iris ) )
tf <- tempfile(fileext = ".pptx")
save_as_pptx(ft1, path = tf)
ft2 <- flextable( head( mtcars ) )
tf <- tempfile(fileext = ".pptx")
save_as_pptx(`iris table` = ft1, `mtcars table` = ft2, path = tf)
}
\seealso{
Other flextable print function:
\code{\link{as_raster}()},
\code{\link{docx_value}()},
\code{\link{htmltools_value}()},
\code{\link{knit_print.flextable}()},
\code{\link{plot.flextable}()},
\code{\link{print.flextable}()},
\code{\link{save_as_docx}()},
\code{\link{save_as_html}()},
\code{\link{save_as_image}()}
}
\concept{flextable print function}
|
library(msgl)
# warnings = errors
options(warn=2)
### Basic tests
data(SimData)
x <- sim.data$x
classes <- sim.data$classes
## Lambda sequence
# 100 lambda values for sparse group lasso with alpha = 0.5 mixing.
lambda <- msgl.lambda.seq(x, classes, alpha = .5, d = 100L, lambda.min = 0.01, standardize = TRUE)
fit.qwe <- msgl(x, classes, lambda = lambda, intercept = FALSE)
res <- predict(fit.qwe, x)
# Sanity check: at least one lambda must classify the training data perfectly.
if(min(colSums(res$classes != classes)) > 0) stop()
res <- predict(fit.qwe, x, sparse.data = TRUE)
# Same check with sparse input; results must not depend on the storage format.
if(min(colSums(res$classes != classes)) > 0) stop()
| /msgl/tests/msgl_predict_test_2.R | no_license | ingted/R-Examples | R | false | false | 492 | r | library(msgl)
# warnings = errors
options(warn=2)
### Basic tests
data(SimData)
x <- sim.data$x
classes <- sim.data$classes
## Lambda sequence
lambda <- msgl.lambda.seq(x, classes, alpha = .5, d = 100L, lambda.min = 0.01, standardize = TRUE)
fit.qwe <- msgl(x, classes, lambda = lambda, intercept = FALSE)
res <- predict(fit.qwe, x)
if(min(colSums(res$classes != classes)) > 0) stop()
res <- predict(fit.qwe, x, sparse.data = TRUE)
if(min(colSums(res$classes != classes)) > 0) stop()
|
library(ggplot2)
car<-read.table("car.txt", header=T)
head(car)
## The plots below show the main effects of Merit/Class on claim frequency and their interaction.
## (Comments translated to English; the originals were encoding-garbled Korean.)
qplot(car$Merit, car$Claims/car$Insured) +
geom_line(aes(group = car$Class, color = factor(car$Class)), size=1)+
labs(title="Merit vs. Claims",x ="Meirt", y = "Claims per Insured")
qplot(car$Class, car$Claims/car$Insured) +
geom_line(aes(group = car$Merit, color = factor(car$Merit)), size=1)+
labs(title="Class vs. Claims",x ="Class", y = "Claims per Insured")
car$Merit<-factor(car$Merit); car$Class<-factor(car$Class)
####### Claim frequency: fit a regression model under a Poisson assumption #######
# Claims ~ all main effects and two-way interactions, log link; then stepwise AIC selection.
glm1<-glm(Claims~.*., data=car, family=poisson(link="log"))
step1<-step(glm1, direction="both")
#### Claim severity: Gamma distribution assumption #####
glm2<-glm(Cost~., data=car, family=Gamma(link="identity"))
# Pure premium = expected claim frequency per insured * expected cost per claim
lambda<-step1$fitted.values/car$Insured
mu<-glm2$fitted.values/car$Claims
ins <- lambda*mu # compute the pure premium
cbind(Merit=car$Merit, Class=car$Class, Optimal.Premium=ins)
dat4<-data.frame(lambda=lambda, mu=mu)
summary(lm(mu~lambda))
# Scatter plot of group mean accident frequency (lambda) vs mean severity (mu)
ggplot(data=dat4, aes(x=lambda, y=mu))+geom_point()+geom_smooth(method="lm")+
annotate("text", x =0.11, y=0.35, label = "mu= 0.24788 + 0.40984*lambda")+
annotate("text", x =0.11, y=0.34, label = "R2.adj=0.1366 ")+
labs(x="lambda",y="mu")+ggtitle("lambda vs mu")
| /CarInsurance.R | no_license | wlsuddl/WorkingR | R | false | false | 1,523 | r | library(ggplot2)
car<-read.table("car.txt", header=T)
head(car)
##๋ณ์๊ฐ plot ๊ทธ๋ฆผ์ผ๋ก ์ฃผํจ๊ณผ์ ๊ตํธ์์ฉ ํจ๊ณผ ์ ์ ์๋ค.
qplot(car$Merit, car$Claims/car$Insured) +
geom_line(aes(group = car$Class, color = factor(car$Class)), size=1)+
labs(title="Merit vs. Claims",x ="Meirt", y = "Claims per Insured")
qplot(car$Class, car$Claims/car$Insured) +
geom_line(aes(group = car$Merit, color = factor(car$Merit)), size=1)+
labs(title="Class vs. Claims",x ="Class", y = "Claims per Insured")
car$Merit<-factor(car$Merit); car$Class<-factor(car$Class)
####### ํฌ์์ก ๋ถํฌ ๊ฐ์ ์, ์ ์ ํ ํ๊ท๋ชจํ ๊ตฌ์ถ #######
glm1<-glm(Claims~.*., data=car, family=poisson(link="log"))
step1<-step(glm1, direction="both")
#### ๊ฐ๋ง ๋ถํฌ ๊ฐ์ ์ #####
glm2<-glm(Cost~., data=car, family=Gamma(link="identity"))
#์ ์ ๋ณดํ๋ฃ = 1์ธ๋น ํ๊ท ๋ณดํ ์ฒญ๊ตฌ* 1๊ฑด๋น ํ๊ท ๋ณดํ ์ง๊ธ์ก
lambda<-step1$fitted.values/car$Insured
mu<-glm2$fitted.values/car$Claims
ins <- lambda*mu # ์ ์ ๋ณดํ๋ฃ ๊ณ์ฐ์.
cbind(Merit=car$Merit, Class=car$Class, Optimal.Premium=ins)
dat4<-data.frame(lambda=lambda, mu=mu)
summary(lm(mu~lambda))
#๊ทธ๋ฃน๋ณ ํ๊ท ์ฌ๊ณ ๋น๋ ์ ํ๊ท ์ฌ๊ณ ์ฌ๋ ์ฐ์ ๋
ggplot(data=dat4, aes(x=lambda, y=mu))+geom_point()+geom_smooth(method="lm")+
annotate("text", x =0.11, y=0.35, label = "mu= 0.24788 + 0.40984*lambda")+
annotate("text", x =0.11, y=0.34, label = "R2.adj=0.1366 ")+
labs(x="lambda",y="mu")+ggtitle("lambda vs mu")
|
#'
#'
#'@export
get_normalizingconstant <- function(type, q, N = 1000) {
  #-----------------------------------------------------------------------------
  # Keep match.arg's result: this expands abbreviated inputs before branching.
  # The original discarded it, so e.g. type = "q-s" passed the check but then
  # silently fell through to the "JM" branch below.
  type <- match.arg(type, c("log-q-serie", "q-serie", "JM"))
  #-----------------------------------------------------------------------------
  # Upper-bound the infinite sum of the gamma sequence by the first N terms
  # plus an integral tail bound, and return its reciprocal.
  i <- 1:N
  if (type == "q-serie") {
    gamma <- 1 / ((i)^q)
    # Tail: integral of x^(-q) on [N, Inf) (finite for q > 1).
    integral_upper_bound <- sum(gamma) - ((1 / (1 - q)) * (N)^(1 - q))
  }
  else if (type == "log-q-serie") {
    gamma <- 1 / ((i + 1) * (log(i + 1)^q))
    # Tail: integral of 1/((x+1) log(x+1)^q) on [N, Inf) (finite for q > 1).
    integral_upper_bound <- sum(gamma) - (1 / ((1 - q) * (log(N + 1)^(1 - q))))
  }
  else {
    # Javanmard-Montanari sequence; pmax(i, 2) avoids log(1) = 0 in the numerator.
    gamma <- log(pmax(i, 2)) / (i * exp(sqrt(log(i))))
    integral_upper_bound <- sum(gamma) + (2 * exp(-sqrt(log(N))) * (log(N)^(3 / 2) + 3 * log(N) + 6 * sqrt(log(N)) + 6))
  }
  return(1 / integral_upper_bound)
}
#' gamma_sequence.
#'
#' Function that computes a nonnegative decreasing sequence.
#' The user can choose to make the sequence sum to exactly one
#' (and thus using the number of hypotheses to test),
#' or to make the sequence sum to less than one by approximating the infinity.
#' Three choices for the type of sequence are proposed, of which
#' log-q serie and q-serie as proposed by Tian and Ramdas (2021).
#'
#' @param type Either "log-q-serie", "q-serie" or a "rectangular" kernel.
#' @param nb_pvalues An integer giving the nb of p-values (/ hypothesis) to test.
#' @param q The exponent for computing the sequence or the kernel bandwidth.
#' Note that when using a rectangular kernel, q must be an integer.
#'
#' @return A vector: the gamma sequence.
#'
#' @example gamma_sequence("log-q-serie", 100, 2).
#'
#' @references Tian, J. and Ramdas, A. (2021). Online control of the familywise
#' error rate. \emph{Statistical Methods for Medical Research},
#' \url{https://journals.sagepub.com/eprint/AYRRKZX7XMTVHKCFYBJY/full}
#'
#' @export
gamma_sequence <- function(type, nb_pvalues, q) {
  #-----------------------------------------------------------------------------
  # Keep match.arg's result so abbreviated types are expanded before branching
  # (the original discarded it, sending abbreviations into the wrong branch).
  type <- match.arg(type, c("log-q-serie", "q-serie", "JM", "rectangular"))
  if (type == "rectangular") {
    if (q %% 1 != 0) {
      stop("For using a rectangular kernel, you should provide an integer for the bandwidth q")
    }
  }
  #-----------------------------------------------------------------------------
  if (type != "rectangular") {
    normalization_constant <- get_normalizingconstant(type, q)
  }
  if (type == "log-q-serie") {
    i <- 1:nb_pvalues
    gamma <- 1 / ((i + 1) * (log(i + 1)^q))
    # normalize the sequence so it sums to (at most) one
    gamma <- gamma * normalization_constant
  }
  else if (type == "q-serie") {
    i <- 1:nb_pvalues
    gamma <- 1 / ((i)^q)
    gamma <- gamma * normalization_constant
  }
  else if (type == "JM") {
    i <- 1:nb_pvalues
    gamma <- log(pmax(i, 2)) / (i * exp(sqrt(log(i))))
    gamma <- gamma * normalization_constant
  }
  else {
    # (The duplicate integer check on q was removed: it is enforced above.)
    if (nb_pvalues - q < 0) {
      stop("The kernel bandwidth cannot be larger than the number of hypothesis to test.")
    }
    # Rectangular kernel: uniform mass on the first q positions.
    gamma <- c(rep(1 / q, q), rep(0, nb_pvalues - q))
  }
  # Runtime sanity check without a testthat dependency in production code;
  # small tolerance absorbs floating-point rounding.
  stopifnot(sum(gamma) <= 1 + 1e-12)
  return(gamma)
}
#' shuffle_vec
#'
#' Function that shuffles a vector (permutation).
#' This function allows to study the signal position scheme where
#' the signal is not clustered but positioned randomly across the whole stream of hypothesis
#' (signal position = "no_cluster_shuffle" in data_simulation function).
#'
#'
#' @param vec A vector that needs to be shuffled.
#' @param permutation_index A vector indicating how to shuffle the vector
#' if one wants to perform a certain permutation.
#'
#' @return A list containing the shuffled vector and the index of the entries.
#'
#' @example shuffle_vec(c(11, 12, 13, 14, 15), c(4, 3, 5, 1, 2))
#' should return the permuted vector c(14, 13, 15, 11, 12),
#' and c(4, 3, 5, 1, 2), the permutation index.
#'
#' @export
shuffle_vec <- function(vec, permutation_index = NULL) {
  # is.null() covers both an omitted argument and an explicit NULL;
  # sample.int() draws a uniform random permutation (replacing gtools::permute,
  # which is itself a thin wrapper around sample()).
  if (is.null(permutation_index)) {
    permutation_index <- sample.int(length(vec))
  }
  # Direct subsetting implements the permutation documented in the example
  # above: entry j of the result is vec[permutation_index[j]]. The former
  # sparse-matrix product computed the *inverse* permutation instead,
  # contradicting the documented example (and needlessly pulling in Matrix).
  shuffled_vec <- vec[permutation_index]
  output <- list(shuffle_vec = shuffled_vec, permutation_index = permutation_index)
  return(output)
}
#' number_of_discoveries
#'
#' Function that allows to get the necessary quantities to estimate the
#' error (power, FWER or mFDR).
#'
#' @param rej_index A vector containing the indices of the rejected hypothesis.
#' @param alternative_index A vector containing the indices (in the stream of hypothesis) of the signal.
#' @param error_metric A string, either "FWER" or "mFDR" to indicate the error metric the user is studying.
#'
#' @return A list containing
#' ratio_true_discoveries : Ratio between the nb of true discoveries
#' and the number of non-nulls (= signals),
#' Nb of true discoveries,
#' error_quantity : depending on the error metric;
#' either a boolean stating the presence of a false discovery (FWER),
#' or the number of false discoveries (mFDR).
#'
#' @example number_of_discoveries(c(4, 5, 13, 14, 17), seq(13, 20), "FWER") should
#' return (3 / 8, 3, 1) (where 1 stands for TRUE) and
#' number_of_discoveries(c(4, 5, 13, 14, 17), seq(13, 20), "mFDR") should
#' return (3 / 8, 3, 2)
#'
#' @export
number_of_discoveries <- function(rej_index, alternative_index, error_metric) {
  #-----------------------------------------------------------------------------
  # Keep match.arg's result so abbreviated metrics are expanded before the
  # comparisons below (the original discarded it).
  error_metric <- match.arg(error_metric, c("mFDR", "FWER"))
  #-----------------------------------------------------------------------------
  # True discoveries: rejected indices that are genuine signal positions.
  nb_true_discoveries <- sum(rej_index %in% alternative_index)
  # Power estimate: fraction of the non-nulls that were discovered.
  ratio_true_discoveries <- nb_true_discoveries / length(alternative_index)
  nb_false_discoveries <- length(rej_index) - nb_true_discoveries
  # FWER tracks *whether* a false discovery occurred; mFDR tracks how many.
  error_quantity <- if (error_metric == "FWER") {
    nb_false_discoveries > 0
  } else {
    nb_false_discoveries
  }
  output <- list(ratio_true_discoveries = ratio_true_discoveries,
                 nb_true_discoveries = nb_true_discoveries,
                 error_quantity = error_quantity)
  return(output)
}
#' get_CDF
#'
#' Function that allows getting the CDF of p-values ready to plot.
#' This function is used only for shiny apps.
#'
#' @param N An integer corresponding to the number of subjects studied (or the number of rows in the matrice).
#' @param m An integer corresponding to the number of hypotheses to test (or the number of columns in the matrice).
#' @param non_nulls_proportion A numeric in [0, 1] corresponding to the quantity of signal the user wants in the data.
#' @param p3 A numeric in [0, 1] corresponding to the strength of the signal the user wants.
#' @param cluster_option Either "end", "begin", "begin_middle", "begin_end", "midlle_end", or "no_cluster_shuffle".
#' This option indicates how to position the signal in the stream of hypothesis.
#' @param p1 A numeric corresponding to the Bernouilli parameter for generating a first group of nulls.
#' @param p2 A numeric corresponding to the Bernouilli parameter for generating a second group of nulls.
#'
#' @return A list with the p-values' CDFs ready to use.
#'
#' @example get_CDF(25, 100, 0.3, 0.4, "end").
#'
get_CDF <- function(N, m, non_nulls_proportion, p3,
                    cluster_option, p1 = 0.01, p2 = 0.1) {
  # NOTE(review): the original computed a `proportions` vector here that was
  # never passed to data_simulation(); removed as dead code. p1 and p2 are
  # likewise unused but kept so the signature stays stable for callers.
  data <- data_simulation(N, m, non_nulls_proportion, p3, cluster_option)$data
  CDF_list <- pvalues_simulation(data)$support
  # One step function per hypothesis: the CDF over its discrete p-value support.
  stepf <- lapply(CDF_list, function(x) stepfun(x, c(0, x)))
  return(stepf)
}
#' male_female_pvalue_min
#'
#'
# For each row, keep the smaller of the male and female p-values together
# with its associated support.
#   Male_test / Fem_test: data frames (or lists) with elements `raw`
#   (p-values) and `support`, one entry per test, same number of rows.
# Returns a list with `raw` (row-wise minimum p-values) and `support`.
male_female_pvalue_min <- function(Male_test, Fem_test) {
  n <- nrow(Male_test)
  pvalues <- numeric(n)
  # Proper list preallocation; the original `list(nrow(...))` created a
  # one-element list holding the row count.
  support <- vector("list", n)
  # Bug fix: the original loop bound referenced the undefined `Male_df`;
  # it must iterate over the rows of the `Male_test` argument.
  for (i in seq_len(n)) {
    # Ties go to the male p-value, matching the original min() comparison.
    if (Male_test$raw[i] <= Fem_test$raw[i]) {
      pvalues[i] <- Male_test$raw[i]
      support[i] <- Male_test$support[i]
    } else {
      pvalues[i] <- Fem_test$raw[i]
      support[i] <- Fem_test$support[i]
    }
  }
  # Explicit return; the original relied on the value of the final assignment.
  output <- list(raw = pvalues, support = support)
  return(output)
}
#' get_counts
#'
#' @export
# Split per-group totals into abnormality and normality counts given
# abnormality rates.
#   totals: numeric vector of total counts per group.
#   rates:  numeric vector of abnormality rates, same length as `totals`.
# Returns a list with Ab_counts (rates * totals) and N_counts (the remainder).
get_counts <- function(totals, rates) {
  if (length(totals) != length(rates)) { stop("The number of row must be the same")}
  # Vectorized arithmetic replaces the former element-wise loop.
  Ab_counts <- rates * totals
  N_counts <- totals - Ab_counts
  output <- list(Ab_counts = Ab_counts, N_counts = N_counts)
  return(output)
}
| /OnlineSuperUnif/R/utils_functions.R | no_license | iqm15/SUREOMT | R | false | false | 9,313 | r | #'
#'
#'@export
get_normalizingconstant <- function(type, q, N = 1000) {
  #-----------------------------------------------------------------------------
  # Keep match.arg's result: this expands abbreviated inputs before branching.
  # The original discarded it, so e.g. type = "q-s" passed the check but then
  # silently fell through to the "JM" branch below.
  type <- match.arg(type, c("log-q-serie", "q-serie", "JM"))
  #-----------------------------------------------------------------------------
  # Upper-bound the infinite sum of the gamma sequence by the first N terms
  # plus an integral tail bound, and return its reciprocal.
  i <- 1:N
  if (type == "q-serie") {
    gamma <- 1 / ((i)^q)
    # Tail: integral of x^(-q) on [N, Inf) (finite for q > 1).
    integral_upper_bound <- sum(gamma) - ((1 / (1 - q)) * (N)^(1 - q))
  }
  else if (type == "log-q-serie") {
    gamma <- 1 / ((i + 1) * (log(i + 1)^q))
    # Tail: integral of 1/((x+1) log(x+1)^q) on [N, Inf) (finite for q > 1).
    integral_upper_bound <- sum(gamma) - (1 / ((1 - q) * (log(N + 1)^(1 - q))))
  }
  else {
    # Javanmard-Montanari sequence; pmax(i, 2) avoids log(1) = 0 in the numerator.
    gamma <- log(pmax(i, 2)) / (i * exp(sqrt(log(i))))
    integral_upper_bound <- sum(gamma) + (2 * exp(-sqrt(log(N))) * (log(N)^(3 / 2) + 3 * log(N) + 6 * sqrt(log(N)) + 6))
  }
  return(1 / integral_upper_bound)
}
#' gamma_sequence.
#'
#' Function that computes a nonnegative decreasing sequence.
#' The user can choose to make the sequence sum to exactly one
#' (and thus using the number of hypotheses to test),
#' or to make the sequence sum to less than one by approximating the infinity.
#' Three choices for the type of sequence are proposed, of which
#' log-q serie and q-serie as proposed by Tian and Ramdas (2021).
#'
#' @param type Either "log-q-serie", "q-serie" or a "rectangular" kernel.
#' @param nb_pvalues An integer giving the nb of p-values (/ hypothesis) to test.
#' @param q The exponent for computing the sequence or the kernel bandwidth.
#' Note that when using a rectangular kernel, q must be an integer.
#'
#' @return A vector: the gamma sequence.
#'
#' @example gamma_sequence("log-q-serie", 100, 2).
#'
#' @references Tian, J. and Ramdas, A. (2021). Online control of the familywise
#' error rate. \emph{Statistical Methods for Medical Research},
#' \url{https://journals.sagepub.com/eprint/AYRRKZX7XMTVHKCFYBJY/full}
#'
#' @export
gamma_sequence <- function(type, nb_pvalues, q) {
  #-----------------------------------------------------------------------------
  # Keep match.arg's result so abbreviated types are expanded before branching
  # (the original discarded it, sending abbreviations into the wrong branch).
  type <- match.arg(type, c("log-q-serie", "q-serie", "JM", "rectangular"))
  if (type == "rectangular") {
    if (q %% 1 != 0) {
      stop("For using a rectangular kernel, you should provide an integer for the bandwidth q")
    }
  }
  #-----------------------------------------------------------------------------
  if (type != "rectangular") {
    normalization_constant <- get_normalizingconstant(type, q)
  }
  if (type == "log-q-serie") {
    i <- 1:nb_pvalues
    gamma <- 1 / ((i + 1) * (log(i + 1)^q))
    # normalize the sequence so it sums to (at most) one
    gamma <- gamma * normalization_constant
  }
  else if (type == "q-serie") {
    i <- 1:nb_pvalues
    gamma <- 1 / ((i)^q)
    gamma <- gamma * normalization_constant
  }
  else if (type == "JM") {
    i <- 1:nb_pvalues
    gamma <- log(pmax(i, 2)) / (i * exp(sqrt(log(i))))
    gamma <- gamma * normalization_constant
  }
  else {
    # (The duplicate integer check on q was removed: it is enforced above.)
    if (nb_pvalues - q < 0) {
      stop("The kernel bandwidth cannot be larger than the number of hypothesis to test.")
    }
    # Rectangular kernel: uniform mass on the first q positions.
    gamma <- c(rep(1 / q, q), rep(0, nb_pvalues - q))
  }
  # Runtime sanity check without a testthat dependency in production code;
  # small tolerance absorbs floating-point rounding.
  stopifnot(sum(gamma) <= 1 + 1e-12)
  return(gamma)
}
#' shuffle_vec
#'
#' Function that shuffles a vector (permutation).
#' This function allows to study the signal position scheme where
#' the signal is not clustered but positioned randomly across the whole stream of hypothesis
#' (signal position = "no_cluster_shuffle" in data_simulation function).
#'
#'
#' @param vec A vector that needs to be shuffled.
#' @param permutation_index A vector indicating how to shuffle the vector
#' if one wants to perform a certain permutation.
#'
#' @return A list containing the shuffled vector and the index of the entries.
#'
#' @example shuffle_vec(c(11, 12, 13, 14, 15), c(4, 3, 5, 1, 2))
#' should return the permuted vector c(14, 13, 15, 11, 12),
#' and c(4, 3, 5, 1, 2), the permutation index.
#'
#' @export
shuffle_vec <- function(vec, permutation_index = NULL) {
  # is.null() covers both an omitted argument and an explicit NULL;
  # sample.int() draws a uniform random permutation (replacing gtools::permute,
  # which is itself a thin wrapper around sample()).
  if (is.null(permutation_index)) {
    permutation_index <- sample.int(length(vec))
  }
  # Direct subsetting implements the permutation documented in the example
  # above: entry j of the result is vec[permutation_index[j]]. The former
  # sparse-matrix product computed the *inverse* permutation instead,
  # contradicting the documented example (and needlessly pulling in Matrix).
  shuffled_vec <- vec[permutation_index]
  output <- list(shuffle_vec = shuffled_vec, permutation_index = permutation_index)
  return(output)
}
#' number_of_discoveries
#'
#' Function that allows to get the necessary quantities to estimate the
#' error (power, FWER or mFDR).
#'
#' @param rej_index A vector containing the indices of the rejected hypothesis.
#' @param alternative_index A vector containing the indices (in the stream of hypothesis) of the signal.
#' @param error_metric A string, either "FWER" or "mFDR" to indicate the error metric the user is studying.
#'
#' @return A list containing
#' ratio_true_discoveries : Ratio between the nb of true discoveries
#' and the number of non-nulls (= signals),
#' Nb of true discoveries,
#' error_quantity : depending on the error metric;
#' either a boolean stating the presence of a false discovery (FWER),
#' or the number of false discoveries (mFDR).
#'
#' @example number_of_discoveries(c(4, 5, 13, 14, 17), seq(13, 20), "FWER") should
#' return (3 / 8, 3, 1) (where 1 stands for TRUE) and
#' number_of_discoveries(c(4, 5, 13, 14, 17), seq(13, 20), "mFDR") should
#' return (3 / 8, 3, 2)
#'
#' @export
number_of_discoveries <- function(rej_index, alternative_index, error_metric) {
  #-----------------------------------------------------------------------------
  # Keep match.arg's result so abbreviated metrics are expanded before the
  # comparisons below (the original discarded it).
  error_metric <- match.arg(error_metric, c("mFDR", "FWER"))
  #-----------------------------------------------------------------------------
  # True discoveries: rejected indices that are genuine signal positions.
  nb_true_discoveries <- sum(rej_index %in% alternative_index)
  # Power estimate: fraction of the non-nulls that were discovered.
  ratio_true_discoveries <- nb_true_discoveries / length(alternative_index)
  nb_false_discoveries <- length(rej_index) - nb_true_discoveries
  # FWER tracks *whether* a false discovery occurred; mFDR tracks how many.
  error_quantity <- if (error_metric == "FWER") {
    nb_false_discoveries > 0
  } else {
    nb_false_discoveries
  }
  output <- list(ratio_true_discoveries = ratio_true_discoveries,
                 nb_true_discoveries = nb_true_discoveries,
                 error_quantity = error_quantity)
  return(output)
}
#' get_CDF
#'
#' Function that allows getting the CDF of p-values ready to plot.
#' This function is used only for shiny apps.
#'
#' @param N An integer corresponding to the number of subjects studied (or the number of rows in the matrice).
#' @param m An integer corresponding to the number of hypotheses to test (or the number of columns in the matrice).
#' @param non_nulls_proportion A numeric in [0, 1] corresponding to the quantity of signal the user wants in the data.
#' @param p3 A numeric in [0, 1] corresponding to the strength of the signal the user wants.
#' @param cluster_option Either "end", "begin", "begin_middle", "begin_end", "midlle_end", or "no_cluster_shuffle".
#' This option indicates how to position the signal in the stream of hypothesis.
#' @param p1 A numeric corresponding to the Bernouilli parameter for generating a first group of nulls.
#' @param p2 A numeric corresponding to the Bernouilli parameter for generating a second group of nulls.
#'
#' @return A list with the p-values' CDFs ready to use.
#'
#' @example get_CDF(25, 100, 0.3, 0.4, "end").
#'
get_CDF <- function(N, m, non_nulls_proportion, p3,
                    cluster_option, p1 = 0.01, p2 = 0.1) {
  # NOTE(review): the original computed a `proportions` vector here that was
  # never passed to data_simulation(); removed as dead code. p1 and p2 are
  # likewise unused but kept so the signature stays stable for callers.
  data <- data_simulation(N, m, non_nulls_proportion, p3, cluster_option)$data
  CDF_list <- pvalues_simulation(data)$support
  # One step function per hypothesis: the CDF over its discrete p-value support.
  stepf <- lapply(CDF_list, function(x) stepfun(x, c(0, x)))
  return(stepf)
}
#' male_female_pvalue_min
#'
#'
# For each row, keep the smaller of the male and female p-values together
# with its associated support.
#   Male_test / Fem_test: data frames (or lists) with elements `raw`
#   (p-values) and `support`, one entry per test, same number of rows.
# Returns a list with `raw` (row-wise minimum p-values) and `support`.
male_female_pvalue_min <- function(Male_test, Fem_test) {
  n <- nrow(Male_test)
  pvalues <- numeric(n)
  # Proper list preallocation; the original `list(nrow(...))` created a
  # one-element list holding the row count.
  support <- vector("list", n)
  # Bug fix: the original loop bound referenced the undefined `Male_df`;
  # it must iterate over the rows of the `Male_test` argument.
  for (i in seq_len(n)) {
    # Ties go to the male p-value, matching the original min() comparison.
    if (Male_test$raw[i] <= Fem_test$raw[i]) {
      pvalues[i] <- Male_test$raw[i]
      support[i] <- Male_test$support[i]
    } else {
      pvalues[i] <- Fem_test$raw[i]
      support[i] <- Fem_test$support[i]
    }
  }
  # Explicit return; the original relied on the value of the final assignment.
  output <- list(raw = pvalues, support = support)
  return(output)
}
#' get_counts
#'
#' @export
# Split per-group totals into abnormality and normality counts given
# abnormality rates.
#   totals: numeric vector of total counts per group.
#   rates:  numeric vector of abnormality rates, same length as `totals`.
# Returns a list with Ab_counts (rates * totals) and N_counts (the remainder).
get_counts <- function(totals, rates) {
  if (length(totals) != length(rates)) { stop("The number of row must be the same")}
  # Vectorized arithmetic replaces the former element-wise loop.
  Ab_counts <- rates * totals
  N_counts <- totals - Ab_counts
  output <- list(Ab_counts = Ab_counts, N_counts = N_counts)
  return(output)
}
|
# Get saleprob, sdlog moments given data and transmat
# Computes two calibration moments at tau = 0:
#   1) sd(price)/mean(price) across the steady-state seller distribution,
#   2) the steady-state average sale probability.
# NOTE(review): relies on solve_value_function()/solve_steadystate() defined
# elsewhere; `data[, ss %*% best_saleprob]` looks like data.table syntax --
# confirm `data` arrives as a data.table.
get_moments = function(data, transmat, params) {
data = solve_value_function(tau = 0, data = data, transmat = transmat, params = params)
data = solve_steadystate(data = data, transmat = transmat, efficient = 0)
seller_dist = data$seller_dist
prices = data$best_p
# Price-dispersion moment: coefficient of variation of posted prices,
# weighted by the steady-state seller distribution.
mean_price = sum(seller_dist * prices)
var_price = sum((prices - mean_price)^2 * seller_dist)
sd_price = sqrt(var_price)
sdmean_moment = sd_price / mean_price
saleprob_moment = data[, ss %*% best_saleprob]
return(c(sdmean_moment, saleprob_moment))
}
| /functions/get_moments.R | no_license | anthonyleezhang/dlcode | R | false | false | 602 | r | # Get saleprob, sdlog moments given data and transmat
get_moments = function(data, transmat, params) {
data = solve_value_function(tau = 0, data = data, transmat = transmat, params = params)
data = solve_steadystate(data = data, transmat = transmat, efficient = 0)
seller_dist = data$seller_dist
prices = data$best_p
mean_price = sum(seller_dist * prices)
var_price = sum((prices - mean_price)^2 * seller_dist)
sd_price = sqrt(var_price)
sdmean_moment = sd_price / mean_price
saleprob_moment = data[, ss %*% best_saleprob]
return(c(sdmean_moment, saleprob_moment))
}
|
setwd("E:\\Go geek\\Exploratory data analysis")  # NOTE(review): machine-specific absolute path; breaks portability
#-------------Create Dataframe
# Semicolon-separated file; '?' marks missing values.
data <- read.table('data.txt', sep=';', header=T,
colClasses = c('character', 'character', 'numeric',
'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric'),
na.strings='?')
#str(data)
#summary(data)
#------------Subset to get only the required observations
# Keep only 2007-02-01 and 2007-02-02 (Date is stored as a d/m/Y string).
data <- subset(data,(
as.Date(Date,format="%d/%m/%Y") >= as.Date("2007-02-01") &
as.Date(Date,format="%d/%m/%Y") <= as.Date("2007-02-02")
))
#table(data$Date)
#-------------Converting to standard date format
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
#-------------Timestamp creation
data$DateTime <- strptime(paste(data$Date, data$Time),"%Y-%m-%d %H:%M:%S") | /CleanData.R | no_license | rgopikrishnan91/ExData_Plotting1 | R | false | false | 829 | r | setwd("E:\\Go geek\\Exploratory data analysis")
#-------------Create Dataframe
data <- read.table('data.txt', sep=';', header=T,
colClasses = c('character', 'character', 'numeric',
'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric'),
na.strings='?')
#str(data)
#summary(data)
#------------Subset to get only the required observations
data <- subset(data,(
as.Date(Date,format="%d/%m/%Y") >= as.Date("2007-02-01") &
as.Date(Date,format="%d/%m/%Y") <= as.Date("2007-02-02")
))
#table(data$Date)
#-------------Converting to standard date format
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
#-------------Timestamp creation
data$DateTime <- strptime(paste(data$Date, data$Time),"%Y-%m-%d %H:%M:%S") |
#MONTE CARLO SIMULATION FUNCTION
## Draw a simulation
# Draw one Monte Carlo iteration: sample each uncertain parameter from its
# prior, run the model once, and return both the draw and the simulation.
#
# Arguments: time = time grid; init_state = initial state; parms = baseline
# parameter vector (sampled entries are overwritten); options = simulation
# options; montecarlo/iter_num kept for interface compatibility (the call
# below always passes montecarlo = T and iter_num is unused here).
draw_simulation <- function(time, init_state, parms, options, montecarlo, iter_num) {
  # Drawing of the parameters
  parms['eta_p'] <- draw_eta()
  parms['markup'] <- draw_markup()
  parms['gamma'] <- draw_gamma()
  parms['alpha'] <- draw_alpha()
  parms['S'] <- draw_ECS()
  parms['CO2_UP_preind'] <- draw_C_UP_preind()
  # Run of a simulation; try() so a failed run is returned, not fatal
  Draw_simu <- try(simulation(time = time,
                              init_state = init_state,
                              parms = parms,
                              options = options,
                              montecarlo = T))
  # BUG FIX: the c(...) previously closed after 'gamma', so 'alpha', 'S'
  # and 'CO2_UP_preind' became separate unnamed list elements and were
  # dropped by callers reading $draw. All six draws now live under 'draw'.
  return(list(draw = c(parms['eta_p'], parms['markup'], parms['gamma'],
                       parms['alpha'], parms['S'], parms['CO2_UP_preind']),
              sim = Draw_simu))
}
## Process several iterations of drawings
## Run `iter` Monte Carlo draws of the model in parallel and collect the
## results (foreach with .combine='cbind': one column-entry per draw, each a
## list holding the parameter draw and the simulation output).
## Defaults Time, IC, Parms, Options are read from the calling environment.
data_simulations <- function(iter = 10,
                             time = Time,
                             init_state = IC,
                             parms = Parms,
                             options = Options,
                             montecarlo = T) {
  # Begin of calculation (wall-clock timing)
  start_time <- proc.time()
  # Start parallel computing: one worker per core, leaving one core free
  mc.cores <- getOption("mc.cores", max(1, detectCores()-1))
  cl <- makeCluster(mc.cores)
  registerDoParallel(cl)
  # Computation; `functions` is a global character vector naming the
  # helpers exported to the workers (see .export below)
  data <- foreach(k=1:iter, .packages=c('deSolve','rmutil'),
                  .export = functions, .combine='cbind') %dopar% {
                    drawing <- draw_simulation(time = time,
                                               init_state = init_state,
                                               parms = parms,
                                               options = options,
                                               montecarlo = montecarlo,
                                               iter_num = k)
                    return(list(c(draw = drawing$draw, sim = drawing$sim)))
                  }
  # Stop parallel computing
  stopCluster(cl)
  # End of calculation: report elapsed time
  end_time <- proc.time() - start_time
  print(end_time)
  # Returning
  return(data)
}
# Register the sampling helpers in the global `functions` vector so that
# data_simulations() can export them to the parallel workers
# (via .export = functions in its foreach() call).
functions <- c(functions,
               'draw_eta',
               'draw_markup',
               'draw_gamma',
               'draw_alpha',
               'draw_ECS',
               'draw_C_UP_preind',
               'draw_simulation',
               'get_ggamma')
| /full_model/monte_carlo_sim.R | no_license | shizelong1985/econ-climate-sensitivity | R | false | false | 2,568 | r | #MONTE CARLO SIMULATION FUNCTION
## Draw a simulation
# Draw one Monte Carlo iteration: sample each uncertain parameter from its
# prior, run the model once, and return both the draw and the simulation.
#
# Arguments: time = time grid; init_state = initial state; parms = baseline
# parameter vector (sampled entries are overwritten); options = simulation
# options; montecarlo/iter_num kept for interface compatibility (the call
# below always passes montecarlo = T and iter_num is unused here).
draw_simulation <- function(time, init_state, parms, options, montecarlo, iter_num) {
  # Drawing of the parameters
  parms['eta_p'] <- draw_eta()
  parms['markup'] <- draw_markup()
  parms['gamma'] <- draw_gamma()
  parms['alpha'] <- draw_alpha()
  parms['S'] <- draw_ECS()
  parms['CO2_UP_preind'] <- draw_C_UP_preind()
  # Run of a simulation; try() so a failed run is returned, not fatal
  Draw_simu <- try(simulation(time = time,
                              init_state = init_state,
                              parms = parms,
                              options = options,
                              montecarlo = T))
  # BUG FIX: the c(...) previously closed after 'gamma', so 'alpha', 'S'
  # and 'CO2_UP_preind' became separate unnamed list elements and were
  # dropped by callers reading $draw. All six draws now live under 'draw'.
  return(list(draw = c(parms['eta_p'], parms['markup'], parms['gamma'],
                       parms['alpha'], parms['S'], parms['CO2_UP_preind']),
              sim = Draw_simu))
}
## Process several iterations of drawings
## Run `iter` Monte Carlo draws of the model in parallel and collect the
## results (foreach with .combine='cbind': one column-entry per draw, each a
## list holding the parameter draw and the simulation output).
## Defaults Time, IC, Parms, Options are read from the calling environment.
data_simulations <- function(iter = 10,
                             time = Time,
                             init_state = IC,
                             parms = Parms,
                             options = Options,
                             montecarlo = T) {
  # Begin of calculation (wall-clock timing)
  start_time <- proc.time()
  # Start parallel computing: one worker per core, leaving one core free
  mc.cores <- getOption("mc.cores", max(1, detectCores()-1))
  cl <- makeCluster(mc.cores)
  registerDoParallel(cl)
  # Computation; `functions` is a global character vector naming the
  # helpers exported to the workers (see .export below)
  data <- foreach(k=1:iter, .packages=c('deSolve','rmutil'),
                  .export = functions, .combine='cbind') %dopar% {
                    drawing <- draw_simulation(time = time,
                                               init_state = init_state,
                                               parms = parms,
                                               options = options,
                                               montecarlo = montecarlo,
                                               iter_num = k)
                    return(list(c(draw = drawing$draw, sim = drawing$sim)))
                  }
  # Stop parallel computing
  stopCluster(cl)
  # End of calculation: report elapsed time
  end_time <- proc.time() - start_time
  print(end_time)
  # Returning
  return(data)
}
# Register the sampling helpers in the global `functions` vector so that
# data_simulations() can export them to the parallel workers
# (via .export = functions in its foreach() call).
functions <- c(functions,
               'draw_eta',
               'draw_markup',
               'draw_gamma',
               'draw_alpha',
               'draw_ECS',
               'draw_C_UP_preind',
               'draw_simulation',
               'get_ggamma')
|
#' menu-like function for interactive or non-interactive sessions
#' which allows multiple choices as well
#'
#' Returns NA if the user inserted the usual 0 to exit.
#'
#' @param choices vector of possible choices
#' @param title optional title printed above the menu
#' @param multiple can more than one item be selected?
#' @param return what to return: 'values' (selected choices, the
#'     default) or 'indexes'. If only 0 is selected (to exit), NA is
#'     returned
#' @param strict allow only selectable indexes to be chosen
#' @export
menu2 <- function(choices, title = NULL, multiple = FALSE,
                  return = c('values', 'indexes'),
                  strict = FALSE)
{
    return <- match.arg(return)
    available_ind <- seq_along(choices)
    avail_with_0 <- c(0, available_ind)
    the_menu <- paste(available_ind, choices, sep = '. ', collapse = "\n")
    interactive <- interactive()
    ## in batch mode read from the process stdin via file('stdin')
    con <- if (interactive) stdin() else file('stdin')
    selection_msg <- if (interactive){
        if (multiple)
            "Selection (values as '1, 2-3, 6') or 0 to exit: "
        else {
            "Selection (0 to exit): "
        }
    } else {
        if (multiple){
            "a) Insert selection (values as '1, 2-3, 6') or 0 to exit; b) [ENTER]; c) [Ctrl+D]\n"
        } else {
            "a) Selection (0 to exit); b) [ENTER]; c) [Ctrl+D]\n "
        }
    }
    ## print the menu and get the user's selection
    if (!is.null(title)) cat(title, "\n\n")
    cat(the_menu, '\n\n')
    cat(selection_msg)
    line <- readLines(con = con, n = 1)
    ind <- line_to_numbers(line)
    ind <- if (multiple) ind else ind[1]
    if (strict){
        ## keep prompting until every index is among the selectable
        ## ones, or 0 to exit
        while (!all(ind %in% avail_with_0)){
            not_in <- ind[! (ind %in% avail_with_0)]
            cat("Not valid insertion:", not_in, "\n")
            cat(selection_msg)
            line <- readLines(con = con, n = 1)
            ind <- line_to_numbers(line)
            ind <- if (multiple) ind else ind[1]
        }
    } else {
        ## do not re-prompt: keep whatever is usable, i.e. the
        ## indexes that are 0 or within the available range
        allowed <- ind %in% avail_with_0
        any_nin_avail <- any(! allowed)
        if (any_nin_avail){
            not_allowed <- ind[!allowed]
            warning("Removed some values (not 0 or specified possibilities:", not_allowed, ".")
            ind <- ind[allowed]
        }
    }
    ## return values (or indexes) or NA if nothing was chosen
    ind <- unique(ind)
    ind <- ind %without% 0
    if (length(ind) > 0) {
        if (return == 'values') choices[ind] else ind
    } else NA
}
# Expand a selection string such as "1 2-3, 4, 6-10" into the integer
# vector c(1:3, 4, 6:10); commas and spaces both act as separators.
line_to_numbers <- function(x){
    tokens <- strsplit(gsub(",", " ", x), " ")[[1]]
    unlist(lapply(tokens, line_to_numbers_worker))
}
# Expand one selection token: '' -> NULL, "a-b" -> a:b, "n" -> n.
line_to_numbers_worker <- function(x) {
    if (x == '') {
        NULL
    } else if (grepl("\\d-\\d", x)) {
        # BUG FIX: gsub() returns character strings and seq() requires
        # finite numbers, so seq(from = "2", to = "4") used to error with
        # "'from' must be a finite number"; convert before calling seq().
        first <- as.integer(gsub("(\\d+)-\\d+" , "\\1", x))
        second <- as.integer(gsub("\\d+-(\\d+)", "\\1", x))
        seq(from = first, to = second)
    } else{
        as.integer(x)
    }
}
## TODO: finish/refactor the line-to-numbers parsing helpers
## TODO: test both in batch mode and interactively
| /R/menu2.R | no_license | lbraglia/lbmisc | R | false | false | 3,442 | r | #' menu-like function for interactive or non-interactive sections
#' which allows multiple choices as well
#'
#' return NA if the user inserted the usual 0 to exit
#'
#' @param choiches vector of possible choiches
#' @param title optional
#' @param multiple can more than one item be selected?
#' @param return what to return values (selected choiches given, by
#' default), or indexes. If only 0 is selected (to exit), NA is
#' returned
#' @param strict allow only selectable index to be choosen
#' @export
## menu-like prompt for interactive or batch sessions; supports multiple
## choices. Returns the selected choices (or their indexes), or NA if the
## user only entered 0 to exit.
menu2 <- function(choices, title = NULL, multiple = FALSE,
                  return = c('values', 'indexes'),
                  strict = FALSE)
{
    return <- match.arg(return)
    available_ind <- seq_along(choices)
    avail_with_0 <- c(0, available_ind)
    the_menu <- paste(available_ind, choices, sep = '. ', collapse = "\n")
    interactive <- interactive()
    ## in batch mode read from the process stdin via file('stdin')
    con <- if (interactive) stdin() else file('stdin')
    selection_msg <- if (interactive){
        if (multiple)
            "Selection (values as '1, 2-3, 6') or 0 to exit: "
        else {
            "Selection (0 to exit): "
        }
    } else {
        if (multiple){
            "a) Insert selection (values as '1, 2-3, 6') or 0 to exit; b) [ENTER]; c) [Ctrl+D]\n"
        } else {
            "a) Selection (0 to exit); b) [ENTER]; c) [Ctrl+D]\n "
        }
    }
    ## print the menu and get the user's selection
    if (!is.null(title)) cat(title, "\n\n")
    cat(the_menu, '\n\n')
    cat(selection_msg)
    line <- readLines(con = con, n = 1)
    ind <- line_to_numbers(line)
    ind <- if (multiple) ind else ind[1]
    if (strict){
        ## keep prompting until every index is among the selectable
        ## ones, or 0 to exit
        while (!all(ind %in% avail_with_0)){
            not_in <- ind[! (ind %in% avail_with_0)]
            cat("Not valid insertion:", not_in, "\n")
            cat(selection_msg)
            line <- readLines(con = con, n = 1)
            ind <- line_to_numbers(line)
            ind <- if (multiple) ind else ind[1]
        }
    } else {
        ## do not re-prompt: keep whatever is usable, i.e. the
        ## indexes that are 0 or within the available range
        allowed <- ind %in% avail_with_0
        any_nin_avail <- any(! allowed)
        if (any_nin_avail){
            not_allowed <- ind[!allowed]
            warning("Removed some values (not 0 or specified possibilities:", not_allowed, ".")
            ind <- ind[allowed]
        }
    }
    ## return values (or indexes) or NA if nothing was chosen
    ind <- unique(ind)
    ind <- ind %without% 0
    if (length(ind) > 0) {
        if (return == 'values') choices[ind] else ind
    } else NA
}
# Expand a selection string such as "1 2-3, 4, 6-10" into the integer
# vector c(1:3, 4, 6:10); commas and spaces both act as separators.
line_to_numbers <- function(x){
    tokens <- strsplit(gsub(",", " ", x), " ")[[1]]
    unlist(lapply(tokens, line_to_numbers_worker))
}
# Expand one selection token: '' -> NULL, "a-b" -> a:b, "n" -> n.
line_to_numbers_worker <- function(x) {
    if (x == '') {
        NULL
    } else if (grepl("\\d-\\d", x)) {
        # BUG FIX: gsub() returns character strings and seq() requires
        # finite numbers, so seq(from = "2", to = "4") used to error with
        # "'from' must be a finite number"; convert before calling seq().
        first <- as.integer(gsub("(\\d+)-\\d+" , "\\1", x))
        second <- as.integer(gsub("\\d+-(\\d+)", "\\1", x))
        seq(from = first, to = second)
    } else{
        as.integer(x)
    }
}
## TODO: finish/refactor the line-to-numbers parsing helpers
## TODO: test both in batch mode and interactively
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.