content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
#' Label the node with the name of the cell type in the ICELLNET network.
#'
#' @description Label the node with the name of the cell type in the ICELLNET
#'   network. This is a thin wrapper around [ggplot2::layer()] using the
#'   project-defined `StatNodes` stat so that labels are placed on the
#'   network's node positions.
#'
#' @param mapping Set of aesthetic mappings created by `ggplot2::aes()`.
#' @param data The data to be displayed in this layer.
#' @param position Position adjustment. Mutually exclusive with
#'   `nudge_x`/`nudge_y`.
#' @param parse If `TRUE`, labels are parsed as plotmath expressions.
#' @param nudge_x Horizontal adjustment used to nudge labels.
#' @param nudge_y Vertical adjustment used to nudge labels.
#' @param label.padding Amount of padding around the label.
#' @param label.r Radius of the label's rounded corners.
#' @param label.size Size of the label border, in mm.
#' @param na.rm If `FALSE`, missing values are removed with a warning.
#' @param show.legend To show the legend or not.
#' @param inherit.aes If `FALSE`, overrides the default aesthetics rather
#'   than combining with them.
#' @param ... Other arguments passed on to [ggplot2::layer()].
#'
#' @examples
#' \dontrun{geom_node_label(aes(label = Cell_type, fill = Cell_type),
#'   size = rel(6), fontface = "bold")}
geom_node_label <- function(mapping = NULL, data = NULL, position = "identity",
                            ..., parse = FALSE, nudge_x = 0, nudge_y = 0,
                            label.padding = unit(0.25, "lines"),
                            label.r = unit(0.15, "lines"),
                            label.size = 0.25, na.rm = FALSE,
                            show.legend = NA, inherit.aes = TRUE) {
  # `position` and the nudge arguments are mutually exclusive because
  # nudging is implemented by constructing a position_nudge() object.
  if (!missing(nudge_x) || !missing(nudge_y)) {
    if (!missing(position)) {
      stop("Specify either `position` or `nudge_x`/`nudge_y`", call. = FALSE)
    }
    position <- ggplot2::position_nudge(nudge_x, nudge_y)
  }
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = StatNodes,   # project-defined stat: positions labels on nodes
    geom = GeomLabel,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      parse = parse,
      label.padding = label.padding,
      label.r = label.r,
      label.size = label.size,
      na.rm = na.rm,
      ...
    )
  )
}
/icellnet/R/geom_node_label.R
no_license
soumelis-lab/ICELLNET
R
false
false
1,858
r
# NOTE(review): duplicate copy of geom_node_label() (ICELLNET) with all
# newlines collapsed onto a single physical line. Because the line begins
# with a roxygen marker (#'), the entire collapsed line is a comment as
# stored here; the definition itself wraps ggplot2::layer() with the
# project-defined StatNodes/GeomLabel, and rejects combining `position`
# with `nudge_x`/`nudge_y`.
#' Label the node with the name of the cell type in the ICELLNET network. #' #' @description Label the node with the name of the cell type in the ICELLNET network. #' #' @param mapping mapping #' @param data data #' @param position position #' @param parse parse #' @param nudge_x nudge_x #' @param nudge_y nudge_y #' @param label.padding label.padding #' @param label.r label.r #' @param label.size label.size #' @param na.rm na.rm #' @param show.legend to show the legend or not #' @param inherit.aes inherit.aes #' @param ... #' #' @examples #' \dontrun{geom_node_label(aes(label = Cell_type, fill = Cell_type), size = rel(6), fontface = "bold") } geom_node_label = function (mapping = NULL, data = NULL, position = "identity", ..., parse = FALSE, nudge_x = 0, nudge_y = 0, label.padding = unit(0.25, "lines"), label.r = unit(0.15, "lines"), label.size = 0.25, na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) { if (!missing(nudge_x) || !missing(nudge_y)) { if (!missing(position)) { stop("Specify either `position` or `nudge_x`/`nudge_y`", call. = FALSE) } position <- ggplot2::position_nudge(nudge_x, nudge_y) } ggplot2::layer( data = data, mapping = mapping, stat = StatNodes, geom = GeomLabel, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list( parse = parse, label.padding = label.padding, label.r = label.r, label.size = label.size, na.rm = na.rm, ... ) ) }
# ---------------------------------------------------------------------------
# BootCL: bootstrap test for chromosomal clustering of a gene list.
#
# The package ships one chromosome-location dataset per supported chip and,
# for find.ID(), one probe-annotation dataset per chip. The original code
# selected these with a ~20-branch if/else chain repeated in three separate
# functions; the lookup tables below replace those chains.
# ---------------------------------------------------------------------------

# Chip name -> name of the chromosome-location dataset shipped with BootCL.
.BootCL.chrom.data <- c(
  HG.U133A = "HG.U133A", HG.U133B = "HG.U133B",
  MG.U74Av2 = "MG.U74Av2", MG.U74Bv2 = "MG.U74Bv2", MG.U74Cv2 = "MG.U74Cv2",
  RG.U34A = "RG.U34A", RG.U34B = "RG.U34B", RG.U34C = "RG.U34C",
  Hgfocus = "Hgfocus", Hgu133plus2 = "Hgu133plus2", Hgu133A2 = "Hgu133A2",
  Mouse4302 = "Mouse4302", Mouse430A2 = "Mouse430A2",
  hwgcod = "hwgcod", mwgcod = "mwgcod", rwgcod = "rwgcod",
  hgug4112a = "hgug4112a", hgug4110b = "hgug4110b", mgug4122a = "mgug4122a",
  chicken = "chickenChip"
)

# Chip name -> probe-annotation dataset (probe id -> accession), find.ID().
.BootCL.annot.data <- c(
  HG.U133A = "affy.hgu133a", HG.U133B = "affy.hgu133b",
  MG.U74Av2 = "affy.mgu74av2", MG.U74Bv2 = "affy.mgu74bv2",
  MG.U74Cv2 = "affy.mgu74cv2",
  RG.U34A = "affy.rgu34a", RG.U34B = "affy.rgu34b", RG.U34C = "affy.rgu34c",
  Hgfocus = "affy.hgfocus", Hgu133plus2 = "affy.hgu133plus2",
  Hgu133A2 = "affy.hgu133a2",
  Mouse4302 = "affy.mouse4302", Mouse430A2 = "affy.mouse430a2",
  hwgcod = "codelink.hwgcod", mwgcod = "codelink.mwgcod",
  rwgcod = "codelink.rwgcod",
  hgug4112a = "agilent.hgug4112a", hgug4110b = "agilent.hgug4110b",
  mgug4122a = "agilent.mgug4122a",
  chicken = "affy.chickenChip"
)

# Load one packaged dataset and return it, without polluting .GlobalEnv
# (the original data() calls loaded every dataset into the user workspace).
.BootCL.load <- function(dataset) {
  env <- new.env(parent = emptyenv())
  data(list = dataset, envir = env)
  get(dataset, envir = env)
}

# Resolve `chip` to its chromosome-location table, stopping on unknown chip
# names. (The original merely print()ed a message and then failed later
# with an obscure "object not found" error.)
.BootCL.chromosome.list <- function(chip) {
  dataset <- .BootCL.chrom.data[chip]
  if (is.na(dataset)) {
    stop("Please enter VALID chip name!", call. = FALSE)
  }
  .BootCL.load(dataset)
}

# Bootstrap null distribution of the clustering statistic.
#
# chip                 : chip name (see names(.BootCL.chrom.data)).
# sampling.count       : number of genes drawn per bootstrap replicate.
# total.sampling.count : number of bootstrap replicates.
# windowsize           : clustering window in bp; defaults to the packaged
#                        per-chip value in total.count.ws.
# Returns list(dist = integer vector of bootstrap counts, windowsize).
BootCL.distribution <- function(chip, sampling.count,
                                total.sampling.count = 10000,
                                windowsize = NULL) {
  # BUG FIX: the original called sample(1, 1:32767), i.e. it passed the
  # range as the `size` argument. The intent is one random seed in 1:32767.
  seed <- sample(1:32767, 1)
  Chromosome.List <- .BootCL.chromosome.list(chip)
  list.whole.count <- nrow(Chromosome.List)
  total.count.ws <- .BootCL.load("total.count.ws")
  if (is.null(windowsize)) {
    windowsize <- round(total.count.ws[chip, ]$windowsize)
  }
  diffcount <- rep(0, total.sampling.count)
  # Compiled routine fills `diff` with one clustering count per replicate.
  RN1 <- .C("distribution",
            as.integer(seed),
            as.integer(sampling.count),
            as.integer(list.whole.count),
            as.integer(total.sampling.count),
            diff = as.integer(diffcount),
            as.integer(Chromosome.List$CHR.NAME),
            as.integer(Chromosome.List$START),
            as.integer(Chromosome.List$END),
            ws = as.integer(windowsize),
            PACKAGE = "BootCL")
  list(dist = RN1$diff, windowsize = RN1$ws)
}

# Observed clustering statistic for the user's gene list: the number of
# chromosome-adjacent gene pairs whose gap is below `windowsize`.
#
# ID.data : list(Access.list, Affy.list) as produced by find.ID().
# Returns counts plus the per-pair 0/1 state vector and sorted gene ids.
BootCL.Statistic <- function(chip, ID.data, windowsize = NULL) {
  Chromosome.List <- .BootCL.chromosome.list(chip)
  diff.count <- 0
  select.id <- NULL
  Affy.ID <- NULL
  # Map each accession onto (possibly several) chromosome-table rows,
  # replicating the probe id once per matched row.
  for (i in seq_along(ID.data$Affy.list)) {
    temp.id <- which(as.character(Chromosome.List$NAME) ==
                       as.character(ID.data$Access.list[i]))
    select.id <- c(select.id, temp.id)
    Affy.ID <- c(Affy.ID,
                 rep(as.character(ID.data$Affy.list[i]), length(temp.id)))
  }
  select.list <- Chromosome.List[select.id, ]
  # Order by chromosome, then by start position within each chromosome.
  chsort.id <- sort.list(select.list$CHR.NAME)
  select.list <- select.list[chsort.id, ]
  Affy.ID <- Affy.ID[chsort.id]
  ch.list <- table(select.list$CHR.NAME)
  ch.name <- names(ch.list)
  sortCH.list <- NULL
  sortAffy.ID <- NULL
  for (i in seq_along(ch.list)) {
    temp.list <- select.list[select.list$CHR.NAME == ch.name[i], ]
    temp.Affy <- Affy.ID[select.list$CHR.NAME == ch.name[i]]
    tempsort.id <- sort.list(temp.list$START)
    temp.list <- temp.list[tempsort.id, ]
    sortCH.list <- rbind(sortCH.list, temp.list)
    sortAffy.ID <- c(sortAffy.ID, as.character(temp.Affy[tempsort.id]))
  }
  total.count.ws <- .BootCL.load("total.count.ws")
  if (is.null(windowsize)) {
    windowsize <- round(total.count.ws[chip, ]$windowsize)
  }
  n <- nrow(sortCH.list)
  conseq.state <- rep(0, n - 1)
  # Count same-chromosome neighbours closer than the window.
  for (i in seq_len(n - 1)) {
    if (sortCH.list$CHR.NAME[i + 1] == sortCH.list$CHR.NAME[i]) {
      diff <- sortCH.list$START[i + 1] - sortCH.list$END[i]
      # if (diff > 0 & diff < windowsize)   # original alternative, kept
      if (diff < windowsize) {
        diff.count <- diff.count + 1
        conseq.state[i] <- 1
      }
    }
  }
  list(Diff.count = diff.count, windowsize = windowsize, sampling.count = n,
       conseq.state = conseq.state, Affy.ID = sortAffy.ID,
       Access.ID = sortCH.list$NAME)
}

# One-sided bootstrap p-value: fraction of bootstrap replicates whose
# clustering count is >= the observed count.
BootCL.Pvalue <- function(Bstat, distribution) {
  Pvalue <- sum(distribution$dist >= Bstat$Diff.count) /
    length(distribution$dist)
  list(Diff.count = Bstat$Diff.count, Pvalue = Pvalue,
       dist = distribution$dist, windowsize = distribution$windowsize,
       sampling.count = Bstat$sampling.count,
       conseq.state = Bstat$conseq.state, Affy.ID = Bstat$Affy.ID,
       Access.ID = Bstat$Access.ID)
}

# Split the sorted gene list into runs of clustered neighbours and keep the
# runs of at least `cutoff` genes.
#
# affyID.flag : if TRUE, report probe ids; otherwise report accessions.
BootCL.print <- function(BootP, cutoff = 3, affyID.flag = TRUE) {
  print.list <- NULL
  count <- 1
  gene.name <- if (affyID.flag) BootP$Affy.ID else BootP$Access.ID
  # Each 0 in the pairwise state vector closes the current run and emits
  # it as one group.
  # NOTE(review): a run that extends through the last element is never
  # flushed -- behaviour kept from the original; confirm intent.
  for (i in seq_along(BootP$conseq.state)) {
    if (BootP$conseq.state[i] == 1) {
      count <- count + 1
    } else {
      temp <- as.character(gene.name[(i - count + 1):i])
      print.list <- c(print.list, list(temp))
      count <- 1
    }
  }
  select.list <- NULL
  for (i in seq_along(print.list)) {
    if (length(print.list[[i]]) >= cutoff) {
      select.list <- c(select.list, list(print.list[[i]]))
    }
  }
  list(print.list = print.list, select.list = select.list)
}

# Translate the user's gene list into accessions for BootCL.Statistic().
#
# sample      : vector of ids, or a matrix/data.frame whose first column
#               holds the ids.
# affyID.flag : if TRUE, `sample` holds platform probe ids to be mapped
#               through the chip annotation table; if FALSE, `sample`
#               already holds accessions.
# Returns list(Access.list, Affy.list).
find.ID <- function(chip, sample, affyID.flag = TRUE) {
  annot <- .BootCL.annot.data[chip]
  if (is.na(annot)) {
    stop("Please enter VALID chip name!", call. = FALSE)
  }
  # Annotation table: column 1 = probe id, column 2 = accession.
  xx <- .BootCL.load(annot)
  # Codelink accessions carry a ".suffix" that must be stripped before
  # matching against the chromosome lists.
  if (chip %in% c("hwgcod", "mwgcod", "rwgcod")) {
    xx[, 2] <- apply(as.matrix(xx[, 2]), 1,
                     function(x) strsplit(x, split = ".", fixed = TRUE)[[1]][1])
  }
  rownames(xx) <- as.character(xx[, 1])
  # Only the first column of a matrix/data.frame `sample` is used.
  if (!is.null(dim(sample))) {
    sample <- sample[, 1]
  }
  if (affyID.flag == TRUE) {
    Access.list <- xx[as.character(sample), 2]
    if (sum(is.na(Access.list)) == 0) {
      probename.list <- sample
    } else {
      # Drop probes with no accession in the annotation table.
      probename.list <- sample[-which(is.na(Access.list))]
    }
  } else {
    Access.list <- sample
    probename.list <- NULL
  }
  list(Access.list = Access.list, Affy.list = probename.list)
}

# Histogram of the bootstrap distribution with the observed statistic and
# its p-value annotated.
BootCL.plot <- function(BootP, xrange = NULL, freq.bootCL = NULL) {
  if (is.null(xrange)) {
    xrange <- c(0, max(BootP$dist) + 5)
  }
  hist(BootP$dist, xlim = xrange, main = "Bootstrapping distribution",
       freq = freq.bootCL,
       xlab = paste("P value : ", as.character(BootP$Pvalue)), ylab = "",
       breaks = seq(0, BootP$sampling.count))
  abline(v = BootP$Diff.count, lty = 1, col = 2, lwd = 2)
  # The original wrote BootP$Diff, relying on `$` partial matching; use
  # the full component name.
  text(BootP$Diff.count, -10, as.character(BootP$Diff.count),
       col = "blue", cex = 1)
}
/R/boot.R
no_license
cran/BootCL
R
false
false
11,986
r
# NOTE(review): duplicate copy of the BootCL package source (boot.R) with
# newlines collapsed into six long physical lines. Code kept byte-identical;
# only comment lines are added at statement boundaries.
#
# BootCL.distribution(): bootstrap null distribution of the clustering
# statistic via the compiled .C("distribution") routine.
# NOTE(review): seed<-sample(1,1:32767) passes the range as the `size`
# argument of sample(); the intent appears to be sample(1:32767, 1) -- one
# random seed. Confirm and fix upstream.
# NOTE(review): an unknown `chip` only print()s a message, then the code
# fails later with "object 'Chromosome.List' not found" -- consider stop().
BootCL.distribution<-function(chip,sampling.count,total.sampling.count=10000, windowsize=NULL) { seed<-sample(1,1:32767) if(chip=="HG.U133A") { data(HG.U133A); Chromosome.List<-HG.U133A;list.whole.count<-nrow(Chromosome.List) } else if(chip=="HG.U133B") { data(HG.U133B); Chromosome.List<-HG.U133B;list.whole.count<-nrow(Chromosome.List) } else if(chip=="MG.U74Av2") { data(MG.U74Av2); Chromosome.List<-MG.U74Av2;list.whole.count<-nrow(Chromosome.List) } else if(chip=="MG.U74Bv2") { data(MG.U74Bv2); Chromosome.List<-MG.U74Bv2;list.whole.count<-nrow(Chromosome.List) } else if(chip=="MG.U74Cv2") { data(MG.U74Cv2); Chromosome.List<-MG.U74Cv2;list.whole.count<-nrow(Chromosome.List) } else if(chip=="RG.U34A") { data(RG.U34A); Chromosome.List<-RG.U34A;list.whole.count<-nrow(Chromosome.List) } else if(chip=="RG.U34B") { data(RG.U34B); Chromosome.List<-RG.U34B;list.whole.count<-nrow(Chromosome.List) } else if(chip=="RG.U34C") { data(RG.U34C); Chromosome.List<-RG.U34C;list.whole.count<-nrow(Chromosome.List) } else if(chip=="Hgfocus") { data(Hgfocus); Chromosome.List<-Hgfocus;list.whole.count<-nrow(Chromosome.List) } else if(chip=="Hgu133plus2") { data(Hgu133plus2); Chromosome.List<-Hgu133plus2;list.whole.count<-nrow(Chromosome.List) } else if(chip=="Hgu133A2") { data(Hgu133A2); Chromosome.List<-Hgu133A2;list.whole.count<-nrow(Chromosome.List) } else if(chip=="Mouse4302") { data(Mouse4302); Chromosome.List<-Mouse4302;list.whole.count<-nrow(Chromosome.List) } else if(chip=="Mouse430A2") { data(Mouse430A2); Chromosome.List<-Mouse430A2;list.whole.count<-nrow(Chromosome.List) } else if(chip=="hwgcod") { data(hwgcod); Chromosome.List<-hwgcod;list.whole.count<-nrow(Chromosome.List) } else if(chip=="mwgcod") { data(mwgcod); Chromosome.List<-mwgcod;list.whole.count<-nrow(Chromosome.List) } else if(chip=="rwgcod") { data(rwgcod); Chromosome.List<-rwgcod;list.whole.count<-nrow(Chromosome.List) } else if(chip=="hgug4112a") { data(hgug4112a); 
# (continues BootCL.distribution's chip dispatch, then begins
# BootCL.Statistic(): the observed clustering statistic -- number of
# same-chromosome adjacent gene pairs with gap < windowsize.)
Chromosome.List<-hgug4112a;list.whole.count<-nrow(Chromosome.List) } else if(chip=="hgug4110b") { data(hgug4110b); Chromosome.List<-hgug4110b;list.whole.count<-nrow(Chromosome.List) } else if(chip=="mgug4122a") { data(mgug4122a); Chromosome.List<-mgug4122a;list.whole.count<-nrow(Chromosome.List) } else if(chip=="chicken") { data(chickenChip); Chromosome.List<-chickenChip;list.whole.count<-nrow(Chromosome.List) } else { print("Please enter VALID chip name!") } data(total.count.ws) if(is.null(windowsize)) { windowsize<-round(total.count.ws[chip,]$windowsize)} diffcount<-rep(0,total.sampling.count) RN1 = .C("distribution", as.integer(seed), as.integer(sampling.count), as.integer(list.whole.count), as.integer(total.sampling.count), diff=as.integer(diffcount), as.integer(Chromosome.List$CHR.NAME), as.integer(Chromosome.List$START), as.integer(Chromosome.List$END), ws=as.integer(windowsize), PACKAGE="BootCL") return(list(dist=RN1$diff,windowsize=RN1$ws)) } BootCL.Statistic<-function(chip,ID.data,windowsize=NULL) { if(chip=="HG.U133A") { data(HG.U133A); Chromosome.List<-HG.U133A } else if(chip=="HG.U133B") { data(HG.U133B); Chromosome.List<-HG.U133B } else if(chip=="MG.U74Av2") { data(MG.U74Av2); Chromosome.List<-MG.U74Av2 } else if(chip=="MG.U74Bv2") { data(MG.U74Bv2); Chromosome.List<-MG.U74Bv2 } else if(chip=="MG.U74Cv2") { data(MG.U74Cv2); Chromosome.List<-MG.U74Cv2 } else if(chip=="RG.U34A") { data(RG.U34A); Chromosome.List<-RG.U34A } else if(chip=="RG.U34B") { data(RG.U34B); Chromosome.List<-RG.U34B } else if(chip=="RG.U34C") { data(RG.U34C); Chromosome.List<-RG.U34C } else if(chip=="Hgfocus") { data(Hgfocus); Chromosome.List<-Hgfocus } else if(chip=="Hgu133plus2") { data(Hgu133plus2); Chromosome.List<-Hgu133plus2 } else if(chip=="Hgu133A2") { data(Hgu133A2); Chromosome.List<-Hgu133A2 } else if(chip=="Mouse4302") { data(Mouse4302); Chromosome.List<-Mouse4302 } else if(chip=="Mouse430A2") { data(Mouse430A2); Chromosome.List<-Mouse430A2 } else if(chip=="hwgcod") { 
# (BootCL.Statistic: maps accessions to chromosome rows, sorts by
# chromosome then start position, counts neighbours closer than the
# window, and records the 0/1 pairwise state vector. Then begins
# BootCL.Pvalue(): one-sided bootstrap p-value.)
data(hwgcod); Chromosome.List<-hwgcod } else if(chip=="mwgcod") { data(mwgcod); Chromosome.List<-mwgcod } else if(chip=="rwgcod") { data(rwgcod); Chromosome.List<-rwgcod } else if(chip=="hgug4112a") { data(hgug4112a); Chromosome.List<-hgug4112a } else if(chip=="hgug4110b") { data(hgug4110b); Chromosome.List<-hgug4110b } else if(chip=="mgug4122a") { data(mgug4122a); Chromosome.List<-mgug4122a } else if(chip=="chicken") { data(chickenChip); Chromosome.List<-chickenChip } else { print("Please enter VALID chip name!") } diff.count<-0; select.id<-NULL Affy.ID<-NULL for(i in 1:length(ID.data$Affy.list)) { temp.id<-which(as.character(Chromosome.List$NAME)==as.character(ID.data$Access.list[i])) select.id<-c(select.id,temp.id) Affy.ID<-c(Affy.ID,rep(as.character(ID.data$Affy.list[i]),length(temp.id))) } select.list<-Chromosome.List[select.id,] chsort.id<-sort.list(select.list$CHR.NAME) select.list<-select.list[chsort.id,] Affy.ID<-Affy.ID[chsort.id] ch.list<-table(select.list$CHR.NAME) ch.name<-names(ch.list) sortCH.list<-NULL sortAffy.ID<-NULL for(i in 1:length(ch.list)) { temp.list<-select.list[select.list$CHR.NAME==ch.name[i],] temp.Affy<-Affy.ID[select.list$CHR.NAME==ch.name[i]] tempsort.id<-sort.list(temp.list$START) temp.list<-temp.list[tempsort.id,] sortCH.list<-rbind(sortCH.list,temp.list) sortAffy.ID<-c(sortAffy.ID,as.character(temp.Affy[tempsort.id])) } data(total.count.ws) if(is.null(windowsize)) { windowsize<-round(total.count.ws[chip,]$windowsize)} n<-nrow(sortCH.list) conseq.state<-rep(0,n-1) for(i in 1:(n-1)) { if( sortCH.list$CHR.NAME[i+1] == sortCH.list$CHR.NAME[i]) { diff<- sortCH.list$START[i+1] - sortCH.list$END[i] # if(diff>0 & diff<windowsize) if(diff<windowsize) { diff.count<-diff.count+1 conseq.state[i]<-1 } } } return(list(Diff.count=diff.count,windowsize=windowsize,sampling.count=n, conseq.state=conseq.state,Affy.ID=sortAffy.ID,Access.ID=sortCH.list$NAME)) } BootCL.Pvalue<-function(Bstat,distribution) { 
# (BootCL.Pvalue body, then BootCL.print(): splits the sorted gene list
# into runs of clustered neighbours and keeps runs of >= cutoff genes.
# Then begins find.ID(): maps the user's probe ids to accessions via the
# per-chip annotation tables.)
Pvalue<-sum(distribution$dist>=Bstat$Diff.count)/length(distribution$dist) return(list(Diff.count=Bstat$Diff.count, Pvalue=Pvalue,dist=distribution$dist,windowsize=distribution$windowsize,sampling.count= Bstat$sampling.count,conseq.state=Bstat$conseq.state,Affy.ID=Bstat$Affy.ID,Access.ID=Bstat$Access.ID)) } BootCL.print<-function(BootP,cutoff=3,affyID.flag=TRUE) { print.list<-NULL id<-1 count<-1 if(affyID.flag) { gene.name<-BootP$Affy.ID } else { gene.name<-BootP$Access.ID} for(i in 1:length(BootP$conseq.state)) { if(BootP$conseq.state[i]==1) { count<-count+1 } else { temp<-as.character(gene.name[(i-count+1):i]) print.list<-c(print.list,list(temp)) count<-1 } } select.list<-NULL for(i in 1:length(print.list)) { if(length(print.list[[i]])>=cutoff) select.list<-c(select.list,list(print.list[[i]])) } return(list(print.list=print.list,select.list=select.list)) } find.ID<-function(chip,sample,affyID.flag=TRUE) { if(chip=="HG.U133A") { data(affy.hgu133a); xx<-affy.hgu133a;data(HG.U133A); Chromosome.List<-HG.U133A } else if(chip=="HG.U133B") { data(affy.hgu133b); xx<-affy.hgu133b;data(HG.U133B); Chromosome.List<-HG.U133B } else if(chip=="MG.U74Av2") { data(affy.mgu74av2); xx<-affy.mgu74av2;data(MG.U74Av2); Chromosome.List<-MG.U74Av2 } else if(chip=="MG.U74Bv2") { data(affy.mgu74bv2); xx<-affy.mgu74bv2;data(MG.U74Bv2); Chromosome.List<-MG.U74Bv2 } else if(chip=="MG.U74Cv2") { data(affy.mgu74cv2); xx<-affy.mgu74cv2;data(MG.U74Cv2); Chromosome.List<-MG.U74Cv2 } else if(chip=="RG.U34A") { data(affy.rgu34a); xx<-affy.rgu34a;data(RG.U34A); Chromosome.List<-RG.U34A } else if(chip=="RG.U34B") { data(affy.rgu34b); xx<-affy.rgu34b;data(RG.U34B); Chromosome.List<-RG.U34B } else if(chip=="RG.U34C") { data(affy.rgu34c); xx<-affy.rgu34c;data(RG.U34C); Chromosome.List<-RG.U34C } else if(chip=="Hgfocus") { data(affy.hgfocus); xx<-affy.hgfocus;data(Hgfocus); Chromosome.List<-Hgfocus } else if(chip=="Hgu133plus2") { data(affy.hgu133plus2); xx<-affy.hgu133plus2;data(Hgu133plus2); 
# (find.ID continued: codelink chips strip a ".suffix" from accessions;
# rows are keyed by probe id, and probes with no accession are dropped.
# NOTE(review): Chromosome.List and n are loaded/assigned here but never
# used inside find.ID.)
Chromosome.List<-Hgu133plus2 } else if(chip=="Hgu133A2") { data(affy.hgu133a2); xx<-affy.hgu133a2;data(Hgu133A2); Chromosome.List<-Hgu133A2 } else if(chip=="Mouse4302") { data(affy.mouse4302); xx<-affy.mouse4302;data(Mouse4302); Chromosome.List<-Mouse4302 } else if(chip=="Mouse430A2") { data(affy.mouse430a2); xx<-affy.mouse430a2;data(Mouse430A2); Chromosome.List<-Mouse430A2 } else if(chip=="hwgcod") { data(codelink.hwgcod); xx<-codelink.hwgcod;data(hwgcod); Chromosome.List<-hwgcod xx[,2]<-apply(as.matrix(xx[,2]),1,function(x){strsplit(x,split=".",fixed=TRUE)[[1]][1]}) } else if(chip=="mwgcod") { data(codelink.mwgcod); xx<-codelink.mwgcod;data(mwgcod); Chromosome.List<-mwgcod xx[,2]<-apply(as.matrix(xx[,2]),1,function(x){strsplit(x,split=".",fixed=TRUE)[[1]][1]}) } else if(chip=="rwgcod") { data(codelink.rwgcod); xx<-codelink.rwgcod;data(rwgcod); Chromosome.List<-rwgcod xx[,2]<-apply(as.matrix(xx[,2]),1,function(x){strsplit(x,split=".",fixed=TRUE)[[1]][1]}) } else if(chip=="hgug4112a") { data(agilent.hgug4112a); xx<-agilent.hgug4112a;data(hgug4112a); Chromosome.List<-hgug4112a } else if(chip=="hgug4110b") { data(agilent.hgug4110b); xx<-agilent.hgug4110b;data(hgug4110b); Chromosome.List<-hgug4110b } else if(chip=="mgug4122a") { data(agilent.mgug4122a); xx<-agilent.mgug4122a;data(mgug4122a); Chromosome.List<-mgug4122a } else if(chip=="chicken") { data(affy.chickenChip); xx<-affy.chickenChip;data(chickenChip); Chromosome.List<-chickenChip } else { print("Please enter VALID chip name!") } rownames(xx)<-as.character(xx[,1]) if(affyID.flag==TRUE) { if(is.null(dim(sample))) { Access.list<-xx[as.character(sample),2];n<-length(sample) } else { sample<-sample[,1] Access.list<-xx[as.character(sample),2];n<-length(sample) } if(sum(is.na(Access.list))==0) { probename.list<-sample } else { probename.list<-sample[-which(is.na(Access.list))] } } else { if(!is.null(dim(sample))) { sample<-sample[,1] } Access.list<-sample probename.list<-NULL } 
# (find.ID's return, then BootCL.plot(): histogram of the bootstrap
# distribution with the observed statistic marked. NOTE(review): the
# inline "# sub=paste(..." comment swallows the rest of this collapsed
# physical line; BootP$Diff relies on `$` partial matching to Diff.count.)
return(list(Access.list=Access.list,Affy.list=probename.list)) } BootCL.plot<-function(BootP,xrange=NULL,freq.bootCL=NULL) { if(is.null(xrange)) {xrange<-c(0,max(BootP$dist)+5)} hist(BootP$dist,xlim=xrange,main="Bootstrapping distribution",freq=freq.bootCL, # sub=paste("Chromosomal spatial bias of ", chip ,"genes of #",BootP$sampling.count,sep=""), xlab=paste("P value : ",as.character(BootP$Pvalue)),ylab="", breaks=seq(0,BootP$sampling.count)) abline(v=BootP$Diff.count,lty=1,col=2,lwd=2) text((BootP$Diff),-10,as.character(BootP$Diff),col="blue",cex=1) }
# ---------------------------------------------------------------------------
# data-raw/DATASET.R: build the internal datasets for oakViz -- color
# palettes, category labels, plotting orders, tract geographies, and figure
# captions -- and save them all with usethis::use_data(..., internal = TRUE).
# Consistency fixes vs. the original: `<-` for all top-level assignments,
# and readr::/dplyr:: qualified everywhere (the original mixed
# readr::read_csv with bare read_csv/select).
# ---------------------------------------------------------------------------

# COLORS/LABELS
# (used in plot_lollipop(), plot_bar_periods(), plot_bar_periods_ses(),
# and aggregate_categories())
gent_cat_colors <- c("snow3", "#d94801", "#fa7b00", "#fdcc8a", "#a6d894")
gent_cat <- c("Nongentrifiable", "Intense", "Moderate", "Weak",
              "People or Price")
names(gent_cat_colors) <- gent_cat

# refers to ethnoracial category
race_short_colors <- c("#481567FF", "#33638DDF", "#FDE725FF", "#20A387FF")
race_short <- c("Predominantly Black", "Black-Other", "White/White-Mixed",
                "Multiethnic/Other")
names(race_short_colors) <- race_short

# refers to race category
race_colors <- c("#17202A", "#DE3163", "#FFBF00", "#DFFF00", "#9FE2BF",
                 "#6495ED")
race_cat <- c("Overall", "Asian", "Black", "Hispanic", "White", "Other")
names(race_colors) <- race_cat

inc_cat_colors <- c("#c7cff2", "#8897db", "#697fe0", "#4c66d9", "#1437cc")
inc_cat <- c("Bottom Quintile", "Second Quintile", "Middle Quintile",
             "Fourth Quintile", "Top Quintile")
names(inc_cat_colors) <- inc_cat

ses_cat_colors <- c("#9b9b9b", "#fcbba1", "#fc9272", "#faab8c", "#fb6a4a",
                    "#b63b36")
ses_cat <- c("All", "Low", "Moderate", "LMM", "Middle", "High")
ses_short <- c("Low", "Moderate", "Middle", "High")
names(ses_cat_colors) <- ses_cat

# " Moderate" includes a space to differentiate from "Moderate"
# gentrification. Used when ordering cat levels for lollipop plots.
ses_lollipop_colors <- c("#9b9b9b", "#fcbba1", "#fc9272", "#faab8c",
                         "#fb6a4a", "#b63b36")
ses_lollipop_cat <- c("All", "Low", " Moderate", "LMM", "Middle", "High")
ses_lollipop_short <- c("Low", " Moderate", "Middle", "High")
names(ses_lollipop_colors) <- ses_lollipop_cat

period_cat_colors <- c("#46aac8", "#46aac8", "#46aac8", "#46aac8")
period_cat <- c("Boom", "Bust", "Recovery", "Post-Recovery")
names(period_cat_colors) <- period_cat

move_cat_colors <- c("Moved out of Bay Area" = "#8baf3e",
                     "Different City within Bay Area" = "#fdbd3b",
                     "Moved within Oakland" = "#2e5e8b")
dest_colors <- c("Outside Bay Area" = "#d53e4f",
                 "South Bay" = "#fc8d59",
                 "San Francisco" = "#fee08b",
                 "North Bay" = "#ffffbf",
                 "Contra Costa" = "#e6f598",
                 "Alameda" = "#99d594",
                 "Within Oakland" = "#3288bd")

# LABELS/ORDERING
# used in plot_bar_ses() and stacked_bar()
relabel_gent_cat <- c("nongentrifiable" = "Nongentrifiable",
                      "gentrifying" = "Gentrifying",
                      "intense" = "Intense",
                      "moderate" = "Moderate",
                      "earlygent" = "Early Gentrification",
                      "weak" = "Weak",
                      "peoplepricegent" = "People or Price")
gent_cat_plot_order <- c("Nongentrifiable", "Gentrifying", "Intense",
                         "Moderate", "Early Gentrification", "Weak",
                         "People or Price")
relabel_race_cat <- c("PredWhite" = "Predominantly White",
                      "PredBlack" = "Predominantly Black",
                      "PredOther" = "Predominantly Other",
                      "WhiteOther" = "White-Other",
                      "BlackWhite" = "Black-White",
                      "BlackOther" = "Black-Other",
                      "Multiethnic" = "Multiethnic",
                      "Overall" = "Overall",
                      "WhiteMixed" = "White/White-Mixed",
                      "MixedOther" = "Multiethnic/Other")
relabel_move_cat <- c("moved_outba_pct" = "Moved out of Bay Area",
                      "diff_city_ba_pct" = "Different City within Bay Area",
                      "moved_within_oak_pct" = "Moved within Oakland")
relabel_dest_cat <- c("outmigration_outba_pct" = "Outside Bay Area",
                      "withinoakmigration_pct" = "Within Oakland",
                      "outmigration_alameda_pct" = "Alameda",
                      "outmigration_contracosta_pct" = "Contra Costa",
                      "outmigration_northbay_pct" = "North Bay",
                      "outmigration_sanfran_pct" = "San Francisco",
                      "outmigration_southbay_pct" = "South Bay")
race_cat_plot_order <- c("Predominantly White", "Predominantly Black",
                         "Predominantly Other", "White-Other", "Black-White",
                         "Black-Other", "Multiethnic", "White/White-Mixed",
                         "Multiethnic/Other")
inc_cat_plot_order <- c("Bottom Quintile", "Second Quintile",
                        "Middle Quintile", "Fourth Quintile", "Top Quintile")
move_order <- c("Moved out of Bay Area", "Different City within Bay Area",
                "Moved within Oakland")
dest_order <- c("Outside Bay Area", "South Bay", "San Francisco", "North Bay",
                "Contra Costa", "Alameda", "Within Oakland")

# READ IN DATA
# Oakland tractids
oak_ids <- readr::read_csv(
  "../../oak-data-repo/oakland_geographies/trtid10_oak.csv")

# Bay Area tractids
bay_ids <- readr::read_csv(
  "../../oak-data-repo/oakland_geographies/trtid10_bayarea.csv")

# gentrification data
gentcat <- readr::read_csv(
  "../../oak-data-repo/gentrification_categories/gentcat_006a_50_oak.csv") %>%
  dplyr::select(tractid10 = trtid10, cat = gentcat_006a_50)
gentcat$cat <- plyr::revalue(gentcat$cat, relabel_gent_cat)
gentcat$cat <- factor(gentcat$cat, levels = gent_cat_plot_order)
gentcat$facet <- "Gentrification"

# race data
racecat <- readr::read_csv(
  "../../oak-data-repo/ethnoracial_composition/racetypology_oak_tracts_00.csv") %>%
  dplyr::select(tractid10 = trtid10, cat = race.shortcategory00)
racecat$cat <- plyr::revalue(racecat$cat, relabel_race_cat)
racecat$cat <- factor(racecat$cat, levels = race_cat_plot_order)
racecat$facet <- "Ethnoracial"

# income data
inccat <- readr::read_csv(
  "../../oak-data-repo/income_categories/hinc8a_categories.csv")
inccat$cat <- factor(inccat$cat, levels = inc_cat_plot_order)
inccat$facet <- "Income"

# city shapefiles
cities <- readr::read_csv(
  "../../oak-data-repo/oakland_geographies/census_2010b_tracts_places_ca.csv")

# Oakland tracts data
oak_tracts <- oak_ids %>% dplyr::select(tractid10 = trtid10)

# CAPTIONS
ses_caption <- "\nSES Ranges by Equifax Risk Scores: Low = missing or <580, Moderate = 580-649, Middle = 650-749, High = 750+."
period_caption <- "\nHousing Period Ranges: Boom = 2002-2006, Bust = 2007-2009, Recovery = 2010-2014, Post-Recovery = 2015-2017."
frb_caption <- "\nSource: Federal Reserve Bank of New York Consumer Credit Panel/Equifax Data."
frb_acs_caption <- "\nSource: Federal Reserve Bank of New York Consumer Credit Panel/Equifax Data and 2000 US Census, 2005-2009 ACS, and 2012-2016 ACS."
frb_acs_caption_splitline <- "\nSource: Federal Reserve Bank of New York Consumer Credit Panel/Equifax Data\nand 2000 US Census, 2005-2009 ACS, and 2012-2016 ACS."
acs_caption <- "\nSource: 2000 US Census, 2005-2009 ACS, and 2012-2016 ACS."

# Save everything as internal package data.
usethis::use_data(gent_cat_colors, gent_cat, race_short_colors, race_short,
                  race_colors, race_cat, inc_cat_colors, inc_cat,
                  ses_cat_colors, ses_cat, ses_short, ses_lollipop_colors,
                  ses_lollipop_cat, ses_lollipop_short, period_cat_colors,
                  period_cat, move_cat_colors, dest_colors, relabel_gent_cat,
                  gent_cat_plot_order, relabel_race_cat, relabel_move_cat,
                  relabel_dest_cat, race_cat_plot_order, inc_cat_plot_order,
                  move_order, dest_order, oak_ids, bay_ids, gentcat, racecat,
                  inccat, cities, oak_tracts, ses_caption, period_caption,
                  frb_caption, frb_acs_caption, frb_acs_caption_splitline,
                  acs_caption,
                  overwrite = TRUE, internal = TRUE)
/data-raw/DATASET.R
no_license
Changing-Cities-Research-Lab/oakViz
R
false
false
8,219
r
# NOTE(review): duplicate copy of oakViz's data-raw/DATASET.R with newlines
# collapsed into three long physical lines (this copy is truncated before
# the final usethis::use_data() call). Code kept byte-identical; only
# comment lines are added at safe junctions. The script defines color
# palettes, category label/ordering vectors, reads tract-level CSVs, and
# defines figure captions.
# NOTE(review): the leading "# COLORS/LABELS ..." marker means everything
# on this first collapsed line is commented out as stored here.
# COLORS/LABELS # (used in plot_lollipop(), plot_bar_periods(), and plot_bar_periods_ses(), aggregate_categories()) gent_cat_colors <- c("snow3","#d94801", "#fa7b00", "#fdcc8a", "#a6d894") gent_cat <- c("Nongentrifiable", "Intense", "Moderate", "Weak", "People or Price") names(gent_cat_colors) <- gent_cat # refers to ethnoracial category race_short_colors <- c("#481567FF", "#33638DDF", "#FDE725FF", "#20A387FF") race_short <- c("Predominantly Black", "Black-Other", "White/White-Mixed", "Multiethnic/Other") names(race_short_colors) <- race_short # refers to race category race_colors <- c("#17202A", "#DE3163", "#FFBF00", "#DFFF00", "#9FE2BF", "#6495ED") race_cat <- c("Overall", "Asian", "Black", "Hispanic", "White", "Other") names(race_colors) <- race_cat inc_cat_colors <- c("#c7cff2","#8897db","#697fe0","#4c66d9","#1437cc") inc_cat <- c("Bottom Quintile", "Second Quintile", "Middle Quintile", "Fourth Quintile", "Top Quintile") names(inc_cat_colors) <- inc_cat ses_cat_colors <- c("#9b9b9b", "#fcbba1", "#fc9272", "#faab8c","#fb6a4a", "#b63b36") ses_cat = c("All", "Low", "Moderate", "LMM" ,"Middle", "High") ses_short = c("Low", "Moderate", "Middle", "High") names(ses_cat_colors) <- ses_cat # " Moderate" includes a space to differentiate from "Moderate" gentrification # Used when ordering cat levels for lollipop plots ses_lollipop_colors <- c("#9b9b9b", "#fcbba1", "#fc9272", "#faab8c","#fb6a4a", "#b63b36") ses_lollipop_cat = c("All", "Low", " Moderate", "LMM" ,"Middle", "High") ses_lollipop_short = c("Low", " Moderate", "Middle", "High") names(ses_lollipop_colors) <- ses_lollipop_cat period_cat_colors <- c("#46aac8", "#46aac8", "#46aac8", "#46aac8") period_cat = c("Boom", "Bust", "Recovery", "Post-Recovery") names(period_cat_colors) <- period_cat move_cat_colors = c("Moved out of Bay Area" = "#8baf3e", "Different City within Bay Area" = "#fdbd3b", "Moved within Oakland" = "#2e5e8b") dest_colors = c("Outside Bay Area" = "#d53e4f", "South Bay" = "#fc8d59", "San Francisco" 
# (dest_colors continues, then relabel_* mappings, plot orderings, and
# the data-reading section begin. NOTE(review): this copy mixes
# readr::read_csv with bare read_csv/select and uses `=` for top-level
# assignment in places.)
= "#fee08b", "North Bay" = "#ffffbf", "Contra Costa" = "#e6f598", "Alameda" = "#99d594", "Within Oakland" = "#3288bd") # LABELS/ORDERING # used in plot_bar_ses() and stacked_bar() relabel_gent_cat <- c("nongentrifiable" = "Nongentrifiable", "gentrifying" = "Gentrifying", "intense" = "Intense", "moderate" = "Moderate", "earlygent" = "Early Gentrification", "weak" = "Weak", "peoplepricegent" = "People or Price") gent_cat_plot_order <- c("Nongentrifiable", "Gentrifying", "Intense", "Moderate", "Early Gentrification", "Weak", "People or Price") relabel_race_cat <- c("PredWhite" = "Predominantly White", "PredBlack" = "Predominantly Black", "PredOther" = "Predominantly Other", "WhiteOther" = "White-Other", "BlackWhite" = "Black-White", "BlackOther" = "Black-Other", "Multiethnic" = "Multiethnic", "Overall" = "Overall", "WhiteMixed" = "White/White-Mixed", "MixedOther" = "Multiethnic/Other") relabel_move_cat <- c("moved_outba_pct"="Moved out of Bay Area", "diff_city_ba_pct" = "Different City within Bay Area", "moved_within_oak_pct" = "Moved within Oakland") relabel_dest_cat <- c("outmigration_outba_pct" = "Outside Bay Area", "withinoakmigration_pct" = "Within Oakland", "outmigration_alameda_pct" = "Alameda", "outmigration_contracosta_pct" = "Contra Costa", "outmigration_northbay_pct" = "North Bay", "outmigration_sanfran_pct" = "San Francisco", "outmigration_southbay_pct" = "South Bay") race_cat_plot_order <- c("Predominantly White", "Predominantly Black", "Predominantly Other","White-Other","Black-White","Black-Other","Multiethnic", "White/White-Mixed", "Multiethnic/Other") inc_cat_plot_order <- c("Bottom Quintile", "Second Quintile", "Middle Quintile", "Fourth Quintile", "Top Quintile") move_order <- c("Moved out of Bay Area", "Different City within Bay Area", "Moved within Oakland") dest_order <- c("Outside Bay Area", "South Bay", "San Francisco", "North Bay", "Contra Costa", "Alameda", "Within Oakland") # READ IN DATA # Oakland tractids oak_ids <- 
# (data-reading section: tract id lists, gentrification/race/income
# category tables -- relabelled and releveled as factors -- city
# shapefile table, and the caption strings.)
readr::read_csv("../../oak-data-repo/oakland_geographies/trtid10_oak.csv") # Bay Area tractids bay_ids <- readr::read_csv("../../oak-data-repo/oakland_geographies/trtid10_bayarea.csv") # gentrification data gentcat <- read_csv("../../oak-data-repo/gentrification_categories/gentcat_006a_50_oak.csv") %>% select(tractid10 = trtid10, cat = gentcat_006a_50) gentcat$cat <- plyr::revalue(gentcat$cat, relabel_gent_cat) gentcat$cat <- factor(gentcat$cat, levels = gent_cat_plot_order) gentcat$facet = "Gentrification" # race data racecat <- read_csv("../../oak-data-repo/ethnoracial_composition/racetypology_oak_tracts_00.csv") %>% select(tractid10 = trtid10, cat = race.shortcategory00) racecat$cat <- plyr::revalue(racecat$cat, relabel_race_cat) racecat$cat <- factor(racecat$cat, levels = race_cat_plot_order) racecat$facet = "Ethnoracial" # income data inccat <- read_csv("../../oak-data-repo/income_categories/hinc8a_categories.csv") inccat$cat <- factor(inccat$cat, levels = inc_cat_plot_order) inccat$facet = "Income" # city shapefiles cities <- read_csv("../../oak-data-repo/oakland_geographies/census_2010b_tracts_places_ca.csv") # Oakland tracts data oak_tracts <- oak_ids %>% select(tractid10 = trtid10) # CAPTIONS ses_caption = "\nSES Ranges by Equifax Risk Scores: Low = missing or <580, Moderate = 580-649, Middle = 650-749, High = 750+." period_caption = "\nHousing Period Ranges: Boom = 2002-2006, Bust = 2007-2009, Recovery = 2010-2014, Post-Recovery = 2015-2017." frb_caption = "\nSource: Federal Reserve Bank of New York Consumer Credit Panel/Equifax Data." frb_acs_caption = "\nSource: Federal Reserve Bank of New York Consumer Credit Panel/Equifax Data and 2000 US Census, 2005-2009 ACS, and 2012-2016 ACS." frb_acs_caption_splitline = "\nSource: Federal Reserve Bank of New York Consumer Credit Panel/Equifax Data\nand 2000 US Census, 2005-2009 ACS, and 2012-2016 ACS." acs_caption = "\nSource: 2000 US Census, 2005-2009 ACS, and 2012-2016 ACS." 
usethis::use_data(gent_cat_colors, gent_cat, race_short_colors, race_short, race_colors, race_cat, inc_cat_colors, inc_cat, ses_cat_colors, ses_cat, ses_short, ses_lollipop_colors, ses_lollipop_cat, ses_lollipop_short, period_cat_colors, period_cat, move_cat_colors, dest_colors, relabel_gent_cat, gent_cat_plot_order, relabel_race_cat, relabel_move_cat, relabel_dest_cat, race_cat_plot_order, inc_cat_plot_order, move_order, dest_order, oak_ids, bay_ids, gentcat, racecat, inccat, cities, oak_tracts, ses_caption, period_caption, frb_caption, frb_acs_caption, frb_acs_caption_splitline, acs_caption, overwrite = TRUE, internal = TRUE)
# C2T3: predict sales Volume for new products ----
# Workflow: (1) try to impute BestSellersRank and abandon it (model fit too
# weak); (2) clean Volume outliers and drop collinear predictors; (3) train
# SVM / random forest / GBM on Volume; (4) score the new-product file with
# the random-forest model and export the predictions.

# Install only when missing, so repeated runs do not reinstall.
# (The original also ran install.packages("stats"); "stats" is a base
# package and is not installable from CRAN, so that call was removed.)
for (pkg in c("xlsx", "corrplot")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}

library(readr)
library(ggplot2)
library(lattice)
library(caret)
library(corrplot)
library(dplyr)
library(xlsx)
library(stats)   # base package; kept for parity, attach is a no-op

# Fix the RNG so the data partitions and model fits are reproducible.
set.seed(39)

existingproductattributes2017 <- read_csv("existingproductattributes2017.csv")
summary(existingproductattributes2017)
plot(existingproductattributes2017$BestSellersRank)
# Histogram of BestSellersRank (NAs dropped via na.rm).
ggplot(existingproductattributes2017, aes(x = BestSellersRank)) +
  geom_histogram(na.rm = TRUE, show.legend = TRUE)

############################################################
# 1. Attempt to impute missing BestSellersRank ----
############################################################
# Keep only rows where BestSellersRank is populated; these are used to model
# the rank so the NA rows could be imputed if the model were good enough.
exist_subset <- subset(
  existingproductattributes2017,
  !is.na(existingproductattributes2017$BestSellersRank)
)

# Treat categorical attributes as factors so dummyVars() encodes them.
exist_subset$ProductType <- as.factor(exist_subset$ProductType)
exist_subset$ProductNum <- as.factor(exist_subset$ProductNum)

# One-hot encode the non-numeric attributes.
newdatframe <- dummyVars(" ~ .", data = exist_subset)
readyData <- data.frame(predict(newdatframe, newdata = exist_subset))
readyData

# ProductNum is an identifier, not a predictor.
exist_subset_no_prodNum <- subset(exist_subset, select = -ProductNum)

# Inspect and remove the most extreme BestSellersRank values.
boxplot(exist_subset_no_prodNum$BestSellersRank)
summary(exist_subset_no_prodNum)
outlier_values <- boxplot.stats(exist_subset_no_prodNum$BestSellersRank)$out
boxplot(exist_subset_no_prodNum$BestSellersRank, main = "Best Sellers Rank", boxwex = 0.1)
mtext(paste("Outliers: ", paste(outlier_values, collapse = ", ")), cex = 0.6)
exist_subset_no_prodNum <- subset(
  exist_subset_no_prodNum,
  !exist_subset_no_prodNum$BestSellersRank %in% c(17502, 14086, 12076, 6295, 5742)
)
exist_subset_no_prodNum

# Re-encode after the outlier removal.
newdatframe <- dummyVars(" ~ .", data = exist_subset_no_prodNum)
readyData <- data.frame(predict(newdatframe, newdata = exist_subset_no_prodNum))
readyData

# Correlation across all (now numeric) attributes.
CorrData <- cor(readyData)
corrplot(CorrData)

# Train a random forest on BestSellersRank (75/25 split, 10-fold CV).
respIndices <- createDataPartition(readyData$BestSellersRank, p = 0.75, list = FALSE)
resp_75_train <- readyData[respIndices, ]
resp_25_test <- readyData[-respIndices, ]
fitcontrol <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
rfTrain10 <- train(BestSellersRank ~ ., data = resp_75_train,
                   method = "rf", trControl = fitcontrol)
rfTrain10

############################################################
# 2. Model Volume on the existing products ----
############################################################
# The BestSellersRank model explained < 50% of the variance, so that column
# is excluded from the analysis, along with ProductNum.
exist_subset_no_prodNum <- subset(existingproductattributes2017, select = -BestSellersRank)
exist_subset_no_prodNum$ProductNum <- NULL
exist_subset_no_prodNum

# Outlier inspection for Volume by ProductType. Running only the tapply()
# portion prints the min/max/quartile numbers per product type.
outlier_values <- boxplot.stats(existingproductattributes2017$Volume ~ existingproductattributes2017$ProductType)$out
boxplot(existingproductattributes2017$Volume ~ existingproductattributes2017$ProductType,
        main = "Volume Vs Product Type", boxwex = 0.1)
mtext(paste("Outliers: ", paste(outlier_values, collapse = ", ")), cex = 0.6)
boxplot(tapply(existingproductattributes2017$Volume, existingproductattributes2017$ProductType, summary))

# Four outliers removed. NOTE(review): the original comment lists an
# ExtendedWarranty volume of 0 while the filter removes 7036 -- verify the
# intended value against the source data.
exist_subset_no_prodNum <- subset(
  exist_subset_no_prodNum,
  !exist_subset_no_prodNum$Volume %in% c(11204, 824, 1472, 7036)
)
boxplot(tapply(exist_subset_no_prodNum$Volume, exist_subset_no_prodNum$ProductType, summary))

# Check the distribution of the other attributes.
boxplot(tapply(exist_subset_no_prodNum$Price, exist_subset_no_prodNum$ProductType, summary))

# NOTE(review): absolute Windows path kept to preserve behavior; a relative
# path would make the script portable.
write.csv(exist_subset_no_prodNum,
          "C:/Users/Chitari/Documents/Data analytics Course/Deliverables/Course 2/T3/exist_subset_no_prodNum.csv",
          row.names = FALSE)

############################################################
# Abandoned normalization experiment ----
############################################################
# Normalization worsened the test-set predictions, so the data are NOT
# normalized. The original code below also referenced `normdataset`, which
# is never defined and would error; it is kept only as commented reference.
# newdatframe <- dummyVars(" ~ .", data = normdataset)
# readyData <- data.frame(predict(newdatframe, newdata = normdataset))
# readyData

# One-hot encode the cleaned data.
newdatframe <- dummyVars(" ~ .", data = exist_subset_no_prodNum)
readyData <- data.frame(predict(newdatframe, newdata = exist_subset_no_prodNum))
readyData

# Drop highly correlated predictors (|r| > 0.9) to remove collinearity.
CorrData <- cor(readyData)
corrplot(CorrData)
CorrData
hc <- findCorrelation(CorrData, cutoff = 0.9)
hc
exist_remove_corr <- readyData[, -c(hc)]
print(exist_remove_corr)
corrplot(cor(exist_remove_corr), method = c("number"), type = "lower",
         mar = c(0, 0, 0, 0), title = "Correlation Heat map - No Collinearity")

# Abandoned per-column normalization of Price (kept for reference):
# exist_remove_corr$Price <- (exist_remove_corr$Price - mean(exist_remove_corr$Price)) / sd(exist_remove_corr$Price)
# summary(exist_remove_corr)

# 75/25 split and 10-fold CV for the Volume models.
respIndices <- createDataPartition(exist_remove_corr$Volume, p = 0.75, list = FALSE)
resp_75_train <- exist_remove_corr[respIndices, ]
resp_25_test <- exist_remove_corr[-respIndices, ]
fitcontrol <- trainControl(method = "repeatedcv", number = 10, repeats = 1)

# SVM (linear) -- performed poorly: RMSE ~464, R^2 ~0.80.
system.time(svmTrain1 <- train(Volume ~ ., data = resp_75_train, method = "svmLinear",
                               trControl = fitcontrol, tuneLength = 10))
svmTrain1
plot(svmTrain1)
plot(varImp(svmTrain1))

# Random forest -- after some trials mtry = 25 was the optimum:
# RMSE under 200, R-squared between 0.92 and 0.95.
tunegrid <- expand.grid(mtry = 25)
system.time(rftrain <- train(Volume ~ ., data = resp_75_train, method = "rf",
                             trControl = fitcontrol, tuneGrid = tunegrid))
rftrain
plot(rftrain)
plot(varImp(rftrain))

# GBM -- the original notes interaction.depth = 7 / n.trees = 500 as better
# (RMSE under 250, R^2 ~0.90) but the grid below uses 2 / 50; verify which
# configuration was intended. (NOTE(review))
tunegrid <- expand.grid(n.trees = 50, interaction.depth = 2,
                        shrinkage = 0.1, n.minobsinnode = 10)
system.time(gbmtrain <- train(Volume ~ ., data = resp_75_train, method = "gbm",
                              trControl = fitcontrol, tuneGrid = tunegrid, tuneLength = 30))
gbmtrain
plot(gbmtrain)
plot(varImp(gbmtrain))

# Hold-out evaluation: RF gave RMSE > 200 and R^2 = 0.81 on the test set;
# GBM gave RMSE = 186 and R^2 = 0.91.
volumePred <- predict(rftrain, resp_25_test, type = "raw")
volumePred
qplot(volumePred, resp_25_test$Volume, geom = "jitter")
postResample(volumePred, resp_25_test$Volume)

############################################################
# 3. Score the new products ----
############################################################
newproductattributes2017 <- read_csv("newproductattributes2017.csv")

# Same preprocessing as training: drop BestSellersRank and ProductNum;
# no normalization (it hurt RMSE/R^2 during training).
new_subset_no_prodNum <- subset(newproductattributes2017, select = -BestSellersRank)
new_subset_no_prodNum$ProductNum <- NULL
new_subset_no_prodNum

newNewdatframe <- dummyVars(" ~ .", data = new_subset_no_prodNum)
newreadyData <- data.frame(predict(newNewdatframe, newdata = new_subset_no_prodNum))
newreadyData

# Exclude the same attributes removed during training due to collinearity.
# NOTE(review): these names are hard-coded; confirm they match what
# findCorrelation() actually removed above.
newreadyData$x5StarReviews <- NULL
newreadyData$x4StarReviews <- NULL
newreadyData$x2StarReviews <- NULL
newreadyData$NegativeServiceReview <- NULL

# Predict with the random-forest model and export.
finalPred <- predict(rftrain, newreadyData, type = "raw")
finalPred
#qplot(finalPred, newreadyData$Volume, geom = "jitter")
#postResample(VolumePredictionsGBM, newreadyData$Volume)

output <- newproductattributes2017
output$Predictions <- finalPred
write.csv(output, file = "C2T3outputNewPred.csv", row.names = TRUE)
boxplot(tapply(output$Predictions, output$ProductType, summary))
/C2T3.R
no_license
schitari/C2T3---Predicting-new-product-volumes-using-supervised-learning-algorithms
R
false
false
9,167
r
# NOTE(review): this section is a verbatim repeat of the C2T3 product-volume
# script earlier in the file (the dump appears to contain the same source
# twice); code is kept unchanged here, only comments/formatting restored.

# Package setup.
# NOTE(review): "stats" is a base package and cannot be installed from CRAN;
# ggplot2 is attached twice; install.packages() runs unconditionally.
install.packages("xlsx")
library(readr)
library(ggplot2)
library(lattice)
library(caret)
install.packages("corrplot")
install.packages("stats")
library(corrplot)
library(dplyr)
library(ggplot2)
library(xlsx)
library(stats)

# Fix the RNG for reproducible partitions/model fits.
set.seed(39)

existingproductattributes2017 <- read_csv("existingproductattributes2017.csv")
summary(existingproductattributes2017)
plot (existingproductattributes2017$BestSellersRank)
# Histogram of BestSellersRank (NAs dropped via na.rm).
ggplot(existingproductattributes2017,aes(x=existingproductattributes2017$BestSellersRank)) + geom_histogram(na.rm = TRUE, show.legend = TRUE)

#######################################################################################################################################################
#get a subset of the data frame with only the observations with BestSellersRank populated
#This is done to help impute the bservations with NA as best sellers rank to complete the dataset if possible.
exist_subset = subset(existingproductattributes2017, !is.na(existingproductattributes2017$BestSellersRank))

#convert the exist_subset categorical attributes to factors
exist_subset$ProductType = as.factor(exist_subset$ProductType)
exist_subset$ProductNum = as.factor(exist_subset$ProductNum)

#dummyfy the non-numeric attributes
newdatframe = dummyVars(" ~ .", data = exist_subset)
readyData <- data.frame(predict(newdatframe, newdata = exist_subset))
readyData

#remove the product number column from the dataset
exist_subset_no_prodNum = subset(exist_subset,select =-ProductNum)

#remove the highest values od best sellers rank
boxplot(exist_subset_no_prodNum$BestSellersRank)
summary(exist_subset_no_prodNum)
outlier_values <- boxplot.stats(exist_subset_no_prodNum$BestSellersRank)$out # outlier values.
boxplot(exist_subset_no_prodNum$BestSellersRank, main="Best Sellers Rank", boxwex=0.1)
mtext(paste("Outliers: ", paste(outlier_values, collapse=", ")), cex=0.6)
exist_subset_no_prodNum = subset(exist_subset_no_prodNum, !exist_subset_no_prodNum$BestSellersRank %in% c(17502, 14086, 12076, 6295, 5742))
exist_subset_no_prodNum

#dummyfy the non-numeric attributes
newdatframe = dummyVars(" ~ .", data = exist_subset_no_prodNum)
readyData <- data.frame(predict(newdatframe, newdata = exist_subset_no_prodNum))
readyData

#find correlation between all populated attributes.
#Find correlation in the columns
CorrData = cor(readyData)
corrplot(CorrData)

#train on data set to predict best sellers rank
##Split data for training and testing
respIndices = createDataPartition(readyData$BestSellersRank, p=0.75,list = FALSE)
resp_75_train = readyData[respIndices,]
resp_25_test = readyData[-respIndices,]
#apply 10 fold cross validation
fitcontrol = trainControl(method ="repeatedcv", number = 10, repeats = 1)
rfTrain10 = train(BestSellersRank~.,data = resp_75_train, method ='rf', trControl= fitcontrol)
rfTrain10

###############################################################################################################################
#After trying to find if there is a good confidence level to predict best sellers rank missing values we conclude that
#the confidence level to predict that column is under 50% hence we will exclude that column from our analysis.
# we will exclude product Number and Best sellers Rank from analysis for now.
exist_subset_no_prodNum = subset (existingproductattributes2017, select = -BestSellersRank)
exist_subset_no_prodNum$ProductNum = NULL
exist_subset_no_prodNum

#find outliers in data
outlier_values <- boxplot.stats(existingproductattributes2017$Volume~existingproductattributes2017$ProductType)$out # outlier values.
boxplot(existingproductattributes2017$Volume ~ existingproductattributes2017$ProductType, main="Volume Vs Product Type", boxwex=0.1)
mtext(paste("Outliers: ", paste(outlier_values, collapse=", ")), cex=0.6)
#The below code will show the outliers on the plot. Just running the tapply portion will show the min/max/quartile numbers.
#Once you have a list of outliers , calculate to see if they are acually outside range
boxplot(tapply(existingproductattributes2017$Volume,existingproductattributes2017$ProductType, summary))

# we found 4 outliers - Accessories with Volume 11204, Printer with Volume = 824, Smartphone with Volume = 1472 and ExtendedWarranty with volume = 0
# NOTE(review): the comment above says 0 but the filter removes 7036 -- confirm the intended value.
#remove those from the data
exist_subset_no_prodNum= subset(exist_subset_no_prodNum, !exist_subset_no_prodNum$Volume %in% c(11204,824,1472,7036))
boxplot(tapply(exist_subset_no_prodNum$Volume,exist_subset_no_prodNum$ProductType, summary))

#Check the distribution of the other attributes
boxplot(tapply(exist_subset_no_prodNum$Price,exist_subset_no_prodNum$ProductType, summary))

# NOTE(review): absolute Windows path; not portable.
write.csv(exist_subset_no_prodNum,"C:/Users/Chitari/Documents/Data analytics Course/Deliverables/Course 2/T3/exist_subset_no_prodNum.csv",row.names=FALSE)

####################################################################################################################################
#Normalization worsened the predictions on the test set from the training portion. So we will not normalize the data
#dummyfy the non-numeric attributes of the normalized dataset
# NOTE(review): `normdataset` is never defined in this file; the next three
# lines will error if executed as-is.
newdatframe = dummyVars(" ~ .", data = normdataset)
readyData <- data.frame(predict(newdatframe, newdata = normdataset))
readyData
####################################################################################################################################

#dummyfy the non-numeric attributes
newdatframe = dummyVars(" ~ .", data = exist_subset_no_prodNum)
readyData <- data.frame(predict(newdatframe, newdata = exist_subset_no_prodNum))
readyData

#find correlation between all populated attributes.
#Find correlation in the columns
CorrData = cor(readyData)
corrplot(CorrData)
CorrData
# Drop highly correlated predictors (|r| > 0.9).
hc = findCorrelation(CorrData, cutoff = 0.9)
hc
exist_remove_corr = readyData[,-c(hc)]
print (exist_remove_corr)
corrplot(cor(exist_remove_corr),method = c("number"),type ="lower" , mar = c(0,0,0,0), title = "Correlation Heat map - No Collinearity")

#normalize data set except the Volume column
#exist_remove_corr
#mean(exist_remove_corr$Price)
#sd(exist_remove_corr$Price)
#exist_remove_corr$Price = ((exist_remove_corr$Price - mean(exist_remove_corr$Price))/(sd(exist_remove_corr$Price)))
#summary(exist_remove_corr)

##Split data for training and testing
respIndices = createDataPartition(exist_remove_corr$Volume, p=0.75,list = FALSE)
resp_75_train = exist_remove_corr[respIndices,]
resp_25_test = exist_remove_corr[-respIndices,]
#apply 10 fold cross validation
fitcontrol = trainControl(method ="repeatedcv", number = 10, repeats = 1)

#train SVM. SVM performed poorly with RMSE = 464 and R2 = 0.80
system.time(svmTrain1 <- train(Volume~., data = resp_75_train, method ="svmLinear", trControl = fitcontrol, tuneLength = 10))
svmTrain1
plot(svmTrain1)
plot(varImp(svmTrain1))

#train rf. after some trys mtry= 25 seemed to be the most optimum RMSE under 200 and R-squared between 0.92 and 0.95
tunegrid = expand.grid(mtry = 25)
system.time(rftrain <- train(Volume~., data = resp_75_train, method ="rf", trControl = fitcontrol, tuneGrid = tunegrid))
rftrain
plot(rftrain)
plot(varImp(rftrain))

# train gbm. after some trys interaction.depth = 7, n.trees = 500 was the better with RMSE under 250 and R2 0.90
# NOTE(review): the grid below uses interaction.depth = 2 / n.trees = 50,
# which does not match the comment above -- confirm which was intended.
tunegrid = expand.grid(n.trees = 50, interaction.depth = 2, shrinkage = 0.1, n.minobsinnode = 10)
system.time(gbmtrain <- train(Volume~., data = resp_75_train, method ="gbm", trControl = fitcontrol, tuneGrid = tunegrid, tuneLength = 30))
gbmtrain
plot(gbmtrain)
plot(varImp(gbmtrain))

#predict using random forest gave a RMSE of over 200 and R2 = 0.81. Prediction with gbm gave an RMSE = 186 and R2 = 0.91.
volumePred = predict(rftrain,resp_25_test, type = "raw")
volumePred
qplot(volumePred,resp_25_test$Volume, geom = "jitter")
postResample(volumePred,resp_25_test$Volume)

# read newproducts csv file
newproductattributes2017 <- read_csv("newproductattributes2017.csv")

# Apply the same preprocessing steps to it as we did during training.
# we will exclude product Number and Best sellers Rank from analysis for now.
new_subset_no_prodNum = subset (newproductattributes2017, select = -BestSellersRank)
new_subset_no_prodNum$ProductNum = NULL
new_subset_no_prodNum

# we will not normalize the dataset since during training normalization made the RMSE and R2 worse.
#dummyfy the non-numeric attributes
newNewdatframe = dummyVars(" ~ .", data = new_subset_no_prodNum)
newreadyData <- data.frame(predict(newNewdatframe, newdata = new_subset_no_prodNum))
newreadyData

# exclude the same attribute we excluded from the training due to correlation and collinearity
# NOTE(review): names are hard-coded; confirm they match findCorrelation()'s output.
newreadyData$x5StarReviews = NULL
newreadyData$x4StarReviews = NULL
newreadyData$x2StarReviews = NULL
newreadyData$NegativeServiceReview = NULL

#do the predictions
finalPred = predict(rftrain,newreadyData, type = "raw")
finalPred
#qplot(finalPred,newreadyData$Volume, geom = "jitter")
#postResample(VolumePredictionsGBM,newreadyData$Volume)

#Move the predicted results to the csv file
output = newproductattributes2017
output$Predictions = finalPred
write.csv(output, file="C2T3outputNewPred.csv", row.names = TRUE)
boxplot(tapply(output$Predictions,output$ProductType, summary))
#' Extract values from Mplus output
#' An internal function used by extractSummaries_1file to extract
#' parameters from the output file using regular expressions.
#'
#' @param pattern the exact text to be matched in the outfile that identifies the parameter of interest
#' @param textToScan the chunk of Mplus output to be parsed, passed as a vector of character strings (from the scan command).
#' @param filename the name of the file containing textToScan. Used to make more intelligible warning messages.
#' @param type the data type of the parameter, which determines the regexp used. Currently can be \dQuote{int}, \dQuote{dec}, \dQuote{str}, or \dQuote{calc}. Defaults to \dQuote{int}.
#' @return A string or numeric vector
#' @keywords internal
#' @examples
#' #make me!!!
extractValue <- function(pattern, textToScan, filename, type="int") {
  #regex pattern now allows for specification to search for value on some line before or after match
  #example: +2:the Observed and the Replicated Chi-Square Values
  #(i.e., a leading "+N:"/"-N:" prefix means "read the value N lines after/before the matching line")
  offset <- 0
  if (grepl("^[+-]+\\d+:.*$", pattern, perl=TRUE)) {
    offset <- as.numeric(sub("^([+-]+\\d+):.*$", "\\1", pattern, perl=TRUE))
    pattern <- sub("^[+-]+\\d+:(.*)$", "\\1", pattern, perl=TRUE) #chop offset
  }

  #locate the matching line in the output file
  matchpos <- grep(pattern, textToScan, ignore.case=TRUE)
  matchlines <- textToScan[(matchpos+offset)]

  if (length(matchlines) > 1) {
    stop("More than one match found for parameter: ", pattern, "\n ", filename)
    #return(matchlines) #not sure what I was thinking here... seems better to stop than warn and return lines
  } else if (length(matchlines) == 0) {
    #if the parameter of interest not found in this file, then return NA
    #typed NA preserves the column type when results are combined downstream
    #warning(paste("Parameter not found: ", pattern, "\n ", filename, sep=""))
    if (type == "int") return(NA_integer_)
    else if (type == "dec") return(NA_real_)
    else if (type == "str") return(NA_character_)
  }

  #different idea: concatenate pattern with var type and match on that
  #then sub just the pattern part from the larger line
  typePrefix <- substr(type, 1, 3)

  if (typePrefix == "int") {
    regexp <- "-*\\d+" #optional negative sign in front
  } else if (typePrefix == "dec") {
    #regexpr: -*\\d+\\.\\d+ : -* optional negative sign, \\d+ match at least one digit \\. match decimal sign \\d+ match decimal digits
    regexp <- "-*\\d+\\.\\d+"
  } else if (typePrefix == "str") {
    regexp <- paste(pattern, ".*", sep="")
  }

  #locate the match within the (single) matching line
  valueMatches <- gregexpr(regexp, matchlines[1], perl=TRUE)[[1]]

  if (type == "str") {
    #remove the tag portion of the string (e.g., "title:"), retaining rest of line
    returnVal <- as.character(sub(pattern, "", matchlines[1], ignore.case=TRUE))
  } else {
    #excessively tight syntax: replace dec[15] with 15, if number at end of type. Otherwise return just "dec".
    #then grep result for only numeric characters (\\d+). If grep is false (i.e., no numerals in substitution,
    #then no index was specified in type, so type must be simply "dec", "int", or "str" (as opposed to "int[15]"), so set as 1
    if (!grepl("^\\d+$", whichMatch <- sub("^.*\\[(\\d+)\\]$", "\\1", type, perl=TRUE), perl=TRUE)) whichMatch <- 1
    else whichMatch <- as.numeric(whichMatch)

    #pull from the start of the match through match.length, which is the length of characters that matched
    #need to subtract one from the start + length offset to grab the correct number of characters
    #(e.g., if the match runs from 40-44, the start will be 40, with length 5, but 40 + 5 would be 6 characters, hence -1
    returnVal <- as.numeric(substr(matchlines[1], valueMatches[whichMatch], valueMatches[whichMatch] + attr(valueMatches, "match.length")[whichMatch] - 1))
  }

  return(returnVal)
}

#' Worker function used in extractSummaries_1section
#'
#' @param arglist The argument list
#' @param sectionHeaders A character vector with headers for each section of interest
#' @param sectionFields is a list of data.frames where each data.frame specifies the fields to be extracted for that section
#' @param textToParse The text to parse
#' @param filename The filename
#' @return A list
#' @keywords internal
#' @examples
#' # make me!!!
extractSummaries_1plan <- function(arglist, sectionHeaders, sectionFields, textToParse, filename) {
  #make this a more generic function that accepts headers and fields in case it is useful outside the MODEL FIT section
  if (length(sectionHeaders) < 1) stop("No section headers provided.")
  if (length(sectionHeaders) != length(sectionFields)) stop("Section headers and section fields have different lengths.")

  #multiple sections: process each header/fields pair in turn
  for (header in 1:length(sectionHeaders)) {
    #a blank section header indicates to match anywhere in the textToParse
    if (sectionHeaders[header] == "") {
      sectionText <- textToParse
    } else {
      #could be pretty inefficient if the same section header is repeated several times.
      #could build a list with divided output and check whether a section is present in the list before extracting
      sectionText <- getMultilineSection(sectionHeaders[header], textToParse, filename)
    }

    #process all fields for this section
    sectionFieldDF <- sectionFields[[header]]
    for (i in 1:nrow(sectionFieldDF)) {
      thisField <- sectionFieldDF[i,]
      #Check whether this field already exists and is not missing. If so, skip the extraction.
      #This was initially setup because of Tech 14 section changes where the number of final stage optimizations is different from v6 to v7.
      if (!thisField$varName %in% names(arglist) || is.na(arglist[[ thisField$varName ]])) {
        arglist[[ thisField$varName ]] <- extractValue(pattern=thisField$regexPattern, sectionText, filename, type=thisField$varType)
      }
    }
  }

  return(arglist)
}

#' Extract summary information for one section from Mplus output
#'
#' Function to extract model fit statistics from a section, wrapped to allow for multiple fit sections, as in EFA files.
#' Calls \code{extractSummaries_1plan}
#'
#' @param modelFitSection The fit information section
#' @param arglist The argument list
#' @param filename The file name
#' @return The argument list
#' @keywords internal
#' @examples
#' # make me!!!
extractSummaries_1section <- function(modelFitSection, arglist, filename, input=list()) {
  #DATA IMPUTATION outputs sometimes use the Mean/SD output (I believe in Mplus v6.12 and perhaps v7)
  #In Mplus v8, Model fit statistics are output as usual (e.g., ex11.6.out).
#This is confusing, so we should just test for the Mean/SD output here and use the MI-type output if found
  useMIHeadings <- FALSE
  if (!is.null(input$data.imputation)) {
    #probe: if a Mean can be extracted for the model chi-square, this file uses the MI-style (Mean/SD) fit output
    header <- "Chi-Square Test of Model Fit"
    fields <- list(data.frame(
            varName=c("ChiSqM_DF", "ChiSqM_Mean", "ChiSqM_SD", "ChiSqM_NumComputations"),
            regexPattern=c("Degrees of Freedom", "Mean", "Std Dev", "Number of successful computations"),
            varType=c("int", "dec", "dec", "int"), stringsAsFactors=FALSE))

    test <- extractSummaries_1plan(arglist, header, fields, modelFitSection, filename)
    if (!is.na(test$ChiSqM_Mean)) { useMIHeadings <- TRUE }
  }

  #MI and Montecarlo data types have fundamentally different output (means and sds per fit stat)
  #NOTE: headers and fields below are paired positionally (headers[i] <-> fields[[i]]);
  #extractSummaries_1plan stops with an error if their lengths differ.
  if (useMIHeadings || grepl("imputation", arglist$DataType, ignore.case=TRUE) || grepl("montecarlo", arglist$DataType, ignore.case=TRUE)) {
    modelFitSectionHeaders <- c(
        "", #section-nonspecific parameters
        "Chi-Square Test of Model Fit",
        # "Chi-Square Test of Model Fit for the Baseline Model",
        "Loglikelihood::H0 Value",
        "Loglikelihood::H1 Value",
        "CFI/TLI::CFI",
        "CFI/TLI::TLI",
        "Bayesian Posterior Predictive Checking using Chi-Square::Posterior Predictive P-Value",
        "Bayesian Prior Posterior Predictive Checking using Chi-Square::Prior Posterior Predictive P-Value",
        "Information Criteria( Including the Auxiliary Part)*::Akaike \\(AIC\\)",
        "Information Criteria( Including the Auxiliary Part)*::Bayesian \\(BIC\\)",
        "Information Criteria( Including the Auxiliary Part)*::Sample-Size Adjusted BIC \\(n\\* = \\(n \\+ 2\\) / 24\\)",
        "RMSEA \\(Root Mean Square Error Of Approximation\\)",
        "WRMR \\(Weighted Root Mean Square Residual\\)",
        "Information Criteri(a|on)::Deviance \\(DIC\\)",
        #pD header restored (was commented out): the fields list below contains an active pD entry,
        #so headers and fields must have equal length or extractSummaries_1plan stops with
        #"Section headers and section fields have different lengths." -- and positional pairing
        #requires this header to sit between DIC and BIC.
        "Information Criteri(a|on)::Estimated Number of Parameters \\(pD\\)",
        "Information Criteri(a|on)::Bayesian \\(BIC\\)"
    )

    modelFitSectionFields <- list(
        data.frame(
            varName=c("Parameters"), #defined outside of information criteria section for non-ML estimators
            regexPattern=c("^Number of Free Parameters"),
            varType=c("int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqM_DF", "ChiSqM_Mean", "ChiSqM_SD", "ChiSqM_NumComputations"),
            regexPattern=c("Degrees of Freedom", "Mean", "Std Dev", "Number of successful computations"),
            varType=c("int", "dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        # data.frame(
        #     varName=c("ChiSqBaseline_Value", "ChiSqBaseline_DF", "ChiSqBaseline_PValue"),
        #     regexPattern=c("Value", "Degrees of Freedom", "^P-Value"),
        #     varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        # ),
        data.frame(
            varName=c("LL_Mean", "LL_SD", "LL_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("UnrestrictedLL_Mean", "UnrestrictedLL_SD", "UnrestrictedLL_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("CFI_Mean", "CFI_SD", "CFI_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("TLI_Mean", "TLI_SD", "TLI_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("PostPred_PValue_Mean", "PostPred_PValue_SD", "PostPred_PValue_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("PriorPostPred_PValue_Mean", "PriorPostPred_PValue_SD", "PriorPostPred_PValue_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("AIC_Mean", "AIC_SD", "AIC_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("BIC_Mean", "BIC_SD", "BIC_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("aBIC_Mean", "aBIC_SD", "aBIC_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("RMSEA_Mean", "RMSEA_SD", "RMSEA_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("WRMR_Mean", "WRMR_SD", "WRMR_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame( #Information Criterion:: DIC
            varName=c("DIC_Mean", "DIC_SD", "DIC_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame( #Information Criterion:: Estimated number of parameters (pD)
            varName=c("pD_Mean", "pD_SD", "pD_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        ),
        data.frame( #Information Criterion:: Bayesian (BIC) -- sometimes within Information Criterion, sometimes Information Criteria (above)...
            varName=c("BIC_Mean", "BIC_SD", "BIC_NumComputations"),
            regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
            varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
        )
    )

    #handle two-level models, which return separate srmr for between vs. within
    if (grepl("twolevel", arglist$AnalysisType, ignore.case=TRUE)) {
      modelFitSectionHeaders <- append(modelFitSectionHeaders, c(
              "SRMR \\(Standardized Root Mean Square Residual\\) for the WITHIN level",
              "SRMR \\(Standardized Root Mean Square Residual\\) for the BETWEEN level"))
      modelFitSectionFields <- c(modelFitSectionFields, list(data.frame(
                  varName=c("SRMR.Within_Mean", "SRMR.Within_SD", "SRMR.Within_NumComputations"),
                  regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
                  varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
              ),
              data.frame(
                  varName=c("SRMR.Between_Mean", "SRMR.Between_SD", "SRMR.Between_NumComputations"),
                  regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
                  varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
              ))
      )
    } else {
      modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)")
      modelFitSectionFields <- c(modelFitSectionFields, list(data.frame(
                  varName=c("SRMR_Mean", "SRMR_SD", "SRMR_NumComputations"),
                  regexPattern=c("Mean", "Std Dev", "Number of successful computations"),
                  varType=c("dec", "dec", "int"), stringsAsFactors=FALSE
              ))
      )
    }
  } else { #not imputation or monte carlo output
    modelFitSectionHeaders <- c(
        "", #section-inspecific parameters
        "Chi-Square Test of Model Fit",
        "Chi-Square Test of Model Fit for the Baseline Model",
        "{+3i}Chi-Square Test of Model Fit for the Binary and Ordered Categorical::{+2b}Pearson Chi-Square", #chi-square header spans two lines, so +3i
        "{+3i}Chi-Square Test of Model Fit for the Binary and Ordered Categorical::{+2b}Likelihood Ratio Chi-Square",
        "Chi-Square Test for MCAR under the Unrestricted Latent Class Indicator Model::{+2b}Pearson Chi-Square", #use blank line to find pearson within section
        "Chi-Square Test for MCAR under the Unrestricted Latent Class Indicator Model::{+2b}Likelihood Ratio Chi-Square",
        "Chi-Square Test for Difference Testing",
        "Loglikelihood( Including the Auxiliary Part)*",
        "CFI/TLI",
        "Information Criteria( Including the Auxiliary Part)*",
        "Information Criteria Including the Auxiliary Part",
        "RMSEA \\(Root Mean Square Error Of Approximation\\)",
        "WRMR \\(Weighted Root Mean Square Residual\\)",
        "Bayesian Posterior Predictive Checking using Chi-Square",
        "Information Criterion", #somehow singular for bayes output?
        "Wald Test of Parameter Constraints"
    )

    modelFitSectionFields <- list(
        data.frame(
            varName=c("Parameters"), #defined outside of information criteria section for non-ML estimators
            regexPattern=c("^Number of Free Parameters"), #only match beginning of line (aux section has its own indented variant)
            varType=c("int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqM_Value", "ChiSqM_DF", "ChiSqM_PValue", "ChiSqM_ScalingCorrection"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value", "Scaling Correction Factor"),
            varType=c("dec", "int", "dec", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqBaseline_Value", "ChiSqBaseline_DF", "ChiSqBaseline_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqCategoricalPearson_Value", "ChiSqCategoricalPearson_DF", "ChiSqCategoricalPearson_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqCategoricalLRT_Value", "ChiSqCategoricalLRT_DF", "ChiSqCategoricalLRT_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqMCARUnrestrictedPearson_Value", "ChiSqMCARUnrestrictedPearson_DF", "ChiSqMCARUnrestrictedPearson_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqMCARUnrestrictedLRT_Value", "ChiSqMCARUnrestrictedLRT_DF", "ChiSqMCARUnrestrictedLRT_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ChiSqDiffTest_Value", "ChiSqDiffTest_DF", "ChiSqDiffTest_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("LL", "UnrestrictedLL", "LLCorrectionFactor", "UnrestrictedLLCorrectionFactor"),
            regexPattern=c("H0 Value", "H1 Value", "H0 Scaling Correction Factor", "H1 Scaling Correction Factor"),
            varType=c("dec", "dec", "dec", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("CFI", "TLI"),
            regexPattern=c("CFI", "TLI"),
            varType=c("dec", "dec"), stringsAsFactors=FALSE
        ),
        data.frame( #Information Criteria (v8 now includes DIC and pD here)
            varName=c("AIC", "BIC", "aBIC", "DIC", "pD"),
            regexPattern=c("Akaike \\(AIC\\)", "Bayesian \\(BIC\\)", "Sample-Size Adjusted BIC", "Deviance \\(DIC\\)", "Estimated Number of Parameters \\(pD\\)"),
            varType=c("dec", "dec", "dec", "dec", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("ParametersWithAux"),
            regexPattern=c("Number of Free Parameters"),
            varType=c("int"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("RMSEA_Estimate", "RMSEA_90CI_LB", "RMSEA_90CI_UB", "RMSEA_pLT05"),
            regexPattern=c("Estimate", "90 Percent C.I.", "90 Percent C.I.", "Probability RMSEA <= .05"),
            varType=c("dec", "dec[1]", "dec[2]", "dec"), stringsAsFactors=FALSE
        ),
        data.frame(
            varName=c("WRMR"),
            regexPattern=c("Value"),
            varType=c("dec"), stringsAsFactors=FALSE
        ),
        data.frame( #Bayesian Posterior Predictive Checking using Chi-Square
            varName=c("ObsRepChiSqDiff_95CI_LB", "ObsRepChiSqDiff_95CI_UB", "PostPred_PValue", "PriorPostPred_PValue"),
            regexPattern=c("+2:the Observed and the Replicated Chi-Square Values", "+2:the Observed and the Replicated Chi-Square Values", "^\\s*Posterior Predictive P-Value", "Prior Posterior Predictive P-Value"),
            varType=c("dec[1]", "dec[2]", "dec", "dec"), stringsAsFactors=FALSE
        ),
        data.frame( #Information Criterion (singular name under Mplus Bayes v7. Corrected to "Criteria" in v8)
            varName=c("DIC", "pD", "BIC"),
            regexPattern=c("Deviance \\(DIC\\)", "Estimated Number of Parameters \\(pD\\)", "Bayesian \\(BIC\\)"), #sometimes BIC is listed here (e.g., MI Bayes output)
            varType=c("dec", "dec", "dec"), stringsAsFactors=FALSE
        ),
        data.frame( #Wald Test of Parameter Constraints
            varName=c("WaldChiSq_Value", "WaldChiSq_DF", "WaldChiSq_PValue"),
            regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"),
            varType=c("dec", "int", "dec"), stringsAsFactors=FALSE
        )
    )

    #SRMR is reported per level for multilevel models; append the matching header/fields pair
    if (grepl("twolevel", arglist$AnalysisType, ignore.case=TRUE)) {
      modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)")
      modelFitSectionFields <- c(modelFitSectionFields, list(data.frame(
                  varName=c("SRMR.Within", "SRMR.Between"),
                  regexPattern=c("Value for Within", "Value for Between"),
                  varType=c("dec", "dec"), stringsAsFactors=FALSE
              ))
      )
    } else if (grepl("threelevel", arglist$AnalysisType, ignore.case=TRUE)) {
      modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)")
      modelFitSectionFields <- c(modelFitSectionFields, list(data.frame(
                  varName=c("SRMR.Within", "SRMR.Between.L2", "SRMR.Between.L3"),
                  regexPattern=c("Value for Within", "Value for Between Level 2", "Value for Between Level 3"),
                  varType=c("dec", "dec", "dec"), stringsAsFactors=FALSE
              ))
      )
    } else {
      modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)") #append two lists together
      modelFitSectionFields <- c(modelFitSectionFields, list(data.frame(
                  varName=c("SRMR"),
                  regexPattern=c("Value"),
                  varType=c("dec"), stringsAsFactors=FALSE
              )
          ))
    }
  }

  arglist <- extractSummaries_1plan(arglist, modelFitSectionHeaders, modelFitSectionFields, modelFitSection, filename)

  return(arglist)
}

#' Divide text into fields
#'
#' Helper function to divide an input section
#' into key-value pair list taken from mplus2lavaan
#'
#' @param section.text The section text (character vector of lines for one input section)
#' @param required Required sections: optional character vector of field names that must be
#'   present; asserted via \code{stopifnot} after parsing.
#' @return Divided sections: a named list mapping lowercased command names to their arguments
#'   (or \code{NULL} when \code{section.text} is \code{NULL}).
#' @keywords internal
#' @examples
#' # make me!!!
divideIntoFields <- function(section.text, required) {
  if (is.null(section.text)) { return(NULL) }

  #join all lines into one string, then split into individual commands at semicolons
  section.split <- strsplit(paste(section.text, collapse=" "), ";", fixed=TRUE)[[1]]

  section.divide <- list()

  for (cmd in section.split) {
    if (grepl("^\\s*!.*", cmd, perl=TRUE)) next #skip comment lines
    if (grepl("^\\s+$", cmd, perl=TRUE)) next #skip blank lines

    #mplus is apparently tolerant of specifications that don't include IS/ARE/=
    #example: usevariables x1-x10;
    #thus, split on spaces and assume that first element is lhs, drop second element if IS/ARE/=, and assume remainder is rhs
    #but if user uses equals sign, then spaces will not always be present (e.g., usevariables=x1-x10)

    #bugfix: reset per command so a malformed command cannot inherit (and re-assign) the
    #previous iteration's cmdName/cmdArgs, or error with "object 'cmdName' not found"
    #when the very first command is malformed.
    cmdName <- NULL
    cmdArgs <- NULL

    if ( (leadingEquals <- regexpr("^\\s*[A-Za-z]+[A-Za-z_-]*\\s*(=)", cmd[1L], perl=TRUE))[1L] > 0) {
      #equals-sign form: name is everything before "=", args everything after
      cmdName <- trimSpace(substr(cmd[1L], 1, attr(leadingEquals, "capture.start") - 1))
      cmdArgs <- trimSpace(substr(cmd[1L], attr(leadingEquals, "capture.start") + 1, nchar(cmd[1L])))
    } else {
      cmd.spacesplit <- strsplit(trimSpace(cmd[1L]), "\\s+", perl=TRUE)[[1L]]

      if (length(cmd.spacesplit) < 2L) {
        #for future: make room for this function to parse things like just TECH13 (no rhs)
      } else {
        cmdName <- trimSpace(cmd.spacesplit[1L])
        if (length(cmd.spacesplit) > 2L && tolower(cmd.spacesplit[2L]) %in% c("is", "are")) {
          cmdArgs <- paste(cmd.spacesplit[3L:length(cmd.spacesplit)], collapse=" ") #remainder, removing is/are
        } else {
          cmdArgs <- paste(cmd.spacesplit[2L:length(cmd.spacesplit)], collapse=" ") #is/are not used, so just join rhs
        }
      }
    }

    if (is.null(cmdName)) next #nothing parseable for this command; skip rather than clobber

    section.divide[[make.names(tolower(cmdName))]] <- cmdArgs
  }

  if (!missing(required)) { stopifnot(all(required %in% names(section.divide))) }

  return(section.divide)
}

#' Extract warnings and errors from 1 mplus file
#'
#' Helper function
#'
#' @param outfiletext The
#' text of the output file
#' @param filename The filename
#' @param input The input (parsed input instructions; must be of class mplus.inp with
#'   start.line/end.line attributes marking the input section bounds)
#' @return A list with two elements
#' \item{errors}{Mplus Errors}
#' \item{warnings}{Mplus Warnings}
#' @keywords internal
#' @examples
#' # make me!!!
extractWarningsErrors_1file <- function(outfiletext, filename, input) {
  #result container; classed so downstream print methods can dispatch on it
  warnerr <- list(warnings = list(), errors = list())
  class(warnerr$errors) <- c("list", "mplus.errors")
  class(warnerr$warnings) <- c("list", "mplus.warnings")

  if (!inherits(input, "mplus.inp")) {
    warning("Could not identify warnings and errors; input is not of class mplus.inp")
    return(warnerr)
  }

  #start.line/end.line of -1 indicates the input section could not be located upstream
  if (is.null(attr(input, "start.line")) || is.null(attr(input, "end.line")) || attr(input, "start.line") < 0L || attr(input, "end.line") < 0L) {
    warning("Could not identify bounds of input section: ", filename)
    return(warnerr)
  }

  #handle input warnings and errors first
  startInputWarnErr <- attr(input, "end.line") + 1L #first eligible line is after input section
  endInputWarnErr <- grep("^\\s*(INPUT READING TERMINATED NORMALLY|\\*\\*\\* WARNING.*|\\d+ (?:ERROR|WARNING)\\(S\\) FOUND IN THE INPUT INSTRUCTIONS|\\*\\*\\* ERROR.*)\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE)

  w <- 1 #counters for warnings and errors lists
  e <- 1

  #only process section if end was identified properly
  if (length(endInputWarnErr) > 0L) {
    #The above will match all of the possible relevant lines.
    #To identify input warnings/errors section, need to go to first blank line after the final warning or error. (look in next 100 lines)
    lastWarn <- endInputWarnErr[length(endInputWarnErr)]
    blank <- which(outfiletext[lastWarn:(lastWarn + 100)] == "")[1L] + lastWarn - 1
    warnerrtext <- outfiletext[startInputWarnErr[1L]:(blank-1)]

    #locate each "*** WARNING"/"*** ERROR" banner line; message body is the lines between banners
    lines <- friendlyGregexpr("^\\s*(\\*\\*\\* WARNING|\\*\\*\\* ERROR).*\\s*$", warnerrtext, perl=TRUE)

    if (!is.null(lines)) {
      for (l in 1:nrow(lines)) {
        if (l < nrow(lines)) {
          warn.err.body <- trimSpace(warnerrtext[(lines[l,"element"] + 1):(lines[l+1,"element"] - 1)])
        } else {
          #last banner: body runs to the end of the extracted section
          warn.err.body <- trimSpace(warnerrtext[(lines[l,"element"] + 1):length(warnerrtext)])
        }

        if (substr(lines[l,"tag"], 1, 11) == "*** WARNING") {
          warnerr$warnings[[w]] <- warn.err.body
          w <- w + 1
        } else if (substr(lines[l,"tag"], 1, 9) == "*** ERROR") {
          warnerr$errors[[e]] <- warn.err.body
          #"*** ERROR in <SECTION> command" banners: record the offending command as a
          #"section" attribute (words between "in" and "command", dot-joined, lowercased)
          splittag <- strsplit(lines[l,"tag"], "\\s+", perl=TRUE)[[1L]]
          if (length(splittag) > 3L && splittag[3L] == "in") {
            attr(warnerr$errors[[e]], "section") <- tolower(paste(splittag[4L:(which(splittag == "command") - 1L)], collapse="."))
          }
          e <- e + 1
        } else { stop ("Cannot discern warning/error type: ", lines[l, "tag"]) }
      }
    }
  }

  #now handle estimation errors and warnings
  #these fall above either
  # 1) MODEL FIT INFORMATION: model converged with warnings
  # 2) MODEL RESULTS: model did not converge, so no fit statistics produced
  # 3) FINAL CLASS COUNTS (occurs for some mixture models, which report class counts before model results)
  # 4) TESTS OF MODEL FIT (older versions of Mplus)
  #
  # It's harder to determine where the section begins, however, because there is no clear boundary
  # with the preceding section, which is heterogeneous (e.g., sample stats).
  #
  # In the case of warnings only, the estimation warnings section is demarcated by
  # THE MODEL ESTIMATION TERMINATED NORMALLY above and MODEL FIT INFORMATION below.
  #
  # In other cases (maybe dependent on Mplus version), warnings are printed above THE MODEL ESTIMATION TERMINATED NORMALLY.
  # Allow for the possibility that the estimation warnings/errors section begins with WARNING:
  #
  # For failed models, the section likely begins with one of three possibilities:
  # 1) THE MODEL ESTIMATION DID NOT TERMINATE NORMALLY
  # 2) THE LOGLIKELIHOOD DECREASED
  # 3) NO CONVERGENCE
  #
  # Warnings that can potentially be ignored are prefixed by "WARNING: "
  # whereas more serious estimation problems (errors) typically have no prefix.
  #
  # Blank lines indicate a boundary in each message.

  #the end sections are more well behaved (esp. if there is Tech 9 output). Identify end first, then constrain start to precede end
  endEstWarnErr <- grep("^\\s*(MODEL FIT INFORMATION|FINAL CLASS COUNTS|MODEL RESULTS|TESTS OF MODEL FIT)\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE)
  if (length(endEstWarnErr) == 0L) { return(warnerr) } #unable to find section properly

  startEstWarnErr <- grep("^\\s*(WARNING:.*|THE MODEL ESTIMATION DID NOT TERMINATE NORMALLY.*|THE LOGLIKELIHOOD DECREASED.*|THE MODEL ESTIMATION TERMINATED NORMALLY|NO CONVERGENCE\\.\\s+NUMBER OF ITERATIONS EXCEEDED\\..*)\\s*$", outfiletext[1:endEstWarnErr[1L]], ignore.case=TRUE, perl=TRUE)

  if (length(startEstWarnErr) > 0L && length(endEstWarnErr) > 0L) {
    warnerrtext <- outfiletext[startEstWarnErr[1L]:(endEstWarnErr[1L] - 1)]

    #if the model estimation terminated normally, delete this line from the text to parse (whereas the other start flags indicate a meaningful message)
    if (length(normexit <- grep("^\\s*THE MODEL ESTIMATION TERMINATED NORMALLY\\s*$", warnerrtext, perl=TRUE, ignore.case=TRUE)) > 0L) {
      warnerrtext <- warnerrtext[-normexit]
    }

    if (!any(warnerrtext != "")) { return(warnerr) } #no non-blank lines -- just exit function as is

    #trim blank lines from beginning and end of section
    warnerrtext <- warnerrtext[min(which(warnerrtext != "")):max(which(warnerrtext != ""))]

    #estimation warnings and errors are separated by blank lines.
    blanks <- which(warnerrtext == "")

    #trim consecutive blank lines (throw off blanks-based parsing below)
    consec <- which(diff(blanks) == 1)
    if (length(consec) > 0L) {
      warnerrtext <- warnerrtext[-1*blanks[consec]]
      blanks <- which(warnerrtext == "") #clunky
    }

    #for loop is probably clunky here, but works for now
    startMsg <- 1 #first line of a message
    for (line in 1:length(warnerrtext)) {
      #a message ends at the first blank line of a run of blanks, or at the end of the section
      if ((line %in% blanks && ! (line-1) %in% blanks) || line == length(warnerrtext)) {
        msg <- trimSpace(warnerrtext[startMsg:ifelse(line %in% blanks, line - 1, line)])
        if (grepl("^\\s*WARNING:", msg[1L], ignore.case=TRUE, perl=TRUE)) {
          warnerr$warnings[[w]] <- msg
          w <- w+1
        } else {
          warnerr$errors[[e]] <- msg #if not prefixed by WARNING:, treat as error
          e <- e + 1
        }
        startMsg <- line + 1
      }
    }
  } else { } #warning("Unable to identify estimation warnings and errors section.")

  return(warnerr)
}

#' Extract and parse Mplus input file
#'
#' Function to extract and parse mplus input syntax from the output file
#'
#' @param outfiletext The text of the output file
#' @param filename The filename
#' @return The parsed input file (mplus.inp list with start.line/end.line attributes)
#' @keywords internal
#' @examples
#' # make me!!!
extractInput_1file <- function(outfiletext, filename) {
  input <- list()
  class(input) <- c("list", "mplus.inp")

  startInput <- grep("^\\s*INPUT INSTRUCTIONS\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE)
  if (length(startInput) == 0L) {
    warning("Could not find beginning of input for: ", filename)
    attr(input, "start.line") <- attr(input, "end.line") <- -1L
    return(input)
  } else { startInput <- startInput[1L] + 1L } #skip input instructions line itself

  endInput <- grep("^\\s*(INPUT READING TERMINATED NORMALLY|\\*\\*\\* WARNING.*|\\d+ (?:ERROR|WARNING)\\(S\\) FOUND IN THE INPUT INSTRUCTIONS|\\*\\*\\* ERROR.*)\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE)
  if (length(endInput) == 0L) {
    #In Mplus v6.12 (and perhaps at some other point in the evolution), the input parser output was not included.
#In such cases, try to fall back to the first line of the TITLE: XXX line, which is reprinted after input
    title1 <- grep("\\s*TITLE:\\s*(.*)$", outfiletext[1:100], perl=TRUE) #assume it lives in first 100 lines
    #NOTE(review): the inner parentheses make this length((x <- grep(...)) == 1L), i.e. the length
    #of a logical comparison vector (= number of matches), NOT length(x) == 1L. As written, ANY
    #number of matches >= 1 passes the condition -- likely intended to require exactly one match.
    #Verify against upstream before changing.
    if (length(title1)==1L && length((endinputTitle <- grep(sub("\\s*TITLE:\\s*(.*)$", "^\\\\s*\\1\\\\s*$", outfiletext[title1], perl=TRUE), outfiletext)) == 1L)) {
      endInput <- endinputTitle - 1L
    } else {
      warning("Could not find end of input for: ", filename)
      attr(input, "start.line") <- attr(input, "end.line") <- -1
      return(input)
    }
  } else { endInput <- endInput[1L] - 1L } #one line before first warning or end of instructions

  input.text <- outfiletext[startInput[1L]:endInput[1L]] #explicit first element because there could be both warnings and errors.

  #some code adapted from mplus2lavaan prototype
  #locate the start line of each top-level input command (TITLE:, DATA:, VARIABLE:, etc.)
  inputHeaders <- grep("^\\s*(title:|data.*:|variable:|define:|analysis:|model.*:|output:|savedata:|plot:|montecarlo:)", input.text, ignore.case=TRUE, perl=TRUE)

  stopifnot(length(inputHeaders) > 0L)

  for (h in 1:length(inputHeaders)) {
    #each section runs from its header to just before the next header (or end of input)
    sectionEnd <- ifelse(h < length(inputHeaders), inputHeaders[h+1] - 1, length(input.text))
    section <- input.text[inputHeaders[h]:sectionEnd]
    sectionName <- trimSpace(sub("^([^:]+):.*$", "\\1", section[1L], perl=TRUE)) #obtain text before the colon

    #dump section name from input syntax
    section[1L] <- sub("^[^:]+:(.*)$", "\\1", section[1L], perl=TRUE)

    input[[make.names(tolower(sectionName))]] <- section
  }

  #divide some input sections into fields
  #need to do a better job here of handling blank lines and such
  input$title <- paste(trimSpace(input$title), collapse=" ")
  input$data <- divideIntoFields(input$data)
  input$data.imputation <- divideIntoFields(input$data.imputation)
  input$variable <- divideIntoFields(input$variable)
  input$analysis <- divideIntoFields(input$analysis)
  input$montecarlo <- divideIntoFields(input$montecarlo)

  #record where the input section sits in the output file (used by warning/error extraction)
  attr(input, "start.line") <- startInput
  attr(input, "end.line") <- endInput

  return(input)
}

#' Extract the summaries from
one file #' #' Description: This function parses an output file for specific model details. It returns a list of model details for a single output file. #' #' @param outfiletext This is the output file in string form to be parsed. Passed in from extractModelSummaries. #' @param filename Name of the file being parsed. Used in case of bad model, prints a warning. #' @return A list of the summaries #' @keywords internal #' @examples #' # make me!!! extractSummaries_1file <- function(outfiletext, filename, input) { #preallocates list arglist <- list() #obtain mplus software version if ((mplus.version <- regexpr("\\s*Mplus VERSION ([\\d\\.]+)\\s*", outfiletext[1L], perl=TRUE)) > 0L) { arglist$Mplus.version <- substr(outfiletext[1L], attr(mplus.version, "capture.start")[1L], attr(mplus.version, "capture.start")[1L] + attr(mplus.version, "capture.length")[1L] - 1) } ###Copy some elements of the input instructions into the summaries #copy title into arglist if (!is.null(input$title)) { arglist$Title <- input$title } else { #warning("Unable to locate title field. Returning missing") #Warning doesn't seem very useful arglist$Title <- NA_character_ } #extract the analysis type, which is important for setting other parameters. 
if (!is.null(input$analysis$type)) { arglist$AnalysisType <- input$analysis$type } else { arglist$AnalysisType <- "GENERAL" #Analysis type not specified, default to general } #extract the data type (important for detecting imputation datasets) if (!is.null(input$data$type)) { arglist$DataType <- input$data$type } else if (any(c("montecarlo", "model.population") %in% names(input))) { arglist$DataType <- "MONTECARLO" } else { arglist$DataType <- "INDIVIDUAL" #Data type not specified, default to individual } if (!is.null(input$data.imputation)) { arglist$NImputedDatasets <- input$data.imputation$ndatasets #number of imputed datasets } #End input instructions processing #BEGIN ANALYSIS SUMMARY PROCESSING analysisSummarySection <- getSection("^\\s*SUMMARY OF ANALYSIS\\s*$", outfiletext) arglist$Estimator <- extractValue(pattern="^\\s*Estimator\\s*", analysisSummarySection, filename, type="str") arglist$Observations <- extractValue(pattern="^\\s*Number of observations\\s*", analysisSummarySection, filename, type="int") # Fix for multigroup models, where Observations were not parsed correctly if(is.na(arglist$Observations)){ arglist$Observations <- extractValue(pattern="^\\s*Total sample size\\s*", analysisSummarySection, filename, type="int") } arglist$NGroups <- extractValue(pattern="^\\s*Number of groups\\s*", analysisSummarySection, filename, type="int") arglist$NDependentVars <- extractValue(pattern="^\\s*Number of dependent variables\\s*", analysisSummarySection, filename, type="int") arglist$NIndependentVars <- extractValue(pattern="^\\s*Number of independent variables\\s*", analysisSummarySection, filename, type="int") arglist$NContinuousLatentVars <- extractValue(pattern="^\\s*Number of continuous latent variables\\s*", analysisSummarySection, filename, type="int") arglist$NCategoricalLatentVars <- extractValue(pattern="^\\s*Number of categorical latent variables\\s*", analysisSummarySection, filename, type="int") arglist$InformationMatrix <- 
extractValue(pattern="^\\s*Information matrix\\s*", analysisSummarySection, filename, type="int") #END ANALYSIS SUMMARY PROCESSING #BEGIN MODEL FIT STATISTICS PROCESSING #handle EFA output, which has separate model fit sections within each file #do this by extracting model fit sections for each and using an rbind call if (grepl("(?!MIXTURE|TWOLEVEL)\\s*EFA\\s+", arglist$AnalysisType, ignore.case=TRUE, perl=TRUE)) { factorLB <- as.numeric(sub(".*EFA\\s+(\\d+).*", "\\1", arglist$AnalysisType, perl=TRUE)) factorUB <- as.numeric(sub(".*EFA\\s+\\d+\\s+(\\d+).*", "\\1", arglist$AnalysisType, perl=TRUE)) factorSeq <- seq(factorLB, factorUB) EFASections <- grep(paste("^\\s*EXPLORATORY FACTOR ANALYSIS WITH (", paste(factorSeq, collapse="|"), ") FACTOR\\(S\\):\\s*$", sep=""), outfiletext, perl=TRUE) if (!length(EFASections) > 0) stop("Unable to locate section headers for EFA model fit statistics") #need to convert from list to data.frame format to allow for proper handling of rbind below arglistBase <- as.data.frame(arglist, stringsAsFactors=FALSE) efaList <- list() for (thisFactor in 1:length(EFASections)) { #subset output by starting text to be searched at the point where factor output begins modelFitSection <- getSection_Blanklines("^(TESTS OF MODEL FIT|MODEL FIT INFORMATION)$", outfiletext[EFASections[thisFactor]:length(outfiletext)]) efaList[[thisFactor]] <- extractSummaries_1section(modelFitSection, arglistBase, filename) efaList[[thisFactor]]$NumFactors <- factorSeq[thisFactor] } arglist <- do.call(rbind, efaList) } else if (length(multisectionMatches <- grep("^\\s*MODEL FIT INFORMATION FOR (?!THE LATENT CLASS INDICATOR MODEL PART).*", outfiletext, perl=TRUE, value=TRUE)) > 0L) { #use negative lookahead to ensure we don't grab the TECH10 output for LCA where it lists model fit info for latent class part #support Mplus v8 invariance testing outputs with one model fit section per variant (MODEL FIT INFORMATION FOR THE SCALAR MODEL etc.) 
#need to convert from list to data.frame format to allow for proper handling of rbind below arglistBase <- as.data.frame(arglist, stringsAsFactors=FALSE) multiList <- list() sectionNames <- sub("^\\s*MODEL FIT INFORMATION FOR\\s+(?:THE)*\\s*([\\w\\.]+)", "\\1", multisectionMatches, perl=TRUE) for (s in 1:length(multisectionMatches)) { fitinfo <- getSection(multisectionMatches[s], outfiletext) if (!is.null(fitinfo)) { multiList[[s]] <- extractSummaries_1section(fitinfo, arglistBase, filename, input) } } arglist <- do.call(rbind, multiList) arglist$Model <- sectionNames #add model info } else { modelFitSection <- getSection("^(TESTS OF MODEL FIT|MODEL FIT INFORMATION)$", outfiletext) arglist <- extractSummaries_1section(modelFitSection, arglist, filename, input) } #CLASSIFICATION QUALITY classificationQuality <- getSection("^CLASSIFICATION QUALITY$", outfiletext) if (!is.null(classificationQuality)) arglist$Entropy <- extractValue(pattern="^\\s*Entropy\\s*", classificationQuality, filename, type="dec") #overkill #arglist <- extractSummaries_1plan(arglist, "", list(data.frame(varName="Entropy", regexPattern="Entropy", varType=c("dec"), stringsAsFactors=FALSE)), classificationQuality, filename) else arglist$Entropy <- NA_real_ #maybe try to avoid the is null logic and just have extractModelSummary correctly handle null sections #TECH11 OUTPUT: LMR LRT tech11Output <- getSection("^\\s*TECHNICAL 11 OUTPUT\\s*$", outfiletext) if (!is.null(tech11Output)) { tech11headers <- c( "Random Starts Specifications for the k-1 Class Analysis Model", "VUONG-LO-MENDELL-RUBIN LIKELIHOOD RATIO TEST FOR \\d+ \\(H0\\) VERSUS \\d+ CLASSES", "LO-MENDELL-RUBIN ADJUSTED LRT TEST" ) tech11fields <- list( data.frame( varName=c("T11_KM1Starts", "T11_KM1Final"), regexPattern=c("Number of initial stage random starts", "Number of final stage optimizations"), varType=c("int", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("T11_KM1LL", "T11_VLMR_2xLLDiff", "T11_VLMR_ParamDiff", 
"T11_VLMR_Mean", "T11_VLMR_SD", "T11_VLMR_PValue"), regexPattern=c("H0 Loglikelihood Value", "2 Times the Loglikelihood Difference", "Difference in the Number of Parameters", "Mean", "Standard Deviation", "P-Value"), varType=c("dec", "dec", "int", "dec", "dec", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("T11_LMR_Value", "T11_LMR_PValue"), regexPattern=c("^\\s*Value", "^\\s*P-Value"), varType=c("dec", "dec"), stringsAsFactors=FALSE ) ) arglist <- extractSummaries_1plan(arglist, tech11headers, tech11fields, tech11Output, filename) } tech14Output <- getSection("^\\s*TECHNICAL 14 OUTPUT\\s*$", outfiletext) if (!is.null(tech14Output)) { tech14headers <- c( "", #section-inspecific parameters "Random Starts Specifications for the k-1 Class Analysis Model", "Random Starts Specification for the k-1 Class Model for Generated Data", "Random Starts Specification for the k Class Model for Generated Data", "PARAMETRIC BOOTSTRAPPED LIKELIHOOD RATIO TEST FOR \\d+ \\(H0\\) VERSUS \\d+ CLASSES" ) tech14fields <- list( #top-level (no section) data.frame( varName=c("BLRT_RequestedDraws"), regexPattern=c("Number of bootstrap draws requested"), varType=c("str"), stringsAsFactors=FALSE ), #Random Starts Specifications for the k-1 Class Analysis Model data.frame( varName=c("BLRT_KM1AnalysisStarts", "BLRT_KM1AnalysisFinal"), regexPattern=c("Number of initial stage random starts", "Number of final stage optimizations"), varType=c("int", "int"), stringsAsFactors=FALSE ), #Random Starts Specification for the k-1 Class Model for Generated Data #v7 format: Number of final stage optimizations for the\n initial stage random starts <N> #v6 format: Number of final stage optimizations <N> #Thus, include the genfinal twice here to catch both circumstances data.frame( varName=c("BLRT_KM1GenStarts", "BLRT_KM1GenFinal", "BLRT_KM1GenFinal"), regexPattern=c("Number of initial stage random starts", "+1:Number of final stage optimizations for the", "Number of final stage optimizations"), 
varType=c("int", "int", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("BLRT_KGenStarts", "BLRT_KGenFinal"), regexPattern=c("Number of initial stage random starts", "Number of final stage optimizations"), varType=c("int", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("BLRT_KM1LL", "BLRT_2xLLDiff", "BLRT_ParamDiff", "BLRT_PValue", "BLRT_SuccessfulDraws"), regexPattern=c("H0 Loglikelihood Value", "2 Times the Loglikelihood Difference", "Difference in the Number of Parameters", "Approximate P-Value", "Successful Bootstrap Draws"), varType=c("dec", "dec", "int", "dec", "int"), stringsAsFactors=FALSE ) ) arglist <- extractSummaries_1plan(arglist, tech14headers, tech14fields, tech14Output, filename) } #calculate adjusted AIC per Burnham & Anderson(2004), which is better than AIC for non-nested model selection #handle AICC calculation, requires AIC, Parameters, and observations if (!is.null(arglist$Parameters) && !is.na(arglist$Parameters) && !is.null(arglist$AIC) && !is.na(arglist$AIC) && !is.null(arglist$Observations) && !is.na(arglist$Observations)) { arglist$AICC <- arglist$AIC + (2*arglist$Parameters*(arglist$Parameters+1))/(arglist$Observations-arglist$Parameters-1) } else { arglist$AICC <- NA_real_ } #Only warn about missing LL for ML-based estimators #too convoluted to maintain (and not so useful), generating errors I don't want to debug # if ("Estimator" %in% extract && "LL" %in% extract # && !is.na(arglist$Estimator) && arglist$Estimator %in% c("ML", "MLR", "MLM", "MLMV", "MLF") # && ((grepl("imputation", arglist$DataType, ignore.case=TRUE) && is.na(arglist$LL_Mean)) # || (!grepl("imputation", arglist$DataType, ignore.case=TRUE) && is.na(arglist$LL)))) # warning("Model missing LL value, despite use of ML-based estimator. Likely a failed run.\n ", filename) # #for now, skip including input instructions in the returned data.frame. Makes the output too cluttered. 
#arglist$InputInstructions <- paste((outfiletext[(startInput+1):(endInput-1)]), collapse="\n") arglist$Filename <- splitFilePath(filename)$filename #only retain filename, not path arglist <- as.data.frame(arglist, stringsAsFactors=FALSE) class(arglist) <- c("data.frame", "mplus.summaries") attr(arglist, "filename") <- arglist$Filename return(arglist) } #' Extract summary statistics from a single output file or from a group of Mplus models within a directory #' #' Parses a group of Mplus model output files (.out extension) for model fit statistics. #' At this time, the details extracted are fixed and include: \code{Filename, InputInstructions, Title, Estimator, #' LL, BIC, aBIC, AIC, AICC, Parameters, Observations, CFI, TLI, RMSEA_Estimate, RMSEA_90CI_LB, RMSEA_90CI_UB, #' RMSEA_pLT05, ChiSqM_Value, ChiSqM_DF, ChiSq_PValue, BLRT_KM1LL, BLRT_PValue, BLRT_Numdraws)}. The #' infrastructure is in place to allow for user-specified selection of summary statistics in future versions. #' #' @param target the directory containing Mplus output files (.out) to parse OR the #' single output file to be parsed. Defaults to the current working directory. #' Example: "C:/Users/Michael/Mplus Runs" #' @param recursive optional. If \code{TRUE}, parse all models nested in #' subdirectories within \code{directory}. Defaults to \code{FALSE}. #' @param filefilter a Perl regular expression (PCRE-compatible) specifying particular #' output files to be parsed within \code{directory}. See \code{regex} or #' \url{http://www.pcre.org/pcre.txt} for details about regular expression syntax. #' #' @return Returns a \code{data.frame} containing model fit statistics for all output files within \code{directory}. 
#' The \code{data.frame} contains some of the following variables (depends on model type):
#' \item{Title}{Title for the model, specified by the TITLE: command}
#' \item{Filename}{Filename of the output file}
#' \item{Estimator}{Estimator used for the model (e.g., ML, MLR, WLSMV, etc.)}
#' \item{LL}{Log-likelihood of the model}
#' \item{BIC}{Bayesian Information Criterion}
#' \item{aBIC}{Sample-Size-Adjusted BIC (Sclove, 1987)}
#' \item{AIC}{Akaike's Information Criterion}
#' \item{AICC}{Corrected AIC, based on Sugiura (1978) and recommended by Burnham & Anderson (2002)}
#' \item{DIC}{Deviance Information Criterion. Available in ESTIMATOR=BAYES output.}
#' \item{Parameters}{Number of parameters estimated by the model}
#' \item{pD}{Estimated number of parameters in Bayesian output}
#' \item{Observations}{The number of observations for the model (does not support multiple-groups analysis at this time)}
#' \item{CFI}{Confirmatory Fit Index}
#' \item{TLI}{Tucker-Lewis Index}
#' \item{RMSEA_Estimate}{Point estimate of root mean squared error of approximation}
#' \item{RMSEA_90CI_LB}{Lower bound of the 90\% Confidence Interval around the RMSEA estimate.}
#' \item{RMSEA_90CI_UB}{Upper bound of the 90\% Confidence Interval around the RMSEA estimate.}
#' \item{RMSEA_pLT05}{Probability that the RMSEA estimate falls below .05, indicating good fit.}
#' \item{ChiSqM_Value}{Model chi-squared value}
#' \item{ChiSqM_DF}{Model chi-squared degrees of freedom}
#' \item{ChiSqM_PValue}{Model chi-squared p value}
#' \item{ChiSqM_ScalingCorrection}{H0 Scaling Correction Factor}
#' \item{ObsRepChiSqDiff_95CI_LB}{Lower bound of 95\% confidence interval for the difference between observed and replicated chi-square values}
#' \item{ObsRepChiSqDiff_95CI_UB}{Upper bound of 95\% confidence interval for the difference between observed and replicated chi-square values}
#' \item{PostPred_PValue}{Posterior predictive p-value}
#' \item{PriorPostPred_PValue}{Prior Posterior Predictive P-Value}
#' \item{BLRT_RequestedDraws}{Number of requested bootstrap draws for TECH14.}
#' \item{BLRT_KM1LL}{Log-likelihood of the K-1 model (one less class) for the Bootstrapped Likelihood Ratio Test (TECH14).}
#' \item{BLRT_2xLLDiff}{Two times the log-likelihood difference of the models with K and K-1 classes (TECH14).}
#' \item{BLRT_ParamDiff}{Difference in the number of parameters for models with K and K-1 classes (TECH14).}
#' \item{BLRT_PValue}{P-value of the Bootstrapped Likelihood Ratio Test (TECH14) testing whether the K class model is significantly better than K-1}
#' \item{BLRT_SuccessfulDraws}{The number of successful bootstrapped samples used in the Bootstrapped Likelihood Ratio Test}
#' \item{SRMR}{Standardized root mean square residual}
#' \item{SRMR.Between}{For TYPE=TWOLEVEL output, standardized root mean square residual for between level}
#' \item{SRMR.Within}{For TYPE=TWOLEVEL output, standardized root mean square residual for within level}
#' \item{WRMR}{Weighted root mean square residual}
#' \item{ChiSqBaseline_Value}{Baseline (unstructured) chi-squared value}
#' \item{ChiSqBaseline_DF}{Baseline (unstructured) chi-squared degrees of freedom}
#' \item{ChiSqBaseline_PValue}{Baseline (unstructured) chi-squared p value}
#' \item{NumFactors}{For TYPE=EFA output, the number of factors}
#' \item{T11_KM1Starts}{TECH11: Number of initial stage random starts for k-1 model}
#' \item{T11_KM1Final}{TECH11: Number of final stage optimizations for k-1 model}
#' \item{T11_KM1LL}{TECH11: Log-likelihood of the K-1 model used for the Vuong-Lo-Mendell-Rubin LRT}
#' \item{T11_VLMR_2xLLDiff}{TECH11: 2 * Log-likelihood Difference of K-class vs. K-1-class model for the Vuong-Lo-Mendell-Rubin LRT}
#' \item{T11_VLMR_ParamDiff}{TECH11: Difference in number of parameters between K-class and K-1-class model for the Vuong-Lo-Mendell-Rubin LRT}
#' \item{T11_VLMR_Mean}{TECH11: Vuong-Lo-Mendell-Rubin LRT mean}
#' \item{T11_VLMR_SD}{TECH11: Vuong-Lo-Mendell-Rubin LRT standard deviation}
#' \item{T11_VLMR_PValue}{TECH11: Vuong-Lo-Mendell-Rubin LRT p-value}
#' \item{T11_LMR_Value}{TECH11: Lo-Mendell-Rubin Adjusted LRT value}
#' \item{T11_LMR_PValue}{TECH11: Lo-Mendell-Rubin Adjusted LRT p-value}
#'
#' @author Michael Hallquist
#' @seealso \code{\link{regex}}, \code{\link{runModels}}, \code{\link{readModels}}
#' @keywords interface
#' @export
#' @examples
#' \dontrun{
#' allExamples <- extractModelSummaries(
#'   "C:/Program Files/Mplus/Mplus Examples/User's Guide Examples")
#' }
extractModelSummaries <- function(target=getwd(), recursive=FALSE, filefilter) {
  # Deprecated entry point: the historical implementation is retained below in
  # commented-out form for reference; the function now only emits a message
  # pointing users at readModels(), which supersedes it.
  #message("This function is deprecated and will be removed from future versions of MplusAutomation. Please use readModels() instead.")
  message("extractModelSummaries has been deprecated. Please use readModels(\"nameofMplusoutfile.out\", what=\"summaries\")$summaries to replicate the old functionality.")

  #retain working directory and reset at end of run
  # curdir <- getwd()
  #
  # outfiles <- getOutFileList(target, recursive, filefilter)
  #
  # details <- list()
  #
  # #for each output file, use the extractSummaries_1file function to extract relevant data
  # #note that extractSummaries_1file returns data as a list
  # #rbind creates an array of lists by appending each extractSummaries_1file return value
  # for (i in 1:length(outfiles)) {
  #   #read the file
  #   readfile <- scan(outfiles[i], what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE)
  #
  #   #bomb out for EFA files
  #   if (length(grep("TYPE\\s+(IS|=|ARE)\\s+((MIXTURE|TWOLEVEL)\\s+)+EFA\\s+\\d+", readfile, ignore.case=TRUE, perl=TRUE)) > 0) {
  #     warning(paste0("EFA, MIXTURE EFA, and TWOLEVEL EFA files are not currently supported by extractModelSummaries.\n  Skipping outfile: ", outfiles[i]))
  #     next #skip file
  #   }
  #
  #   #append params for this file to the details array
  #   #note that this is a memory-inefficient solution because of repeated copying. Better to pre-allocate.
  #   # inp <- extractInput_1file(readfile, outfiles[i])
  #   details[[i]] <- extractSummaries_1file(readfile, outfiles[i], inp)
  # }
  #
  # #if there are several output files, then use rbind.fill to align fields
  # if (length(details) > 1L) details <- do.call(rbind.fill, details)
  # else details <- details[[1L]]
  #
  # #reset working directory
  # setwd(curdir)
  #
  # #cleanup columns containing only NAs
  # for (col in names(details)) {
  #   if (all(is.na(details[[col]]))) details[[col]] <- NULL
  # }
  #
  # return(details)
}

#' Add header to saved data
#'
#' Stub: intended to add a variable-name header to an Mplus SAVEDATA file.
#' Not yet implemented; the function body is currently empty.
#'
#' @param outfile The output file
#' @param directory The current working directory by default
#' @return NULL
#' @keywords internal
#' @examples
#' # make me!!!
addHeaderToSavedata <- function(outfile, directory=getwd()) { } #stub: not yet implemented (see roxygen above)

#' Extract residual matrices
#'
#' Function that extracts the residual matrices including standardized ones
#' from the RESIDUAL OUTPUT section of an Mplus output file. In multiple-group
#' models, one sub-list is returned per group, keyed by a sanitized group name.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of the residual matrices (class \code{mplus.residuals}); an
#'   empty list if the output contains no RESIDUAL OUTPUT section.
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractResiduals <- function(outfiletext, filename) {
  residSection <- getSection("^RESIDUAL OUTPUT$", outfiletext)
  if (is.null(residSection)) return(list()) #no residuals output

  #allow for multiple groups: each group gets its own
  #"ESTIMATED MODEL AND RESIDUALS (OBSERVED - ESTIMATED) FOR <group>" subsection
  residSubsections <- getMultilineSection("ESTIMATED MODEL AND RESIDUALS \\(OBSERVED - ESTIMATED\\)( FOR [\\w\\d\\s\\.,_]+)*", residSection, filename, allowMultiple=TRUE)
  matchlines <- attr(residSubsections, "matchlines")

  if (length(residSubsections) == 0) {
    warning("No sections found within residuals output.")
    return(list())
  } else if (length(residSubsections) > 1) groupNames <- make.names(gsub("^\\s*ESTIMATED MODEL AND RESIDUALS \\(OBSERVED - ESTIMATED\\)( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", residSection[matchlines], perl=TRUE)) #capture group 2 = group label from the section header

  residList <- list()
  #multiple groups possible; each iteration fills one group's set of matrices
  for (g in 1:length(residSubsections)) {
    targetList <- list()

    #the (/Intercepts/Thresholds) and (/Correlations/...) alternations cover the
    #header variants Mplus emits for categorical/threshold models
    targetList[["meanEst"]] <- matrixExtract(residSubsections[[g]], "Model Estimated Means(/Intercepts/Thresholds)*", filename)
    targetList[["meanResid"]] <- matrixExtract(residSubsections[[g]], "Residuals for Means(/Intercepts/Thresholds)*", filename)
    targetList[["meanResid.std"]] <- matrixExtract(residSubsections[[g]], "Standardized Residuals \\(z-scores\\) for Means(/Intercepts/Thresholds)*", filename)
    targetList[["meanResid.norm"]] <- matrixExtract(residSubsections[[g]], "Normalized Residuals for Means(/Intercepts/Thresholds)*", filename)
    targetList[["covarianceEst"]] <- matrixExtract(residSubsections[[g]], "Model Estimated Covariances(/Correlations/Residual Correlations)*", filename)
    targetList[["covarianceResid"]] <- matrixExtract(residSubsections[[g]], "Residuals for Covariances(/Correlations/Residual Correlations)*", filename)
    targetList[["covarianceResid.std"]] <- matrixExtract(residSubsections[[g]], "Standardized Residuals \\(z-scores\\) for Covariances(/Correlations/Residual Corr)*", filename)
    targetList[["covarianceResid.norm"]] <- matrixExtract(residSubsections[[g]], "Normalized Residuals for Covariances(/Correlations/Residual Correlations)*", filename)
    targetList[["slopeEst"]] <- matrixExtract(residSubsections[[g]], "Model Estimated Slopes", filename)
    targetList[["slopeResid"]] <- matrixExtract(residSubsections[[g]], "Residuals for Slopes", filename)

    if (length(residSubsections) > 1) {
      #multi-group: nest this group's matrices under its group name
      class(targetList) <- c("list", "mplus.residuals")
      residList[[groupNames[g]]] <- targetList
    } else residList <- targetList #single group: matrices live at the top level
  }

  class(residList) <- c("list", "mplus.residuals")
  if (length(residSubsections) > 1) attr(residList, "group.names") <- groupNames

  return(residList)
}

#' Extract Technical 1 matrix from Mplus
#'
#' Function that extracts the Tech1 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech1}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech1 <- function(outfiletext, filename) {
  # Parse the TECHNICAL 1 OUTPUT section into two parallel structures:
  # parameter specification matrices and starting value matrices
  # (TAU, NU, LAMBDA, THETA, ...), each optionally split by group.
  tech1Section <- getSection("^TECHNICAL 1 OUTPUT$", outfiletext)
  if (is.null(tech1Section)) return(list()) #no tech1 output

  tech1List <- list()

  paramSpecSubsections <- getMultilineSection("PARAMETER SPECIFICATION( FOR [\\w\\d\\s\\.,_]+)*", tech1Section, filename, allowMultiple=TRUE)
  matchlines <- attr(paramSpecSubsections, "matchlines")

  paramSpecList <- list()
  if (length(paramSpecSubsections) == 0) warning ("No parameter specification sections found within TECH1 output.") #fixed typo: "specfication"
  else if (length(paramSpecSubsections) > 1) groupNames <- make.names(gsub("^\\s*PARAMETER SPECIFICATION( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", tech1Section[matchlines], perl=TRUE))
  else #just one section, no groups
    groupNames <- ""

  #seq_along (rather than 1:length) skips the loop cleanly when no subsections were found,
  #since the warning branch above does not return early
  for (g in seq_along(paramSpecSubsections)) {
    targetList <- list()

    targetList[["tau"]] <- matrixExtract(paramSpecSubsections[[g]], "TAU", filename)
    targetList[["nu"]] <- matrixExtract(paramSpecSubsections[[g]], "NU", filename)
    targetList[["lambda"]] <- matrixExtract(paramSpecSubsections[[g]], "LAMBDA", filename)
    targetList[["theta"]] <- matrixExtract(paramSpecSubsections[[g]], "THETA", filename)
    targetList[["alpha"]] <- matrixExtract(paramSpecSubsections[[g]], "ALPHA", filename)
    targetList[["beta"]] <- matrixExtract(paramSpecSubsections[[g]], "BETA", filename)
    targetList[["gamma"]] <- matrixExtract(paramSpecSubsections[[g]], "GAMMA", filename)
    targetList[["psi"]] <- matrixExtract(paramSpecSubsections[[g]], "PSI", filename)
    targetList[["delta"]] <- matrixExtract(paramSpecSubsections[[g]], "DELTA", filename)
    targetList[["gamma.c"]] <- matrixExtract(paramSpecSubsections[[g]], "GAMMA\\(C\\)", filename)
    targetList[["alpha.c"]] <- matrixExtract(paramSpecSubsections[[g]], "ALPHA\\(C\\)", filename)
    targetList[["new_additional"]] <- matrixExtract(paramSpecSubsections[[g]], "NEW/ADDITIONAL PARAMETERS", filename)

    #latent class indicator part includes subsections for each latent class, such as class-varying thresholds
    if (groupNames[g] == "LATENT.CLASS.INDICATOR.MODEL.PART") {
      tauLines <- grep("TAU\\(U\\) FOR LATENT CLASS \\d+", paramSpecSubsections[[g]], perl=TRUE, value=TRUE)
      uniqueLC <- unique(gsub("^\\s*TAU\\(U\\) FOR LATENT CLASS (\\d+)\\s*$", "\\1", tauLines, perl=TRUE))
      for (lc in uniqueLC) {
        targetList[[paste0("tau.u.lc", lc)]] <- matrixExtract(paramSpecSubsections[[g]], paste0("TAU\\(U\\) FOR LATENT CLASS ", lc), filename)
      }
    }

    if (length(paramSpecSubsections) > 1) {
      class(targetList) <- c("list", "mplus.parameterSpecification")
      paramSpecList[[groupNames[g]]] <- targetList
    } else paramSpecList <- targetList
  }

  class(paramSpecList) <- c("list", "mplus.parameterSpecification")
  if (length(paramSpecSubsections) > 1) attr(paramSpecList, "group.names") <- groupNames

  #second pass: same matrix set, but for the STARTING VALUES subsections
  startValSubsections <- getMultilineSection("STARTING VALUES( FOR [\\w\\d\\s\\.,_]+)*", tech1Section, filename, allowMultiple=TRUE)
  matchlines <- attr(startValSubsections, "matchlines")

  startValList <- list()
  if (length(startValSubsections) == 0) warning ("No starting value sections found within TECH1 output.")
  else if (length(startValSubsections) > 1) groupNames <- make.names(gsub("^\\s*STARTING VALUES( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", tech1Section[matchlines], perl=TRUE))
  else groupNames <- ""

  for (g in seq_along(startValSubsections)) {
    targetList <- list()

    targetList[["tau"]] <- matrixExtract(startValSubsections[[g]], "TAU", filename)
    targetList[["nu"]] <- matrixExtract(startValSubsections[[g]], "NU", filename)
    targetList[["lambda"]] <- matrixExtract(startValSubsections[[g]], "LAMBDA", filename)
    targetList[["theta"]] <- matrixExtract(startValSubsections[[g]], "THETA", filename)
    targetList[["alpha"]] <- matrixExtract(startValSubsections[[g]], "ALPHA", filename)
    targetList[["beta"]] <- matrixExtract(startValSubsections[[g]], "BETA", filename)
    targetList[["gamma"]] <- matrixExtract(startValSubsections[[g]], "GAMMA", filename)
    targetList[["psi"]] <- matrixExtract(startValSubsections[[g]], "PSI", filename)
    targetList[["delta"]] <- matrixExtract(startValSubsections[[g]], "DELTA", filename)
    targetList[["gamma.c"]] <- matrixExtract(startValSubsections[[g]], "GAMMA\\(C\\)", filename)
    targetList[["alpha.c"]] <- matrixExtract(startValSubsections[[g]], "ALPHA\\(C\\)", filename)
    targetList[["new_additional"]] <- matrixExtract(startValSubsections[[g]], "NEW/ADDITIONAL PARAMETERS", filename)

    #latent class indicator part includes subsections for each latent class, such as class-varying thresholds
    if (groupNames[g] == "LATENT.CLASS.INDICATOR.MODEL.PART") {
      tauLines <- grep("TAU\\(U\\) FOR LATENT CLASS \\d+", startValSubsections[[g]], perl=TRUE, value=TRUE)
      uniqueLC <- unique(gsub("^\\s*TAU\\(U\\) FOR LATENT CLASS (\\d+)\\s*$", "\\1", tauLines, perl=TRUE))
      for (lc in uniqueLC) {
        targetList[[paste0("tau.u.lc", lc)]] <- matrixExtract(startValSubsections[[g]], paste0("TAU\\(U\\) FOR LATENT CLASS ", lc), filename)
      }
    }

    if (length(startValSubsections) > 1) {
      class(targetList) <- c("list", "mplus.startingValues")
      startValList[[groupNames[g]]] <- targetList
    } else startValList <- targetList
  }

  class(startValList) <- c("list", "mplus.startingValues")
  if (length(startValSubsections) > 1) attr(startValList, "group.names") <- groupNames

  tech1List <- list(parameterSpecification=paramSpecList, startingValues=startValList)
  class(tech1List) <- c("list", "mplus.tech1")
  return(tech1List)
}

#' Extract sample statistics from Mplus output
#'
#' Parses the SAMPLE STATISTICS section (or RESULTS FOR BASIC ANALYSIS for
#' TYPE=BASIC runs) into matrices of means, covariances, correlations, etc.,
#' optionally split by group. Also extracts univariate proportions/counts for
#' categorical variables and the UNIVARIATE SAMPLE STATISTICS table when present.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.sampstat}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractSampstat <- function(outfiletext, filename) {
  sampstatSection <- getSection("^SAMPLE STATISTICS$", outfiletext)
  if (is.null(sampstatSection)) {
    #try output from TYPE=BASIC, which places these in a section of a different name
    sampstatSection <- getSection("^RESULTS FOR BASIC ANALYSIS$", outfiletext)
  }
  if (!is.null(sampstatSection) && all(sampstatSection == "")) {
    #empty section body: the statistics live under the next header; re-slice outfiletext
    #between the next two header lines to recover them
    first_line <- (attr(outfiletext, "headerlines")[attr(outfiletext, "headerlines") > tail(attr(sampstatSection, "lines"), 1)][1]+1)
    final_line <- (attr(outfiletext, "headerlines")[attr(outfiletext, "headerlines") > tail(attr(sampstatSection, "lines"), 1)][2]-1)
    sampstatSection <- outfiletext[first_line:final_line]
  }
  sampstatList <- list()

  sampstatSubsections <- getMultilineSection("ESTIMATED SAMPLE STATISTICS( FOR [\\w\\d\\s\\.,_]+)*", sampstatSection, filename, allowMultiple=TRUE)
  matchlines <- attr(sampstatSubsections, "matchlines")
  #getMultilineSection returns NA (not a list) when no subsection header matched.
  #Guard with is.list() first: is.na() on a multi-element list yields a length > 1
  #condition, which is an error in if() as of R 4.2 (same guard pattern as the
  #countSubsections check below).
  if (!is.list(sampstatSubsections) && is.na(sampstatSubsections[1])) {
    sampstatSubsections <- list(sampstatSection)
    matchlines <- attr(sampstatSubsections, "lines")
  }
  if (length(sampstatSubsections) == 0) warning ("No sample statistics sections found within SAMPSTAT output.")
  else if (length(sampstatSubsections) > 1) groupNames <- make.names(gsub("^\\s*ESTIMATED SAMPLE STATISTICS( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", sampstatSection[matchlines], perl=TRUE))
  else #just one section, no groups
    groupNames <- ""

  for (g in seq_along(sampstatSubsections)) { #seq_along: no iterations if no subsections
    targetList <- list()

    targetList[["means"]] <- matrixExtract(sampstatSubsections[[g]], "Means", filename)
    targetList[["covariances"]] <- matrixExtract(sampstatSubsections[[g]], "Covariances", filename)
    targetList[["correlations"]] <- matrixExtract(sampstatSubsections[[g]], "Correlations", filename)
    targetList[["correlations.vardiag"]] <- matrixExtract(sampstatSubsections[[g]], "CORRELATION MATRIX \\(WITH VARIANCES ON THE DIAGONAL\\)", filename, ignore.case=TRUE)
    #these seem to show up in DATA: TYPE=IMPUTATION outputs (e.g., ex11.8part2.out)
    targetList[["means.intercepts.thresholds"]] <- matrixExtract(sampstatSubsections[[g]], "Means/Intercepts/Thresholds", filename, ignore.case=TRUE)
    targetList[["within.level.variance.covariance"]] <- matrixExtract(sampstatSubsections[[g]], "WITHIN LEVEL VARIANCE/COVARIANCE", filename, ignore.case=TRUE)
    targetList[["within.level.correlation"]] <- matrixExtract(sampstatSubsections[[g]], "WITHIN LEVEL CORRELATION", filename, ignore.case=TRUE)
    targetList[["between.level.variance.covariance"]] <- matrixExtract(sampstatSubsections[[g]], "BETWEEN LEVEL VARIANCE/COVARIANCE", filename, ignore.case=TRUE)
    targetList[["between.level.correlation"]] <- matrixExtract(sampstatSubsections[[g]], "BETWEEN LEVEL CORRELATION", filename, ignore.case=TRUE)
    #I think these are only in older outputs
    targetList[["covariances.correlations.resid_correlations"]] <- matrixExtract(sampstatSubsections[[g]], "Covariances/Correlations/Residual Correlations", filename)
    targetList[["slopes"]] <- matrixExtract(sampstatSubsections[[g]], "Slopes", filename)

    if (length(sampstatSubsections) > 1) {
      class(targetList) <- c("list", "mplus.sampstat")
      sampstatList[[groupNames[g]]] <- targetList
    } else {
      sampstatList <- targetList
    }
  }

  ##Extract Univariate counts and proportions
  univariateCountsSection <- getSection("^UNIVARIATE PROPORTIONS AND COUNTS FOR CATEGORICAL VARIABLES$", outfiletext)

  #remove warning lines, which throw off the parser (e.g., ex6.15.out)
  univariateCountsSection <- univariateCountsSection[!grepl("\\s*WARNING:.*", univariateCountsSection, perl=TRUE)]

  if (!is.null(univariateCountsSection)) {
    countSubsections <- getMultilineSection("Group\\s+([\\w\\d\\.,_]+)*", univariateCountsSection, filename, allowMultiple=TRUE)
    matchlines <- attr(countSubsections, "matchlines")
    if (!is.list(countSubsections) && is.na(countSubsections[1])) {
      countSubsections <- list(univariateCountsSection) #no sublists by group
    } else if (length(countSubsections) > 1) groupNames <- make.names(gsub("^\\s*Group\\s+([\\w\\d\\s\\.,_]+)\\s*$", "\\1", univariateCountsSection[matchlines], perl=TRUE))
    else #just one section, no groups
      stop("not sure how we got here")

    for (g in seq_along(countSubsections)) {
      targetList <- list()
      #each parsed line is "<var>.Cat.<k> <proportion> <count>"
      df <- data.frame(do.call(rbind, strsplit(trimSpace(parseCatOutput(countSubsections[[g]])), "\\s+", perl=TRUE)), stringsAsFactors=FALSE)
      names(df) <- c("variable", "proportion", "count")
      df$proportion <- as.numeric(df$proportion)
      df$count <- as.numeric(df$count)
      #divide variable column into variable and category for clarity
      df$category <- as.numeric(sub(".*\\.Cat\\.(\\d+)", "\\1", df$variable, perl=TRUE))
      df$variable <- sub("^(.*)\\.Cat\\.\\d+$", "\\1", df$variable, perl=TRUE)
      df <- df[,c("variable", "category", "proportion", "count")] #reorder df

      targetList <- df #just a single element at the moment
      class(targetList) <- c("data.frame", "mplus.propcounts.data.frame")

      if (length(countSubsections) > 1) {
        sampstatList[[groupNames[g]]][["proportions.counts"]] <- targetList
      } else sampstatList[["proportions.counts"]] <- targetList
    }
  }

  # Extract univariate sample statistics ------------------------------------
  univariate_sampstat <- getSection("^UNIVARIATE SAMPLE STATISTICS$", outfiletext)
  if (!is.null(univariate_sampstat)) {
    #each variable occupies two wrapped rows ending in digits; pair them back up
    stats <- lapply(univariate_sampstat[grepl("\\d$", univariate_sampstat)], function(x){strsplit(trimws(x), split = "\\s+")[[1]]})
    if (length(stats) %% 2 == 0) {
      out <- cbind(do.call(rbind, stats[seq(1, length(stats), by = 2)]), do.call(rbind, stats[seq(2, length(stats), by = 2)]))
      var_names <- out[, 1]
      out <- gsub("%", "", out)
      out <- apply(out[, -1], 2, as.numeric)
      #column order below matches the fixed layout of the Mplus table
      colnames(out) <- c("Mean", "Skewness", "Minimum", "%Min", "20%", "40%", "Median", "Sample Size", "Variance", "Kurtosis", "Maximum", "%Max", "60%", "80%")
      rownames(out) <- var_names
      sampstatList$univariate.sample.statistics <- out[, c("Sample Size", "Mean", "Variance", "Skewness", "Kurtosis", "Minimum", "Maximum", "%Min", "%Max", "20%", "40%", "Median", "60%", "80%")]
    }
  }

  class(sampstatList) <- c("list", "mplus.sampstat")
  if (length(sampstatSubsections) > 1) attr(sampstatList, "group.names") <- groupNames
  return(sampstatList)
}

#' Extract covariance coverage from Mplus output
#'
#' Parses the COVARIANCE COVERAGE OF DATA section (proportion of data present
#' for each variable pair), optionally split by group.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A coverage matrix, or for multiple-group models a list of matrices
#'   keyed by group name (class \dQuote{mplus.covcoverage} in both cases).
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractCovarianceCoverage <- function(outfiletext, filename) {
  #TODO: Return type is sometimes list, sometimes matrix; a bit inconsistent
  covcoverageSection <- getSection("^COVARIANCE COVERAGE OF DATA$", outfiletext)
  if (is.null(covcoverageSection)) { return(list()) } #no COVARIANCE COVERAGE OF DATA output

  covcoverageList <- list()

  covcoverageSubsections <- getMultilineSection("PROPORTION OF DATA PRESENT( FOR [\\w\\d\\s\\.,_]+)*", covcoverageSection, filename, allowMultiple=TRUE)
  matchlines <- attr(covcoverageSubsections, "matchlines")

  #getMultilineSection returns NA (not a list) on no match; the is.list guard avoids
  #a length > 1 condition from is.na() on a multi-group list (error in R >= 4.2/4.3)
  if (length(covcoverageSubsections) == 0 ||
      (!is.list(covcoverageSubsections) && is.na(covcoverageSubsections[1]))) { #See UG ex9.7.out
    message("No PROPORTION OF DATA PRESENT sections found within COVARIANCE COVERAGE OF DATA output.")
    return(covcoverageList)
  } else if (length(covcoverageSubsections) > 1) {
    groupNames <- make.names(gsub("^\\s*PROPORTION OF DATA PRESENT( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", covcoverageSection[matchlines], perl=TRUE))
  } else { #just one section, no groups
    groupNames <- ""
  }

  for (g in seq_along(covcoverageSubsections)) {
    #for now, there is just one matrix extracted, so no need to label it or treat it
    #as a list. Leaving scaffolding commented out if useful later
    #targetList <- list()
    #targetList[["covcoverage"]] <- matrixExtract(covcoverageSubsections[[g]], "Covariance Coverage", filename)
    targetList <- matrixExtract(covcoverageSubsections[[g]], "Covariance Coverage", filename)

    if (length(covcoverageSubsections) > 1) {
      #class(targetList) <- c("list", "mplus.covcoverage")
      covcoverageList[[groupNames[g]]] <- targetList
    } else covcoverageList <- targetList
  }

  if (is.list(covcoverageList)) {
    class(covcoverageList) <- c("list", "mplus.covcoverage")
  } else {
    class(covcoverageList) <- c("matrix", "mplus.covcoverage") #single numeric matrix
  }

  if (length(covcoverageSubsections) > 1) { attr(covcoverageList, "group.names") <- groupNames }

  return(covcoverageList)
}

#' Extract free file output
#'
#' Function for reading "free" output where a sequence of values populates a
#' symmetric matrix (lower triangle plus diagonal stored in column-major order).
#'
#' @param filename The name of the output file (the saved/free-format data file)
#' @param outfile The Mplus output file; its directory is used to resolve
#'   \code{filename} when the latter has no absolute path
#' @param make_symmetric A logical indicating whether or not to make the matrix symmetric, defaults to \code{TRUE}
#' @return a matrix, or \code{NULL} if the file is missing/empty or its length
#'   is not consistent with a symmetric matrix
#' @keywords internal
#' @examples
#' # make me!!!
extractFreeFile <- function(filename, outfile, make_symmetric=TRUE) {
  #Adapted from code graciously provided by Joe Glass.
  if (isEmpty(filename)) return(NULL)

  #TODO: make this filename building into a function (duped from read raw)
  outfileDirectory <- splitFilePath(outfile)$directory
  savedataSplit <- splitFilePath(filename)

  #if outfile target directory is non-empty, but savedataFile is without directory, then append
  #outfile directory to savedataFile. This ensures that R need not be in the working directory
  #to read the savedataFile. But if savedataFile has an absolute directory, don't append

  #if savedata directory is present and absolute, or if no directory in outfile, just use filename as is
  if (!is.na(savedataSplit$directory) && savedataSplit$absolute) savedataFile <- filename #just use savedata filename if has absolute path
  else if (is.na(outfileDirectory)) savedataFile <- filename #just use savedata filename if outfile is missing path (working dir)
  else savedataFile <- file.path(outfileDirectory, filename) #savedata path relative or absent and outfile dir is present

  if (!file.exists(savedataFile)) {
    warning("Cannot read file: ", filename)
    return(NULL)
  }

  values <- scan(savedataFile, what="character", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE)

  matrix.size <- function(x) {
    # per algebra of quadratic equations: p is the # of rows & columns in a symmetric
    # matrix given x unique covariance elements (the lower triangle plus diagonal).
    # This was constructed from the equation x = p(p+1)/2.
    p <- (-1/2) + sqrt(2*x + (1/4))
    # if p is not an integer, having x elements does not result in a symmetric matrix
    p.isinteger <- !length(grep("[^[:digit:]]", as.character(p)))
    if (p.isinteger) {
      return(p)
    } else {
      cat("The length of the supplied vector is not appropriate to generate the matrix. Please check the data file.")
      return(NULL)
    }
  }

  matSize <- matrix.size(length(values))
  #bail out explicitly on non-triangular lengths; previously fell through to
  #matrix(nrow=NULL, ...) and crashed with an obscure "1:NULL" error
  if (is.null(matSize)) return(NULL)

  mat <- matrix(NA_real_, nrow=matSize, ncol=matSize, dimnames=list(1:matSize, 1:matSize)) # create empty symmetric matrix
  mat[upper.tri(mat, diag=TRUE)] <- as.numeric(values) # import savedata information into the upper triangle (plus diagonal) of the matrix

  if (make_symmetric) {
    mat[lower.tri(mat)] <- t(mat)[lower.tri(mat)] #populate lower triangle
  } else {
    mat <- t(mat) # transpose the matrix to create a lower triangular matrix (plus diagonal)
  }

  return(mat)
}

#' Extract Technical 3 matrix from Mplus
#'
#' Function that extracts the Tech3 matrix
#'
#' @param outfiletext the text of the output file
#' @param savedata_info Information on saved data
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech3}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech3 <- function(outfiletext, savedata_info, filename) {
  tech3Section <- getSection("^TECHNICAL 3 OUTPUT$", outfiletext)
  if (is.null(tech3Section)) return(list()) #no tech3 output

  tech3List <- list()
  tech3List[["paramCov"]] <- matrixExtract(tech3Section, "ESTIMATED COVARIANCE MATRIX FOR PARAMETER ESTIMATES", filename)
  tech3List[["paramCor"]] <- matrixExtract(tech3Section, "ESTIMATED CORRELATION MATRIX FOR PARAMETER ESTIMATES", filename)

  #when SAVEDATA wrote a TECH3 file, read the full-precision matrix from disk
  if (!is.null(savedata_info) && !is.na(savedata_info$tech3File)) {
    tech3List[["paramCov.savedata"]] <- extractFreeFile(savedata_info$tech3File, filename, make_symmetric=TRUE)
  } else {
    tech3List[["paramCov.savedata"]] <- NULL
  }

  class(tech3List) <- c("list", "mplus.tech3")
  return(tech3List)
}

#' Extract Technical 4 matrix from Mplus
#'
#' Function that extracts the Tech4 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech4}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech4 <- function(outfiletext, filename) { #TODO: have empty list use mplus.tech4 class tech4Section <- getSection("^TECHNICAL 4 OUTPUT$", outfiletext) if (is.null(tech4Section)) return(list()) #no tech4 output tech4List <- list() tech4Subsections <- getMultilineSection("ESTIMATES DERIVED FROM THE MODEL( FOR [\\w\\d\\s\\.,_]+)*", tech4Section, filename, allowMultiple=TRUE) matchlines <- attr(tech4Subsections, "matchlines") if (length(tech4Subsections) == 0) { warning("No sections found within TECH4 output.") return(list()) } else if (length(tech4Subsections) > 1) { groupNames <- make.names(gsub("^\\s*ESTIMATES DERIVED FROM THE MODEL( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", tech4Section[matchlines], perl=TRUE)) } for (g in 1:length(tech4Subsections)) { targetList <- list() targetList[["latMeansEst"]] <- matrixExtract(tech4Subsections[[g]], "ESTIMATED MEANS FOR THE LATENT VARIABLES", filename) targetList[["latCovEst"]] <- matrixExtract(tech4Subsections[[g]], "ESTIMATED COVARIANCE MATRIX FOR THE LATENT VARIABLES", filename) targetList[["latCorEst"]] <- matrixExtract(tech4Subsections[[g]], "ESTIMATED CORRELATION MATRIX FOR THE LATENT VARIABLES", filename) if (length(tech4Subsections) > 1) { class(targetList) <- c("list", "mplus.tech4") tech4List[[groupNames[g]]] <- targetList } else tech4List <- targetList } class(tech4List) <- c("list", "mplus.tech4") return(tech4List) } #' Extract Technical 7 from Mplus #' #' The TECH7 option is used in conjunction with TYPE=MIXTURE to request sample statistics #' for each class using raw data weighted by the estimated posterior probabilities for each class. #' #' @param outfiletext the text of the output file #' @param filename The name of the file #' @return A list of class \dQuote{mplus.tech7} #' @keywords internal #' @seealso \code{\link{matrixExtract}} #' @examples #' # make me!!! 
extractTech7 <- function(outfiletext, filename) {
  #TODO: have empty list use mplus.tech7 class
  #not sure whether there are sometimes multiple groups within this section.
  tech7Section <- getSection("^TECHNICAL 7 OUTPUT$", outfiletext)
  if (is.null(tech7Section)) return(list()) #no tech7 output

  tech7List <- list()
  tech7Subsections <- getMultilineSection("SAMPLE STATISTICS WEIGHTED BY ESTIMATED CLASS PROBABILITIES FOR CLASS \\d+",
      tech7Section, filename, allowMultiple=TRUE)
  matchlines <- attr(tech7Subsections, "matchlines")

  if (length(tech7Subsections) == 0) {
    warning("No sections found within tech7 output.")
    return(list())
  } else if (length(tech7Subsections) > 1) {
    #one subsection per latent class; list names come from the "CLASS <n>" header text
    groupNames <- make.names(gsub("^\\s*SAMPLE STATISTICS WEIGHTED BY ESTIMATED CLASS PROBABILITIES FOR (CLASS \\d+)\\s*$", "\\1",
        tech7Section[matchlines], perl=TRUE))
  }

  for (g in 1:length(tech7Subsections)) {
    targetList <- list()
    targetList[["classSampMeans"]] <- matrixExtract(tech7Subsections[[g]], "Means", filename)
    targetList[["classSampCovs"]] <- matrixExtract(tech7Subsections[[g]], "Covariances", filename)
    if (length(tech7Subsections) > 1) {
      class(targetList) <- c("list", "mplus.tech7")
      tech7List[[groupNames[g]]] <- targetList
    } else tech7List <- targetList
  }

  class(tech7List) <- c("list", "mplus.tech7")
  return(tech7List)
}

#' Extract Technical 8 from Mplus
#'
#' The TECH8 option is used to print the optimization history of a model.
#' It also prints the potential scale reduction in Bayesian models.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech8}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech8 <- function(outfiletext, filename) {
  #not sure whether there are sometimes multiple groups within this section.
  #for now, this function only extract PSR in Bayes models
  tech8Section <- getSection("^TECHNICAL 8 OUTPUT$", outfiletext)
  tech8List <- list()
  class(tech8List) <- c("list", "mplus.tech8")
  #default: empty classed data.frame so downstream code always finds a $psr element
  psr <- data.frame(); class(psr) <- c("data.frame", "mplus.psr.data.frame"); tech8List[["psr"]] <- psr
  if (is.null(tech8Section)) return(tech8List) #no tech8 output

  #psr extraction subfunction: parses the three-column iteration history
  #(iteration, potential scale reduction, parameter with highest PSR)
  extractPSR <- function(text) {
    startline <- grep("ITERATION\\s+SCALE REDUCTION\\s+HIGHEST PSR", text, perl=TRUE)
    if (length(startline) > 0L) {
      firstBlank <- which(text == "")
      firstBlank <- firstBlank[firstBlank > startline][1L] #first blank after starting line
      toparse <- text[(startline+1):firstBlank]
      psr <- data.frame(matrix(as.numeric(unlist(strsplit(trimSpace(toparse), "\\s+", perl=TRUE))),
          ncol=3, byrow=TRUE, dimnames=list(NULL, c("iteration", "psr", "param.highest.psr"))))
      class(psr) <- c("data.frame", "mplus.psr.data.frame")
      return(psr)
    } else {
      return(NULL)
    }
  }

  bayesPSR <- getMultilineSection("TECHNICAL 8 OUTPUT FOR BAYES ESTIMATION", tech8Section, filename, allowMultiple=FALSE)
  if (!is.na(bayesPSR[1L])) {
    #new outputs have "Iterations for model estimation" and "Iterations for computing PPPP"
    if (any(grepl("Iterations for computing PPPP", bayesPSR))) {
      pppp_text <- getSection("Iterations for computing PPPP", bayesPSR,
          headers = c("Iterations for computing PPPP", "Iterations for model estimation"))
      model_text <- getSection("Iterations for model estimation", bayesPSR,
          headers = c("Iterations for computing PPPP", "Iterations for model estimation"))
      tech8List[["psr"]] <- extractPSR(model_text)
      tech8List[["psr_pppp"]] <- extractPSR(pppp_text)
    } else {
      tech8List[["psr"]] <- extractPSR(bayesPSR)
    }
  }
  return(tech8List)
}

#' Extract Technical 9 matrix from Mplus
#'
#' Function that extracts the Tech9 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech9}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech9 <- function(outfiletext, filename) {
  tech9List <- list()
  class(tech9List) <- c("list", "mplus.tech9")
  tech9Section <- getSection("^TECHNICAL 9 OUTPUT$", outfiletext)
  if (is.null(tech9Section)) return(tech9List) #no tech9 output

  #TECH9 lists per-replication errors/warnings for Monte Carlo / bootstrap runs
  tech9Reps <- grep("^\\s*REPLICATION \\d+:\\s*$", tech9Section, perl=TRUE)
  repNums <- as.numeric(gsub("^\\s*REPLICATION (\\d+):\\s*$", "\\1", tech9Section[tech9Reps], perl=TRUE))
  if (length(tech9Reps) > 0L) {
    for (l in 1:length(tech9Reps)) {
      #message body for replication l runs to the start of the next replication header (or end of section)
      if (l < length(tech9Reps)) {
        msg <- paste(tech9Section[ (tech9Reps[l]+1):(tech9Reps[l+1]-1) ], collapse=" ")
      } else {
        msg <- paste(tech9Section[ (tech9Reps[l]+1):length(tech9Section) ], collapse=" ")
      }
      msg <- trimSpace(gsub("\\s+", " ", msg, perl=TRUE)) #collapse whitespace to single spaces
      tech9List[[ paste0("rep", repNums[l]) ]] <- list(rep=repNums[l], error=msg)
    }
  }
  return(tech9List)
}

#' Extract Technical 10 matrix from Mplus
#'
#' Function that extracts the Tech10 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return An empty list
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech10 <- function(outfiletext, filename) {
  tech10Section <- getSection("^TECHNICAL 10 OUTPUT$", outfiletext)
  if (is.null(tech10Section)) return(list()) #no tech10 output
  #NOTE(review): extraction is unfinished -- even when a TECH10 section is present,
  #this function currently returns an (invisible) empty list.
  tech10List <- list()
}

#' Extract Technical 12 from Mplus
#'
#' The TECH12 option is used in conjunction with TYPE=MIXTURE to request residuals for observed
#' versus model estimated means, variances, covariances, univariate skewness, and univariate
#' kurtosis. The observed values come from the total sample. The estimated values are computed as
#' a mixture across the latent classes.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech12}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech12 <- function(outfiletext, filename) {
  #not sure whether there are sometimes multiple groups within this section.
  tech12Section <- getSection("^TECHNICAL 12 OUTPUT$", outfiletext)
  tech12List <- list()
  class(tech12List) <- c("list", "mplus.tech12")
  if (is.null(tech12Section)) return(tech12List) #no tech12 output

  tech12Subsections <- getMultilineSection("ESTIMATED MIXED MODEL AND RESIDUALS \\(OBSERVED - EXPECTED\\)",
      tech12Section, filename, allowMultiple=TRUE)
  matchlines <- attr(tech12Subsections, "matchlines") #NOTE(review): currently unused below

  if (length(tech12Subsections) == 0) {
    warning("No sections found within tech12 output.")
    return(list())
  } else if (length(tech12Subsections) > 1) {
    warning("extractTech12 does not yet know how to handle multiple sections (if such exist)")
  }

  for (g in 1:length(tech12Subsections)) {
    #observed vs. mixture-estimated moments, plus their residuals (observed - expected)
    targetList <- list()
    targetList[["obsMeans"]] <- matrixExtract(tech12Subsections[[g]], "Observed Means", filename)
    targetList[["mixedMeans"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Means", filename)
    targetList[["mixedMeansResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Means", filename)
    targetList[["obsCovs"]] <- matrixExtract(tech12Subsections[[g]], "Observed Covariances", filename)
    targetList[["mixedCovs"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Covariances", filename)
    targetList[["mixedCovsResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Covariances", filename)
    targetList[["obsSkewness"]] <- matrixExtract(tech12Subsections[[g]], "Observed Skewness", filename)
    targetList[["mixedSkewness"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Skewness", filename)
    targetList[["mixedSkewnessResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Skewness", filename)
    targetList[["obsKurtosis"]] <- matrixExtract(tech12Subsections[[g]], "Observed Kurtosis", filename)
    targetList[["mixedKurtosis"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Kurtosis", filename)
    targetList[["mixedKurtosisResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Kurtosis", filename)
    if (length(tech12Subsections) > 1) {
      class(targetList) <- c("list", "mplus.tech12")
      tech12List[[g]] <- targetList #no known case where there are many output sections
    } else tech12List <- targetList
  }

  class(tech12List) <- c("list", "mplus.tech12")
  return(tech12List)
}

#' Extract Technical 15 from Mplus
#'
#' The TECH15 option is used in conjunction with TYPE=MIXTURE to request conditional probabilities
#' for the latent class variables.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech15}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech15 <- function(outfiletext, filename) {
  tech15Section <- getSection("^TECHNICAL 15 OUTPUT$", outfiletext)
  #grab the raw "P(...)" conditional-probability lines; when tech15Section is NULL
  #this safely yields character(0) (subsetting NULL returns NULL; trimws(NULL) is character(0))
  tech15List <- list(conditional.probabilities = trimws(tech15Section[grepl("^\\s+?P\\(", tech15Section)]))
  class(tech15List) <- c("list", "mplus.tech15")
  #NOTE(review): both branches below return the same object; the NULL check is
  #effectively decorative given the safe subsetting above
  if (is.null(tech15Section)) return(tech15List) #no tech15 output
  return(tech15List)
}

#' Extract Factor Score Statistics
#'
#' Function for extracting matrices for factor scores
#'
#' @param outfiletext The text of the output file
#' @param filename The name of the output file
#' @return A list
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractFacScoreStats <- function(outfiletext, filename) {
  #for now, skip getSection call and use nested header to getMultilineSection to avoid issue of SAMPLE STATISTICS appearing both
  #as top-level header and sub-header within factor scores
  fssSection <- getMultilineSection("SAMPLE STATISTICS FOR ESTIMATED FACTOR SCORES::SAMPLE STATISTICS",
      outfiletext, filename, allowMultiple=FALSE)

  fssList <- list()
  class(fssList) <- c("list", "mplus.facscorestats")
  if (is.na(fssSection[1L])) return(fssList) #no factor scores output

  fssList[["Means"]] <- matrixExtract(fssSection, "Means", filename)
  fssList[["Covariances"]] <- matrixExtract(fssSection, "Covariances", filename)
  fssList[["Correlations"]] <- matrixExtract(fssSection, "Correlations", filename)

  return(fssList)
}

#' Extract Latent Class Counts
#'
#' Function for extracting counts of latent classes
#'
#' @param outfiletext The text of the output file
#' @param filename The name of the output file
#' @param summaries Model summaries data.frame; \code{NCategoricalLatentVars} selects the
#'   single- versus multiple-categorical-latent-variable parsing path and
#'   \code{Mplus.version} selects version-specific section headers (< 7.3 vs. >= 7.3)
#' @return a list
#' @keywords internal
#' @examples
#' # make me!!!
extractClassCounts <- function(outfiletext, filename, summaries) {
  ####
  #TODO: Implement class count extraction for multiple categorical latent variable models.
  #Example: UG7.21
  #Output is quite different because of latent class patterns, transition probabilities, etc.

  #helper function for three-column class output
  getClassCols <- function(sectiontext) {
    #identify lines of the form class number, class count, class proportion: e.g., 1  136.38  .2728
    numberLines <- grep("^\\s*\\d+\\s+[0-9\\.-]+\\s+[0-9\\.-]+\\s*$", sectiontext, perl=TRUE)
    if (length(numberLines) > 0) {
      #row bind each line, convert to numeric, and store as data.frame
      counts <- data.frame(do.call(rbind, lapply(strsplit(trimSpace(sectiontext[numberLines]), "\\s+", perl=TRUE), as.numeric)))
      if (!ncol(counts) == 3) {
        warning("Number of columns for model class counts is not three.")
        return(NULL)
      }
      names(counts) <- c("class", "count", "proportion")
      #store counts as integer
      counts <- transform(counts, class=as.integer(class))
      return(counts)
    } else {
      return(NULL)
    }
  }

  countlist <- list()
  if(is.null(summaries)||missing(summaries)||summaries$NCategoricalLatentVars==1||is.na(summaries$NCategoricalLatentVars)){
    #Starting in Mplus v7.3 and above, formatting of the class counts appears to have changed...
    #Capture the alternatives here
    if (is.null(summaries)||missing(summaries) || is.null(summaries$Mplus.version) || as.numeric(summaries$Mplus.version) < 7.3) {
      modelCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$", outfiletext)
      ppCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$", outfiletext)
      mostLikelyCounts <- getSection("^CLASSIFICATION OF INDIVIDUALS BASED ON THEIR MOST LIKELY LATENT CLASS MEMBERSHIP$", outfiletext)
    } else {
      #>= 7.3: the three tables live under one header with "BASED ON ..." subheaders
      modelCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON THE ESTIMATED MODEL$", outfiletext)
      ppCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext)
      mostLikelyCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON THEIR MOST LIKELY LATENT CLASS MEMBERSHIP$", outfiletext)
    }

    countlist[["modelEstimated"]] <- getClassCols(modelCounts)
    countlist[["posteriorProb"]] <- getClassCols(ppCounts)
    countlist[["mostLikely"]] <- getClassCols(mostLikelyCounts)

    #most likely by posterior probability section
    mostLikelyProbs <- getSection("^Average Latent Class Probabilities for Most Likely Latent Class Membership \\((Row|Column)\\)$", outfiletext)
    if (length(mostLikelyProbs) > 1L) { mostLikelyProbs <- mostLikelyProbs[-1L] } #remove line 1: "by Latent Class (Column)"
    #Example:
    #Average Latent Class Probabilities for Most Likely Latent Class Membership (Row)
    #by Latent Class (Column)
    #
    #        1        2
    #
    # 1   0.986    0.014
    # 2   0.030    0.970
    #
    #A bit of a wonky section. Some notes:
    # 1) Rows represent those hard classified into that class.
    # 2) Rows sum to 1.0 and represent the summed average posterior probabilities of all the class assignment possibilities.
    # 3) Columns represent average posterior probabilitity of being in class 1 for those hard classified as 1 or 2.
    # 4) High diagonal indicates that hard classification matches posterior probability patterns.
    countlist[["avgProbs.mostLikely"]] <- unlabeledMatrixExtract(mostLikelyProbs, filename)

    #same, but for classification probabilities
    #also, starting ~Mplus 7.3, the columns and rows appear to have switched in this and the logit section (hence the Column|Row syntax)
    classificationProbs <- getSection("^Classification Probabilities for the Most Likely Latent Class Membership \\((Column|Row)\\)$", outfiletext)
    if (length(classificationProbs) > 1L) { classificationProbs <- classificationProbs[-1L] } #remove line 1: "by Latent Class (Column)"
    countlist[["classificationProbs.mostLikely"]] <- unlabeledMatrixExtract(classificationProbs, filename)

    #same, but for classification probability logits
    classificationLogitProbs <- getSection("^Logits for the Classification Probabilities for the Most Likely Latent Class Membership \\((Column|Row)\\)$", outfiletext)
    if (length(classificationLogitProbs) > 1L) { classificationLogitProbs <- classificationLogitProbs[-1L] } #remove line 1: "by Latent Class (Column)"
    countlist[["logitProbs.mostLikely"]] <- unlabeledMatrixExtract(classificationLogitProbs, filename)
  } else {
    # Exctract class_counts for multiple categorical latent variables.
    #helper: parse count tables whose rows may start with a latent-variable name
    #and whose shorter rows inherit the variable label from the row above
    getClassCols_lta <- function(sectiontext) {
      numberLines <- grep("^\\s*([a-zA-Z0-9]+)?(\\s+[0-9\\.-]{1,}){1,}$", sectiontext, perl=TRUE)
      if (length(numberLines) > 0) {
        parsedlines <- strsplit(trimSpace(sectiontext[numberLines]), "\\s+", perl=TRUE)
        num_values <- sapply(parsedlines, length)
        if(length(unique(num_values)) == 1){
          counts <- data.frame(t(sapply(parsedlines, as.numeric)), stringsAsFactors = FALSE)
        } else {
          # Pad shorter lines with NA on the left side
          parsedlines[which(num_values != max(num_values))] <- lapply(parsedlines[which(num_values != max(num_values))], function(x){
            c(rep(NA, (max(num_values) - length(x))), x)
          })
          counts <- do.call(rbind, parsedlines)
          # Repeat existing values on subsequent rows in columns containing NAs
          counts[,1] <- inverse.rle(list(lengths = diff(c(which(!is.na(counts[,1])), (nrow(counts)+1))),
              values = counts[,1][complete.cases(counts[,1])]))
          counts <- data.frame(counts, stringsAsFactors = FALSE)
          counts[, 2:4] <- lapply(counts[, 2:4], as.numeric)
        }
        return(counts)
      } else {
        return(NULL)
      }
    }

    if (missing(summaries) || is.null(summaries$Mplus.version) || as.numeric(summaries$Mplus.version) < 7.3) {
      posteriorProb.patterns <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext)
      mostLikely.patterns <- getSection("^CLASSIFICATION OF INDIVIDUALS BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN$", outfiletext)
      mostLikelyCounts <- getSection("CLASSIFICATION OF INDIVIDUALS BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN", outfiletext)
    } else {
      posteriorProb.patterns <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext)
      mostLikely.patterns <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$::^BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN$", outfiletext)
      mostLikelyCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR EACH LATENT CLASS VARIABLE$::^BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN$", outfiletext)
    }

    # Class counts
    countlist[["modelEstimated"]] <- getClassCols_lta(
      getSection(
        "^FINAL CLASS COUNTS AND PROPORTIONS FOR EACH LATENT CLASS VARIABLE$::^BASED ON THE ESTIMATED MODEL$", outfiletext
      )
    )
    countlist[["posteriorProb"]] <- getClassCols_lta(
      getSection(
        "^FINAL CLASS COUNTS AND PROPORTIONS FOR EACH LATENT CLASS VARIABLE$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext
      )
    )
    countlist[["mostLikely"]] <- getClassCols_lta(mostLikelyCounts)
    countlist[which(names(countlist) %in% c("modelEstimated", "posteriorProb", "mostLikely"))] <-
      lapply(countlist[which(names(countlist) %in% c("modelEstimated", "posteriorProb", "mostLikely"))],
          setNames, c("variable", "class", "count", "proportion"))

    # Patterns
    countlist[["modelEstimated.patterns"]] <- getClassCols_lta(
      getSection(
        "^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$::^BASED ON THE ESTIMATED MODEL$", outfiletext
      )
    )
    countlist[["posteriorProb.patterns"]] <- getClassCols_lta(posteriorProb.patterns)
    countlist[["mostLikely.patterns"]] <- getClassCols_lta(mostLikely.patterns)
    countlist[which(names(countlist) %in% c("modelEstimated.patterns", "posteriorProb.patterns", "mostLikely.patterns"))] <-
      lapply(countlist[which(names(countlist) %in% c("modelEstimated.patterns", "posteriorProb.patterns", "mostLikely.patterns"))],
          setNames, c(paste0("class.", unique(
            c(countlist[["mostLikely"]]$variable, countlist[["modelEstimated"]]$variable, countlist[["posteriorProb"]]$variable)
          )), "count", "proportion"))

    #Average latent class probabilities
    avgProbs <- getSection(
      "^Average Latent Class Probabilities for Most Likely Latent Class Pattern \\((Row|Column)\\)$::^by Latent Class Pattern \\((Row|Column)\\)$", outfiletext)
    column_headers <- strsplit(trimws(grep("\\s*Latent Class\\s{2,}", avgProbs, value = TRUE)), "\\s+", perl=TRUE)[[1]][-1]
    #pattern-definition rows: one integer per column header; keep only the first contiguous run
    variable_pattern_rows <- grep(paste(c("^(\\s{2,}\\d+){", length(column_headers), "}$"),
        collapse = ""), avgProbs, perl=TRUE)
    variable_pattern_rows <- variable_pattern_rows[!c(FALSE, diff(variable_pattern_rows) != 1)]
    variable_patterns <- avgProbs[variable_pattern_rows]
    variable_patterns <- data.frame(t(sapply(strsplit(trimws(variable_patterns), "\\s+", perl=TRUE), as.numeric)))
    names(variable_patterns) <- c("Latent Class Pattern No.", column_headers[-1])
    probs <- grep(paste(c("^\\s+\\d{1,}(\\s{2,}[0-9\\.-]+)+$"), collapse = ""),
        avgProbs[(variable_pattern_rows[length(variable_pattern_rows)]+1):length(avgProbs)], perl=TRUE, value = TRUE)
    # If the table is truncated, concatenate its parts
    if(length(probs) %% nrow(variable_patterns) > 1){
      for(i in 2:(length(probs) %% nrow(variable_patterns))){
        probs[1:(nrow(variable_patterns)+1)] <- paste(probs[1:(nrow(variable_patterns)+1)],
            substring(probs[((i-1)*(nrow(variable_patterns)+1)+1):(i*(nrow(variable_patterns)+1))], first = 8) )
      }
      probs <- probs[1:nrow(variable_patterns)]
    }
    probs <- t(sapply(strsplit(trimws(probs[-1]), "\\s+", perl=TRUE), as.numeric))[,-1]
    countlist[["avgProbs.mostLikely"]] <- probs
    countlist[["avgProbs.mostLikely.patterns"]] <- variable_patterns
    # AFAIK this section is not reported for multiple categorical variables
    countlist[["classificationProbs.mostLikely"]] <- NULL
    # AFAIK this section is not reported for multiple categorical variables
    countlist[["logitProbs.mostLikely"]] <- NULL

    transitionProbs <- getSection("^LATENT TRANSITION PROBABILITIES BASED ON THE ESTIMATED MODEL$", outfiletext)
    if(!is.null(transitionProbs)){
      #each "<A> Classes (Rows) by <B> Classes (Columns)" block becomes long-format
      #from/to/probability rows labelled "<A>.<i>" / "<B>.<j>"
      section_starts <- grep("\\(Columns\\)$", transitionProbs)
      transitionProbs <- mapply(FUN = function(begin, end){
        probs <- grep("^\\s+\\d{1,}(\\s{2,}[0-9\\.-]{2,}){1,}$", transitionProbs[begin:end], perl=TRUE, value = TRUE)
        probs <- do.call(rbind, strsplit(trimws(probs), "\\s+", perl=TRUE))[,-1]
        cbind(paste(gsub("\\s+(\\w+) Classes.*$", "\\1", transitionProbs[begin]) , ".",
                rep(c(1:nrow(probs)), ncol(probs)), sep = ""),
              paste(gsub(".+?by (\\w+) Classes.*$", "\\1", transitionProbs[begin]) , ".",
                as.vector(sapply(1:ncol(probs), rep, nrow(probs))), sep = ""),
              as.vector(probs))
      }, begin = section_starts, end = c(section_starts[-1], length(transitionProbs)), SIMPLIFY = FALSE)
      if(length(transitionProbs) > 1){
        transitionProbs <- do.call(rbind, transitionProbs)
      } else {
        transitionProbs <- transitionProbs[[1]]
      }
      transitionProbs <- data.frame(transitionProbs, stringsAsFactors = FALSE)
      names(transitionProbs) <- c("from", "to", "probability")
      transitionProbs$probability <- as.numeric(transitionProbs$probability)
    }
    countlist[["transitionProbs"]] <- transitionProbs
  }
  return(countlist)
}

#' Reconstruct matrix from unlabeled multi-line text output
#'
#' worker function for extracting Mplus matrix output from an unlabeled section
#' where matrices are spread across blocks to keep within width constraints
#' example: class counts output from latent class models.
#'
#' @param outfiletext The text of the output file
#' @param filename The name of the output file
#' @return a matrix
#' @keywords internal
#' @examples
#' # make me!!!
unlabeledMatrixExtract <- function(outfiletext, filename) {
  #This function extends the matrixExtract function by allowing for the matrix to be recreated
  #to have no header labels and where section headers have a blank line on either side.
  #Only example is in the class counts section, where when there
  #are many classes, the most likely x posterior probability matrix is too wide and is output like this:
  #         1        2        3        4        5        6        7        8        9
  #
  #1    0.885    0.000    0.000    0.017    0.024    0.000    0.000    0.019    0.055
  #2    0.000    0.775    0.006    0.000    0.000    0.064    0.097    0.013    0.000
  #3    0.000    0.004    0.826    0.035    0.000    0.082    0.000    0.000    0.052
  #4    0.014    0.002    0.070    0.804    0.018    0.035    0.000    0.008    0.046
  #5    0.042    0.000    0.001    0.076    0.842    0.000    0.000    0.001    0.038
  #6    0.000    0.096    0.063    0.014    0.001    0.732    0.021    0.026    0.008
  #7    0.002    0.091    0.010    0.005    0.001    0.034    0.808    0.005    0.005
  #8    0.118    0.014    0.006    0.004    0.000    0.030    0.015    0.514    0.139
  #9    0.030    0.001    0.056    0.059    0.014    0.024    0.000    0.109    0.691
  #10   0.030    0.062    0.007    0.007    0.002    0.052    0.130    0.108    0.063
  #
  #        10
  #
  #1    0.000
  #2    0.046
  #3    0.001
  #4    0.004
  #5    0.000
  #6    0.038
  #7    0.039
  #8    0.159
  #9    0.016
  #10   0.539
  #Only one matrix can be extracted from outfiletext since sections are unlabeled
  if (length(outfiletext) > 0L && length(outfiletext) > 1L) { #NOTE(review): first condition is redundant with the second
    #pattern match: 1) blank line; 2) integers line; 3) blank line
    #find these cases, then add "DUMMY" to each of the header blank lines
    blankLines <- which(outfiletext == "")
    if (length(blankLines) > 0L) {
      headerLines <- c()
      for (b in 1:length(blankLines)) {
        if (b < length(blankLines) && blankLines[b+1] == blankLines[b] + 2) {
          # a blank line followed by a non-blank line followed by a blank line...
          # check that it represents an integer sequence (this may need to be removed in more general cases)
          intLine <- strsplit(trimSpace(outfiletext[blankLines[b]+1]), "\\s+", perl=TRUE)[[1L]]
          firstCol <- as.numeric(intLine[1L]) #number of the class in the first column
          if (all(intLine == firstCol:(firstCol + length(intLine) - 1) )) {
            headerLines <- c(headerLines, blankLines[b])
          }
        }
      }
      #add the header to blank lines preceding class labels row
      outfiletext[headerLines] <- "DUMMY"
      #now use matrix extract to reconstruct matrix
      unlabeledMat <- matrixExtract(outfiletext, "DUMMY", filename)
      return(unlabeledMat)
    } else {
      return(NULL)
    }
  } else {
    return(NULL)
  }
}

#' Reconstruct matrix from multi-line text output
#'
#' main worker function for extracting Mplus matrix output
#' where matrices are spread across blocks to keep within width constraints
#' example: tech1 matrix output.
#'
#' @param outfiletext The text of the output file
#' @param headerLine The header line
#' @param filename The name of the output file
#' @param ignore.case Whether the header match should be case-insensitive (passed to getMultilineSection)
#' @return a matrix
#' @keywords internal
#' @examples
#' # make me!!!
matrixExtract <- function(outfiletext, headerLine, filename, ignore.case=FALSE) {
  matLines <- getMultilineSection(headerLine, outfiletext, filename, allowMultiple=TRUE, ignore.case=ignore.case)
  if (!is.na(matLines[1])) {
    numBlocks <- length(matLines)
    blockList <- list()
    for (m in 1:numBlocks) {
      colHeaders <- strsplit(trimSpace(matLines[[m]][1]), "\\s+", perl=TRUE)[[1]]
      #m+3 because m+1 is col header, m+2 is line of underscores
      block <- matLines[[m]][c(-1,-2)]
      block <- block[block != ""] #drop blank lines
      #10Jul2012: Occasionally, Mplus includes a blank line block just for fun... like this:
      #Residuals for Covariances/Correlations/Residual Correlations
      #STRES4
      #________
      #in this case, skip the block
      if (length(block) == 0) next
      splitData <- strsplit(trimSpace(block), "\\s+", perl=TRUE)
      #alternative to remove blank lines after strsplit (above easier to read)
      #remove blank lines by comparing against character(0)
      #splitData2 <- splitData[sapply(splitData, function(x) !identical(x, character(0)))]

      #May 2017: in Mplus v7*, there is a header on the beginning of each row, including for vectors such as NU,TAU, etc.
      #example:
      # NU
      #    Y             X1            X2            W
      #    ________      ________      ________      ________
      # 1  0             0             0             0
      #in Mplus v8, the "1" header on parameter vectors has been removed.
      # NU
      #    Y12T3         Y13T3         Y14T3
      #    ________      ________      ________
      #    0             0             0
      #To overcome this problem, check the number of columns in splitData compared to the number of column headers.
      #If the number of columns is equal to the number of column headers, add a "1" at the beginning to make parsing code
      # consistent with v7 and expectation is matrix assembly in the aggMat section below.
      #Only add this tweak if the first element of v is not identical to any column header.
      #Otherwise this will add a "1" to some rows that are part of a matrix, not param vector.
      splitData <- lapply(splitData, function(v) {
        if (length(v) == length(colHeaders) && (! v[1L] %in% colHeaders)) { v <- c("1", v) }
        return(v)
      })

      #pull out row names from each element
      rowHeaders <- sapply(splitData, "[", 1)
      mat <- matrix(NA_real_, nrow=length(rowHeaders), ncol=length(colHeaders),
          dimnames=list(rowHeaders, colHeaders))
      for (r in 1:length(splitData)) {
        line <- mplus_as.numeric(splitData[[r]][-1]) #use mplus_as.numeric to handle D+XX scientific notation in output
        if ((lenDiff <- length(colHeaders) - length(line)) > 0) line <- c(line, rep(NA, lenDiff))
        mat[r,] <- line
      }
      blockList[[m]] <- mat
    }

    #aggregate sections
    aggMatCols <- do.call("c", lapply(blockList, colnames))
    aggMatRows <- rownames(blockList[[1]]) #row names are shared across blocks in Mplus output
    aggMat <- matrix(NA, nrow=length(aggMatRows), ncol=length(aggMatCols), dimnames=list(aggMatRows, aggMatCols))

    #Unfortunately, due to Mplus 8-character printing limits for matrix sections, row/col names are not guaranteed to be unique.
    #This causes problems for using name-based matching to fill the matrix.
    #We know that blocks are printed from left-to-right by column (i.e., the block 1 has the first X columns, block 2 has the next Y columns).
    #Thus, we should be able to use a counter and fill columns numerically. This does not get around a problem of non-unique row names since we
    #can't easily discern the rows represented in a block based on row numbering alone. Thus, this is an incomplete solution for now (Aug2015 MH)
    colCounter <- 1
    for (l in blockList) {
      aggMat[rownames(l), colCounter:(colCounter + ncol(l) - 1)] <- l #fill in just the block of the aggregate matrix represented in l
      colCounter <- colCounter + ncol(l)
    }
  } else {
    #warning("No lines identified for matrix extraction using header: \n  ", headerLine)
    aggMat <- NULL
  }

  return(aggMat)
}

#EXTRACT DATA SUMMARY SECTION
#NB. This does not support three-level output yet!
#' Extract the SUMMARY OF DATA section from Mplus output
#'
#' Parses the "SUMMARY OF DATA" section, including the per-group subsections
#' printed for multiple-group models, and returns the number of clusters,
#' number of missing data patterns, average cluster size, and (when present)
#' the estimated intraclass correlations for the Y variables.
#' NB: this does not support three-level output yet.
#'
#' @param outfiletext The text of the output file (character vector, one line per element)
#' @param filename the name of the file being parsed. Used to make more intelligible warning messages.
#' @return A list of class \code{c("list", "mplus.data_summary")} with elements
#'   \code{overall} (data.frame of summary statistics, one row per group) and
#'   \code{ICCs} (data.frame of intraclass correlations, or NULL when absent).
#' @keywords internal
extractDataSummary <- function(outfiletext, filename) {
  dataSummarySection <- getSection("^\\s*SUMMARY OF DATA( FOR THE FIRST DATA SET)*\\s*$", outfiletext)
  if (is.null(dataSummarySection)) {
    #section absent: return an empty, but properly classed, object
    empty <- list()
    class(empty) <- c("list", "mplus.data_summary")
    return(empty)
  }

  #detect groups
  #support Mplus v8 syntax "Group G1 (0)" with a parenthesized numeric value after the group name
  multipleGroupMatches <- grep("^\\s*Group \\w+(?:\\s+\\(\\d+\\))*\\s*$", dataSummarySection, ignore.case=TRUE, perl=TRUE)

  if (length(multipleGroupMatches) > 0L) {
    groupNames <- sub("^\\s*Group (\\w+)(?:\\s+\\(\\d+\\))*\\s*$", "\\1", dataSummarySection[multipleGroupMatches], perl=TRUE)
    toparse <- list()
    #divide into a list by group: each chunk runs from one "Group" header to the line before the next
    for (i in seq_along(multipleGroupMatches)) {
      if (i < length(multipleGroupMatches)) {
        end <- multipleGroupMatches[i+1] - 1
      } else {
        end <- length(dataSummarySection)
      }
      section <- dataSummarySection[(multipleGroupMatches[i]+1):end]
      attr(section, "group.name") <- groupNames[i]
      toparse[[groupNames[i]]] <- section
    }
  } else {
    #single-group output: treat the whole section as one pseudo-group named "all"
    attr(dataSummarySection, "group.name") <- "all"
    toparse <- list(all=dataSummarySection)
  }

  summaries <- c()
  iccs <- c()
  for (section in toparse) {
    summaries <- rbind(summaries, data.frame(
      NClusters = extractValue(pattern="^\\s*Number of clusters\\s*", section, filename, type="int"),
      NMissPatterns = extractValue(pattern="^\\s*Number of missing data patterns\\s*", section, filename, type="int"),
      AvgClusterSize = extractValue(pattern="^\\s*Average cluster size\\s*", section, filename, type="dec"),
      Group=attr(section, "group.name")
    ))

    #parse estimated intraclass correlations, if printed for this group
    icc_start <- grep("^\\s*Estimated Intraclass Correlations for the Y Variables( for [\\w\\._]+ level)*\\s*$", section, perl=TRUE)
    iccout <- c()
    if (length(icc_start) > 0L) {
      to_parse <- trimSpace(section[(icc_start+1):length(section)]) #this assumes nothing comes afterwards in the section

      #problem: there is an unknown number of columns in this output. Example:
      #
      #             Intraclass              Intraclass              Intraclass
      #  Variable  Correlation  Variable  Correlation  Variable  Correlation
      #  Q22          0.173     Q38          0.320     Q39          0.127
      #  Q40          0.270
      #
      #solution: after splitting on whitespace, variables are always at odd
      #positions and correlations always at even positions
      repeat_line <- grep("(\\s*Variable\\s+Correlation\\s*)+", to_parse)
      if (length(repeat_line) == 1L) {
        icc_values <- strsplit(to_parse[(repeat_line+1):length(to_parse)], "\\s+")
        for (ss in icc_values) {
          if (length(ss) > 0L) {
            positions <- seq_along(ss)
            vars <- ss[positions[positions %% 2 != 0]] #odd positions: variable names
            vals <- ss[positions[positions %% 2 == 0]] #even positions: ICC estimates
            iccout <- rbind(iccout, data.frame(variable=vars, ICC=as.numeric(vals), stringsAsFactors=FALSE))
          }
        }
        iccout$Group <- attr(section, "group.name")
        iccs <- rbind(iccs, iccout)
      }
    }
  }

  #trim out the "all" grouping column in the single-group case
  if (length(multipleGroupMatches) == 0L) {
    summaries$Group <- NULL
    iccs$Group <- NULL
  }

  retlist <- list(overall=summaries, ICCs=iccs)
  class(retlist) <- c("list", "mplus.data_summary")
  return(retlist)
}

#Caspar van Lissa code for extract invariance testing section

#' Extract the Invariance Testing section from Mplus output
#'
#' Parses the measurement-invariance summary that Mplus prints when invariance
#' testing is requested (e.g., MODEL = CONFIGURAL METRIC SCALAR): the chi-square
#' fit of each model and the chi-square difference tests between models.
#'
#' @param outfiletext The text of the output file (character vector)
#' @param filename the name of the file being parsed (currently unused; retained for interface consistency)
#' @return A list with elements \code{models} (numeric matrix of Chi-Square, DF,
#'   Pvalue, one row per model) and \code{compared} (numeric matrix of chi-square
#'   difference tests), or an empty list if the section is absent.
#' @keywords internal
extractInvarianceTesting <- function(outfiletext, filename) {
  inv_test_firstline <- grep("^Invariance Testing$", outfiletext)
  if (length(inv_test_firstline) == 0L) { return(list()) } #section not found

  retlist <- list()

  #section ends at the first MODEL FIT INFORMATION header after the section start
  inv_test_endline <- grep("^MODEL FIT INFORMATION", outfiletext)
  inv_test_endline <- inv_test_endline[inv_test_endline > inv_test_firstline][1]
  inv_test <- outfiletext[(inv_test_firstline+2):(inv_test_endline-3)]

  #helper: drop the first (row-label) column of a character matrix and convert
  #the remainder to a numeric matrix with the given dimnames.
  #Uses matrix(as.numeric(...)) rather than apply() so that a single-row input
  #stays a matrix: the old apply(x[, -1], 2, as.numeric) collapsed one row to a
  #dimension-less vector and errored ("dim(X) must have a positive length").
  numerify <- function(charmat, colnms) {
    vals <- charmat[, -1, drop = FALSE]
    matrix(as.numeric(vals), nrow = nrow(vals),
           dimnames = list(charmat[, 1], colnms))
  }

  #model fit rows: a single-word model name followed by four numeric columns
  model_lines <- grep("^\\s+?\\w+(\\s{2,}[0-9.]+){4}$", inv_test, value = TRUE)
  if (length(model_lines) > 0L) { #guard: leave $models unset if no rows matched
    model_rows <- t(sapply(model_lines, function(x) strsplit(trimws(x), "\\s+")[[1]], USE.NAMES = FALSE))
    model_num <- numerify(model_rows, c("Parameters", "Chi-Square", "DF", "Pvalue"))
    #the Parameters column is intentionally dropped from the returned matrix
    retlist$models <- model_num[, -1, drop = FALSE]
  }

  #difference test rows: a three-word comparison label (e.g. "Metric against
  #Configural") followed by three numeric columns; split on runs of >= 2 spaces
  #so the multi-word label stays a single token
  test_lines <- grep("^\\s+?(\\w+\\s){3}(\\s{2,}[0-9.]+){3}$", inv_test, value = TRUE)
  if (length(test_lines) > 0L) {
    test_rows <- t(sapply(test_lines, function(x) strsplit(trimws(x), "\\s{2,}")[[1]], USE.NAMES = FALSE))
    retlist$compared <- numerify(test_rows, c("Chi-Square", "DF", "Pvalue"))
  }

  return(retlist)
}
/R/parseOutput.R
no_license
Ichimonji212/MplusAutomation
R
false
false
116,997
r
#' Extract values from Mplus output #' An internal function used by extractSummaries_1file to extract #' parameters from the output file using regular expressions. #' #' @param pattern the exact text to be matched in the outfile that identifies the parameter of interest #' @param textToScan the chunk of Mplus output to be parsed, passed as a vector of character strings (from the scan command). #' @param filename the name of the file containing textToScan. Used to make more intelligible warning messages. #' @param type the data type of the parameter, which determines the regexp used. Currently can be \dQuote{int}, \dQuote{dec}, \dQuote{str}, or \dQuote{calc}. Defaults to \dQuote{int}. #' @return A string or numeric vector #' @keywords internal #' @examples #' #make me!!! extractValue <- function(pattern, textToScan, filename, type="int") { #regex pattern now allows for specification to search for value on some line before or after match #example: +2:the Observed and the Replicated Chi-Square Values offset <- 0 if (grepl("^[+-]+\\d+:.*$", pattern, perl=TRUE)) { offset <- as.numeric(sub("^([+-]+\\d+):.*$", "\\1", pattern, perl=TRUE)) pattern <- sub("^[+-]+\\d+:(.*)$", "\\1", pattern, perl=TRUE) #chop offset } #locate the matching line in the output file matchpos <- grep(pattern, textToScan, ignore.case=TRUE) matchlines <- textToScan[(matchpos+offset)] if (length(matchlines) > 1) { stop("More than one match found for parameter: ", pattern, "\n ", filename) #return(matchlines) #not sure what I was thinking here... 
seems better to stop than warn and return lines } else if (length(matchlines) == 0) { #if the parameter of interest not found in this file, then return NA #warning(paste("Parameter not found: ", pattern, "\n ", filename, sep="")) if (type == "int") return(NA_integer_) else if (type == "dec") return(NA_real_) else if (type == "str") return(NA_character_) } #different idea: concatenate pattern with var type and match on that #then sub just the pattern part from the larger line typePrefix <- substr(type, 1, 3) if (typePrefix == "int") { regexp <- "-*\\d+" #optional negative sign in front } else if (typePrefix == "dec") { #regexpr: -*\\d+\\.\\d+ : -* optional negative sign, \\d+ match at least one digit \\. match decimal sign \\d+ match decimal digits regexp <- "-*\\d+\\.\\d+" } else if (typePrefix == "str") { regexp <- paste(pattern, ".*", sep="") } #locate the match valueMatches <- gregexpr(regexp, matchlines[1], perl=TRUE)[[1]] if (type == "str") { #remove the tag portion of the string (e.g., "title:"), retaining rest of line returnVal <- as.character(sub(pattern, "", matchlines[1], ignore.case=TRUE)) } else { #excessively tight syntax: replace dec[15] with 15, if number at end of type. Otherwise return just "dec". #then grep result for only numeric characters (\\d+). 
If grep is false (i.e., no numerals in substitution, #then no index was specified in type, so type must be simply "dec", "int", or "str" (as opposed to "int[15]"), so set as 1 if (!grepl("^\\d+$", whichMatch <- sub("^.*\\[(\\d+)\\]$", "\\1", type, perl=TRUE), perl=TRUE)) whichMatch <- 1 else whichMatch <- as.numeric(whichMatch) #pull from the start of the match through match.length, which is the length of characters that matched #need to subtract one from the start + length offset to grab the correct number of characters #(e.g., if the match runs from 40-44, the start will be 40, with length 5, but 40 + 5 would be 6 characters, hence -1 returnVal <- as.numeric(substr(matchlines[1], valueMatches[whichMatch], valueMatches[whichMatch] + attr(valueMatches, "match.length")[whichMatch] - 1)) } return(returnVal) } #' Worker function used in extractSummaries_1section #' #' @param arglist The argument list #' @param sectionHeaders A character vector with headers for each section of interest #' @param sectionFields is a list of data.frames where each data.frame specifies the fields to be extracted for that section #' @param textToParse The text to parse #' @param filename The filename #' @return A list #' @keywords internal #' @examples #' # make me!!! extractSummaries_1plan <- function(arglist, sectionHeaders, sectionFields, textToParse, filename) { #make this a more generic function that accepts headers and fields in case it is useful outside the MODEL FIT section if (length(sectionHeaders) < 1) stop("No section headers provided.") if (length(sectionHeaders) != length(sectionFields)) stop("Section headers and section fields have different lengths.") #multiple sections for (header in 1:length(sectionHeaders)) { #a blank section header indicates to match anywhere in the textToParse if (sectionHeaders[header] == "") { sectionText <- textToParse } else { #could be pretty inefficient if the same section header is repeated several times. 
#could build a list with divided output and check whether a section is present in the list before extracting sectionText <- getMultilineSection(sectionHeaders[header], textToParse, filename) } #process all fields for this section sectionFieldDF <- sectionFields[[header]] for (i in 1:nrow(sectionFieldDF)) { thisField <- sectionFieldDF[i,] #Check whether this field already exists and is not missing. If so, skip the extraction. #This was initially setup because of Tech 14 section changes where the number of final stage optimizations is different from v6 to v7. if (!thisField$varName %in% names(arglist) || is.na(arglist[[ thisField$varName ]])) { arglist[[ thisField$varName ]] <- extractValue(pattern=thisField$regexPattern, sectionText, filename, type=thisField$varType) } } } return(arglist) } #' Extract summary information for one section from Mplus output #' #' Function to extract model fit statistics from a section, wrapped to allow for multiple fit sections, as in EFA files. #' Calls \code{extractSummaries_1plan} #' #' @param modelFitSection The fit information section #' @param arglist The argument list #' @param filename The file name #' @return The argument list #' @keywords internal #' @examples #' # make me!!! extractSummaries_1section <- function(modelFitSection, arglist, filename, input=list()) { #DATA IMPUTATION outputs sometimes use the Mean/SD output (I believe in Mplus v6.12 and perhaps v7) #In Mplus v8, Model fit statistics are output as usual (e.g., ex11.6.out). 
#This is confusing, so we should just test for the Mean/SD output here and use the MI-type output if found useMIHeadings <- FALSE if (!is.null(input$data.imputation)) { header <- "Chi-Square Test of Model Fit" fields <- list(data.frame( varName=c("ChiSqM_DF", "ChiSqM_Mean", "ChiSqM_SD", "ChiSqM_NumComputations"), regexPattern=c("Degrees of Freedom", "Mean", "Std Dev", "Number of successful computations"), varType=c("int", "dec", "dec", "int"), stringsAsFactors=FALSE)) test <- extractSummaries_1plan(arglist, header, fields, modelFitSection, filename) if (!is.na(test$ChiSqM_Mean)) { useMIHeadings <- TRUE } } #MI and Montecarlo data types have fundamentally different output (means and sds per fit stat) if (useMIHeadings || grepl("imputation", arglist$DataType, ignore.case=TRUE) || grepl("montecarlo", arglist$DataType, ignore.case=TRUE)) { modelFitSectionHeaders <- c( "", #section-nonspecific parameters "Chi-Square Test of Model Fit", # "Chi-Square Test of Model Fit for the Baseline Model", "Loglikelihood::H0 Value", "Loglikelihood::H1 Value", "CFI/TLI::CFI", "CFI/TLI::TLI", "Bayesian Posterior Predictive Checking using Chi-Square::Posterior Predictive P-Value", "Bayesian Prior Posterior Predictive Checking using Chi-Square::Prior Posterior Predictive P-Value", "Information Criteria( Including the Auxiliary Part)*::Akaike \\(AIC\\)", "Information Criteria( Including the Auxiliary Part)*::Bayesian \\(BIC\\)", "Information Criteria( Including the Auxiliary Part)*::Sample-Size Adjusted BIC \\(n\\* = \\(n \\+ 2\\) / 24\\)", "RMSEA \\(Root Mean Square Error Of Approximation\\)", "WRMR \\(Weighted Root Mean Square Residual\\)", "Information Criteri(a|on)::Deviance \\(DIC\\)", # "Information Criteri(a|on)::Estimated Number of Parameters \\(pD\\)", "Information Criteri(a|on)::Bayesian \\(BIC\\)" ) modelFitSectionFields <- list( data.frame( varName=c("Parameters"), #defined outside of information criteria section for non-ML estimators regexPattern=c("^Number of Free 
Parameters"), varType=c("int"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqM_DF", "ChiSqM_Mean", "ChiSqM_SD", "ChiSqM_NumComputations"), regexPattern=c("Degrees of Freedom", "Mean", "Std Dev", "Number of successful computations"), varType=c("int", "dec", "dec", "int"), stringsAsFactors=FALSE ), # data.frame( # varName=c("ChiSqBaseline_Value", "ChiSqBaseline_DF", "ChiSqBaseline_PValue"), # regexPattern=c("Value", "Degrees of Freedom", "^P-Value"), # varType=c("dec", "int", "dec"), stringsAsFactors=FALSE # ), data.frame( varName=c("LL_Mean", "LL_SD", "LL_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("UnrestrictedLL_Mean", "UnrestrictedLL_SD", "UnrestrictedLL_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("CFI_Mean", "CFI_SD", "CFI_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("TLI_Mean", "TLI_SD", "TLI_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("PostPred_PValue_Mean", "PostPred_PValue_SD", "PostPred_PValue_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("PriorPostPred_PValue_Mean", "PriorPostPred_PValue_SD", "PriorPostPred_PValue_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("AIC_Mean", "AIC_SD", "AIC_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", 
"int"), stringsAsFactors=FALSE ), data.frame( varName=c("BIC_Mean", "BIC_SD", "BIC_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("aBIC_Mean", "aBIC_SD", "aBIC_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("RMSEA_Mean", "RMSEA_SD", "RMSEA_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("WRMR_Mean", "WRMR_SD", "WRMR_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( #Information Criterion:: DIC varName=c("DIC_Mean", "DIC_SD", "DIC_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( #Information Criterion:: Estimated number of parameters (pD) varName=c("pD_Mean", "pD_SD", "pD_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( #Information Criterion:: Bayesian (BIC) -- sometimes within Information Criterion, sometimes Information Criteria (above)... varName=c("BIC_Mean", "BIC_SD", "BIC_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ) ) #handle two-level models, which return separate srmr for between vs. 
within if (grepl("twolevel", arglist$AnalysisType, ignore.case=TRUE)) { modelFitSectionHeaders <- append(modelFitSectionHeaders, c( "SRMR \\(Standardized Root Mean Square Residual\\) for the WITHIN level", "SRMR \\(Standardized Root Mean Square Residual\\) for the BETWEEN level")) modelFitSectionFields <- c(modelFitSectionFields, list(data.frame( varName=c("SRMR.Within_Mean", "SRMR.Within_SD", "SRMR.Within_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE ), data.frame( varName=c("SRMR.Between_Mean", "SRMR.Between_SD", "SRMR.Between_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE )) ) } else { modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)") modelFitSectionFields <- c(modelFitSectionFields, list(data.frame( varName=c("SRMR_Mean", "SRMR_SD", "SRMR_NumComputations"), regexPattern=c("Mean", "Std Dev", "Number of successful computations"), varType=c("dec", "dec", "int"), stringsAsFactors=FALSE )) ) } } else { #not imputation or monte carlo output modelFitSectionHeaders <- c( "", #section-inspecific parameters "Chi-Square Test of Model Fit", "Chi-Square Test of Model Fit for the Baseline Model", "{+3i}Chi-Square Test of Model Fit for the Binary and Ordered Categorical::{+2b}Pearson Chi-Square", #chi-square header spans two lines, so +3i "{+3i}Chi-Square Test of Model Fit for the Binary and Ordered Categorical::{+2b}Likelihood Ratio Chi-Square", "Chi-Square Test for MCAR under the Unrestricted Latent Class Indicator Model::{+2b}Pearson Chi-Square", #use blank line to find pearson within section "Chi-Square Test for MCAR under the Unrestricted Latent Class Indicator Model::{+2b}Likelihood Ratio Chi-Square", "Chi-Square Test for Difference Testing", "Loglikelihood( Including the Auxiliary Part)*", "CFI/TLI", "Information 
Criteria( Including the Auxiliary Part)*", "Information Criteria Including the Auxiliary Part", "RMSEA \\(Root Mean Square Error Of Approximation\\)", "WRMR \\(Weighted Root Mean Square Residual\\)", "Bayesian Posterior Predictive Checking using Chi-Square", "Information Criterion", #somehow singular for bayes output? "Wald Test of Parameter Constraints" ) modelFitSectionFields <- list( data.frame( varName=c("Parameters"), #defined outside of information criteria section for non-ML estimators regexPattern=c("^Number of Free Parameters"), #only match beginning of line (aux section has its own indented variant) varType=c("int"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqM_Value", "ChiSqM_DF", "ChiSqM_PValue", "ChiSqM_ScalingCorrection"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value", "Scaling Correction Factor"), varType=c("dec", "int", "dec", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqBaseline_Value", "ChiSqBaseline_DF", "ChiSqBaseline_PValue"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqCategoricalPearson_Value", "ChiSqCategoricalPearson_DF", "ChiSqCategoricalPearson_PValue"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqCategoricalLRT_Value", "ChiSqCategoricalLRT_DF", "ChiSqCategoricalLRT_PValue"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqMCARUnrestrictedPearson_Value", "ChiSqMCARUnrestrictedPearson_DF", "ChiSqMCARUnrestrictedPearson_PValue"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqMCARUnrestrictedLRT_Value", "ChiSqMCARUnrestrictedLRT_DF", "ChiSqMCARUnrestrictedLRT_PValue"), 
regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ChiSqDiffTest_Value", "ChiSqDiffTest_DF", "ChiSqDiffTest_PValue"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("LL", "UnrestrictedLL", "LLCorrectionFactor", "UnrestrictedLLCorrectionFactor"), regexPattern=c("H0 Value", "H1 Value", "H0 Scaling Correction Factor", "H1 Scaling Correction Factor"), varType=c("dec", "dec", "dec", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("CFI", "TLI"), regexPattern=c("CFI", "TLI"), varType=c("dec", "dec"), stringsAsFactors=FALSE ), data.frame( #Information Criteria (v8 now includes DIC and pD here) varName=c("AIC", "BIC", "aBIC", "DIC", "pD"), regexPattern=c("Akaike \\(AIC\\)", "Bayesian \\(BIC\\)", "Sample-Size Adjusted BIC", "Deviance \\(DIC\\)", "Estimated Number of Parameters \\(pD\\)"), varType=c("dec", "dec", "dec", "dec", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("ParametersWithAux"), regexPattern=c("Number of Free Parameters"), varType=c("int"), stringsAsFactors=FALSE ), data.frame( varName=c("RMSEA_Estimate", "RMSEA_90CI_LB", "RMSEA_90CI_UB", "RMSEA_pLT05"), regexPattern=c("Estimate", "90 Percent C.I.", "90 Percent C.I.", "Probability RMSEA <= .05"), varType=c("dec", "dec[1]", "dec[2]", "dec"), stringsAsFactors=FALSE ), data.frame( varName=c("WRMR"), regexPattern=c("Value"), varType=c("dec"), stringsAsFactors=FALSE ), data.frame( #Bayesian Posterior Predictive Checking using Chi-Square varName=c("ObsRepChiSqDiff_95CI_LB", "ObsRepChiSqDiff_95CI_UB", "PostPred_PValue", "PriorPostPred_PValue"), regexPattern=c("+2:the Observed and the Replicated Chi-Square Values", "+2:the Observed and the Replicated Chi-Square Values", "^\\s*Posterior Predictive P-Value", "Prior Posterior Predictive P-Value"), varType=c("dec[1]", "dec[2]", "dec", "dec"), 
stringsAsFactors=FALSE ), data.frame( #Information Criterion (singular name under Mplus Bayes v7. Corrected to "Criteria" in v8) varName=c("DIC", "pD", "BIC"), regexPattern=c("Deviance \\(DIC\\)", "Estimated Number of Parameters \\(pD\\)", "Bayesian \\(BIC\\)"), #sometimes BIC is listed here (e.g., MI Bayes output) varType=c("dec", "dec", "dec"), stringsAsFactors=FALSE ), data.frame( #Wald Test of Parameter Constraints varName=c("WaldChiSq_Value", "WaldChiSq_DF", "WaldChiSq_PValue"), regexPattern=c("^\\s*Value", "Degrees of Freedom", "^\\s*P-Value"), varType=c("dec", "int", "dec"), stringsAsFactors=FALSE ) ) if (grepl("twolevel", arglist$AnalysisType, ignore.case=TRUE)) { modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)") modelFitSectionFields <- c(modelFitSectionFields, list(data.frame( varName=c("SRMR.Within", "SRMR.Between"), regexPattern=c("Value for Within", "Value for Between"), varType=c("dec", "dec"), stringsAsFactors=FALSE )) ) } else if (grepl("threelevel", arglist$AnalysisType, ignore.case=TRUE)) { modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)") modelFitSectionFields <- c(modelFitSectionFields, list(data.frame( varName=c("SRMR.Within", "SRMR.Between.L2", "SRMR.Between.L3"), regexPattern=c("Value for Within", "Value for Between Level 2", "Value for Between Level 3"), varType=c("dec", "dec", "dec"), stringsAsFactors=FALSE )) ) } else { modelFitSectionHeaders <- append(modelFitSectionHeaders, "SRMR \\(Standardized Root Mean Square Residual\\)") #append two lists together modelFitSectionFields <- c(modelFitSectionFields, list(data.frame( varName=c("SRMR"), regexPattern=c("Value"), varType=c("dec"), stringsAsFactors=FALSE ) )) } } arglist <- extractSummaries_1plan(arglist, modelFitSectionHeaders, modelFitSectionFields, modelFitSection, filename) return(arglist) } #' Divide text into fields #' #' Helper function to divide an input section 
into key-value pair list taken from mplus2lavaan #' #' @param section.text The section text #' @param required Required sections #' @return Divided sections #' @keywords internal #' @examples #' # make me!!! divideIntoFields <- function(section.text, required) { if (is.null(section.text)) { return(NULL) } section.split <- strsplit(paste(section.text, collapse=" "), ";", fixed=TRUE)[[1]] section.divide <- list() for (cmd in section.split) { if (grepl("^\\s*!.*", cmd, perl=TRUE)) next #skip comment lines if (grepl("^\\s+$", cmd, perl=TRUE)) next #skip blank lines #mplus is apparently tolerant of specifications that don't include IS/ARE/= #example: usevariables x1-x10; #thus, split on spaces and assume that first element is lhs, drop second element if IS/ARE/=, and assume remainder is rhs #but if user uses equals sign, then spaces will not always be present (e.g., usevariables=x1-x10) if ( (leadingEquals <- regexpr("^\\s*[A-Za-z]+[A-Za-z_-]*\\s*(=)", cmd[1L], perl=TRUE))[1L] > 0) { cmdName <- trimSpace(substr(cmd[1L], 1, attr(leadingEquals, "capture.start") - 1)) cmdArgs <- trimSpace(substr(cmd[1L], attr(leadingEquals, "capture.start") + 1, nchar(cmd[1L]))) } else { cmd.spacesplit <- strsplit(trimSpace(cmd[1L]), "\\s+", perl=TRUE)[[1L]] if (length(cmd.spacesplit) < 2L) { #for future: make room for this function to prase things like just TECH13 (no rhs) } else { cmdName <- trimSpace(cmd.spacesplit[1L]) if (length(cmd.spacesplit) > 2L && tolower(cmd.spacesplit[2L]) %in% c("is", "are")) { cmdArgs <- paste(cmd.spacesplit[3L:length(cmd.spacesplit)], collapse=" ") #remainder, removing is/are } else { cmdArgs <- paste(cmd.spacesplit[2L:length(cmd.spacesplit)], collapse=" ") #is/are not used, so just join rhs } } } section.divide[[make.names(tolower(cmdName))]] <- cmdArgs } if (!missing(required)) { stopifnot(all(required %in% names(section.divide))) } return(section.divide) } #' Extract warnings and errors from 1 mplus file #' #' Helper function #' #' @param outfiletext The 
text of the output file #' @param filename The filename #' @param input The input #' @return A list with two elements #' \item{errors}{Mplus Errors} #' \item{warnings}{Mplus Warnings} #' @keywords internal #' @examples #' # make me!!! extractWarningsErrors_1file <- function(outfiletext, filename, input) { warnerr <- list(warnings = list(), errors = list()) class(warnerr$errors) <- c("list", "mplus.errors") class(warnerr$warnings) <- c("list", "mplus.warnings") if (!inherits(input, "mplus.inp")) { warning("Could not identify warnings and errors; input is not of class mplus.inp") return(warnerr) } if (is.null(attr(input, "start.line")) || is.null(attr(input, "end.line")) || attr(input, "start.line") < 0L || attr(input, "end.line") < 0L) { warning("Could not identify bounds of input section: ", filename) return(warnerr) } #handle input warnings and errors first startInputWarnErr <- attr(input, "end.line") + 1L #first eligible line is after input section endInputWarnErr <- grep("^\\s*(INPUT READING TERMINATED NORMALLY|\\*\\*\\* WARNING.*|\\d+ (?:ERROR|WARNING)\\(S\\) FOUND IN THE INPUT INSTRUCTIONS|\\*\\*\\* ERROR.*)\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE) w <- 1 #counters for warnings and errors lists e <- 1 #only process section if end was identified properly if (length(endInputWarnErr) > 0L) { #The above will match all of the possible relevant lines. #To identify input warnings/errors section, need to go to first blank line after the final warning or error. 
(look in next 100 lines) lastWarn <- endInputWarnErr[length(endInputWarnErr)] blank <- which(outfiletext[lastWarn:(lastWarn + 100 )] == "")[1L] + lastWarn - 1 warnerrtext <- outfiletext[startInputWarnErr[1L]:(blank-1)] lines <- friendlyGregexpr("^\\s*(\\*\\*\\* WARNING|\\*\\*\\* ERROR).*\\s*$", warnerrtext, perl=TRUE) if (!is.null(lines)) { for (l in 1:nrow(lines)) { if (l < nrow(lines)) { warn.err.body <- trimSpace(warnerrtext[(lines[l,"element"] + 1):(lines[l+1,"element"] - 1)]) } else { warn.err.body <- trimSpace(warnerrtext[(lines[l,"element"] + 1):length(warnerrtext)]) } if (substr(lines[l,"tag"], 1, 11) == "*** WARNING") { warnerr$warnings[[w]] <- warn.err.body w <- w + 1 } else if (substr(lines[l,"tag"], 1, 9) == "*** ERROR") { warnerr$errors[[e]] <- warn.err.body splittag <- strsplit(lines[l,"tag"], "\\s+", perl=TRUE)[[1L]] if (length(splittag) > 3L && splittag[3L] == "in") { attr(warnerr$errors[[e]], "section") <- tolower(paste(splittag[4L:(which(splittag == "command") - 1L)], collapse=".")) } e <- e + 1 } else { stop ("Cannot discern warning/error type: ", lines[l, "tag"]) } } } } #now handle estimation errors and warnings #these fall above either # 1) MODEL FIT INFORMATION: model converged with warnings # 2) MODEL RESULTS: model did not converge, so no fit statistics produced # 3) FINAL CLASS COUNTS (occurs for some mixture models, which report class counts before model results) # 4) TESTS OF MODEL FIT (older versions of Mplus) # # It's harder to determine where the section begins, however, because there is no clear boundary # with the preceding section, which is heterogeneous (e.g., sample stats). # # In the case of warnings only, the estimation warnings section is demarcated by # THE MODEL ESTIMATION TERMINATED NORMALLY above and MODEL FIT INFORMATION below. # # In other cases (maybe dependent on Mplus version), warnings are printed above THE MODEL ESTIMATION TERMINATED NORMALLY. 
# Allow for the possibility that the estimation warnings/errors section begins with WARNING: # # For failed models, the section likely begins with one of three possibilities: # 1) THE MODEL ESTIMATION DID NOT TERMINATE NORMALLY # 2) THE LOGLIKELIHOOD DECREASED # 3) NO CONVERGENCE # # Warnings that can potentially be ignored are prefixed by "WARNING: " # whereas more serious estimation problems (errors) typically have no prefix. # # Blank lines indicate a boundary in each message. #the end sections are more well behaved (esp. if there is Tech 9 output). Identify end first, then constrain start to precede end endEstWarnErr <- grep("^\\s*(MODEL FIT INFORMATION|FINAL CLASS COUNTS|MODEL RESULTS|TESTS OF MODEL FIT)\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE) if (length(endEstWarnErr) == 0L) { return(warnerr) } #unable to find section properly startEstWarnErr <- grep("^\\s*(WARNING:.*|THE MODEL ESTIMATION DID NOT TERMINATE NORMALLY.*|THE LOGLIKELIHOOD DECREASED.*|THE MODEL ESTIMATION TERMINATED NORMALLY|NO CONVERGENCE\\.\\s+NUMBER OF ITERATIONS EXCEEDED\\..*)\\s*$", outfiletext[1:endEstWarnErr[1L]], ignore.case=TRUE, perl=TRUE) if (length(startEstWarnErr) > 0L && length(endEstWarnErr) > 0L) { warnerrtext <- outfiletext[startEstWarnErr[1L]:(endEstWarnErr[1L] - 1)] #if the model estimation terminated normally, delete this line from the text to parse (whereas the other start flags indicate a meaningful message) if (length(normexit <- grep("^\\s*THE MODEL ESTIMATION TERMINATED NORMALLY\\s*$", warnerrtext, perl=TRUE, ignore.case=TRUE)) > 0L) { warnerrtext <- warnerrtext[-normexit] } if (!any(warnerrtext != "")) { return(warnerr) #no non-blank lines -- just exit function as is } #trim blank lines from beginning and end of section warnerrtext <- warnerrtext[min(which(warnerrtext != "")):max(which(warnerrtext != ""))] #estimation warnings and errors are separated by blank lines. 
blanks <- which(warnerrtext == "") #trim consecutive blank lines (throw off blanks-based parsing below) consec <- which(diff(blanks) == 1) if (length(consec) > 0L) { warnerrtext <- warnerrtext[-1*blanks[consec]] blanks <- which(warnerrtext == "") #clunky } #for loop is probably clunky here, but works for now startMsg <- 1 #first line of a message for (line in 1:length(warnerrtext)) { if ((line %in% blanks && ! (line-1) %in% blanks) || line == length(warnerrtext)) { msg <- trimSpace(warnerrtext[startMsg:ifelse(line %in% blanks, line - 1, line)]) if (grepl("^\\s*WARNING:", msg[1L], ignore.case=TRUE, perl=TRUE)) { warnerr$warnings[[w]] <- msg w <- w+1 } else { warnerr$errors[[e]] <- msg #if not prefixed by WARNING:, treat as error e <- e + 1 } startMsg <- line + 1 } } } else { } #warning("Unable to identify estimation warnings and errors section.") } return(warnerr) } #' Extract and parse Mplus input file #' #' Function to extract and parse mplus input syntax from the output file #' #' @param outfiletext The text of the output file #' @param filename The filename #' @return The parsed input file #' @keywords internal #' @examples #' # make me!!! extractInput_1file <- function(outfiletext, filename) { input <- list() class(input) <- c("list", "mplus.inp") startInput <- grep("^\\s*INPUT INSTRUCTIONS\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE) if (length(startInput) == 0L) { warning("Could not find beginning of input for: ", filename) attr(input, "start.line") <- attr(input, "end.line") <- -1L return(input) } else { startInput <- startInput[1L] + 1L } #skip input instructions line itself endInput <- grep("^\\s*(INPUT READING TERMINATED NORMALLY|\\*\\*\\* WARNING.*|\\d+ (?:ERROR|WARNING)\\(S\\) FOUND IN THE INPUT INSTRUCTIONS|\\*\\*\\* ERROR.*)\\s*$", outfiletext, ignore.case=TRUE, perl=TRUE) if (length(endInput) == 0L) { #In Mplus v6.12 (and perhaps at some other point in the evolution), the input parser output was not included. 
#In such cases, try to fall back to the first line of the TITLE: XXX line, which is reprinted after input title1 <- grep("\\s*TITLE:\\s*(.*)$", outfiletext[1:100], perl=TRUE) #assume it lives in first 100 lines if (length(title1)==1L && length((endinputTitle <- grep(sub("\\s*TITLE:\\s*(.*)$", "^\\\\s*\\1\\\\s*$", outfiletext[title1], perl=TRUE), outfiletext)) == 1L)) { endInput <- endinputTitle - 1L } else { warning("Could not find end of input for: ", filename) attr(input, "start.line") <- attr(input, "end.line") <- -1 return(input) } } else { endInput <- endInput[1L] - 1L } #one line before first warning or end of instructions input.text <- outfiletext[startInput[1L]:endInput[1L]] #explicit first element because there could be both warnings and errors. #some code adapted from mplus2lavaan prototype inputHeaders <- grep("^\\s*(title:|data.*:|variable:|define:|analysis:|model.*:|output:|savedata:|plot:|montecarlo:)", input.text, ignore.case=TRUE, perl=TRUE) stopifnot(length(inputHeaders) > 0L) for (h in 1:length(inputHeaders)) { sectionEnd <- ifelse(h < length(inputHeaders), inputHeaders[h+1] - 1, length(input.text)) section <- input.text[inputHeaders[h]:sectionEnd] sectionName <- trimSpace(sub("^([^:]+):.*$", "\\1", section[1L], perl=TRUE)) #obtain text before the colon #dump section name from input syntax section[1L] <- sub("^[^:]+:(.*)$", "\\1", section[1L], perl=TRUE) input[[make.names(tolower(sectionName))]] <- section } #divide some input sections into fields #need to do a better job here of handling blank lines and such input$title <- paste(trimSpace(input$title), collapse=" ") input$data <- divideIntoFields(input$data) input$data.imputation <- divideIntoFields(input$data.imputation) input$variable <- divideIntoFields(input$variable) input$analysis <- divideIntoFields(input$analysis) input$montecarlo <- divideIntoFields(input$montecarlo) attr(input, "start.line") <- startInput attr(input, "end.line") <- endInput return(input) } #' Extract the summaries from 
#' one file
#'
#' Description: This function parses an output file for specific model details. It returns a list of model details for a single output file.
#'
#' @param outfiletext This is the output file in string form to be parsed. Passed in from extractModelSummaries.
#' @param filename Name of the file being parsed. Used in case of bad model, prints a warning.
#' @param input Parsed input instructions (as returned by extractInput_1file); supplies title, analysis type, and data type.
#' @return A list of the summaries
#' @keywords internal
#' @examples
#' # make me!!!
extractSummaries_1file <- function(outfiletext, filename, input) {
  #preallocates list
  arglist <- list()

  #obtain mplus software version from the first line of the output (perl=TRUE enables capture.start/capture.length attrs)
  if ((mplus.version <- regexpr("\\s*Mplus VERSION ([\\d\\.]+)\\s*", outfiletext[1L], perl=TRUE)) > 0L) {
    arglist$Mplus.version <- substr(outfiletext[1L], attr(mplus.version, "capture.start")[1L], attr(mplus.version, "capture.start")[1L] + attr(mplus.version, "capture.length")[1L] - 1)
  }

  ###Copy some elements of the input instructions into the summaries
  #copy title into arglist
  if (!is.null(input$title)) {
    arglist$Title <- input$title
  } else {
    #warning("Unable to locate title field. Returning missing") #Warning doesn't seem very useful
    arglist$Title <- NA_character_
  }

  #extract the analysis type, which is important for setting other parameters.
  if (!is.null(input$analysis$type)) {
    arglist$AnalysisType <- input$analysis$type
  } else {
    arglist$AnalysisType <- "GENERAL" #Analysis type not specified, default to general
  }

  #extract the data type (important for detecting imputation datasets)
  if (!is.null(input$data$type)) {
    arglist$DataType <- input$data$type
  } else if (any(c("montecarlo", "model.population") %in% names(input))) {
    arglist$DataType <- "MONTECARLO"
  } else {
    arglist$DataType <- "INDIVIDUAL" #Data type not specified, default to individual
  }

  if (!is.null(input$data.imputation)) {
    arglist$NImputedDatasets <- input$data.imputation$ndatasets #number of imputed datasets
  }

  #End input instructions processing

  #BEGIN ANALYSIS SUMMARY PROCESSING
  analysisSummarySection <- getSection("^\\s*SUMMARY OF ANALYSIS\\s*$", outfiletext)

  arglist$Estimator <- extractValue(pattern="^\\s*Estimator\\s*", analysisSummarySection, filename, type="str")
  arglist$Observations <- extractValue(pattern="^\\s*Number of observations\\s*", analysisSummarySection, filename, type="int")
  # Fix for multigroup models, where Observations were not parsed correctly
  if(is.na(arglist$Observations)){
    arglist$Observations <- extractValue(pattern="^\\s*Total sample size\\s*", analysisSummarySection, filename, type="int")
  }
  arglist$NGroups <- extractValue(pattern="^\\s*Number of groups\\s*", analysisSummarySection, filename, type="int")
  arglist$NDependentVars <- extractValue(pattern="^\\s*Number of dependent variables\\s*", analysisSummarySection, filename, type="int")
  arglist$NIndependentVars <- extractValue(pattern="^\\s*Number of independent variables\\s*", analysisSummarySection, filename, type="int")
  arglist$NContinuousLatentVars <- extractValue(pattern="^\\s*Number of continuous latent variables\\s*", analysisSummarySection, filename, type="int")
  arglist$NCategoricalLatentVars <- extractValue(pattern="^\\s*Number of categorical latent variables\\s*", analysisSummarySection, filename, type="int")
  #NOTE(review): type="int" looks suspicious here -- the "Information matrix" line in Mplus output is
  #textual (e.g. OBSERVED/EXPECTED); confirm whether type="str" was intended
  arglist$InformationMatrix <- extractValue(pattern="^\\s*Information matrix\\s*", analysisSummarySection, filename, type="int")
  #END ANALYSIS SUMMARY PROCESSING

  #BEGIN MODEL FIT STATISTICS PROCESSING
  #handle EFA output, which has separate model fit sections within each file
  #do this by extracting model fit sections for each and using an rbind call
  #NOTE(review): the (?!MIXTURE|TWOLEVEL) lookahead is anchored at the match start, before \s*EFA;
  #verify it actually excludes MIXTURE/TWOLEVEL EFA analysis types as intended
  if (grepl("(?!MIXTURE|TWOLEVEL)\\s*EFA\\s+", arglist$AnalysisType, ignore.case=TRUE, perl=TRUE)) {
    #TYPE = EFA <lb> <ub>: pull the requested factor range from the analysis type string
    factorLB <- as.numeric(sub(".*EFA\\s+(\\d+).*", "\\1", arglist$AnalysisType, perl=TRUE))
    factorUB <- as.numeric(sub(".*EFA\\s+\\d+\\s+(\\d+).*", "\\1", arglist$AnalysisType, perl=TRUE))
    factorSeq <- seq(factorLB, factorUB)

    EFASections <- grep(paste("^\\s*EXPLORATORY FACTOR ANALYSIS WITH (", paste(factorSeq, collapse="|"), ") FACTOR\\(S\\):\\s*$", sep=""), outfiletext, perl=TRUE)

    if (!length(EFASections) > 0) stop("Unable to locate section headers for EFA model fit statistics")

    #need to convert from list to data.frame format to allow for proper handling of rbind below
    arglistBase <- as.data.frame(arglist, stringsAsFactors=FALSE)

    efaList <- list()
    for (thisFactor in 1:length(EFASections)) {
      #subset output by starting text to be searched at the point where factor output begins
      modelFitSection <- getSection_Blanklines("^(TESTS OF MODEL FIT|MODEL FIT INFORMATION)$", outfiletext[EFASections[thisFactor]:length(outfiletext)])
      efaList[[thisFactor]] <- extractSummaries_1section(modelFitSection, arglistBase, filename)
      efaList[[thisFactor]]$NumFactors <- factorSeq[thisFactor] #one summary row per factor solution
    }

    arglist <- do.call(rbind, efaList)
  } else if (length(multisectionMatches <- grep("^\\s*MODEL FIT INFORMATION FOR (?!THE LATENT CLASS INDICATOR MODEL PART).*", outfiletext, perl=TRUE, value=TRUE)) > 0L) {
    #use negative lookahead to ensure we don't grab the TECH10 output for LCA where it lists model fit info for latent class part
    #support Mplus v8 invariance testing outputs with one model fit section per variant (MODEL FIT INFORMATION FOR THE SCALAR MODEL etc.)
    #need to convert from list to data.frame format to allow for proper handling of rbind below
    arglistBase <- as.data.frame(arglist, stringsAsFactors=FALSE)
    multiList <- list()
    sectionNames <- sub("^\\s*MODEL FIT INFORMATION FOR\\s+(?:THE)*\\s*([\\w\\.]+)", "\\1", multisectionMatches, perl=TRUE)
    for (s in 1:length(multisectionMatches)) {
      fitinfo <- getSection(multisectionMatches[s], outfiletext)
      if (!is.null(fitinfo)) {
        multiList[[s]] <- extractSummaries_1section(fitinfo, arglistBase, filename, input)
      }
    }
    arglist <- do.call(rbind, multiList)
    arglist$Model <- sectionNames #add model info
  } else {
    #the common case: a single model fit section for the whole file
    modelFitSection <- getSection("^(TESTS OF MODEL FIT|MODEL FIT INFORMATION)$", outfiletext)
    arglist <- extractSummaries_1section(modelFitSection, arglist, filename, input)
  }

  #CLASSIFICATION QUALITY (mixture models only; Entropy is NA when section absent)
  classificationQuality <- getSection("^CLASSIFICATION QUALITY$", outfiletext)

  #overkill
  #arglist <- extractSummaries_1plan(arglist, "", list(data.frame(varName="Entropy", regexPattern="Entropy", varType=c("dec"), stringsAsFactors=FALSE)), classificationQuality, filename)
  #maybe try to avoid the is null logic and just have extractModelSummary correctly handle null sections
  if (!is.null(classificationQuality)) arglist$Entropy <- extractValue(pattern="^\\s*Entropy\\s*", classificationQuality, filename, type="dec") else arglist$Entropy <- NA_real_

  #TECH11 OUTPUT: LMR LRT
  tech11Output <- getSection("^\\s*TECHNICAL 11 OUTPUT\\s*$", outfiletext)
  if (!is.null(tech11Output)) {
    #headers delimit the TECH11 subsections; fields define name/regex/type triples parsed within each
    tech11headers <- c(
      "Random Starts Specifications for the k-1 Class Analysis Model",
      "VUONG-LO-MENDELL-RUBIN LIKELIHOOD RATIO TEST FOR \\d+ \\(H0\\) VERSUS \\d+ CLASSES",
      "LO-MENDELL-RUBIN ADJUSTED LRT TEST"
    )
    tech11fields <- list(
      data.frame(
        varName=c("T11_KM1Starts", "T11_KM1Final"),
        regexPattern=c("Number of initial stage random starts", "Number of final stage optimizations"),
        varType=c("int", "int"), stringsAsFactors=FALSE
      ),
      data.frame(
        varName=c("T11_KM1LL", "T11_VLMR_2xLLDiff", "T11_VLMR_ParamDiff", "T11_VLMR_Mean", "T11_VLMR_SD", "T11_VLMR_PValue"),
        regexPattern=c("H0 Loglikelihood Value", "2 Times the Loglikelihood Difference", "Difference in the Number of Parameters", "Mean", "Standard Deviation", "P-Value"),
        varType=c("dec", "dec", "int", "dec", "dec", "dec"), stringsAsFactors=FALSE
      ),
      data.frame(
        varName=c("T11_LMR_Value", "T11_LMR_PValue"),
        regexPattern=c("^\\s*Value", "^\\s*P-Value"),
        varType=c("dec", "dec"), stringsAsFactors=FALSE
      )
    )

    arglist <- extractSummaries_1plan(arglist, tech11headers, tech11fields, tech11Output, filename)
  }

  #TECH14 OUTPUT: bootstrapped likelihood ratio test (BLRT)
  tech14Output <- getSection("^\\s*TECHNICAL 14 OUTPUT\\s*$", outfiletext)
  if (!is.null(tech14Output)) {
    tech14headers <- c(
      "", #section-inspecific parameters
      "Random Starts Specifications for the k-1 Class Analysis Model",
      "Random Starts Specification for the k-1 Class Model for Generated Data",
      "Random Starts Specification for the k Class Model for Generated Data",
      "PARAMETRIC BOOTSTRAPPED LIKELIHOOD RATIO TEST FOR \\d+ \\(H0\\) VERSUS \\d+ CLASSES"
    )
    tech14fields <- list(
      #top-level (no section)
      data.frame(
        varName=c("BLRT_RequestedDraws"),
        regexPattern=c("Number of bootstrap draws requested"),
        varType=c("str"), stringsAsFactors=FALSE
      ),
      #Random Starts Specifications for the k-1 Class Analysis Model
      data.frame(
        varName=c("BLRT_KM1AnalysisStarts", "BLRT_KM1AnalysisFinal"),
        regexPattern=c("Number of initial stage random starts", "Number of final stage optimizations"),
        varType=c("int", "int"), stringsAsFactors=FALSE
      ),
      #Random Starts Specification for the k-1 Class Model for Generated Data
      #v7 format: Number of final stage optimizations for the\n initial stage random starts <N>
      #v6 format: Number of final stage optimizations <N>
      #Thus, include the genfinal twice here to catch both circumstances
      data.frame(
        varName=c("BLRT_KM1GenStarts", "BLRT_KM1GenFinal", "BLRT_KM1GenFinal"),
        regexPattern=c("Number of initial stage random starts", "+1:Number of final stage optimizations for the", "Number of final stage optimizations"),
        varType=c("int", "int", "int"), stringsAsFactors=FALSE
      ),
      data.frame(
        varName=c("BLRT_KGenStarts", "BLRT_KGenFinal"),
        regexPattern=c("Number of initial stage random starts", "Number of final stage optimizations"),
        varType=c("int", "int"), stringsAsFactors=FALSE
      ),
      data.frame(
        varName=c("BLRT_KM1LL", "BLRT_2xLLDiff", "BLRT_ParamDiff", "BLRT_PValue", "BLRT_SuccessfulDraws"),
        regexPattern=c("H0 Loglikelihood Value", "2 Times the Loglikelihood Difference", "Difference in the Number of Parameters", "Approximate P-Value", "Successful Bootstrap Draws"),
        varType=c("dec", "dec", "int", "dec", "int"), stringsAsFactors=FALSE
      )
    )

    arglist <- extractSummaries_1plan(arglist, tech14headers, tech14fields, tech14Output, filename)
  }

  #calculate adjusted AIC per Burnham & Anderson(2004), which is better than AIC for non-nested model selection
  #handle AICC calculation, requires AIC, Parameters, and observations
  if (!is.null(arglist$Parameters) && !is.na(arglist$Parameters) && !is.null(arglist$AIC) && !is.na(arglist$AIC) && !is.null(arglist$Observations) && !is.na(arglist$Observations)) {
    arglist$AICC <- arglist$AIC + (2*arglist$Parameters*(arglist$Parameters+1))/(arglist$Observations-arglist$Parameters-1)
  } else {
    arglist$AICC <- NA_real_
  }

  #Only warn about missing LL for ML-based estimators
  #too convoluted to maintain (and not so useful), generating errors I don't want to debug
  #    if ("Estimator" %in% extract && "LL" %in% extract
  #        && !is.na(arglist$Estimator) && arglist$Estimator %in% c("ML", "MLR", "MLM", "MLMV", "MLF")
  #        && ((grepl("imputation", arglist$DataType, ignore.case=TRUE) && is.na(arglist$LL_Mean))
  #          || (!grepl("imputation", arglist$DataType, ignore.case=TRUE) && is.na(arglist$LL))))
  #      warning("Model missing LL value, despite use of ML-based estimator. Likely a failed run.\n  ", filename)
  #
  #for now, skip including input instructions in the returned data.frame. Makes the output too cluttered.
  #arglist$InputInstructions <- paste((outfiletext[(startInput+1):(endInput-1)]), collapse="\n")
  arglist$Filename <- splitFilePath(filename)$filename #only retain filename, not path

  arglist <- as.data.frame(arglist, stringsAsFactors=FALSE)
  class(arglist) <- c("data.frame", "mplus.summaries")
  attr(arglist, "filename") <- arglist$Filename
  return(arglist)
}

#' Extract summary statistics from a single output file or from a group of Mplus models within a directory
#'
#' Parses a group of Mplus model output files (.out extension) for model fit statistics.
#' At this time, the details extracted are fixed and include: \code{Filename, InputInstructions, Title, Estimator,
#' LL, BIC, aBIC, AIC, AICC, Parameters, Observations, CFI, TLI, RMSEA_Estimate, RMSEA_90CI_LB, RMSEA_90CI_UB,
#' RMSEA_pLT05, ChiSqM_Value, ChiSqM_DF, ChiSq_PValue, BLRT_KM1LL, BLRT_PValue, BLRT_Numdraws)}. The
#' infrastructure is in place to allow for user-specified selection of summary statistics in future versions.
#'
#' @param target the directory containing Mplus output files (.out) to parse OR the
#'   single output file to be parsed. Defaults to the current working directory.
#'   Example: "C:/Users/Michael/Mplus Runs"
#' @param recursive optional. If \code{TRUE}, parse all models nested in
#'   subdirectories within \code{directory}. Defaults to \code{FALSE}.
#' @param filefilter a Perl regular expression (PCRE-compatible) specifying particular
#'   output files to be parsed within \code{directory}. See \code{regex} or
#'   \url{http://www.pcre.org/pcre.txt} for details about regular expression syntax.
#'
#' @return Returns a \code{data.frame} containing model fit statistics for all output files within \code{directory}.
#' The \code{data.frame} contains some of the following variables (depends on model type):
#'   \item{Title}{Title for the model, specified by the TITLE: command}
#'   \item{Filename}{Filename of the output file}
#'   \item{Estimator}{Estimator used for the model (e.g., ML, MLR, WLSMV, etc.)}
#'   \item{LL}{Log-likelihood of the model}
#'   \item{BIC}{Bayesian Information Criterion}
#'   \item{aBIC}{Sample-Size-Adjusted BIC (Sclove, 1987)}
#'   \item{AIC}{Akaike's Information Criterion}
#'   \item{AICC}{Corrected AIC, based on Sugiura (1978) and recommended by Burnham & Anderson (2002)}
#'   \item{DIC}{Deviance Information Criterion. Available in ESTIMATOR=BAYES output.}
#'   \item{Parameters}{Number of parameters estimated by the model}
#'   \item{pD}{Estimated number of parameters in Bayesian output}
#'   \item{Observations}{The number of observations for the model (does not support multiple-groups analysis at this time)}
#'   \item{CFI}{Confirmatory Fit Index}
#'   \item{TLI}{Tucker-Lewis Index}
#'   \item{RMSEA_Estimate}{Point estimate of root mean squared error of approximation}
#'   \item{RMSEA_90CI_LB}{Lower bound of the 90\% Confidence Interval around the RMSEA estimate.}
#'   \item{RMSEA_90CI_UB}{Upper bound of the 90\% Confidence Interval around the RMSEA estimate.}
#'   \item{RMSEA_pLT05}{Probability that the RMSEA estimate falls below .05, indicating good fit.}
#'   \item{ChiSqM_Value}{Model chi-squared value}
#'   \item{ChiSqM_DF}{Model chi-squared degrees of freedom}
#'   \item{ChiSqM_PValue}{Model chi-squared p value}
#'   \item{ChiSqM_ScalingCorrection}{H0 Scaling Correction Factor}
#'   \item{ObsRepChiSqDiff_95CI_LB}{Lower bound of 95\% confidence interval for the difference between observed and replicated chi-square values}
#'   \item{ObsRepChiSqDiff_95CI_UB}{Upper bound of 95\% confidence interval for the difference between observed and replicated chi-square values}
#'   \item{PostPred_PValue}{Posterior predictive p-value}
#'   \item{PriorPostPred_PValue}{Prior Posterior Predictive P-Value}
#'   \item{BLRT_RequestedDraws}{Number of requested bootstrap draws for TECH14.}
#'   \item{BLRT_KM1LL}{Log-likelihood of the K-1 model (one less class) for the Bootstrapped Likelihood Ratio Test (TECH14).}
#'   \item{BLRT_2xLLDiff}{Two times the log-likelihood difference of the models with K and K-1 classes (TECH14).}
#'   \item{BLRT_ParamDiff}{Difference in the number of parameters for models with K and K-1 classes (TECH14).}
#'   \item{BLRT_PValue}{P-value of the Bootstrapped Likelihood Ratio Test (TECH14) testing whether the K class model is significantly better than K-1}
#'   \item{BLRT_SuccessfulDraws}{The number of successful bootstrapped samples used in the Bootstrapped Likelihood Ratio Test}
#'   \item{SRMR}{Standardized root mean square residual}
#'   \item{SRMR.Between}{For TYPE=TWOLEVEL output, standardized root mean square residual for between level}
#'   \item{SRMR.Within}{For TYPE=TWOLEVEL output, standardized root mean square residual for within level}
#'   \item{WRMR}{Weighted root mean square residual}
#'   \item{ChiSqBaseline_Value}{Baseline (unstructured) chi-squared value}
#'   \item{ChiSqBaseline_DF}{Baseline (unstructured) chi-squared degrees of freedom}
#'   \item{ChiSqBaseline_PValue}{Baseline (unstructured) chi-squared p value}
#'   \item{NumFactors}{For TYPE=EFA output, the number of factors}
#'   \item{T11_KM1Starts}{TECH11: Number of initial stage random starts for k-1 model}
#'   \item{T11_KM1Final}{TECH11: Number of final stage optimizations for k-1 model}
#'   \item{T11_KM1LL}{TECH11: Log-likelihood of the K-1 model used for the Vuong-Lo-Mendell-Rubin LRT}
#'   \item{T11_VLMR_2xLLDiff}{TECH11: 2 * Log-likelihood Difference of K-class vs. K-1-class model for the Vuong-Lo-Mendell-Rubin LRT}
#'   \item{T11_VLMR_ParamDiff}{TECH11: Difference in number of parameters between K-class and K-1-class model for the Vuong-Lo-Mendell-Rubin LRT}
#'   \item{T11_VLMR_Mean}{TECH11: Vuong-Lo-Mendell-Rubin LRT mean}
#'   \item{T11_VLMR_SD}{TECH11: Vuong-Lo-Mendell-Rubin LRT standard deviation}
#'   \item{T11_VLMR_PValue}{TECH11: Vuong-Lo-Mendell-Rubin LRT p-value}
#'   \item{T11_LMR_Value}{TECH11: Lo-Mendell-Rubin Adjusted LRT value}
#'   \item{T11_LMR_PValue}{TECH11: Lo-Mendell-Rubin Adjusted LRT p-value}
#'
#' @author Michael Hallquist
#' @seealso \code{\link{regex}}, \code{\link{runModels}}, \code{\link{readModels}}
#' @keywords interface
#' @export
#' @examples
#' \dontrun{
#'   allExamples <- extractModelSummaries(
#'     "C:/Program Files/Mplus/Mplus Examples/User's Guide Examples")
#' }
extractModelSummaries <- function(target=getwd(), recursive=FALSE, filefilter) {
  #DEPRECATED: this function is retained only as a signpost to readModels(); it performs no parsing.
  #message("This function is deprecated and will be removed from future versions of MplusAutomation. Please use readModels() instead.")
  message("extractModelSummaries has been deprecated.
Please use readModels(\"nameofMplusoutfile.out\", what=\"summaries\")$summaries to replicate the old functionality.")

  #The original implementation is preserved below (commented out) for reference.
  #retain working directory and reset at end of run
  #  curdir <- getwd()
  #
  #  outfiles <- getOutFileList(target, recursive, filefilter)
  #
  #  details <- list()
  #
  #  #for each output file, use the extractSummaries_1file function to extract relevant data
  #  #note that extractSummaries_1file returns data as a list
  #  #rbind creates an array of lists by appending each extractSummaries_1file return value
  #  for (i in 1:length(outfiles)) {
  #    #read the file
  #    readfile <- scan(outfiles[i], what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE)
  #
  #    #bomb out for EFA files
  #    if (length(grep("TYPE\\s+(IS|=|ARE)\\s+((MIXTURE|TWOLEVEL)\\s+)+EFA\\s+\\d+", readfile, ignore.case=TRUE, perl=TRUE)) > 0) {
  #      warning(paste0("EFA, MIXTURE EFA, and TWOLEVEL EFA files are not currently supported by extractModelSummaries.\n  Skipping outfile: ", outfiles[i]))
  #      next #skip file
  #    }
  #
  #    #append params for this file to the details array
  #    #note that this is a memory-inefficient solution because of repeated copying. Better to pre-allocate.
  #
  #    inp <- extractInput_1file(readfile, outfiles[i])
  #    details[[i]] <- extractSummaries_1file(readfile, outfiles[i], inp)
  #  }
  #
  #  #if there are several output files, then use rbind.fill to align fields
  #  if (length(details) > 1L) details <- do.call(rbind.fill, details)
  #  else details <- details[[1L]]
  #
  #  #reset working directory
  #  setwd(curdir)
  #
  #  #cleanup columns containing only NAs
  #  for (col in names(details)) {
  #    if (all(is.na(details[[col]]))) details[[col]] <- NULL
  #  }
  #
  #  return(details)
}

#' Add header to saved data
#'
#' Description
#'
#' @param outfile The output file
#' @param directory The current working directory by default
#' @return NULL
#' @keywords internal
#' @examples
#' # make me!!!
#unimplemented stub: body intentionally empty (returns NULL)
addHeaderToSavedata <- function(outfile, directory=getwd()) {

}

#' Extract residual matrices
#'
#' Function that extracts the residual matrices including standardized ones
#' from the RESIDUAL OUTPUT section of an Mplus output file. Multiple-group
#' output is supported: each group's subsection becomes a named element of
#' the returned list.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of the residual matrices (class \code{mplus.residuals}); an
#'   empty list if no residual output is present.
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractResiduals <- function(outfiletext, filename) {
  residSection <- getSection("^RESIDUAL OUTPUT$", outfiletext)
  if (is.null(residSection)) return(list()) #no residuals output

  #allow for multiple groups
  residSubsections <- getMultilineSection("ESTIMATED MODEL AND RESIDUALS \\(OBSERVED - ESTIMATED\\)( FOR [\\w\\d\\s\\.,_]+)*", residSection, filename, allowMultiple=TRUE)
  matchlines <- attr(residSubsections, "matchlines")

  if (length(residSubsections) == 0) {
    warning("No sections found within residuals output.")
    return(list())
  } else if (length(residSubsections) > 1) groupNames <- make.names(gsub("^\\s*ESTIMATED MODEL AND RESIDUALS \\(OBSERVED - ESTIMATED\\)( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", residSection[matchlines], perl=TRUE)) #group name is captured from each subsection header

  residList <- list()

  #multiple groups possible
  for (g in 1:length(residSubsections)) {
    targetList <- list()
    #each matrixExtract call returns NULL when the corresponding matrix is absent from this subsection
    targetList[["meanEst"]] <- matrixExtract(residSubsections[[g]], "Model Estimated Means(/Intercepts/Thresholds)*", filename)
    targetList[["meanResid"]] <- matrixExtract(residSubsections[[g]], "Residuals for Means(/Intercepts/Thresholds)*", filename)
    targetList[["meanResid.std"]] <- matrixExtract(residSubsections[[g]], "Standardized Residuals \\(z-scores\\) for Means(/Intercepts/Thresholds)*", filename)
    targetList[["meanResid.norm"]] <- matrixExtract(residSubsections[[g]], "Normalized Residuals for Means(/Intercepts/Thresholds)*", filename)
    targetList[["covarianceEst"]] <- matrixExtract(residSubsections[[g]], "Model Estimated Covariances(/Correlations/Residual Correlations)*", filename)
    targetList[["covarianceResid"]] <- matrixExtract(residSubsections[[g]], "Residuals for Covariances(/Correlations/Residual Correlations)*", filename)
    #NOTE(review): the pattern below ends in "Residual Corr" (truncated) rather than "Residual Correlations" -- presumably
    #matching a line-wrapped Mplus header; confirm against actual output before "fixing"
    targetList[["covarianceResid.std"]] <- matrixExtract(residSubsections[[g]], "Standardized Residuals \\(z-scores\\) for Covariances(/Correlations/Residual Corr)*", filename)
    targetList[["covarianceResid.norm"]] <- matrixExtract(residSubsections[[g]], "Normalized Residuals for Covariances(/Correlations/Residual Correlations)*", filename)
    targetList[["slopeEst"]] <- matrixExtract(residSubsections[[g]], "Model Estimated Slopes", filename)
    targetList[["slopeResid"]] <- matrixExtract(residSubsections[[g]], "Residuals for Slopes", filename)

    if (length(residSubsections) > 1) {
      class(targetList) <- c("list", "mplus.residuals")
      residList[[groupNames[g]]] <- targetList
    } else residList <- targetList #single group: flatten (no per-group nesting)
  }

  class(residList) <- c("list", "mplus.residuals")
  if (length(residSubsections) > 1) attr(residList, "group.names") <- groupNames

  return(residList)
}

#' Extract Technical 1 matrix from Mplus
#'
#' Function that extracts the Tech1 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech1}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
#Extracts the TECHNICAL 1 OUTPUT section: parameter specification and starting value
#matrices (TAU, NU, LAMBDA, THETA, ALPHA, BETA, GAMMA, PSI, DELTA, etc.), with
#multiple-group support. Returns a list of class "mplus.tech1" with elements
#parameterSpecification and startingValues; an empty list if no TECH1 output exists.
extractTech1 <- function(outfiletext, filename) {
  tech1Section <- getSection("^TECHNICAL 1 OUTPUT$", outfiletext)
  if (is.null(tech1Section)) return(list()) #no tech1 output

  tech1List <- list()

  paramSpecSubsections <- getMultilineSection("PARAMETER SPECIFICATION( FOR [\\w\\d\\s\\.,_]+)*", tech1Section, filename, allowMultiple=TRUE)
  matchlines <- attr(paramSpecSubsections, "matchlines")

  paramSpecList <- list()
  if (length(paramSpecSubsections) == 0) {
    warning("No parameter specification sections found within TECH1 output.")
  } else if (length(paramSpecSubsections) > 1) {
    #group name is captured from each subsection header
    groupNames <- make.names(gsub("^\\s*PARAMETER SPECIFICATION( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", tech1Section[matchlines], perl=TRUE))
  } else { #just one section, no groups
    groupNames <- ""
  }

  #bugfix: use seq_along rather than 1:length() so that an empty subsection list skips the
  #loop entirely (1:length() would iterate over c(1, 0) and crash indexing the empty list;
  #sibling extractResiduals guards the same situation with an early return)
  for (g in seq_along(paramSpecSubsections)) {
    targetList <- list()
    #each matrixExtract call returns NULL when the corresponding matrix is absent from this subsection
    targetList[["tau"]] <- matrixExtract(paramSpecSubsections[[g]], "TAU", filename)
    targetList[["nu"]] <- matrixExtract(paramSpecSubsections[[g]], "NU", filename)
    targetList[["lambda"]] <- matrixExtract(paramSpecSubsections[[g]], "LAMBDA", filename)
    targetList[["theta"]] <- matrixExtract(paramSpecSubsections[[g]], "THETA", filename)
    targetList[["alpha"]] <- matrixExtract(paramSpecSubsections[[g]], "ALPHA", filename)
    targetList[["beta"]] <- matrixExtract(paramSpecSubsections[[g]], "BETA", filename)
    targetList[["gamma"]] <- matrixExtract(paramSpecSubsections[[g]], "GAMMA", filename)
    targetList[["psi"]] <- matrixExtract(paramSpecSubsections[[g]], "PSI", filename)
    targetList[["delta"]] <- matrixExtract(paramSpecSubsections[[g]], "DELTA", filename)
    targetList[["gamma.c"]] <- matrixExtract(paramSpecSubsections[[g]], "GAMMA\\(C\\)", filename)
    targetList[["alpha.c"]] <- matrixExtract(paramSpecSubsections[[g]], "ALPHA\\(C\\)", filename)
    targetList[["new_additional"]] <- matrixExtract(paramSpecSubsections[[g]], "NEW/ADDITIONAL PARAMETERS", filename)

    #latent class indicator part includes subsections for each latent class, such as class-varying thresholds
    if (groupNames[g] == "LATENT.CLASS.INDICATOR.MODEL.PART") {
      tauLines <- grep("TAU\\(U\\) FOR LATENT CLASS \\d+", paramSpecSubsections[[g]], perl=TRUE, value=TRUE)
      uniqueLC <- unique(gsub("^\\s*TAU\\(U\\) FOR LATENT CLASS (\\d+)\\s*$", "\\1", tauLines, perl=TRUE))
      for (lc in uniqueLC) {
        targetList[[paste0("tau.u.lc", lc)]] <- matrixExtract(paramSpecSubsections[[g]], paste0("TAU\\(U\\) FOR LATENT CLASS ", lc), filename)
      }
    }

    if (length(paramSpecSubsections) > 1) {
      class(targetList) <- c("list", "mplus.parameterSpecification")
      paramSpecList[[groupNames[g]]] <- targetList
    } else paramSpecList <- targetList #single group: flatten (no per-group nesting)
  }

  class(paramSpecList) <- c("list", "mplus.parameterSpecification")
  if (length(paramSpecSubsections) > 1) attr(paramSpecList, "group.names") <- groupNames

  startValSubsections <- getMultilineSection("STARTING VALUES( FOR [\\w\\d\\s\\.,_]+)*", tech1Section, filename, allowMultiple=TRUE)
  matchlines <- attr(startValSubsections, "matchlines")

  startValList <- list()
  if (length(startValSubsections) == 0) {
    warning("No starting value sections found within TECH1 output.")
  } else if (length(startValSubsections) > 1) {
    groupNames <- make.names(gsub("^\\s*STARTING VALUES( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", tech1Section[matchlines], perl=TRUE))
  } else {
    groupNames <- ""
  }

  #seq_along for the same empty-list safety as above
  for (g in seq_along(startValSubsections)) {
    targetList <- list()
    targetList[["tau"]] <- matrixExtract(startValSubsections[[g]], "TAU", filename)
    targetList[["nu"]] <- matrixExtract(startValSubsections[[g]], "NU", filename)
    targetList[["lambda"]] <- matrixExtract(startValSubsections[[g]], "LAMBDA", filename)
    targetList[["theta"]] <- matrixExtract(startValSubsections[[g]], "THETA", filename)
    targetList[["alpha"]] <- matrixExtract(startValSubsections[[g]], "ALPHA", filename)
    targetList[["beta"]] <- matrixExtract(startValSubsections[[g]], "BETA", filename)
    targetList[["gamma"]] <- matrixExtract(startValSubsections[[g]], "GAMMA", filename)
    targetList[["psi"]] <- matrixExtract(startValSubsections[[g]], "PSI", filename)
    targetList[["delta"]] <- matrixExtract(startValSubsections[[g]], "DELTA", filename)
    targetList[["gamma.c"]] <- matrixExtract(startValSubsections[[g]], "GAMMA\\(C\\)", filename)
    targetList[["alpha.c"]] <- matrixExtract(startValSubsections[[g]], "ALPHA\\(C\\)", filename)
    targetList[["new_additional"]] <- matrixExtract(startValSubsections[[g]], "NEW/ADDITIONAL PARAMETERS", filename)

    #latent class indicator part includes subsections for each latent class, such as class-varying thresholds
    if (groupNames[g] == "LATENT.CLASS.INDICATOR.MODEL.PART") {
      tauLines <- grep("TAU\\(U\\) FOR LATENT CLASS \\d+", startValSubsections[[g]], perl=TRUE, value=TRUE)
      uniqueLC <- unique(gsub("^\\s*TAU\\(U\\) FOR LATENT CLASS (\\d+)\\s*$", "\\1", tauLines, perl=TRUE))
      for (lc in uniqueLC) {
        targetList[[paste0("tau.u.lc", lc)]] <- matrixExtract(startValSubsections[[g]], paste0("TAU\\(U\\) FOR LATENT CLASS ", lc), filename)
      }
    }

    if (length(startValSubsections) > 1) {
      class(targetList) <- c("list", "mplus.startingValues")
      startValList[[groupNames[g]]] <- targetList
    } else startValList <- targetList
  }

  class(startValList) <- c("list", "mplus.startingValues")
  if (length(startValSubsections) > 1) attr(startValList, "group.names") <- groupNames

  tech1List <- list(parameterSpecification=paramSpecList, startingValues=startValList)
  class(tech1List) <- c("list", "mplus.tech1")

  return(tech1List)
}

extractSampstat <- function(outfiletext, filename) {
  sampstatSection <- getSection("^SAMPLE STATISTICS$", outfiletext)
  if (is.null(sampstatSection)) { #try output from TYPE=BASIC, which places these in a section of a different name
    sampstatSection <- getSection("^RESULTS FOR BASIC ANALYSIS$", outfiletext)
  }
  if(!is.null(sampstatSection) & all(sampstatSection == "")){
    first_line <- (attr(outfiletext, "headerlines")[attr(outfiletext, "headerlines") > tail(attr(sampstatSection, "lines"), 1)][1]+1)
    final_line <- (attr(outfiletext, "headerlines")[attr(outfiletext, "headerlines") > tail(attr(sampstatSection, "lines"), 1)][2]-1)
    sampstatSection
<- outfiletext[first_line:final_line] } sampstatList <- list() sampstatSubsections <- getMultilineSection("ESTIMATED SAMPLE STATISTICS( FOR [\\w\\d\\s\\.,_]+)*", sampstatSection, filename, allowMultiple=TRUE) matchlines <- attr(sampstatSubsections, "matchlines") if(is.na(sampstatSubsections)){ sampstatSubsections <- list(sampstatSection) matchlines <- attr(sampstatSubsections, "lines") } if (length(sampstatSubsections) == 0) warning ("No sample statistics sections found within SAMPSTAT output.") else if (length(sampstatSubsections) > 1) groupNames <- make.names(gsub("^\\s*ESTIMATED SAMPLE STATISTICS( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", sampstatSection[matchlines], perl=TRUE)) else #just one section, no groups groupNames <- "" for (g in 1:length(sampstatSubsections)) { targetList <- list() targetList[["means"]] <- matrixExtract(sampstatSubsections[[g]], "Means", filename) targetList[["covariances"]] <- matrixExtract(sampstatSubsections[[g]], "Covariances", filename) targetList[["correlations"]] <- matrixExtract(sampstatSubsections[[g]], "Correlations", filename) targetList[["correlations.vardiag"]] <- matrixExtract(sampstatSubsections[[g]], "CORRELATION MATRIX \\(WITH VARIANCES ON THE DIAGONAL\\)", filename, ignore.case=TRUE) #these seem to show up in DATA: TYPE=IMPUTATION outputs (e.g., ex11.8part2.out) targetList[["means.intercepts.thresholds"]] <- matrixExtract(sampstatSubsections[[g]], "Means/Intercepts/Thresholds", filename, ignore.case=TRUE) targetList[["within.level.variance.covariance"]] <- matrixExtract(sampstatSubsections[[g]], "WITHIN LEVEL VARIANCE/COVARIANCE", filename, ignore.case=TRUE) targetList[["within.level.correlation"]] <- matrixExtract(sampstatSubsections[[g]], "WITHIN LEVEL CORRELATION", filename, ignore.case=TRUE) targetList[["between.level.variance.covariance"]] <- matrixExtract(sampstatSubsections[[g]], "BETWEEN LEVEL VARIANCE/COVARIANCE", filename, ignore.case=TRUE) targetList[["between.level.correlation"]] <- 
matrixExtract(sampstatSubsections[[g]], "BETWEEN LEVEL CORRELATION", filename, ignore.case=TRUE) #I think these are only in older outputs targetList[["covariances.correlations.resid_correlations"]] <- matrixExtract(sampstatSubsections[[g]], "Covariances/Correlations/Residual Correlations", filename) targetList[["slopes"]] <- matrixExtract(sampstatSubsections[[g]], "Slopes", filename) #latent class indicator part includes subsections for each latent class, such as class-varying thresholds # if (groupNames[g] == "LATENT.CLASS.INDICATOR.MODEL.PART") { # tauLines <- grep("TAU\\(U\\) FOR LATENT CLASS \\d+", sampstatSubsections[[g]], perl=TRUE, value=TRUE) # uniqueLC <- unique(gsub("^\\s*TAU\\(U\\) FOR LATENT CLASS (\\d+)\\s*$", "\\1", tauLines, perl=TRUE)) # for (lc in uniqueLC) { # targetList[[paste0("tau.u.lc", lc)]] <- matrixExtract(sampstatSubsections[[g]], paste0("TAU\\(U\\) FOR LATENT CLASS ", lc), filename) # } # } if (length(sampstatSubsections) > 1) { class(targetList) <- c("list", "mplus.sampstat") sampstatList[[groupNames[g]]] <- targetList } else{ sampstatList <- targetList } } ##Extract Univariate counts and proportions univariateCountsSection <- getSection("^UNIVARIATE PROPORTIONS AND COUNTS FOR CATEGORICAL VARIABLES$", outfiletext) #remove warning lines, which throw off the parser (e.g., ex6.15.out) univariateCountsSection <- univariateCountsSection[!grepl("\\s*WARNING:.*", univariateCountsSection, perl=TRUE)] if (!is.null(univariateCountsSection)) { countSubsections <- getMultilineSection("Group\\s+([\\w\\d\\.,_]+)*", univariateCountsSection, filename, allowMultiple=TRUE) matchlines <- attr(countSubsections, "matchlines") if (!is.list(countSubsections) && is.na(countSubsections[1])) { countSubsections <- list(univariateCountsSection) #no sublists by group } else if (length(countSubsections) > 1) groupNames <- make.names(gsub("^\\s*Group\\s+([\\w\\d\\s\\.,_]+)\\s*$", "\\1", univariateCountsSection[matchlines], perl=TRUE)) else #just one section, no groups 
stop("not sure how we got here") for (g in 1:length(countSubsections)) { targetList <- list() df <- data.frame(do.call(rbind, strsplit(trimSpace(parseCatOutput(countSubsections[[g]])), "\\s+", perl=TRUE)), stringsAsFactors=FALSE) names(df) <- c("variable", "proportion", "count") df$proportion <- as.numeric(df$proportion) df$count <- as.numeric(df$count) #divide variable column into variable and category for clarity df$category <- as.numeric(sub(".*\\.Cat\\.(\\d+)", "\\1", df$variable, perl=TRUE)) df$variable <- sub("^(.*)\\.Cat\\.\\d+$", "\\1", df$variable, perl=TRUE) df <- df[,c("variable", "category", "proportion", "count")] #reorder df #targetList[["proportions.counts"]] <- df targetList <- df #just a single element at the moment class(targetList) <- c("data.frame", "mplus.propcounts.data.frame") if (length(countSubsections) > 1) { #class(targetList) <- c("list", "mplus.propcounts") sampstatList[[groupNames[g]]][["proportions.counts"]] <- targetList } else sampstatList[["proportions.counts"]] <- targetList } } # Extract univariate sample statistics ------------------------------------ univariate_sampstat <- getSection("^UNIVARIATE SAMPLE STATISTICS$", outfiletext) if(!is.null(univariate_sampstat)){ stats <- lapply(univariate_sampstat[grepl("\\d$", univariate_sampstat)], function(x){strsplit(trimws(x), split = "\\s+")[[1]]}) if(length(stats) %% 2 == 0){ out <- cbind(do.call(rbind, stats[seq(1, length(stats), by = 2)]), do.call(rbind, stats[seq(2, length(stats), by = 2)])) #headers <- univariate_sampstat[grepl("\\/", univariate_sampstat)] #headers <- gsub("%", " %", headers) #headers <- lapply(trimws(headers), function(x){strsplit(x, "\\s{2,}")[[1]]}) #headers[[1]] <- c(gsub("\\/", "", headers[[1]][grepl("\\/", headers[[1]])]), gsub("\\/.*$", "", headers[[2]][grepl("\\/", headers[[2]])])) #headers[[2]] <- gsub("^.+?\\/", "", headers[[2]]) #colnames(out) <- gsub(" %", "%", c(headers[[1]], headers[[2]])) var_names <- out[, 1] out <- gsub("%", "", out) out <- 
apply(out[, -1], 2, as.numeric) colnames(out) <- c("Mean", "Skewness", "Minimum", "%Min", "20%", "40%", "Median", "Sample Size", "Variance", "Kurtosis", "Maximum", "%Max", "60%", "80%") rownames(out) <- var_names sampstatList$univariate.sample.statistics <- out[, c("Sample Size", "Mean", "Variance", "Skewness", "Kurtosis", "Minimum", "Maximum", "%Min", "%Max", "20%", "40%", "Median", "60%", "80%")] } } class(sampstatList) <- c("list", "mplus.sampstat") if (length(sampstatSubsections) > 1) attr(sampstatList, "group.names") <- groupNames return(sampstatList) } extractCovarianceCoverage <- function(outfiletext, filename) { #TODO: Return type is sometimes list, sometimes matrix; a bit inconsistent covcoverageSection <- getSection("^COVARIANCE COVERAGE OF DATA$", outfiletext) if (is.null(covcoverageSection)) { return(list()) } #no COVARIANCE COVERAGE OF DATA output covcoverageList <- list() covcoverageSubsections <- getMultilineSection("PROPORTION OF DATA PRESENT( FOR [\\w\\d\\s\\.,_]+)*", covcoverageSection, filename, allowMultiple=TRUE) matchlines <- attr(covcoverageSubsections, "matchlines") if (length(covcoverageSubsections) == 0 || is.na(covcoverageSubsections)) { #See UG ex9.7.out message("No PROPORTION OF DATA PRESENT sections found within COVARIANCE COVERAGE OF DATA output.") return(covcoverageList) } else if (length(covcoverageSubsections) > 1) { groupNames <- make.names(gsub("^\\s*PROPORTION OF DATA PRESENT( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", covcoverageSection[matchlines], perl=TRUE)) } else { #just one section, no groups groupNames <- "" } for (g in 1:length(covcoverageSubsections)) { #targetList <- list() #for now, there is just one matrix extracted, so no need to label it or treat it as a list. 
Leaving scaffolding commented out if useful later #targetList[["covcoverage"]] <- matrixExtract(covcoverageSubsections[[g]], "Covariance Coverage", filename) targetList <- matrixExtract(covcoverageSubsections[[g]], "Covariance Coverage", filename) if (length(covcoverageSubsections) > 1) { #class(targetList) <- c("list", "mplus.covcoverage") covcoverageList[[groupNames[g]]] <- targetList } else covcoverageList <- targetList } if (is.list(covcoverageList)) { class(covcoverageList) <- c("list", "mplus.covcoverage") } else { class(covcoverageList) <- c("matrix", "mplus.covcoverage") } #single numeric matrix if (length(covcoverageSubsections) > 1) { attr(covcoverageList, "group.names") <- groupNames } return(covcoverageList) } #' Extract free file output #' #' Function for reading "free" output where a sequence of values populates a matrix #' #' @param filename The name of the output file #' @param outfile The output file #' @param make_symmetric A logical indicating whether or not to make the matrix symmetric, defaults to \code{TRUE} #' @return a matrix #' @keywords internal #' @examples #' # make me!!! extractFreeFile <- function(filename, outfile, make_symmetric=TRUE) { #Adapted from code graciously provided by Joe Glass. if (isEmpty(filename)) return(NULL) #TODO: make this filename building into a function (duped from read raw) outfileDirectory <- splitFilePath(outfile)$directory savedataSplit <- splitFilePath(filename) #if outfile target directory is non-empty, but savedataFile is without directory, then append #outfile directory to savedataFile. This ensures that R need not be in the working directory #to read the savedataFile. 
But if savedataFile has an absolute directory, don't append #if savedata directory is present and absolute, or if no directory in outfile, just use filename as is if (!is.na(savedataSplit$directory) && savedataSplit$absolute) savedataFile <- filename #just use savedata filename if has absolute path else if (is.na(outfileDirectory)) savedataFile <- filename #just use savedata filename if outfile is missing path (working dir) else savedataFile <- file.path(outfileDirectory, filename) #savedata path relative or absent and outfile dir is present if (!file.exists(savedataFile)) { warning("Cannot read file: ", filename) return(NULL) } values <- scan(savedataFile, what="character", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE) matrix.size <- function(x) { # per algebra of quadratic equations: p is the # of rows & columns in a symmetric # matrix given x unique covariance elements (the lower triangle plus diagonal). # This was constructed from the equation x = p(p+1)/2. p <- (-1/2) + sqrt(2*x + (1/4)) # if p is not an integer, having x elements does not result in a symmetric matrix p.isinteger <- !length(grep("[^[:digit:]]", as.character(p))) if (p.isinteger) { return (p) } else { cat("The length of the supplied vector is not appropriate to generate the matrix. 
Please check the data file.") return(NULL) } } matSize <- matrix.size(length(values)) mat <- matrix(NA_real_, nrow=matSize, ncol=matSize, dimnames=list(1:matSize, 1:matSize)) # create empty symmetric matrix mat[upper.tri(mat, diag=TRUE)] <- as.numeric(values) # import savedata information into the upper triangle (plus diagonal) of the matrix if (make_symmetric) { mat[lower.tri(mat)] <- t(mat)[lower.tri(mat)] #populate lower triangle } else { mat <- t(mat) # transpose the matrix to create a lower triangular matrix (plus diagonal) } return(mat) } #' Extract Technical 3 matrix from Mplus #' #' Function that extracts the Tech3 matrix #' #' @param outfiletext the text of the output file #' @param savedata_info Information on saved data #' @param filename The name of the file #' @return A list of class \dQuote{mplus.tech3} #' @keywords internal #' @seealso \code{\link{matrixExtract}} #' @examples #' # make me!!! extractTech3 <- function(outfiletext, savedata_info, filename) { tech3Section <- getSection("^TECHNICAL 3 OUTPUT$", outfiletext) if (is.null(tech3Section)) return(list()) #no tech3 output tech3List <- list() tech3List[["paramCov"]] <- matrixExtract(tech3Section, "ESTIMATED COVARIANCE MATRIX FOR PARAMETER ESTIMATES", filename) tech3List[["paramCor"]] <- matrixExtract(tech3Section, "ESTIMATED CORRELATION MATRIX FOR PARAMETER ESTIMATES", filename) if (!is.null(savedata_info) && !is.na(savedata_info$tech3File)) { tech3List[["paramCov.savedata"]] <- extractFreeFile(savedata_info$tech3File, filename, make_symmetric=TRUE) } else { tech3List[["paramCov.savedata"]] <- NULL } class(tech3List) <- c("list", "mplus.tech3") return(tech3List) } #' Extract Technical 4 matrix from Mplus #' #' Function that extracts the Tech4 matrix #' #' @param outfiletext the text of the output file #' @param filename The name of the file #' @return A list of class \dQuote{mplus.tech4} #' @keywords internal #' @seealso \code{\link{matrixExtract}} #' @examples #' # make me!!! 
extractTech4 <- function(outfiletext, filename) {
  #TODO: have empty list use mplus.tech4 class
  tech4Section <- getSection("^TECHNICAL 4 OUTPUT$", outfiletext)
  if (is.null(tech4Section)) return(list()) #no tech4 output

  tech4List <- list()

  #TECH4 may contain one subsection per group (multiple-group models) or a single unlabeled section
  tech4Subsections <- getMultilineSection("ESTIMATES DERIVED FROM THE MODEL( FOR [\\w\\d\\s\\.,_]+)*",
      tech4Section, filename, allowMultiple=TRUE)

  matchlines <- attr(tech4Subsections, "matchlines")

  if (length(tech4Subsections) == 0) {
    warning("No sections found within TECH4 output.")
    return(list())
  } else if (length(tech4Subsections) > 1) {
    #derive syntactically valid group names from each subsection header line
    groupNames <- make.names(gsub("^\\s*ESTIMATES DERIVED FROM THE MODEL( FOR ([\\w\\d\\s\\.,_]+))*\\s*$", "\\2", tech4Section[matchlines], perl=TRUE))
  }

  #seq_along is safe here (length 0 already returned above) and avoids the 1:length() pitfall
  for (g in seq_along(tech4Subsections)) {
    targetList <- list()

    targetList[["latMeansEst"]] <- matrixExtract(tech4Subsections[[g]], "ESTIMATED MEANS FOR THE LATENT VARIABLES", filename)
    targetList[["latCovEst"]] <- matrixExtract(tech4Subsections[[g]], "ESTIMATED COVARIANCE MATRIX FOR THE LATENT VARIABLES", filename)
    targetList[["latCorEst"]] <- matrixExtract(tech4Subsections[[g]], "ESTIMATED CORRELATION MATRIX FOR THE LATENT VARIABLES", filename)

    if (length(tech4Subsections) > 1) {
      #multiple groups: nest each group's matrices under its group name
      class(targetList) <- c("list", "mplus.tech4")
      tech4List[[groupNames[g]]] <- targetList
    } else tech4List <- targetList #single group: return the matrices at the top level
  }

  class(tech4List) <- c("list", "mplus.tech4")

  return(tech4List)
}

#' Extract Technical 7 from Mplus
#'
#' The TECH7 option is used in conjunction with TYPE=MIXTURE to request sample statistics
#' for each class using raw data weighted by the estimated posterior probabilities for each class.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech7}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech7 <- function(outfiletext, filename) {
  #TODO: have empty list use mplus.tech7 class
  #not sure whether there are sometimes multiple groups within this section.
  tech7Section <- getSection("^TECHNICAL 7 OUTPUT$", outfiletext)
  if (is.null(tech7Section)) return(list()) #no tech7 output

  tech7List <- list()

  #one subsection per latent class
  tech7Subsections <- getMultilineSection("SAMPLE STATISTICS WEIGHTED BY ESTIMATED CLASS PROBABILITIES FOR CLASS \\d+",
      tech7Section, filename, allowMultiple=TRUE)

  matchlines <- attr(tech7Subsections, "matchlines")

  if (length(tech7Subsections) == 0) {
    warning("No sections found within tech7 output.")
    return(list())
  } else if (length(tech7Subsections) > 1) {
    #derive syntactically valid names (e.g., "CLASS.1") from the class header lines
    groupNames <- make.names(gsub("^\\s*SAMPLE STATISTICS WEIGHTED BY ESTIMATED CLASS PROBABILITIES FOR (CLASS \\d+)\\s*$", "\\1", tech7Section[matchlines], perl=TRUE))
  }

  #seq_along avoids the 1:length() pitfall (length 0 already handled above)
  for (g in seq_along(tech7Subsections)) {
    targetList <- list()

    targetList[["classSampMeans"]] <- matrixExtract(tech7Subsections[[g]], "Means", filename)
    targetList[["classSampCovs"]] <- matrixExtract(tech7Subsections[[g]], "Covariances", filename)

    if (length(tech7Subsections) > 1) {
      #multiple classes: nest each class's statistics under its name
      class(targetList) <- c("list", "mplus.tech7")
      tech7List[[groupNames[g]]] <- targetList
    } else tech7List <- targetList #single class: return statistics at the top level
  }

  class(tech7List) <- c("list", "mplus.tech7")

  return(tech7List)
}

#' Extract Technical 8 from Mplus
#'
#' The TECH8 option is used to print the optimization history of a model.
#' It also prints the potential scale reduction in Bayesian models.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech8}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech8 <- function(outfiletext, filename) {
  #not sure whether there are sometimes multiple groups within this section.
  #for now, this function only extract PSR in Bayes models
  tech8Section <- getSection("^TECHNICAL 8 OUTPUT$", outfiletext)

  #always return a classed list containing at least an (empty) psr data.frame
  tech8List <- list()
  class(tech8List) <- c("list", "mplus.tech8")
  psr <- data.frame()
  class(psr) <- c("data.frame", "mplus.psr.data.frame")
  tech8List[["psr"]] <- psr

  if (is.null(tech8Section)) return(tech8List) #no tech8 output

  #psr extraction subfunction: parse the iteration history table into a 3-column data.frame
  #(assumes a single header line matches; NOTE(review): multiple matches would vectorize the
  # firstBlank comparison -- confirm Mplus never emits the header twice within one section)
  extractPSR <- function(text) {
    startline <- grep("ITERATION\\s+SCALE REDUCTION\\s+HIGHEST PSR", text, perl=TRUE)
    if (length(startline) > 0L) {
      firstBlank <- which(text == "")
      firstBlank <- firstBlank[firstBlank > startline][1L] #first blank after starting line
      toparse <- text[(startline+1):firstBlank]
      psr <- data.frame(matrix(as.numeric(unlist(strsplit(trimSpace(toparse), "\\s+", perl=TRUE))),
          ncol=3, byrow=TRUE, dimnames=list(NULL, c("iteration", "psr", "param.highest.psr"))))
      class(psr) <- c("data.frame", "mplus.psr.data.frame")
      return(psr)
    } else {
      return(NULL)
    }
  }

  bayesPSR <- getMultilineSection("TECHNICAL 8 OUTPUT FOR BAYES ESTIMATION", tech8Section, filename, allowMultiple=FALSE)

  if (!is.na(bayesPSR[1L])) {
    #new outputs have "Iterations for model estimation" and "Iterations for computing PPPP"
    if (any(grepl("Iterations for computing PPPP", bayesPSR))) {
      pppp_text <- getSection("Iterations for computing PPPP", bayesPSR, headers = c("Iterations for computing PPPP", "Iterations for model estimation"))
      model_text <- getSection("Iterations for model estimation", bayesPSR, headers = c("Iterations for computing PPPP", "Iterations for model estimation"))
      tech8List[["psr"]] <- extractPSR(model_text)
      tech8List[["psr_pppp"]] <- extractPSR(pppp_text)
    } else {
      tech8List[["psr"]] <- extractPSR(bayesPSR)
    }
  }

  return(tech8List)
}

#' Extract Technical 9 matrix from Mplus
#'
#' Function that extracts the Tech9 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech9}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech9 <- function(outfiletext, filename) {
  tech9List <- list()
  class(tech9List) <- c("list", "mplus.tech9")

  tech9Section <- getSection("^TECHNICAL 9 OUTPUT$", outfiletext)
  if (is.null(tech9Section)) return(tech9List) #no tech9 output

  #lines such as "REPLICATION 12:" mark the start of each replication's error/warning text
  tech9Reps <- grep("^\\s*REPLICATION \\d+:\\s*$", tech9Section, perl=TRUE)
  repNums <- as.numeric(gsub("^\\s*REPLICATION (\\d+):\\s*$", "\\1", tech9Section[tech9Reps], perl=TRUE))

  if (length(tech9Reps) > 0L) {
    for (l in seq_along(tech9Reps)) {
      #message body runs from the line after this header to just before the next header (or end of section)
      if (l < length(tech9Reps)) {
        msg <- paste(tech9Section[ (tech9Reps[l]+1):(tech9Reps[l+1]-1) ], collapse=" ")
      } else {
        msg <- paste(tech9Section[ (tech9Reps[l]+1):length(tech9Section) ], collapse=" ")
      }

      msg <- trimSpace(gsub("\\s+", " ", msg, perl=TRUE)) #collapse runs of whitespace into single spaces
      tech9List[[ paste0("rep", repNums[l]) ]] <- list(rep=repNums[l], error=msg)
    }
  }

  return(tech9List)
}

#' Extract Technical 10 matrix from Mplus
#'
#' Function that extracts the Tech10 matrix
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return An empty list
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech10 <- function(outfiletext, filename) {
  tech10Section <- getSection("^TECHNICAL 10 OUTPUT$", outfiletext)
  if (is.null(tech10Section)) return(list()) #no tech10 output

  #extraction of TECH10 content is not yet implemented; return an empty list explicitly
  #(the original ended on a bare assignment, whose value R returns invisibly)
  tech10List <- list()
  return(tech10List)
}

#' Extract Technical 12 from Mplus
#'
#' The TECH12 option is used in conjunction with TYPE=MIXTURE to request residuals for observed
#' versus model estimated means, variances, covariances, univariate skewness, and univariate
#' kurtosis. The observed values come from the total sample. The estimated values are computed as
#' a mixture across the latent classes.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech12}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech12 <- function(outfiletext, filename) {
  #not sure whether there are sometimes multiple groups within this section.
  tech12Section <- getSection("^TECHNICAL 12 OUTPUT$", outfiletext)

  tech12List <- list()
  class(tech12List) <- c("list", "mplus.tech12")

  if (is.null(tech12Section)) return(tech12List) #no tech12 output

  tech12Subsections <- getMultilineSection("ESTIMATED MIXED MODEL AND RESIDUALS \\(OBSERVED - EXPECTED\\)",
      tech12Section, filename, allowMultiple=TRUE)

  if (length(tech12Subsections) == 0) {
    warning("No sections found within tech12 output.")
    return(list())
  } else if (length(tech12Subsections) > 1) {
    warning("extractTech12 does not yet know how to handle multiple sections (if such exist)")
  }

  #seq_along avoids the 1:length() pitfall (length 0 already handled above)
  for (g in seq_along(tech12Subsections)) {
    targetList <- list()

    #observed vs. model-estimated moments plus their residuals, as labeled in the Mplus output
    targetList[["obsMeans"]] <- matrixExtract(tech12Subsections[[g]], "Observed Means", filename)
    targetList[["mixedMeans"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Means", filename)
    targetList[["mixedMeansResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Means", filename)
    targetList[["obsCovs"]] <- matrixExtract(tech12Subsections[[g]], "Observed Covariances", filename)
    targetList[["mixedCovs"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Covariances", filename)
    targetList[["mixedCovsResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Covariances", filename)
    targetList[["obsSkewness"]] <- matrixExtract(tech12Subsections[[g]], "Observed Skewness", filename)
    targetList[["mixedSkewness"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Skewness", filename)
    targetList[["mixedSkewnessResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Skewness", filename)
    targetList[["obsKurtosis"]] <- matrixExtract(tech12Subsections[[g]], "Observed Kurtosis", filename)
    targetList[["mixedKurtosis"]] <- matrixExtract(tech12Subsections[[g]], "Estimated Mixed Kurtosis", filename)
    targetList[["mixedKurtosisResid"]] <- matrixExtract(tech12Subsections[[g]], "Residuals for Mixed Kurtosis", filename)

    if (length(tech12Subsections) > 1) {
      class(targetList) <- c("list", "mplus.tech12")
      tech12List[[g]] <- targetList #no known case where there are many output sections
    } else tech12List <- targetList
  }

  class(tech12List) <- c("list", "mplus.tech12")
  return(tech12List)
}

#' Extract Technical 15 from Mplus
#'
#' The TECH15 option is used in conjunction with TYPE=MIXTURE to request conditional probabilities
#' for the latent class variables.
#'
#' @param outfiletext the text of the output file
#' @param filename The name of the file
#' @return A list of class \dQuote{mplus.tech15}
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractTech15 <- function(outfiletext, filename) {
  tech15Section <- getSection("^TECHNICAL 15 OUTPUT$", outfiletext)

  #when the section is absent (NULL), grepl() yields logical(0) and trimws(NULL) yields
  #character(0), so the same classed structure is returned either way; the original's
  #post-hoc NULL check returned an identical object and was therefore redundant
  tech15List <- list(conditional.probabilities = trimws(tech15Section[grepl("^\\s+?P\\(", tech15Section)]))
  class(tech15List) <- c("list", "mplus.tech15")

  return(tech15List)
}

#' Extract Factor Score Statistics
#'
#' Function for extracting matrices for factor scores
#'
#' @param outfiletext The text of the output file
#' @param filename The name of the output file
#' @return A list
#' @keywords internal
#' @seealso \code{\link{matrixExtract}}
#' @examples
#' # make me!!!
extractFacScoreStats <- function(outfiletext, filename) { #for now, skip getSection call and use nested header to getMultilineSection to avoid issue of SAMPLE STATISTICS appearing both #as top-level header and sub-header within factor scores fssSection <- getMultilineSection("SAMPLE STATISTICS FOR ESTIMATED FACTOR SCORES::SAMPLE STATISTICS", outfiletext, filename, allowMultiple=FALSE) fssList <- list() class(fssList) <- c("list", "mplus.facscorestats") if (is.na(fssSection[1L])) return(fssList) #no factor scores output fssList[["Means"]] <- matrixExtract(fssSection, "Means", filename) fssList[["Covariances"]] <- matrixExtract(fssSection, "Covariances", filename) fssList[["Correlations"]] <- matrixExtract(fssSection, "Correlations", filename) return(fssList) } #' Extract Latent Class Counts #' #' Function for extracting counts of latent classes #' #' @param outfiletext The text of the output file #' @param filename The name of the output file #' @return a list #' @keywords internal #' @examples #' # make me!!! extractClassCounts <- function(outfiletext, filename, summaries) { #### #TODO: Implement class count extraction for multiple categorical latent variable models. #Example: UG7.21 #Output is quite different because of latent class patterns, transition probabilities, etc. 
#helper function for three-column class output getClassCols <- function(sectiontext) { #identify lines of the form class number, class count, class proportion: e.g., 1 136.38 .2728 numberLines <- grep("^\\s*\\d+\\s+[0-9\\.-]+\\s+[0-9\\.-]+\\s*$", sectiontext, perl=TRUE) if (length(numberLines) > 0) { #row bind each line, convert to numeric, and store as data.frame counts <- data.frame(do.call(rbind, lapply(strsplit(trimSpace(sectiontext[numberLines]), "\\s+", perl=TRUE), as.numeric))) if (!ncol(counts) == 3) { warning("Number of columns for model class counts is not three.") return(NULL) } names(counts) <- c("class", "count", "proportion") #store counts as integer counts <- transform(counts, class=as.integer(class)) return(counts) } else { return(NULL) } } countlist <- list() if(is.null(summaries)||missing(summaries)||summaries$NCategoricalLatentVars==1||is.na(summaries$NCategoricalLatentVars)){ #Starting in Mplus v7.3 and above, formatting of the class counts appears to have changed... 
#Capture the alternatives here if (is.null(summaries)||missing(summaries) || is.null(summaries$Mplus.version) || as.numeric(summaries$Mplus.version) < 7.3) { modelCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$", outfiletext) ppCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$", outfiletext) mostLikelyCounts <- getSection("^CLASSIFICATION OF INDIVIDUALS BASED ON THEIR MOST LIKELY LATENT CLASS MEMBERSHIP$", outfiletext) } else { modelCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON THE ESTIMATED MODEL$", outfiletext) ppCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext) mostLikelyCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON THEIR MOST LIKELY LATENT CLASS MEMBERSHIP$", outfiletext) } countlist[["modelEstimated"]] <- getClassCols(modelCounts) countlist[["posteriorProb"]] <- getClassCols(ppCounts) countlist[["mostLikely"]] <- getClassCols(mostLikelyCounts) #most likely by posterior probability section mostLikelyProbs <- getSection("^Average Latent Class Probabilities for Most Likely Latent Class Membership \\((Row|Column)\\)$", outfiletext) if (length(mostLikelyProbs) > 1L) { mostLikelyProbs <- mostLikelyProbs[-1L] } #remove line 1: "by Latent Class (Column)" #Example: #Average Latent Class Probabilities for Most Likely Latent Class Membership (Row) #by Latent Class (Column) # # 1 2 # # 1 0.986 0.014 # 2 0.030 0.970 # #A bit of a wonky section. Some notes: # 1) Rows represent those hard classified into that class. # 2) Rows sum to 1.0 and represent the summed average posterior probabilities of all the class assignment possibilities. # 3) Columns represent average posterior probabilitity of being in class 1 for those hard classified as 1 or 2. 
# 4) High diagonal indicates that hard classification matches posterior probability patterns. countlist[["avgProbs.mostLikely"]] <- unlabeledMatrixExtract(mostLikelyProbs, filename) #same, but for classification probabilities #also, starting ~Mplus 7.3, the columns and rows appear to have switched in this and the logit section (hence the Column|Row syntax) classificationProbs <- getSection("^Classification Probabilities for the Most Likely Latent Class Membership \\((Column|Row)\\)$", outfiletext) if (length(classificationProbs) > 1L) { classificationProbs <- classificationProbs[-1L] } #remove line 1: "by Latent Class (Column)" countlist[["classificationProbs.mostLikely"]] <- unlabeledMatrixExtract(classificationProbs, filename) #same, but for classification probability logits classificationLogitProbs <- getSection("^Logits for the Classification Probabilities for the Most Likely Latent Class Membership \\((Column|Row)\\)$", outfiletext) if (length(classificationLogitProbs) > 1L) { classificationLogitProbs <- classificationLogitProbs[-1L] } #remove line 1: "by Latent Class (Column)" countlist[["logitProbs.mostLikely"]] <- unlabeledMatrixExtract(classificationLogitProbs, filename) } else { # Exctract class_counts for multiple categorical latent variables. 
getClassCols_lta <- function(sectiontext) { numberLines <- grep("^\\s*([a-zA-Z0-9]+)?(\\s+[0-9\\.-]{1,}){1,}$", sectiontext, perl=TRUE) if (length(numberLines) > 0) { parsedlines <- strsplit(trimSpace(sectiontext[numberLines]), "\\s+", perl=TRUE) num_values <- sapply(parsedlines, length) if(length(unique(num_values)) == 1){ counts <- data.frame(t(sapply(parsedlines, as.numeric)), stringsAsFactors = FALSE) } else { # Pad shorter lines with NA on the left side parsedlines[which(num_values != max(num_values))] <- lapply(parsedlines[which(num_values != max(num_values))], function(x){ c(rep(NA, (max(num_values) - length(x))), x) }) counts <- do.call(rbind, parsedlines) # Repeat existing values on subsequent rows in columns containing NAs counts[,1] <- inverse.rle(list(lengths = diff(c(which(!is.na(counts[,1])), (nrow(counts)+1))), values = counts[,1][complete.cases(counts[,1])])) counts <- data.frame(counts, stringsAsFactors = FALSE) counts[, 2:4] <- lapply(counts[, 2:4], as.numeric) } return(counts) } else { return(NULL) } } if (missing(summaries) || is.null(summaries$Mplus.version) || as.numeric(summaries$Mplus.version) < 7.3) { posteriorProb.patterns <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASSES$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext) mostLikely.patterns <- getSection("^CLASSIFICATION OF INDIVIDUALS BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN$", outfiletext) mostLikelyCounts <- getSection("CLASSIFICATION OF INDIVIDUALS BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN", outfiletext) } else { posteriorProb.patterns <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext) mostLikely.patterns <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$::^BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN$", outfiletext) mostLikelyCounts <- getSection("^FINAL CLASS COUNTS AND PROPORTIONS FOR EACH LATENT CLASS 
VARIABLE$::^BASED ON THEIR MOST LIKELY LATENT CLASS PATTERN$", outfiletext) } # Class counts countlist[["modelEstimated"]] <- getClassCols_lta( getSection( "^FINAL CLASS COUNTS AND PROPORTIONS FOR EACH LATENT CLASS VARIABLE$::^BASED ON THE ESTIMATED MODEL$", outfiletext ) ) countlist[["posteriorProb"]] <- getClassCols_lta( getSection( "^FINAL CLASS COUNTS AND PROPORTIONS FOR EACH LATENT CLASS VARIABLE$::^BASED ON ESTIMATED POSTERIOR PROBABILITIES$", outfiletext ) ) countlist[["mostLikely"]] <- getClassCols_lta(mostLikelyCounts) countlist[which(names(countlist) %in% c("modelEstimated", "posteriorProb", "mostLikely"))] <- lapply(countlist[which(names(countlist) %in% c("modelEstimated", "posteriorProb", "mostLikely"))], setNames, c("variable", "class", "count", "proportion")) # Patterns countlist[["modelEstimated.patterns"]] <- getClassCols_lta( getSection( "^FINAL CLASS COUNTS AND PROPORTIONS FOR THE LATENT CLASS PATTERNS$::^BASED ON THE ESTIMATED MODEL$", outfiletext ) ) countlist[["posteriorProb.patterns"]] <- getClassCols_lta(posteriorProb.patterns) countlist[["mostLikely.patterns"]] <- getClassCols_lta(mostLikely.patterns) countlist[which(names(countlist) %in% c("modelEstimated.patterns", "posteriorProb.patterns", "mostLikely.patterns"))] <- lapply(countlist[which(names(countlist) %in% c("modelEstimated.patterns", "posteriorProb.patterns", "mostLikely.patterns"))], setNames, c(paste0("class.", unique( c(countlist[["mostLikely"]]$variable, countlist[["modelEstimated"]]$variable, countlist[["posteriorProb"]]$variable) )), "count", "proportion")) #Average latent class probabilities avgProbs <- getSection( "^Average Latent Class Probabilities for Most Likely Latent Class Pattern \\((Row|Column)\\)$::^by Latent Class Pattern \\((Row|Column)\\)$", outfiletext) column_headers <- strsplit(trimws(grep("\\s*Latent Class\\s{2,}", avgProbs, value = TRUE)), "\\s+", perl=TRUE)[[1]][-1] variable_pattern_rows <- grep(paste(c("^(\\s{2,}\\d+){", length(column_headers), "}$"), 
collapse = ""), avgProbs, perl=TRUE) variable_pattern_rows <- variable_pattern_rows[!c(FALSE, diff(variable_pattern_rows) != 1)] variable_patterns <- avgProbs[variable_pattern_rows] variable_patterns <- data.frame(t(sapply(strsplit(trimws(variable_patterns), "\\s+", perl=TRUE), as.numeric))) names(variable_patterns) <- c("Latent Class Pattern No.", column_headers[-1]) probs <- grep(paste(c("^\\s+\\d{1,}(\\s{2,}[0-9\\.-]+)+$"), collapse = ""), avgProbs[(variable_pattern_rows[length(variable_pattern_rows)]+1):length(avgProbs)], perl=TRUE, value = TRUE) # If the table is truncated, concatenate its parts if(length(probs) %% nrow(variable_patterns) > 1){ for(i in 2:(length(probs) %% nrow(variable_patterns))){ probs[1:(nrow(variable_patterns)+1)] <- paste(probs[1:(nrow(variable_patterns)+1)], substring(probs[((i-1)*(nrow(variable_patterns)+1)+1):(i*(nrow(variable_patterns)+1))], first = 8) ) } probs <- probs[1:nrow(variable_patterns)] } probs <- t(sapply(strsplit(trimws(probs[-1]), "\\s+", perl=TRUE), as.numeric))[,-1] countlist[["avgProbs.mostLikely"]] <- probs countlist[["avgProbs.mostLikely.patterns"]] <- variable_patterns # AFAIK this section is not reported for multiple categorical variables countlist[["classificationProbs.mostLikely"]] <- NULL # AFAIK this section is not reported for multiple categorical variables countlist[["logitProbs.mostLikely"]] <- NULL transitionProbs <- getSection("^LATENT TRANSITION PROBABILITIES BASED ON THE ESTIMATED MODEL$", outfiletext) if(!is.null(transitionProbs)){ section_starts <- grep("\\(Columns\\)$", transitionProbs) transitionProbs <- mapply(FUN = function(begin, end){ probs <- grep("^\\s+\\d{1,}(\\s{2,}[0-9\\.-]{2,}){1,}$", transitionProbs[begin:end], perl=TRUE, value = TRUE) probs <- do.call(rbind, strsplit(trimws(probs), "\\s+", perl=TRUE))[,-1] cbind(paste(gsub("\\s+(\\w+) Classes.*$", "\\1", transitionProbs[begin]) , ".", rep(c(1:nrow(probs)), ncol(probs)), sep = ""), paste(gsub(".+?by (\\w+) Classes.*$", "\\1", 
transitionProbs[begin]) , ".", as.vector(sapply(1:ncol(probs), rep, nrow(probs))), sep = ""), as.vector(probs)) }, begin = section_starts, end = c(section_starts[-1], length(transitionProbs)), SIMPLIFY = FALSE) if(length(transitionProbs) > 1){ transitionProbs <- do.call(rbind, transitionProbs) } else { transitionProbs <- transitionProbs[[1]] } transitionProbs <- data.frame(transitionProbs, stringsAsFactors = FALSE) names(transitionProbs) <- c("from", "to", "probability") transitionProbs$probability <- as.numeric(transitionProbs$probability) } countlist[["transitionProbs"]] <- transitionProbs } return(countlist) } #' Reconstruct matrix from unlabeled multi-line text output #' #' worker function for extracting Mplus matrix output from an unlabeled section #' where matrices are spread across blocks to keep within width constraints #' example: class counts output from latent class models. #' #' @param outfiletext The text of the output file #' @param filename The name of the output file #' @return a matrix #' @keywords internal #' @examples #' # make me!!! unlabeledMatrixExtract <- function(outfiletext, filename) { #This function extends the matrixExtract function by allowing for the matrix to be recreated #to have no header labels and where section headers have a blank line on either side. 
Only example is in the class counts section, where when there #are many classes, the most likely x posterior probability matrix is too wide and is output like this: # 1 2 3 4 5 6 7 8 9 # #1 0.885 0.000 0.000 0.017 0.024 0.000 0.000 0.019 0.055 #2 0.000 0.775 0.006 0.000 0.000 0.064 0.097 0.013 0.000 #3 0.000 0.004 0.826 0.035 0.000 0.082 0.000 0.000 0.052 #4 0.014 0.002 0.070 0.804 0.018 0.035 0.000 0.008 0.046 #5 0.042 0.000 0.001 0.076 0.842 0.000 0.000 0.001 0.038 #6 0.000 0.096 0.063 0.014 0.001 0.732 0.021 0.026 0.008 #7 0.002 0.091 0.010 0.005 0.001 0.034 0.808 0.005 0.005 #8 0.118 0.014 0.006 0.004 0.000 0.030 0.015 0.514 0.139 #9 0.030 0.001 0.056 0.059 0.014 0.024 0.000 0.109 0.691 #10 0.030 0.062 0.007 0.007 0.002 0.052 0.130 0.108 0.063 # # 10 # #1 0.000 #2 0.046 #3 0.001 #4 0.004 #5 0.000 #6 0.038 #7 0.039 #8 0.159 #9 0.016 #10 0.539 #Only one matrix can be extracted from outfiletext since sections are unlabeled if (length(outfiletext) > 0L && length(outfiletext) > 1L) { #pattern match: 1) blank line; 2) integers line; 3) blank line #find these cases, then add "DUMMY" to each of the header blank lines blankLines <- which(outfiletext == "") if (length(blankLines) > 0L) { headerLines <- c() for (b in 1:length(blankLines)) { if (b < length(blankLines) && blankLines[b+1] == blankLines[b] + 2) { # a blank line followed by a non-blank line followed by a blank line... 
# check that it represents an integer sequence (this may need to be removed in more general cases) intLine <- strsplit(trimSpace(outfiletext[blankLines[b]+1]), "\\s+", perl=TRUE)[[1L]] firstCol <- as.numeric(intLine[1L]) #number of the class in the first column if (all(intLine == firstCol:(firstCol + length(intLine) - 1) )) { headerLines <- c(headerLines, blankLines[b]) } } } #add the header to blank lines preceding class labels row outfiletext[headerLines] <- "DUMMY" #now use matrix extract to reconstruct matrix unlabeledMat <- matrixExtract(outfiletext, "DUMMY", filename) return(unlabeledMat) } else { return(NULL) } } else { return(NULL) } } #' Reconstruct matrix from multi-line text output #' #' main worker function for extracting Mplus matrix output #' where matrices are spread across blocks to keep within width constraints #' example: tech1 matrix output. #' #' @param outfiletext The text of the output file #' @param headerLine The header line #' @param filename The name of the output file #' @return a matrix #' @keywords internal #' @examples #' # make me!!! matrixExtract <- function(outfiletext, headerLine, filename, ignore.case=FALSE) { matLines <- getMultilineSection(headerLine, outfiletext, filename, allowMultiple=TRUE, ignore.case=ignore.case) if (!is.na(matLines[1])) { numBlocks <- length(matLines) blockList <- list() for (m in 1:numBlocks) { colHeaders <- strsplit(trimSpace(matLines[[m]][1]), "\\s+", perl=TRUE)[[1]] #m+3 because m+1 is col header, m+2 is line of underscores block <- matLines[[m]][c(-1,-2)] block <- block[block != ""] #drop blank lines #10Jul2012: Occasionally, Mplus includes a blank line block just for fun... 
like this: #Residuals for Covariances/Correlations/Residual Correlations #STRES4 #________ #in this case, skip the block if (length(block) == 0) next splitData <- strsplit(trimSpace(block), "\\s+", perl=TRUE) #alternative to remove blank lines after strsplit (above easier to read) #remove blank lines by comparing against character(0) #splitData2 <- splitData[sapply(splitData, function(x) !identical(x, character(0)))] #May 2017: in Mplus v7*, there is a header on the beginning of each row, including for vectors such as NU,TAU, etc. #example: # NU # Y X1 X2 W # ________ ________ ________ ________ # 1 0 0 0 0 #in Mplus v8, the "1" header on parameter vectors has been removed. # NU # Y12T3 Y13T3 Y14T3 # ________ ________ ________ # 0 0 0 #To overcome this problem, check the number of columns in splitData compared to the number of column headers. #If the number of columns is equal to the number of column headers, add a "1" at the beginning to make parsing code # consistent with v7 and expectation is matrix assembly in the aggMat section below. #Only add this tweak if the first element of v is not identical to any column header. #Otherwise this will add a "1" to some rows that are part of a matrix, not param vector. splitData <- lapply(splitData, function(v) { if (length(v) == length(colHeaders) && (! 
v[1L] %in% colHeaders)) { v <- c("1", v) } return(v) }) #pull out row names from each element rowHeaders <- sapply(splitData, "[", 1) mat <- matrix(NA_real_, nrow=length(rowHeaders), ncol=length(colHeaders), dimnames=list(rowHeaders, colHeaders)) for (r in 1:length(splitData)) { line <- mplus_as.numeric(splitData[[r]][-1]) #use mplus_as.numeric to handle D+XX scientific notation in output if ((lenDiff <- length(colHeaders) - length(line)) > 0) line <- c(line, rep(NA, lenDiff)) mat[r,] <- line } blockList[[m]] <- mat } #aggregate sections aggMatCols <- do.call("c", lapply(blockList, colnames)) aggMatRows <- rownames(blockList[[1]]) #row names are shared across blocks in Mplus output aggMat <- matrix(NA, nrow=length(aggMatRows), ncol=length(aggMatCols), dimnames=list(aggMatRows, aggMatCols)) #Unfortunately, due to Mplus 8-character printing limits for matrix sections, row/col names are not guaranteed to be unique. #This causes problems for using name-based matching to fill the matrix. #We know that blocks are printed from left-to-right by column (i.e., the block 1 has the first X columns, block 2 has the next Y columns). #Thus, we should be able to use a counter and fill columns numerically. This does not get around a problem of non-unique row names since we #can't easily discern the rows represented in a block based on row numbering alone. Thus, this is an incomplete solution for now (Aug2015 MH) colCounter <- 1 for (l in blockList) { aggMat[rownames(l), colCounter:(colCounter + ncol(l) - 1)] <- l #fill in just the block of the aggregate matrix represented in l colCounter <- colCounter + ncol(l) } } else { #warning("No lines identified for matrix extraction using header: \n ", headerLine) aggMat <- NULL } return(aggMat) } #EXTRACT DATA SUMMARY SECTION #NB. This does not support three-level output yet! 
#' Function to extract the SUMMARY OF DATA section from Mplus outputs #' #' @param outfiletext The text of the output file #' @param filename the name of the file containing textToScan. Used to make more intelligible warning messages. #' @keywords internal extractDataSummary <- function(outfiletext, filename) { dataSummarySection <- getSection("^\\s*SUMMARY OF DATA( FOR THE FIRST DATA SET)*\\s*$", outfiletext) if (is.null(dataSummarySection)) { empty <- list() class(empty) <- c("list", "mplus.data_summary") return(empty) } #detect groups multipleGroupMatches <- grep("^\\s*Group \\w+(?:\\s+\\(\\d+\\))*\\s*$", dataSummarySection, ignore.case=TRUE, perl=TRUE) #support Mplus v8 syntax Group G1 (0) with parentheses of numeric value if (length(multipleGroupMatches) > 0L) { groupNames <- sub("^\\s*Group (\\w+)(?:\\s+\\(\\d+\\))*\\s*$", "\\1", dataSummarySection[multipleGroupMatches], perl=TRUE) toparse <- list() #divide into a list by group for (i in 1:length(multipleGroupMatches)) { if (i < length(multipleGroupMatches)) { end <- multipleGroupMatches[i+1] - 1 } else { end <- length(dataSummarySection) } section <- dataSummarySection[(multipleGroupMatches[i]+1):end] attr(section, "group.name") <- groupNames[i] toparse[[groupNames[i]]] <- section } } else { attr(dataSummarySection, "group.name") <- "all" toparse <- list(all=dataSummarySection) } summaries <- c() iccs <- c() for (section in toparse) { summaries <- rbind(summaries, data.frame( NClusters = extractValue(pattern="^\\s*Number of clusters\\s*", section, filename, type="int"), NMissPatterns = extractValue(pattern="^\\s*Number of missing data patterns\\s*", section, filename, type="int"), AvgClusterSize = extractValue(pattern="^\\s*Average cluster size\\s*", section, filename, type="dec"), Group=attr(section, "group.name") )) #parse icc icc_start <- grep("^\\s*Estimated Intraclass Correlations for the Y Variables( for [\\w\\._]+ level)*\\s*$", section, perl=TRUE) iccout <- c() if (length(icc_start) > 0L) { to_parse 
<- trimSpace(section[(icc_start+1):length(section)]) #this assumes nothing comes afterwards in the section #problem: there is an unknown number of columns in this output. Example: # # Intraclass Intraclass Intraclass #Variable Correlation Variable Correlation Variable Correlation #Q22 0.173 Q38 0.320 Q39 0.127 #Q40 0.270 #solution: variables are always odd positions, correlations are always even repeat_line <- grep("(\\s*Variable\\s+Correlation\\s*)+", to_parse) if (length(repeat_line) == 1L) { #x <- to_parse[repeat_line] #not needed with odd/even solution #nrepeats <- length(regmatches(x, gregexpr("g", x))) icc_values <- strsplit(to_parse[(repeat_line+1):length(to_parse)], "\\s+") for (ss in icc_values) { if (length(ss) > 0L) { positions <- 1:length(ss) vars <- ss[positions[positions %% 2 != 0]] vals <- ss[positions[positions %% 2 == 0]] iccout <- rbind(iccout, data.frame(variable=vars, ICC=as.numeric(vals), stringsAsFactors=FALSE)) } } iccout$Group <- attr(section, "group.name") iccs <- rbind(iccs, iccout) } } } #trim out "all" in single group case if (length(multipleGroupMatches) == 0L) { summaries$Group <- NULL iccs$Group <- NULL } retlist <- list(overall=summaries, ICCs=iccs) class(retlist) <- c("list", "mplus.data_summary") return(retlist) } #Caspar van Lissa code for extract invariance testing section extractInvarianceTesting <- function(outfiletext, filename) { inv_test_firstline <- grep("^Invariance Testing$", outfiletext) if (length(inv_test_firstline) == 0L) { return(list()) } #section not found inv_test_endline <- grep("^MODEL FIT INFORMATION", outfiletext) retlist <- list() inv_test_endline <- inv_test_endline[inv_test_endline > inv_test_firstline][1] inv_test <- outfiletext[(inv_test_firstline+2):(inv_test_endline-3)] model_rows <- grep("^\\s+?\\w+(\\s{2,}[0-9.]+){4}$", inv_test, value = TRUE) model_rows <- t(sapply(model_rows, function(x){strsplit(trimws(x), "\\s+")[[1]]}, USE.NAMES = FALSE)) model_rownames <- model_rows[, 1] model_rows <- 
apply(model_rows[, -1], 2, as.numeric) row.names(model_rows) <- model_rownames colnames(model_rows) <- c("Parameters", "Chi-Square", "DF", "Pvalue") retlist$models <- model_rows[, -1] test_rows <- grep("^\\s+?(\\w+\\s){3}(\\s{2,}[0-9.]+){3}$", inv_test, value = TRUE) test_rows <- t(sapply(test_rows, function(x){strsplit(trimws(x), "\\s{2,}")[[1]]}, USE.NAMES = FALSE)) model_rownames <- test_rows[, 1] test_rows <- apply(test_rows[, -1], 2, as.numeric) row.names(test_rows) <- model_rownames colnames(test_rows) <- c("Chi-Square", "DF", "Pvalue") retlist$compared <- test_rows return(retlist) }
wrap.text <- structure(function(tex = "", wid = 30) { ## Purpose: Wraps text at wid characters wide (or smaller if necessary) ## ---------------------------------------------------------------------- ## Modified from: ## ---------------------------------------------------------------------- ## Arguments: ## tex: character string ## wid: how wide before the text is wrapped ## ---------------------------------------------------------------------- ## Author: Patrick Connolly, Creation date: 19 Mar 2007, 14:27 out.vec <- NULL tex.leng <- nchar(tex) tex.vec <- c(sapply(strsplit(tex, ""), paste, sep = "")) while(tex.leng > wid){ tex.leng <- length(tex.vec) spaces <- which(tex.vec == " ") need.break <- spaces > wid if(any(need.break) > 0){ break.at <- spaces[which(need.break)[1] -1] out.vec <- c(out.vec, "\n", tex.vec[1:break.at -1]) tex.vec <- tex.vec[(break.at + 1):tex.leng] } else { ### one last break seems to be necessary now if(length(tex.vec) > wid){ last.break <- rev(spaces)[1] tex.vec[last.break] <- "\n" } out.vec <- c(out.vec, "\n", tex.vec) tex.vec <- "" } } paste(out.vec[-1], collapse = "") } , comment = "19/03/2007")
/.tmp/hrapgc.wrap.text.R
no_license
Tuxkid/Gems
R
false
false
1,266
r
wrap.text <- structure(function(tex = "", wid = 30) { ## Purpose: Wraps text at wid characters wide (or smaller if necessary) ## ---------------------------------------------------------------------- ## Modified from: ## ---------------------------------------------------------------------- ## Arguments: ## tex: character string ## wid: how wide before the text is wrapped ## ---------------------------------------------------------------------- ## Author: Patrick Connolly, Creation date: 19 Mar 2007, 14:27 out.vec <- NULL tex.leng <- nchar(tex) tex.vec <- c(sapply(strsplit(tex, ""), paste, sep = "")) while(tex.leng > wid){ tex.leng <- length(tex.vec) spaces <- which(tex.vec == " ") need.break <- spaces > wid if(any(need.break) > 0){ break.at <- spaces[which(need.break)[1] -1] out.vec <- c(out.vec, "\n", tex.vec[1:break.at -1]) tex.vec <- tex.vec[(break.at + 1):tex.leng] } else { ### one last break seems to be necessary now if(length(tex.vec) > wid){ last.break <- rev(spaces)[1] tex.vec[last.break] <- "\n" } out.vec <- c(out.vec, "\n", tex.vec) tex.vec <- "" } } paste(out.vec[-1], collapse = "") } , comment = "19/03/2007")
\name{rollapply} \alias{rollapply} \alias{rollapplyr} \alias{rollapply.default} \alias{rollapply.ts} \alias{rollapply.zoo} \title{Apply Rolling Functions} \description{ A generic function for applying a function to rolling margins of an array. } \usage{ rollapply(data, \dots) \method{rollapply}{ts}(data, \dots) \method{rollapply}{zoo}(data, width, FUN, \dots, by = 1, by.column = TRUE, fill = if (na.pad) NA, na.pad = FALSE, partial = FALSE, align = c("center", "left", "right"), coredata = TRUE) \method{rollapply}{default}(data, \dots) rollapplyr(\dots, align = "right") } \arguments{ \item{data}{the data to be used (representing a series of observations).} \item{width}{numeric vector or list. In the simplest case this is an integer specifying the window width (in numbers of observations) which is aligned to the original sample according to the \code{align} argument. Alternatively, \code{width} can be a list regarded as offsets compared to the current time, see below for details.} \item{FUN}{the function to be applied.} \item{\dots}{optional arguments to \code{FUN}.} \item{by}{calculate FUN at every \code{by}-th time point rather than every point. \code{by} is only used if \code{width} is length 1 and either a plain scalar or a list.} \item{by.column}{logical. If \code{TRUE}, \code{FUN} is applied to each column separately.} \item{fill}{a three-component vector or list (recycled otherwise) providing filling values at the left/within/to the right of the data range. See the \code{fill} argument of \code{\link{na.fill}} for details.} \item{na.pad}{deprecated. Use \code{fill = NA} instead of \code{na.pad = TRUE}.} \item{partial}{logical or numeric. If \code{FALSE} (default) then \code{FUN} is only applied when all indexes of the rolling window are within the observed time range. If \code{TRUE}, then the subset of indexes that are in range are passed to \code{FUN}. 
  A numeric argument to \code{partial} can be used to determine
  the minimal window size for partial computations. See below for
  more details.}
  \item{align}{specifies whether the index of the result
    should be left- or right-aligned or centered (default) compared
    to the rolling window of observations. This argument is only used if
    \code{width} represents widths.}
  \item{coredata}{logical. Should only the \code{coredata(data)}
    be passed to every \code{width} window? If set to \code{FALSE} the full
    zoo series is used.}
}
\details{
  If \code{width} is a plain numeric vector its elements are regarded as
  widths to be interpreted in conjunction with \code{align} whereas if
  \code{width} is a list its components are regarded as offsets.

  In the above cases if the length of \code{width} is 1 then \code{width}
  is recycled for every \code{by}-th point.

  If \code{width} is a list its components represent integer offsets such
  that the i-th component of the list refers to time points at positions
  \code{i + width[[i]]}. If any of these points are below 1 or above the
  length of \code{index(data)} then \code{FUN} is not evaluated for that
  point unless \code{partial = TRUE} and in that case only the valid points
  are passed.

  The rolling function can also be applied to partial windows by setting
  \code{partial = TRUE}. For example, if \code{width = 3, align = "right"}
  then for the first point just that point is passed to \code{FUN} since the
  two points to its left are out of range. For the same example, if
  \code{partial = FALSE} then \code{FUN} is not invoked at all for the first
  two points.

  If \code{partial} is a numeric then it specifies the minimum number of
  offsets that must be within range. Negative \code{partial} is interpreted
  as \code{FALSE}.

  If \code{FUN} is \code{mean}, \code{max} or \code{median} and
  \code{by.column} is \code{TRUE} and width is a plain scalar and there are
  no other arguments then special purpose code is used to enhance
  performance.
Also in the case of \code{mean} such special purpose code is only invoked if the \code{data} argument has no \code{NA} values. See \code{\link{rollmean}}, \code{\link{rollmax}} and \code{\link{rollmedian}} for more details. Currently, there are methods for \code{"zoo"} and \code{"ts"} series and \code{"default"} method for ordinary vectors and matrices. \code{rollapplyr} is a wrapper around \code{rollapply} that uses a default of \code{align = "right"}. } \value{ A object of the same class as \code{data} with the results of the rolling function. } \seealso{\code{\link{rollmean}}} \examples{ ## rolling mean z <- zoo(11:15, as.Date(31:35)) rollapply(z, 2, mean) ## non-overlapping means z2 <- zoo(rnorm(6)) rollapply(z2, 3, mean, by = 3) # means of nonoverlapping groups of 3 aggregate(z2, c(3,3,3,6,6,6), mean) # same ## optimized vs. customized versions rollapply(z2, 3, mean) # uses rollmean which is optimized for mean rollmean(z2, 3) # same rollapply(z2, 3, (mean)) # does not use rollmean ## rolling regression: ## set up multivariate zoo series with ## number of UK driver deaths and lags 1 and 12 seat <- as.zoo(log(UKDriverDeaths)) time(seat) <- as.yearmon(time(seat)) seat <- merge(y = seat, y1 = lag(seat, k = -1), y12 = lag(seat, k = -12), all = FALSE) ## run a rolling regression with a 3-year time window ## (similar to a SARIMA(1,0,0)(1,0,0)_12 fitted by OLS) rr <- rollapply(seat, width = 36, FUN = function(z) coef(lm(y ~ y1 + y12, data = as.data.frame(z))), by.column = FALSE, align = "right") ## plot the changes in coefficients ## showing the shifts after the oil crisis in Oct 1973 ## and after the seatbelt legislation change in Jan 1983 plot(rr) ## rolling mean by time window (e.g., 3 days) rather than ## by number of observations (e.g., when these are unequally spaced): # ## - test data tt <- as.Date("2000-01-01") + c(1, 2, 5, 6, 7, 8, 10) z <- zoo(seq_along(tt), tt) ## - fill it out to a daily series, zm, using NAs ## using a zero width zoo series g on a grid g 
<- zoo(, seq(start(z), end(z), "day")) zm <- merge(z, g) ## - 3-day rolling mean rollapply(zm, 3, mean, na.rm = TRUE, fill = NA) ## different values of rule argument z <- zoo(c(NA, NA, 2, 3, 4, 5, NA)) rollapply(z, 3, sum, na.rm = TRUE) rollapply(z, 3, sum, na.rm = TRUE, fill = NULL) rollapply(z, 3, sum, na.rm = TRUE, fill = NA) rollapply(z, 3, sum, na.rm = TRUE, partial = TRUE) # this will exclude time points 1 and 2 # It corresonds to align = "right", width = 3 rollapply(zoo(1:8), list(seq(-2, 0)), sum) # but this will include points 1 and 2 rollapply(zoo(1:8), list(seq(-2, 0)), sum, partial = 1) rollapply(zoo(1:8), list(seq(-2, 0)), sum, partial = 0) # so will this rollapply(zoo(1:8), list(seq(-2, 0)), sum, fill = NA) # by = 3, align = "right" L <- rep(list(NULL), 8) L[seq(3, 8, 3)] <- list(seq(-2, 0)) str(L) rollapply(zoo(1:8), L, sum) rollapply(zoo(1:8), list(0:2), sum, fill = 1:3) rollapply(zoo(1:8), list(0:2), sum, fill = 3) L2 <- rep(list(-(2:0)), 10) L2[5] <- list(NULL) str(L2) rollapply(zoo(1:10), L2, sum, fill = "extend") rollapply(zoo(1:10), L2, sum, fill = list("extend", NULL)) rollapply(zoo(1:10), L2, sum, fill = list("extend", NA)) rollapply(zoo(1:10), L2, sum, fill = NA) rollapply(zoo(1:10), L2, sum, fill = 1:3) rollapply(zoo(1:10), L2, sum, partial = TRUE) rollapply(zoo(1:10), L2, sum, partial = TRUE, fill = 99) rollapply(zoo(1:10), list(-1), sum, partial = 0) rollapply(zoo(1:10), list(-1), sum, partial = TRUE) rollapply(zoo(cbind(a = 1:6, b = 11:16)), 3, rowSums, by.column = FALSE) # these two are the same rollapply(zoo(cbind(a = 1:6, b = 11:16)), 3, sum) rollapply(zoo(cbind(a = 1:6, b = 11:16)), 3, colSums, by.column = FALSE) # these two are the same rollapply(zoo(1:6), 2, sum, by = 2, align = "right") aggregate(zoo(1:6), c(2, 2, 4, 4, 6, 6), sum) # these two are the same rollapply(zoo(1:3), list(-1), c) lag(zoo(1:3), -1) # these two are the same rollapply(zoo(1:3), list(1), c) lag(zoo(1:3)) # these two are the same rollapply(zoo(1:5), list(c(-1, 
0, 1)), sum) rollapply(zoo(1:5), 3, sum) # these two are the same rollapply(zoo(1:5), list(0:2), sum) rollapply(zoo(1:5), 3, sum, align = "left") # these two are the same rollapply(zoo(1:5), list(-(2:0)), sum) rollapply(zoo(1:5), 3, sum, align = "right") # these two are the same rollapply(zoo(1:6), list(NULL, NULL, -(2:0)), sum) rollapply(zoo(1:6), 3, sum, by = 3, align = "right") # these two are the same rollapply(zoo(1:5), list(c(-1, 1)), sum) rollapply(zoo(1:5), 3, function(x) sum(x[-2])) # these two are the same rollapply(1:5, 3, rev) embed(1:5, 3) # these four are the same x <- 1:6 rollapply(c(0, 0, x), 3, sum, align = "right") - x rollapply(x, 3, sum, partial = TRUE, align = "right") - x rollapply(x, 3, function(x) sum(x[-3]), partial = TRUE, align = "right") rollapply(x, list(-(2:1)), sum, partial = 0) # same as Matlab's buffer(x, n, p) for valid non-negative p # See http://www.mathworks.com/help/toolbox/signal/buffer.html x <- 1:30; n <- 7; p <- 3 t(rollapply(c(rep(0, p), x, rep(0, n-p)), n, by = n-p, c)) # these three are the same y <- 10 * seq(8); k <- 4; d <- 2 # 1 # from http://ucfagls.wordpress.com/2011/06/14/embedding-a-time-series-with-time-delay-in-r-part-ii/ Embed <- function(x, m, d = 1, indices = FALSE, as.embed = TRUE) { n <- length(x) - (m-1)*d X <- seq_along(x) if(n <= 0) stop("Insufficient observations for the requested embedding") out <- matrix(rep(X[seq_len(n)], m), ncol = m) out[,-1] <- out[,-1, drop = FALSE] + rep(seq_len(m - 1) * d, each = nrow(out)) if(as.embed) out <- out[, rev(seq_len(ncol(out)))] if(!indices) out <- matrix(x[out], ncol = m) out } Embed(y, k, d) # 2 rollapply(y, list(-d * seq(0, k-1)), c) # 3 rollapply(y, d*k-1, function(x) x[d * seq(k-1, 0) + 1]) } \keyword{iteration} \keyword{array} \keyword{ts}
/man/rollapply.Rd
no_license
SongD90/zoo
R
false
false
9,926
rd
\name{rollapply} \alias{rollapply} \alias{rollapplyr} \alias{rollapply.default} \alias{rollapply.ts} \alias{rollapply.zoo} \title{Apply Rolling Functions} \description{ A generic function for applying a function to rolling margins of an array. } \usage{ rollapply(data, \dots) \method{rollapply}{ts}(data, \dots) \method{rollapply}{zoo}(data, width, FUN, \dots, by = 1, by.column = TRUE, fill = if (na.pad) NA, na.pad = FALSE, partial = FALSE, align = c("center", "left", "right"), coredata = TRUE) \method{rollapply}{default}(data, \dots) rollapplyr(\dots, align = "right") } \arguments{ \item{data}{the data to be used (representing a series of observations).} \item{width}{numeric vector or list. In the simplest case this is an integer specifying the window width (in numbers of observations) which is aligned to the original sample according to the \code{align} argument. Alternatively, \code{width} can be a list regarded as offsets compared to the current time, see below for details.} \item{FUN}{the function to be applied.} \item{\dots}{optional arguments to \code{FUN}.} \item{by}{calculate FUN at every \code{by}-th time point rather than every point. \code{by} is only used if \code{width} is length 1 and either a plain scalar or a list.} \item{by.column}{logical. If \code{TRUE}, \code{FUN} is applied to each column separately.} \item{fill}{a three-component vector or list (recycled otherwise) providing filling values at the left/within/to the right of the data range. See the \code{fill} argument of \code{\link{na.fill}} for details.} \item{na.pad}{deprecated. Use \code{fill = NA} instead of \code{na.pad = TRUE}.} \item{partial}{logical or numeric. If \code{FALSE} (default) then \code{FUN} is only applied when all indexes of the rolling window are within the observed time range. If \code{TRUE}, then the subset of indexes that are in range are passed to \code{FUN}. 
A numeric argument to \code{partial} can be used to determin the minimal window size for partial computations. See below for more details.} \item{align}{specifyies whether the index of the result should be left- or right-aligned or centered (default) compared to the rolling window of observations. This argument is only used if \code{width} represents widths.} \item{coredata}{logical. Should only the \code{coredata(data)} be passed to every \code{width} window? If set to \code{FALSE} the full zoo series is used.} } \details{ If \code{width} is a plain numeric vector its elements are regarded as widths to be interpreted in conjunction with \code{align} whereas if \code{width} is a list its components are regarded as offsets. In the above cases if the length of \code{width} is 1 then \code{width} is recycled for every \code{by}-th point. If \code{width} is a list its components represent integer offsets such that the i-th component of the list refers to time points at positions \code{i + width[[i]]}. If any of these points are below 1 or above the length of \code{index(data)} then \code{FUN} is not evaluated for that point unless \code{partial = TRUE} and in that case only the valid points are passed. The rolling function can also be applied to partial windows by setting \code{partial = TRUE} For example, if \code{width = 3, align = "right"} then for the first point just that point is passed to \code{FUN} since the two points to its left are out of range. For the same example, if \code{partial = FALSE} then \code{FUN} is not invoked at all for the first two points. If \code{partial} is a numeric then it specifies the minimum number of offsets that must be within range. Negative \code{partial} is interpreted as \code{FALSE}. If \code{FUN} is \code{mean}, \code{max} or \code{median} and \code{by.column} is \code{TRUE} and width is a plain scalar and there are no other arguments then special purpose code is used to enhance performance. 
Also in the case of \code{mean} such special purpose code is only invoked if the \code{data} argument has no \code{NA} values. See \code{\link{rollmean}}, \code{\link{rollmax}} and \code{\link{rollmedian}} for more details. Currently, there are methods for \code{"zoo"} and \code{"ts"} series and \code{"default"} method for ordinary vectors and matrices. \code{rollapplyr} is a wrapper around \code{rollapply} that uses a default of \code{align = "right"}. } \value{ A object of the same class as \code{data} with the results of the rolling function. } \seealso{\code{\link{rollmean}}} \examples{ ## rolling mean z <- zoo(11:15, as.Date(31:35)) rollapply(z, 2, mean) ## non-overlapping means z2 <- zoo(rnorm(6)) rollapply(z2, 3, mean, by = 3) # means of nonoverlapping groups of 3 aggregate(z2, c(3,3,3,6,6,6), mean) # same ## optimized vs. customized versions rollapply(z2, 3, mean) # uses rollmean which is optimized for mean rollmean(z2, 3) # same rollapply(z2, 3, (mean)) # does not use rollmean ## rolling regression: ## set up multivariate zoo series with ## number of UK driver deaths and lags 1 and 12 seat <- as.zoo(log(UKDriverDeaths)) time(seat) <- as.yearmon(time(seat)) seat <- merge(y = seat, y1 = lag(seat, k = -1), y12 = lag(seat, k = -12), all = FALSE) ## run a rolling regression with a 3-year time window ## (similar to a SARIMA(1,0,0)(1,0,0)_12 fitted by OLS) rr <- rollapply(seat, width = 36, FUN = function(z) coef(lm(y ~ y1 + y12, data = as.data.frame(z))), by.column = FALSE, align = "right") ## plot the changes in coefficients ## showing the shifts after the oil crisis in Oct 1973 ## and after the seatbelt legislation change in Jan 1983 plot(rr) ## rolling mean by time window (e.g., 3 days) rather than ## by number of observations (e.g., when these are unequally spaced): # ## - test data tt <- as.Date("2000-01-01") + c(1, 2, 5, 6, 7, 8, 10) z <- zoo(seq_along(tt), tt) ## - fill it out to a daily series, zm, using NAs ## using a zero width zoo series g on a grid g 
<- zoo(, seq(start(z), end(z), "day")) zm <- merge(z, g) ## - 3-day rolling mean rollapply(zm, 3, mean, na.rm = TRUE, fill = NA) ## different values of rule argument z <- zoo(c(NA, NA, 2, 3, 4, 5, NA)) rollapply(z, 3, sum, na.rm = TRUE) rollapply(z, 3, sum, na.rm = TRUE, fill = NULL) rollapply(z, 3, sum, na.rm = TRUE, fill = NA) rollapply(z, 3, sum, na.rm = TRUE, partial = TRUE) # this will exclude time points 1 and 2 # It corresonds to align = "right", width = 3 rollapply(zoo(1:8), list(seq(-2, 0)), sum) # but this will include points 1 and 2 rollapply(zoo(1:8), list(seq(-2, 0)), sum, partial = 1) rollapply(zoo(1:8), list(seq(-2, 0)), sum, partial = 0) # so will this rollapply(zoo(1:8), list(seq(-2, 0)), sum, fill = NA) # by = 3, align = "right" L <- rep(list(NULL), 8) L[seq(3, 8, 3)] <- list(seq(-2, 0)) str(L) rollapply(zoo(1:8), L, sum) rollapply(zoo(1:8), list(0:2), sum, fill = 1:3) rollapply(zoo(1:8), list(0:2), sum, fill = 3) L2 <- rep(list(-(2:0)), 10) L2[5] <- list(NULL) str(L2) rollapply(zoo(1:10), L2, sum, fill = "extend") rollapply(zoo(1:10), L2, sum, fill = list("extend", NULL)) rollapply(zoo(1:10), L2, sum, fill = list("extend", NA)) rollapply(zoo(1:10), L2, sum, fill = NA) rollapply(zoo(1:10), L2, sum, fill = 1:3) rollapply(zoo(1:10), L2, sum, partial = TRUE) rollapply(zoo(1:10), L2, sum, partial = TRUE, fill = 99) rollapply(zoo(1:10), list(-1), sum, partial = 0) rollapply(zoo(1:10), list(-1), sum, partial = TRUE) rollapply(zoo(cbind(a = 1:6, b = 11:16)), 3, rowSums, by.column = FALSE) # these two are the same rollapply(zoo(cbind(a = 1:6, b = 11:16)), 3, sum) rollapply(zoo(cbind(a = 1:6, b = 11:16)), 3, colSums, by.column = FALSE) # these two are the same rollapply(zoo(1:6), 2, sum, by = 2, align = "right") aggregate(zoo(1:6), c(2, 2, 4, 4, 6, 6), sum) # these two are the same rollapply(zoo(1:3), list(-1), c) lag(zoo(1:3), -1) # these two are the same rollapply(zoo(1:3), list(1), c) lag(zoo(1:3)) # these two are the same rollapply(zoo(1:5), list(c(-1, 
0, 1)), sum) rollapply(zoo(1:5), 3, sum) # these two are the same rollapply(zoo(1:5), list(0:2), sum) rollapply(zoo(1:5), 3, sum, align = "left") # these two are the same rollapply(zoo(1:5), list(-(2:0)), sum) rollapply(zoo(1:5), 3, sum, align = "right") # these two are the same rollapply(zoo(1:6), list(NULL, NULL, -(2:0)), sum) rollapply(zoo(1:6), 3, sum, by = 3, align = "right") # these two are the same rollapply(zoo(1:5), list(c(-1, 1)), sum) rollapply(zoo(1:5), 3, function(x) sum(x[-2])) # these two are the same rollapply(1:5, 3, rev) embed(1:5, 3) # these four are the same x <- 1:6 rollapply(c(0, 0, x), 3, sum, align = "right") - x rollapply(x, 3, sum, partial = TRUE, align = "right") - x rollapply(x, 3, function(x) sum(x[-3]), partial = TRUE, align = "right") rollapply(x, list(-(2:1)), sum, partial = 0) # same as Matlab's buffer(x, n, p) for valid non-negative p # See http://www.mathworks.com/help/toolbox/signal/buffer.html x <- 1:30; n <- 7; p <- 3 t(rollapply(c(rep(0, p), x, rep(0, n-p)), n, by = n-p, c)) # these three are the same y <- 10 * seq(8); k <- 4; d <- 2 # 1 # from http://ucfagls.wordpress.com/2011/06/14/embedding-a-time-series-with-time-delay-in-r-part-ii/ Embed <- function(x, m, d = 1, indices = FALSE, as.embed = TRUE) { n <- length(x) - (m-1)*d X <- seq_along(x) if(n <= 0) stop("Insufficient observations for the requested embedding") out <- matrix(rep(X[seq_len(n)], m), ncol = m) out[,-1] <- out[,-1, drop = FALSE] + rep(seq_len(m - 1) * d, each = nrow(out)) if(as.embed) out <- out[, rev(seq_len(ncol(out)))] if(!indices) out <- matrix(x[out], ncol = m) out } Embed(y, k, d) # 2 rollapply(y, list(-d * seq(0, k-1)), c) # 3 rollapply(y, d*k-1, function(x) x[d * seq(k-1, 0) + 1]) } \keyword{iteration} \keyword{array} \keyword{ts}
# Packages ---- # load packages library(rtracklayer) # for importing BED/GFF/etc. library(plyranges) # for working with GenomicRanges library(ChIPseeker) # to annotate peaks library(profileplyr) # for profile heatmaps library(ggplot2) # change the default ggplot theme theme_set(theme_classic(base_size = 14)) # Chromosome info ---- # read chromosome sizes (for GRanges annotation) chroms <- read.table("resources/GRCh38.109.chrom_sizes.tsv", col.names = c("seqnames", "seqlengths")) # order chromosomes in a more intuititve manner # and retain only autosomes (no contigs, no MT) chroms <- chroms[match(c(1:22, "X", "Y"), chroms$seqnames), ] # if you had MT, you can use this code to set it as circular chroms$is_circular <- chroms$seqnames == "MT" # view table chroms # Import peaks ---- # list peak files brd4_files <- list.files(path = "preprocessed/nf-chipseq", pattern = "brd4_.*broadPeak", recursive = TRUE, full.names = TRUE) names(brd4_files) <- gsub("_peaks.broadPeak", "", basename(brd4_files)) brd4_files # take the peak files, and then... brd4_ranges <- brd4_files |> # ... loop through and import them, and then... lapply(import, format = "broadPeak") |> # ... 
bind them all together bind_ranges(.id = "sample") brd4_ranges # subset ranges to contain only main chromosomes brd4_ranges <- brd4_ranges[seqnames(brd4_ranges) %in% chroms$seqnames, ] seqlevels(brd4_ranges) <- chroms$seqnames brd4_ranges <- set_genome_info(brd4_ranges, genome = "GRCh38", seqnames = chroms$seqnames, seqlengths = chroms$seqlengths, is_circular = chroms$is_circular) # add treatment variable brd4_ranges <- brd4_ranges |> mutate(treatment = ifelse(grepl("_e2_", sample), "e2", "veh")) # Coverage ranges ---- # calculate coverage across genome brd4_coverage <- brd4_ranges |> compute_coverage() # visualise occupancy rates brd4_coverage |> # remove intervals with no coverage at all filter(score > 0) |> # convert to data.frame as.data.frame() |> # barplot of counts for each coverage score ggplot(aes(x = score)) + geom_bar() + scale_x_continuous(breaks = 1:6) + labs(x = "# overlaps") # get intervals with coverage >= 2 brd4_coverage2 <- brd4_coverage |> filter(score >= 2) # create consensus peak intervals brd4_consensus <- brd4_ranges |> # filter to retain ranges with enough coverage filter_by_overlaps(brd4_coverage2) |> # merge ranges within 1kb of each other reduce(min.gapwidth = 1e3) # Annotate peaks ---- # import GTF as a TxDb object genes <- GenomicFeatures::makeTxDbFromGFF("resources/GRCh38.109.gtf.gz") # we use ChIPseeker to annotate the peaks brd4_consensus <- brd4_consensus |> annotatePeak(tssRegion = c(-3e3, 3e3), TxDb = genes) |> # convert back to GRanges as.GRanges() brd4_consensus # barplot of annotations brd4_consensus |> # remove gene IDs from exon/intro annotations for cleaner plot mutate(annotation = gsub("Exon .*", "Exon", annotation)) |> mutate(annotation = gsub("Intron .*", "Intron", annotation)) |> # make plot as.data.frame() |> ggplot(aes(annotation)) + geom_bar() + coord_flip() # Exercise ---- # !!!FIX!!! 
list peak files h2bub1_files <- list.files(path = "preprocessed/nf-chipseq", pattern = "FIXME", recursive = TRUE, full.names = TRUE) names(h2bub1_files) <- gsub("_peaks.broadPeak", "", basename(h2bub1_files)) # take the peak files, and then... h2bub1_ranges <- h2bub1_files |> # ... loop through and import them, and then... lapply(import, format = "broadPeak") |> # ... bind them all together bind_ranges(.id = "sample") # subset ranges to contain only main chromosomes h2bub1_ranges <- h2bub1_ranges[seqnames(h2bub1_ranges) %in% chroms$seqnames, ] seqlevels(h2bub1_ranges) <- chroms$seqnames h2bub1_ranges <- set_genome_info(h2bub1_ranges, genome = "GRCh38", seqnames = chroms$seqnames, seqlengths = chroms$seqlengths, is_circular = chroms$is_circular) # add treatment variable h2bub1_ranges <- h2bub1_ranges |> mutate(treatment = ifelse(grepl("_e2_", sample), "e2", "veh")) # !!!FIX!!! calculate coverage across genome h2bub1_coverage <- FIXME # occupancy rates h2bub1_coverage |> # remove intervals with no coverage at all filter(score > 0) |> # convert to data.frame as.data.frame() |> # barplot of counts for each coverage score ggplot(aes(x = score)) + geom_bar() + scale_x_continuous(breaks = 1:6) + labs(x = "# overlaps") # !!!FIX!!! get intervals with coverage >= 2 h2bub1_coverage2 <- FIXME # !!!FIX!!! create consensus peak intervals h2bub1_consensus <- h2bub1_ranges |> # filter to retain ranges with enough coverage FIXME |> # merge ranges within 1kb of each other reduce(min.gapwidth = 1e3) # !!!FIX!!! 
use ChIPseeker to annotate the peaks h2bub1_consensus <- FIXME # barplot of annotations h2bub1_consensus |> # remove gene IDs from exon/intro annotations for cleaner plot mutate(annotation = gsub("Exon .*", "Exon", annotation)) |> mutate(annotation = gsub("Intron .*", "Intron", annotation)) |> # make plot as.data.frame() |> ggplot(aes(annotation)) + geom_bar() + coord_flip() # Subset peaks ---- # read DEGs from Nagarajan 2017 degs <- read.csv("resources/degs_nagarajan2017.csv") # subset annotated intervals brd4_consensus_degs <- brd4_consensus |> filter(geneId %in% degs$ensembl_gene_id)
/course_files/participants/scripts/02-peak_ranges.R
permissive
cambiotraining/chipseq
R
false
false
5,860
r
# Packages ---- # load packages library(rtracklayer) # for importing BED/GFF/etc. library(plyranges) # for working with GenomicRanges library(ChIPseeker) # to annotate peaks library(profileplyr) # for profile heatmaps library(ggplot2) # change the default ggplot theme theme_set(theme_classic(base_size = 14)) # Chromosome info ---- # read chromosome sizes (for GRanges annotation) chroms <- read.table("resources/GRCh38.109.chrom_sizes.tsv", col.names = c("seqnames", "seqlengths")) # order chromosomes in a more intuititve manner # and retain only autosomes (no contigs, no MT) chroms <- chroms[match(c(1:22, "X", "Y"), chroms$seqnames), ] # if you had MT, you can use this code to set it as circular chroms$is_circular <- chroms$seqnames == "MT" # view table chroms # Import peaks ---- # list peak files brd4_files <- list.files(path = "preprocessed/nf-chipseq", pattern = "brd4_.*broadPeak", recursive = TRUE, full.names = TRUE) names(brd4_files) <- gsub("_peaks.broadPeak", "", basename(brd4_files)) brd4_files # take the peak files, and then... brd4_ranges <- brd4_files |> # ... loop through and import them, and then... lapply(import, format = "broadPeak") |> # ... 
bind them all together bind_ranges(.id = "sample") brd4_ranges # subset ranges to contain only main chromosomes brd4_ranges <- brd4_ranges[seqnames(brd4_ranges) %in% chroms$seqnames, ] seqlevels(brd4_ranges) <- chroms$seqnames brd4_ranges <- set_genome_info(brd4_ranges, genome = "GRCh38", seqnames = chroms$seqnames, seqlengths = chroms$seqlengths, is_circular = chroms$is_circular) # add treatment variable brd4_ranges <- brd4_ranges |> mutate(treatment = ifelse(grepl("_e2_", sample), "e2", "veh")) # Coverage ranges ---- # calculate coverage across genome brd4_coverage <- brd4_ranges |> compute_coverage() # visualise occupancy rates brd4_coverage |> # remove intervals with no coverage at all filter(score > 0) |> # convert to data.frame as.data.frame() |> # barplot of counts for each coverage score ggplot(aes(x = score)) + geom_bar() + scale_x_continuous(breaks = 1:6) + labs(x = "# overlaps") # get intervals with coverage >= 2 brd4_coverage2 <- brd4_coverage |> filter(score >= 2) # create consensus peak intervals brd4_consensus <- brd4_ranges |> # filter to retain ranges with enough coverage filter_by_overlaps(brd4_coverage2) |> # merge ranges within 1kb of each other reduce(min.gapwidth = 1e3) # Annotate peaks ---- # import GTF as a TxDb object genes <- GenomicFeatures::makeTxDbFromGFF("resources/GRCh38.109.gtf.gz") # we use ChIPseeker to annotate the peaks brd4_consensus <- brd4_consensus |> annotatePeak(tssRegion = c(-3e3, 3e3), TxDb = genes) |> # convert back to GRanges as.GRanges() brd4_consensus # barplot of annotations brd4_consensus |> # remove gene IDs from exon/intro annotations for cleaner plot mutate(annotation = gsub("Exon .*", "Exon", annotation)) |> mutate(annotation = gsub("Intron .*", "Intron", annotation)) |> # make plot as.data.frame() |> ggplot(aes(annotation)) + geom_bar() + coord_flip() # Exercise ---- # !!!FIX!!! 
list peak files h2bub1_files <- list.files(path = "preprocessed/nf-chipseq", pattern = "FIXME", recursive = TRUE, full.names = TRUE) names(h2bub1_files) <- gsub("_peaks.broadPeak", "", basename(h2bub1_files)) # take the peak files, and then... h2bub1_ranges <- h2bub1_files |> # ... loop through and import them, and then... lapply(import, format = "broadPeak") |> # ... bind them all together bind_ranges(.id = "sample") # subset ranges to contain only main chromosomes h2bub1_ranges <- h2bub1_ranges[seqnames(h2bub1_ranges) %in% chroms$seqnames, ] seqlevels(h2bub1_ranges) <- chroms$seqnames h2bub1_ranges <- set_genome_info(h2bub1_ranges, genome = "GRCh38", seqnames = chroms$seqnames, seqlengths = chroms$seqlengths, is_circular = chroms$is_circular) # add treatment variable h2bub1_ranges <- h2bub1_ranges |> mutate(treatment = ifelse(grepl("_e2_", sample), "e2", "veh")) # !!!FIX!!! calculate coverage across genome h2bub1_coverage <- FIXME # occupancy rates h2bub1_coverage |> # remove intervals with no coverage at all filter(score > 0) |> # convert to data.frame as.data.frame() |> # barplot of counts for each coverage score ggplot(aes(x = score)) + geom_bar() + scale_x_continuous(breaks = 1:6) + labs(x = "# overlaps") # !!!FIX!!! get intervals with coverage >= 2 h2bub1_coverage2 <- FIXME # !!!FIX!!! create consensus peak intervals h2bub1_consensus <- h2bub1_ranges |> # filter to retain ranges with enough coverage FIXME |> # merge ranges within 1kb of each other reduce(min.gapwidth = 1e3) # !!!FIX!!! 
use ChIPseeker to annotate the peaks h2bub1_consensus <- FIXME # barplot of annotations h2bub1_consensus |> # remove gene IDs from exon/intro annotations for cleaner plot mutate(annotation = gsub("Exon .*", "Exon", annotation)) |> mutate(annotation = gsub("Intron .*", "Intron", annotation)) |> # make plot as.data.frame() |> ggplot(aes(annotation)) + geom_bar() + coord_flip() # Subset peaks ---- # read DEGs from Nagarajan 2017 degs <- read.csv("resources/degs_nagarajan2017.csv") # subset annotated intervals brd4_consensus_degs <- brd4_consensus |> filter(geneId %in% degs$ensembl_gene_id)
Sim_Fn <- function( n_species, n_stations=200, n_factors=2, SpatialScale=0.1, SD_O=1.0, logMeanDens=3.0, Psi=NULL, Loc=NULL ){ # Parameters if( is.null(Psi) ){ Psi = matrix( rnorm(n_factors*n_species), nrow=n_factors, ncol=n_species) for(i in 1:nrow(Psi)) Psi[i,seq(from=1,to=i-1,length=i-1)] = 0 } Beta = rep(logMeanDens, n_species) # Spatial model if( is.null(Loc) ) Loc = cbind( "x"=runif(n_stations, min=0,max=1), "y"=runif(n_stations, min=0,max=1) ) model_O <- RMgauss(var=SD_O^2, scale=SpatialScale) # Simulate fields Omega = matrix(NA, ncol=n_factors, nrow=n_stations) for(i in 1:n_factors){ Omega[,i] = RFsimulate(model = model_O, x=Loc[,'x'], y=Loc[,'y'])@data[,1] } ln_Y_exp = Omega%*%Psi + outer(rep(1,n_stations),Beta) # Simulate data Y = matrix(rpois( n_stations*n_species, lambda=exp(ln_Y_exp) ), ncol=n_species, byrow=FALSE) X = cbind( rep(1,nrow(Y)) ) # Return stuff Sim_List = list("X"=X, "Y"=Y, "Psi"=Psi, "Loc"=Loc, "Omega"=Omega, "ln_Y_exp"=ln_Y_exp) return(Sim_List) }
/R/Sim_Fn.R
no_license
GodinA/spatial_factor_analysis
R
false
false
1,042
r
Sim_Fn <- function( n_species, n_stations=200, n_factors=2, SpatialScale=0.1, SD_O=1.0, logMeanDens=3.0, Psi=NULL, Loc=NULL ){ # Parameters if( is.null(Psi) ){ Psi = matrix( rnorm(n_factors*n_species), nrow=n_factors, ncol=n_species) for(i in 1:nrow(Psi)) Psi[i,seq(from=1,to=i-1,length=i-1)] = 0 } Beta = rep(logMeanDens, n_species) # Spatial model if( is.null(Loc) ) Loc = cbind( "x"=runif(n_stations, min=0,max=1), "y"=runif(n_stations, min=0,max=1) ) model_O <- RMgauss(var=SD_O^2, scale=SpatialScale) # Simulate fields Omega = matrix(NA, ncol=n_factors, nrow=n_stations) for(i in 1:n_factors){ Omega[,i] = RFsimulate(model = model_O, x=Loc[,'x'], y=Loc[,'y'])@data[,1] } ln_Y_exp = Omega%*%Psi + outer(rep(1,n_stations),Beta) # Simulate data Y = matrix(rpois( n_stations*n_species, lambda=exp(ln_Y_exp) ), ncol=n_species, byrow=FALSE) X = cbind( rep(1,nrow(Y)) ) # Return stuff Sim_List = list("X"=X, "Y"=Y, "Psi"=Psi, "Loc"=Loc, "Omega"=Omega, "ln_Y_exp"=ln_Y_exp) return(Sim_List) }
#' Subset cast dataset per probability threshold #' #' Takes the input cast dataset and subsets into dataset containing only those species that were part of at least one species pair occurring with a probability >= a certain threshold value. #' @param data Input cast dataset #' @param prob Numeric probability threshold value, from which on (>=) species are subset - e.g. 0.5 (standard) #' @param site EBN (El Bosque Nuevo) or PP (Peru Panguana) #' @return By probability threshold reduced dataset #' @export # get the species of the species pairs that show >=X prob overlap in the observations datasets cast_site_prob <- function(data, prob = 0.50, site = "missing"){ if(site %in% c("PP", "EBN")){ #extracting the species from spp pairs prob >=X prob into single datasets sub_data_1 <- data$Spp1[data$TempOverlapProb >= prob] sub_data_2 <- data$Spp2[data$TempOverlapProb >= prob] #create one dataset with all the species just extracted if(site == "PP") {cast_prob <- cast_PP[rownames(cast_PP) %in% sub_data_1 | rownames(cast_PP) %in% sub_data_2,] return(cast_prob)} else {cast_prob <- cast_EBN[rownames(cast_EBN) %in% sub_data_1 | rownames(cast_EBN) %in% sub_data_2,] return(cast_prob)}} else {return("Error: Please set site to EBN or PP")} }
/R/cast_site_prob.R
no_license
JonasGeschke/NicheOverlapR
R
false
false
1,292
r
#' Subset cast dataset per probability threshold #' #' Takes the input cast dataset and subsets into dataset containing only those species that were part of at least one species pair occurring with a probability >= a certain threshold value. #' @param data Input cast dataset #' @param prob Numeric probability threshold value, from which on (>=) species are subset - e.g. 0.5 (standard) #' @param site EBN (El Bosque Nuevo) or PP (Peru Panguana) #' @return By probability threshold reduced dataset #' @export # get the species of the species pairs that show >=X prob overlap in the observations datasets cast_site_prob <- function(data, prob = 0.50, site = "missing"){ if(site %in% c("PP", "EBN")){ #extracting the species from spp pairs prob >=X prob into single datasets sub_data_1 <- data$Spp1[data$TempOverlapProb >= prob] sub_data_2 <- data$Spp2[data$TempOverlapProb >= prob] #create one dataset with all the species just extracted if(site == "PP") {cast_prob <- cast_PP[rownames(cast_PP) %in% sub_data_1 | rownames(cast_PP) %in% sub_data_2,] return(cast_prob)} else {cast_prob <- cast_EBN[rownames(cast_EBN) %in% sub_data_1 | rownames(cast_EBN) %in% sub_data_2,] return(cast_prob)}} else {return("Error: Please set site to EBN or PP")} }
category Math #@# :author: 小林茂雄 bigdecimal は浮動小数点数演算ライブラリです。 任意の精度で 10 進表現された浮動小数点数を扱えます。 예: require 'bigdecimal' a = BigDecimal::new("0.123456789123456789") b = BigDecimal("123456.78912345678", 40) print a + b # => 0.123456912580245903456789E6 一般的な 10 進数の計算でも有用です。2 進数の浮動小数点演算には微小な誤 差があるのに対し、[[c:BigDecimal]] では正確な値を得る事ができます。 例1: 0.0001 を 10000 回足す場合。 sum = 0 for i in (1..10000) sum = sum + 0.0001 end print sum # => 0.9999999999999062 例2: 0.0001 を 10000 回足す場合。(BigDecimal) require 'bigdecimal' sum = BigDecimal.new("0") for i in (1..10000) sum = sum + BigDecimal.new("0.0001") end print sum # => 0.1E1 例3: 1.2 - 1.0 と 0.2 との比較 (BigDecimal.new("1.2") - BigDecimal("1.0")) == BigDecimal("0.2") # => true (1.2 - 1.0) == 0.2 # => false === 特別な値 正確な計算結果の提供のために、[[c:BigDecimal]] はいくつかの特別な値を持 ちます。 ==== 無限大 [[c:BigDecimal]] による演算の際には無限大を表す値を返す場合があります。 예: BigDecimal("1.0") / BigDecimal("0.0") #=> infinity BigDecimal("-1.0") / BigDecimal("0.0") #=> -infinity 無限大を表す [[c:BigDecimal]] オブジェクトを作成する場合、 [[m:Kernel.#BigDecimal]] の引数に "Infinity" や "-Infinity" を指定して ください。(大文字小文字を区別します) BigDecimal("Infinity") # => #<BigDecimal:f74a2ebc,'Infinity',4(4)> BigDecimal("+Infinity") # => #<BigDecimal:f74a2e6c,'Infinity',4(4)> BigDecimal("-Infinity") # => #<BigDecimal:f74a2e1c,'-Infinity',4(4)> BigDecimal("infinity") # => #<BigDecimal:f74a2dcc,'0.0',4(4)> BigDecimal("-infinity") # => #<BigDecimal:f74a2d7c,'-0.0',4(4)> ==== 非数(Not a Number) 0 / 0 のような未定義の計算を行った場合、非数(Not a Number)を表す値を返 します。 예: BigDecimal("0.0") / BigDecimal("0.0") # => #<BigDecimal:f74490d8,'NaN',4(24)> NaN を表す [[c:BigDecimal]] オブジェクトを作成する場合、 [[m:Kernel.#BigDecimal]] の引数に "NaN" を指定してください。(大文字小文 字を区別します) BigDecimal("NaN") # => #<BigDecimal:a0e49e4,'NaN',4(4)> NaN はどのような値と比較しても一致しません。(NaN 自身を含みます) BigDecimal("NaN") == 0.0 # => false BigDecimal("NaN") == BigDecimal("NaN") # => false ==== +ゼロと-ゼロ 計算結果が現在の有効桁数に比べて小さい値である場合、0 を返します。 負の非常に小さな [[c:BigDecimal]] の値は -0 を表す値になります。 BigDecimal.new("1.0") / BigDecimal.new("-Infinity") # => 
#<BigDecimal:f74a9f64,'-0.0',4(20)> 正の非常に小さな [[c:BigDecimal]] の値は -0 を表す値になります。 BigDecimal.new("1.0") / BigDecimal.new("Infinity") # => #<BigDecimal:f74a9e88,'0.0',4(20)> 精度については [[m:BigDecimal.mode]] も併せて参照してください。 また、0.0 と -0.0 は比較した場合に同じ値であるとみなされます。 BigDecimal("0.0") == BigDecimal("-0.0") # => true これは数学的には特に意味がない事に注意してください。数学的な 0 は符号を持ちません。 === 他の数値オブジェクトとの変換 (coerce) BigDecimal オブジェクトが算術演算子の左にあるときは、 BigDecimal オブジェクトが右にあるオブジェクトを (必要なら) BigDecimal に変換してから計算します。 従って、BigDecimal オブジェクト以外でも数値を意味するものなら 右に置けば演算は可能です。 ただし、文字列は (通常) 数値に自動変換することはできません。 文字列を数値に自動変換したい場合は bigfloat.c の 「/* #define ENABLE_NUMERIC_STRING */」のコメントを外してから、 再コンパイル、再インストールする必要があります。 文字列で数値を与える場合は注意が必要です。 数値に変換できない文字があると、 単に変換を止めるだけでエラーにはなりません。 "10XX"なら 10、"XXXX"は 0 と扱われます。 a = BigDecimal.E(20) c = a * "0.123456789123456789123456789" # 文字を BigDecimal に変換してから計算 無限大や非数を表す文字として、 "Infinity"、"+Infinity"、"-Infinity"、"NaN" も使用できます (大文字・小文字を区別します)。 ただし、mode メソッドで false を指定した場合は例外が発生します。 また、BigDecimalクラスは coerce(Ruby本参照)をサポートしています。 従って、BigDecimal オブジェクトが右にある場合も大抵は大丈夫です。 ただ、現在の Ruby インタプリタの仕様上、文字列が左にあると計算できません。 a = BigDecimal.E(20) c = "0.123456789123456789123456789" * a # エラー 必要性があるとは思いませんが、 どうしてもと言う人は String オブジェクトを継承した新たなクラスを作成してから、 そのクラスで coerce をサポートしてください。 ===[a:internal_structure] 内部構造 BigDecimal内部で浮動小数点は構造体(Real)で表現されます。 そのうち仮数部は unsigned long の配列 (以下の構造体要素 frac) で管理されます。 概念的には、以下のようになります。 <浮動小数点数> = 0.xxxxxxxxx * BASE ** n ここで、x は仮数部を表す数字、BASE は基数 (10 進表現なら 10)、 n は指数部を表す整数値です。BASEが大きいほど、大きな数値が表現できます。 つまり、配列のサイズを少なくできます。 BASE は大きいほど都合がよいわけですが、デバッグのやりやすさなどを考慮して、 10000になっています (BASE は VpInit() 関数で自動的に計算します)。 これは 32 ビット整数の場合です。64ビット整数の場合はもっと大きな値になります。 残念ながら、64 ビット整数でのテストはまだやっていません。 もし、テストをした方がいれば結果を教えてください。 BASE が 10000 のときは、以下の仮数部の配列 (frac) の各要素には最大で 4 桁の数字が格納されます。 浮動小数点構造体 (Real) は以下のようになっています。 typedef struct { unsigned long MaxPrec; // 最大精度(frac[]の配列サイズ) unsigned long Prec; // 精度(frac[]の使用サイズ) short sign; // 以下のように符号等の状態を定義します。 // ==0 : NaN // 1 : +0 // -1 : -0 // 2 : 正の値 // -2 : 負の値 // 3 : +Infinity 
// -3 : -Infinity unsigned short flag; // 各種の制御フラッグ int exponent; // 指数部の値(仮数部*BASE**exponent) unsigned long frac[1]; // 仮数部の配列(可変) } Real; 例えば BASE=10000 のとき 1234.56784321 という数字は、 0.1234 5678 4321*(10000)**1 ですから frac[0] = 1234、frac[1] = 5678、frac[2] = 4321、 Prec = 3、sign = 2、exponent = 1 となります。 MaxPrec は Prec より大きければいくつでもかまいません。 flag の使用方法は実装に依存して内部で使用されます。 === 2 進と 10 進 BigDecimal は <浮動小数点数> = 0.xxxxxxxxx*10**n という 10 進形式で数値を保持します。 しかし、計算機の浮動小数点数の内部表現は、 言うまでもなく <浮動小数点数> = 0.bbbbbbbb*2**n という 2 進形式が普通です (x は 0 から 9 まで、b は 0 か 1 の数字)。 BigDecimal がなぜ 10 進の内部表現形式を採用したのかを以下に説明します。 === 10 進のメリット ==== デバッグのしやすさ まず、プログラム作成が楽です。 frac[0]=1234、frac[1]=5678、frac[2]=4321、 exponent=1、sign=2 なら数値が 1234.56784321 であるのは見ればすぐに分かります。 ==== 10進表記された数値なら確実に内部表現に変換できる 例えば、以下のようなプログラムは全く誤差無しで計算することができます。 以下の例は、一行に一つの数値が書いてあるファイル file の合計数値を求めるものです。 file = File::open(....,"r") s = BigDecimal::new("0") while line = file.gets s = s + line end この例を 2 進数で計算すると誤差が入る可能性があります。 例えば 0.1 を2進で表現すると 0.1 = b1*2**(-1)+b1*2**(-2)+b3*2**(-3)+b4*2**(-4) …… と無限に続いてしまいます (b1=0,b2=0,b3=0,b4=1...)。 ここで bn(n=1,2,3,...) は 2進を表現する 0 か 1 の数字列です。 従って、どこかで打ち切る必要があります。ここで変換誤差が入ります。 もちろん、これを再度 10 進表記にして印刷するような場合は 適切な丸め操作(四捨五入)によって再び "0.1" と表示されます。 しかし、内部では正確な 0.1 ではありません。 ==== 有効桁数は有限である (つまり自動決定できる) 0.1 を表現するための領域はたった一つの配列要素 (frac[0] = 1) で済みます。 配列要素の数は10進数値から自動的に決定できます。 これは、可変長浮動小数点演算では大事なことです。 逆に 0.1 を 2 進表現したときに 2 進の有効桁をいくつにするのかは、 0.1 という数値だけからは決定できません。 === 10 進のデメリット 実は今までのメリットは、そのままデメリットにもなります。 そもそも、10 進を 2 進に変換するような操作は 変換誤差を伴う場合を回避することはできません。 大概のコンピュータは 10 進の内部表現を持っていないので、 BigDecimal を利用して誤差無しの計算をする場合は、 計算速度を無視しても最後まで BigDecimal を使用し続ける必要があります。 ==== 最初は何か? 
自分で計算するときにわざわざ 2 進数を使う人は極めてまれです。 計算機にデータを入力するときもほとんどの場合、 10進数で入力します。 その結果、double 等の計算機内部表現は最初から誤差が入っている場合があります。 BigDecimal はユーザ入力を誤差無しで取り込むことができます。 デバッグのしやすさと、データ読みこみ時に誤差が入らないという 2 点が実際のメリットです。 ====[a:precision] 計算精度について 「有効桁数」とは BigDecimal が精度を保証する桁数です。ぴったりではありません、 若干の余裕を持って計算されます。また、例えば32ビットのシステムでは10進で4桁毎に計算します。 従って、現状では、内部の「有効桁数」は4の倍数となっています。 c = a op b という計算 (op は + - * /) をしたときの動作は以下のようになります。 (1) 乗算は (a の有効桁数) + (b の有効桁数)、 除算は (a の最大有効桁数) + (b の最大有効桁数) 分の最大桁数 (実際は、余裕を持って、もう少し大きくなります) を持つ変数 c を新たに生成します。 加減算の場合は、誤差が出ないだけの精度を持つ c を生成します。 例えば c = 0.1+0.1*10**(-100) のような場合、c の精度は100桁以上の精度を持つようになります。 (2) 次に c = a op b の計算を実行します。 このように、加減算と乗算での c は必ず「誤差が出ない」だけの精度を持って生成されます (BigDecimal.limit を指定しない場合)。 除算は (a の最大有効桁数) + (b の最大有効桁数) 分の最大桁数を持つ c が生成されますが、 c = 1.0/3.0 のような計算で明らかなように、 c の最大精度を超えるところで計算が打ち切られる場合があります。 いずれにせよ、c の最大精度は a や b より大きくなりますので c が必要とするメモリー領域は大きくなることに注意して下さい。 注意:「+, -, *, /」では結果の精度(有効桁数)を自分で指定できません。 精度をコントロールしたい場合は、以下のインスタンスメソッドを使用します。 : add, sub, mult, div これらのメソッドは先頭 (最左) の数字からの桁数を指定できます。 BigDecimal("2").div(3,12) # 2.0/3.0 => 0.6666666666 67E0 : truncate, round, ceil, floor これらのメソッドは小数点からの相対位置を指定して桁数を決定します。 BigDecimal("6.66666666666666").round(12) # => 0.6666666666 667E1 ==== 自分で精度をコントロールしたい場合 自分で精度(有効桁数)をコントロールしたい場合は add、sub、mult、div 等のメソッドが使用できます。以下の円周率を計算するプログラム例のように、求める桁数は自分で指定することができます。 #!/usr/local/bin/ruby require "bigdecimal"require "bigdecimal" # # Calculates 3.1415.... (the number of times that a circle's diameter # will fit around the circle) using J. Machin's formula. 
# def big_pi(sig) # sig: Number of significant figures exp = -sig pi = BigDecimal::new("0") two = BigDecimal::new("2") m25 = BigDecimal::new("-0.04") m57121 = BigDecimal::new("-57121") u = BigDecimal::new("1") k = BigDecimal::new("1") w = BigDecimal::new("1") t = BigDecimal::new("-80") while (u.nonzero?&& u.exponent >= exp) t = t*m25 u = t.div(k,sig) pi = pi + u k = k+two end u = BigDecimal::new("1") k = BigDecimal::new("1") w = BigDecimal::new("1") t = BigDecimal::new("956") while (u.nonzero?&& u.exponent >= exp ) t = t.div(m57121,sig) u = t.div(k,sig) pi = pi + u k = k+two end pi end if $0 == __FILE__ if ARGV.size == 1 print "PI("+ARGV[0]+"):\n" p big_pi(ARGV[0].to_i) else print "TRY: ruby pi.rb 1000 \n" end end === その他 以下のメソッド以外にも、(C ではない) Ruby ソースの形で提供されているものもあります。예를 들어 require "bigdecimal/math.rb" とすることで、sin や cos といった関数が使用できるようになります。 使用方法など、詳細は [[lib:bigdecimal/math]] を参照して下さい。 その他、Float との相互変換などの メソッドが [[lib:bigdecimal/util]] でサポートされています。利用するには require "bigdecimal/util.rb" のようにします。詳細は [[lib:bigdecimal/util]] を参照して下さい。 #@include(bigdecimal/BigDecimal) #@since 1.9.3 #@include(bigdecimal/BigMath) #@end
/target/rubydoc/refm/api/src/bigdecimal.rd
no_license
nacyot/omegat-rurima-ruby
R
false
false
15,462
rd
category Math #@# :author: 小林茂雄 bigdecimal は浮動小数点数演算ライブラリです。 任意の精度で 10 進表現された浮動小数点数を扱えます。 예: require 'bigdecimal' a = BigDecimal::new("0.123456789123456789") b = BigDecimal("123456.78912345678", 40) print a + b # => 0.123456912580245903456789E6 一般的な 10 進数の計算でも有用です。2 進数の浮動小数点演算には微小な誤 差があるのに対し、[[c:BigDecimal]] では正確な値を得る事ができます。 例1: 0.0001 を 10000 回足す場合。 sum = 0 for i in (1..10000) sum = sum + 0.0001 end print sum # => 0.9999999999999062 例2: 0.0001 を 10000 回足す場合。(BigDecimal) require 'bigdecimal' sum = BigDecimal.new("0") for i in (1..10000) sum = sum + BigDecimal.new("0.0001") end print sum # => 0.1E1 例3: 1.2 - 1.0 と 0.2 との比較 (BigDecimal.new("1.2") - BigDecimal("1.0")) == BigDecimal("0.2") # => true (1.2 - 1.0) == 0.2 # => false === 特別な値 正確な計算結果の提供のために、[[c:BigDecimal]] はいくつかの特別な値を持 ちます。 ==== 無限大 [[c:BigDecimal]] による演算の際には無限大を表す値を返す場合があります。 예: BigDecimal("1.0") / BigDecimal("0.0") #=> infinity BigDecimal("-1.0") / BigDecimal("0.0") #=> -infinity 無限大を表す [[c:BigDecimal]] オブジェクトを作成する場合、 [[m:Kernel.#BigDecimal]] の引数に "Infinity" や "-Infinity" を指定して ください。(大文字小文字を区別します) BigDecimal("Infinity") # => #<BigDecimal:f74a2ebc,'Infinity',4(4)> BigDecimal("+Infinity") # => #<BigDecimal:f74a2e6c,'Infinity',4(4)> BigDecimal("-Infinity") # => #<BigDecimal:f74a2e1c,'-Infinity',4(4)> BigDecimal("infinity") # => #<BigDecimal:f74a2dcc,'0.0',4(4)> BigDecimal("-infinity") # => #<BigDecimal:f74a2d7c,'-0.0',4(4)> ==== 非数(Not a Number) 0 / 0 のような未定義の計算を行った場合、非数(Not a Number)を表す値を返 します。 예: BigDecimal("0.0") / BigDecimal("0.0") # => #<BigDecimal:f74490d8,'NaN',4(24)> NaN を表す [[c:BigDecimal]] オブジェクトを作成する場合、 [[m:Kernel.#BigDecimal]] の引数に "NaN" を指定してください。(大文字小文 字を区別します) BigDecimal("NaN") # => #<BigDecimal:a0e49e4,'NaN',4(4)> NaN はどのような値と比較しても一致しません。(NaN 自身を含みます) BigDecimal("NaN") == 0.0 # => false BigDecimal("NaN") == BigDecimal("NaN") # => false ==== +ゼロと-ゼロ 計算結果が現在の有効桁数に比べて小さい値である場合、0 を返します。 負の非常に小さな [[c:BigDecimal]] の値は -0 を表す値になります。 BigDecimal.new("1.0") / BigDecimal.new("-Infinity") # => 
#<BigDecimal:f74a9f64,'-0.0',4(20)> 正の非常に小さな [[c:BigDecimal]] の値は -0 を表す値になります。 BigDecimal.new("1.0") / BigDecimal.new("Infinity") # => #<BigDecimal:f74a9e88,'0.0',4(20)> 精度については [[m:BigDecimal.mode]] も併せて参照してください。 また、0.0 と -0.0 は比較した場合に同じ値であるとみなされます。 BigDecimal("0.0") == BigDecimal("-0.0") # => true これは数学的には特に意味がない事に注意してください。数学的な 0 は符号を持ちません。 === 他の数値オブジェクトとの変換 (coerce) BigDecimal オブジェクトが算術演算子の左にあるときは、 BigDecimal オブジェクトが右にあるオブジェクトを (必要なら) BigDecimal に変換してから計算します。 従って、BigDecimal オブジェクト以外でも数値を意味するものなら 右に置けば演算は可能です。 ただし、文字列は (通常) 数値に自動変換することはできません。 文字列を数値に自動変換したい場合は bigfloat.c の 「/* #define ENABLE_NUMERIC_STRING */」のコメントを外してから、 再コンパイル、再インストールする必要があります。 文字列で数値を与える場合は注意が必要です。 数値に変換できない文字があると、 単に変換を止めるだけでエラーにはなりません。 "10XX"なら 10、"XXXX"は 0 と扱われます。 a = BigDecimal.E(20) c = a * "0.123456789123456789123456789" # 文字を BigDecimal に変換してから計算 無限大や非数を表す文字として、 "Infinity"、"+Infinity"、"-Infinity"、"NaN" も使用できます (大文字・小文字を区別します)。 ただし、mode メソッドで false を指定した場合は例外が発生します。 また、BigDecimalクラスは coerce(Ruby本参照)をサポートしています。 従って、BigDecimal オブジェクトが右にある場合も大抵は大丈夫です。 ただ、現在の Ruby インタプリタの仕様上、文字列が左にあると計算できません。 a = BigDecimal.E(20) c = "0.123456789123456789123456789" * a # エラー 必要性があるとは思いませんが、 どうしてもと言う人は String オブジェクトを継承した新たなクラスを作成してから、 そのクラスで coerce をサポートしてください。 ===[a:internal_structure] 内部構造 BigDecimal内部で浮動小数点は構造体(Real)で表現されます。 そのうち仮数部は unsigned long の配列 (以下の構造体要素 frac) で管理されます。 概念的には、以下のようになります。 <浮動小数点数> = 0.xxxxxxxxx * BASE ** n ここで、x は仮数部を表す数字、BASE は基数 (10 進表現なら 10)、 n は指数部を表す整数値です。BASEが大きいほど、大きな数値が表現できます。 つまり、配列のサイズを少なくできます。 BASE は大きいほど都合がよいわけですが、デバッグのやりやすさなどを考慮して、 10000になっています (BASE は VpInit() 関数で自動的に計算します)。 これは 32 ビット整数の場合です。64ビット整数の場合はもっと大きな値になります。 残念ながら、64 ビット整数でのテストはまだやっていません。 もし、テストをした方がいれば結果を教えてください。 BASE が 10000 のときは、以下の仮数部の配列 (frac) の各要素には最大で 4 桁の数字が格納されます。 浮動小数点構造体 (Real) は以下のようになっています。 typedef struct { unsigned long MaxPrec; // 最大精度(frac[]の配列サイズ) unsigned long Prec; // 精度(frac[]の使用サイズ) short sign; // 以下のように符号等の状態を定義します。 // ==0 : NaN // 1 : +0 // -1 : -0 // 2 : 正の値 // -2 : 負の値 // 3 : +Infinity 
// -3 : -Infinity unsigned short flag; // 各種の制御フラッグ int exponent; // 指数部の値(仮数部*BASE**exponent) unsigned long frac[1]; // 仮数部の配列(可変) } Real; 例えば BASE=10000 のとき 1234.56784321 という数字は、 0.1234 5678 4321*(10000)**1 ですから frac[0] = 1234、frac[1] = 5678、frac[2] = 4321、 Prec = 3、sign = 2、exponent = 1 となります。 MaxPrec は Prec より大きければいくつでもかまいません。 flag の使用方法は実装に依存して内部で使用されます。 === 2 進と 10 進 BigDecimal は <浮動小数点数> = 0.xxxxxxxxx*10**n という 10 進形式で数値を保持します。 しかし、計算機の浮動小数点数の内部表現は、 言うまでもなく <浮動小数点数> = 0.bbbbbbbb*2**n という 2 進形式が普通です (x は 0 から 9 まで、b は 0 か 1 の数字)。 BigDecimal がなぜ 10 進の内部表現形式を採用したのかを以下に説明します。 === 10 進のメリット ==== デバッグのしやすさ まず、プログラム作成が楽です。 frac[0]=1234、frac[1]=5678、frac[2]=4321、 exponent=1、sign=2 なら数値が 1234.56784321 であるのは見ればすぐに分かります。 ==== 10進表記された数値なら確実に内部表現に変換できる 例えば、以下のようなプログラムは全く誤差無しで計算することができます。 以下の例は、一行に一つの数値が書いてあるファイル file の合計数値を求めるものです。 file = File::open(....,"r") s = BigDecimal::new("0") while line = file.gets s = s + line end この例を 2 進数で計算すると誤差が入る可能性があります。 例えば 0.1 を2進で表現すると 0.1 = b1*2**(-1)+b1*2**(-2)+b3*2**(-3)+b4*2**(-4) …… と無限に続いてしまいます (b1=0,b2=0,b3=0,b4=1...)。 ここで bn(n=1,2,3,...) は 2進を表現する 0 か 1 の数字列です。 従って、どこかで打ち切る必要があります。ここで変換誤差が入ります。 もちろん、これを再度 10 進表記にして印刷するような場合は 適切な丸め操作(四捨五入)によって再び "0.1" と表示されます。 しかし、内部では正確な 0.1 ではありません。 ==== 有効桁数は有限である (つまり自動決定できる) 0.1 を表現するための領域はたった一つの配列要素 (frac[0] = 1) で済みます。 配列要素の数は10進数値から自動的に決定できます。 これは、可変長浮動小数点演算では大事なことです。 逆に 0.1 を 2 進表現したときに 2 進の有効桁をいくつにするのかは、 0.1 という数値だけからは決定できません。 === 10 進のデメリット 実は今までのメリットは、そのままデメリットにもなります。 そもそも、10 進を 2 進に変換するような操作は 変換誤差を伴う場合を回避することはできません。 大概のコンピュータは 10 進の内部表現を持っていないので、 BigDecimal を利用して誤差無しの計算をする場合は、 計算速度を無視しても最後まで BigDecimal を使用し続ける必要があります。 ==== 最初は何か? 
自分で計算するときにわざわざ 2 進数を使う人は極めてまれです。 計算機にデータを入力するときもほとんどの場合、 10進数で入力します。 その結果、double 等の計算機内部表現は最初から誤差が入っている場合があります。 BigDecimal はユーザ入力を誤差無しで取り込むことができます。 デバッグのしやすさと、データ読みこみ時に誤差が入らないという 2 点が実際のメリットです。 ====[a:precision] 計算精度について 「有効桁数」とは BigDecimal が精度を保証する桁数です。ぴったりではありません、 若干の余裕を持って計算されます。また、例えば32ビットのシステムでは10進で4桁毎に計算します。 従って、現状では、内部の「有効桁数」は4の倍数となっています。 c = a op b という計算 (op は + - * /) をしたときの動作は以下のようになります。 (1) 乗算は (a の有効桁数) + (b の有効桁数)、 除算は (a の最大有効桁数) + (b の最大有効桁数) 分の最大桁数 (実際は、余裕を持って、もう少し大きくなります) を持つ変数 c を新たに生成します。 加減算の場合は、誤差が出ないだけの精度を持つ c を生成します。 例えば c = 0.1+0.1*10**(-100) のような場合、c の精度は100桁以上の精度を持つようになります。 (2) 次に c = a op b の計算を実行します。 このように、加減算と乗算での c は必ず「誤差が出ない」だけの精度を持って生成されます (BigDecimal.limit を指定しない場合)。 除算は (a の最大有効桁数) + (b の最大有効桁数) 分の最大桁数を持つ c が生成されますが、 c = 1.0/3.0 のような計算で明らかなように、 c の最大精度を超えるところで計算が打ち切られる場合があります。 いずれにせよ、c の最大精度は a や b より大きくなりますので c が必要とするメモリー領域は大きくなることに注意して下さい。 注意:「+, -, *, /」では結果の精度(有効桁数)を自分で指定できません。 精度をコントロールしたい場合は、以下のインスタンスメソッドを使用します。 : add, sub, mult, div これらのメソッドは先頭 (最左) の数字からの桁数を指定できます。 BigDecimal("2").div(3,12) # 2.0/3.0 => 0.6666666666 67E0 : truncate, round, ceil, floor これらのメソッドは小数点からの相対位置を指定して桁数を決定します。 BigDecimal("6.66666666666666").round(12) # => 0.6666666666 667E1 ==== 自分で精度をコントロールしたい場合 自分で精度(有効桁数)をコントロールしたい場合は add、sub、mult、div 等のメソッドが使用できます。以下の円周率を計算するプログラム例のように、求める桁数は自分で指定することができます。 #!/usr/local/bin/ruby require "bigdecimal"require "bigdecimal" # # Calculates 3.1415.... (the number of times that a circle's diameter # will fit around the circle) using J. Machin's formula. 
# def big_pi(sig) # sig: Number of significant figures exp = -sig pi = BigDecimal::new("0") two = BigDecimal::new("2") m25 = BigDecimal::new("-0.04") m57121 = BigDecimal::new("-57121") u = BigDecimal::new("1") k = BigDecimal::new("1") w = BigDecimal::new("1") t = BigDecimal::new("-80") while (u.nonzero?&& u.exponent >= exp) t = t*m25 u = t.div(k,sig) pi = pi + u k = k+two end u = BigDecimal::new("1") k = BigDecimal::new("1") w = BigDecimal::new("1") t = BigDecimal::new("956") while (u.nonzero?&& u.exponent >= exp ) t = t.div(m57121,sig) u = t.div(k,sig) pi = pi + u k = k+two end pi end if $0 == __FILE__ if ARGV.size == 1 print "PI("+ARGV[0]+"):\n" p big_pi(ARGV[0].to_i) else print "TRY: ruby pi.rb 1000 \n" end end === その他 以下のメソッド以外にも、(C ではない) Ruby ソースの形で提供されているものもあります。예를 들어 require "bigdecimal/math.rb" とすることで、sin や cos といった関数が使用できるようになります。 使用方法など、詳細は [[lib:bigdecimal/math]] を参照して下さい。 その他、Float との相互変換などの メソッドが [[lib:bigdecimal/util]] でサポートされています。利用するには require "bigdecimal/util.rb" のようにします。詳細は [[lib:bigdecimal/util]] を参照して下さい。 #@include(bigdecimal/BigDecimal) #@since 1.9.3 #@include(bigdecimal/BigMath) #@end
\name{setFeatures-methods} \alias{setFeatures-methods} \alias{setFeatures} \title{The \code{setFeatures} method} \description{Sets the features to an object of the \code{\linkS4class{mtkFactor}} class.} \usage{setFeatures(this, aFList)} \value{invisible} \arguments{ \item{this}{an object of the class \code{\linkS4class{mtkFactor}}} \item{aFList}{a list of \code{\linkS4class{mtkFeature}} objects.} } \author{Hervé Richard, BioSP, Inra, Herve.Richard@avignon.inra.fr, Hervé Monod and Juhui WANG, MIA-jouy, INRA} \examples{ # Build an object of the "mtkFactor" class x1 <- make.mtkFactor(name="x1", type="double", nominal=0, distribName="unif", distribPara=list(min=-pi, max=pi)) # Define the list of features f <- make.mtkFeatureList(list(f=4.5,c=+6,shape="parabolic")) # Assign the features to the factor setFeatures(x1,f) }
/man/setFeatures-methods.Rd
no_license
santoshpanda15/mtk
R
false
false
841
rd
\name{setFeatures-methods} \alias{setFeatures-methods} \alias{setFeatures} \title{The \code{setFeatures} method} \description{Sets the features to an object of the \code{\linkS4class{mtkFactor}} class.} \usage{setFeatures(this, aFList)} \value{invisible} \arguments{ \item{this}{an object of the class \code{\linkS4class{mtkFactor}}} \item{aFList}{a list of \code{\linkS4class{mtkFeature}} objects.} } \author{Hervé Richard, BioSP, Inra, Herve.Richard@avignon.inra.fr, Hervé Monod and Juhui WANG, MIA-jouy, INRA} \examples{ # Build an object of the "mtkFactor" class x1 <- make.mtkFactor(name="x1", type="double", nominal=0, distribName="unif", distribPara=list(min=-pi, max=pi)) # Define the list of features f <- make.mtkFeatureList(list(f=4.5,c=+6,shape="parabolic")) # Assign the features to the factor setFeatures(x1,f) }
library(jsonlite)
library(ggplot2)

# Daily COVID-19 hospital / ICU occupancy from the ECDC open-data API.
jscoviddata <- read_json(
  "https://opendata.ecdc.europa.eu/covid19/hospitalicuadmissionrates/json/",
  simplifyVector = TRUE
)

# Keep observations from 1 May 2020 onwards.
startdato <- as.Date("2020-05-01")
jscoviddata$date <- as.Date(jscoviddata$date)
jscoviddata <- jscoviddata[which(jscoviddata$date >= startdato), ]

# Plot daily hospital and ICU occupancy for one country.
# The original script repeated this ggplot pipeline verbatim for each
# country; it is factored into one helper so the three plots stay in sync.
#
# land:  country name as it appears in the ECDC data (e.g. "Denmark")
# titel: plot title (kept in Danish, matching the original output)
plot_hospital <- function(land, titel) {
  ggplot(
    subset(
      jscoviddata,
      country %in% land &
        indicator %in% c("Daily hospital occupancy", "Daily ICU occupancy")
    ),
    aes(x = date, y = value, colour = indicator)
  ) +
    geom_line() +
    scale_x_date(
      date_labels = "%b",
      date_breaks = "1 month",
      limits = c(startdato, Sys.Date())
    ) +
    stat_smooth(method = loess, na.rm = TRUE) +
    ggtitle(titel)
}

plot_hospital("Denmark", "Danmark daglige hospitalstal")
plot_hospital("France", "Frankrig daglige hospitalstal")
plot_hospital("Italy", "Italien daglige hospitalstal")
/jsontest.R
no_license
llindegaard/covid
R
false
false
1,492
r
library(jsonlite) library(ggplot2) jscoviddata <- read_json("https://opendata.ecdc.europa.eu/covid19/hospitalicuadmissionrates/json/", simplifyVector = TRUE) startdato <- as.Date(c('2020-05-01')) jscoviddata$date <- as.Date(jscoviddata$date) jscoviddata <- jscoviddata[which(jscoviddata$date>= startdato),] ggplot(subset(jscoviddata, country %in% c("Denmark") & indicator %in% c("Daily hospital occupancy","Daily ICU occupancy")), aes(x = date, y = value, colour = indicator)) + geom_line() + # scale_color_colorblind() + scale_x_date(date_labels="%b",date_breaks ="1 month" ,limits = c(startdato, Sys.Date())) + stat_smooth(method=loess , na.rm = TRUE ) + ggtitle("Danmark daglige hospitalstal") ggplot(subset(jscoviddata, country %in% c("France") & indicator %in% c("Daily hospital occupancy","Daily ICU occupancy")), aes(x = date, y = value, colour = indicator)) + geom_line() + # scale_color_colorblind() + scale_x_date(date_labels="%b",date_breaks ="1 month" ,limits = c(startdato, Sys.Date())) + stat_smooth(method=loess , na.rm = TRUE ) + ggtitle("Frankrig daglige hospitalstal") ggplot(subset(jscoviddata, country %in% c("Italy") & indicator %in% c("Daily hospital occupancy","Daily ICU occupancy")), aes(x = date, y = value, colour = indicator)) + geom_line() + # scale_color_colorblind() + scale_x_date(date_labels="%b",date_breaks ="1 month" ,limits = c(startdato, Sys.Date())) + stat_smooth(method=loess , na.rm = TRUE ) + ggtitle("Italien daglige hospitalstal")
library(curl)
library(tidyverse)
source("cache.R")

# Read an APC (Australian Plant Census) taxon export CSV at `path`.
# Every column defaults to character so unexpected columns never fail to
# parse; only the few typed columns below are coerced.
#
# path: path to a taxon CSV as exported by the NSL service.
# Returns a tibble.
read_apc_csv <- function(path) {
  readr::read_csv(
    path,
    col_types = cols(
      .default = readr::col_character(),
      proParte = readr::col_logical(),
      taxonRankSortOrder = readr::col_double(),
      created = readr::col_datetime(format = ""),
      modified = readr::col_datetime(format = "")
    )
  )
}

# Fetch the current APC taxon export, cached locally as "apc.csv".
# On a cache miss the file is downloaded from biodiversity.org.au
# (mode = 'wb' keeps the bytes intact on Windows) and parsed; on a cache
# hit the stored copy is re-read with the same parser.
# NOTE(review): relies on cache_csv() from the sourced cache.R — confirm
# its (name, producer, reader) contract against that file.
apc_fetch <- function() {
  cache_csv("apc.csv", function() {
    download.file("https://biodiversity.org.au/nsl/services/export/taxonCsv",
                  "apc.csv", mode = 'wb')
    read_apc_csv("apc.csv")
  }, read_apc_csv)
}
/r/ausplotsr-play/apc.R
no_license
puyo/exercises
R
false
false
610
r
library(curl) library(tidyverse) source("cache.R") read_apc_csv <- function(path) { readr::read_csv( path, col_types = cols( .default = readr::col_character(), proParte = readr::col_logical(), taxonRankSortOrder = readr::col_double(), created = readr::col_datetime(format = ""), modified = readr::col_datetime(format = "") ) ) } apc_fetch <- function() { cache_csv("apc.csv", function() { download.file("https://biodiversity.org.au/nsl/services/export/taxonCsv", "apc.csv", mode = 'wb') read_apc_csv("apc.csv") }, read_apc_csv) }
# Poisson approximation to the binomial: lambda = n * p.
# With n = 2000 trials and success probability p = 0.0085,
# lambda = 17 expected events.
lambda <- 2000 * 0.0085
lambda

# P(X = 5) for X ~ Poisson(lambda).
# The original hard-coded 17 here; using `lambda` keeps the script
# consistent if n or p is ever changed (numerically identical today).
dpois(5, lambda)
/Biostats hw3/hw3.R
permissive
spsanderson/bio-informatics
R
false
false
55
r
# Poisson approximation to the binomial: lambda = n * p.
# With n = 2000 trials and success probability p = 0.0085,
# lambda = 17 expected events.
lambda <- 2000 * 0.0085
lambda

# P(X = 5) for X ~ Poisson(lambda).
# The original hard-coded 17 here; using `lambda` keeps the script
# consistent if n or p is ever changed (numerically identical today).
dpois(5, lambda)
function (grammar, numExpr = 1, max.depth = length(grammar$def),
    startSymb = GrammarStartSymbol(grammar),
    max.string = GrammarMaxSequenceRange(grammar, max.depth, startSymb),
    wrappings = 3, retries = 100)
{
    # Generate `numExpr` random R expressions from `grammar` by sampling
    # random genomes and mapping them to phenotypes.
    #
    # grammar:    a grammar object (gramEvol)
    # numExpr:    number of expressions to generate
    # max.depth:  maximum depth used when sizing the genome
    # startSymb:  grammar start symbol
    # max.string: per-codon upper bounds for the random genome
    # wrappings:  genome wrapping count for genotype-to-phenotype mapping
    # retries:    attempts per expression before giving up on that slot
    #
    # Returns a single parsed expression when numExpr == 1 (NULL if no
    # terminal expression was found within `retries`), otherwise a list
    # with NULL in any failed slot.
    n <- length(max.string)
    # Preallocate so failed slots keep a NULL placeholder. The original
    # did `ret.list[[length(ret.list) + 1]] <- NULL`, which in R is a
    # no-op (it never appends NULL): failed expressions were silently
    # dropped, shifting indices, and numExpr == 1 with a failure raised
    # "subscript out of bounds".
    ret.list <- vector("list", numExpr)
    for (i in seq_len(numExpr)) {
        for (j in seq_len(retries)) {
            genome <- round(runif(n) * max.string)
            expr <- GrammarGenotypeToPhenotype(genome, grammar, wrappings)
            if (expr$type == "T") {
                # Terminal phenotype: parse it into an R expression.
                ret.list[[i]] <- parse(text = as.character(expr$parsed))
                break
            }
        }
    }
    if (numExpr == 1)
        return(ret.list[[1]])
    else return(ret.list)
}
/R/GrammarRandomExpression.R
no_license
ramcqueary/gramEvol3
R
false
false
815
r
function (grammar, numExpr = 1, max.depth = length(grammar$def), startSymb = GrammarStartSymbol(grammar), max.string = GrammarMaxSequenceRange(grammar, max.depth, startSymb), wrappings = 3, retries = 100) { n = length(max.string) ret.list = list() for (i in 1:numExpr) { for (j in 1:retries) { genome = round(runif(n) * max.string) expr = GrammarGenotypeToPhenotype(genome, grammar, wrappings) if (expr$type == "T") { ret.list[[length(ret.list) + 1]] = parse(text = as.character(expr$parsed)) break } } if (expr$type == "NT") { ret.list[[length(ret.list) + 1]] = NULL } } if (numExpr == 1) return(ret.list[[1]]) else return(ret.list) }
# Study script: descriptive statistics of a univariate quantitative
# variable (ggplot2::diamonds$price) and normal-distribution functions.
install.packages("tidyverse")
install.packages("e1071") # skewness, kurtosis
install.packages("plotly")
library(tidyverse)
library(e1071)
library(plotly)

# Example data: ggplot2::diamonds
# Quantitative variable: price
# Analysis of a univariate quantitative variable
# 1. Table = frequency table
# (1) Frequencies per interval
mean(diamonds$price)
x1 <- c(10, 20, 30, NA)
# Ordinary (arithmetic) mean; returns NA when the vector contains NA
mean(x1)
mean(x1, na.rm = TRUE)
# Trimmed mean (drops the given fraction from each tail first)
mean(diamonds$price, trim = 0.05, na.rm = TRUE )
mean(diamonds$price, trim = 0.10, na.rm = TRUE )
# Median
median(diamonds$price, na.rm = TRUE)
# Mode (most frequent value)
table(diamonds$price)
which.max(table(diamonds$price))
# 605, 261 : means the value 605, found at position 261 of the table
# Shape of the distribution
# 1. Skewness
# Tells whether the distribution is symmetric
# e1071::skewness(data$variable)
e1071::skewness(diamonds$price)
# 1.618305: skewness > 0, asymmetric (large values on the right are the outliers)
# Kurtosis
# Tells how peaked the center of the distribution is
# e1071::kurtosis(data$variable)
e1071::kurtosis(diamonds$price)
# 2.177: kurtosis greater than 0 (more peaked than the normal distribution)
# Miscellaneous
# Minimum
min(diamonds$price) # 326
# Maximum
max(diamonds$price) # 18823
rnorm(n = 10)
set.seed(100)
d1 <- rnorm(n = 10, mean = 170, sd = 10) # normal distribution, population mean 170, population sd 10
# P(X <= 170)
pnorm(170, mean = 170, sd = 10)
# P(X <= 190), complement gives the upper tail
1- pnorm(190, mean = 170, sd = 10)
pnorm(190, mean = 170, sd = 10, lower.tail = FALSE) # asks directly for P(X > 190)
# P(140 <= X <= 200)
pnorm(200, mean = 170, sd = 10) - pnorm(140, mean = 170, sd = 10)
# Quantile: the value with the given probability mass below it
qnorm(0.95, mean = 170, sd = 10)
# Density: how concentrated the data is around a given point
dnorm(170, mean = 170, sd = 10) # 0.039 (about 3.9% density at 170)
/study/test2.R
no_license
metalzoa/r-study
R
false
false
1,810
r
install.packages("tidyverse") install.packages("e1071") # 왜도, 첨도 install.packages("plotly") library(tidyverse) library(e1071) library(plotly) # 예제 데이터 : ggplot2::diamonds # 양적 자료 : price # 일변량 양적 자료의 분석 # 1. 표 = 빈도표 # (1) 구간의 빈도 mean(diamonds$price) x1 <- c(10, 20, 30, NA) # 일반 평균 mean(x1) mean(x1, na.rm = TRUE) # 절사 평균 mean(diamonds$price, trim = 0.05, na.rm = TRUE ) mean(diamonds$price, trim = 0.10, na.rm = TRUE ) # 중위수 median(diamonds$price, na.rm = TRUE) # 최빈수 table(diamonds$price) which.max(table(diamonds$price)) # 605, 261 : 261 번째 있는 605 값을 의미 # 분포 모양 # 1. 왜도 (Skewness) # 대칭 여부 알려 주는값 # e1071::skewness(data$variable) e1071::skewness(diamonds$price) # 1.618305 왜도가 0 보다 큰 비대칭 ( 우측 큰 값이 이상치 인 경우) # 첨도 # 중심이 얼마나 뾰죡 한가 알려 주는 값 # e1071::kurtosis(data$variable) e1071::kurtosis(diamonds$price) # 2.177 첨도가 0보다 큰값 이면 # 기타 # 최소값 min(diamonds$price) # 326 # 최대값 max(diamonds$price) # 18823 rnorm(n = 10) set.seed(100) d1 <- rnorm(n = 10, mean = 170, sd = 10) # 모평균 170, 모표준편자 10, 정규분포 # 170 이하는 pnorm(170, mean = 170, sd = 10) # 190 이하는 1- pnorm(190, mean = 170, sd = 10) pnorm(190, mean = 170, sd = 10, lower.tail = FALSE) # 190 보타 큰값을 달라는 의미 # 140 ~ 200 pnorm(200, mean = 170, sd = 10) - pnorm(140, mean = 170, sd = 10) # 확률 변수 구하기 qnorm(0.95, mean = 170, sd = 10) # 밀도 구하기 : 특정 구간에 데이터가 어느 정도마 몰려 있는지 dnorm(170, mean = 170, sd = 10) # 0.039 ( 170 구간에 데이터가 3.9% 몰려 있다는 의미)
# Fit a classification tree predicting employee attrition (`left`) on the
# Kaggle HR-analytics data, then evaluate it with ROC/AUC and the KS statistic.
library(ggplot2)
library(rpart) #decision tree
library(ROCR)
library(RCurl)

# load data (from https://www.kaggle.com/ludobenistant/hr-analytics)
# NOTE(review): hard-coded local Windows path — adjust for your machine.
hr = read.csv("C:/Users/ingrid.brizotti/Downloads/HR_comma_sep.csv")

# Divide 70% to train and 30% to test
# (seed fixed so the random split is reproducible)
set.seed(4)
hr_train <- sample(nrow(hr), floor(nrow(hr)*0.7))
train <- hr[hr_train,]
test <- hr[-hr_train,]

# run the classification tree
# Outcome: left (whether the employee left); predictors are the HR features
# listed in the formula. Note "average_montly_hours" is the dataset's own
# (misspelled) column name.
tree1 <- rpart(formula = left ~ satisfaction_level + last_evaluation + number_project + average_montly_hours + Work_accident + promotion_last_5years + salary,
               data = train,
               method = "class")

# plot tree
plot(tree1, uniform=TRUE, main="Classification Tree")
text(tree1, use.n=TRUE, all=TRUE, cex=.8)

# confusion matrix (training data)
conf_matrix_tree <- table(train$left, predict(tree1, type="class"))
rownames(conf_matrix_tree) <- paste("Actual", rownames(conf_matrix_tree), sep = ":")
colnames(conf_matrix_tree) <- paste("Pred", colnames(conf_matrix_tree), sep = ":")
print(conf_matrix_tree)

# On test set
test_tree = predict(tree1, test, type = "prob")

#Storing Model Performance Scores
# (column 2 holds the predicted probability of the positive class, left = 1)
pred_tree <-prediction(test_tree[,2], test$left)

# Calculating Area under Curve
perf_tree <- performance(pred_tree,"auc")
perf_tree

# Calculating True Positive and False Positive Rate
perf_tree <- performance(pred_tree, "tpr", "fpr")

# Plot the ROC curve
plot(perf_tree, lwd = 1.5)

#Calculating KS statistics
# (maximum vertical distance between the ROC curve and the diagonal)
ks1.tree <- max(attr(perf_tree, "y.values")[[1]] - (attr(perf_tree, "x.values")[[1]]))
ks1.tree
/04.Classification tree.R
no_license
ingridbrizotti/Data-science-in-R
R
false
false
1,619
r
library(ggplot2) library(rpart) #decision tree library(ROCR) library(RCurl) # load data (from https://www.kaggle.com/ludobenistant/hr-analytics) hr = read.csv("C:/Users/ingrid.brizotti/Downloads/HR_comma_sep.csv") # Divide 70% to train and 30% to test set.seed(4) hr_train <- sample(nrow(hr), floor(nrow(hr)*0.7)) train <- hr[hr_train,] test <- hr[-hr_train,] # run the classification tree tree1 <- rpart(formula = left ~ satisfaction_level + last_evaluation + number_project + average_montly_hours + Work_accident + promotion_last_5years + salary, data = train, method = "class") # plot tree plot(tree1, uniform=TRUE, main="Classification Tree") text(tree1, use.n=TRUE, all=TRUE, cex=.8) # confusion matrix (training data) conf_matrix_tree <- table(train$left, predict(tree1, type="class")) rownames(conf_matrix_tree) <- paste("Actual", rownames(conf_matrix_tree), sep = ":") colnames(conf_matrix_tree) <- paste("Pred", colnames(conf_matrix_tree), sep = ":") print(conf_matrix_tree) # On test set test_tree = predict(tree1, test, type = "prob") #Storing Model Performance Scores pred_tree <-prediction(test_tree[,2], test$left) # Calculating Area under Curve perf_tree <- performance(pred_tree,"auc") perf_tree # Calculating True Positive and False Positive Rate perf_tree <- performance(pred_tree, "tpr", "fpr") # Plot the ROC curve plot(perf_tree, lwd = 1.5) #Calculating KS statistics ks1.tree <- max(attr(perf_tree, "y.values")[[1]] - (attr(perf_tree, "x.values")[[1]])) ks1.tree
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tune_models.R \name{tune_models} \alias{tune_models} \title{Tune multiple machine learning models using cross validation to optimize performance} \usage{ tune_models(d, outcome, models, metric, positive_class, n_folds = 5, tune_depth = 10, tune_method = "random", hyperparameters, model_class) } \arguments{ \item{d}{A data frame} \item{outcome}{Name of the column to predict} \item{models}{Names of models to try, by default "rf" for random forest and "knn" for k-nearest neighbors. See \code{\link{supported_models}} for available models.} \item{metric}{What metric to use to assess model performance? Options for regression: "RMSE" (root-mean-squared error, default), "MAE" (mean-absolute error), or "Rsquared." For classification: "ROC" (area under the receiver operating characteristic curve), or "PR" (area under the precision-recall curve).} \item{positive_class}{For classification only, which outcome level is the "yes" case, i.e. should be associated with high probabilities? Defaults to "Y" or "yes" if present, otherwise is the first level of the outcome variable (first alphabetically if the training data outcome was not already a factor).} \item{n_folds}{How many folds to use in cross-validation? Default = 5.} \item{tune_depth}{How many hyperparameter combinations to try? Default = 10.} \item{tune_method}{How to search hyperparameter space? Default = "random".} \item{hyperparameters}{Currently not supported.} \item{model_class}{"regression" or "classification". If not provided, this will be determined by the class of `outcome` with the determination displayed in a message.} } \value{ A model_list object } \description{ Tune multiple machine learning models using cross validation to optimize performance } \details{ Note that this function is training a lot of models (100 by default) and so can take a while to execute. 
In general a model is trained for each hyperparameter combination in each fold for each model, so run time is a function of length(models) x n_folds x tune_depth. At the default settings, a 1000 row, 10 column data frame should complete in about 30 seconds on a good laptop. } \examples{ \dontrun{ ### Takes ~20 seconds # Prepare data for tuning d <- prep_data(pima_diabetes, patient_id, outcome = diabetes) # Tune random forest and k-nearest neighbors classification models m <- tune_models(d, outcome = diabetes) # Get some info about the tuned models m # Get more detailed info summary(m) # Plot performance over hyperparameter values for each algorithm plot(m) # Extract confusion matrix for random forest (the model with best-performing # hyperparameter values is used) caret::confusionMatrix(m$`Random Forest`, norm = "none") # Compare performance of algorithms at best hyperparameter values rs <- resamples(m) dotplot(rs) } } \seealso{ \code{\link{prep_data}}, \code{\link{predict.model_list}}, \code{\link{supported_models}} }
/man/tune_models.Rd
permissive
hughvnguyen/healthcareai-r
R
false
true
2,988
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tune_models.R \name{tune_models} \alias{tune_models} \title{Tune multiple machine learning models using cross validation to optimize performance} \usage{ tune_models(d, outcome, models, metric, positive_class, n_folds = 5, tune_depth = 10, tune_method = "random", hyperparameters, model_class) } \arguments{ \item{d}{A data frame} \item{outcome}{Name of the column to predict} \item{models}{Names of models to try, by default "rf" for random forest and "knn" for k-nearest neighbors. See \code{\link{supported_models}} for available models.} \item{metric}{What metric to use to assess model performance? Options for regression: "RMSE" (root-mean-squared error, default), "MAE" (mean-absolute error), or "Rsquared." For classification: "ROC" (area under the receiver operating characteristic curve), or "PR" (area under the precision-recall curve).} \item{positive_class}{For classification only, which outcome level is the "yes" case, i.e. should be associated with high probabilities? Defaults to "Y" or "yes" if present, otherwise is the first level of the outcome variable (first alphabetically if the training data outcome was not already a factor).} \item{n_folds}{How many folds to use in cross-validation? Default = 5.} \item{tune_depth}{How many hyperparameter combinations to try? Defualt = 10.} \item{tune_method}{How to search hyperparameter space? Default = "random".} \item{hyperparameters}{Currently not supported.} \item{model_class}{"regression" or "classification". If not provided, this will be determined by the class of `outcome` with the determination displayed in a message.} } \value{ A model_list object } \description{ Tune multiple machine learning models using cross validation to optimize performance } \details{ Note that this function is training a lot of models (100 by default) and so can take a while to execute. 
In general a model is trained for each hyperparameter combination in each fold for each model, so run time is a function of length(models) x n_folds x tune_depth. At the default settings, a 1000 row, 10 column data frame should complete in about 30 seconds on a good laptop. } \examples{ \dontrun{ ### Takes ~20 seconds # Prepare data for tuning d <- prep_data(pima_diabetes, patient_id, outcome = diabetes) # Tune random forest and k-nearest neighbors classification models m <- tune_models(d, outcome = diabetes) # Get some info about the tuned models m # Get more detailed info summary(m) # Plot performance over hyperparameter values for each algorithm plot(m) # Extract confusion matrix for random forest (the model with best-performing # hyperparameter values is used) caret::confusionMatrix(m$`Random Forest`, norm = "none") # Compare performance of algorithms at best hyperparameter values rs <- resamples(m) dotplot(rs) } } \seealso{ \code{\link{prep_data}}, \code{\link{predict.model_list}}, \code{\link{supported_models}} }
# Ye Mingliang, "A cigarette sales forecasting model using combined
# time-series forecasting methods," Fujian Computer, vol. 36, no. 02,
# pp. 63-67, 2020.
#
# Available model functions:
#   1  mean forecast:               meanF(p, d)
#   2  seasonal naive:              snaiveF(p, d)
#   3  STL decomposition:           stlDeco(p)
#   4  ETS exponential smoothing:   etsF(p, d)
#   5  seasonal ARIMA:              sarimaF(p, d)
#   6  multi-seasonal STL forecast: mstlF(p, d)
#   7  dynamic harmonic regression: mdhArimaF(p, d)
#   8  TBATS model:                 tbatsF(p, d)
#   9  neural-net autoregression:   nnarF(p, d)
#   10 combination forecast:        combineF(p, d)
# Models 7 (mdhArimaF), 8 (tbatsF), 9 (nnarF) and 10 (combineF) take
# noticeably longer to fit.
# Model 10 (combination) cannot produce prediction intervals.

# Required packages (fpp2 attaches the forecast package used below)
library(fpp2)
library(urca)

# ---- shared helpers --------------------------------------------------------

# Read the weekly sales CSV at `path`: column 1 holds dates encoded as
# YYYYWW integers, column 2 holds the sales figures. Zeros are treated as
# missing and filled by seasonal interpolation (forecast::na.interp).
# Returns a list with the cleaned data frame and a weekly ts (frequency 52).
read_weekly_ts <- function(path) {
  dat <- read.csv(file = path, header = TRUE, sep = ",")
  dat[2][dat[2] == 0] <- NA
  dat[2] <- na.interp(dat[2])
  wts <- ts(dat[[2]],
            start = c(dat[1, 1] %/% 100, dat[1, 1] %% 100),
            frequency = 52)
  list(data = dat, ts = wts)
}

# Build YYYYWW labels for the `duration` weeks following the last observed
# date in `dates` (a vector of YYYYWW integers).
# NOTE(review): the week component wraps via %% 52, i.e. weeks are treated
# as numbered 0..51 -- confirm this matches the data's week convention.
forecast_dates <- function(dates, duration) {
  last <- as.integer(dates[length(dates)])
  vapply(seq_len(duration), function(i) {
    (last %/% 100 + (last %% 100 + i) %/% 52) * 100 + (last %% 100 + i) %% 52
  }, numeric(1))
}

# Write `result_df` next to the input file, with `tag` replacing the ".csv"
# suffix, and return the output path.
# Fixed: the original built the name with paste() (default sep = " "),
# which embedded a stray space in every output file name.
save_result <- function(result_df, path, tag) {
  result_path <- paste0(substr(path, 1, nchar(path) - 4), tag)
  write.csv(result_df, file = result_path)
  result_path
}

# Shared driver for all point-forecast models: read the data, fit and
# forecast via `forecaster` (a function of the weekly ts returning a
# forecast object), label the forecast rows with future YYYYWW dates,
# save, and return the output path.
run_forecast <- function(path, duration, forecaster, tag) {
  wk <- read_weekly_ts(path)
  result_df <- as.data.frame(forecaster(wk$ts))
  row.names(result_df) <- forecast_dates(wk$data[[1]], duration)
  save_result(result_df, path, tag)
}

# ---- model functions -------------------------------------------------------

# 1 - mean forecast (data path, horizon in weeks)
meanF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) meanf(y, h = duration),
               "_meanf_Result.csv")
}
# =======================================================================================
# 2 - seasonal naive forecast (data path, horizon in weeks)
snaiveF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) snaive(y, h = duration),
               "_snaive_Result.csv")
}
# =======================================================================================
# 3 - STL decomposition (data path); writes the trend / seasonal /
# remainder components, one row per observed week (no forecasting).
stlDeco <- function(path) {
  wk <- read_weekly_ts(path)
  fit <- stl(wk$ts, t.window = 13, s.window = "periodic", robust = TRUE)
  result_df <- as.data.frame(fit$time.series)
  row.names(result_df) <- wk$data[[1]]
  save_result(result_df, path, "_STL_Result.csv")
}
# =======================================================================================
# 4 - ETS exponential smoothing forecast (data path, horizon in weeks)
etsF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(ets(y), h = duration),
               "_ETS_Result.csv")
}
# =======================================================================================
# 5 - seasonal ARIMA forecast (full model search: stepwise and
# approximation disabled, so this is slower but more thorough)
sarimaF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(auto.arima(y, stepwise = FALSE,
                                               approximation = FALSE),
                                    h = duration),
               "_sARIMA_Result.csv")
}
# =======================================================================================
# 6 - multi-seasonal STL decomposition forecast
mstlF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) stlf(y, h = duration),
               "_mSTL_Result.csv")
}
# =======================================================================================
# 7 - dynamic harmonic regression (ARIMA errors + Fourier terms, K = 10)
mdhArimaF <- function(path, duration) {
  run_forecast(path, duration, function(y) {
    fit <- auto.arima(y, seasonal = FALSE, lambda = 0,
                      xreg = fourier(y, K = 10))
    forecast(fit, h = duration, xreg = fourier(y, K = 10, h = duration))
  }, "_mdhARIMA_Result.csv")
}
# =======================================================================================
# 8 - TBATS model forecast
tbatsF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(tbats(y), h = duration),
               "_TBATS_Result.csv")
}
# =======================================================================================
# 9 - neural-network autoregression forecast (with prediction intervals)
nnarF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(nnetar(y, lambda = 0),
                                    PI = TRUE, h = duration),
               "_NNAR_Result.csv")
}
# =======================================================================================
# 10 - combination forecast: the average of the point forecasts of five
# sub-models (dynamic harmonic regression, ARIMA, STLF, NNAR, TBATS).
# No prediction intervals are produced.
combineF <- function(path, duration) {
  wk <- read_weekly_ts(path)
  y <- wk$ts
  MDH <- forecast(auto.arima(y, seasonal = FALSE, lambda = 0,
                             xreg = fourier(y, K = 10)),
                  h = duration, xreg = fourier(y, K = 10, h = duration))
  ARIMA <- forecast(auto.arima(y, lambda = 0, biasadj = TRUE,
                               stepwise = FALSE, approximation = FALSE),
                    h = duration)
  STL <- stlf(y, lambda = 0, h = duration, biasadj = TRUE)
  NNAR <- forecast(nnetar(y), h = duration)
  TBATS <- forecast(tbats(y, biasadj = TRUE), h = duration)
  # Equal-weight average of the five point forecasts
  combination <- (MDH[["mean"]] + ARIMA[["mean"]] + STL[["mean"]] +
                  NNAR[["mean"]] + TBATS[["mean"]]) / 5
  result_df <- as.data.frame(combination)
  row.names(result_df) <- forecast_dates(wk$data[[1]], duration)
  save_result(result_df, path, "_combined_Result.csv")
}
# =======================================================================================
/TimeSeries/Demo_Forecasting_Ts_week.R
no_license
zkxshg/Test_of_R
R
false
false
10,835
r
# Ye Mingliang, "A cigarette sales forecasting model using combined
# time-series forecasting methods," Fujian Computer, vol. 36, no. 02,
# pp. 63-67, 2020.
#
# Available model functions:
#   1  mean forecast:               meanF(p, d)
#   2  seasonal naive:              snaiveF(p, d)
#   3  STL decomposition:           stlDeco(p)
#   4  ETS exponential smoothing:   etsF(p, d)
#   5  seasonal ARIMA:              sarimaF(p, d)
#   6  multi-seasonal STL forecast: mstlF(p, d)
#   7  dynamic harmonic regression: mdhArimaF(p, d)
#   8  TBATS model:                 tbatsF(p, d)
#   9  neural-net autoregression:   nnarF(p, d)
#   10 combination forecast:        combineF(p, d)
# Models 7 (mdhArimaF), 8 (tbatsF), 9 (nnarF) and 10 (combineF) take
# noticeably longer to fit.
# Model 10 (combination) cannot produce prediction intervals.

# Required packages (fpp2 attaches the forecast package used below)
library(fpp2)
library(urca)

# ---- shared helpers --------------------------------------------------------

# Read the weekly sales CSV at `path`: column 1 holds dates encoded as
# YYYYWW integers, column 2 holds the sales figures. Zeros are treated as
# missing and filled by seasonal interpolation (forecast::na.interp).
# Returns a list with the cleaned data frame and a weekly ts (frequency 52).
read_weekly_ts <- function(path) {
  dat <- read.csv(file = path, header = TRUE, sep = ",")
  dat[2][dat[2] == 0] <- NA
  dat[2] <- na.interp(dat[2])
  wts <- ts(dat[[2]],
            start = c(dat[1, 1] %/% 100, dat[1, 1] %% 100),
            frequency = 52)
  list(data = dat, ts = wts)
}

# Build YYYYWW labels for the `duration` weeks following the last observed
# date in `dates` (a vector of YYYYWW integers).
# NOTE(review): the week component wraps via %% 52, i.e. weeks are treated
# as numbered 0..51 -- confirm this matches the data's week convention.
forecast_dates <- function(dates, duration) {
  last <- as.integer(dates[length(dates)])
  vapply(seq_len(duration), function(i) {
    (last %/% 100 + (last %% 100 + i) %/% 52) * 100 + (last %% 100 + i) %% 52
  }, numeric(1))
}

# Write `result_df` next to the input file, with `tag` replacing the ".csv"
# suffix, and return the output path.
# Fixed: the original built the name with paste() (default sep = " "),
# which embedded a stray space in every output file name.
save_result <- function(result_df, path, tag) {
  result_path <- paste0(substr(path, 1, nchar(path) - 4), tag)
  write.csv(result_df, file = result_path)
  result_path
}

# Shared driver for all point-forecast models: read the data, fit and
# forecast via `forecaster` (a function of the weekly ts returning a
# forecast object), label the forecast rows with future YYYYWW dates,
# save, and return the output path.
run_forecast <- function(path, duration, forecaster, tag) {
  wk <- read_weekly_ts(path)
  result_df <- as.data.frame(forecaster(wk$ts))
  row.names(result_df) <- forecast_dates(wk$data[[1]], duration)
  save_result(result_df, path, tag)
}

# ---- model functions -------------------------------------------------------

# 1 - mean forecast (data path, horizon in weeks)
meanF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) meanf(y, h = duration),
               "_meanf_Result.csv")
}
# =======================================================================================
# 2 - seasonal naive forecast (data path, horizon in weeks)
snaiveF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) snaive(y, h = duration),
               "_snaive_Result.csv")
}
# =======================================================================================
# 3 - STL decomposition (data path); writes the trend / seasonal /
# remainder components, one row per observed week (no forecasting).
stlDeco <- function(path) {
  wk <- read_weekly_ts(path)
  fit <- stl(wk$ts, t.window = 13, s.window = "periodic", robust = TRUE)
  result_df <- as.data.frame(fit$time.series)
  row.names(result_df) <- wk$data[[1]]
  save_result(result_df, path, "_STL_Result.csv")
}
# =======================================================================================
# 4 - ETS exponential smoothing forecast (data path, horizon in weeks)
etsF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(ets(y), h = duration),
               "_ETS_Result.csv")
}
# =======================================================================================
# 5 - seasonal ARIMA forecast (full model search: stepwise and
# approximation disabled, so this is slower but more thorough)
sarimaF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(auto.arima(y, stepwise = FALSE,
                                               approximation = FALSE),
                                    h = duration),
               "_sARIMA_Result.csv")
}
# =======================================================================================
# 6 - multi-seasonal STL decomposition forecast
mstlF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) stlf(y, h = duration),
               "_mSTL_Result.csv")
}
# =======================================================================================
# 7 - dynamic harmonic regression (ARIMA errors + Fourier terms, K = 10)
mdhArimaF <- function(path, duration) {
  run_forecast(path, duration, function(y) {
    fit <- auto.arima(y, seasonal = FALSE, lambda = 0,
                      xreg = fourier(y, K = 10))
    forecast(fit, h = duration, xreg = fourier(y, K = 10, h = duration))
  }, "_mdhARIMA_Result.csv")
}
# =======================================================================================
# 8 - TBATS model forecast
tbatsF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(tbats(y), h = duration),
               "_TBATS_Result.csv")
}
# =======================================================================================
# 9 - neural-network autoregression forecast (with prediction intervals)
nnarF <- function(path, duration) {
  run_forecast(path, duration,
               function(y) forecast(nnetar(y, lambda = 0),
                                    PI = TRUE, h = duration),
               "_NNAR_Result.csv")
}
# =======================================================================================
# 10 - combination forecast: the average of the point forecasts of five
# sub-models (dynamic harmonic regression, ARIMA, STLF, NNAR, TBATS).
# No prediction intervals are produced.
combineF <- function(path, duration) {
  wk <- read_weekly_ts(path)
  y <- wk$ts
  MDH <- forecast(auto.arima(y, seasonal = FALSE, lambda = 0,
                             xreg = fourier(y, K = 10)),
                  h = duration, xreg = fourier(y, K = 10, h = duration))
  ARIMA <- forecast(auto.arima(y, lambda = 0, biasadj = TRUE,
                               stepwise = FALSE, approximation = FALSE),
                    h = duration)
  STL <- stlf(y, lambda = 0, h = duration, biasadj = TRUE)
  NNAR <- forecast(nnetar(y), h = duration)
  TBATS <- forecast(tbats(y, biasadj = TRUE), h = duration)
  # Equal-weight average of the five point forecasts
  combination <- (MDH[["mean"]] + ARIMA[["mean"]] + STL[["mean"]] +
                  NNAR[["mean"]] + TBATS[["mean"]]) / 5
  result_df <- as.data.frame(combination)
  row.names(result_df) <- forecast_dates(wk$data[[1]], duration)
  save_result(result_df, path, "_combined_Result.csv")
}
# =======================================================================================
# L141.hfc_R_S_T_Y.R
# Builds historical HFC emissions by GCAM region / sector / technology /
# gas / year from EDGAR inventory data, plus HFC emission factors for
# building cooling. All data-system helpers (readdata, writedata,
# sourcedata, printlog, vecpaste, repeat_and_add_vector, logstart/logstop)
# come from the GCAM/EMISSIONS headers sourced below.

# Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "EMISSPROC_DIR" ) ){
    if( Sys.getenv( "EMISSIONSPROC" ) != "" ){
        EMISSPROC_DIR <- Sys.getenv( "EMISSIONSPROC" )
    } else {
        stop("Could not determine location of emissions data system. Please set the R var EMISSPROC_DIR to the appropriate location")
    }
}

# Universal header file - provides logging, file support, etc.
source(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
logstart( "L141.hfc_R_S_T_Y.R" )
adddep(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
printlog( "Historical HFC emissions by GCAM technology, computed from EDGAR emissions data" )

# -----------------------------------------------------------------------------
# 1. Read files
# The assumption files below presumably define X_EDGAR_historical_years and
# HFCs used later in this script -- confirm against the sourced files.
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "EMISSIONS_ASSUMPTIONS", "A_emissions_data", extension = ".R" )
# Mapping tables: region names/IDs, f-gas technology mapping, and the
# gas-to-sector mapping used for the "other_f_gases" category
GCAM_region_names <- readdata( "COMMON_MAPPINGS", "GCAM_region_names")
GCAM_tech <- readdata( "EMISSIONS_MAPPINGS", "gcam_fgas_tech" )
Other_F <- readdata( "EMISSIONS_MAPPINGS", "other_f_gases" )
# Building-sector energy use, needed to split cooling emissions between
# residential and commercial cooling
L144.in_EJ_R_bld_serv_F_Yh <- readdata( "ENERGY_LEVEL1_DATA", "L144.in_EJ_R_bld_serv_F_Yh" )
iso_GCAM_regID <- readdata( "COMMON_MAPPINGS", "iso_GCAM_regID")
EDGAR_sector <- readdata( "EMISSIONS_MAPPINGS", "EDGAR_sector" )
EDGAR_nation <- readdata( "EMISSIONS_MAPPINGS", "EDGAR_nation" )
# NOTE(review): GWP is read here but is not referenced anywhere else in
# this script's visible code.
GWP <- readdata( "EMISSIONS_ASSUMPTIONS", "A41.GWP" )
# Per-gas EDGAR HFC emissions inventories
EDGAR_HFC125 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC125" )
EDGAR_HFC134a <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC134a" )
EDGAR_HFC143a <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC143a" )
EDGAR_HFC152a <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC152a" )
EDGAR_HFC227ea <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC227ea" )
EDGAR_HFC23 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC23" )
EDGAR_HFC236fa <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC236fa" )
EDGAR_HFC245fa <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC245fa" )
EDGAR_HFC32 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC32" )
EDGAR_HFC365mfc <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC365mfc" )
EDGAR_HFC43 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC43" )

# -----------------------------------------------------------------------------
# 2. Perform computations
printlog( "Map EDGAR HFC emissions to GCAM technologies" )
#First, bind all gases together
# Tag each per-gas table with its gas name so they can be stacked into a
# single long table
EDGAR_HFC125$Non.CO2 <- "HFC125"
EDGAR_HFC134a$Non.CO2 <- "HFC134a"
EDGAR_HFC143a$Non.CO2 <- "HFC143a"
EDGAR_HFC152a$Non.CO2 <- "HFC152a"
EDGAR_HFC227ea$Non.CO2 <- "HFC227ea"
EDGAR_HFC23$Non.CO2 <- "HFC23"
EDGAR_HFC236fa$Non.CO2 <- "HFC236fa"
EDGAR_HFC245fa$Non.CO2 <- "HFC245fa"
EDGAR_HFC32$Non.CO2 <- "HFC32"
EDGAR_HFC365mfc$Non.CO2 <- "HFC365mfc"
EDGAR_HFC43$Non.CO2 <- "HFC43"
L141.EDGAR_HFC <- rbind( EDGAR_HFC125, EDGAR_HFC134a, EDGAR_HFC143a, EDGAR_HFC152a, EDGAR_HFC227ea, EDGAR_HFC23, EDGAR_HFC236fa, EDGAR_HFC245fa, EDGAR_HFC32, EDGAR_HFC365mfc, EDGAR_HFC43 )

#Then, prepare EDGAR data for use
# Attach aggregate sector (from the IPCC description), iso code (from the
# ISO_A3 code) and GCAM region ID via lookup-table matches
L141.EDGAR_HFC$EDGAR_agg_sector <- EDGAR_sector$agg_sector[ match( L141.EDGAR_HFC$IPCC_description, EDGAR_sector$IPCC_description )]
L141.EDGAR_HFC$iso <- EDGAR_nation$iso[ match( L141.EDGAR_HFC$ISO_A3, EDGAR_nation$ISO_A3 )]
L141.EDGAR_HFC$GCAM_region_ID <- iso_GCAM_regID$GCAM_region_ID[ match( L141.EDGAR_HFC$iso, iso_GCAM_regID$iso )]
# Keep only the ID columns and the historical-year value columns
L141.EDGAR_HFC <- L141.EDGAR_HFC[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", X_EDGAR_historical_years) ]
# Melt to long form (one row per region/sector/gas/year) and sum over
# the countries within each GCAM region
L141.EDGAR_hfc_R_S_T_Yh.melt <- melt( L141.EDGAR_HFC, id.vars=c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2" ) )
L141.EDGAR_hfc_R_S_T_Yh.melt <- aggregate( L141.EDGAR_hfc_R_S_T_Yh.melt$value, by=as.list( L141.EDGAR_hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", "variable" ) ] ), sum )
names( L141.EDGAR_hfc_R_S_T_Yh.melt )[ names( L141.EDGAR_hfc_R_S_T_Yh.melt ) == "x" ] <- "EDGAR_emissions"

#Map in other f-gas sector, which varies by gas
# Rows tagged "other_f_gases" get their sector from the per-gas Other_F
# mapping; all other rows keep their sector as-is
L141.EDGAR_hfc_R_S_T_Yh_rest <- subset( L141.EDGAR_hfc_R_S_T_Yh.melt, L141.EDGAR_hfc_R_S_T_Yh.melt$EDGAR_agg_sector != "other_f_gases")
L141.EDGAR_hfc_R_S_T_Yh_other <- subset( L141.EDGAR_hfc_R_S_T_Yh.melt, L141.EDGAR_hfc_R_S_T_Yh.melt$EDGAR_agg_sector == "other_f_gases")
L141.EDGAR_hfc_R_S_T_Yh_other$EDGAR_agg_sector <- Other_F$Sector[ match( L141.EDGAR_hfc_R_S_T_Yh_other$Non.CO2, Other_F$Gas )]
L141.EDGAR_hfc_R_S_T_Yh.melt <- rbind( L141.EDGAR_hfc_R_S_T_Yh_rest, L141.EDGAR_hfc_R_S_T_Yh_other )

#Map to GCAM technologies
# Start from the technology mapping table and expand it over all regions,
# historical years and HFC gases, then look up the matching EDGAR
# emissions; combinations with no EDGAR data get zero
L141.hfc_R_S_T_Yh.melt <- GCAM_tech
L141.hfc_R_S_T_Yh.melt <- repeat_and_add_vector( L141.hfc_R_S_T_Yh.melt, "GCAM_region_ID", GCAM_region_names$GCAM_region_ID )
L141.hfc_R_S_T_Yh.melt <- repeat_and_add_vector( L141.hfc_R_S_T_Yh.melt, "xyear", X_EDGAR_historical_years )
L141.hfc_R_S_T_Yh.melt <- repeat_and_add_vector( L141.hfc_R_S_T_Yh.melt, "Non.CO2", HFCs )
L141.hfc_R_S_T_Yh.melt$emissions <- L141.EDGAR_hfc_R_S_T_Yh.melt$EDGAR_emissions[ match( vecpaste(L141.hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", "xyear" ) ] ), vecpaste( L141.EDGAR_hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", "variable" )] ))]
L141.hfc_R_S_T_Yh.melt$emissions[ is.na( L141.hfc_R_S_T_Yh.melt$emissions )] <- 0

#Disaggregate cooling emissions to residential and commercial sectors
# Compute each cooling service's share of total cooling electricity use,
# by region and year, from the building energy data
L141.R_cooling_T_Yh <- subset(L144.in_EJ_R_bld_serv_F_Yh, L144.in_EJ_R_bld_serv_F_Yh$service %in% c( "comm cooling", "resid cooling" ))
L141.R_cooling_T_Yh <- subset( L141.R_cooling_T_Yh, L141.R_cooling_T_Yh$fuel == "electricity" )
L141.R_cooling_T_Yh.melt <- melt( L141.R_cooling_T_Yh, id.vars=c( "GCAM_region_ID", "sector", "fuel", "service"))
L141.R_cooling_Yh <- aggregate( L141.R_cooling_T_Yh.melt$value, by=as.list( L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "variable")]), sum )
L141.R_cooling_T_Yh.melt$total <- L141.R_cooling_Yh$x[ match( vecpaste( L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "variable" ) ]), vecpaste( L141.R_cooling_Yh[ c( "GCAM_region_ID", "variable" )]))]
L141.R_cooling_T_Yh.melt$share <- L141.R_cooling_T_Yh.melt$value / L141.R_cooling_T_Yh.melt$total
# Map the cooling shares onto the emissions table; non-cooling sectors
# find no match (the share table only holds cooling services), so their
# NA share is set to 1 and their emissions are left unscaled
L141.hfc_R_S_T_Yh.melt$share <- L141.R_cooling_T_Yh.melt$share[ match( vecpaste( L141.hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "xyear", "supplysector" ) ]), vecpaste(L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "variable", "service" ) ] ))]
L141.hfc_R_S_T_Yh.melt$share[ is.na( L141.hfc_R_S_T_Yh.melt$share ) ] <- 1
L141.hfc_R_S_T_Yh.melt$emissions <- L141.hfc_R_S_T_Yh.melt$emissions * L141.hfc_R_S_T_Yh.melt$share

#Reshape
# Sum over duplicate keys, then cast back to wide form (one column per
# historical year); missing combinations become 0
L141.hfc_R_S_T_Yh.melt <- aggregate( L141.hfc_R_S_T_Yh.melt$emissions, by=as.list( L141.hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "supplysector", "subsector", "stub.technology", "Non.CO2", "xyear" ) ]), sum)
L141.hfc_R_S_T_Yh <- dcast( L141.hfc_R_S_T_Yh.melt, GCAM_region_ID + supplysector + subsector + stub.technology + Non.CO2 ~ xyear, value.var=c( "x" ))
L141.hfc_R_S_T_Yh[ is.na( L141.hfc_R_S_T_Yh ) ] <- 0

#Compute cooling emissions factors
# Emission factor = cooling HFC emissions / cooling electricity use,
# computed per region / service / gas / year, then cast to wide form
L141.hfc_R_cooling_T_Yh.melt <- subset( L141.hfc_R_S_T_Yh.melt, L141.hfc_R_S_T_Yh.melt$supplysector %in% c( "comm cooling", "resid cooling" ) )
names( L141.hfc_R_cooling_T_Yh.melt )[ names( L141.hfc_R_cooling_T_Yh.melt ) == "x" ] <- "emissions"
L141.hfc_R_cooling_T_Yh.melt$energy <- L141.R_cooling_T_Yh.melt$value[ match( vecpaste( L141.hfc_R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "supplysector", "xyear")]), vecpaste( L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "service", "variable" )]) )]
L141.hfc_R_cooling_T_Yh.melt$em_fact <- L141.hfc_R_cooling_T_Yh.melt$emissions / L141.hfc_R_cooling_T_Yh.melt$energy
L141.hfc_ef_R_cooling_Yh <- dcast( L141.hfc_R_cooling_T_Yh.melt, GCAM_region_ID + supplysector + subsector + stub.technology + Non.CO2 ~ xyear, value.var=c( "em_fact" ))
L141.hfc_ef_R_cooling_Yh[ is.na( L141.hfc_ef_R_cooling_Yh ) ] <- 0

# -----------------------------------------------------------------------------
# 3. Output
#Add comments for each table
comments.L141.hfc_R_S_T_Yh <- c( "HFC emissions by region / sector / technology / gas / historical year", "Unit = Gg" )
comments.L141.hfc_ef_R_cooling_Yh <- c( "HFC emissions factors for cooling by region / sector / technology / gas / historical year", "Unit = Gg / EJ" )

#write tables as CSV files
writedata( L141.hfc_R_S_T_Yh, domain="EMISSIONS_LEVEL1_DATA", fn="L141.hfc_R_S_T_Yh", comments=comments.L141.hfc_R_S_T_Yh )
writedata( L141.hfc_ef_R_cooling_Yh, domain="EMISSIONS_LEVEL1_DATA", fn="L141.hfc_ef_R_cooling_Yh", comments=comments.L141.hfc_ef_R_cooling_Yh )

# Every script should finish with this line
logstop()
/input/gcam-data-system/emissions-processing-code/level1/L141.hfc_R_S_T_Y.R
permissive
Randynat/gcam-core
R
false
false
8,909
r
# L141.hfc_R_S_T_Y.R
# Builds historical HFC emissions by GCAM region / sector / technology /
# gas / year from EDGAR inventory data, plus HFC emission factors for
# building cooling. All data-system helpers (readdata, writedata,
# sourcedata, printlog, vecpaste, repeat_and_add_vector, logstart/logstop)
# come from the GCAM/EMISSIONS headers sourced below.

# Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "EMISSPROC_DIR" ) ){
    if( Sys.getenv( "EMISSIONSPROC" ) != "" ){
        EMISSPROC_DIR <- Sys.getenv( "EMISSIONSPROC" )
    } else {
        stop("Could not determine location of emissions data system. Please set the R var EMISSPROC_DIR to the appropriate location")
    }
}

# Universal header file - provides logging, file support, etc.
source(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
logstart( "L141.hfc_R_S_T_Y.R" )
adddep(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
printlog( "Historical HFC emissions by GCAM technology, computed from EDGAR emissions data" )

# -----------------------------------------------------------------------------
# 1. Read files
# The assumption files below presumably define X_EDGAR_historical_years and
# HFCs used later in this script -- confirm against the sourced files.
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "EMISSIONS_ASSUMPTIONS", "A_emissions_data", extension = ".R" )
# Mapping tables: region names/IDs, f-gas technology mapping, and the
# gas-to-sector mapping used for the "other_f_gases" category
GCAM_region_names <- readdata( "COMMON_MAPPINGS", "GCAM_region_names")
GCAM_tech <- readdata( "EMISSIONS_MAPPINGS", "gcam_fgas_tech" )
Other_F <- readdata( "EMISSIONS_MAPPINGS", "other_f_gases" )
# Building-sector energy use, needed to split cooling emissions between
# residential and commercial cooling
L144.in_EJ_R_bld_serv_F_Yh <- readdata( "ENERGY_LEVEL1_DATA", "L144.in_EJ_R_bld_serv_F_Yh" )
iso_GCAM_regID <- readdata( "COMMON_MAPPINGS", "iso_GCAM_regID")
EDGAR_sector <- readdata( "EMISSIONS_MAPPINGS", "EDGAR_sector" )
EDGAR_nation <- readdata( "EMISSIONS_MAPPINGS", "EDGAR_nation" )
# NOTE(review): GWP is read here but is not referenced anywhere else in
# this script's visible code.
GWP <- readdata( "EMISSIONS_ASSUMPTIONS", "A41.GWP" )
# Per-gas EDGAR HFC emissions inventories
EDGAR_HFC125 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC125" )
EDGAR_HFC134a <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC134a" )
EDGAR_HFC143a <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC143a" )
EDGAR_HFC152a <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC152a" )
EDGAR_HFC227ea <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC227ea" )
EDGAR_HFC23 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC23" )
EDGAR_HFC236fa <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC236fa" )
EDGAR_HFC245fa <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC245fa" )
EDGAR_HFC32 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC32" )
EDGAR_HFC365mfc <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC365mfc" )
EDGAR_HFC43 <- readdata( "EMISSIONS_LEVEL0_DATA", "EDGAR_HFC43" )

# -----------------------------------------------------------------------------
# 2. Perform computations
printlog( "Map EDGAR HFC emissions to GCAM technologies" )
#First, bind all gases together
# Tag each per-gas table with its gas name so they can be stacked into a
# single long table
EDGAR_HFC125$Non.CO2 <- "HFC125"
EDGAR_HFC134a$Non.CO2 <- "HFC134a"
EDGAR_HFC143a$Non.CO2 <- "HFC143a"
EDGAR_HFC152a$Non.CO2 <- "HFC152a"
EDGAR_HFC227ea$Non.CO2 <- "HFC227ea"
EDGAR_HFC23$Non.CO2 <- "HFC23"
EDGAR_HFC236fa$Non.CO2 <- "HFC236fa"
EDGAR_HFC245fa$Non.CO2 <- "HFC245fa"
EDGAR_HFC32$Non.CO2 <- "HFC32"
EDGAR_HFC365mfc$Non.CO2 <- "HFC365mfc"
EDGAR_HFC43$Non.CO2 <- "HFC43"
L141.EDGAR_HFC <- rbind( EDGAR_HFC125, EDGAR_HFC134a, EDGAR_HFC143a, EDGAR_HFC152a, EDGAR_HFC227ea, EDGAR_HFC23, EDGAR_HFC236fa, EDGAR_HFC245fa, EDGAR_HFC32, EDGAR_HFC365mfc, EDGAR_HFC43 )

#Then, prepare EDGAR data for use
# Attach aggregate sector (from the IPCC description), iso code (from the
# ISO_A3 code) and GCAM region ID via lookup-table matches
L141.EDGAR_HFC$EDGAR_agg_sector <- EDGAR_sector$agg_sector[ match( L141.EDGAR_HFC$IPCC_description, EDGAR_sector$IPCC_description )]
L141.EDGAR_HFC$iso <- EDGAR_nation$iso[ match( L141.EDGAR_HFC$ISO_A3, EDGAR_nation$ISO_A3 )]
L141.EDGAR_HFC$GCAM_region_ID <- iso_GCAM_regID$GCAM_region_ID[ match( L141.EDGAR_HFC$iso, iso_GCAM_regID$iso )]
# Keep only the ID columns and the historical-year value columns
L141.EDGAR_HFC <- L141.EDGAR_HFC[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", X_EDGAR_historical_years) ]
# Melt to long form (one row per region/sector/gas/year) and sum over
# the countries within each GCAM region
L141.EDGAR_hfc_R_S_T_Yh.melt <- melt( L141.EDGAR_HFC, id.vars=c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2" ) )
L141.EDGAR_hfc_R_S_T_Yh.melt <- aggregate( L141.EDGAR_hfc_R_S_T_Yh.melt$value, by=as.list( L141.EDGAR_hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", "variable" ) ] ), sum )
names( L141.EDGAR_hfc_R_S_T_Yh.melt )[ names( L141.EDGAR_hfc_R_S_T_Yh.melt ) == "x" ] <- "EDGAR_emissions"

#Map in other f-gas sector, which varies by gas
# Rows tagged "other_f_gases" get their sector from the per-gas Other_F
# mapping; all other rows keep their sector as-is
L141.EDGAR_hfc_R_S_T_Yh_rest <- subset( L141.EDGAR_hfc_R_S_T_Yh.melt, L141.EDGAR_hfc_R_S_T_Yh.melt$EDGAR_agg_sector != "other_f_gases")
L141.EDGAR_hfc_R_S_T_Yh_other <- subset( L141.EDGAR_hfc_R_S_T_Yh.melt, L141.EDGAR_hfc_R_S_T_Yh.melt$EDGAR_agg_sector == "other_f_gases")
L141.EDGAR_hfc_R_S_T_Yh_other$EDGAR_agg_sector <- Other_F$Sector[ match( L141.EDGAR_hfc_R_S_T_Yh_other$Non.CO2, Other_F$Gas )]
L141.EDGAR_hfc_R_S_T_Yh.melt <- rbind( L141.EDGAR_hfc_R_S_T_Yh_rest, L141.EDGAR_hfc_R_S_T_Yh_other )

#Map to GCAM technologies
# Start from the technology mapping table and expand it over all regions,
# historical years and HFC gases, then look up the matching EDGAR
# emissions; combinations with no EDGAR data get zero
L141.hfc_R_S_T_Yh.melt <- GCAM_tech
L141.hfc_R_S_T_Yh.melt <- repeat_and_add_vector( L141.hfc_R_S_T_Yh.melt, "GCAM_region_ID", GCAM_region_names$GCAM_region_ID )
L141.hfc_R_S_T_Yh.melt <- repeat_and_add_vector( L141.hfc_R_S_T_Yh.melt, "xyear", X_EDGAR_historical_years )
L141.hfc_R_S_T_Yh.melt <- repeat_and_add_vector( L141.hfc_R_S_T_Yh.melt, "Non.CO2", HFCs )
L141.hfc_R_S_T_Yh.melt$emissions <- L141.EDGAR_hfc_R_S_T_Yh.melt$EDGAR_emissions[ match( vecpaste(L141.hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", "xyear" ) ] ), vecpaste( L141.EDGAR_hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "EDGAR_agg_sector", "Non.CO2", "variable" )] ))]
L141.hfc_R_S_T_Yh.melt$emissions[ is.na( L141.hfc_R_S_T_Yh.melt$emissions )] <- 0

#Disaggregate cooling emissions to residential and commercial sectors
# Compute each cooling service's share of total cooling electricity use,
# by region and year, from the building energy data
L141.R_cooling_T_Yh <- subset(L144.in_EJ_R_bld_serv_F_Yh, L144.in_EJ_R_bld_serv_F_Yh$service %in% c( "comm cooling", "resid cooling" ))
L141.R_cooling_T_Yh <- subset( L141.R_cooling_T_Yh, L141.R_cooling_T_Yh$fuel == "electricity" )
L141.R_cooling_T_Yh.melt <- melt( L141.R_cooling_T_Yh, id.vars=c( "GCAM_region_ID", "sector", "fuel", "service"))
L141.R_cooling_Yh <- aggregate( L141.R_cooling_T_Yh.melt$value, by=as.list( L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "variable")]), sum )
L141.R_cooling_T_Yh.melt$total <- L141.R_cooling_Yh$x[ match( vecpaste( L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "variable" ) ]), vecpaste( L141.R_cooling_Yh[ c( "GCAM_region_ID", "variable" )]))]
L141.R_cooling_T_Yh.melt$share <- L141.R_cooling_T_Yh.melt$value / L141.R_cooling_T_Yh.melt$total
# Map the cooling shares onto the emissions table; non-cooling sectors
# find no match (the share table only holds cooling services), so their
# NA share is set to 1 and their emissions are left unscaled
L141.hfc_R_S_T_Yh.melt$share <- L141.R_cooling_T_Yh.melt$share[ match( vecpaste( L141.hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "xyear", "supplysector" ) ]), vecpaste(L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "variable", "service" ) ] ))]
L141.hfc_R_S_T_Yh.melt$share[ is.na( L141.hfc_R_S_T_Yh.melt$share ) ] <- 1
L141.hfc_R_S_T_Yh.melt$emissions <- L141.hfc_R_S_T_Yh.melt$emissions * L141.hfc_R_S_T_Yh.melt$share

#Reshape
# Sum over duplicate keys, then cast back to wide form (one column per
# historical year); missing combinations become 0
L141.hfc_R_S_T_Yh.melt <- aggregate( L141.hfc_R_S_T_Yh.melt$emissions, by=as.list( L141.hfc_R_S_T_Yh.melt[ c( "GCAM_region_ID", "supplysector", "subsector", "stub.technology", "Non.CO2", "xyear" ) ]), sum)
L141.hfc_R_S_T_Yh <- dcast( L141.hfc_R_S_T_Yh.melt, GCAM_region_ID + supplysector + subsector + stub.technology + Non.CO2 ~ xyear, value.var=c( "x" ))
L141.hfc_R_S_T_Yh[ is.na( L141.hfc_R_S_T_Yh ) ] <- 0

#Compute cooling emissions factors
# Emission factor = cooling HFC emissions / cooling electricity use,
# computed per region / service / gas / year, then cast to wide form
L141.hfc_R_cooling_T_Yh.melt <- subset( L141.hfc_R_S_T_Yh.melt, L141.hfc_R_S_T_Yh.melt$supplysector %in% c( "comm cooling", "resid cooling" ) )
names( L141.hfc_R_cooling_T_Yh.melt )[ names( L141.hfc_R_cooling_T_Yh.melt ) == "x" ] <- "emissions"
L141.hfc_R_cooling_T_Yh.melt$energy <- L141.R_cooling_T_Yh.melt$value[ match( vecpaste( L141.hfc_R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "supplysector", "xyear")]), vecpaste( L141.R_cooling_T_Yh.melt[ c( "GCAM_region_ID", "service", "variable" )]) )]
L141.hfc_R_cooling_T_Yh.melt$em_fact <- L141.hfc_R_cooling_T_Yh.melt$emissions / L141.hfc_R_cooling_T_Yh.melt$energy
L141.hfc_ef_R_cooling_Yh <- dcast( L141.hfc_R_cooling_T_Yh.melt, GCAM_region_ID + supplysector + subsector + stub.technology + Non.CO2 ~ xyear, value.var=c( "em_fact" ))
L141.hfc_ef_R_cooling_Yh[ is.na( L141.hfc_ef_R_cooling_Yh ) ] <- 0

# -----------------------------------------------------------------------------
# 3. Output
#Add comments for each table
comments.L141.hfc_R_S_T_Yh <- c( "HFC emissions by region / sector / technology / gas / historical year", "Unit = Gg" )
comments.L141.hfc_ef_R_cooling_Yh <- c( "HFC emissions factors for cooling by region / sector / technology / gas / historical year", "Unit = Gg / EJ" )

#write tables as CSV files
writedata( L141.hfc_R_S_T_Yh, domain="EMISSIONS_LEVEL1_DATA", fn="L141.hfc_R_S_T_Yh", comments=comments.L141.hfc_R_S_T_Yh )
writedata( L141.hfc_ef_R_cooling_Yh, domain="EMISSIONS_LEVEL1_DATA", fn="L141.hfc_ef_R_cooling_Yh", comments=comments.L141.hfc_ef_R_cooling_Yh )

# Every script should finish with this line
logstop()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iq-functions.R \name{iq_table} \alias{iq_table} \title{Import IQ conversion table} \usage{ iq_table(table = NULL, subtest = NULL, ...) } \arguments{ \item{table}{path or data.frame with conversion data} \item{subtest}{character vector indicating which subtest} \item{...}{arguments to \code{rio::import}} } \value{ long tibble of the wanted conversion table } \description{ Import a punched version of the IQ conversion table, for scaling raw scores to norm or T-scores } \examples{ \dontrun{ conversion_table <- iq_table("tests/testthat/iq_table_subtest.tsv", header=TRUE) iq_table(conversion_table, "vocabulary") } } \seealso{ Other iq-functions: \code{\link{iq_raw2score}()} } \concept{iq-functions}
/man/iq_table.Rd
permissive
LCBC-UiO/Conversions
R
false
true
784
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iq-functions.R \name{iq_table} \alias{iq_table} \title{Import IQ conversion table} \usage{ iq_table(table = NULL, subtest = NULL, ...) } \arguments{ \item{table}{path or data.frame with conversion data} \item{subtest}{character vector indicating which subtest} \item{...}{arguments to \code{rio::import}} } \value{ long tibble of the wanted conversion table } \description{ Import a punched version of the IQ conversion table, for scaling raw scores to norm or T-scores } \examples{ \dontrun{ conversion_table <- iq_table("tests/testthat/iq_table_subtest.tsv", header=TRUE) iq_table(conversion_table, "vocabulary") } } \seealso{ Other iq-functions: \code{\link{iq_raw2score}()} } \concept{iq-functions}
#visualize.R rm(list=ls()) source('my_functions.R') # LOAD -------------------------------------------------------------------- #load cleaned data otuput from prepare.R ll=load('data/train_set.Rdata') ll # LEAVING FREQUENCIES AMONG GROUPS ------------------------------------------------ #are there any groups particularly wanting to leave? #first get the grand 'leaving rate' grand_prop_leaving = sum(fed_train$my_leaving=='yes') / nrow(fed_train) #split out demographic with outcome demo_leave = fed_train %>% select(my_leaving, RANDOM:DMINORITY) #get proportions leaving for each demographic feature prop_leaving = demo_leave %>% select(-AGENCY, -LEVEL1) %>% pivot_longer(DSEX:DMINORITY, names_to = 'demo_feature', values_to = 'value') %>% group_by(demo_feature, value) %>% summarize(prop_leaving = sum(my_leaving=='yes')/n()) #function to look at proportion leaving for a given demographic column barplot_proportion_leaving = function(col){ prop_leaving %>% filter(demo_feature==col) %>% ggplot(aes_string(x='value', y='prop_leaving')) + geom_bar(stat='identity') + geom_hline(yintercept = grand_prop_leaving, lty=2) + labs(x=col, y='proportion leaving') } #plot for the demographic columns demo_types = demo_leave %>% select(DSEX:DMINORITY) %>% colnames() bp_list = map(demo_types, barplot_proportion_leaving) plot_grid(plotlist = bp_list, nrow=3) # PRINCIAPL COMPONENT ANLAYSIS OF QUESTIONAIRE ------------- #do different demograpohic groupings explain variance in question respones? 
# splice out the question data q_df = fed_train %>% select(my_leaving, grep("^Q", colnames(fed_train))) #RUN PCA #function to plot pca from normalized expression counts run_pca = function(df, trait_df, pcs = 2){ #build pca pca <- prcomp(as.matrix(df)) percentVar <- pca$sdev^2/sum(pca$sdev^2) score_df = data.frame(pca$x[,1:pcs]) res_df = cbind(trait_df, score_df) attr(res_df, "percentVar") <- percentVar[1:pcs] return(res_df) } #run pca pca_df = q_df %>% select(-my_leaving) %>% run_pca(demo_leave) #PLOT PCA #choose groups head(pca_df) groups_to_plot = demo_leave %>% select(-RANDOM, -AGENCY, -LEVEL1) %>% colnames() #choose pcs pcs_to_plot = paste('PC', 1:2, sep='') #plot the densities for each group and pc pca_plt_list = list() for (pc in pcs_to_plot){ for (group in groups_to_plot){ pca_plt_list[[paste(group,pc,sep='-')]] = plot_pc_density(pca_df, pc, group) } } #plot accross groups and PCs 1-4 plot_grid(plotlist = pca_plt_list, nrow=length(pcs_to_plot)) #plot just leaving plot_pc_density(pca_df, 'PC1', 'my_leaving') + theme(legend.position = 'right') + labs(fill='leaving') #leaving is skewed to right on PC1, which captures nearly half the variation #in questionare responses. So it looks like the questionare has useful information #in predicting whether people will leave. # LOOK AT CORRELATIONS WITH LEAVING ------------------------------------------------ #how well do the different questions predict 'leaving'? 
#convert leaving categories to numeric num_leaving = q_df %>% mutate(my_leaving = if_else(my_leaving=='yes', 1, 0)) #get the correlation matrix for question data cor_df = num_leaving %>% cor() %>% data.frame() %>% rownames_to_column('question') %>% as_tibble() %>% arrange(my_leaving) #plot distribution of correlations cor_df %>% filter(question != 'my_leaving') %>% ggplot(aes(x=my_leaving)) + geom_histogram() + labs(x='correlation with leaving') + scale_x_continuous(breaks = seq(-0.4, 0, 0.1), limits = c(-.4, 0)) #so all the questions indicate good things, #and lower values indicate dissatisfaction #check top questions top_qs = cor_df %>% filter(my_leaving < -0.28) %>% pull(question) top_qs #Overall questions are strong predictors # Q69: Considering everything, how satisfied are you with your job? # Q71: Considering everything, how satisfied are you with your organization? # Q40: I recommend my organization as a good place to work. #More interesting, opportunity to rise and feeling appreciated are important # Q67: How satisfied are you with your opportunity to get a better job in your organization? # Q11: My talents are used well in the workplace. # GET CORRELATION BETWEEN QUESTIONS --------------------------------------- #how similar are the questions? #get correlation matrix between questions q_cor_df = q_df %>% select(-my_leaving) %>% cor() %>% data.frame() %>% rownames_to_column('compare') %>% as_tibble() %>% pivot_longer(-compare, names_to = 'q', values_to = 'cor') %>% filter(!compare==q) %>% arrange(desc(cor)) %>% mutate(i=1:length(compare)) %>% filter(!i%%2==0) %>% select(-i) #plot histogram q_cor_df %>% ggplot(aes(x=cor)) + geom_histogram() #top correlated questions q_cor_df #So there are simply some highly similar questions # Q51: I have trust and confidence in my supervisor # Q52: Overall, how good a job do you feel is being done by your immediate supervisor? 
# Q58: Managers promote communication among different work units # Q59: Managers support collaboration across work units to accomplish work objectives #could merge these into single questions by taking their mean, #or could eliminate the one that correlates more weakly with leaving status # COLLAPSE INTO SINGLE FEATURE -------------------------------------------- #make a simple dissatisfaction feature #slice response as numeric y = q_df %>% mutate(y=my_leaving=='yes') %>% pull(y) %>% as.numeric() #get mean response get_mean_q_response = function(df){ qnum_df = df %>% select(starts_with('Q')) %>% apply(1, mean) } #get the mean response mean_q = get_mean_q_response(fed_train) sat_df = tibble(leaving = fed_train$my_leaving, satis = mean_q) #correlations cor(y, mean_q) cor(pca_df$PC1, y) cor(pca_df$PC1, mean_q) cor(mean_q, fed_train$Q69) # Q69: Considering everything, how satisfied are you with your job? #plot the densities for the mean score pd1 = plot_density(pwr = 1) pd2 = plot_density(pwr = 2) pwr=2 splitter = pd2[['int']] #build simplified feature set simple_df = demo_leave %>% mutate(satis = mean_q, splitter = splitter) #make simple prediction library(caret) simple_confusion = function(df){ pred_df = df %>% mutate(satisfaction = satis^pwr) %>% mutate(pred = if_else(satisfaction > splitter, 'no', 'yes')) caret::confusionMatrix(data=factor(pred_df$pred), reference = factor(pred_df$my_leaving), positive = 'yes') } simple_confusion(simple_df) # OUTPUT SIMPLE DATAFRAMES ------------------------------------------------ #write out the simplified training set simple_df %>% write_csv('data/train_simple.csv') #apply simple model to test set ll=load('data/test_set.Rdata') ll test_satis =get_mean_q_response(fed_test) simple_test = fed_test %>% mutate(satis = test_satis, splitter = splitter) %>% select(colnames(simple_df)) #test simple model simple_confusion(simple_test) #write out simple_test %>% write_csv('data/test_simple.csv') #compress to fit on github system('gzip 
data/train_simple.csv') system('gzip data/test_simple.csv') # DOES PREDICTIVE VALUE OF QUESTIONS VARY BY DEMOGRAPHY? ------------------ #how does the correlation between questions and leaving vary between groups? #look at out demographic types demo_types #pivot longer for demographic groups long_dem = fed_train %>% mutate(my_leaving = if_else(my_leaving=='yes', 1, 0)) %>% select(-RANDOM, -POSTWT, -AGENCY, -LEVEL1) %>% pivot_longer(DSEX:DMINORITY, names_to = 'trait', values_to = 'trait_value') #get outcome y = long_dem %>% pull(my_leaving) #function to subset a dataframe into the levels for a given column sub_by_col = function(df, col){ df=data.frame(df) lvls = unique(df[,col]) df_list = list() for (l in lvls){ df_list[[l]] = df[df[,col]==l,] } print('levels:') print(lvls) return(df_list) } #get an indepdendent dataframe for each group dem_dfs = sub_by_col(long_dem, 'trait_value') names(dem_dfs) #function to get correlation between each question and leaving get_cor_leaving = function(df, colname){ leaving = df %>% pull(my_leaving) x = df %>% select(grep("^Q", colnames(df))) cors = cor(x, leaving) cor_df = tibble('q' = rownames(cors), 'c' = cors[,1]) colnames(cor_df) = c('q', colname) return(cor_df) } #populate a list of question-leaving correlations cor_dfs = list() for (n in names(dem_dfs)){ print(n) cor_dfs[[n]] = get_cor_leaving(dem_dfs[[n]], n) } #assemble into single df dem_cor_df = purrr::reduce(cor_dfs, left_join, 'q') qs = dem_cor_df %>% pull(q) #get correlation of correlations cor_cors = dem_cor_df %>% dplyr::select(-q) %>% cor() #plot library(pheatmap) pheatmap(cor_cors)
/visualize.R
no_license
Groves-Dixon-Demos/Federal-Employees-Feelings
R
false
false
9,217
r
#visualize.R rm(list=ls()) source('my_functions.R') # LOAD -------------------------------------------------------------------- #load cleaned data otuput from prepare.R ll=load('data/train_set.Rdata') ll # LEAVING FREQUENCIES AMONG GROUPS ------------------------------------------------ #are there any groups particularly wanting to leave? #first get the grand 'leaving rate' grand_prop_leaving = sum(fed_train$my_leaving=='yes') / nrow(fed_train) #split out demographic with outcome demo_leave = fed_train %>% select(my_leaving, RANDOM:DMINORITY) #get proportions leaving for each demographic feature prop_leaving = demo_leave %>% select(-AGENCY, -LEVEL1) %>% pivot_longer(DSEX:DMINORITY, names_to = 'demo_feature', values_to = 'value') %>% group_by(demo_feature, value) %>% summarize(prop_leaving = sum(my_leaving=='yes')/n()) #function to look at proportion leaving for a given demographic column barplot_proportion_leaving = function(col){ prop_leaving %>% filter(demo_feature==col) %>% ggplot(aes_string(x='value', y='prop_leaving')) + geom_bar(stat='identity') + geom_hline(yintercept = grand_prop_leaving, lty=2) + labs(x=col, y='proportion leaving') } #plot for the demographic columns demo_types = demo_leave %>% select(DSEX:DMINORITY) %>% colnames() bp_list = map(demo_types, barplot_proportion_leaving) plot_grid(plotlist = bp_list, nrow=3) # PRINCIAPL COMPONENT ANLAYSIS OF QUESTIONAIRE ------------- #do different demograpohic groupings explain variance in question respones? 
# splice out the question data q_df = fed_train %>% select(my_leaving, grep("^Q", colnames(fed_train))) #RUN PCA #function to plot pca from normalized expression counts run_pca = function(df, trait_df, pcs = 2){ #build pca pca <- prcomp(as.matrix(df)) percentVar <- pca$sdev^2/sum(pca$sdev^2) score_df = data.frame(pca$x[,1:pcs]) res_df = cbind(trait_df, score_df) attr(res_df, "percentVar") <- percentVar[1:pcs] return(res_df) } #run pca pca_df = q_df %>% select(-my_leaving) %>% run_pca(demo_leave) #PLOT PCA #choose groups head(pca_df) groups_to_plot = demo_leave %>% select(-RANDOM, -AGENCY, -LEVEL1) %>% colnames() #choose pcs pcs_to_plot = paste('PC', 1:2, sep='') #plot the densities for each group and pc pca_plt_list = list() for (pc in pcs_to_plot){ for (group in groups_to_plot){ pca_plt_list[[paste(group,pc,sep='-')]] = plot_pc_density(pca_df, pc, group) } } #plot accross groups and PCs 1-4 plot_grid(plotlist = pca_plt_list, nrow=length(pcs_to_plot)) #plot just leaving plot_pc_density(pca_df, 'PC1', 'my_leaving') + theme(legend.position = 'right') + labs(fill='leaving') #leaving is skewed to right on PC1, which captures nearly half the variation #in questionare responses. So it looks like the questionare has useful information #in predicting whether people will leave. # LOOK AT CORRELATIONS WITH LEAVING ------------------------------------------------ #how well do the different questions predict 'leaving'? 
#convert leaving categories to numeric num_leaving = q_df %>% mutate(my_leaving = if_else(my_leaving=='yes', 1, 0)) #get the correlation matrix for question data cor_df = num_leaving %>% cor() %>% data.frame() %>% rownames_to_column('question') %>% as_tibble() %>% arrange(my_leaving) #plot distribution of correlations cor_df %>% filter(question != 'my_leaving') %>% ggplot(aes(x=my_leaving)) + geom_histogram() + labs(x='correlation with leaving') + scale_x_continuous(breaks = seq(-0.4, 0, 0.1), limits = c(-.4, 0)) #so all the questions indicate good things, #and lower values indicate dissatisfaction #check top questions top_qs = cor_df %>% filter(my_leaving < -0.28) %>% pull(question) top_qs #Overall questions are strong predictors # Q69: Considering everything, how satisfied are you with your job? # Q71: Considering everything, how satisfied are you with your organization? # Q40: I recommend my organization as a good place to work. #More interesting, opportunity to rise and feeling appreciated are important # Q67: How satisfied are you with your opportunity to get a better job in your organization? # Q11: My talents are used well in the workplace. # GET CORRELATION BETWEEN QUESTIONS --------------------------------------- #how similar are the questions? #get correlation matrix between questions q_cor_df = q_df %>% select(-my_leaving) %>% cor() %>% data.frame() %>% rownames_to_column('compare') %>% as_tibble() %>% pivot_longer(-compare, names_to = 'q', values_to = 'cor') %>% filter(!compare==q) %>% arrange(desc(cor)) %>% mutate(i=1:length(compare)) %>% filter(!i%%2==0) %>% select(-i) #plot histogram q_cor_df %>% ggplot(aes(x=cor)) + geom_histogram() #top correlated questions q_cor_df #So there are simply some highly similar questions # Q51: I have trust and confidence in my supervisor # Q52: Overall, how good a job do you feel is being done by your immediate supervisor? 
# Q58: Managers promote communication among different work units # Q59: Managers support collaboration across work units to accomplish work objectives #could merge these into single questions by taking their mean, #or could eliminate the one that correlates more weakly with leaving status # COLLAPSE INTO SINGLE FEATURE -------------------------------------------- #make a simple dissatisfaction feature #slice response as numeric y = q_df %>% mutate(y=my_leaving=='yes') %>% pull(y) %>% as.numeric() #get mean response get_mean_q_response = function(df){ qnum_df = df %>% select(starts_with('Q')) %>% apply(1, mean) } #get the mean response mean_q = get_mean_q_response(fed_train) sat_df = tibble(leaving = fed_train$my_leaving, satis = mean_q) #correlations cor(y, mean_q) cor(pca_df$PC1, y) cor(pca_df$PC1, mean_q) cor(mean_q, fed_train$Q69) # Q69: Considering everything, how satisfied are you with your job? #plot the densities for the mean score pd1 = plot_density(pwr = 1) pd2 = plot_density(pwr = 2) pwr=2 splitter = pd2[['int']] #build simplified feature set simple_df = demo_leave %>% mutate(satis = mean_q, splitter = splitter) #make simple prediction library(caret) simple_confusion = function(df){ pred_df = df %>% mutate(satisfaction = satis^pwr) %>% mutate(pred = if_else(satisfaction > splitter, 'no', 'yes')) caret::confusionMatrix(data=factor(pred_df$pred), reference = factor(pred_df$my_leaving), positive = 'yes') } simple_confusion(simple_df) # OUTPUT SIMPLE DATAFRAMES ------------------------------------------------ #write out the simplified training set simple_df %>% write_csv('data/train_simple.csv') #apply simple model to test set ll=load('data/test_set.Rdata') ll test_satis =get_mean_q_response(fed_test) simple_test = fed_test %>% mutate(satis = test_satis, splitter = splitter) %>% select(colnames(simple_df)) #test simple model simple_confusion(simple_test) #write out simple_test %>% write_csv('data/test_simple.csv') #compress to fit on github system('gzip 
data/train_simple.csv') system('gzip data/test_simple.csv') # DOES PREDICTIVE VALUE OF QUESTIONS VARY BY DEMOGRAPHY? ------------------ #how does the correlation between questions and leaving vary between groups? #look at out demographic types demo_types #pivot longer for demographic groups long_dem = fed_train %>% mutate(my_leaving = if_else(my_leaving=='yes', 1, 0)) %>% select(-RANDOM, -POSTWT, -AGENCY, -LEVEL1) %>% pivot_longer(DSEX:DMINORITY, names_to = 'trait', values_to = 'trait_value') #get outcome y = long_dem %>% pull(my_leaving) #function to subset a dataframe into the levels for a given column sub_by_col = function(df, col){ df=data.frame(df) lvls = unique(df[,col]) df_list = list() for (l in lvls){ df_list[[l]] = df[df[,col]==l,] } print('levels:') print(lvls) return(df_list) } #get an indepdendent dataframe for each group dem_dfs = sub_by_col(long_dem, 'trait_value') names(dem_dfs) #function to get correlation between each question and leaving get_cor_leaving = function(df, colname){ leaving = df %>% pull(my_leaving) x = df %>% select(grep("^Q", colnames(df))) cors = cor(x, leaving) cor_df = tibble('q' = rownames(cors), 'c' = cors[,1]) colnames(cor_df) = c('q', colname) return(cor_df) } #populate a list of question-leaving correlations cor_dfs = list() for (n in names(dem_dfs)){ print(n) cor_dfs[[n]] = get_cor_leaving(dem_dfs[[n]], n) } #assemble into single df dem_cor_df = purrr::reduce(cor_dfs, left_join, 'q') qs = dem_cor_df %>% pull(q) #get correlation of correlations cor_cors = dem_cor_df %>% dplyr::select(-q) %>% cor() #plot library(pheatmap) pheatmap(cor_cors)
# + tags=["parameters"] # - df = read.csv(upstream[['upvotes-dump']]) head(df) hist(df$mean_upvotes)
/etl/plot/upvotes.R
permissive
adityalahariya/projects
R
false
false
106
r
# + tags=["parameters"] # - df = read.csv(upstream[['upvotes-dump']]) head(df) hist(df$mean_upvotes)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NMFOffset-class.R \docType{class} \name{NMFOffset-class} \alias{NMFOffset-class} \alias{initialize,NMFOffset-method} \title{NMF Model - Nonnegative Matrix Factorization with Offset} \usage{ \S4method{initialize}{NMFOffset}(.Object, ..., offset) } \arguments{ \item{offset}{optional numeric vector used to initialise slot \sQuote{offset}.} } \description{ This class implements the \emph{Nonnegative Matrix Factorization with Offset} model, required by the NMF with Offset algorithm. } \details{ The NMF with Offset algorithm is defined by \cite{Badea2008} as a modification of the euclidean based NMF algorithm from \code{Lee2001} (see section Details and references below). It aims at obtaining 'cleaner' factor matrices, by the introduction of an offset matrix, explicitly modelling a feature specific baseline -- constant across samples. } \section{Creating objects from the Class}{ Object of class \code{NMFOffset} can be created using the standard way with operator \code{\link{new}} However, as for all NMF model classes -- that extend class \code{\linkS4class{NMF}}, objects of class \code{NMFOffset} should be created using factory method \code{\link{nmfModel}} : \code{new('NMFOffset')} \code{nmfModel(model='NMFOffset')} \code{nmfModel(model='NMFOffset', W=w, offset=rep(1, nrow(w)))} See \code{\link{nmfModel}} for more details on how to use the factory method. } \section{Initialize method}{ The initialize method for \code{NMFOffset} objects tries to correct the initial value passed for slot \code{offset}, so that it is consistent with the dimensions of the \code{NMF} model: it will pad the offset vector with NA values to get the length equal to the number of rows in the basis matrix. 
} \examples{ # create a completely empty NMF object new('NMFOffset') # create a NMF object based on random (compatible) matrices n <- 50; r <- 3; p <- 20 w <- rmatrix(n, r) h <- rmatrix(r, p) nmfModel(model='NMFOffset', W=w, H=h, offset=rep(0.5, nrow(w))) # apply Nonsmooth NMF algorithm to a random target matrix V <- rmatrix(n, p) \dontrun{nmf(V, r, 'offset')} # random NMF model with offset rnmf(3, 10, 5, model='NMFOffset') } \seealso{ Other NMF-model: \code{\link{NMFns-class}}, \code{\link{NMFstd-class}} }
/man/NMFOffset-class.Rd
no_license
pooranis/NMF
R
false
true
2,323
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/NMFOffset-class.R \docType{class} \name{NMFOffset-class} \alias{NMFOffset-class} \alias{initialize,NMFOffset-method} \title{NMF Model - Nonnegative Matrix Factorization with Offset} \usage{ \S4method{initialize}{NMFOffset}(.Object, ..., offset) } \arguments{ \item{offset}{optional numeric vector used to initialise slot \sQuote{offset}.} } \description{ This class implements the \emph{Nonnegative Matrix Factorization with Offset} model, required by the NMF with Offset algorithm. } \details{ The NMF with Offset algorithm is defined by \cite{Badea2008} as a modification of the euclidean based NMF algorithm from \code{Lee2001} (see section Details and references below). It aims at obtaining 'cleaner' factor matrices, by the introduction of an offset matrix, explicitly modelling a feature specific baseline -- constant across samples. } \section{Creating objects from the Class}{ Object of class \code{NMFOffset} can be created using the standard way with operator \code{\link{new}} However, as for all NMF model classes -- that extend class \code{\linkS4class{NMF}}, objects of class \code{NMFOffset} should be created using factory method \code{\link{nmfModel}} : \code{new('NMFOffset')} \code{nmfModel(model='NMFOffset')} \code{nmfModel(model='NMFOffset', W=w, offset=rep(1, nrow(w)))} See \code{\link{nmfModel}} for more details on how to use the factory method. } \section{Initialize method}{ The initialize method for \code{NMFOffset} objects tries to correct the initial value passed for slot \code{offset}, so that it is consistent with the dimensions of the \code{NMF} model: it will pad the offset vector with NA values to get the length equal to the number of rows in the basis matrix. 
} \examples{ # create a completely empty NMF object new('NMFOffset') # create a NMF object based on random (compatible) matrices n <- 50; r <- 3; p <- 20 w <- rmatrix(n, r) h <- rmatrix(r, p) nmfModel(model='NMFOffset', W=w, H=h, offset=rep(0.5, nrow(w))) # apply Nonsmooth NMF algorithm to a random target matrix V <- rmatrix(n, p) \dontrun{nmf(V, r, 'offset')} # random NMF model with offset rnmf(3, 10, 5, model='NMFOffset') } \seealso{ Other NMF-model: \code{\link{NMFns-class}}, \code{\link{NMFstd-class}} }
\name{salting2.df} \alias{salting2.df} \title{Salting out effects in forensic blood alcohol determination} \description{Blood alcohol measurements determined by headspace gas chromatography have been challenged on the grounds that the presence of the preservative sodium fluoride in blood samples artificially increases headspace alcohol concentrations due to a salting out effect. Blood samples containing varying amounts of ethanol and sodium fluoride (NaF) were tested using semi-automated headspace gas chromatography with n-propyl alcohol as the internal standard to assess the validity of this challenge. Miller et al found, in fact, that under these test conditions the measured alcohol levels are systematically depressed as the amount of sodium fluoride in the blood sample increases. Blood was drawn from each of four subjects at two time points, first near the time of estimated peak blood alcohol concentration and then approximately 1.5 hours later. Samples were initially analyzed with NaF at manufacturer's levels (ca. 10 mg/mL). } \usage{ data(salting2.df) } \format{ A data frame containing four variables \tabular{rlll}{ [,1] \tab subject \tab numeric factor \tab subject identifier 1-4 \cr [,2] \tab time \tab numeric factor \tab time sample taken 0 or 1.5h \cr [,3] \tab NaF \tab numeric factor \tab the level of sodium fluoride added in mg/mL \cr [,4] \tab EtOH \tab numeric \tab alcohol concentration in g/100mL \cr } } \details{ Note that the blocking and treatment factors in this data frame are numeric. Therefore, to use them as such will require the use of \code{factor} or \code{ordered}. } \references{ B.A. Miller, S.M. Day, T.E. Vasquez, F.M. Evans, Absence of salting out effects in forensic blood alcohol determination at various concentrations of sodium fluoride using semi-automated headspace gas chromatography, Science & Justice, Volume 44, Issue 2, April 2004, Pages 73-76. } \author{B. A. Miller et al.} \keyword{datasets}
/man/salting2.df.Rd
no_license
cran/dafs
R
false
false
2,033
rd
\name{salting2.df} \alias{salting2.df} \title{Salting out effects in forensic blood alcohol determination} \description{Blood alcohol measurements determined by headspace gas chromatography have been challenged on the grounds that the presence of the preservative sodium fluoride in blood samples artificially increases headspace alcohol concentrations due to a salting out effect. Blood samples containing varying amounts of ethanol and sodium fluoride (NaF) were tested using semi-automated headspace gas chromatography with n-propyl alcohol as the internal standard to assess the validity of this challenge. Miller et al found, in fact, that under these test conditions the measured alcohol levels are systematically depressed as the amount of sodium fluoride in the blood sample increases. Blood was drawn from each of four subjects at two time points, first near the time of estimated peak blood alcohol concentration and then approximately 1.5 hours later. Samples were initially analyzed with NaF at manufacturer's levels (ca. 10 mg/mL). } \usage{ data(salting2.df) } \format{ A data frame containing four variables \tabular{rlll}{ [,1] \tab subject \tab numeric factor \tab subject identifier 1-4 \cr [,2] \tab time \tab numeric factor \tab time sample taken 0 or 1.5h \cr [,3] \tab NaF \tab numeric factor \tab the level of sodium fluoride added in mg/mL \cr [,4] \tab EtOH \tab numeric \tab alcohol concentration in g/100mL \cr } } \details{ Note that the blocking and treatment factors in this data frame are numeric. Therefore, to use them as such will require the use of \code{factor} or \code{ordered}. } \references{ B.A. Miller, S.M. Day, T.E. Vasquez, F.M. Evans, Absence of salting out effects in forensic blood alcohol determination at various concentrations of sodium fluoride using semi-automated headspace gas chromatography, Science & Justice, Volume 44, Issue 2, April 2004, Pages 73-76. } \author{B. A. Miller et al.} \keyword{datasets}
#Linear Regression #1. Packages library(MASS) library(plyr) library(ggplot2) library(knitr) library(GGally) #2. Linear regression # Import data set crime <- read.table("http://www.andrew.cmu.edu/user/achoulde/94842/data/crime_simple.txt", sep = "\t", header = TRUE) # Assign more meaningful variable names colnames(crime) <- c("crime.per.million", "young.males", "is.south", "average.ed", "exp.per.cap.1960", "exp.per.cap.1959", "labour.part", "male.per.fem", "population", "nonwhite", "unemp.youth", "unemp.adult", "median.assets", "num.low.salary") # Convert is.south to a factor # Divide average.ed by 10 so that the variable is actually average education # Convert median assets to 1000's of dollars instead of 10's crime <- transform(crime, is.south = as.factor(is.south), average.ed = average.ed / 10, median.assets = median.assets / 100) # print summary of the data summary(crime) # Scatter plot of outcome (crime.per.million) against average.ed qplot(average.ed, crime.per.million, data = crime) # correlation between education and crime with(crime, cor(average.ed, crime.per.million)) # Scatter plot of outcome (crime.per.million) against median.assets qplot(median.assets, crime.per.million, data = crime) # correlation between education and crime with(crime, cor(median.assets, crime.per.million)) # Boxplots showing crime rate broken down by southern vs non-southern state qplot(is.south, crime.per.million, geom = "boxplot", data = crime) crime.lm <- lm(crime.per.million ~ ., data = crime) # Summary of the linear regression model crime.lm summary(crime.lm) options(scipen=4) # Set scipen = 0 to get back to default summary(crime.lm) # List all attributes of the linear model attributes(crime.lm) crime.lm$coef # Pull coefficients element from summary(lm) object round(summary(crime.lm)$coef, 3) # Pull the coefficients table from summary(lm) crime.lm.coef <- round(summary(crime.lm)$coef, 3) # See what this gives class(crime.lm.coef) attributes(crime.lm.coef) 
crime.lm.coef["average.ed", "Pr(>|t|)"] plot(crime.lm) diamonds.lm <- lm(price ~ carat + cut + clarity + color, data = diamonds) plot(diamonds.lm) diamonds.lm2 <- lm(log(price) ~ I(log(carat)) + cut + clarity + color, data = diamonds) plot(diamonds.lm2) economic.var.names <- c("exp.per.cap.1959", "exp.per.cap.1960", "unemp.adult", "unemp.youth", "labour.part", "median.assets") pairs(crime[,economic.var.names]) round(cor(crime[,economic.var.names]), 3) # Function taken from ?pairs Example section. panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) { usr <- par("usr"); on.exit(par(usr)) par(usr = c(0, 1, 0, 1)) r <- abs(cor(x, y)) txt <- format(c(r, 0.123456789), digits = digits)[1] txt <- paste0(prefix, txt) if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt) text(0.5, 0.5, txt, cex = pmax(1, cex.cor * r)) } # Use panel.cor to display correlations in lower panel. pairs(crime[,economic.var.names], lower.panel = panel.cor) # ggpairs from GGally library # Unlike pairs(), ggpairs() works with non-numeric # predictors in addition to numeric ones. # Consider ggpairs() for your final project ggpairs(crime[,c(economic.var.names, "is.south")], axisLabels = "internal") crime.lm.2 <- update(crime.lm, . ~ . - exp.per.cap.1959 - unemp.youth) summary(crime.lm.2) crime.lm.summary.2 <- summary(crime.lm.2) kable(crime.lm.summary.2$coef, digits = c(3, 3, 3, 4), format = 'markdown') #3. Thinking more critically about linear regression crime.lm <- lm(crime.per.million ~ ., data = crime) crime.lm2 <- update(crime.lm, . ~ . 
- exp.per.cap.1959 - unemp.youth) kable(summary(crime.lm)$coef, digits = c(3, 3, 3, 4), format = 'markdown') crime.lm.summary2 <- summary(crime.lm2) kable(crime.lm.summary2$coef, digits = c(3, 3, 3, 4), format = 'markdown') # all 95% confidence intervals confint(crime.lm2) # Just for education confint(crime.lm2, parm = "average.ed") # 75% confidence interval confint(crime.lm2, parm = "average.ed", level = 0.75) # How does 2 SE rule compare to confint output? # lower endpoint coef(crime.lm2)["average.ed"] - 2* summary(crime.lm2)$coef["average.ed", "Std. Error"] # upper endpoint coef(crime.lm2)["average.ed"] + 2* summary(crime.lm2)$coef["average.ed", "Std. Error"] my.data <- data.frame(y = c(12, 13, 10, 5, 7, 12, 15), x1 = c(6, 6.5, 5, 2.5, 3.5, 6, 7.5), x2 = c(6, 6.5, 5, 2.5, 3.5, 6, 7.5)) my.data crime.lm.summary2$coef["exp.per.cap.1960",] crime.lm.summary2$coef["average.ed",] #4. Factors in linear regression #추가 colnames(birthwt) <- c("birthwt.below.2500", "mother.age", "mother.weight", "race", "mother.smokes", "previous.prem.labor", "hypertension", "uterine.irr", "physician.visits", "birthwt.grams") birthwt <- transform(birthwt, race = as.factor(mapvalues(race, c(1, 2, 3), c("white","black", "other"))), mother.smokes = as.factor(mapvalues(mother.smokes, c(0,1), c("no", "yes"))), hypertension = as.factor(mapvalues(hypertension, c(0,1), c("no", "yes"))), uterine.irr = as.factor(mapvalues(uterine.irr, c(0,1), c("no", "yes"))) ) # Fit regression model birthwt.lm <- lm(birthwt.grams ~ race + mother.age, data = birthwt) # Regression model summary summary(birthwt.lm) # Calculate race-specific intercepts intercepts <- c(coef(birthwt.lm)["(Intercept)"], coef(birthwt.lm)["(Intercept)"] + coef(birthwt.lm)["raceother"], coef(birthwt.lm)["(Intercept)"] + coef(birthwt.lm)["racewhite"]) lines.df <- data.frame(intercepts = intercepts, slopes = rep(coef(birthwt.lm)["mother.age"], 3), race = levels(birthwt$race)) qplot(x = mother.age, y = birthwt.grams, color = race, data = 
birthwt) + geom_abline(aes(intercept = intercepts, slope = slopes, color = race), data = lines.df) head(model.matrix(birthwt.lm), 20) qplot(x = mother.age, y = birthwt.grams, color = race, data = birthwt) + geom_abline(aes(intercept = intercepts, slope = slopes, color = race), data = lines.df) qplot(x = mother.age, y = birthwt.grams, color = race, data = birthwt) + stat_smooth(method = "lm", se = FALSE, fullrange = TRUE) birthwt.lm.interact <- lm(birthwt.grams ~ race * mother.age, data = birthwt) summary(birthwt.lm.interact)
/R code/HW7.R
no_license
Kim-Ayeong/Data_Science_with_R
R
false
false
6,291
r
#Linear Regression #1. Packages library(MASS) library(plyr) library(ggplot2) library(knitr) library(GGally) #2. Linear regression # Import data set crime <- read.table("http://www.andrew.cmu.edu/user/achoulde/94842/data/crime_simple.txt", sep = "\t", header = TRUE) # Assign more meaningful variable names colnames(crime) <- c("crime.per.million", "young.males", "is.south", "average.ed", "exp.per.cap.1960", "exp.per.cap.1959", "labour.part", "male.per.fem", "population", "nonwhite", "unemp.youth", "unemp.adult", "median.assets", "num.low.salary") # Convert is.south to a factor # Divide average.ed by 10 so that the variable is actually average education # Convert median assets to 1000's of dollars instead of 10's crime <- transform(crime, is.south = as.factor(is.south), average.ed = average.ed / 10, median.assets = median.assets / 100) # print summary of the data summary(crime) # Scatter plot of outcome (crime.per.million) against average.ed qplot(average.ed, crime.per.million, data = crime) # correlation between education and crime with(crime, cor(average.ed, crime.per.million)) # Scatter plot of outcome (crime.per.million) against median.assets qplot(median.assets, crime.per.million, data = crime) # correlation between education and crime with(crime, cor(median.assets, crime.per.million)) # Boxplots showing crime rate broken down by southern vs non-southern state qplot(is.south, crime.per.million, geom = "boxplot", data = crime) crime.lm <- lm(crime.per.million ~ ., data = crime) # Summary of the linear regression model crime.lm summary(crime.lm) options(scipen=4) # Set scipen = 0 to get back to default summary(crime.lm) # List all attributes of the linear model attributes(crime.lm) crime.lm$coef # Pull coefficients element from summary(lm) object round(summary(crime.lm)$coef, 3) # Pull the coefficients table from summary(lm) crime.lm.coef <- round(summary(crime.lm)$coef, 3) # See what this gives class(crime.lm.coef) attributes(crime.lm.coef) 
crime.lm.coef["average.ed", "Pr(>|t|)"] plot(crime.lm) diamonds.lm <- lm(price ~ carat + cut + clarity + color, data = diamonds) plot(diamonds.lm) diamonds.lm2 <- lm(log(price) ~ I(log(carat)) + cut + clarity + color, data = diamonds) plot(diamonds.lm2) economic.var.names <- c("exp.per.cap.1959", "exp.per.cap.1960", "unemp.adult", "unemp.youth", "labour.part", "median.assets") pairs(crime[,economic.var.names]) round(cor(crime[,economic.var.names]), 3) # Function taken from ?pairs Example section. panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) { usr <- par("usr"); on.exit(par(usr)) par(usr = c(0, 1, 0, 1)) r <- abs(cor(x, y)) txt <- format(c(r, 0.123456789), digits = digits)[1] txt <- paste0(prefix, txt) if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt) text(0.5, 0.5, txt, cex = pmax(1, cex.cor * r)) } # Use panel.cor to display correlations in lower panel. pairs(crime[,economic.var.names], lower.panel = panel.cor) # ggpairs from GGally library # Unlike pairs(), ggpairs() works with non-numeric # predictors in addition to numeric ones. # Consider ggpairs() for your final project ggpairs(crime[,c(economic.var.names, "is.south")], axisLabels = "internal") crime.lm.2 <- update(crime.lm, . ~ . - exp.per.cap.1959 - unemp.youth) summary(crime.lm.2) crime.lm.summary.2 <- summary(crime.lm.2) kable(crime.lm.summary.2$coef, digits = c(3, 3, 3, 4), format = 'markdown') #3. Thinking more critically about linear regression crime.lm <- lm(crime.per.million ~ ., data = crime) crime.lm2 <- update(crime.lm, . ~ . 
- exp.per.cap.1959 - unemp.youth) kable(summary(crime.lm)$coef, digits = c(3, 3, 3, 4), format = 'markdown') crime.lm.summary2 <- summary(crime.lm2) kable(crime.lm.summary2$coef, digits = c(3, 3, 3, 4), format = 'markdown') # all 95% confidence intervals confint(crime.lm2) # Just for education confint(crime.lm2, parm = "average.ed") # 75% confidence interval confint(crime.lm2, parm = "average.ed", level = 0.75) # How does 2 SE rule compare to confint output? # lower endpoint coef(crime.lm2)["average.ed"] - 2* summary(crime.lm2)$coef["average.ed", "Std. Error"] # upper endpoint coef(crime.lm2)["average.ed"] + 2* summary(crime.lm2)$coef["average.ed", "Std. Error"] my.data <- data.frame(y = c(12, 13, 10, 5, 7, 12, 15), x1 = c(6, 6.5, 5, 2.5, 3.5, 6, 7.5), x2 = c(6, 6.5, 5, 2.5, 3.5, 6, 7.5)) my.data crime.lm.summary2$coef["exp.per.cap.1960",] crime.lm.summary2$coef["average.ed",] #4. Factors in linear regression #추가 colnames(birthwt) <- c("birthwt.below.2500", "mother.age", "mother.weight", "race", "mother.smokes", "previous.prem.labor", "hypertension", "uterine.irr", "physician.visits", "birthwt.grams") birthwt <- transform(birthwt, race = as.factor(mapvalues(race, c(1, 2, 3), c("white","black", "other"))), mother.smokes = as.factor(mapvalues(mother.smokes, c(0,1), c("no", "yes"))), hypertension = as.factor(mapvalues(hypertension, c(0,1), c("no", "yes"))), uterine.irr = as.factor(mapvalues(uterine.irr, c(0,1), c("no", "yes"))) ) # Fit regression model birthwt.lm <- lm(birthwt.grams ~ race + mother.age, data = birthwt) # Regression model summary summary(birthwt.lm) # Calculate race-specific intercepts intercepts <- c(coef(birthwt.lm)["(Intercept)"], coef(birthwt.lm)["(Intercept)"] + coef(birthwt.lm)["raceother"], coef(birthwt.lm)["(Intercept)"] + coef(birthwt.lm)["racewhite"]) lines.df <- data.frame(intercepts = intercepts, slopes = rep(coef(birthwt.lm)["mother.age"], 3), race = levels(birthwt$race)) qplot(x = mother.age, y = birthwt.grams, color = race, data = 
birthwt) + geom_abline(aes(intercept = intercepts, slope = slopes, color = race), data = lines.df) head(model.matrix(birthwt.lm), 20) qplot(x = mother.age, y = birthwt.grams, color = race, data = birthwt) + geom_abline(aes(intercept = intercepts, slope = slopes, color = race), data = lines.df) qplot(x = mother.age, y = birthwt.grams, color = race, data = birthwt) + stat_smooth(method = "lm", se = FALSE, fullrange = TRUE) birthwt.lm.interact <- lm(birthwt.grams ~ race * mother.age, data = birthwt) summary(birthwt.lm.interact)
# Author: Robert J. Hijmans # Date : June 2008 # Version 1.0 # Licence GPL v3 .gdFixGeoref <- function(mdata) { gdversion <- getOption('rasterGDALVersion') test <- gdversion < '1.8.0' if (test) { if (! is.null(mdata) ) { for (i in 1:length(mdata)) { if (mdata[i] == "AREA_OR_POINT=Area") { return(FALSE) } else if (mdata[i] == "AREA_OR_POINT=Point") { return(TRUE) } } } } return(FALSE) } .rasterFromGDAL <- function(filename, band, type, sub=0, RAT=TRUE, silent=TRUE, warn=TRUE, crs="", ...) { .requireRgdal() if (sub > 0) { gdalinfo <- rgdal::GDALinfo(filename, silent=TRUE, returnRAT=FALSE, returnCategoryNames=FALSE) sub <- round(sub) subdsmdata <- attr(gdalinfo, 'subdsmdata') i <- grep(paste("SUBDATASET_", sub, "_NAME", sep=''), subdsmdata) if (length(i) > 0) { x <- subdsmdata[i[1]] filename <- unlist(strsplit(x, '='))[2] } else { stop(paste('subdataset "sub=', sub, '" not available', sep='')) } } w <- getOption('warn') on.exit(options('warn' = w)) options('warn'=-1) gdalinfo <- try ( rgdal::GDALinfo(filename, silent=silent, returnRAT=RAT, returnCategoryNames=RAT) ) options('warn'= w) if ( inherits(gdalinfo, "try-error")) { gdalinfo <- rgdal::GDALinfo(filename, silent=silent, returnRAT=FALSE, returnCategoryNames=FALSE) warning('Could not read RAT or Category names') } nc <- as.integer(gdalinfo[["columns"]]) nr <- as.integer(gdalinfo[["rows"]]) xn <- gdalinfo[["ll.x"]] xn <- round(xn, digits=9) xx <- xn + gdalinfo[["res.x"]] * nc xx <- round(xx, digits=9) yn <- gdalinfo[["ll.y"]] yn <- round(yn, digits=9) yx <- yn + gdalinfo[["res.y"]] * nr yx <- round(yx, digits=9) nbands <- as.integer(gdalinfo[["bands"]]) if (isTRUE(attr(gdalinfo, "ysign") == 1)) { warning("data seems flipped. 
Consider using: flip(x, direction='y')") } rotated <- FALSE if (gdalinfo['oblique.x'] != 0 | gdalinfo['oblique.y'] != 0) { rotated <- TRUE ## adapted from rgdal::getGeoTransFunc if (warn) { warning('\n\n This file has a rotation\n Support for such files is limited and results of data processing might be wrong.\n Proceed with caution & consider using the "rectify" function\n') } rotMat <- matrix(gdalinfo[c('res.x', 'oblique.x', 'oblique.y', 'res.y')], 2) ysign <- attr(gdalinfo, 'ysign') rotMat[4] <- rotMat[4] * ysign invMat <- solve(rotMat) offset <- c(xn, yx) trans <- function(x, inv=FALSE) { if (inv) { x <- t(t(x) - c(offset[1], offset[2])) x <- round( x %*% invMat + 0.5 ) x[x < 1] <- NA x[x[,1] > nc | x[,2] > nr, ] <- NA } else { x <- (x - 0.5) %*% rotMat x <- t(t(x) + c(offset[1], offset[2])) } return(x) } crd <- trans(cbind(c(0, 0, nc, nc), c(0, nr, 0, nr))+0.5) rot <- methods::new(".Rotation") gtr <- gdalinfo[c('ll.x', 'res.x', 'oblique.x', NA, 'oblique.y', 'res.y')] gtr[4] <- yx gtr[6] <- gtr[6] * ysign rot@geotrans <- gtr rot@transfun <- trans xn <- min(crd[,1]) xx <- max(crd[,1]) yn <- min(crd[,2]) yx <- max(crd[,2]) } mdata <- attr(gdalinfo, 'mdata') fixGeoref <- FALSE try( fixGeoref <- .gdFixGeoref(mdata), silent=TRUE ) # for ENVI files bnames <- unique(mdata[grep("Band_", mdata)]) if (length(bnames) > 0) { bn <- sapply(strsplit(bnames, '='), function(x) x[2]) bi <- gsub("Band_", "", sapply(strsplit(bnames, '='), function(x) x[1])) bnames <- try(bn[order(as.integer(bi))], silent=TRUE) if ( inherits(bnames, "try-error") ) { bnames <- NULL } } else { gobj <- rgdal::GDAL.open(filename) bnames <- rep("", nbands) for (i in 1:nbands) { objbnd <- rgdal::getRasterBand(gobj, i) bnames[i] <- rgdal::getDescription(objbnd) } rgdal::GDAL.close(gobj) } if (type == 'RasterBrick') { r <- brick(ncols=nc, nrows=nr, xmn=xn, ymn=yn, xmx=xx, ymx=yx, crs="") r@file@nbands <- r@data@nlayers <- nbands band <- 1:nbands #RAT <- FALSE } else { r <- raster(ncols=nc, nrows=nr, 
xmn=xn, ymn=yn, xmx=xx, ymx=yx, crs="") r@file@nbands <- as.integer(nbands) band <- as.integer(band) if ( band > nbands(r) ) { stop(paste("band too high. Should be between 1 and", nbands)) #if (warn) { #stop("band too high. Set to nbands") #} #band <- nbands(r) } if ( band < 1) { stop(paste("band should be 1 or higher")) #if (warn) { #stop("band too low. Set to 1") #} #band <- 1 } r@data@band <- as.integer(band) nbands <-1 } if (rotated) { r@rotated <- TRUE r@rotation <- rot } crs <- .getProj(attr(gdalinfo, 'projection'), crs) r@crs <- CRS(crs, TRUE) #r@crs <- CRS(crs, FALSE) # F to avoid warnings about other than WGS84 datums or ellipsoids # r@history[[1]] <- mdata bi <- attr(gdalinfo, 'df') GDType <- as.character(bi[['GDType']]) hasNoDataValues <- bi[['hasNoDataValue']] NoDataValue <- bi[['NoDataValue']] # if (getOption('rasterNewRGDALVersion')) { # sbi <- attr(gdalinfo, 'sdf') # Bmin <- sbi[['Bmin']] # Bmax <- sbi[['Bmax']] # } else { Bmin <- bi[['Bmin']] Bmax <- bi[['Bmax']] # } RATlist <- attr(gdalinfo, 'RATlist') CATlist <- attr(gdalinfo, 'CATlist') blockrows <- integer(nbands) blockcols <- integer(nbands) x <- rgdal::GDAL.open(filename, silent=TRUE) ct <- rgdal::getColorTable( x ) if (! 
is.null(ct)) { r@legend@colortable <- ct } for (i in 1:nbands) { bs <- rgdal::getRasterBlockSize( rgdal::getRasterBand(x, i) ) blockrows[i] <- bs[1] blockcols[i] <- bs[2] } rgdal::GDAL.close(x) r@file@blockrows <- blockrows r@file@blockcols <- blockcols if (fixGeoref) { message('Fixing "AREA_OR_POINT=Point" georeference') rs <- res(r) xmin(r) <- xmin(r) - 0.5 * rs[1] xmax(r) <- xmax(r) - 0.5 * rs[1] ymin(r) <- ymin(r) + 0.5 * rs[2] ymax(r) <- ymax(r) + 0.5 * rs[2] } if (type == 'RasterBrick') { ub <- unique(bnames) if (length(ub) == nlayers(r)) { names(r) <- bnames } else { names(r) <- rep(gsub(" ", "_", extension(basename(filename), "")), nbands) } } else { lnames <- gsub(" ", "_", extension(basename(filename), "")) if (nbands > 1) { lnames <- paste(lnames, '_', band, sep='') } names(r) <- lnames } r@file@name <- filename r@file@driver <- 'gdal' r@data@fromdisk <- TRUE datatype <- "FLT4S" minv <- rep(Inf, nlayers(r)) maxv <- rep(-Inf, nlayers(r)) try ( minv <- as.numeric( Bmin ) , silent=TRUE ) try ( maxv <- as.numeric( Bmax ) , silent=TRUE ) minv[minv == -4294967295] <- Inf maxv[maxv == 4294967295] <- -Inf try ( datatype <- .getRasterDType ( GDType[1] ), silent=TRUE ) if ( all(c(is.finite(minv), is.finite(maxv)))) { r@data@haveminmax <- TRUE } r@file@datanotation <- datatype r@data@min <- minv[band] r@data@max <- maxv[band] rats <- ! sapply(RATlist, is.null) if (any(rats)) { att <- vector(length=nlayers(r), mode='list') for (i in 1:length(RATlist)) { if (! 
is.null(RATlist[[i]])) { dr <- data.frame(RATlist[[i]], stringsAsFactors=TRUE) wv <- which(colnames(dr)=='VALUE') if (length(wv) > 0) { if (wv != 1) { dr <- data.frame(dr[,wv,drop=FALSE], dr[,-wv,drop=FALSE]) } colnames(dr)[1] <- 'ID' } else { if (all((colnames(dr) %in% c('Red', 'Green', 'Blue', 'Opacity', 'Histogram')))) { # this is really a color table rats[i] <- FALSE if (is.null(ct)) { r@legend@colortable <- grDevices::rgb(dr$Red, dr$Green, dr$Blue, dr$Opacity) } next } else { j <- which(colnames(dr) == 'Histogram') if (isTRUE(j>0) & ncol(dr) > 1) { dr <- data.frame(ID=0:(nrow(dr)-1), COUNT=dr[,j], dr[,-j,drop=FALSE]) } else { dr <- data.frame(ID=0:(nrow(dr)-1), dr) } } } att[[i]] <- dr } } r@data@attributes <- att[band] r@data@isfactor <- rats[band] } else { cats <- ! sapply(CATlist, is.null) if (any(cats)) { att <- vector(length=nlayers(r), mode='list') for (i in 1:length(CATlist)) { if (! is.null(CATlist[[i]])) { att[[i]] <- data.frame(ID=(1:length(CATlist[[i]]))-1, category=CATlist[[i]], stringsAsFactors=TRUE) } } r@data@attributes <- att[band] r@data@isfactor <- cats[band] } } return(r) }
/R/rasterFromGDAL.R
no_license
szanardo-rms/raster
R
false
false
8,528
r
# Author: Robert J. Hijmans # Date : June 2008 # Version 1.0 # Licence GPL v3 .gdFixGeoref <- function(mdata) { gdversion <- getOption('rasterGDALVersion') test <- gdversion < '1.8.0' if (test) { if (! is.null(mdata) ) { for (i in 1:length(mdata)) { if (mdata[i] == "AREA_OR_POINT=Area") { return(FALSE) } else if (mdata[i] == "AREA_OR_POINT=Point") { return(TRUE) } } } } return(FALSE) } .rasterFromGDAL <- function(filename, band, type, sub=0, RAT=TRUE, silent=TRUE, warn=TRUE, crs="", ...) { .requireRgdal() if (sub > 0) { gdalinfo <- rgdal::GDALinfo(filename, silent=TRUE, returnRAT=FALSE, returnCategoryNames=FALSE) sub <- round(sub) subdsmdata <- attr(gdalinfo, 'subdsmdata') i <- grep(paste("SUBDATASET_", sub, "_NAME", sep=''), subdsmdata) if (length(i) > 0) { x <- subdsmdata[i[1]] filename <- unlist(strsplit(x, '='))[2] } else { stop(paste('subdataset "sub=', sub, '" not available', sep='')) } } w <- getOption('warn') on.exit(options('warn' = w)) options('warn'=-1) gdalinfo <- try ( rgdal::GDALinfo(filename, silent=silent, returnRAT=RAT, returnCategoryNames=RAT) ) options('warn'= w) if ( inherits(gdalinfo, "try-error")) { gdalinfo <- rgdal::GDALinfo(filename, silent=silent, returnRAT=FALSE, returnCategoryNames=FALSE) warning('Could not read RAT or Category names') } nc <- as.integer(gdalinfo[["columns"]]) nr <- as.integer(gdalinfo[["rows"]]) xn <- gdalinfo[["ll.x"]] xn <- round(xn, digits=9) xx <- xn + gdalinfo[["res.x"]] * nc xx <- round(xx, digits=9) yn <- gdalinfo[["ll.y"]] yn <- round(yn, digits=9) yx <- yn + gdalinfo[["res.y"]] * nr yx <- round(yx, digits=9) nbands <- as.integer(gdalinfo[["bands"]]) if (isTRUE(attr(gdalinfo, "ysign") == 1)) { warning("data seems flipped. 
Consider using: flip(x, direction='y')") } rotated <- FALSE if (gdalinfo['oblique.x'] != 0 | gdalinfo['oblique.y'] != 0) { rotated <- TRUE ## adapted from rgdal::getGeoTransFunc if (warn) { warning('\n\n This file has a rotation\n Support for such files is limited and results of data processing might be wrong.\n Proceed with caution & consider using the "rectify" function\n') } rotMat <- matrix(gdalinfo[c('res.x', 'oblique.x', 'oblique.y', 'res.y')], 2) ysign <- attr(gdalinfo, 'ysign') rotMat[4] <- rotMat[4] * ysign invMat <- solve(rotMat) offset <- c(xn, yx) trans <- function(x, inv=FALSE) { if (inv) { x <- t(t(x) - c(offset[1], offset[2])) x <- round( x %*% invMat + 0.5 ) x[x < 1] <- NA x[x[,1] > nc | x[,2] > nr, ] <- NA } else { x <- (x - 0.5) %*% rotMat x <- t(t(x) + c(offset[1], offset[2])) } return(x) } crd <- trans(cbind(c(0, 0, nc, nc), c(0, nr, 0, nr))+0.5) rot <- methods::new(".Rotation") gtr <- gdalinfo[c('ll.x', 'res.x', 'oblique.x', NA, 'oblique.y', 'res.y')] gtr[4] <- yx gtr[6] <- gtr[6] * ysign rot@geotrans <- gtr rot@transfun <- trans xn <- min(crd[,1]) xx <- max(crd[,1]) yn <- min(crd[,2]) yx <- max(crd[,2]) } mdata <- attr(gdalinfo, 'mdata') fixGeoref <- FALSE try( fixGeoref <- .gdFixGeoref(mdata), silent=TRUE ) # for ENVI files bnames <- unique(mdata[grep("Band_", mdata)]) if (length(bnames) > 0) { bn <- sapply(strsplit(bnames, '='), function(x) x[2]) bi <- gsub("Band_", "", sapply(strsplit(bnames, '='), function(x) x[1])) bnames <- try(bn[order(as.integer(bi))], silent=TRUE) if ( inherits(bnames, "try-error") ) { bnames <- NULL } } else { gobj <- rgdal::GDAL.open(filename) bnames <- rep("", nbands) for (i in 1:nbands) { objbnd <- rgdal::getRasterBand(gobj, i) bnames[i] <- rgdal::getDescription(objbnd) } rgdal::GDAL.close(gobj) } if (type == 'RasterBrick') { r <- brick(ncols=nc, nrows=nr, xmn=xn, ymn=yn, xmx=xx, ymx=yx, crs="") r@file@nbands <- r@data@nlayers <- nbands band <- 1:nbands #RAT <- FALSE } else { r <- raster(ncols=nc, nrows=nr, 
xmn=xn, ymn=yn, xmx=xx, ymx=yx, crs="") r@file@nbands <- as.integer(nbands) band <- as.integer(band) if ( band > nbands(r) ) { stop(paste("band too high. Should be between 1 and", nbands)) #if (warn) { #stop("band too high. Set to nbands") #} #band <- nbands(r) } if ( band < 1) { stop(paste("band should be 1 or higher")) #if (warn) { #stop("band too low. Set to 1") #} #band <- 1 } r@data@band <- as.integer(band) nbands <-1 } if (rotated) { r@rotated <- TRUE r@rotation <- rot } crs <- .getProj(attr(gdalinfo, 'projection'), crs) r@crs <- CRS(crs, TRUE) #r@crs <- CRS(crs, FALSE) # F to avoid warnings about other than WGS84 datums or ellipsoids # r@history[[1]] <- mdata bi <- attr(gdalinfo, 'df') GDType <- as.character(bi[['GDType']]) hasNoDataValues <- bi[['hasNoDataValue']] NoDataValue <- bi[['NoDataValue']] # if (getOption('rasterNewRGDALVersion')) { # sbi <- attr(gdalinfo, 'sdf') # Bmin <- sbi[['Bmin']] # Bmax <- sbi[['Bmax']] # } else { Bmin <- bi[['Bmin']] Bmax <- bi[['Bmax']] # } RATlist <- attr(gdalinfo, 'RATlist') CATlist <- attr(gdalinfo, 'CATlist') blockrows <- integer(nbands) blockcols <- integer(nbands) x <- rgdal::GDAL.open(filename, silent=TRUE) ct <- rgdal::getColorTable( x ) if (! 
is.null(ct)) { r@legend@colortable <- ct } for (i in 1:nbands) { bs <- rgdal::getRasterBlockSize( rgdal::getRasterBand(x, i) ) blockrows[i] <- bs[1] blockcols[i] <- bs[2] } rgdal::GDAL.close(x) r@file@blockrows <- blockrows r@file@blockcols <- blockcols if (fixGeoref) { message('Fixing "AREA_OR_POINT=Point" georeference') rs <- res(r) xmin(r) <- xmin(r) - 0.5 * rs[1] xmax(r) <- xmax(r) - 0.5 * rs[1] ymin(r) <- ymin(r) + 0.5 * rs[2] ymax(r) <- ymax(r) + 0.5 * rs[2] } if (type == 'RasterBrick') { ub <- unique(bnames) if (length(ub) == nlayers(r)) { names(r) <- bnames } else { names(r) <- rep(gsub(" ", "_", extension(basename(filename), "")), nbands) } } else { lnames <- gsub(" ", "_", extension(basename(filename), "")) if (nbands > 1) { lnames <- paste(lnames, '_', band, sep='') } names(r) <- lnames } r@file@name <- filename r@file@driver <- 'gdal' r@data@fromdisk <- TRUE datatype <- "FLT4S" minv <- rep(Inf, nlayers(r)) maxv <- rep(-Inf, nlayers(r)) try ( minv <- as.numeric( Bmin ) , silent=TRUE ) try ( maxv <- as.numeric( Bmax ) , silent=TRUE ) minv[minv == -4294967295] <- Inf maxv[maxv == 4294967295] <- -Inf try ( datatype <- .getRasterDType ( GDType[1] ), silent=TRUE ) if ( all(c(is.finite(minv), is.finite(maxv)))) { r@data@haveminmax <- TRUE } r@file@datanotation <- datatype r@data@min <- minv[band] r@data@max <- maxv[band] rats <- ! sapply(RATlist, is.null) if (any(rats)) { att <- vector(length=nlayers(r), mode='list') for (i in 1:length(RATlist)) { if (! 
is.null(RATlist[[i]])) { dr <- data.frame(RATlist[[i]], stringsAsFactors=TRUE) wv <- which(colnames(dr)=='VALUE') if (length(wv) > 0) { if (wv != 1) { dr <- data.frame(dr[,wv,drop=FALSE], dr[,-wv,drop=FALSE]) } colnames(dr)[1] <- 'ID' } else { if (all((colnames(dr) %in% c('Red', 'Green', 'Blue', 'Opacity', 'Histogram')))) { # this is really a color table rats[i] <- FALSE if (is.null(ct)) { r@legend@colortable <- grDevices::rgb(dr$Red, dr$Green, dr$Blue, dr$Opacity) } next } else { j <- which(colnames(dr) == 'Histogram') if (isTRUE(j>0) & ncol(dr) > 1) { dr <- data.frame(ID=0:(nrow(dr)-1), COUNT=dr[,j], dr[,-j,drop=FALSE]) } else { dr <- data.frame(ID=0:(nrow(dr)-1), dr) } } } att[[i]] <- dr } } r@data@attributes <- att[band] r@data@isfactor <- rats[band] } else { cats <- ! sapply(CATlist, is.null) if (any(cats)) { att <- vector(length=nlayers(r), mode='list') for (i in 1:length(CATlist)) { if (! is.null(CATlist[[i]])) { att[[i]] <- data.frame(ID=(1:length(CATlist[[i]]))-1, category=CATlist[[i]], stringsAsFactors=TRUE) } } r@data@attributes <- att[band] r@data@isfactor <- cats[band] } } return(r) }
#
# This is the server logic of a Shiny web application.
#
# Reads EEA air-quality monitoring data for the Czech Republic (one set of
# CSV files per pollutant plus a station list), then builds reactive datasets
# that drive the time-series plot, the station map, the data table and the
# CSV/report download handlers.

shinyServer(function(input, output, session) {

  library(shiny)
  library(tidyverse)
  library(tidyr)
  library(dplyr)
  library(lubridate)
  library(ggplot2)

  # Load stations; drop the Network column and sort alphabetically by name.
  stations_data <- read.csv("Project/__Stations.csv", stringsAsFactors = FALSE)
  stations_data <- stations_data %>%
    select(everything(), -Network) %>%
    arrange(StationName)

  # Load pollution data based on pollutant code.
  # NOTE(review): hard-coded absolute path makes the app non-portable;
  # consider a path relative to the app directory.
  folder <- "/Users/paulina/_Glasgow/R/AirPollution_Project/Project/"
  pm25_list <- list.files(path = folder, pattern = "PM2")
  pm10_list <- list.files(path = folder, pattern = "PM10")
  so2_list <- list.files(path = folder, pattern = "SO2")
  no2_list <- list.files(path = folder, pattern = "NO2")
  all_list <- list.files(path = folder, pattern = "CZ")

  pm25_data <- do.call("rbind", lapply(pm25_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))
  pm10_data <- do.call("rbind", lapply(pm10_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))
  so2_data <- do.call("rbind", lapply(so2_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))
  no2_data <- do.call("rbind", lapply(no2_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))

  # Reactive accessors for the UI inputs (see ui.R).
  pollutants <- reactive({input$pollutant})
  stationsList <- reactive({input$stationsList})
  aggregation <- reactive({input$selectAggregation})
  hour <- reactive({input$hour})
  threshold <- reactive({input$threshold})

  # Dataset for the selected pollutant, with the Year/Month/Day/Hour columns
  # collapsed into a single POSIXct Date column.
  data <- reactive({
    if (pollutants() == "PM2.5") {
      data <- pm25_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    } else if (pollutants() == "PM10") {
      data <- pm10_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    } else if (pollutants() == "SO2") {
      # BUGFIX: this branch previously returned no2_data and the NO2
      # fallback below returned so2_data, so the two pollutants were swapped.
      data <- so2_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    } else {
      data <- no2_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    }
  })

  # Hourly measurements joined to station metadata, filtered by the selected
  # stations, date range and hour-of-day range.
  rawHourlyData <- reactive({
    mergedData <- merge(x = data(), y = stations_data,
                        by.x = c("AirQualityStationEoICode"),
                        by.y = c("EoICode"), all.y = TRUE)
    mergedData %>%
      filter(StationName %in% stationsList()) %>%
      filter(Date >= input$dateRange[1] & Date <= input$dateRange[2]) %>%
      filter(Hour >= input$hour[1] & Hour <= input$hour[2])
  })

  # Per-station daily mean concentration.
  dailyAverages <- reactive({
    rawHourlyData() %>%
      group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
               Latitude, Longitude) %>%
      summarise(averageDailyConcentration = mean(Concentration))
  })

  # Per-station daily maximum concentration.
  dailyMaxima <- reactive({
    rawHourlyData() %>%
      group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
               Latitude, Longitude) %>%
      summarise(maxDailyConcentration = max(Concentration))
  })

  # Count of hours whose concentration meets or exceeds the threshold.
  # NOTE(review): the "per day" and "per year" branches are currently
  # identical (both group by Date); the yearly branch presumably should
  # group by year(Date) -- confirm intended behaviour before changing.
  thresholdData <- reactive({
    if (aggregation() == "Hours per day for which threshold exceeded") {
      rawHourlyData() %>%
        mutate(Difference = Concentration - threshold()) %>%
        mutate(ExceedsThreshold = ifelse(Difference >=0, 1, 0)) %>%
        group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
                 Latitude, Longitude) %>%
        summarise(HoursCount = sum(ExceedsThreshold))
    } else if (aggregation() == "Hours per year for which threshold exceeded") {
      rawHourlyData() %>%
        mutate(Difference = Concentration - threshold()) %>%
        mutate(ExceedsThreshold = ifelse(Difference >=0, 1, 0)) %>%
        group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
                 Latitude, Longitude) %>%
        summarise(HoursCount = sum(ExceedsThreshold))
    } else {
      rawHourlyData() %>%
        mutate(Difference = Concentration - threshold()) %>%
        mutate(ExceedsThreshold = ifelse(Difference >=0, 1, 0)) %>%
        group_by(day(Date), AirPollutant, AirQualityStationEoICode,
                 StationName, Latitude, Longitude) %>%
        summarise(HoursCount = sum(ExceedsThreshold))
    }
  })

  # Dataset feeding the plot/map/table, picked by the aggregation level.
  useData <- reactive({
    if (aggregation() == "Raw hourly data") {
      rawHourlyData()
    } else if (aggregation() == "Daily average") {
      dailyAverages()
    } else if (aggregation() == "Daily maxima") {
      dailyMaxima()
    } else if (aggregation() == "Hours per day for which threshold exceeded") {
      # not implemented yet (thresholdData() is not wired in)
    } else if (aggregation() == "Hours per year for which threshold exceeded") {
      # not implemented yet
    } else {
    }
  })

  # Observed date range of the current dataset, used to clamp the date picker.
  # NOTE(review): this returns a one-row (or per-group) data frame; the
  # observer below indexes it positionally with dates()[1] / dates()[2] --
  # dates()$min / dates()$max would be clearer. Behaviour kept as-is.
  dates <- reactive({
    useData() %>%
      summarise(min = min(useData()$Date, na.rm = TRUE),
                max = max(useData()$Date, na.rm = TRUE))
  })

  observe({
    updateDateRangeInput(session, "dateRange",
                         start = dates()[1],
                         end = dates()[2],
                         min = dates()[1],
                         max = dates()[2])
  })

  # Time-series plot with EU air-quality limit lines where applicable
  # (hourly: SO2 350, NO2 200; daily: PM10 50, SO2 125).
  output$distPlot <- renderPlot({
    p <- ggplot(useData())
    if (aggregation() == "Raw hourly data") {
      p <- p + aes(x = Date, y = Concentration, color = StationName) +
        geom_line() +
        labs(color = "Station Name")
      if (pollutants() == "SO2") {
        p <- p + geom_hline(aes(yintercept = 350,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else if (pollutants() == "NO2") {
        p <- p + geom_hline(aes(yintercept = 200,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else {
        # no hourly limit line for the other pollutants
      }
    } else if (aggregation() == "Daily average") {
      p <- p + aes(x = Date, y = averageDailyConcentration,
                   color = StationName) +
        geom_line() +
        labs(y = "Average Daily Concentration", color = "Station Name")
      if (pollutants() == "PM10") {
        p <- p + geom_hline(aes(yintercept = 50,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else if (pollutants() == "SO2") {
        p <- p + geom_hline(aes(yintercept = 125,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else {
        # no daily limit line for the other pollutants
      }
    } else if (aggregation() == "Daily maxima") {
      p <- p + aes(x = Date, y = maxDailyConcentration, color = StationName) +
        geom_line() +
        labs(y = "Maximum Daily Concentration", color = "Station Name")
      if (pollutants() == "PM10") {
        p <- p + geom_hline(aes(yintercept = 50,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else if (pollutants() == "SO2") {
        p <- p + geom_hline(aes(yintercept = 125,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else {
        # no daily limit line for the other pollutants
      }
    } else {
      # threshold aggregations are not plotted yet
    }
    p + labs(caption = "(based on data from European Environmental Agency)")
  })

  # Map of the monitored stations, labelled with their names.
  output$map <- renderPlot({
    maps::map("world", "CZech Republic")
    points(useData()$Longitude, useData()$Latitude, pch = 16, col = "blue")
    text(useData()$Longitude, y = useData()$Latitude, useData()$StationName,
         pos = 4)
  })

  output$table <- DT::renderDT({
    useData()
  })

  # Download the currently displayed dataset as CSV.
  output$downloadDataset <- downloadHandler(
    filename = "airPollutionPlotData.csv",
    content = function(file) {
      write.csv(useData(), file, row.names = FALSE)
    }
  )

  # Render report.Rmd in a temp directory so the app directory stays
  # read-only (works on e.g. shinyapps.io).
  output$report <- downloadHandler(
    filename = "report.doc",
    content = function(file) {
      tempReport <- file.path(tempdir(), "report.Rmd")
      file.copy("report.Rmd", tempReport, overwrite = TRUE)
      # NOTE(review): input$slider is not among the inputs referenced above
      # -- confirm it exists in ui.R.
      params <- list(n = input$slider)
      rmarkdown::render(tempReport,
                        output_file = file,
                        params = params,
                        envir = new.env(parent = globalenv())
      )
    }
  )
})
/server.R
no_license
was1paulina/airpollution
R
false
false
9,579
r
#
# This is the server logic of a Shiny web application.
#
# Reads EEA air-quality monitoring data for the Czech Republic (one set of
# CSV files per pollutant plus a station list), then builds reactive datasets
# that drive the time-series plot, the station map, the data table and the
# CSV/report download handlers.

shinyServer(function(input, output, session) {

  library(shiny)
  library(tidyverse)
  library(tidyr)
  library(dplyr)
  library(lubridate)
  library(ggplot2)

  # Load stations; drop the Network column and sort alphabetically by name.
  stations_data <- read.csv("Project/__Stations.csv", stringsAsFactors = FALSE)
  stations_data <- stations_data %>%
    select(everything(), -Network) %>%
    arrange(StationName)

  # Load pollution data based on pollutant code.
  # NOTE(review): hard-coded absolute path makes the app non-portable;
  # consider a path relative to the app directory.
  folder <- "/Users/paulina/_Glasgow/R/AirPollution_Project/Project/"
  pm25_list <- list.files(path = folder, pattern = "PM2")
  pm10_list <- list.files(path = folder, pattern = "PM10")
  so2_list <- list.files(path = folder, pattern = "SO2")
  no2_list <- list.files(path = folder, pattern = "NO2")
  all_list <- list.files(path = folder, pattern = "CZ")

  pm25_data <- do.call("rbind", lapply(pm25_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))
  pm10_data <- do.call("rbind", lapply(pm10_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))
  so2_data <- do.call("rbind", lapply(so2_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))
  no2_data <- do.call("rbind", lapply(no2_list, function(x)
    read.csv(paste(folder, x, sep=''), stringsAsFactors = FALSE)))

  # Reactive accessors for the UI inputs (see ui.R).
  pollutants <- reactive({input$pollutant})
  stationsList <- reactive({input$stationsList})
  aggregation <- reactive({input$selectAggregation})
  hour <- reactive({input$hour})
  threshold <- reactive({input$threshold})

  # Dataset for the selected pollutant, with the Year/Month/Day/Hour columns
  # collapsed into a single POSIXct Date column.
  data <- reactive({
    if (pollutants() == "PM2.5") {
      data <- pm25_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    } else if (pollutants() == "PM10") {
      data <- pm10_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    } else if (pollutants() == "SO2") {
      # BUGFIX: this branch previously returned no2_data and the NO2
      # fallback below returned so2_data, so the two pollutants were swapped.
      data <- so2_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    } else {
      data <- no2_data %>%
        mutate(Date = make_datetime(Year, Month, Day, Hour)) %>%
        select(Date, everything()) %>%
        select(-(Year:Day))
    }
  })

  # Hourly measurements joined to station metadata, filtered by the selected
  # stations, date range and hour-of-day range.
  rawHourlyData <- reactive({
    mergedData <- merge(x = data(), y = stations_data,
                        by.x = c("AirQualityStationEoICode"),
                        by.y = c("EoICode"), all.y = TRUE)
    mergedData %>%
      filter(StationName %in% stationsList()) %>%
      filter(Date >= input$dateRange[1] & Date <= input$dateRange[2]) %>%
      filter(Hour >= input$hour[1] & Hour <= input$hour[2])
  })

  # Per-station daily mean concentration.
  dailyAverages <- reactive({
    rawHourlyData() %>%
      group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
               Latitude, Longitude) %>%
      summarise(averageDailyConcentration = mean(Concentration))
  })

  # Per-station daily maximum concentration.
  dailyMaxima <- reactive({
    rawHourlyData() %>%
      group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
               Latitude, Longitude) %>%
      summarise(maxDailyConcentration = max(Concentration))
  })

  # Count of hours whose concentration meets or exceeds the threshold.
  # NOTE(review): the "per day" and "per year" branches are currently
  # identical (both group by Date); the yearly branch presumably should
  # group by year(Date) -- confirm intended behaviour before changing.
  thresholdData <- reactive({
    if (aggregation() == "Hours per day for which threshold exceeded") {
      rawHourlyData() %>%
        mutate(Difference = Concentration - threshold()) %>%
        mutate(ExceedsThreshold = ifelse(Difference >=0, 1, 0)) %>%
        group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
                 Latitude, Longitude) %>%
        summarise(HoursCount = sum(ExceedsThreshold))
    } else if (aggregation() == "Hours per year for which threshold exceeded") {
      rawHourlyData() %>%
        mutate(Difference = Concentration - threshold()) %>%
        mutate(ExceedsThreshold = ifelse(Difference >=0, 1, 0)) %>%
        group_by(Date, AirPollutant, AirQualityStationEoICode, StationName,
                 Latitude, Longitude) %>%
        summarise(HoursCount = sum(ExceedsThreshold))
    } else {
      rawHourlyData() %>%
        mutate(Difference = Concentration - threshold()) %>%
        mutate(ExceedsThreshold = ifelse(Difference >=0, 1, 0)) %>%
        group_by(day(Date), AirPollutant, AirQualityStationEoICode,
                 StationName, Latitude, Longitude) %>%
        summarise(HoursCount = sum(ExceedsThreshold))
    }
  })

  # Dataset feeding the plot/map/table, picked by the aggregation level.
  useData <- reactive({
    if (aggregation() == "Raw hourly data") {
      rawHourlyData()
    } else if (aggregation() == "Daily average") {
      dailyAverages()
    } else if (aggregation() == "Daily maxima") {
      dailyMaxima()
    } else if (aggregation() == "Hours per day for which threshold exceeded") {
      # not implemented yet (thresholdData() is not wired in)
    } else if (aggregation() == "Hours per year for which threshold exceeded") {
      # not implemented yet
    } else {
    }
  })

  # Observed date range of the current dataset, used to clamp the date picker.
  # NOTE(review): this returns a one-row (or per-group) data frame; the
  # observer below indexes it positionally with dates()[1] / dates()[2] --
  # dates()$min / dates()$max would be clearer. Behaviour kept as-is.
  dates <- reactive({
    useData() %>%
      summarise(min = min(useData()$Date, na.rm = TRUE),
                max = max(useData()$Date, na.rm = TRUE))
  })

  observe({
    updateDateRangeInput(session, "dateRange",
                         start = dates()[1],
                         end = dates()[2],
                         min = dates()[1],
                         max = dates()[2])
  })

  # Time-series plot with EU air-quality limit lines where applicable
  # (hourly: SO2 350, NO2 200; daily: PM10 50, SO2 125).
  output$distPlot <- renderPlot({
    p <- ggplot(useData())
    if (aggregation() == "Raw hourly data") {
      p <- p + aes(x = Date, y = Concentration, color = StationName) +
        geom_line() +
        labs(color = "Station Name")
      if (pollutants() == "SO2") {
        p <- p + geom_hline(aes(yintercept = 350,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else if (pollutants() == "NO2") {
        p <- p + geom_hline(aes(yintercept = 200,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else {
        # no hourly limit line for the other pollutants
      }
    } else if (aggregation() == "Daily average") {
      p <- p + aes(x = Date, y = averageDailyConcentration,
                   color = StationName) +
        geom_line() +
        labs(y = "Average Daily Concentration", color = "Station Name")
      if (pollutants() == "PM10") {
        p <- p + geom_hline(aes(yintercept = 50,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else if (pollutants() == "SO2") {
        p <- p + geom_hline(aes(yintercept = 125,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else {
        # no daily limit line for the other pollutants
      }
    } else if (aggregation() == "Daily maxima") {
      p <- p + aes(x = Date, y = maxDailyConcentration, color = StationName) +
        geom_line() +
        labs(y = "Maximum Daily Concentration", color = "Station Name")
      if (pollutants() == "PM10") {
        p <- p + geom_hline(aes(yintercept = 50,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else if (pollutants() == "SO2") {
        p <- p + geom_hline(aes(yintercept = 125,
                                linetype = "EU Air Quality Standard"),
                            colour= "blue")
      } else {
        # no daily limit line for the other pollutants
      }
    } else {
      # threshold aggregations are not plotted yet
    }
    p + labs(caption = "(based on data from European Environmental Agency)")
  })

  # Map of the monitored stations, labelled with their names.
  output$map <- renderPlot({
    maps::map("world", "CZech Republic")
    points(useData()$Longitude, useData()$Latitude, pch = 16, col = "blue")
    text(useData()$Longitude, y = useData()$Latitude, useData()$StationName,
         pos = 4)
  })

  output$table <- DT::renderDT({
    useData()
  })

  # Download the currently displayed dataset as CSV.
  output$downloadDataset <- downloadHandler(
    filename = "airPollutionPlotData.csv",
    content = function(file) {
      write.csv(useData(), file, row.names = FALSE)
    }
  )

  # Render report.Rmd in a temp directory so the app directory stays
  # read-only (works on e.g. shinyapps.io).
  output$report <- downloadHandler(
    filename = "report.doc",
    content = function(file) {
      tempReport <- file.path(tempdir(), "report.Rmd")
      file.copy("report.Rmd", tempReport, overwrite = TRUE)
      # NOTE(review): input$slider is not among the inputs referenced above
      # -- confirm it exists in ui.R.
      params <- list(n = input$slider)
      rmarkdown::render(tempReport,
                        output_file = file,
                        params = params,
                        envir = new.env(parent = globalenv())
      )
    }
  )
})
# Compare questionable-research-practice (QRP) and open-science ratings
# between US and non-US respondents, item by item (survey items R3-R17).
#
# For each item the script:
#   1. pulls the item column plus the US and Career indicators
#      (Career is kept because the career-stage analysis shares this layout),
#   2. drops respondents with no answer for that item,
#   3. splits the sample into US (US == 1) and non-US (US == 0) groups,
#   4. runs a one-sided Wilcoxon rank-sum test (stored as wtest_1..wtest_15),
#   5. prints group descriptives via summary().
#
# Items R3-R12 (QRPs) use alternative = "less"; items R13-R17 (open-science
# practices) use alternative = "greater".
#
# The per-item blocks are deliberately written out one by one (no loop) so
# that any single item can be re-run in isolation.
data <- read.csv("Full_Responses.csv")

# R3 -- omitting non-significant studies or variables
T_data_R3_3 <- dplyr::select(data, R3_3, US, Career)
T_data_R3_3 <- T_data_R3_3[!is.na(T_data_R3_3$R3_3), ]
US <- subset(T_data_R3_3, T_data_R3_3$US == 1)
Non_US <- subset(T_data_R3_3, T_data_R3_3$US == 0)
wtest_1 <- wilcox.test(US$R3_3, Non_US$R3_3, alternative = "less")
summary(US)
summary(Non_US)

# R4 -- omitting non-significant covariates
T_data_R4_3 <- dplyr::select(data, R4_3, US, Career)
T_data_R4_3 <- T_data_R4_3[!is.na(T_data_R4_3$R4_3), ]
US <- subset(T_data_R4_3, T_data_R4_3$US == 1)
Non_US <- subset(T_data_R4_3, T_data_R4_3$US == 0)
wtest_2 <- wilcox.test(US$R4_3, Non_US$R4_3, alternative = "less")
summary(US)
summary(Non_US)

# R5 -- HARKing
T_data_R5_3 <- dplyr::select(data, R5_3, US, Career)
T_data_R5_3 <- T_data_R5_3[!is.na(T_data_R5_3$R5_3), ]
US <- subset(T_data_R5_3, T_data_R5_3$US == 1)
Non_US <- subset(T_data_R5_3, T_data_R5_3$US == 0)
wtest_3 <- wilcox.test(US$R5_3, Non_US$R5_3, alternative = "less")
summary(US)
summary(Non_US)

# R6 -- omitting analyses
T_data_R6_3 <- dplyr::select(data, R6_3, US, Career)
T_data_R6_3 <- T_data_R6_3[!is.na(T_data_R6_3$R6_3), ]
US <- subset(T_data_R6_3, T_data_R6_3$US == 1)
Non_US <- subset(T_data_R6_3, T_data_R6_3$US == 0)
wtest_4 <- wilcox.test(US$R6_3, Non_US$R6_3, alternative = "less")
summary(US)
summary(Non_US)

# R7 -- rounding p-values
T_data_R7_3 <- dplyr::select(data, R7_3, US, Career)
T_data_R7_3 <- T_data_R7_3[!is.na(T_data_R7_3$R7_3), ]
US <- subset(T_data_R7_3, T_data_R7_3$US == 1)
Non_US <- subset(T_data_R7_3, T_data_R7_3$US == 0)
wtest_5 <- wilcox.test(US$R7_3, Non_US$R7_3, alternative = "less")
summary(US)
summary(Non_US)

# R8 -- data-exclusion ARKing
T_data_R8_3 <- dplyr::select(data, R8_3, US, Career)
T_data_R8_3 <- T_data_R8_3[!is.na(T_data_R8_3$R8_3), ]
US <- subset(T_data_R8_3, T_data_R8_3$US == 1)
Non_US <- subset(T_data_R8_3, T_data_R8_3$US == 0)
wtest_6 <- wilcox.test(US$R8_3, Non_US$R8_3, alternative = "less")
summary(US)
summary(Non_US)

# R9 -- data peeking
T_data_R9_3 <- dplyr::select(data, R9_3, US, Career)
T_data_R9_3 <- T_data_R9_3[!is.na(T_data_R9_3$R9_3), ]
US <- subset(T_data_R9_3, T_data_R9_3$US == 1)
Non_US <- subset(T_data_R9_3, T_data_R9_3$US == 0)
wtest_7 <- wilcox.test(US$R9_3, Non_US$R9_3, alternative = "less")
summary(US)
summary(Non_US)

# R10 -- analysis gaming
T_data_R10_3 <- dplyr::select(data, R10_3, US, Career)
T_data_R10_3 <- T_data_R10_3[!is.na(T_data_R10_3$R10_3), ]
US <- subset(T_data_R10_3, T_data_R10_3$US == 1)
Non_US <- subset(T_data_R10_3, T_data_R10_3$US == 0)
wtest_8 <- wilcox.test(US$R10_3, Non_US$R10_3, alternative = "less")
summary(US)
summary(Non_US)

# R11 -- hiding methodological problems
T_data_R11_3 <- dplyr::select(data, R11_3, US, Career)
T_data_R11_3 <- T_data_R11_3[!is.na(T_data_R11_3$R11_3), ]
US <- subset(T_data_R11_3, T_data_R11_3$US == 1)
Non_US <- subset(T_data_R11_3, T_data_R11_3$US == 0)
wtest_9 <- wilcox.test(US$R11_3, Non_US$R11_3, alternative = "less")
summary(US)
summary(Non_US)

# R12 -- filling in missing data
T_data_R12_3 <- dplyr::select(data, R12_3, US, Career)
T_data_R12_3 <- T_data_R12_3[!is.na(T_data_R12_3$R12_3), ]
US <- subset(T_data_R12_3, T_data_R12_3$US == 1)
Non_US <- subset(T_data_R12_3, T_data_R12_3$US == 0)
wtest_10 <- wilcox.test(US$R12_3, Non_US$R12_3, alternative = "less")
summary(US)
summary(Non_US)

# R13 -- preregistration
T_data_R13_3 <- dplyr::select(data, R13_3, US, Career)
T_data_R13_3 <- T_data_R13_3[!is.na(T_data_R13_3$R13_3), ]
US <- subset(T_data_R13_3, T_data_R13_3$US == 1)
Non_US <- subset(T_data_R13_3, T_data_R13_3$US == 0)
wtest_11 <- wilcox.test(US$R13_3, Non_US$R13_3, alternative = "greater")
summary(US)
summary(Non_US)

# R14 -- data sharing
T_data_R14_3 <- dplyr::select(data, R14_3, US, Career)
T_data_R14_3 <- T_data_R14_3[!is.na(T_data_R14_3$R14_3), ]
US <- subset(T_data_R14_3, T_data_R14_3$US == 1)
Non_US <- subset(T_data_R14_3, T_data_R14_3$US == 0)
wtest_12 <- wilcox.test(US$R14_3, Non_US$R14_3, alternative = "greater")
summary(US)
summary(Non_US)

# R15 -- materials sharing
T_data_R15_3 <- dplyr::select(data, R15_3, US, Career)
T_data_R15_3 <- T_data_R15_3[!is.na(T_data_R15_3$R15_3), ]
US <- subset(T_data_R15_3, T_data_R15_3$US == 1)
Non_US <- subset(T_data_R15_3, T_data_R15_3$US == 0)
wtest_13 <- wilcox.test(US$R15_3, Non_US$R15_3, alternative = "greater")
summary(US)
summary(Non_US)

# R16 -- replication
T_data_R16_3 <- dplyr::select(data, R16_3, US, Career)
T_data_R16_3 <- T_data_R16_3[!is.na(T_data_R16_3$R16_3), ]
US <- subset(T_data_R16_3, T_data_R16_3$US == 1)
Non_US <- subset(T_data_R16_3, T_data_R16_3$US == 0)
wtest_14 <- wilcox.test(US$R16_3, Non_US$R16_3, alternative = "greater")
summary(US)
summary(Non_US)

# R17 -- open access
T_data_R17_3 <- dplyr::select(data, R17_3, US, Career)
T_data_R17_3 <- T_data_R17_3[!is.na(T_data_R17_3$R17_3), ]
US <- subset(T_data_R17_3, T_data_R17_3$US == 1)
Non_US <- subset(T_data_R17_3, T_data_R17_3$US == 0)
wtest_15 <- wilcox.test(US$R17_3, Non_US$R17_3, alternative = "greater")
summary(US)
summary(Non_US)

# Print the Wilcoxon output (statistic and p-value) for every item.
wtest_1
wtest_2
wtest_3
wtest_4
wtest_5
wtest_6
wtest_7
wtest_8
wtest_9
wtest_10
wtest_11
wtest_12
wtest_13
wtest_14
wtest_15
/Makel_Agnoli/Makel_orig_OSF_files/QRP_B_US_NONUS_FIXED_ANNOTATED.R
no_license
alexholcombe/ChinHolcombePickettVazireCrimSurvey
R
false
false
6,770
r
# Compare questionable-research-practice (QRP) and open-science ratings
# between US and non-US respondents, item by item (survey items R3-R17).
#
# For each item the script:
#   1. pulls the item column plus the US and Career indicators
#      (Career is kept because the career-stage analysis shares this layout),
#   2. drops respondents with no answer for that item,
#   3. splits the sample into US (US == 1) and non-US (US == 0) groups,
#   4. runs a one-sided Wilcoxon rank-sum test (stored as wtest_1..wtest_15),
#   5. prints group descriptives via summary().
#
# Items R3-R12 (QRPs) use alternative = "less"; items R13-R17 (open-science
# practices) use alternative = "greater".
#
# The per-item blocks are deliberately written out one by one (no loop) so
# that any single item can be re-run in isolation.
data <- read.csv("Full_Responses.csv")

# R3 -- omitting non-significant studies or variables
T_data_R3_3 <- dplyr::select(data, R3_3, US, Career)
T_data_R3_3 <- T_data_R3_3[!is.na(T_data_R3_3$R3_3), ]
US <- subset(T_data_R3_3, T_data_R3_3$US == 1)
Non_US <- subset(T_data_R3_3, T_data_R3_3$US == 0)
wtest_1 <- wilcox.test(US$R3_3, Non_US$R3_3, alternative = "less")
summary(US)
summary(Non_US)

# R4 -- omitting non-significant covariates
T_data_R4_3 <- dplyr::select(data, R4_3, US, Career)
T_data_R4_3 <- T_data_R4_3[!is.na(T_data_R4_3$R4_3), ]
US <- subset(T_data_R4_3, T_data_R4_3$US == 1)
Non_US <- subset(T_data_R4_3, T_data_R4_3$US == 0)
wtest_2 <- wilcox.test(US$R4_3, Non_US$R4_3, alternative = "less")
summary(US)
summary(Non_US)

# R5 -- HARKing
T_data_R5_3 <- dplyr::select(data, R5_3, US, Career)
T_data_R5_3 <- T_data_R5_3[!is.na(T_data_R5_3$R5_3), ]
US <- subset(T_data_R5_3, T_data_R5_3$US == 1)
Non_US <- subset(T_data_R5_3, T_data_R5_3$US == 0)
wtest_3 <- wilcox.test(US$R5_3, Non_US$R5_3, alternative = "less")
summary(US)
summary(Non_US)

# R6 -- omitting analyses
T_data_R6_3 <- dplyr::select(data, R6_3, US, Career)
T_data_R6_3 <- T_data_R6_3[!is.na(T_data_R6_3$R6_3), ]
US <- subset(T_data_R6_3, T_data_R6_3$US == 1)
Non_US <- subset(T_data_R6_3, T_data_R6_3$US == 0)
wtest_4 <- wilcox.test(US$R6_3, Non_US$R6_3, alternative = "less")
summary(US)
summary(Non_US)

# R7 -- rounding p-values
T_data_R7_3 <- dplyr::select(data, R7_3, US, Career)
T_data_R7_3 <- T_data_R7_3[!is.na(T_data_R7_3$R7_3), ]
US <- subset(T_data_R7_3, T_data_R7_3$US == 1)
Non_US <- subset(T_data_R7_3, T_data_R7_3$US == 0)
wtest_5 <- wilcox.test(US$R7_3, Non_US$R7_3, alternative = "less")
summary(US)
summary(Non_US)

# R8 -- data-exclusion ARKing
T_data_R8_3 <- dplyr::select(data, R8_3, US, Career)
T_data_R8_3 <- T_data_R8_3[!is.na(T_data_R8_3$R8_3), ]
US <- subset(T_data_R8_3, T_data_R8_3$US == 1)
Non_US <- subset(T_data_R8_3, T_data_R8_3$US == 0)
wtest_6 <- wilcox.test(US$R8_3, Non_US$R8_3, alternative = "less")
summary(US)
summary(Non_US)

# R9 -- data peeking
T_data_R9_3 <- dplyr::select(data, R9_3, US, Career)
T_data_R9_3 <- T_data_R9_3[!is.na(T_data_R9_3$R9_3), ]
US <- subset(T_data_R9_3, T_data_R9_3$US == 1)
Non_US <- subset(T_data_R9_3, T_data_R9_3$US == 0)
wtest_7 <- wilcox.test(US$R9_3, Non_US$R9_3, alternative = "less")
summary(US)
summary(Non_US)

# R10 -- analysis gaming
T_data_R10_3 <- dplyr::select(data, R10_3, US, Career)
T_data_R10_3 <- T_data_R10_3[!is.na(T_data_R10_3$R10_3), ]
US <- subset(T_data_R10_3, T_data_R10_3$US == 1)
Non_US <- subset(T_data_R10_3, T_data_R10_3$US == 0)
wtest_8 <- wilcox.test(US$R10_3, Non_US$R10_3, alternative = "less")
summary(US)
summary(Non_US)

# R11 -- hiding methodological problems
T_data_R11_3 <- dplyr::select(data, R11_3, US, Career)
T_data_R11_3 <- T_data_R11_3[!is.na(T_data_R11_3$R11_3), ]
US <- subset(T_data_R11_3, T_data_R11_3$US == 1)
Non_US <- subset(T_data_R11_3, T_data_R11_3$US == 0)
wtest_9 <- wilcox.test(US$R11_3, Non_US$R11_3, alternative = "less")
summary(US)
summary(Non_US)

# R12 -- filling in missing data
T_data_R12_3 <- dplyr::select(data, R12_3, US, Career)
T_data_R12_3 <- T_data_R12_3[!is.na(T_data_R12_3$R12_3), ]
US <- subset(T_data_R12_3, T_data_R12_3$US == 1)
Non_US <- subset(T_data_R12_3, T_data_R12_3$US == 0)
wtest_10 <- wilcox.test(US$R12_3, Non_US$R12_3, alternative = "less")
summary(US)
summary(Non_US)

# R13 -- preregistration
T_data_R13_3 <- dplyr::select(data, R13_3, US, Career)
T_data_R13_3 <- T_data_R13_3[!is.na(T_data_R13_3$R13_3), ]
US <- subset(T_data_R13_3, T_data_R13_3$US == 1)
Non_US <- subset(T_data_R13_3, T_data_R13_3$US == 0)
wtest_11 <- wilcox.test(US$R13_3, Non_US$R13_3, alternative = "greater")
summary(US)
summary(Non_US)

# R14 -- data sharing
T_data_R14_3 <- dplyr::select(data, R14_3, US, Career)
T_data_R14_3 <- T_data_R14_3[!is.na(T_data_R14_3$R14_3), ]
US <- subset(T_data_R14_3, T_data_R14_3$US == 1)
Non_US <- subset(T_data_R14_3, T_data_R14_3$US == 0)
wtest_12 <- wilcox.test(US$R14_3, Non_US$R14_3, alternative = "greater")
summary(US)
summary(Non_US)

# R15 -- materials sharing
T_data_R15_3 <- dplyr::select(data, R15_3, US, Career)
T_data_R15_3 <- T_data_R15_3[!is.na(T_data_R15_3$R15_3), ]
US <- subset(T_data_R15_3, T_data_R15_3$US == 1)
Non_US <- subset(T_data_R15_3, T_data_R15_3$US == 0)
wtest_13 <- wilcox.test(US$R15_3, Non_US$R15_3, alternative = "greater")
summary(US)
summary(Non_US)

# R16 -- replication
T_data_R16_3 <- dplyr::select(data, R16_3, US, Career)
T_data_R16_3 <- T_data_R16_3[!is.na(T_data_R16_3$R16_3), ]
US <- subset(T_data_R16_3, T_data_R16_3$US == 1)
Non_US <- subset(T_data_R16_3, T_data_R16_3$US == 0)
wtest_14 <- wilcox.test(US$R16_3, Non_US$R16_3, alternative = "greater")
summary(US)
summary(Non_US)

# R17 -- open access
T_data_R17_3 <- dplyr::select(data, R17_3, US, Career)
T_data_R17_3 <- T_data_R17_3[!is.na(T_data_R17_3$R17_3), ]
US <- subset(T_data_R17_3, T_data_R17_3$US == 1)
Non_US <- subset(T_data_R17_3, T_data_R17_3$US == 0)
wtest_15 <- wilcox.test(US$R17_3, Non_US$R17_3, alternative = "greater")
summary(US)
summary(Non_US)

# Print the Wilcoxon output (statistic and p-value) for every item.
wtest_1
wtest_2
wtest_3
wtest_4
wtest_5
wtest_6
wtest_7
wtest_8
wtest_9
wtest_10
wtest_11
wtest_12
wtest_13
wtest_14
wtest_15
# Test for a temporal trend in brown-bear occupancy in the Pyrenees using the
# 30 dynamic occupancy models with delta-AIC < 2.
#
# For each model: back-transform initial occupancy (psi), colonization (gamma)
# and extinction (eps) for every monitored subsection, propagate occupancy
# over 2008-2014, then run a spatial likelihood-ratio test (spaMM::fixedLRT)
# of a year effect against the intercept-only model.

# packages
library(rgdal)    ## readOGR: subsection shapefile
#library(RColorBrewer) ## for colouring maps
library(unmarked) ## dynamic occupancy models (ranef/bup)
#library(classInt)
library(spdep)    ## spatial neighbourhood structure (poly2nb/nb2mat)

# get 30 models with delta-AIC < 2 (see Table3_covariate_selection.R)
load('allmodursus.RData')
all_mod

# get covariates
# read in det/non-det data
data_occ <- read.csv('Bear_OccSM_0814_mod.csv', header=FALSE, sep=",")
# sites ids (first column)
site_list <- data_occ[,1]
# get subsections and associated covariates
sousmassif.rg <- readOGR(".", "sousmassif_og")
# get covariates
data_cov <- sousmassif.rg@data
# filter to keep only subsections with monitoring
cov <- subset(data_cov,
              select = c('Numero','alt_moy','tri_moy','dens_my','prc_frt',
                         'prc_shr','prc_rds','cnnct_f','diff_hm','Area'))
cov2 <- NULL
test_suivi <- rep(0, nrow(cov))  # 1 when the subsection is monitored
for (i in seq_len(nrow(cov))) {
  if (sum(cov[i,'Numero'] == site_list) != 0) {
    cov2 <- rbind(cov2, cov[i,])
    test_suivi[i] <- 1
  }
}

# Standardize covariates (z-scores). The names must match the covariate
# labels stored in the fitted models: they are looked up by name below.
RUG <- cov2$tri_moy   # roughness
RUG <- (RUG - mean(RUG)) / sd(RUG)
DTHM <- cov2$dens_my  # human density
DTHM <- (DTHM - mean(DTHM)) / sd(DTHM)
CVFR <- cov2$prc_frt  # forest cover
CVFR <- (CVFR - mean(CVFR)) / sd(CVFR)
CVBS <- cov2$prc_shr  # shrub cover
CVBS <- (CVBS - mean(CVBS)) / sd(CVBS)
LGRT <- cov2$prc_rds  # road length
LGRT <- (LGRT - mean(LGRT)) / sd(LGRT)
# subsection size (km2)
AREA <- (cov2$Area/1000000 - mean(cov2$Area/1000000)) / sd(cov2$Area/1000000)

# Build the adjacency matrix once: it depends only on the shapefile geometry,
# so it was needlessly rebuilt inside every model iteration before (poly2nb
# is the expensive step).
nb.r <- poly2nb(sousmassif.rg, queen=FALSE) # neighbours share an edge of length > 0
mat <- nb2mat(nb.r, style="B")              # 0/1 adjacency matrix
mat2 <- matrix(as.numeric(mat), nrow=138, byrow=TRUE)

# test trend using each of the 30 models
result <- NULL
for (i in 1:30) { # loop on models

  # psi / initial occupancy: back-transform the linear predictor per
  # monitored subsection.
  # NOTE(review): eval(parse(text = x)) fetches each covariate vector by
  # name from the global environment; get(x) would be safer if the labels
  # are always simple names -- confirm before changing.
  labels <- names(all_mod[[i]]['psi']@estimates)[-1] # drop intercept
  nb_cov <- length(labels)
  logit_intercept <- all_mod[[i]]['psi']@estimates[1]
  logit_slopes <- all_mod[[i]]['psi']@estimates[2:(2+nb_cov-1)] *
    t(sapply(labels, function(x) eval(parse(text=x))))
  logit_par <- logit_intercept + apply(logit_slopes, 2, sum)
  data_psi <- 1/(1+exp(-logit_par))

  # gam / colonization
  labels <- names(all_mod[[i]]['col']@estimates)[-1] # drop intercept
  nb_cov <- length(labels)
  logit_intercept <- all_mod[[i]]['col']@estimates[1]
  logit_slopes <- all_mod[[i]]['col']@estimates[2:(2+nb_cov-1)] *
    t(sapply(labels, function(x) eval(parse(text=x))))
  logit_par <- logit_intercept + apply(logit_slopes, 2, sum)
  data_gamma <- 1/(1+exp(-logit_par))

  # eps / extinction
  labels <- names(all_mod[[i]]['ext']@estimates)[-1] # drop intercept
  nb_cov <- length(labels)
  logit_intercept <- all_mod[[i]]['ext']@estimates[1]
  logit_slopes <- all_mod[[i]]['ext']@estimates[2:(2+nb_cov-1)] *
    t(sapply(labels, function(x) eval(parse(text=x))))
  logit_par <- logit_intercept + apply(logit_slopes, 2, sum)
  data_eps <- 1/(1+exp(-logit_par))

  # Scatter the estimates back onto the full subsection list
  # (NA where the subsection is not monitored).
  psi <- gamma <- eps <- rep(NA, nrow(data_cov))
  a <- 1
  for (j in seq_len(nrow(data_cov))) {
    if (test_suivi[j] == 1) {
      psi[j] <- data_psi[a]
      gamma[j] <- data_gamma[a]
      eps[j] <- data_eps[a]
      a <- a + 1
    }
  }

  # Propagate occupancy over 2008-2014 with the Markovian recursion
  # psi_{t+1} = psi_t * (1 - eps) + (1 - psi_t) * gamma.
  data_cov$psi <- psi
  data_cov$psi2 <- psi * (1-eps) + (1-psi) * gamma
  data_cov$psi3 <- data_cov$psi2 * (1-eps) + (1-data_cov$psi2) * gamma
  data_cov$psi4 <- data_cov$psi3 * (1-eps) + (1-data_cov$psi3) * gamma
  data_cov$psi5 <- data_cov$psi4 * (1-eps) + (1-data_cov$psi4) * gamma
  data_cov$psi6 <- data_cov$psi5 * (1-eps) + (1-data_cov$psi5) * gamma
  data_cov$psi7 <- data_cov$psi6 * (1-eps) + (1-data_cov$psi6) * gamma
  data_cov$gamma <- gamma
  data_cov$eps <- eps

  # Long-format dataset with occupancy estimates by subsection/year.
  data_trend <- data.frame(
    subsection = rep(cov$Numero, 7),
    occupancy = c(data_cov$psi, data_cov$psi2, data_cov$psi3, data_cov$psi4,
                  data_cov$psi5, data_cov$psi6, data_cov$psi7),
    year = c(rep(2008,138), rep(2009,138), rep(2010,138), rep(2011,138),
             rep(2012,138), rep(2013,138), rep(2014,138)))

  # Posterior distributions of latent occurrence using empirical Bayes.
  re <- ranef(all_mod[[i]])
  # "safer to use the posterior mean even though this will not be an integer
  # in general", see ?bup
  res <- bup(re, stat="mean") # posterior mean (currently unused below)

  # Restrict the adjacency matrix to the monitored subsections.
  mat3 <- mat2[!is.na(data_cov$psi), !is.na(data_cov$psi)]

  # Test the year effect with a spatial LRT.
  library(spaMM) # help(spaMM)
  stat <- fixedLRT(null.formula = occupancy ~ 1 + adjacency(1|subsection),
                   formula = occupancy ~ year + adjacency(1|subsection),
                   adjMatrix = mat3, family = gaussian(),
                   HLmethod = 'ML', data = data_trend)
  # spaMM overrides unmarked, and generates conflicts in using the ranef function
  detach(package:spaMM)

  result <- rbind(result, stat$basicLRT)
}
result

# BUGFIX: the LRT results below were pasted console output, which made the
# whole script fail to parse; they are kept here as a comment for reference.
#         LR2 df       pvalue
# 1  25.65404  1 4.084385e-07
# 2  23.56641  1 1.206760e-06
# 3  26.44434  1 2.712417e-07
# 4  27.87521  1 1.293973e-07
# 5  35.46650  1 2.594754e-09
# 6  76.49419  1 0.000000e+00
# 7  23.52359  1 1.233915e-06
# 8  27.85504  1 1.307531e-07
# 9  26.48704  1 2.653127e-07
# 10 35.35988  1 2.740745e-09
# 11 21.27237  1 3.984325e-06
# 12 18.10081  1 2.095128e-05
# 13 77.09445  1 0.000000e+00
# 14 36.11472  1 1.860360e-09
# 15 25.91195  1 3.573507e-07
# 16 98.22300  1 0.000000e+00
# 17 87.23120  1 0.000000e+00
# 18 34.38891  1 4.512878e-09
# 19 31.33306  1 2.173469e-08
# 20 23.73614  1 1.104885e-06
# 21 16.54959  1 4.739426e-05
# 22 72.11309  1 0.000000e+00
# 23 21.98990  1 2.740893e-06
# 24 21.14715  1 4.253332e-06
# 25 18.20122  1 1.987510e-05
# 26 22.10090  1 2.586884e-06
# 27 24.05342  1 9.369987e-07
# 28 37.54229  1 8.945223e-10
# 29 97.99203  1 0.000000e+00
# 30 37.92658  1 7.345774e-10
/test_trend_occupancy.R
no_license
oliviergimenez/ursus_Pyrenees_occupancy
R
false
false
5,453
r
# Test for a temporal trend in brown-bear occupancy in the Pyrenees using the
# 30 dynamic occupancy models with delta-AIC < 2.
#
# For each model: back-transform initial occupancy (psi), colonization (gamma)
# and extinction (eps) for every monitored subsection, propagate occupancy
# over 2008-2014, then run a spatial likelihood-ratio test (spaMM::fixedLRT)
# of a year effect against the intercept-only model.

# packages
library(rgdal)    ## readOGR: subsection shapefile
#library(RColorBrewer) ## for colouring maps
library(unmarked) ## dynamic occupancy models (ranef/bup)
#library(classInt)
library(spdep)    ## spatial neighbourhood structure (poly2nb/nb2mat)

# get 30 models with delta-AIC < 2 (see Table3_covariate_selection.R)
load('allmodursus.RData')
all_mod

# get covariates
# read in det/non-det data
data_occ <- read.csv('Bear_OccSM_0814_mod.csv', header=FALSE, sep=",")
# sites ids (first column)
site_list <- data_occ[,1]
# get subsections and associated covariates
sousmassif.rg <- readOGR(".", "sousmassif_og")
# get covariates
data_cov <- sousmassif.rg@data
# filter to keep only subsections with monitoring
cov <- subset(data_cov,
              select = c('Numero','alt_moy','tri_moy','dens_my','prc_frt',
                         'prc_shr','prc_rds','cnnct_f','diff_hm','Area'))
cov2 <- NULL
test_suivi <- rep(0, nrow(cov))  # 1 when the subsection is monitored
for (i in seq_len(nrow(cov))) {
  if (sum(cov[i,'Numero'] == site_list) != 0) {
    cov2 <- rbind(cov2, cov[i,])
    test_suivi[i] <- 1
  }
}

# Standardize covariates (z-scores). The names must match the covariate
# labels stored in the fitted models: they are looked up by name below.
RUG <- cov2$tri_moy   # roughness
RUG <- (RUG - mean(RUG)) / sd(RUG)
DTHM <- cov2$dens_my  # human density
DTHM <- (DTHM - mean(DTHM)) / sd(DTHM)
CVFR <- cov2$prc_frt  # forest cover
CVFR <- (CVFR - mean(CVFR)) / sd(CVFR)
CVBS <- cov2$prc_shr  # shrub cover
CVBS <- (CVBS - mean(CVBS)) / sd(CVBS)
LGRT <- cov2$prc_rds  # road length
LGRT <- (LGRT - mean(LGRT)) / sd(LGRT)
# subsection size (km2)
AREA <- (cov2$Area/1000000 - mean(cov2$Area/1000000)) / sd(cov2$Area/1000000)

# Build the adjacency matrix once: it depends only on the shapefile geometry,
# so it was needlessly rebuilt inside every model iteration before (poly2nb
# is the expensive step).
nb.r <- poly2nb(sousmassif.rg, queen=FALSE) # neighbours share an edge of length > 0
mat <- nb2mat(nb.r, style="B")              # 0/1 adjacency matrix
mat2 <- matrix(as.numeric(mat), nrow=138, byrow=TRUE)

# test trend using each of the 30 models
result <- NULL
for (i in 1:30) { # loop on models

  # psi / initial occupancy: back-transform the linear predictor per
  # monitored subsection.
  # NOTE(review): eval(parse(text = x)) fetches each covariate vector by
  # name from the global environment; get(x) would be safer if the labels
  # are always simple names -- confirm before changing.
  labels <- names(all_mod[[i]]['psi']@estimates)[-1] # drop intercept
  nb_cov <- length(labels)
  logit_intercept <- all_mod[[i]]['psi']@estimates[1]
  logit_slopes <- all_mod[[i]]['psi']@estimates[2:(2+nb_cov-1)] *
    t(sapply(labels, function(x) eval(parse(text=x))))
  logit_par <- logit_intercept + apply(logit_slopes, 2, sum)
  data_psi <- 1/(1+exp(-logit_par))

  # gam / colonization
  labels <- names(all_mod[[i]]['col']@estimates)[-1] # drop intercept
  nb_cov <- length(labels)
  logit_intercept <- all_mod[[i]]['col']@estimates[1]
  logit_slopes <- all_mod[[i]]['col']@estimates[2:(2+nb_cov-1)] *
    t(sapply(labels, function(x) eval(parse(text=x))))
  logit_par <- logit_intercept + apply(logit_slopes, 2, sum)
  data_gamma <- 1/(1+exp(-logit_par))

  # eps / extinction
  labels <- names(all_mod[[i]]['ext']@estimates)[-1] # drop intercept
  nb_cov <- length(labels)
  logit_intercept <- all_mod[[i]]['ext']@estimates[1]
  logit_slopes <- all_mod[[i]]['ext']@estimates[2:(2+nb_cov-1)] *
    t(sapply(labels, function(x) eval(parse(text=x))))
  logit_par <- logit_intercept + apply(logit_slopes, 2, sum)
  data_eps <- 1/(1+exp(-logit_par))

  # Scatter the estimates back onto the full subsection list
  # (NA where the subsection is not monitored).
  psi <- gamma <- eps <- rep(NA, nrow(data_cov))
  a <- 1
  for (j in seq_len(nrow(data_cov))) {
    if (test_suivi[j] == 1) {
      psi[j] <- data_psi[a]
      gamma[j] <- data_gamma[a]
      eps[j] <- data_eps[a]
      a <- a + 1
    }
  }

  # Propagate occupancy over 2008-2014 with the Markovian recursion
  # psi_{t+1} = psi_t * (1 - eps) + (1 - psi_t) * gamma.
  data_cov$psi <- psi
  data_cov$psi2 <- psi * (1-eps) + (1-psi) * gamma
  data_cov$psi3 <- data_cov$psi2 * (1-eps) + (1-data_cov$psi2) * gamma
  data_cov$psi4 <- data_cov$psi3 * (1-eps) + (1-data_cov$psi3) * gamma
  data_cov$psi5 <- data_cov$psi4 * (1-eps) + (1-data_cov$psi4) * gamma
  data_cov$psi6 <- data_cov$psi5 * (1-eps) + (1-data_cov$psi5) * gamma
  data_cov$psi7 <- data_cov$psi6 * (1-eps) + (1-data_cov$psi6) * gamma
  data_cov$gamma <- gamma
  data_cov$eps <- eps

  # Long-format dataset with occupancy estimates by subsection/year.
  data_trend <- data.frame(
    subsection = rep(cov$Numero, 7),
    occupancy = c(data_cov$psi, data_cov$psi2, data_cov$psi3, data_cov$psi4,
                  data_cov$psi5, data_cov$psi6, data_cov$psi7),
    year = c(rep(2008,138), rep(2009,138), rep(2010,138), rep(2011,138),
             rep(2012,138), rep(2013,138), rep(2014,138)))

  # Posterior distributions of latent occurrence using empirical Bayes.
  re <- ranef(all_mod[[i]])
  # "safer to use the posterior mean even though this will not be an integer
  # in general", see ?bup
  res <- bup(re, stat="mean") # posterior mean (currently unused below)

  # Restrict the adjacency matrix to the monitored subsections.
  mat3 <- mat2[!is.na(data_cov$psi), !is.na(data_cov$psi)]

  # Test the year effect with a spatial LRT.
  library(spaMM) # help(spaMM)
  stat <- fixedLRT(null.formula = occupancy ~ 1 + adjacency(1|subsection),
                   formula = occupancy ~ year + adjacency(1|subsection),
                   adjMatrix = mat3, family = gaussian(),
                   HLmethod = 'ML', data = data_trend)
  # spaMM overrides unmarked, and generates conflicts in using the ranef function
  detach(package:spaMM)

  result <- rbind(result, stat$basicLRT)
}
result

# BUGFIX: the LRT results below were pasted console output, which made the
# whole script fail to parse; they are kept here as a comment for reference.
#         LR2 df       pvalue
# 1  25.65404  1 4.084385e-07
# 2  23.56641  1 1.206760e-06
# 3  26.44434  1 2.712417e-07
# 4  27.87521  1 1.293973e-07
# 5  35.46650  1 2.594754e-09
# 6  76.49419  1 0.000000e+00
# 7  23.52359  1 1.233915e-06
# 8  27.85504  1 1.307531e-07
# 9  26.48704  1 2.653127e-07
# 10 35.35988  1 2.740745e-09
# 11 21.27237  1 3.984325e-06
# 12 18.10081  1 2.095128e-05
# 13 77.09445  1 0.000000e+00
# 14 36.11472  1 1.860360e-09
# 15 25.91195  1 3.573507e-07
# 16 98.22300  1 0.000000e+00
# 17 87.23120  1 0.000000e+00
# 18 34.38891  1 4.512878e-09
# 19 31.33306  1 2.173469e-08
# 20 23.73614  1 1.104885e-06
# 21 16.54959  1 4.739426e-05
# 22 72.11309  1 0.000000e+00
# 23 21.98990  1 2.740893e-06
# 24 21.14715  1 4.253332e-06
# 25 18.20122  1 1.987510e-05
# 26 22.10090  1 2.586884e-06
# 27 24.05342  1 9.369987e-07
# 28 37.54229  1 8.945223e-10
# 29 97.99203  1 0.000000e+00
# 30 37.92658  1 7.345774e-10
library(RTest)

### Name: htmlify_string
### Title: function to make strings xml and html compatible
### Aliases: htmlify_string

### ** Examples

# A literal "<" must be escaped to its HTML entity form.
raw_text <- "<5"
escaped <- htmlify_string(raw_text)
stopifnot(escaped == "&lt;5")
/data/genthat_extracted_code/RTest/examples/htmlify_string.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
224
r
library(RTest)

### Name: htmlify_string
### Title: function to make strings xml and html compatible
### Aliases: htmlify_string

### ** Examples

# A literal "<" must be escaped to its HTML entity form.
raw_text <- "<5"
escaped <- htmlify_string(raw_text)
stopifnot(escaped == "&lt;5")
# NOTE(review): parameter-evaluation helper for an IAF spike encoder.
# Reads per-feature encoder parameters (bias b, threshold d, capacitance kd,
# resistance R) from the --params= command-line string (groups separated by
# "|", values by spaces), encodes each wavelet-transformed UCR series listed
# in `good_ids` into a spike train, and prints the sum of squared per-feature
# mean inter-spike-interval entropies as the objective value.
# Spike trains with <= 5 spikes are penalised with 100/(1 + n_spikes)
# instead of an entropy estimate.
# NOTE(review): depends on encode.R (iaf_encode), ../serialize_to_bin.R
# (loadMatrix) and the `entropy` package -- none visible here, so encoder
# semantics are assumed, not verified.  Data ids > 300 map to the "test"
# split (id - 300); others to "train".
args <- commandArgs(trailingOnly = FALSE) base_dir = dirname(substring( args[grep("--file=", args)], 8)) setwd(base_dir) param_string = substring(args[grep("--params=", args)], 10) s = strsplit(param_string, "[|]")[[1]] all_params = strsplit(s," ") all_params = lapply(all_params, as.numeric) b = all_params[[1]] # bias d = all_params[[2]] # tresh kd = all_params[[3]] # capacitance R = all_params[[4]] # resistance dt = 1e-03 source('../serialize_to_bin.R') source('encode.R') require(entropy) dir2load = "/home/alexeyche/my/sim/ucr_fb_spikes/wavelets" #dir2load = "/home/alexeyche/prog/sim/ucr_fb_spikes/wavelets" labels = c("train", "test") nums = c(300, 300) entrop_all = NULL good_ids = c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,84,92,98,101,103,106,109,110,114,118,121,124,126,128,132,136,137,138,140,145,146,149,150,152,153,155,159,164,165,166,167,170,171,173,175,176,178,179,181,187,188,189,190,192,194,198,199,201,202,203,206,208,210,212,215,216,218,219,220,222,223,224,226,229,230,231,232,233,234,235,236,240,243,245,246,247,248,250,252,253,254,256,259,260,261,262,265,266,267,269,271,273,276,278,279,280,281,282,285,288,289,290,291,292,293,295,297,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,353,360,366,381,395,401,403,404,406,407,411,412,418,421,424,425,426,428,429,430,433,434,435,436,437,438,440,441,442,443,445,446,447,448,449,451,453,459,462,464,465,466,467,473,477,481,482,483,488,495,496,497,498,501,508,510,514,515,516,518,519,520,521,522,523,526,527,530,531,533,534,536,537,538,539,540,543,546,547,548,549,550,552,554,555,556,557,559,560,562,564,565,566,568,570,573,574,575,577,578,579,583,588,593,594,596,597,598,599) for(ds_j in 1:length(good_ids)) { lab = "train" id = good_ids[ds_j] if(good_ids[ds_j] >300) { lab 
= "test" id = id - 300 } m = loadMatrix( sprintf("%s/%s_wavelets", dir2load, lab), id ) entrop = NULL for(fi in 1:nrow(m)) { sp = iaf_encode(m[fi,], dt, b[fi], d[fi], 0, R[fi], kd[fi]) if(length(sp)> 5) { entrop = c(entrop, entropy(diff(sp))) } else { entrop = c(entrop, 100/(1+length(sp))) } } entrop_all = cbind(entrop_all, entrop) } cat(sum((rowMeans(entrop_all)^2)), "\n")
/cns/R/srm/tem/find_best_code.R
no_license
alexeyche/alexeyche-junk
R
false
false
2,443
r
args <- commandArgs(trailingOnly = FALSE) base_dir = dirname(substring( args[grep("--file=", args)], 8)) setwd(base_dir) param_string = substring(args[grep("--params=", args)], 10) s = strsplit(param_string, "[|]")[[1]] all_params = strsplit(s," ") all_params = lapply(all_params, as.numeric) b = all_params[[1]] # bias d = all_params[[2]] # tresh kd = all_params[[3]] # capacitance R = all_params[[4]] # resistance dt = 1e-03 source('../serialize_to_bin.R') source('encode.R') require(entropy) dir2load = "/home/alexeyche/my/sim/ucr_fb_spikes/wavelets" #dir2load = "/home/alexeyche/prog/sim/ucr_fb_spikes/wavelets" labels = c("train", "test") nums = c(300, 300) entrop_all = NULL good_ids = c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,84,92,98,101,103,106,109,110,114,118,121,124,126,128,132,136,137,138,140,145,146,149,150,152,153,155,159,164,165,166,167,170,171,173,175,176,178,179,181,187,188,189,190,192,194,198,199,201,202,203,206,208,210,212,215,216,218,219,220,222,223,224,226,229,230,231,232,233,234,235,236,240,243,245,246,247,248,250,252,253,254,256,259,260,261,262,265,266,267,269,271,273,276,278,279,280,281,282,285,288,289,290,291,292,293,295,297,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,353,360,366,381,395,401,403,404,406,407,411,412,418,421,424,425,426,428,429,430,433,434,435,436,437,438,440,441,442,443,445,446,447,448,449,451,453,459,462,464,465,466,467,473,477,481,482,483,488,495,496,497,498,501,508,510,514,515,516,518,519,520,521,522,523,526,527,530,531,533,534,536,537,538,539,540,543,546,547,548,549,550,552,554,555,556,557,559,560,562,564,565,566,568,570,573,574,575,577,578,579,583,588,593,594,596,597,598,599) for(ds_j in 1:length(good_ids)) { lab = "train" id = good_ids[ds_j] if(good_ids[ds_j] >300) { lab 
= "test" id = id - 300 } m = loadMatrix( sprintf("%s/%s_wavelets", dir2load, lab), id ) entrop = NULL for(fi in 1:nrow(m)) { sp = iaf_encode(m[fi,], dt, b[fi], d[fi], 0, R[fi], kd[fi]) if(length(sp)> 5) { entrop = c(entrop, entropy(diff(sp))) } else { entrop = c(entrop, 100/(1+length(sp))) } } entrop_all = cbind(entrop_all, entrop) } cat(sum((rowMeans(entrop_all)^2)), "\n")
# tTestScaledMdd (EnvStats): scaled minimal detectable difference
# (delta/sigma) for a one- or two-sample t-test.
# Arguments:
#   n.or.n1, n2 -- sample size(s), each >= 2; n2 defaults to n.or.n1 and is
#                  only consulted when sample.type == "two.sample".
#   alpha, power -- significance level and target power; elementwise the
#                  code requires 0 < alpha < 1 and alpha <= power < 1.
#   alternative -- "two.sided", "less" or "greater"; for two-sided tests
#                  two.sided.direction chooses the sign of the result.
#   approx      -- TRUE returns the closed-form t-quantile approximation;
#                  FALSE additionally refines each value with uniroot() on
#                  the exact power function tTestPower(), bracketing the
#                  root by repeatedly doubling the upper bound (bounded by
#                  maxiter).
# Returns a numeric vector of delta/sigma values; entries where
# power == alpha are exactly 0, and the whole vector is negated when the
# effective direction is "less".
# NOTE(review): relies on tTestPower() and cbind.no.warn() from the same
# package -- not visible in this chunk, so their contracts are assumed.
tTestScaledMdd <- function (n.or.n1, n2 = n.or.n1, alpha = 0.05, power = 0.95, sample.type = ifelse(!missing(n2) && !is.null(n2), "two.sample", "one.sample"), alternative = "two.sided", two.sided.direction = "greater", approx = FALSE, tol = 1e-07, maxiter = 1000) { sample.type <- match.arg(sample.type, c("one.sample", "two.sample")) alternative <- match.arg(alternative, c("two.sided", "less", "greater")) two.sided.direction <- match.arg(two.sided.direction, c("greater", "less")) if (!is.vector(n.or.n1, mode = "numeric") || !is.vector(alpha, mode = "numeric") || !is.vector(power, mode = "numeric")) stop("'n.or.n1', 'alpha', and 'power' must be numeric vectors.") if (!all(is.finite(n.or.n1)) || !all(is.finite(alpha)) || !all(is.finite(power))) stop(paste("Missing (NA), Infinite (Inf, -Inf), and", "Undefined (Nan) values are not allowed in", "'n.or.n1', 'alpha', or 'power'")) if (any(n.or.n1 < 2)) stop("All values of 'n.or.n1' must be greater than or equal to 2") if (any(alpha <= 0) || any(alpha >= 1)) stop("All values of 'alpha' must be greater than 0 and less than 1") if (any(power < alpha) || any(power >= 1)) stop(paste("All values of 'power' must be greater than or equal to", "the corresponding elements of 'alpha', and less than 1")) if (sample.type == "two.sample" && !missing(n2)) { if (is.null(n2) || !is.vector(n2, mode = "numeric")) stop("'n2' must be a numeric vector") if (!all(is.finite(n2))) stop(paste("Missing (NA), Infinite (Inf, -Inf), and", "Undefined (Nan) values are not allowed in 'n2'")) if (any(n2 < 2)) stop("All values of 'n2' must be greater than or equal to 2") } alt.fac <- ifelse(alternative == "two.sided", 2, 1) if (sample.type == "two.sample") { df <- n.or.n1 + n2 - 2 oorn <- 1/sqrt((n.or.n1 * n2)/(n.or.n1 + n2)) } else { df <- n.or.n1 - 1 oorn <- 1/sqrt(n.or.n1) } delta.over.sigma.vec <- oorn * (qt(1 - alpha/alt.fac, df) + qt(power, df)) index <- power == alpha delta.over.sigma.vec[index] <- 0 if (!approx) { alt <- ifelse(alternative == 
"less", "greater", alternative) arg.mat <- cbind.no.warn(n.or.n1 = as.vector(n.or.n1), n2 = as.vector(n2), power = as.vector(power), alpha = as.vector(alpha)) for (i in c("n.or.n1", "n2", "power", "alpha")) assign(i, arg.mat[, i]) N <- nrow(arg.mat) fcn.for.root <- function(delta.over.sigma, n.or.n1, n2, power, alpha, sample.type, alternative, approx) { power - tTestPower(n.or.n1 = n.or.n1, n2 = n2, delta.over.sigma = delta.over.sigma, alpha = alpha, sample.type = sample.type, alternative = alternative, approx = approx) } for (i in (1:N)[!index]) { n.or.n1.i <- n.or.n1[i] n2.i <- n2[i] power.i <- power[i] alpha.i <- alpha[i] delta.over.sigma.i <- delta.over.sigma.vec[i] upper <- 2 * delta.over.sigma.i power.upper <- tTestPower(n.or.n1 = n.or.n1.i, n2 = n2.i, delta.over.sigma = upper, alpha = alpha.i, sample.type = sample.type, alternative = alt, approx = FALSE) upper.too.small <- power.upper <= power.i iter <- 1 while (upper.too.small && iter <= maxiter) { upper <- 2 * upper power.upper <- tTestPower(n.or.n1 = n.or.n1.i, n2 = n2.i, delta.over.sigma = upper, alpha = alpha.i, sample.type = sample.type, alternative = alt, approx = FALSE) upper.too.small <- power.upper <= power.i iter <- iter + 1 } if (iter > maxiter) stop("Error in search algorithm. Try increasing the argument 'maxiter'") delta.over.sigma.vec[i] <- uniroot(fcn.for.root, lower = 0, upper = upper, f.lower = power.i - alpha.i, f.upper = power.i - power.upper, n.or.n1 = n.or.n1.i, n2 = n2.i, power = power.i, alpha = alpha.i, sample.type = sample.type, alternative = alt, approx = FALSE, tol = tol, maxiter = maxiter)$root } } if (alternative == "less" || (alternative == "two.sided" && two.sided.direction == "less")) delta.over.sigma.vec <- -delta.over.sigma.vec delta.over.sigma.vec }
/R/tTestScaledMdd.R
no_license
alexkowa/EnvStats
R
false
false
4,780
r
tTestScaledMdd <- function (n.or.n1, n2 = n.or.n1, alpha = 0.05, power = 0.95, sample.type = ifelse(!missing(n2) && !is.null(n2), "two.sample", "one.sample"), alternative = "two.sided", two.sided.direction = "greater", approx = FALSE, tol = 1e-07, maxiter = 1000) { sample.type <- match.arg(sample.type, c("one.sample", "two.sample")) alternative <- match.arg(alternative, c("two.sided", "less", "greater")) two.sided.direction <- match.arg(two.sided.direction, c("greater", "less")) if (!is.vector(n.or.n1, mode = "numeric") || !is.vector(alpha, mode = "numeric") || !is.vector(power, mode = "numeric")) stop("'n.or.n1', 'alpha', and 'power' must be numeric vectors.") if (!all(is.finite(n.or.n1)) || !all(is.finite(alpha)) || !all(is.finite(power))) stop(paste("Missing (NA), Infinite (Inf, -Inf), and", "Undefined (Nan) values are not allowed in", "'n.or.n1', 'alpha', or 'power'")) if (any(n.or.n1 < 2)) stop("All values of 'n.or.n1' must be greater than or equal to 2") if (any(alpha <= 0) || any(alpha >= 1)) stop("All values of 'alpha' must be greater than 0 and less than 1") if (any(power < alpha) || any(power >= 1)) stop(paste("All values of 'power' must be greater than or equal to", "the corresponding elements of 'alpha', and less than 1")) if (sample.type == "two.sample" && !missing(n2)) { if (is.null(n2) || !is.vector(n2, mode = "numeric")) stop("'n2' must be a numeric vector") if (!all(is.finite(n2))) stop(paste("Missing (NA), Infinite (Inf, -Inf), and", "Undefined (Nan) values are not allowed in 'n2'")) if (any(n2 < 2)) stop("All values of 'n2' must be greater than or equal to 2") } alt.fac <- ifelse(alternative == "two.sided", 2, 1) if (sample.type == "two.sample") { df <- n.or.n1 + n2 - 2 oorn <- 1/sqrt((n.or.n1 * n2)/(n.or.n1 + n2)) } else { df <- n.or.n1 - 1 oorn <- 1/sqrt(n.or.n1) } delta.over.sigma.vec <- oorn * (qt(1 - alpha/alt.fac, df) + qt(power, df)) index <- power == alpha delta.over.sigma.vec[index] <- 0 if (!approx) { alt <- ifelse(alternative == 
"less", "greater", alternative) arg.mat <- cbind.no.warn(n.or.n1 = as.vector(n.or.n1), n2 = as.vector(n2), power = as.vector(power), alpha = as.vector(alpha)) for (i in c("n.or.n1", "n2", "power", "alpha")) assign(i, arg.mat[, i]) N <- nrow(arg.mat) fcn.for.root <- function(delta.over.sigma, n.or.n1, n2, power, alpha, sample.type, alternative, approx) { power - tTestPower(n.or.n1 = n.or.n1, n2 = n2, delta.over.sigma = delta.over.sigma, alpha = alpha, sample.type = sample.type, alternative = alternative, approx = approx) } for (i in (1:N)[!index]) { n.or.n1.i <- n.or.n1[i] n2.i <- n2[i] power.i <- power[i] alpha.i <- alpha[i] delta.over.sigma.i <- delta.over.sigma.vec[i] upper <- 2 * delta.over.sigma.i power.upper <- tTestPower(n.or.n1 = n.or.n1.i, n2 = n2.i, delta.over.sigma = upper, alpha = alpha.i, sample.type = sample.type, alternative = alt, approx = FALSE) upper.too.small <- power.upper <= power.i iter <- 1 while (upper.too.small && iter <= maxiter) { upper <- 2 * upper power.upper <- tTestPower(n.or.n1 = n.or.n1.i, n2 = n2.i, delta.over.sigma = upper, alpha = alpha.i, sample.type = sample.type, alternative = alt, approx = FALSE) upper.too.small <- power.upper <= power.i iter <- iter + 1 } if (iter > maxiter) stop("Error in search algorithm. Try increasing the argument 'maxiter'") delta.over.sigma.vec[i] <- uniroot(fcn.for.root, lower = 0, upper = upper, f.lower = power.i - alpha.i, f.upper = power.i - power.upper, n.or.n1 = n.or.n1.i, n2 = n2.i, power = power.i, alpha = alpha.i, sample.type = sample.type, alternative = alt, approx = FALSE, tol = tol, maxiter = maxiter)$root } } if (alternative == "less" || (alternative == "two.sided" && two.sided.direction == "less")) delta.over.sigma.vec <- -delta.over.sigma.vec delta.over.sigma.vec }
## makeCacheMatrix: build a special "matrix" wrapper object that can
## remember (cache) the inverse of the matrix it holds.
makeCacheMatrix <- function(x = numeric()) {
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the stored matrix invalidates any cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached_inv <<- solve
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}

## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix. If the inverse was already computed (and the matrix
## unchanged), the cached copy is returned instead of recomputing.
cacheSolve <- function(x, ...) {
  hit <- x$getinverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
/cachematrix.R
no_license
antonioldg/ProgrammingAssignment2
R
false
false
912
r
## makeCacheMatrix: build a special "matrix" wrapper object that can
## remember (cache) the inverse of the matrix it holds.
makeCacheMatrix <- function(x = numeric()) {
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the stored matrix invalidates any cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached_inv <<- solve
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}

## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix. If the inverse was already computed (and the matrix
## unchanged), the cached copy is returned instead of recomputing.
cacheSolve <- function(x, ...) {
  hit <- x$getinverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
/Homework_Session6_Chenxin.R
no_license
xiechenxin/Statistical-Machine-learning-for-Marketing-Research
R
false
false
7,032
r
context("test-asanaRconfigFile.R")

test_that("asanaRconfigFile() cats a message to console", {
  # The config helper should print setup instructions to stdout.
  expected_snippet <-
    "Copy-paste the lines between the dashes into a new plain text file, replace the value for the personal_access_token"
  expect_output(asanaR::asanaRconfigFile(), expected_snippet)
})
/tests/testthat/test-asanaRconfigFile.R
permissive
ryantsullivan/asanaR
R
false
false
293
r
context("test-asanaRconfigFile.R")

test_that("asanaRconfigFile() cats a message to console", {
  # The config helper should print setup instructions to stdout.
  expected_snippet <-
    "Copy-paste the lines between the dashes into a new plain text file, replace the value for the personal_access_token"
  expect_output(asanaR::asanaRconfigFile(), expected_snippet)
})
# samplesCoda (BRugs): write CODA output files for a monitored node.
# Validates that `node` and `stem` are single character strings, makes
# `stem` absolute (relative to getwd()) when it has no directory part,
# then temporarily overrides the global samples settings (beg/end, chain
# range, thin) -- restoring the previous values via on.exit() -- and
# drives the BUGS backend to dump CODA files rooted at `stem`.
# NOTE(review): `beg` is clamped to at least the adaptive phase and
# `thin` to >= 1 before being applied.  samplesGet*/samplesSet*,
# modelAdaptivePhase(), .SamplesGlobalsCmd(), .CmdInterpreter() and
# buffer() are package-internal helpers not visible in this chunk.
"samplesCoda" <- function(node, stem, beg = samplesGetBeg(), end = samplesGetEnd(), firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(), thin = samplesGetThin()) { # Write out CODA files if(!is.character(node) || length(node)!=1) stop("'node' must be character of length 1") if(!is.character(stem) || length(stem)!=1) stop("'stem' must be character of length 1") if(dirname(stem) == ".") stem <- file.path(getwd(), basename(stem)) oldBeg <- samplesGetBeg() oldEnd <- samplesGetEnd() oldFirstChain <- samplesGetFirstChain() oldLastChain <- samplesGetLastChain() oldThin <- samplesGetThin() on.exit({ samplesSetBeg(oldBeg) samplesSetEnd(oldEnd) samplesSetFirstChain(oldFirstChain) samplesSetLastChain(oldLastChain) samplesSetThin(oldThin) }) beg <- max(beg, modelAdaptivePhase()) samplesSetBeg(beg) samplesSetEnd(end) samplesSetFirstChain(firstChain) samplesSetLastChain(lastChain) thin <- max(c(thin, 1)) samplesSetThin(thin) command <- paste(.SamplesGlobalsCmd(node), ";SamplesEmbed.StatsGuard;", "SamplesEmbed.CODA(", sQuote(stem), ")") .CmdInterpreter(command) buffer() }
/R/samples.coda.R
no_license
cran/BRugs
R
false
false
1,310
r
# samplesCoda (BRugs): write CODA output files for a monitored node.
# Validates that `node` and `stem` are single character strings, makes
# `stem` absolute (relative to getwd()) when it has no directory part,
# then temporarily overrides the global samples settings (beg/end, chain
# range, thin) -- restoring the previous values via on.exit() -- and
# drives the BUGS backend to dump CODA files rooted at `stem`.
# NOTE(review): `beg` is clamped to at least the adaptive phase and
# `thin` to >= 1 before being applied.  samplesGet*/samplesSet*,
# modelAdaptivePhase(), .SamplesGlobalsCmd(), .CmdInterpreter() and
# buffer() are package-internal helpers not visible in this chunk.
"samplesCoda" <- function(node, stem, beg = samplesGetBeg(), end = samplesGetEnd(), firstChain = samplesGetFirstChain(), lastChain = samplesGetLastChain(), thin = samplesGetThin()) { # Write out CODA files if(!is.character(node) || length(node)!=1) stop("'node' must be character of length 1") if(!is.character(stem) || length(stem)!=1) stop("'stem' must be character of length 1") if(dirname(stem) == ".") stem <- file.path(getwd(), basename(stem)) oldBeg <- samplesGetBeg() oldEnd <- samplesGetEnd() oldFirstChain <- samplesGetFirstChain() oldLastChain <- samplesGetLastChain() oldThin <- samplesGetThin() on.exit({ samplesSetBeg(oldBeg) samplesSetEnd(oldEnd) samplesSetFirstChain(oldFirstChain) samplesSetLastChain(oldLastChain) samplesSetThin(oldThin) }) beg <- max(beg, modelAdaptivePhase()) samplesSetBeg(beg) samplesSetEnd(end) samplesSetFirstChain(firstChain) samplesSetLastChain(lastChain) thin <- max(c(thin, 1)) samplesSetThin(thin) command <- paste(.SamplesGlobalsCmd(node), ";SamplesEmbed.StatsGuard;", "SamplesEmbed.CODA(", sQuote(stem), ")") .CmdInterpreter(command) buffer() }
#' add_controls
#'
#' Make one of the archaeopteryx htmlwidget control panels visible.
#'
#' @param arch Required. An archaeopteryx htmlwidget; its \code{x} payload
#'   is updated and the widget is returned.
#' @param controls Optional. Default \code{"maincontrols"}. Can be either
#'   \code{"maincontrols"} or \code{"colorcontrols"}.
#' @return The modified widget (invisibly nothing special -- returned
#'   normally so calls can be chained).
#' @export
add_controls <- function(arch, controls="maincontrols") {
  if (controls == "maincontrols") {
    arch$x$maincontrols <- "visible"
  } else if (controls == "colorcontrols") {
    arch$x$colorcontrols <- "visible"
  } else {
    # Include the offending value so misuse is easy to diagnose.
    stop("invalid control option: ", controls)
  }
  arch
}
/R/add_controls.R
no_license
zachcp/archaeopteryx-js
R
false
false
494
r
#' add_controls
#'
#' Make one of the archaeopteryx htmlwidget control panels visible.
#'
#' @param arch Required. An archaeopteryx htmlwidget; its \code{x} payload
#'   is updated and the widget is returned.
#' @param controls Optional. Default \code{"maincontrols"}. Can be either
#'   \code{"maincontrols"} or \code{"colorcontrols"}.
#' @return The modified widget (invisibly nothing special -- returned
#'   normally so calls can be chained).
#' @export
add_controls <- function(arch, controls="maincontrols") {
  if (controls == "maincontrols") {
    arch$x$maincontrols <- "visible"
  } else if (controls == "colorcontrols") {
    arch$x$colorcontrols <- "visible"
  } else {
    # Include the offending value so misuse is easy to diagnose.
    stop("invalid control option: ", controls)
  }
  arch
}
#' Prints available TCGA datasets
#'
#' @description Returns the table of TCGA cohorts bundled with the package.
#' @examples
#' tcga_available()
#' @export
#' @seealso \code{\link{tcga_load}}
tcga_available = function(){
  # Cohort metadata ships as a plain-text table in the package's extdata.
  cohort_file <- system.file('extdata', 'cohorts.txt', package = 'TCGAmutations')
  data.table::fread(input = cohort_file)
}
/R/tcga_available.R
permissive
jchenpku/TCGAmutations
R
false
false
328
r
#' Prints available TCGA datasets
#'
#' @description Returns the table of TCGA cohorts bundled with the package.
#' @examples
#' tcga_available()
#' @export
#' @seealso \code{\link{tcga_load}}
tcga_available = function(){
  # Cohort metadata ships as a plain-text table in the package's extdata.
  cohort_file <- system.file('extdata', 'cohorts.txt', package = 'TCGAmutations')
  data.table::fread(input = cohort_file)
}
# NOTE(review): interactive ChIP-seq analysis script (sb_cofactor A549).
# Pipeline: load gained/lost/common NB peak sets (BED), overlap them with
# NR3C1 (GR) binding-consensus regions pooled over the 5min-1h Reddy time
# points, annotate the resulting peak sets, derive gene lists at promoters
# (|distanceToTSS| thresholds of 3000/8000), draw fold-change time
# courses, and intersect promoter gene lists with differential POLR2A
# (Pol II) binding calls.
# Depends on scripts/ckn_utils.R and scripts/load_reddy.R for
# annotatePeaks(), load_reddy_binding_consensus(),
# load_diffbind_POLR2A_peaks_Myers() and the draw_time_course_* helpers,
# none of which are visible here; the counts recorded in trailing
# comments are the author's console results, not verified.
# NOTE(review): setwd() at the top of a script is fragile -- prefer
# project-relative paths.  Commented-out exploratory blocks are kept
# as-is below.
# setwd("/Users/chris/Desktop/sb_cofactor_hr/A549") setwd("/home/chris/Bureau/sb_cofactor_hr/A549") source("scripts/ckn_utils.R") source("scripts/load_reddy.R") library(ChIPseeker) # Loading peaks peaks_dir <- "output/chip-pipeline-GRCh38/peak_call/A549_NB" gainNB <- rtracklayer::import(con = file.path(peaks_dir, "NB_specific_DEX.bed")); print(length(gainNB)) # 803 lossNB <- rtracklayer::import(con = file.path(peaks_dir, "NB_specific_CTRL.bed")); print(length(lossNB)) # 5952 commonNB <- rtracklayer::import(con = file.path(peaks_dir, "NB_common.bed")); print(length(commonNB)) # 1680 ### Overlaps with GR gr_regions <- load_reddy_binding_consensus("NR3C1") # Overlaps with GR at 1h # gr_1h <- gr_regions[["1 hour"]] # # gainNB_ovGR1h <- subsetByOverlaps(gainNB, gr_1h); print(length(gainNB_ovGR1h)) # 741 ; 741/803 = 92.28 % # gainNB_notovGR1h <- gainNB[!(gainNB %in% gainNB_ovGR1h)]; print(length(gainNB_notovGR1h)) # 62 ; 62/803 = 7.72 % # # lossNB_ovGR1h <- subsetByOverlaps(lossNB, gr_1h); print(length(lossNB_ovGR1h)) # 935 ; 935/5952 = 15.71 % # lossNB_notovGR1h <- lossNB[!(lossNB %in% lossNB_ovGR1h)]; print(length(lossNB_notovGR1h)) # 5017 ; 5017/5952 = 84.29 % # # commonNB_ovGR1h <- subsetByOverlaps(commonNB, gr_1h); print(length(commonNB_ovGR1h)) # 1397 ; 1397/1680 = 83.15 % # commonNB_notovGR1h <- commonNB[!(commonNB %in% commonNB_ovGR1h)]; print(length(commonNB_notovGR1h)) # 283 ; 283/1680 = 16.84 % # Overlaps with GR at between 0 and 1h # for (timepoint in names(gr_regions)[1:8]) { # message(timepoint) # gr_time <- gr_regions[[timepoint]] # # gainNB_ovGR <- subsetByOverlaps(gainNB, gr_time); print(length(gainNB_ovGR)) # ; /803 = 92.28 % # gainNB_notovGR <- gainNB[!(gainNB %in% gainNB_ovGR)]; print(length(gainNB_notovGR)) # ; /803 = 7.72 % # # lossNB_ovGR <- subsetByOverlaps(lossNB, gr_time); print(length(lossNB_ovGR)) # ; /5952 = 15.71 % # lossNB_notovGR <- lossNB[!(lossNB %in% lossNB_ovGR)]; print(length(lossNB_notovGR)) # ; /5952 = 84.29 % # # commonNB_ovGR <- 
subsetByOverlaps(commonNB, gr_time); print(length(commonNB_ovGR)) # ; /1680 = 83.15 % # commonNB_notovGR <- commonNB[!(commonNB %in% commonNB_ovGR)]; print(length(commonNB_notovGR)) # ; /1680 = 16.84 % # } # Overlaps with GR at between 0 and 1h (reduced) gr_5m_1h <- GRanges() for (time in names(gr_regions)[2:8]) { gr_time <- gr_regions[[time]] gr_5m_1h <- append(gr_5m_1h, gr_time) } gainNB_ovGR <- subsetByOverlaps(gainNB, gr_5m_1h); print(length(gainNB_ovGR)) # 791 ; 791/803 = 98.51% gainNB_notovGR <- gainNB[!(gainNB %in% gainNB_ovGR)]; print(length(gainNB_notovGR)) # 12 ; 12/803 = 1.49% lossNB_ovGR <- subsetByOverlaps(lossNB, gr_5m_1h); print(length(lossNB_ovGR)) # 3600 ; 3600/5952 = 60.48% lossNB_notovGR <- lossNB[!(lossNB %in% lossNB_ovGR)]; print(length(lossNB_notovGR)) # 2352 ; 2352/5952 = 39.51% commonNB_ovGR <- subsetByOverlaps(commonNB, gr_5m_1h); print(length(commonNB_ovGR)) # 1632 ; 1632/1680 = 97.14% commonNB_notovGR <- commonNB[!(commonNB %in% commonNB_ovGR)]; print(length(commonNB_notovGR)) # 48 ; 48/1680 = 2.85% # Width summary(width(gainNB_ovGR)); hist(width(gainNB_ovGR), breaks = 60) summary(width(gainNB_notovGR)); hist(width(gainNB_notovGR), breaks = 60) summary(width(lossNB_ovGR)); hist(width(lossNB_ovGR), breaks = 60) summary(width(lossNB_notovGR)); hist(width(lossNB_notovGR), breaks = 60) # Annotation gainNB_ovGR_annodf <- annotatePeaks(gainNB_ovGR, output = "df") gainNB_notovGR_annodf <- annotatePeaks(gainNB_notovGR, output = "df") lossNB_ovGR_annodf <- annotatePeaks(lossNB_ovGR, output = "df") lossNB_notovGR_annodf <- annotatePeaks(lossNB_notovGR, output = "df") commonNB_ovGR_annodf <- annotatePeaks(commonNB_ovGR, output = "df") commonNB_notovGR_annodf <- annotatePeaks(commonNB_notovGR, output = "df") # Retrieve genes which gain or lose NBC at the promoters geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 20 geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 
3000) %>% pull(geneId) %>% unique # 3 geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 368 geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 1516 geneCommonNB_ovGR <- commonNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 86 geneCommonNB_notovGR <- commonNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 18 symbol_all_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% pull(SYMBOL) %>% unique symbol_all_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% pull(SYMBOL) %>% unique symbol_all_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% pull(SYMBOL) %>% unique symbol_all_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% pull(SYMBOL) %>% unique symbol_prom_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique symbol_prom_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique symbol_prom_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique symbol_prom_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique ######### upDEX <- c("PER1", "ZFP36", "ERRFI1", "ANGPTL4", "NR1D2", "CRY2") upDEX_in_gainNB <- upDEX %in% symbol_all_geneGainNB_ovGR; names(upDEX_in_gainNB) <- upDEX upDEX_in_gainNB downDEX <- c("IL11") downDEX_in_gainNB <- downDEX %in% symbol_all_geneGainNB_ovGR; names(downDEX_in_gainNB) <- downDEX downDEX_in_gainNB ###################### # Draw FC time series ###################### source("scripts/reddy_time_series/draw_graph_log2FC_0-12h.R") geneGroupList <- list("GainNB_ovGR_withGR" = geneGainNB_ovGR, "GainNB_notovGR_withGR" = geneGainNB_notovGR, "LossNB_withGR" = geneLossNB_ovGR, "LossNB_withoutGR" = geneLossNB_notovGR, "CommonNB_withGR" = geneCommonNB_ovGR, "CommonNB_withoutGR" = 
geneCommonNB_notovGR) draw_time_course_FC(geneGainNB_ovGR) draw_time_course_FC(geneGainNB_notovGR) draw_time_course_FC(geneLossNB_ovGR) draw_time_course_FC(geneLossNB_notovGR) draw_time_course_FC(geneCommonNB_ovGR) draw_time_course_FC(geneCommonNB_notovGR) draw_time_course_pergroup_FC(geneGroupList) # geneLossNBC_ovGR: Action répressive de GR par binding direct # geneLossNBC_notovGR: Les premières observations ne montre pas de grand changements dans le niveau de fold change de gene expression, réservoir de cofacteurs? ### is there a recruitement of POL2 ? POL2_diffbind_Myers <- load_diffbind_POLR2A_peaks_Myers() POL2_UP_FDR_annodf <- POL2_diffbind_Myers[["POLR2A_UP_FDR"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_UP_FDR_annodf)) # 242 gene_POL2_UP_FDR <- POL2_UP_FDR_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_UP_FDR)) # 112 POL2_DOWN_FDR_annodf <- POL2_diffbind_Myers[["POLR2A_DOWN_FDR"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_DOWN_FDR_annodf)) # 301 gene_POL2_DOWN_FDR <- POL2_DOWN_FDR_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_DOWN_FDR)) # 168 POL2_UP_PVAL_annodf <- POL2_diffbind_Myers[["POLR2A_UP_PVAL"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_UP_PVAL_annodf)) # 966 gene_POL2_UP_PVAL <- POL2_UP_PVAL_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_UP_PVAL)) # 427 POL2_DOWN_PVAL_annodf <- POL2_diffbind_Myers[["POLR2A_DOWN_PVAL"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_DOWN_PVAL_annodf)) # 1861 gene_POL2_DOWN_PVAL <- POL2_DOWN_PVAL_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_DOWN_PVAL)) # 945 # control all(gene_POL2_UP_FDR %in% gene_POL2_UP_PVAL) all(gene_POL2_DOWN_FDR %in% gene_POL2_DOWN_PVAL) # symbol symbol_prom_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneGainNB_ovGR)) # 63 symbol_prom_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% 
pull(SYMBOL) %>% unique; print(length(symbol_prom_geneGainNB_notovGR)) # 4 symbol_prom_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneLossNB_ovGR)) # 551 symbol_prom_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneLossNB_notovGR)) # 1593 ### VS # gain NB ov GR sum(symbol_prom_geneGainNB_ovGR %in% gene_POL2_UP_FDR) # 8/63 # FIGURE intersect(symbol_prom_geneGainNB_ovGR, gene_POL2_UP_FDR) sum(symbol_prom_geneGainNB_ovGR %in% gene_POL2_UP_PVAL) # 17/63 intersect(symbol_prom_geneGainNB_ovGR, gene_POL2_UP_PVAL) sum(symbol_prom_geneGainNB_ovGR %in% gene_POL2_DOWN_PVAL) # 2/63 intersect(symbol_prom_geneGainNB_ovGR, gene_POL2_DOWN_PVAL) # loss NB ov GR sum(symbol_prom_geneLossNB_ovGR %in% gene_POL2_DOWN_FDR) # 50/551 intersect(symbol_prom_geneLossNB_ovGR, gene_POL2_DOWN_FDR) sum(symbol_prom_geneLossNB_ovGR %in% gene_POL2_DOWN_PVAL) # 148/551 intersect(symbol_prom_geneLossNB_ovGR, gene_POL2_DOWN_PVAL) # loss NB ov GR sum(symbol_prom_geneLossNB_notovGR %in% gene_POL2_DOWN_FDR) # 51/1593 intersect(symbol_prom_geneLossNB_notovGR, gene_POL2_DOWN_FDR) sum(symbol_prom_geneLossNB_notovGR %in% gene_POL2_DOWN_PVAL) # 187/1593 intersect(symbol_prom_geneLossNB_notovGR, gene_POL2_DOWN_PVAL)
/A549/scripts/chris/NB_balance/analysis2_GainLossNB_ovPOL2.R
no_license
ArnaudDroitLab/sb_cofactor
R
false
false
9,835
r
# Analysis: where do NB peaks gained/lost after DEX treatment overlap
# glucocorticoid receptor (GR/NR3C1) binding, and is POL2 recruited at the
# corresponding promoters?
# NOTE(review): relies on project helpers sourced below
# (annotatePeaks, load_reddy_binding_consensus, draw_time_course_FC, ...);
# behavior not verifiable outside the repository.

# setwd("/Users/chris/Desktop/sb_cofactor_hr/A549")
setwd("/home/chris/Bureau/sb_cofactor_hr/A549")
source("scripts/ckn_utils.R")
source("scripts/load_reddy.R")
library(ChIPseeker)

# Loading peaks
peaks_dir <- "output/chip-pipeline-GRCh38/peak_call/A549_NB"
gainNB <- rtracklayer::import(con = file.path(peaks_dir, "NB_specific_DEX.bed")); print(length(gainNB)) # 803
lossNB <- rtracklayer::import(con = file.path(peaks_dir, "NB_specific_CTRL.bed")); print(length(lossNB)) # 5952
commonNB <- rtracklayer::import(con = file.path(peaks_dir, "NB_common.bed")); print(length(commonNB)) # 1680

### Overlaps with GR
gr_regions <- load_reddy_binding_consensus("NR3C1")

# Overlaps with GR at 1h
# gr_1h <- gr_regions[["1 hour"]]
#
# gainNB_ovGR1h <- subsetByOverlaps(gainNB, gr_1h); print(length(gainNB_ovGR1h)) # 741 ; 741/803 = 92.28 %
# gainNB_notovGR1h <- gainNB[!(gainNB %in% gainNB_ovGR1h)]; print(length(gainNB_notovGR1h)) # 62 ; 62/803 = 7.72 %
#
# lossNB_ovGR1h <- subsetByOverlaps(lossNB, gr_1h); print(length(lossNB_ovGR1h)) # 935 ; 935/5952 = 15.71 %
# lossNB_notovGR1h <- lossNB[!(lossNB %in% lossNB_ovGR1h)]; print(length(lossNB_notovGR1h)) # 5017 ; 5017/5952 = 84.29 %
#
# commonNB_ovGR1h <- subsetByOverlaps(commonNB, gr_1h); print(length(commonNB_ovGR1h)) # 1397 ; 1397/1680 = 83.15 %
# commonNB_notovGR1h <- commonNB[!(commonNB %in% commonNB_ovGR1h)]; print(length(commonNB_notovGR1h)) # 283 ; 283/1680 = 16.84 %

# Overlaps with GR at between 0 and 1h (per-timepoint version, kept for reference)
# for (timepoint in names(gr_regions)[1:8]) {
#   message(timepoint)
#   gr_time <- gr_regions[[timepoint]]
#
#   gainNB_ovGR <- subsetByOverlaps(gainNB, gr_time); print(length(gainNB_ovGR)) # ; /803 = 92.28 %
#   gainNB_notovGR <- gainNB[!(gainNB %in% gainNB_ovGR)]; print(length(gainNB_notovGR)) # ; /803 = 7.72 %
#
#   lossNB_ovGR <- subsetByOverlaps(lossNB, gr_time); print(length(lossNB_ovGR)) # ; /5952 = 15.71 %
#   lossNB_notovGR <- lossNB[!(lossNB %in% lossNB_ovGR)]; print(length(lossNB_notovGR)) # ; /5952 = 84.29 %
#
#   commonNB_ovGR <- subsetByOverlaps(commonNB, gr_time); print(length(commonNB_ovGR)) # ; /1680 = 83.15 %
#   commonNB_notovGR <- commonNB[!(commonNB %in% commonNB_ovGR)]; print(length(commonNB_notovGR)) # ; /1680 = 16.84 %
# }

# Overlaps with GR at between 0 and 1h (reduced): pool all GR consensus
# regions from 5 min to 1 h into a single GRanges before overlapping
gr_5m_1h <- GRanges()
for (time in names(gr_regions)[2:8]) {
  gr_time <- gr_regions[[time]]
  gr_5m_1h <- append(gr_5m_1h, gr_time)
}

gainNB_ovGR <- subsetByOverlaps(gainNB, gr_5m_1h); print(length(gainNB_ovGR)) # 791 ; 791/803 = 98.51%
gainNB_notovGR <- gainNB[!(gainNB %in% gainNB_ovGR)]; print(length(gainNB_notovGR)) # 12 ; 12/803 = 1.49%

lossNB_ovGR <- subsetByOverlaps(lossNB, gr_5m_1h); print(length(lossNB_ovGR)) # 3600 ; 3600/5952 = 60.48%
lossNB_notovGR <- lossNB[!(lossNB %in% lossNB_ovGR)]; print(length(lossNB_notovGR)) # 2352 ; 2352/5952 = 39.51%

commonNB_ovGR <- subsetByOverlaps(commonNB, gr_5m_1h); print(length(commonNB_ovGR)) # 1632 ; 1632/1680 = 97.14%
commonNB_notovGR <- commonNB[!(commonNB %in% commonNB_ovGR)]; print(length(commonNB_notovGR)) # 48 ; 48/1680 = 2.85%

# Width distributions of each peak category
summary(width(gainNB_ovGR)); hist(width(gainNB_ovGR), breaks = 60)
summary(width(gainNB_notovGR)); hist(width(gainNB_notovGR), breaks = 60)
summary(width(lossNB_ovGR)); hist(width(lossNB_ovGR), breaks = 60)
summary(width(lossNB_notovGR)); hist(width(lossNB_notovGR), breaks = 60)

# Annotation
gainNB_ovGR_annodf <- annotatePeaks(gainNB_ovGR, output = "df")
gainNB_notovGR_annodf <- annotatePeaks(gainNB_notovGR, output = "df")
lossNB_ovGR_annodf <- annotatePeaks(lossNB_ovGR, output = "df")
lossNB_notovGR_annodf <- annotatePeaks(lossNB_notovGR, output = "df")
commonNB_ovGR_annodf <- annotatePeaks(commonNB_ovGR, output = "df")
commonNB_notovGR_annodf <- annotatePeaks(commonNB_notovGR, output = "df")

# Retrieve genes which gain or lose NBC at the promoters (TSS +/- 3 kb)
geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 20
geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 3
geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 368
geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 1516
geneCommonNB_ovGR <- commonNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 86
geneCommonNB_notovGR <- commonNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 3000) %>% pull(geneId) %>% unique # 18

# Gene symbols: all annotated peaks, and peaks near promoters (TSS +/- 8 kb)
symbol_all_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_all_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_all_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_all_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% pull(SYMBOL) %>% unique

symbol_prom_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique
symbol_prom_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique
symbol_prom_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique
symbol_prom_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique

#########
# Known DEX-responsive genes: are they among the gain-NB-with-GR genes?
upDEX <- c("PER1", "ZFP36", "ERRFI1", "ANGPTL4", "NR1D2", "CRY2")
upDEX_in_gainNB <- upDEX %in% symbol_all_geneGainNB_ovGR; names(upDEX_in_gainNB) <- upDEX
upDEX_in_gainNB

downDEX <- c("IL11")
downDEX_in_gainNB <- downDEX %in% symbol_all_geneGainNB_ovGR; names(downDEX_in_gainNB) <- downDEX
downDEX_in_gainNB

######################
# Draw FC time series
######################
source("scripts/reddy_time_series/draw_graph_log2FC_0-12h.R")

geneGroupList <- list("GainNB_ovGR_withGR" = geneGainNB_ovGR,
                      "GainNB_notovGR_withGR" = geneGainNB_notovGR,
                      "LossNB_withGR" = geneLossNB_ovGR,
                      "LossNB_withoutGR" = geneLossNB_notovGR,
                      "CommonNB_withGR" = geneCommonNB_ovGR,
                      "CommonNB_withoutGR" = geneCommonNB_notovGR)

draw_time_course_FC(geneGainNB_ovGR)
draw_time_course_FC(geneGainNB_notovGR)
draw_time_course_FC(geneLossNB_ovGR)
draw_time_course_FC(geneLossNB_notovGR)
draw_time_course_FC(geneCommonNB_ovGR)
draw_time_course_FC(geneCommonNB_notovGR)

draw_time_course_pergroup_FC(geneGroupList)

# (translated from French)
# geneLossNBC_ovGR: repressive action of GR through direct binding
# geneLossNBC_notovGR: first observations do not show large changes in the
#   gene-expression fold-change level; a reservoir of cofactors?

### is there a recruitement of POL2 ?
POL2_diffbind_Myers <- load_diffbind_POLR2A_peaks_Myers()

POL2_UP_FDR_annodf <- POL2_diffbind_Myers[["POLR2A_UP_FDR"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_UP_FDR_annodf)) # 242
gene_POL2_UP_FDR <- POL2_UP_FDR_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_UP_FDR)) # 112

POL2_DOWN_FDR_annodf <- POL2_diffbind_Myers[["POLR2A_DOWN_FDR"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_DOWN_FDR_annodf)) # 301
gene_POL2_DOWN_FDR <- POL2_DOWN_FDR_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_DOWN_FDR)) # 168

POL2_UP_PVAL_annodf <- POL2_diffbind_Myers[["POLR2A_UP_PVAL"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_UP_PVAL_annodf)) # 966
gene_POL2_UP_PVAL <- POL2_UP_PVAL_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_UP_PVAL)) # 427

POL2_DOWN_PVAL_annodf <- POL2_diffbind_Myers[["POLR2A_DOWN_PVAL"]] %>% annotatePeaks(output = "df"); print(nrow(POL2_DOWN_PVAL_annodf)) # 1861
gene_POL2_DOWN_PVAL <- POL2_DOWN_PVAL_annodf %>% pull(SYMBOL) %>% unique; print(length(gene_POL2_DOWN_PVAL)) # 945

# control: FDR gene sets should be subsets of the p-value gene sets
all(gene_POL2_UP_FDR %in% gene_POL2_UP_PVAL)
all(gene_POL2_DOWN_FDR %in% gene_POL2_DOWN_PVAL)

# symbol (same computation as above, repeated here with counts printed)
symbol_prom_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneGainNB_ovGR)) # 63
symbol_prom_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneGainNB_notovGR)) # 4
symbol_prom_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneLossNB_ovGR)) # 551
symbol_prom_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(abs(distanceToTSS) <= 8000) %>% pull(SYMBOL) %>% unique; print(length(symbol_prom_geneLossNB_notovGR)) # 1593

### VS POL2 differential binding
# gain NB ov GR
sum(symbol_prom_geneGainNB_ovGR %in% gene_POL2_UP_FDR) # 8/63 # FIGURE
intersect(symbol_prom_geneGainNB_ovGR, gene_POL2_UP_FDR)
sum(symbol_prom_geneGainNB_ovGR %in% gene_POL2_UP_PVAL) # 17/63
intersect(symbol_prom_geneGainNB_ovGR, gene_POL2_UP_PVAL)
sum(symbol_prom_geneGainNB_ovGR %in% gene_POL2_DOWN_PVAL) # 2/63
intersect(symbol_prom_geneGainNB_ovGR, gene_POL2_DOWN_PVAL)

# loss NB ov GR
sum(symbol_prom_geneLossNB_ovGR %in% gene_POL2_DOWN_FDR) # 50/551
intersect(symbol_prom_geneLossNB_ovGR, gene_POL2_DOWN_FDR)
sum(symbol_prom_geneLossNB_ovGR %in% gene_POL2_DOWN_PVAL) # 148/551
intersect(symbol_prom_geneLossNB_ovGR, gene_POL2_DOWN_PVAL)

# loss NB not ov GR (original comment said "ov GR", but this section uses
# the not-overlapping set)
sum(symbol_prom_geneLossNB_notovGR %in% gene_POL2_DOWN_FDR) # 51/1593
intersect(symbol_prom_geneLossNB_notovGR, gene_POL2_DOWN_FDR)
sum(symbol_prom_geneLossNB_notovGR %in% gene_POL2_DOWN_PVAL) # 187/1593
intersect(symbol_prom_geneLossNB_notovGR, gene_POL2_DOWN_PVAL)
# Analysis of the built-in mtcars dataset using dplyr.
# filename : dplyr-mtcars.R
library(dplyr)
?mtcars

# Structure of the data set
str(mtcars)        # structure
dim(mtcars)        # dimensions
names(mtcars); colnames(mtcars)  # column names
rownames(mtcars)   # row names
summary(mtcars)    # summary of dataset

# Summary activities on mtcars
t1 <- table(mtcars$am)
pie(t1)
19 / 32 * 360  # slice angle in degrees for the 19 automatic-transmission cars
pie(t1, labels = c('Auto', 'Manual'))
t2 <- table(mtcars$gear)
pie(t2)
barplot(t2)
barplot(t2, col = 1:3)
barplot(t2, col = 1:3, horiz = TRUE)
barplot(t2, col = c('green', 'blue', 'red'), xlab = 'gear',
        ylab = 'No of cars', ylim = c(0, 20))
title(main = 'Distribution of gears of cars', sub = 'No of gears')  # FIX: typo "Distibution"

# Using dplyr: %>% is the chaining (pipe) function
mtcars %>% select(mpg, gear) %>% slice(c(1:5, 10))  # select for columns, slice for rows
mtcars %>% arrange(mpg)  # ascending order of mileage
mtcars %>% arrange(am, desc(mpg)) %>% select(am, mpg)  # ascending am, descending mpg
# FIX: original called muatate() (typo), which errors at runtime
mtcars %>% mutate(rn = rownames(mtcars)) %>% select(rn, mpg)  # display rownames with mpg
mtcars %>% slice(c(1, 5, 7))
mtcars %>% sample_n(3)
mtcars %>% sample_frac(.2)
mtcars %>% select(sample(x = c(1:11), size = 2)) %>% head
sample(x = 1:11, size = 2)
mtcars %>% mutate(newmpg = mpg * 1.1)
mutate(mtcars, newmpg = mpg * 1.2)

# Type of Tx (transmission), mean(mpg)
mtcars %>% group_by(am) %>% summarise(MeanMPG = mean(mpg))
mtcars %>% group_by(am) %>% summarise(MeanMPG = mean(mpg), MaxHP = max(hp), MinWT = min(wt))
mtcars %>% group_by(gear, cyl) %>% summarise(MeanMPG = mean(mpg))
/dplyr-mtcars.R
no_license
elissabedamatta/analytics-1
R
false
false
1,475
r
# Analysis of the built-in mtcars dataset using dplyr.
# filename : dplyr-mtcars.R
library(dplyr)
?mtcars

# Structure of the data set
str(mtcars)        # structure
dim(mtcars)        # dimensions
names(mtcars); colnames(mtcars)  # column names
rownames(mtcars)   # row names
summary(mtcars)    # summary of dataset

# Summary activities on mtcars
t1 <- table(mtcars$am)
pie(t1)
19 / 32 * 360  # slice angle in degrees for the 19 automatic-transmission cars
pie(t1, labels = c('Auto', 'Manual'))
t2 <- table(mtcars$gear)
pie(t2)
barplot(t2)
barplot(t2, col = 1:3)
barplot(t2, col = 1:3, horiz = TRUE)
barplot(t2, col = c('green', 'blue', 'red'), xlab = 'gear',
        ylab = 'No of cars', ylim = c(0, 20))
title(main = 'Distribution of gears of cars', sub = 'No of gears')  # FIX: typo "Distibution"

# Using dplyr: %>% is the chaining (pipe) function
mtcars %>% select(mpg, gear) %>% slice(c(1:5, 10))  # select for columns, slice for rows
mtcars %>% arrange(mpg)  # ascending order of mileage
mtcars %>% arrange(am, desc(mpg)) %>% select(am, mpg)  # ascending am, descending mpg
# FIX: original called muatate() (typo), which errors at runtime
mtcars %>% mutate(rn = rownames(mtcars)) %>% select(rn, mpg)  # display rownames with mpg
mtcars %>% slice(c(1, 5, 7))
mtcars %>% sample_n(3)
mtcars %>% sample_frac(.2)
mtcars %>% select(sample(x = c(1:11), size = 2)) %>% head
sample(x = 1:11, size = 2)
mtcars %>% mutate(newmpg = mpg * 1.1)
mutate(mtcars, newmpg = mpg * 1.2)

# Type of Tx (transmission), mean(mpg)
mtcars %>% group_by(am) %>% summarise(MeanMPG = mean(mpg))
mtcars %>% group_by(am) %>% summarise(MeanMPG = mean(mpg), MaxHP = max(hp), MinWT = min(wt))
mtcars %>% group_by(gear, cyl) %>% summarise(MeanMPG = mean(mpg))
# Defines functions for data cleaning (Kaggle BNP pipeline).
# Toon Roge
#
# NOTE(review): functions attach their dependencies lazily with require()
# (data.table, caret, mice, missForest, randomForest, glmnet, ...); the
# original style is kept to avoid hard failures when a package is absent
# at source() time. `data` arguments are assumed to be data.tables unless
# noted otherwise.

# Cap the lowest and highest values of a numeric vector at the given
# quantiles; non-numeric input is returned unchanged.
cap_vector <- function(x, cap = c(0.02, 0.98)) {
  if (is.numeric(x)) {
    q <- quantile(x, cap, na.rm = TRUE)
    min <- as.numeric(q[1])
    max <- as.numeric(q[2])
    x <- pmin(x, max)
    x <- pmax(x, min)
  }
  return(x)
}

# Transform numeric columns towards a standard normal (BoxCox + center +
# scale) after capping extremes; appends "_STD_NORM" copies to `data`.
# Columns named in `ignore` are left untouched.
std_normal_transform <- function(data, ignore) {
  require(caret); require(magrittr)
  # get numeric vars to transform
  numeric_vars <- names(data)[sapply(data, is.numeric)]
  numeric_vars <- numeric_vars[!numeric_vars %in% ignore]
  # cap numeric variables
  new_data <- lapply(data[, numeric_vars, with = FALSE], cap_vector) %>% as.data.table()
  # fit the transformation on a ~20% random sample for speed
  new_data$rand <- runif(nrow(new_data))
  preProcValues <- preProcess(new_data[rand < 0.2, numeric_vars, with = F],
                              method = c("BoxCox", "center", "scale"))
  new_data <- predict(preProcValues, new_data); gc(); new_data$rand <- NULL
  # change column names + add new features to data
  colnames(new_data) <- gsub("_ORIGINAL", "", paste(colnames(new_data), "STD_NORM", sep = "_"))
  return(cbind(data, new_data))
}

# Group rarely occurring levels of a categorical vector into "Other".
# A level survives when its frequency reaches max(n, frequency of the
# max_length-th most frequent level).
replace_rare_levels <- function(x, n = 750, max_length = 6) {
  require(magrittr)
  # work on character to avoid factor-level surprises
  x <- as.character(x)
  vec <- summary(as.factor(x), maxsum = length(x)) %>% sort(decreasing = T)
  value <- max(vec[max_length], n, na.rm = T)
  names_to_replace <- names(which(vec < value))
  return(ifelse(x %in% names_to_replace, "Other", x) %>% as.factor())
}

# Apply replace_rare_levels() to every categorical column (minus columns
# matching ignore_pattern) and append the grouped copies with a suffix
# that records the parameters used.
group_low_categorical <- function(data, n = 750, max_length = 6, ignore_pattern = NULL) {
  # get categorical vars to transform
  categorical_vars <- names(data)[!sapply(data, is.numeric)]
  ignore_vars <- c()
  for (pattern in ignore_pattern) {
    ignore_vars <- c(ignore_vars, grep(pattern, names(data), value = TRUE))
  }
  categorical_vars <- categorical_vars[!categorical_vars %in% ignore_vars]
  # group low-occurring categorical values in "Other"
  data_cat <- data[, categorical_vars, with = F]
  new_data <- lapply(data_cat, replace_rare_levels, n, max_length) %>% as.data.table()
  # change column names + add new features to data
  colnames(new_data) <- gsub("_ORIGINAL", "",
                             paste(colnames(new_data), "GROUP_LOW_CATEG",
                                   "n", as.character(n), "max", as.character(max_length), sep = "_"))
  return(cbind(data, new_data))
}

# Impute the columns matching keep_pattern with mice and append the
# imputed copies (suffix "_IMP_<method>").
impute_missings_mice <- function(data, keep_pattern, method = 'rf',
                                 m = 1, maxit = 1, sampsize = 0.025) {
  require(mice); require(magrittr)
  var_names <- c()
  for (pattern in keep_pattern) {
    var_names <- c(var_names, grep(pattern, names(data), value = TRUE))
  }
  data_imputation <- data[, var_names, with = F]
  imp <- mice(data_imputation, m = m, maxit = maxit, method = method, sampsize = sampsize)
  new_data <- complete(imp, 'long', inc = FALSE) %>% as.data.table()
  colnames(new_data) <- paste(colnames(new_data), "IMP", method, sep = "_")
  return(cbind(data, new_data))
}

# Impute numeric columns matching keep_pattern with a caret preProcess
# model (default bagged trees) fitted on a ~15% sample; categorical
# columns are coerced to numeric IDs for fitting and dropped afterwards.
impute_missings_caret <- function(data, keep_pattern, method = "bagImpute") {
  require(caret); require(magrittr)
  var_names <- c()
  for (pattern in keep_pattern) {
    var_names <- c(var_names, grep(pattern, names(data), value = TRUE))
  }
  data_imputation <- data[, var_names, with = F]
  categorical_vars <- names(data_imputation)[!sapply(data_imputation, is.numeric)]
  data_imputation[, (categorical_vars) := lapply(.SD, as.numeric), .SDcols = categorical_vars]
  # bagging trees imputation, fitted on a random subsample
  data_imputation$rand <- runif(nrow(data_imputation))
  preProcValues <- preProcess(data_imputation[rand < 0.15], method = method)
  new_data <- predict(preProcValues, data_imputation); gc(); new_data$rand <- NULL
  new_data <- new_data[, -categorical_vars, with = F]
  colnames(new_data) <- paste(colnames(new_data), "IMP", method, sep = "_")
  return(cbind(data, new_data))
}

# Impute the columns matching keep_pattern with missForest.
# FIX: the original body hard-coded keep_pattern, silently ignoring the
# argument, and never returned the result explicitly; the former
# hard-coded value is now the default so old call sites behave the same.
impute_missForest <- function(data, keep_pattern = c("GROUP_LOW_CATEG", "STD_NORM")) {
  require(missForest); require(magrittr); require(doParallel)
  var_names <- c()
  for (pattern in keep_pattern) {
    var_names <- c(var_names, grep(pattern, names(data), value = TRUE))
  }
  data_imputation <- data[, var_names, with = F]
  registerDoParallel(cores = 4)
  rf_imp <- missForest(data_imputation, ntree = 10, maxiter = 1, mtry = 0.25,
                       sampsize = rep(0.025, ncol(data_imputation)),
                       verbose = TRUE, parallelize = "forests")
  return(rf_imp$ximp)
}

# Custom random-forest imputation: encode NA as -999, then per missing
# column fit a small RF on observed rows (using only predictors that are
# mostly present on the rows to impute) and predict the -999 cells.
impute_customrf <- function(data, keep_pattern, prop = 0.20, ntree = 20, colsample = 0.10) {
  require(randomForest); require(magrittr); require(doParallel)
  # extract data for imputation
  var_names <- c()
  for (pattern in keep_pattern) {
    var_names <- c(var_names, grep(pattern, names(data), value = TRUE))
  }
  data_imputation <- data[, var_names, with = F]
  # transform categorical vars to numeric IDs
  categorical_vars <- names(data_imputation)[!sapply(data_imputation, is.numeric)]
  data_imputation[, (categorical_vars) := lapply(.SD, as.numeric), .SDcols = categorical_vars]
  # encode missing values as the sentinel -999
  data_imputation[is.na(data_imputation)] <- -999
  new_data <- data_imputation
  # columns that actually contain missings
  var_names <- which(sapply(data_imputation, function(x) sum(x == -999)) > 0) %>% names()
  for (var in var_names) {
    # predictors: columns with a low missing share on the rows to impute
    cond <- which(data_imputation[, var, with = F] == -999)
    cols <- which(colSums(data_imputation[cond] == -999) / nrow(data_imputation) < prop) %>% names()
    cols <- c(cols, var)
    # training rows: observed values, ~20% random sample
    rows <- which(!(data_imputation[, var, with = F] == -999) & runif(nrow(data_imputation)) < 0.2)
    rf <- randomForest(x = as.data.frame(data_imputation[rows, cols, with = F]),
                       y = as.data.frame(data_imputation[rows, var, with = F])[[var]],
                       ntree = ntree, mtry = round(length(cols) * colsample))
    new_data[[var]] <- ifelse(new_data[[var]] == -999, predict(rf, new_data), new_data[[var]])
    print(paste("variable: ", var))
  }
  colnames(new_data) <- paste(colnames(new_data), "IMP_customRF", sep = "_")
  return(cbind(data, new_data))
}

# Add per-row missingness features: the share of NA columns
# (MISSING_COUNT) and a factor label for each distinct share
# (MISSING_LABEL). Columns matching ignore_pattern are excluded.
missings_per_row <- function(data, ignore_pattern) {
  ignore_vars <- c()
  for (pattern in ignore_pattern) {
    ignore_vars <- c(ignore_vars, grep(pattern, names(data), value = TRUE))
  }
  missData <- data[, -ignore_vars, with = F]
  data$MISSING_COUNT <- rowSums(is.na(missData)) / ncol(missData)
  data$MISSING_LABEL <- as.factor(as.numeric(as.factor(data$MISSING_COUNT)))
  return(data)
}

# Suffix every column name (except those in `ignore`) with "_ORIGINAL"
# so raw features can be told apart from engineered ones.
flag_original_variables <- function(data, ignore) {
  names(data)[!names(data) %in% ignore] <- paste(names(data)[!names(data) %in% ignore],
                                                 "ORIGINAL", sep = "_")
  return(data)
}

# Replace empty-string values in categorical columns by "MISSING".
replace_blank_categorical <- function(data) {
  categorical_vars <- names(data)[!sapply(data, is.numeric)]
  for (var in categorical_vars) {
    data[[var]][data[[var]] == ""] <- "MISSING"
  }
  return(data)
}

# Compare the predictive power of two candidate features with small
# single-feature random forests. Returns 2 if var2 beats var1, else 1.
get_best_feature <- function(target, var1, var2) {
  require(randomForest)
  db <- data.frame(target = target, var1 = var1, var2 = var2) %>% na.omit()
  rf1 <- randomForest(y = as.factor(db$target), x = db[, 2] %>% as.data.frame(), ntree = 75)
  rf2 <- randomForest(y = as.factor(db$target), x = db[, 3] %>% as.data.frame(), ntree = 75)
  err2 <- rf2$err.rate[, 1] %>% min()
  err1 <- rf1$err.rate[, 1] %>% min()
  x <- err2 < err1
  print(paste("Error rates:", as.character(round(err1, 4)), as.character(round(err2, 4))))
  return(x + 1)  # 2 if the second feature is best, 1 otherwise
}

# For two correlated variables: keep the residuals of lm(var1 ~ var2) as
# a new feature and drop whichever original variable predicts `target`
# worse (on a subsample). No-op when one of the two is already deleted.
remove_two_correlated_vars <- function(data, var1, var2) {
  if (is.null(data[[var1]]) | is.null(data[[var2]])) {
    print("Already deleted one variable")
    print("==================================")
    return(data)
  } else {
    data$x <- data[[var1]]
    data$y <- data[[var2]]
    name <- paste(var1, var2, "_RESLM", sep = "")
    data[[name]] <- predict(lm(x ~ y, data), data) - data$x
    data$x <- NULL; data$y <- NULL
    best_feature <- get_best_feature(subset(data, rand < 20)$target,
                                     subset(data, rand < 20)[[var1]],
                                     subset(data, rand < 20)[[var2]])
    if (best_feature == 2) {
      data[[var1]] <- NULL
      print(paste("delete", var1))
    } else {
      data[[var2]] <- NULL
      print(paste("delete", var2))
    }
    print("==================================")
    return(data)
  }
}

# Hand-crafted cleaning driven by forum insights on the BNP data
# (duplicate columns, near-exact linear relations, exact sums).
manual_cleaning <- function(data) {
  ### FIRST: MANUALLY TREAT SOME INSIGHT FROM FORUM ###
  # duplicate variable v91 and v107
  data$v107 <- NULL  # keep v91

  # v71 and v75 should be pasted together, because the one is almost a
  # perfect subset of the other
  data$v71v75_PASTE <- paste(data$v71, data$v75, sep = "")
  data$v71 <- NULL
  data$v75 <- NULL

  # v50 - 16.78 = 2.041*v10 - 2.779*v12  --> almost perfect LM fit
  # v10 + 18.58 = 1.571*v34 + 0.9936*v40 --> almost perfect LM fit
  # ratios should be much more informative in such cases
  data$v50v12_LM_RATIO <- ((16.78 - data$v50) / (data$v12 * 2.779)) %>% pmax(0) %>% pmin(1)
  data$v34v10_LM_RATIO <- ((data$v34 * 1.571) / (data$v10 + 18.58)) %>% pmax(0) %>% pmin(1)
  data$v12 <- NULL; data$v10 <- NULL
  data$v34 <- NULL; data$v40 <- NULL

  # v72 = v129 + v38 + v62 (exact); encode each component as a share of
  # v72, with the ratio-of-means fallback when v72 == 0
  data$v72_ISZERO <- ifelse(data$v72 == 0, 1, 0)
  data$v129v72_EX_RATIO <- ifelse(data$v72 == 0,
                                  mean(data$v129, na.rm = TRUE) / mean(data$v72, na.rm = TRUE),
                                  data$v129 / data$v72)
  data$v38v72_EX_RATIO <- ifelse(data$v72 == 0,
                                 mean(data$v38, na.rm = TRUE) / mean(data$v72, na.rm = TRUE),
                                 data$v38 / data$v72)
  # FIX: original referenced data$v162 (nonexistent column -> NaN fallback);
  # the numerator of this ratio is v62, consistent with the two lines above.
  data$v62v72_EX_RATIO <- ifelse(data$v72 == 0,
                                 mean(data$v62, na.rm = TRUE) / mean(data$v72, na.rm = TRUE),
                                 data$v62 / data$v72)
  data$v62 <- NULL; data$v38 <- NULL; data$v129 <- NULL
  return(data)
}

# One pass of correlation-based feature removal: for every pair of
# numeric features with correlation in (cor_value, 1), keep one plus the
# lm residual feature via remove_two_correlated_vars().
# NOTE(review): the filter keeps only positive correlations although the
# ordering uses abs(value); strong negative correlations are never
# treated -- confirm this is intentional before changing it.
removing_correlated_features <- function(data, cor_value = 0.85) {
  # 1. extract numeric variables
  numeric_vars <- names(data)[sapply(data, is.numeric)]
  numeric_vars <- numeric_vars[!numeric_vars %in% c("ID", "target", "rand")]
  # 2. construct vectors with correlated features
  print("Calculation of correlation Matrix")
  corm <- cor(x = data[, numeric_vars, with = F], use = "pairwise.complete.obs")
  cord <- melt(corm) %>%
    dplyr::arrange(desc(abs(value))) %>%
    dplyr::filter(value < 1 & value > cor_value)
  var1 <- as.character(cord$Var1); var2 <- as.character(cord$Var2)
  # 3. remove correlated features and add the residual as a variable
  if (length(var1) == 0) {
    return(data)
  } else {
    for (i in 1:nrow(cord)) {
      print(paste(as.character(var1[i]), as.character(var2[i]),
                  as.character(round(cord$value[i], 4))))
      data <- data %>% remove_two_correlated_vars(var1[i], var2[i])
    }
    return(data)
  }
}

# Full cleaning pipeline: manual cleaning, then repeated automatic
# removal of correlated features until the column set stabilizes
# (residual features can themselves be correlated with others).
do_data_cleaning <- function(data, cor_value = 0.8) {
  # 1. manual cleaning
  data <- manual_cleaning(data)
  # 2. automatic removal: if x and y are correlated, replace y by the
  #    residuals of lm(y ~ x); loop until a fixed point is reached
  test <- names(data); i <- 1; print(i)
  data <- removing_correlated_features(data, cor_value = cor_value)
  while (!isTRUE(all.equal(names(data), test))) {
    test <- names(data); i <- i + 1
    data <- removing_correlated_features(data, cor_value = cor_value); print(i)
  }
  return(data)
}

# Replace categorical levels by elasticnet coefficients.
# Detects the categorical variables matching keep_pattern (excluding
# ignore_pattern), fits an elasticnet logistic regression on two folds of
# the training data (alpha = 0.25 by default), and appends per-fold
# coefficient columns plus a RIDGE_FOLD indicator to db.
transform_cat_to_ridge_coefs <- function(db, cores = 4, alpha = 0.25,
                                         keep_pattern, ignore_pattern,
                                         ID = "ID",
                                         random = "rand",  # 1-100 for training, > 100 for test
                                         target = "target") {
  require(glmnet); require(caret); require(doParallel)
  require(reshape2); require(data.table); require(dplyr)
  vars <- c()
  for (pattern in keep_pattern) {
    vars <- c(vars, grep(pattern, names(db), value = TRUE))
  }
  ignore_vars <- c()
  for (pattern in ignore_pattern) {
    ignore_vars <- c(ignore_vars, grep(pattern, names(db), value = TRUE))
  }
  vars <- vars[!vars %in% ignore_vars]
  vars_to_keep <- c(ID, random, target, vars)
  data <- db[, vars_to_keep, with = F]
  dummies <- dummyVars(target ~ ., data = data)
  dummieData <- predict(dummies, newdata = data) %>% as.data.table() %>% cbind(data[, target, with = F])
  no_vars <- c(ID, random, target, "fold")
  dummieData$test <- ifelse(dummieData[[random]] > 100, 1, 0)
  dummieData[[random]] <- ifelse(dummieData[[random]] > 100,
                                 100 * runif(nrow(dummieData)), dummieData[[random]])
  dummieData$fold <- ifelse(dummieData[[random]] < 50, 1, 2)
  registerDoParallel(cores = cores)
  # FIX: the original loop used `for (fold in 1:2)` and filtered with
  # `dummieData[fold == fold & ...]`; inside data.table `fold` resolves to
  # the column, so the condition was always TRUE and both models trained
  # on all rows. Use a distinct loop-variable name instead.
  for (f in 1:2) {
    print(paste("Running ridge regression fold:", as.character(f), sep = " "))
    glmnet <- cv.glmnet(
      x = as.matrix(dummieData[fold == f & test == 0, -no_vars, with = F]),
      y = as.matrix(dummieData[fold == f & test == 0, c("target"), with = F]),
      family = "binomial", alpha = alpha, parallel = TRUE)
    print("Fold has terminated.")
    coefs <- coef.cv.glmnet(glmnet, s = "lambda.min") %>% as.matrix()
    ridge_data <- data.frame(var = row.names(coefs), value = as.numeric(coefs[, 1])) %>% as.data.table()
    # keep only dummy coefficients ("variable.level" entries)
    keep <- grepl("\\.", as.character(ridge_data$var))
    ridge_data <- ridge_data[keep]
    ridge_data <- cbind(ridge_data,
                        colsplit(as.character(ridge_data$var), "\\.", c("variable", "level")))
    ridge_data$var <- NULL
    categorical_vars <- names(data)[!sapply(data, is.numeric)]
    for (var in categorical_vars) {
      print(paste("replacing the following categorical variable:", var, sep = " "))
      coef_table <- ridge_data[ridge_data$variable == var, c("value", "level"), with = F]
      names(coef_table)[names(coef_table) == "value"] <-
        paste(var, "RIDGE_COEF_FOLD", as.character(f), sep = "_")
      data <- merge(x = data, y = coef_table, by.x = var, by.y = "level", all.x = T)
    }
  }
  datafold <- dummieData[, c(ID, "fold"), with = F]
  names(datafold)[names(datafold) == "fold"] <- "RIDGE_FOLD"
  keep_pattern <- c("RIDGE_COEF_FOLD")
  vars <- c()
  for (pattern in keep_pattern) {
    vars <- c(vars, grep(pattern, names(data), value = TRUE))
  }
  vars <- c(vars, ID)
  data_ridge <- data[, vars, with = F]
  db <- merge(x = db, y = datafold, by = ID, all.x = T)
  db <- merge(x = db, y = data_ridge, by = ID, all.x = T)
  return(db)
}

# From kaggle scripts: roughfix applied column-wise to a data frame.
na.roughfix2 <- function(object, ...) {
  res <- lapply(object, roughfix)
  structure(res, class = "data.frame", row.names = seq_len(nrow(object)))
}

# From kaggle scripts: replace NA by the column median (numeric) or the
# most frequent level (factor).
roughfix <- function(x) {
  missing <- is.na(x)
  if (!any(missing)) return(x)
  if (is.numeric(x)) {
    x[missing] <- median.default(x[!missing])
  } else if (is.factor(x)) {
    freq <- table(x)
    x[missing] <- names(freq)[which.max(freq)]
  } else {
    stop("na.roughfix only works for numeric or factor")
  }
  x
}

# Convert v22-style letter codes to an integer (hexavigesimal base 26).
az_to_int <- function(az) {
  xx <- strsplit(tolower(az), "")[[1]]
  pos <- match(xx, letters[(1:26)])
  result <- sum(pos * 26^rev(seq_along(xx) - 1))
  return(result)
}

# Cluster the numeric variables (kmeans or hierarchical, ClustOfVar),
# run a PCA per cluster and a weighted global PCA on the per-cluster
# components; returns the non-numeric columns plus all PCA features.
# Rows with missing values are handled as supplementary individuals.
clust_pca <- function(data, idvar, ignore, n_clust, clust_method = "hclust") {
  require(data.table); require(dplyr); require(ClustOfVar)
  require(FactoMineR); require(stringr)
  # FactoMineR doesn't work with data.table
  data <- as.data.frame(data)
  # select data for pca analysis
  numeric_vars <- names(data)[sapply(data, is.numeric)]
  output_data <- data[!names(data) %in% setdiff(numeric_vars, c(idvar, ignore))]
  data <- data[names(data) %in% numeric_vars]
  # incomplete rows become supplementary individuals in every PCA
  ind.sup <- which(!complete.cases(data))
  id <- data[[idvar]]
  data <- data[!names(data) %in% c(idvar, ignore)]
  row.names(data) <- id
  # define groups of clustered variables
  print("Clustering variables...")
  if (clust_method == "kmeans") {
    res <- kmeansvar(na.omit(data), init = n_clust)$var  # list, row.names are variables
  } else if (clust_method == "hclust") {
    hclust <- hclustvar(na.omit(data))
    res <- cutreevar(hclust, n_clust)$var  # list, row.names are variables
  } else {
    print("clust_method should be kmeans or hclust")
  }
  # do a pca on each cluster - if more than 1 variable
  print("PCA for each cluster...")
  new_data <- NULL
  colweight <- c()
  for (i in 1:n_clust) {
    print(i)
    names <- row.names(res[[i]])  # variables in this cluster
    n <- length(names)
    if (n > 1) {  # pca needed
      pca <- PCA(data[names(data) %in% names], graph = F, ncp = n, ind.sup = ind.sup)
      # keep enough components to explain > 98% of the cluster variance
      n_vars <- which(pca$eig$`cumulative percentage of variance` > 98) %>% min()
      colweight <- c(colweight, rep(1 / sqrt(pca$eig$`eigenvalue`[1]), n_vars))
      out1 <- pca$ind$coord
      out2 <- pca$ind.sup$coord
      out1 <- out1[, 1:n_vars] %>% as.data.frame()
      out2 <- out2[, 1:n_vars] %>% as.data.frame()
      out1$dist <- pca$ind$dist
      out2$dist <- pca$ind.sup$dist
      out <- rbind(out1, out2)
      names(out)[1] <- "Dim.1"  # because sometimes the name got changed
      prefix <- paste(clust_method, str_pad(i, 2, pad = "0"), "pca", sep = "_")
      names(out) <- paste(prefix, names(out), sep = "_")
      out[[idvar]] <- row.names(out)
    } else {  # singleton cluster: keep the original column
      out <- data[names(data) %in% names]
      colweight <- c(colweight, 1)
      prefix <- paste(clust_method, str_pad(i, 2, pad = "0"), "org", sep = "_")
      names(out) <- paste(prefix, names(out), sep = "_")
      out[[idvar]] <- row.names(out)
    }
    if (is.null(new_data)) {
      new_data <- out
    } else {
      # cbind did not work well with data.table
      new_data <- merge(new_data, out, by = idvar, all.x = T, sort = F)
    }
  }
  # add per-cluster features to the output
  output_data <- merge(output_data, new_data, by = idvar, all.x = T)
  # do again a PCA, but on the per-cluster results
  print("PCA on previous pca output...")
  keep_pattern <- c("pca_Dim", "_org")
  var_names <- c()
  for (pattern in keep_pattern) {
    var_names <- c(var_names, grep(pattern, names(new_data), value = TRUE))
  }
  new_data <- new_data[names(new_data) %in% var_names]
  pca <- PCA(new_data, col.w = colweight, graph = F, ncp = ncol(new_data), ind.sup = ind.sup)
  n_vars <- which(pca$eig$`cumulative percentage of variance` > 98) %>% min()
  out1 <- as.data.frame(pca$ind$coord)[, 1:n_vars]
  # FIX: supplementary rows previously reused pca$ind$coord / pca$ind$dist;
  # they must come from pca$ind.sup, as in the per-cluster loop above.
  out2 <- as.data.frame(pca$ind.sup$coord)[, 1:n_vars]
  out1$dist <- pca$ind$dist
  out2$dist <- pca$ind.sup$dist
  out <- rbind(out1, out2)
  prefix <- paste("gpca", sep = "_")
  names(out) <- paste(prefix, names(out), sep = "_")
  out[[idvar]] <- row.names(out)
  # add global pca data to output data
  output_data <- merge(output_data, out, by = idvar, all.x = T, sort = F)
  return(as.data.table(output_data))
}

# Merge the per-model stacking predictions (CSV per model name) into one
# data frame keyed by ID and add row-wise meta-features.
stacking_features <- function(names) {
  # put all predictions in one df
  for (i in 1:length(names)) {
    path <- paste("./finished_models_output/", names[i], "_stacking.csv", sep = "")
    db <- read.csv(path)
    db$rand <- NULL
    db$fold <- NULL
    db$X <- NULL
    names(db)[names(db) != "ID"] <- names[i]
    if (i == 1) {
      out <- db
    } else {
      out <- merge(out, db, "ID")
    }
  }
  # meta-features across the model predictions
  out$stacker_pctgsd <- rowSds(out[, names] %>% as.matrix(), na.rm = TRUE) /
    rowMeans(out[, names] %>% as.matrix(), na.rm = TRUE)
  out$stacker_min <- rowQuantiles(out[, names] %>% as.matrix(), na.rm = TRUE, probs = 0)
  # FIX: max and median used probs = 0 (copy-paste), i.e. all three
  # features were the row minimum. Use probs = 1 and probs = 0.5.
  out$stacker_max <- rowQuantiles(out[, names] %>% as.matrix(), na.rm = TRUE, probs = 1)
  out$stacker_median <- rowQuantiles(out[, names] %>% as.matrix(), na.rm = TRUE, probs = 0.5)
  out$stacker_mean <- rowMeans(out[, names] %>% as.matrix(), na.rm = TRUE)
  return(out)
}

# Map each value of x to the (scaled, tie-randomized) frequency rank of
# its level; returns a numeric vector aligned with x.
get_rank_mapping <- function(x) {
  rank_mapping <- table(x) %>% as.data.frame()
  rank_mapping$rank <- round(1000 * rank(rank_mapping$Freq, ties.method = "random") /
                               nrow(rank_mapping), 2)
  rank_mapping$Freq <- NULL
  names(rank_mapping) <- c("var", "rank")
  x <- data.frame(x = x, i = 1:length(x))
  out <- merge(x, rank_mapping, by.x = "x", by.y = "var", sort = F)
  out <- out[order(out$i), ]  # restore the original row order
  out$x <- NULL; out$i <- NULL
  return(out$rank)
}

# Append frequency-rank encodings ("_CATRANK") for the given categorical
# columns (all categorical columns when vars is NULL).
transform_catvars_to_rank <- function(data, vars = NULL) {
  if (is.null(vars)) {
    vars <- names(data)[!sapply(data, is.numeric)]
  }
  new_data <- data[, vars, with = F]
  new_data <- sapply(new_data, get_rank_mapping) %>% as.data.table()
  colnames(new_data) <- paste(colnames(new_data), "CATRANK", sep = "_")
  cbind(data, new_data)
}
/data_manipulation_functions.R
no_license
toonroge/kaggleBNP
R
false
false
26,227
r
# defines functions for data cleaning # Toon Roge # takes a vector and caps lowest and highest values cap_vector <- function(x, cap = c(0.02, 0.98)) { if (is.numeric(x)) { q <- quantile(x, cap, na.rm = TRUE) min <- as.numeric(q[1]) max <- as.numeric(q[2]) x <- pmin(x, max) ; x <- pmax(x, min) } return(x) } # transforms towards standardnormal, but with first capping low and high levels std_normal_transform <- function(data, ignore) { require(caret) ; require(magrittr) # get numeric vars to transform numeric_vars <- names(data)[sapply(data, is.numeric)] numeric_vars <- numeric_vars[!numeric_vars %in% ignore] # cap numeric variables new_data <- lapply(data[, numeric_vars, with = FALSE], cap_vector) %>% as.data.table() # Standnormal transformation new_data$rand <- runif(nrow(new_data)) preProcValues <- preProcess(new_data[rand < 0.2, numeric_vars, with = F], method = c("BoxCox", "center", "scale")) new_data <- predict(preProcValues, new_data) ; gc() ; new_data$rand <- NULL # change column names + add new features to data colnames(new_data) <- gsub("_ORIGINAL", "", paste(colnames(new_data), "STD_NORM", sep = "_")) return(cbind(data, new_data)) } # groups low occuring values in categorical variable - for one vector replace_rare_levels <- function(x, n = 750, max_length = 6) { require(magrittr) # transform to character x <- as.character(x) # get max of n and max_length value vec <- summary(as.factor(x), maxsum = length(x)) %>% sort(decreasing = T) value <- max(vec[max_length], n, na.rm = T) # replace values with other names_to_replace <- names(which(vec < value)) return(ifelse(x %in% names_to_replace, "Other", x) %>% as.factor()) } # transforms all categorical variables in a data frame group_low_categorical <- function(data, n = 750, max_length = 6, ignore_pattern = NULL) { # get categorical vars to transform categorical_vars <- names(data)[!sapply(data, is.numeric)] ignore_vars <- c() for (pattern in ignore_pattern) { ignore_vars <- c(ignore_vars, grep(pattern, 
names(data), value = TRUE)) } categorical_vars <- categorical_vars[!categorical_vars %in% ignore_vars] # group low-occuring categorical variables in "other" data_cat <- data[, categorical_vars, with = F] new_data <- lapply(data_cat, replace_rare_levels, n, max_length) %>% as.data.table() # change column names + add new features to data colnames(new_data) <- gsub( "_ORIGINAL", "", paste( colnames(new_data), "GROUP_LOW_CATEG", "n", as.character(n), "max", as.character(max_length), sep = "_" ) ) return(cbind(data, new_data)) } impute_missings_mice <- function(data, keep_pattern, method = 'rf', m = 1, maxit = 1, sampsize = 0.025) { require(mice) ; require(magrittr) var_names <- c() for (pattern in keep_pattern) { var_names <- c(var_names, grep(pattern, names(data), value = TRUE)) } data_imputation <- data[, var_names, with = F] imp <- mice( data_imputation, m = m, maxit = maxit, method = method, sampsize = sampsize ) new_data <- complete(imp, 'long', inc = FALSE) %>% as.data.table() # change column names + add new features to data colnames(new_data) <- paste(colnames(new_data), "IMP", method, sep = "_") return(cbind(data, new_data)) } impute_missings_caret <- function(data, keep_pattern, method = "bagImpute") { require(caret) ; require(magrittr) var_names <- c() for (pattern in keep_pattern) { var_names <- c(var_names, grep(pattern, names(data), value = TRUE)) } data_imputation <- data[, var_names, with = F] categorical_vars <- names(data_imputation)[!sapply(data_imputation, is.numeric)] data_imputation[,(categorical_vars):= lapply(.SD, as.numeric), .SDcols = categorical_vars] # bagging trees imputation data_imputation$rand <- runif(nrow(data_imputation)) preProcValues <- preProcess(data_imputation[rand < 0.15], method = method) new_data <- predict(preProcValues, data_imputation) ; gc() ; new_data$rand <- NULL new_data <- new_data[,-categorical_vars, with = F] # change column names + add new features to data colnames(new_data) <- paste(colnames(new_data), "IMP", 
method, sep = "_") return(cbind(data, new_data)) } impute_missForest <- function(data, keep_pattern) { require(missForest) ; require(magrittr) ; require(doParallel) keep_pattern = c("GROUP_LOW_CATEG", "STD_NORM") var_names <- c() for (pattern in keep_pattern) { var_names <- c(var_names, grep(pattern, names(data), value = TRUE)) } data_imputation <- data[, var_names, with = F] registerDoParallel(cores = 4) rf_imp <- missForest( data_imputation, ntree = 10, maxiter = 1, mtry = 0.25, sampsize = rep(0.025, ncol(data_imputation)), verbose = TRUE, parallelize = "forests" ) newdata <- rf_imp$ximp } impute_customrf <- function(data, keep_pattern, prop = 0.20, ntree = 20, colsample = 0.10) { require(randomForest) ; require(magrittr) ; require(doParallel) # extract data for imputation var_names <- c() for (pattern in keep_pattern) { var_names <- c(var_names, grep(pattern, names(data), value = TRUE)) } data_imputation <- data[, var_names, with = F] # transform categorical vars to numeric IDs categorical_vars <- names(data_imputation)[!sapply(data_imputation, is.numeric)] data_imputation[,(categorical_vars):= lapply(.SD, as.numeric), .SDcols = categorical_vars] # put missing values as -999 data_imputation[is.na(data_imputation)] <- -999 # create new data table new_data <- data_imputation var_names <- which(sapply(data_imputation, function(x) sum(x == -999)) > 0) %>% names() for (var in var_names) { # select columns for fitting rf cond <- which(data_imputation[, var, with = F] == -999) cols <- which(colSums(data_imputation[cond] == -999) / nrow(data_imputation) < prop) %>% names() cols <- c(cols, var) # select rows for fitting rf rows <- which(!(data_imputation[, var, with = F] == -999) & runif(nrow(data_imputation)) < 0.2) rf <- randomForest( x = as.data.frame(data_imputation[rows, cols, with = F]), y = as.data.frame(data_imputation[rows, var, with = F])[[var]], ntree = ntree, mtry = round(length(cols) * colsample) ) new_data[[var]] <- ifelse(new_data[[var]] == -999, 
predict(rf, new_data), new_data[[var]]) print(paste("variable: ", var)) } # change column names + add new features to data colnames(new_data) <- paste(colnames(new_data), "IMP_customRF", sep = "_") return(cbind(data, new_data)) } missings_per_row <- function(data, ignore_pattern) { ignore_vars <- c() for (pattern in ignore_pattern) { ignore_vars <- c(ignore_vars, grep(pattern, names(data), value = TRUE)) } missData <- data[,-ignore_vars, with = F] data$MISSING_COUNT <- rowSums(is.na(missData)) / ncol(missData) data$MISSING_LABEL <- as.factor(as.numeric(as.factor(data$MISSING_COUNT))) return(data) } flag_original_variables <- function(data, ignore) { names(data)[!names(data) %in% ignore] <- paste(names(data)[!names(data) %in% ignore], "ORIGINAL", sep = "_") return(data) } replace_blank_categorical <- function(data) { # get categorical vars to transform categorical_vars <- names(data)[!sapply(data, is.numeric)] for (var in categorical_vars) { data[[var]][data[[var]] == ""] <- "MISSING" } return(data) } get_best_feature <- function(target, var1, var2){ require(randomForest) db <- data.frame(target = target, var1 = var1, var2 = var2) %>% na.omit() rf1 <- randomForest(y = as.factor(db$target), x = db[,2] %>% as.data.frame(), ntree = 75 ) rf2 <- randomForest(y = as.factor(db$target), x = db[,3] %>% as.data.frame(), ntree = 75 ) err2 <- rf2$err.rate[,1] %>% min() err1 <- rf1$err.rate[,1] %>% min() x <- err2 < err1 print(paste("Error rates:", as.character(round(err1, 4)), as.character(round(err2, 4)))) return(x + 1) # returns 2 if second features is best, 1 if first feature is best } remove_two_correlated_vars <- function(data, var1, var2) { # This function takes data.table and the names of two corr. 
variables as input # next it checks if both variables are present # if so it will fit a linear model and remove one variables by the residuals of this linear model if (is.null(data[[var1]]) | is.null(data[[var2]])) { print("Already deleted one variable") print("==================================") return(data) } else{ data$x <- data[[var1]] data$y <- data[[var2]] name <- paste(var1, var2, "_RESLM", sep = "") data[[name]] <- predict(lm(x ~ y, data), data) - data$x data$x <- NULL ; data$y <- NULL best_feature <- get_best_feature(subset(data, rand < 20)$target, subset(data, rand < 20)[[var1]], subset(data, rand < 20)[[var2]]) if (best_feature == 2) { data[[var1]] <- NULL print(paste("delete", var1)) } else{ data[[var2]] <- NULL print(paste("delete", var2)) } print("==================================") return(data) } } manual_cleaning <- function(data) { ### FIRST: MANUALLY TREAT SOME INSIGHT FROM FORUM ### # duplicate variable v91 and v107 data$v107 <- NULL #keep v91 # v71 and v75 should be pasted together, because the one is almost perfect subset from the other data$v71v75_PASTE <- paste(data$v71, data$v75, sep = "") data$v71 <- NULL data$v75 <- NULL # v50 - 16,78 = 2,041*v10 + -2,779*v12 --> Almost perfect fit with LM # v10 + 18.58 = 1.571 * v34 + 0.9936 * v40 --> Almost perfect fir with LM # I think ratios should be much more informative in such case data$v50v12_LM_RATIO <- ((16.78 - data$v50) / (data$v12 * 2.779)) %>% pmax(0) %>% pmin(1) data$v34v10_LM_RATIO <- ((data$v34 * 1.571) / (data$v10 + 18.58)) %>% pmax(0) %>% pmin(1) data$v12 <- NULL ; data$v10 <-NULL data$v34 <- NULL ; data$v40 <- NULL # v72 = v129 + v38 + v62 (Exact) data$v72_ISZERO <- ifelse(data$v72 == 0, 1, 0) data$v129v72_EX_RATIO <- ifelse( data$v72 == 0, mean(data$v129, na.rm = T) / mean(data$v72, na.rm = T), data$v129 / data$v72 ) data$v38v72_EX_RATIO <- ifelse( data$v72 == 0, mean(data$v38, na.rm = T) / mean(data$v72, na.rm = T), data$v38 / data$v72 ) data$v62v72_EX_RATIO <- ifelse( data$v72 == 
0, mean(data$v162, na.rm = T) / mean(data$v72, na.rm = T), data$v62 / data$v72 ) data$v62 <- NULL ; data$v38 <- NULL ; data$v129 <- NULL return(data) } removing_correlated_features <- function(data, cor_value = 0.85){ # variables who are really strongly correlated --> strategy keep 1 + the noise of lm(v1 ~v2) # 1. extract numeric variables numeric_vars <- names(data)[sapply(data, is.numeric)] numeric_vars <- numeric_vars[!numeric_vars %in% c("ID", "target", "rand")] # 2. construct vectors with correlated features print("Calculation of correlation Matrix") corm <- cor(x = data[, numeric_vars, with = F], use = "pairwise.complete.obs") cord <- melt(corm) %>% dplyr::arrange(desc(abs(value))) %>% dplyr::filter(value < 1 & value > cor_value) var1 <- as.character(cord$Var1) ; var2 <- as.character(cord$Var2) # 3. remove correlated features and add the noise as a variable if (length(var1) == 0){ return(data) } else{ for (i in 1:nrow(cord)){ print(paste(as.character(var1[i]), as.character(var2[i]), as.character(round(cord$value[i], 4)))) data <- data %>% remove_two_correlated_vars(var1[i], var2[i]) } return(data) } } do_data_cleaning <- function(data, cor_value = 0.8){ # 1. Manual cleaning data <- manual_cleaning(data) #2. 
Automatic removal: if x and y are correlated; replace by x and residuals of lm(y ~ x) # --> Use loop, because noise can be explained by other features test <- names(data) ; i <- 1 ; print(i) data <- removing_correlated_features(data, cor_value = cor_value) while(!isTRUE(all.equal(names(data), test))){ test <- names(data) ; i <- i + 1 data <- removing_correlated_features(data, cor_value = cor_value) ; print(i) } return(data) } transform_cat_to_ridge_coefs <- function(db, cores = 4, alpha = 0.25, keep_pattern, ignore_pattern, ID = "ID", random = "rand", # need to be 1-100 for training and > 100 for test target = "target"){ # takes a database as input # detects the categorical variables in the keep_pattern # excluding the ones in ignore_pattern # fits elasticnet regression on two folds of the data (75pct ridge as default) # returns the input data, but with elasticnet predictions for each fold added # + a variable indicating to which fold each observation belonged during the fitting require("glmnet") require("caret") require("doParallel") require("reshape2") require("data.table") require("dplyr") vars <- c() for (pattern in keep_pattern) { vars <- c(vars, grep(pattern, names(db), value = TRUE)) } ignore_vars <- c() for (pattern in ignore_pattern) { ignore_vars <- c(ignore_vars, grep(pattern, names(db), value = TRUE)) } vars <- vars[!vars %in% ignore_vars] vars_to_keep <- c(ID, random, target, vars) data <- db[, vars_to_keep, with = F] dummies <- dummyVars(target ~ ., data = data) dummieData <- predict(dummies, newdata = data) %>% as.data.table() %>% cbind(data[,target, with = F]) no_vars <- c(ID, random, target, "fold") dummieData$test <- ifelse(dummieData[[random]] > 100, 1, 0) dummieData[[random]] <- ifelse(dummieData[[random]] > 100, 100 * runif(nrow(dummieData)), dummieData[[random]]) dummieData$fold <- ifelse(dummieData[[random]] < 50, 1, 2) registerDoParallel(cores = cores) for (fold in 1:2){ print(paste("Running ridge regression fold:", as.character(fold), sep = 
" ")) glmnet <- cv.glmnet( x = as.matrix(dummieData[fold == fold & test == 0, -no_vars, with = F]), y = as.matrix(dummieData[fold == fold & test == 0, c("target"), with = F]), family = "binomial", alpha = alpha, parallel = TRUE ) print("Fold has terminated.") coefs <- coef.cv.glmnet(glmnet, s="lambda.min") %>% as.matrix() ridge_data <- data.frame(var = row.names(coefs), value = as.numeric(coefs[, 1])) %>% as.data.table() keep <- grepl("\\.", as.character(ridge_data$var)) ridge_data <- ridge_data[keep] ridge_data <- cbind(ridge_data, colsplit(as.character(ridge_data$var), "\\.", c("variable", "level"))) ridge_data$var <- NULL categorical_vars <- names(data)[!sapply(data, is.numeric)] for (var in categorical_vars){ print(paste("replacing the following categorical variable:", var, sep = " ")) coef_table <- ridge_data[ridge_data$variable == var, c("value", "level"), with = F] names(coef_table)[names(coef_table) == "value"] = paste(var, "RIDGE_COEF_FOLD", as.character(fold), sep = "_") data <- merge(x = data, y = coef_table, by.x = var, by.y = "level", all.x = T) } } datafold <- dummieData[, c(ID, "fold"), with = F] names(datafold)[names(datafold) == "fold"] <- "RIDGE_FOLD" keep_pattern = c("RIDGE_COEF_FOLD") vars <- c() for (pattern in keep_pattern) { vars <- c(vars, grep(pattern, names(data), value = TRUE)) } vars <- c(vars, ID) data_ridge <- data[, vars, with = F] db <- merge(x = db, y = datafold, by = ID, all.x = T) db <- merge(x = db, y = data_ridge, by = ID, all.x = T) return(db) } # from kaggle scripts na.roughfix2 <- function (object, ...) 
{ res <- lapply(object, roughfix) structure(res, class = "data.frame", row.names = seq_len(nrow(object))) } #from kaggle scripts roughfix <- function(x) { missing <- is.na(x) if (!any(missing)) return(x) if (is.numeric(x)) { x[missing] <- median.default(x[!missing]) } else if (is.factor(x)) { freq <- table(x) x[missing] <- names(freq)[which.max(freq)] } else { stop("na.roughfix only works for numeric or factor") } x } # Convert v22 to hexavigesimal base - from kaggle scripts - not yet used az_to_int <- function(az) { xx <- strsplit(tolower(az), "")[[1]] pos <- match(xx, letters[(1:26)]) result <- sum( pos* 26^rev(seq_along(xx)-1)) return(result) } clust_pca <- function(data, idvar, ignore, n_clust, clust_method = "hclust"){ require(data.table) require(dplyr) require(ClustOfVar) require(FactoMineR) require(stringr) # FactoMineR doesn't work with data.table data <- as.data.frame(data) # select data for pca analysis numeric_vars <- names(data)[sapply(data, is.numeric)] output_data <- data[!names(data) %in% setdiff(numeric_vars, c(idvar, ignore))] data <- data[names(data) %in% numeric_vars] # data <- na.omit(data) ind.sup <- which(!complete.cases(data)) id <- data[[idvar]] data <- data[!names(data) %in% c(idvar, ignore)] row.names(data) <- id # define groups of clustered data print("Clustering variables...") if (clust_method == "kmeans"){ res <- kmeansvar(na.omit(data), init = n_clust)$var # list, row.names are variables } else if (clust_method == "hclust"){ hclust <- hclustvar(na.omit(data)) res <- cutreevar(hclust, n_clust)$var # list, row.names are variables } else{ print("clust_method should be kmeans or hclust") } # do a pca on each cluster - if more then 1 dimension print("PCA for each cluster...") new_data <- NULL colweight <- c() for (i in 1:n_clust){ print(i) names <- row.names(res[[i]]) # extract vars in cluster n <- length(names) if (n > 1){ # pca needed pca <- PCA(data[names(data) %in% names], graph = F, ncp = n, ind.sup = ind.sup) n_vars <- 
which(pca$eig$`cumulative percentage of variance` > 98) %>% min() colweight <- c(colweight, rep(1 / sqrt(pca$eig$`eigenvalue`[1]), n_vars)) # extract vars of pca out1 <- pca$ind$coord out2 <- pca$ind.sup$coord out1 <- out1[, 1:n_vars] %>% as.data.frame() out2 <- out2[, 1:n_vars] %>% as.data.frame() out1$dist <- pca$ind$dist out2$dist <- pca$ind.sup$dist out <- rbind(out1, out2) names(out)[1] <- "Dim.1" # because sometimes name got changed prefix <- paste(clust_method, str_pad(i, 2, pad = "0"), "pca", sep ="_") names(out) <- paste(prefix, names(out), sep = "_") out[[idvar]] <- row.names(out) } else{ # no pca needed out <- data[names(data) %in% names] colweight <- c(colweight, 1) prefix <- paste(clust_method, str_pad(i, 2, pad = "0"), "org", sep ="_") names(out) <- paste(prefix, names(out), sep = "_") out[[idvar]] <- row.names(out) } if (is.null(new_data)){ new_data <- out } else{ # cbind did not work well with data.table new_data <- merge(new_data, out, by = idvar, all.x = T, sort = F) } } # add this data to output data table output_data <- merge(output_data, new_data, by = idvar, all.x=T) # do again a PCA, but on previous results print("PCA on previous pca output...") keep_pattern <- c("pca_Dim", "_org") var_names <- c() for (pattern in keep_pattern) { var_names <- c(var_names, grep(pattern, names(new_data), value = TRUE)) } new_data <- new_data[names(new_data) %in% var_names] pca <- PCA(new_data, col.w = colweight, graph = F, ncp = ncol(new_data), ind.sup = ind.sup) n_vars <- which(pca$eig$`cumulative percentage of variance` > 98) %>% min() out1 <- as.data.frame(pca$ind$coord)[, 1:n_vars] out2 <- as.data.frame(pca$ind$coord)[, 1:n_vars] out1$dist <- pca$ind$dist out2$dist <- pca$ind$dist out <- rbind(out1, out2) prefix <- paste("gpca", sep ="_") names(out) <- paste(prefix, names(out), sep = "_") out[[idvar]] <- row.names(out) # add global pca data to output data output_data <- merge(output_data, out, by = idvar, all.x=T, sort = F) 
return(as.data.table(output_data)) } stacking_features <- function(names){ # put all predictions in df for (i in 1:length(names)){ path <- paste("./finished_models_output/", names[i], "_stacking.csv", sep = "") db <- read.csv(path) db$rand <- NULL db$fold <- NULL db$X <- NULL names(db)[names(db) != "ID"] <- names[i] if (i == 1){ out <- db } else{ out <- merge(out, db, "ID") } } # get some extra features out$stacker_pctgsd <- rowSds(out[,names] %>% as.matrix(), na.rm=TRUE) / rowMeans(out[,names] %>% as.matrix(), na.rm=TRUE) out$stacker_min <- rowQuantiles(out[,names] %>% as.matrix(), na.rm=TRUE, probs = 0) out$stacker_max <- rowQuantiles(out[,names] %>% as.matrix(), na.rm=TRUE, probs = 0) out$stacker_median <- rowQuantiles(out[,names] %>% as.matrix(), na.rm=TRUE, probs = 0) out$stacker_mean <- rowMeans(out[,names] %>% as.matrix(), na.rm=TRUE) return(out) } get_rank_mapping <- function(x){ rank_mapping <- table(x) %>% as.data.frame() rank_mapping$rank <- round(1000 * rank(rank_mapping$Freq, ties.method = "random") / nrow(rank_mapping), 2) rank_mapping$Freq <- NULL names(rank_mapping) <- c("var", "rank") x <-data.frame(x = x, i = 1:length(x)) out <- merge(x, rank_mapping, by.x = "x", by.y = "var", sort = F) out <- out[order(out$i), ] out$x <- NULL ; out$i <- NULL return(out$rank) } transform_catvars_to_rank <- function(data, vars = NULL){ if(is.null(vars)){ vars <- names(data)[!sapply(data, is.numeric)] } new_data <- data[, vars, with = F] new_data <- sapply(new_data, get_rank_mapping) %>% as.data.table() colnames(new_data) <- paste(colnames(new_data), "CATRANK", sep = "_") cbind(data, new_data) }
#' Generates simulations of expected mortality by simulating the model coefficients. #' #' With the given fit from fit_attrib the function sim, from package arm as described in Gelman, Hill (2012) <doi:10.1017/CBO9780511790942>, is used to generate 500 simulations #' of all the coefficients, from there respective posterior distributions. #' This is then used to compute the expected response for all simulations and rows in the input dataset. #' # For more details see the help vignette: #' \code{vignette("intro", package="attrib")} #' #' @param fit A model fit created by fit_attrib #' @param data The data with either observed values or reference values. #' @param n_sim Number of simulations #' #' @examples #' #' response <- "deaths" #' fixef <- "pr100_ili_lag_1 + sin(2 * pi * (week - 1) / 52) + cos(2 * pi * (week - 1) / 52)" #' ranef <- " (pr100_ili_lag_1| season)" #' offset <- "log(pop)" #' #' data <- attrib::data_fake_nation #' #' fit <- fit_attrib(data = data, response = response, fixef = fixef, ranef = ranef, offset = offset) #' #' n_sim <- 5 #' sim(fit, data, n_sim) #' @return A dataset with 500 simulations of the expected response for each row in the original dataset. 
#' @export sim <- function( fit, data, n_sim) { if (length(which(is.na(data))) != 0) { stop("The dataset has NA values") } if (is.null(attr(fit, "fit_fix"))) { stop("Fit is missing attribute fit_fix and possibly not computed by fit_attrib") # Maybe a different message, you decide :) } if (is.null(attr(fit, "response"))) { stop("Fit is missing attribute fit_fix and possibly not computed by fit_attrib") # Maybe a different message, you decide :) } col_names <- colnames(data) fix_eff <- attr(fit, "fit_fix") offset <- attr(fit, "offset") response <- attr(fit, "response") x <- arm::sim(fit, n.sims = n_sim) # get the design matrix for the fixed effects data_fix <- stats::model.frame(fix_eff, data = data) data_fix_copy <- data.table::as.data.table(data_fix) data_fix_copy[, (response) := NULL] x_fix <- as.data.frame(as.matrix(x@fixef)) r_names <- rownames(rbind(1, as.matrix(t(data_fix_copy)))) c_names <- colnames(as.matrix(x@fixef)) count <- 0 for (i in (2:(length(r_names) - 1))) { # print(i) r_cur <- r_names[i] c_cur <- c_names[i - count] check <- FALSE c_check <- stringr::str_replace_all(c_cur, "[:(, =)*/-]", ".") r_check <- stringr::str_replace_all(r_cur, "[:(, =)*/-]", ".") # print(c_check == r_cur) # print(r_cur) # print(c_check) if (c_check == r_check) { check <- TRUE next } split <- strsplit(c_check, "")[[1]] if (split[length(split) - 1] == ".") { p <- paste0(substr(c_check, 1, (nchar(c_check) - 1)), ".", substr(c_check, nchar(c_check), nchar(c_check)), collapse = NULL) if (p == r_check) { check <- TRUE next } } if (check == FALSE) { x_fix <- tibble::add_column(x_fix, extra = 0, .after = (i - 1 + count)) count <- count + 1 } } # multiply it out dim(cbind(as.matrix(x_fix), 1)) dim(rbind(1, as.matrix(t(data_fix_copy)))) colnames(cbind(as.matrix(x_fix), 1)) rownames(rbind(1, as.matrix(t(data_fix_copy)))) # add the offset!! 
if (is.null(offset)) { cbind(as.matrix(x_fix)) %*% rbind(1, as.matrix(t(data_fix_copy))) } else { expected_fix <- cbind(as.matrix(x_fix), 1) %*% rbind(1, as.matrix(t(data_fix_copy))) } # set up the results for random effects expected_ran <- matrix(0, ncol = ncol(expected_fix), nrow = nrow(expected_fix)) # slowly add in each of the random effects i <- j <- k <- 1 pb <- progress::progress_bar$new(total = length(x@ranef) + 3) for (i in 1:length(x@ranef)) { grouping <- names(x@ranef)[i] for (j in 1:dim(x@ranef[[i]])[3]) { # print(j) variable <- dimnames(x@ranef[[i]])[[3]][j] coefficients <- x@ranef[[i]][, , j] if (variable == "(Intercept)") { # print(dim(expected_ran)) # print(dim(coefficients[,data[[grouping]]])) expected_ran <- expected_ran + coefficients[, data[[grouping]]] } else { # print(dim(expected_ran)) # print(dim(coefficients[,data[[grouping]]])) # print("non_intercept") expected_ran <- expected_ran + coefficients[, data[[grouping]]] %*% diag(data[[variable]]) } } if (interactive()) pb$tick() } # print("loop over") # add together the coefficients for the fixed and random effects expected <- as.data.table(exp(expected_fix + expected_ran)) expected_t <- data.table::transpose(expected) expected_t$id_row <- 1:nrow(data) data$id_row <- 1:nrow(data) if (interactive()) pb$tick() new_data <- merge(data, expected_t, by = "id_row", all = TRUE) if (interactive()) pb$tick() new_data <- data.table::melt(new_data, id.vars = c(col_names, "id_row")) if (interactive()) pb$tick() setnames(new_data, "variable", "sim_id") new_data$sim_id <- as.numeric(as.factor(new_data$sim_id)) setnames(new_data, "value", "sim_value") return(new_data) }
/R/sim.R
no_license
cran/attrib
R
false
false
5,122
r
#' Generates simulations of expected mortality by simulating the model coefficients. #' #' With the given fit from fit_attrib the function sim, from package arm as described in Gelman, Hill (2012) <doi:10.1017/CBO9780511790942>, is used to generate 500 simulations #' of all the coefficients, from there respective posterior distributions. #' This is then used to compute the expected response for all simulations and rows in the input dataset. #' # For more details see the help vignette: #' \code{vignette("intro", package="attrib")} #' #' @param fit A model fit created by fit_attrib #' @param data The data with either observed values or reference values. #' @param n_sim Number of simulations #' #' @examples #' #' response <- "deaths" #' fixef <- "pr100_ili_lag_1 + sin(2 * pi * (week - 1) / 52) + cos(2 * pi * (week - 1) / 52)" #' ranef <- " (pr100_ili_lag_1| season)" #' offset <- "log(pop)" #' #' data <- attrib::data_fake_nation #' #' fit <- fit_attrib(data = data, response = response, fixef = fixef, ranef = ranef, offset = offset) #' #' n_sim <- 5 #' sim(fit, data, n_sim) #' @return A dataset with 500 simulations of the expected response for each row in the original dataset. 
#' @export sim <- function( fit, data, n_sim) { if (length(which(is.na(data))) != 0) { stop("The dataset has NA values") } if (is.null(attr(fit, "fit_fix"))) { stop("Fit is missing attribute fit_fix and possibly not computed by fit_attrib") # Maybe a different message, you decide :) } if (is.null(attr(fit, "response"))) { stop("Fit is missing attribute fit_fix and possibly not computed by fit_attrib") # Maybe a different message, you decide :) } col_names <- colnames(data) fix_eff <- attr(fit, "fit_fix") offset <- attr(fit, "offset") response <- attr(fit, "response") x <- arm::sim(fit, n.sims = n_sim) # get the design matrix for the fixed effects data_fix <- stats::model.frame(fix_eff, data = data) data_fix_copy <- data.table::as.data.table(data_fix) data_fix_copy[, (response) := NULL] x_fix <- as.data.frame(as.matrix(x@fixef)) r_names <- rownames(rbind(1, as.matrix(t(data_fix_copy)))) c_names <- colnames(as.matrix(x@fixef)) count <- 0 for (i in (2:(length(r_names) - 1))) { # print(i) r_cur <- r_names[i] c_cur <- c_names[i - count] check <- FALSE c_check <- stringr::str_replace_all(c_cur, "[:(, =)*/-]", ".") r_check <- stringr::str_replace_all(r_cur, "[:(, =)*/-]", ".") # print(c_check == r_cur) # print(r_cur) # print(c_check) if (c_check == r_check) { check <- TRUE next } split <- strsplit(c_check, "")[[1]] if (split[length(split) - 1] == ".") { p <- paste0(substr(c_check, 1, (nchar(c_check) - 1)), ".", substr(c_check, nchar(c_check), nchar(c_check)), collapse = NULL) if (p == r_check) { check <- TRUE next } } if (check == FALSE) { x_fix <- tibble::add_column(x_fix, extra = 0, .after = (i - 1 + count)) count <- count + 1 } } # multiply it out dim(cbind(as.matrix(x_fix), 1)) dim(rbind(1, as.matrix(t(data_fix_copy)))) colnames(cbind(as.matrix(x_fix), 1)) rownames(rbind(1, as.matrix(t(data_fix_copy)))) # add the offset!! 
if (is.null(offset)) { cbind(as.matrix(x_fix)) %*% rbind(1, as.matrix(t(data_fix_copy))) } else { expected_fix <- cbind(as.matrix(x_fix), 1) %*% rbind(1, as.matrix(t(data_fix_copy))) } # set up the results for random effects expected_ran <- matrix(0, ncol = ncol(expected_fix), nrow = nrow(expected_fix)) # slowly add in each of the random effects i <- j <- k <- 1 pb <- progress::progress_bar$new(total = length(x@ranef) + 3) for (i in 1:length(x@ranef)) { grouping <- names(x@ranef)[i] for (j in 1:dim(x@ranef[[i]])[3]) { # print(j) variable <- dimnames(x@ranef[[i]])[[3]][j] coefficients <- x@ranef[[i]][, , j] if (variable == "(Intercept)") { # print(dim(expected_ran)) # print(dim(coefficients[,data[[grouping]]])) expected_ran <- expected_ran + coefficients[, data[[grouping]]] } else { # print(dim(expected_ran)) # print(dim(coefficients[,data[[grouping]]])) # print("non_intercept") expected_ran <- expected_ran + coefficients[, data[[grouping]]] %*% diag(data[[variable]]) } } if (interactive()) pb$tick() } # print("loop over") # add together the coefficients for the fixed and random effects expected <- as.data.table(exp(expected_fix + expected_ran)) expected_t <- data.table::transpose(expected) expected_t$id_row <- 1:nrow(data) data$id_row <- 1:nrow(data) if (interactive()) pb$tick() new_data <- merge(data, expected_t, by = "id_row", all = TRUE) if (interactive()) pb$tick() new_data <- data.table::melt(new_data, id.vars = c(col_names, "id_row")) if (interactive()) pb$tick() setnames(new_data, "variable", "sim_id") new_data$sim_id <- as.numeric(as.factor(new_data$sim_id)) setnames(new_data, "value", "sim_value") return(new_data) }
## Project 1, Exploratory Data Analysis ## Series of scripts to print out plots (plot1.r - plot4.r) ## 10/11/2015 # CLEAN HOUSE rm(list = ls()) for(i in 1:5) gc() options(stringsAsFactors=FALSE) ######## DATA PREP ######## fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" if(!file.exists("./data")){dir.create("./data")} zipfn <- "./data/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(url = fileURL,destfile = zipfn) # look at which files are in Zip files <- unzip(zipfile = zipfn,list=T)$Name # bring in the 1st row which contains the column names data_names <- colnames(read.table(unz(zipfn, files),nrows = 1,sep = ";",header =T) ) # read rest of table table (i played with the nrows and skip values so that i didn't load too much, but got the right dates) data <- read.table(unz(zipfn, files),nrows = 10000,sep = ";",header =T,skip = 60000) # add your column names back colnames(data) <- data_names str(data) # select only dates we're interested in data$Date <- as.Date(data$Date,format="%d/%m/%Y") head(data[which(data$Date<=as.Date("2007-02-02") & data$Date>=as.Date("2007-02-01")),]) data <- data[which(data$Date<=as.Date("2007-02-02") & data$Date>=as.Date("2007-02-01")),] # all numbers were brought in as characters.. 
convert to numeric data[,3:9] <- as.data.frame(apply(data[,3:9],MARGIN = 2,as.numeric)) # prof suggested using datetime format, so convertime here data$datetime <- strptime(paste(data$Date,data$Time,sep=" "),format="%Y-%m-%d %H:%M:%S") ######## PLOT ######## png(file='plot4.png') par(mfrow= c(2, 2)) plot(data$datetime,data$Global_active_power,ylab="Global Active Power",type = 'l',xlab="") plot(data$datetime,data$Voltage,ylab="Voltage",type = 'l',xlab="datetime") plot(data$datetime,data$Sub_metering_1,ylab="Energy sub metering",type = 'l',xlab="") lines(data$datetime,data$Sub_metering_2,col="red") lines(data$datetime,data$Sub_metering_3,col="blue") legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd=c(2.5,2.5,2.5), col=c("black","blue","red"), bty="n") plot(data$datetime,data$Global_reactive_power,ylab="Global_reactive_power",type = 'l',xlab="datetime") dev.off()
/plot4.r
no_license
kimstat/ExData_Plotting1
R
false
false
2,231
r
## Project 1, Exploratory Data Analysis ## Series of scripts to print out plots (plot1.r - plot4.r) ## 10/11/2015 # CLEAN HOUSE rm(list = ls()) for(i in 1:5) gc() options(stringsAsFactors=FALSE) ######## DATA PREP ######## fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" if(!file.exists("./data")){dir.create("./data")} zipfn <- "./data/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(url = fileURL,destfile = zipfn) # look at which files are in Zip files <- unzip(zipfile = zipfn,list=T)$Name # bring in the 1st row which contains the column names data_names <- colnames(read.table(unz(zipfn, files),nrows = 1,sep = ";",header =T) ) # read rest of table table (i played with the nrows and skip values so that i didn't load too much, but got the right dates) data <- read.table(unz(zipfn, files),nrows = 10000,sep = ";",header =T,skip = 60000) # add your column names back colnames(data) <- data_names str(data) # select only dates we're interested in data$Date <- as.Date(data$Date,format="%d/%m/%Y") head(data[which(data$Date<=as.Date("2007-02-02") & data$Date>=as.Date("2007-02-01")),]) data <- data[which(data$Date<=as.Date("2007-02-02") & data$Date>=as.Date("2007-02-01")),] # all numbers were brought in as characters.. 
convert to numeric data[,3:9] <- as.data.frame(apply(data[,3:9],MARGIN = 2,as.numeric)) # prof suggested using datetime format, so convertime here data$datetime <- strptime(paste(data$Date,data$Time,sep=" "),format="%Y-%m-%d %H:%M:%S") ######## PLOT ######## png(file='plot4.png') par(mfrow= c(2, 2)) plot(data$datetime,data$Global_active_power,ylab="Global Active Power",type = 'l',xlab="") plot(data$datetime,data$Voltage,ylab="Voltage",type = 'l',xlab="datetime") plot(data$datetime,data$Sub_metering_1,ylab="Energy sub metering",type = 'l',xlab="") lines(data$datetime,data$Sub_metering_2,col="red") lines(data$datetime,data$Sub_metering_3,col="blue") legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd=c(2.5,2.5,2.5), col=c("black","blue","red"), bty="n") plot(data$datetime,data$Global_reactive_power,ylab="Global_reactive_power",type = 'l',xlab="datetime") dev.off()
library(stringr) #this package is quite useful and it's already install on bear library("ggplot2") #loads ggplot2 # cd - if error change to /Volumes/vianaj-genomics-brain-development/ or /rds/projects/v/vianaj-genomics-brain-development/ #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/aggression_RRBS/") #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/") #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/") #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/") setwd("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/methylated") #if you put the full path you only have to tell R the final destination, not every folder on the way # methylated data methylated_data <- lapply(Sys.glob("*.bismark.cov"), read.table, stringsAsFactors=FALSE) #I added the argument stringAsFactors=FALSE or otherwise some of the columns would be imported as factors . Do str(methylated_data) before and after adding that argument and see first column. Add to below as well names(methylated_data)<- str_match(Sys.glob("*.bismark.cov"),paste0("BLB","(.*?.....)"))[,1] #This takes the sample names from the files names and attributes it to the list elements. There are cleaner ways of doing this, but I am not an expert in regular expressions. # unmethylated data setwd("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/unmethylated") unmethylated_data <- lapply(Sys.glob("*.bismark.cov"), read.table, stringsAsFactors=FALSE) names(unmethylated_data)<- str_match(Sys.glob("*.bismark.cov"),paste0("BLB","(.*?.....)"))[,1] # start loop for(file in 1:length(methylated_data)){ #Loot from 1 to the maximum elements of the list temp_meth <- data.frame(methylated_data[[file]]) #this sill extract the data set in the current loop. 
colnames(temp_meth) <- c("chromosome", "start_position", "end_position", "methylation_percentage", "count_methylated", "count_unmethylated") #add column names # names(methylated_data)[[file]] #this will give you the sample name for the current set print(names(methylated_data)[[file]]) print("check1") # call corresponding unmeth file temp_unmeth <- data.frame(unmethylated_data[names(methylated_data)[[file]]]) print("check2") colnames(temp_unmeth) <- c("chromosome", "start_position", "end_position", "methylation_percentage", "count_methylated", "count_unmethylated") #add column names print("check3") #setwd("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/plots/test_plots") #It's best not to change the directory in every loop as it might get messy and you might end up with files all over. It might also take longer. In this case what we are doing a very quick thing inside of the loop, but it's good practice to make for loops as quick as possible for when you are doing things that take a long time. # start pdf pdf(paste0("/rds/projects/v/vianaj-genomics-brain-development/MATTRICS/bismark_methylation_extractor/spikeins/plots/test_plots/", names(methylated_data)[[file]],"_spikeins_scatter.pdf")) #paste0() is a really handy function, you can create strings (without spaces, for strings with spaces see paste()). #You already have the name of the sample as the name of the item in the current list, so you can use that. #Try to print just paste0("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/plots/test_plots", names(methylated_data)[[file]],"_spikeins_scatter.pdf") in R and see the output. #First bit in the full path name of where you want the pdf #I added 'spikeins_scatter' because as we make more plots is good to know which QC it is from just the name. 
# plot plot <- ggplot() + geom_point(data= temp_meth, aes(x=start_position, y= methylation_percentage, color = chromosome))+ geom_point(data= temp_unmeth, aes(x=start_position, y= methylation_percentage, color = chromosome)) + ggtitle(label = names(methylated_data)[[file]], "Methylation Percentage") + ylim(-1, 100) print(plot) #you need to add this so the plot is printed into the pdf # end pdf dev.off() print("check4") }
/scripts/old/BLB/R_scripts/spikeins_script.R
no_license
jffpviana/aggression_RRBS
R
false
false
4,268
r
library(stringr) #this package is quite useful and it's already install on bear library("ggplot2") #loads ggplot2 # cd - if error change to /Volumes/vianaj-genomics-brain-development/ or /rds/projects/v/vianaj-genomics-brain-development/ #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/aggression_RRBS/") #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/") #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/") #setwd("/Volumes/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/") setwd("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/methylated") #if you put the full path you only have to tell R the final destination, not every folder on the way # methylated data methylated_data <- lapply(Sys.glob("*.bismark.cov"), read.table, stringsAsFactors=FALSE) #I added the argument stringAsFactors=FALSE or otherwise some of the columns would be imported as factors . Do str(methylated_data) before and after adding that argument and see first column. Add to below as well names(methylated_data)<- str_match(Sys.glob("*.bismark.cov"),paste0("BLB","(.*?.....)"))[,1] #This takes the sample names from the files names and attributes it to the list elements. There are cleaner ways of doing this, but I am not an expert in regular expressions. # unmethylated data setwd("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/unmethylated") unmethylated_data <- lapply(Sys.glob("*.bismark.cov"), read.table, stringsAsFactors=FALSE) names(unmethylated_data)<- str_match(Sys.glob("*.bismark.cov"),paste0("BLB","(.*?.....)"))[,1] # start loop for(file in 1:length(methylated_data)){ #Loot from 1 to the maximum elements of the list temp_meth <- data.frame(methylated_data[[file]]) #this sill extract the data set in the current loop. 
colnames(temp_meth) <- c("chromosome", "start_position", "end_position", "methylation_percentage", "count_methylated", "count_unmethylated") #add column names # names(methylated_data)[[file]] #this will give you the sample name for the current set print(names(methylated_data)[[file]]) print("check1") # call corresponding unmeth file temp_unmeth <- data.frame(unmethylated_data[names(methylated_data)[[file]]]) print("check2") colnames(temp_unmeth) <- c("chromosome", "start_position", "end_position", "methylation_percentage", "count_methylated", "count_unmethylated") #add column names print("check3") #setwd("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/plots/test_plots") #It's best not to change the directory in every loop as it might get messy and you might end up with files all over. It might also take longer. In this case what we are doing a very quick thing inside of the loop, but it's good practice to make for loops as quick as possible for when you are doing things that take a long time. # start pdf pdf(paste0("/rds/projects/v/vianaj-genomics-brain-development/MATTRICS/bismark_methylation_extractor/spikeins/plots/test_plots/", names(methylated_data)[[file]],"_spikeins_scatter.pdf")) #paste0() is a really handy function, you can create strings (without spaces, for strings with spaces see paste()). #You already have the name of the sample as the name of the item in the current list, so you can use that. #Try to print just paste0("/rds/projects/v/vianaj-genomics-brain-development/MATRICS/bismark_methylation_extractor/spikeins/plots/test_plots", names(methylated_data)[[file]],"_spikeins_scatter.pdf") in R and see the output. #First bit in the full path name of where you want the pdf #I added 'spikeins_scatter' because as we make more plots is good to know which QC it is from just the name. 
# plot plot <- ggplot() + geom_point(data= temp_meth, aes(x=start_position, y= methylation_percentage, color = chromosome))+ geom_point(data= temp_unmeth, aes(x=start_position, y= methylation_percentage, color = chromosome)) + ggtitle(label = names(methylated_data)[[file]], "Methylation Percentage") + ylim(-1, 100) print(plot) #you need to add this so the plot is printed into the pdf # end pdf dev.off() print("check4") }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/layers_pool.R \name{layer_top_k_pool} \alias{layer_top_k_pool} \title{TopKPool} \usage{ layer_top_k_pool( object, ratio, return_mask = FALSE, sigmoid_gating = FALSE, kernel_initializer = "glorot_uniform", kernel_regularizer = NULL, kernel_constraint = NULL, ... ) } \arguments{ \item{ratio}{float between 0 and 1, ratio of nodes to keep in each graph} \item{return_mask}{boolean, whether to return the binary mask used for pooling} \item{sigmoid_gating}{boolean, use a sigmoid gating activation instead of a tanh} \item{kernel_initializer}{initializer for the weights} \item{kernel_regularizer}{regularization applied to the weights} \item{kernel_constraint}{constraint applied to the weights} } \description{ \loadmathjax A gPool/Top-K layer as presented by \href{http://proceedings.mlr.press/v97/gao19a/gao19a.pdf}{Gao & Ji (2019)} and \href{https://arxiv.org/abs/1811.01287}{Cangea et al. (2018)}. \strong{Mode}: single, disjoint. This layer computes the following operations: \mjdeqn{\boldsymbol{y} = \frac{\boldsymbol{X}\p}{\|\boldsymbol{p}\|}; \;\;\;\;\boldsymbol{i} = \textrm{rank}(\boldsymbol{y}, K); \;\;\;\;\boldsymbol{X}' = (\boldsymbol{X} \odot \textrm{tanh}(\boldsymbol{y}))_\boldsymbol{i}; \;\;\;\;\boldsymbol{A}' = \boldsymbol{A} _ {\boldsymbol{i}, \boldsymbol{i}}}{} where \mjeqn{ \textrm{rank}(\boldsymbol{y}, K) }{} returns the indices of the top K values of \mjeqn{\boldsymbol{y}}{}, and \mjeqn{\boldsymbol{p}}{} is a learnable parameter vector of size \mjeqn{F}{}. \mjeqn{K}{} is defined for each graph as a fraction of the number of nodes. Note that the the gating operation \mjeqn{\textrm{tanh}(\boldsymbol{y})}{} (Cangea et al.) can be replaced with a sigmoid (Gao & Ji). This layer temporarily makes the adjacency matrix dense in order to compute \mjeqn{\boldsymbol{A}'}{}. If memory is not an issue, considerable speedups can be achieved by using dense graphs directly. 
Converting a graph from sparse to dense and back to sparse is an expensive operation. \strong{Input} \itemize{ \item Node features of shape \verb{(N, F)}; \item Binary adjacency matrix of shape \verb{(N, N)}; \item Graph IDs of shape \verb{(N, )} (only in disjoint mode); } \strong{Output} \itemize{ \item Reduced node features of shape \verb{(ratio * N, F)}; \item Reduced adjacency matrix of shape \verb{(ratio * N, ratio * N)}; \item Reduced graph IDs of shape \verb{(ratio * N, )} (only in disjoint mode); \item If \code{return_mask=True}, the binary pooling mask of shape \verb{(ratio * N, )}. } }
/man/layer_top_k_pool.Rd
no_license
rdinnager/rspektral
R
false
true
2,604
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/layers_pool.R \name{layer_top_k_pool} \alias{layer_top_k_pool} \title{TopKPool} \usage{ layer_top_k_pool( object, ratio, return_mask = FALSE, sigmoid_gating = FALSE, kernel_initializer = "glorot_uniform", kernel_regularizer = NULL, kernel_constraint = NULL, ... ) } \arguments{ \item{ratio}{float between 0 and 1, ratio of nodes to keep in each graph} \item{return_mask}{boolean, whether to return the binary mask used for pooling} \item{sigmoid_gating}{boolean, use a sigmoid gating activation instead of a tanh} \item{kernel_initializer}{initializer for the weights} \item{kernel_regularizer}{regularization applied to the weights} \item{kernel_constraint}{constraint applied to the weights} } \description{ \loadmathjax A gPool/Top-K layer as presented by \href{http://proceedings.mlr.press/v97/gao19a/gao19a.pdf}{Gao & Ji (2019)} and \href{https://arxiv.org/abs/1811.01287}{Cangea et al. (2018)}. \strong{Mode}: single, disjoint. This layer computes the following operations: \mjdeqn{\boldsymbol{y} = \frac{\boldsymbol{X}\p}{\|\boldsymbol{p}\|}; \;\;\;\;\boldsymbol{i} = \textrm{rank}(\boldsymbol{y}, K); \;\;\;\;\boldsymbol{X}' = (\boldsymbol{X} \odot \textrm{tanh}(\boldsymbol{y}))_\boldsymbol{i}; \;\;\;\;\boldsymbol{A}' = \boldsymbol{A} _ {\boldsymbol{i}, \boldsymbol{i}}}{} where \mjeqn{ \textrm{rank}(\boldsymbol{y}, K) }{} returns the indices of the top K values of \mjeqn{\boldsymbol{y}}{}, and \mjeqn{\boldsymbol{p}}{} is a learnable parameter vector of size \mjeqn{F}{}. \mjeqn{K}{} is defined for each graph as a fraction of the number of nodes. Note that the the gating operation \mjeqn{\textrm{tanh}(\boldsymbol{y})}{} (Cangea et al.) can be replaced with a sigmoid (Gao & Ji). This layer temporarily makes the adjacency matrix dense in order to compute \mjeqn{\boldsymbol{A}'}{}. If memory is not an issue, considerable speedups can be achieved by using dense graphs directly. 
Converting a graph from sparse to dense and back to sparse is an expensive operation. \strong{Input} \itemize{ \item Node features of shape \verb{(N, F)}; \item Binary adjacency matrix of shape \verb{(N, N)}; \item Graph IDs of shape \verb{(N, )} (only in disjoint mode); } \strong{Output} \itemize{ \item Reduced node features of shape \verb{(ratio * N, F)}; \item Reduced adjacency matrix of shape \verb{(ratio * N, ratio * N)}; \item Reduced graph IDs of shape \verb{(ratio * N, )} (only in disjoint mode); \item If \code{return_mask=True}, the binary pooling mask of shape \verb{(ratio * N, )}. } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/saga_constantgrid.R \name{saga_constantgrid} \alias{saga_constantgrid} \title{QGIS algorithm Constant grid} \usage{ saga_constantgrid( NAME = qgisprocess::qgis_default_value(), CONST = qgisprocess::qgis_default_value(), TYPE = qgisprocess::qgis_default_value(), DEFINITION = qgisprocess::qgis_default_value(), USER_SIZE = qgisprocess::qgis_default_value(), USER_XMIN = qgisprocess::qgis_default_value(), USER_XMAX = qgisprocess::qgis_default_value(), USER_YMIN = qgisprocess::qgis_default_value(), USER_YMAX = qgisprocess::qgis_default_value(), USER_FITS = qgisprocess::qgis_default_value(), TEMPLATE = qgisprocess::qgis_default_value(), OUT_GRID = qgisprocess::qgis_default_value(), ..., .complete_output = TRUE ) } \arguments{ \item{NAME}{\code{string} - Name. String value.} \item{CONST}{\code{number} - Constant Value. A numeric value.} \item{TYPE}{\code{enum} of \verb{("[0] bit", "[1] unsigned 1 byte integer", "[2] signed 1 byte integer", "[3] unsigned 2 byte integer", "[4] signed 2 byte integer", "[5] unsigned 8 byte integer", "[6] signed 8 byte integer", "[7] 4 byte floating point number", "[8] 8 byte floating point number")} - Data Type. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.} \item{DEFINITION}{\code{enum} of \verb{("[0] user defined", "[1] grid or grid system")} - Target Grid System. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.} \item{USER_SIZE}{\code{number} - Cellsize. A numeric value.} \item{USER_XMIN}{\code{number} - Left. A numeric value.} \item{USER_XMAX}{\code{number} - Right. A numeric value.} \item{USER_YMIN}{\code{number} - Bottom. A numeric value.} \item{USER_YMAX}{\code{number} - Top. A numeric value.} \item{USER_FITS}{\code{enum} of \verb{("[0] nodes", "[1] cells")} - Fit. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.} \item{TEMPLATE}{\code{raster} - Target System. 
Path to a raster layer.} \item{OUT_GRID}{\code{rasterDestination} - Target Grid. Path for new raster layer.} \item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}} \item{.complete_output}{logical specifing if complete out of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or first output (most likely the main) should read (\code{FALSE}). Default value is \code{TRUE}.} } \description{ QGIS Algorithm provided by SAGA Constant grid (saga:constantgrid) } \details{ \subsection{Outputs description}{ \itemize{ \item OUT_GRID - outputRaster - Target Grid } } }
/man/saga_constantgrid.Rd
permissive
VB6Hobbyst7/r_package_qgis
R
false
true
2,652
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/saga_constantgrid.R \name{saga_constantgrid} \alias{saga_constantgrid} \title{QGIS algorithm Constant grid} \usage{ saga_constantgrid( NAME = qgisprocess::qgis_default_value(), CONST = qgisprocess::qgis_default_value(), TYPE = qgisprocess::qgis_default_value(), DEFINITION = qgisprocess::qgis_default_value(), USER_SIZE = qgisprocess::qgis_default_value(), USER_XMIN = qgisprocess::qgis_default_value(), USER_XMAX = qgisprocess::qgis_default_value(), USER_YMIN = qgisprocess::qgis_default_value(), USER_YMAX = qgisprocess::qgis_default_value(), USER_FITS = qgisprocess::qgis_default_value(), TEMPLATE = qgisprocess::qgis_default_value(), OUT_GRID = qgisprocess::qgis_default_value(), ..., .complete_output = TRUE ) } \arguments{ \item{NAME}{\code{string} - Name. String value.} \item{CONST}{\code{number} - Constant Value. A numeric value.} \item{TYPE}{\code{enum} of \verb{("[0] bit", "[1] unsigned 1 byte integer", "[2] signed 1 byte integer", "[3] unsigned 2 byte integer", "[4] signed 2 byte integer", "[5] unsigned 8 byte integer", "[6] signed 8 byte integer", "[7] 4 byte floating point number", "[8] 8 byte floating point number")} - Data Type. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.} \item{DEFINITION}{\code{enum} of \verb{("[0] user defined", "[1] grid or grid system")} - Target Grid System. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.} \item{USER_SIZE}{\code{number} - Cellsize. A numeric value.} \item{USER_XMIN}{\code{number} - Left. A numeric value.} \item{USER_XMAX}{\code{number} - Right. A numeric value.} \item{USER_YMIN}{\code{number} - Bottom. A numeric value.} \item{USER_YMAX}{\code{number} - Top. A numeric value.} \item{USER_FITS}{\code{enum} of \verb{("[0] nodes", "[1] cells")} - Fit. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.} \item{TEMPLATE}{\code{raster} - Target System. 
Path to a raster layer.} \item{OUT_GRID}{\code{rasterDestination} - Target Grid. Path for new raster layer.} \item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}} \item{.complete_output}{logical specifing if complete out of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or first output (most likely the main) should read (\code{FALSE}). Default value is \code{TRUE}.} } \description{ QGIS Algorithm provided by SAGA Constant grid (saga:constantgrid) } \details{ \subsection{Outputs description}{ \itemize{ \item OUT_GRID - outputRaster - Target Grid } } }
## SPATIAL library(sp) library(rgeos) library(raster) library(rgdal) library(maptools) library(sf) ## DATA MANAGEMENT library(tidyverse) library(skimr) library(patchwork) library(readxl) # library(zoo) library(lubridate) ## PLOTTING library(scales) library(units) library(viridis) library(extrafont) library(gtable) library(grid) library(ggnewscale) library(ggpubr) library(cowplot) library(patchwork) #---------------------------------------------------------------------------- # water chem shapefile wat_shp <- readOGR("../appa_CL_model/GIS/waterchemsites_master_1213_n83") wat_shp <- spTransform(wat_shp, CRS("+proj=longlat +datum=WGS84")) wat_points <- data.frame(Latitude = wat_shp@coords[,2], Longitude = wat_shp@coords[,1], wat_shp@data) wat_points$ID <- as.character(wat_points$ID) crs_new <- proj4string(wat_shp) mon <- readOGR("gis/mon_nf") mon <- spTransform(mon, crs_new) mon_sf <- st_as_sf(mon) mon_c1 <- readOGR("../fs_admin/data/mon_c1") mon_c1 <- spTransform(mon_c1, crs_new) mon_c1_sf <- st_as_sf(mon_c1) lime_wv <- readOGR("gis/WV_LimestoneSites_06162017") lime_wv <- spTransform(lime_wv, crs_new) lime_wv <- lime_wv[lime_wv$Active == "active",] lime_mon <- over(lime_wv, mon) %>% bind_cols(., lime_wv@data) %>% filter(!(is.na(NAME))) %>% pull(OBJECTID1) lime_wv <- lime_wv[lime_wv$OBJECTID %in% lime_mon, ] lime_ctds <- as.data.frame(lime_wv@coords) lime_ctds$limed <- rep("active", dim(lime_ctds)[1]) colnames(lime_ctds)[1:2] <- c("lon", "lat") # relief map mon_relief <- readRDS("../appa_CL_model/GIS/mon_relief.RDS") # relief relief_df <- as.data.frame(mon_relief, xy = TRUE) %>% dplyr::select(x, y, relief = srgr48i0100a_Value) %>% filter(!(is.na(relief))) rm(mon_relief) acid <- readOGR("gis/mon_acid_sens_soils") acid <- spTransform(acid, crs_new) acid_sf <- st_as_sf(acid) %>% rename(sens = SENSITIVIT) %>% mutate(sens = factor(sens, levels = c("H", "M", "L", "NA"), labels = c("High", "Moderate", "Low", "NA"))) water_chem <- 
read_excel("raw_data/WaterChem_MasterResults_031320.xlsx", na = c(".", "NS", "ND", "<0.04", "<0.03")) %>% # select(ID, Waterbody, Collection, area, ANC_ueq_L) %>% mutate(year = year(Collection), month = month(Collection)) %>% mutate(ID = as.character(ID), season = if_else(month %in% c(9,10,11), "Fall", "Spring")) %>% rename(anc = ANC_ueq_L) %>% group_by(ID, season) %>% arrange(desc(year)) %>% slice(1:3) %>% summarise(mean_anc = mean(anc)) %>% ungroup() # join the two together to explore liming and water trends water_dat <- wat_points %>% select(ID, Latitude, Longitude, Water_Unit_Type = Site_Locat, Monitoring_Site_Name = Waterbody, Limed) %>% left_join(water_chem, by = "ID") %>% mutate(limed = str_sub(Limed, 1, 1)) %>% select(-Limed) # create breaks in data for plotting brks <- c(-Inf, 0, 50, 200, Inf) labels <- c("Acidified", "Acid sensitive", "Buffered", "Well buffered") water_dat$anc_fac <- cut(water_dat$mean_anc, breaks = brks, include.lowest = TRUE, labels = labels) # water_dat$anc_fac_ch <- as.character(water_dat$anc_fac) water_dat$buffered <- if_else(water_dat$limed == "Y" & water_dat$anc_fac %in% c("Buffered", "Well buffered"), "Buffered from liming", as.character(water_dat$anc_fac)) water_dat <- water_dat %>% mutate(anc_ch = coalesce(buffered, as.character(anc_fac))) %>% mutate(anc_buf_fac = factor(anc_ch, levels = c("Acidified", "Acid sensitive", "Buffered", "Well buffered", "Buffered from liming"))) %>% filter(!(is.na(anc_buf_fac))) water_dat$limed[water_dat$limed == "?"] <- "N" water_dat$limed[is.na(water_dat$limed)] <- "N" fall_dat <- water_dat %>% filter(season == "Fall") spring_dat <- water_dat %>% filter(season == "Spring") acidic_limed <- water_dat %>% filter(limed == "Y") %>% filter(anc_buf_fac != "Buffered from liming") %>% select(1:8) write_csv(acidic_limed, "data/acidified_limed_sites.csv") #---------------------------------------------------------------------------- # plot ANC points to get legend anc_plot <- ggplot() + 
geom_point(aes(Longitude, Latitude, fill = anc_fac), shape = 21, color = "grey15", size = 1.5, data = fall_dat) + scale_fill_manual(values = c("darkred", "tomato3", "darkolivegreen3", "darkgreen"), name = expression(bold(Acid~Neutralizing~Capacity~(mu*eq~L^-1)))) + theme_minimal() + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold")) + guides(fill = guide_legend(override.aes = list(size = 4))) anc_legend <- get_legend(anc_plot) # plot ANC points to get legend sens_plot <- ggplot() + geom_sf(aes(fill = NULL), color = "black", mon_sf) + geom_sf(aes(fill = sens), color = NA, acid_sf) + scale_fill_manual(values = alpha(c("red", "yellow", "green", "grey85"), 0.3), na.translate = FALSE, name = "Acid Sensitive Geology") + theme_minimal() + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold")) sens_legend <- get_legend(sens_plot) # limed_plot <- ggplot() + # geom_point(aes(Longitude, Latitude, shape = limed), # # shape = 21, # color = "grey15", # size = 1.5, # data = spring_dat) + # scale_shape_manual(values = c(21, 24), # name = "Lime Addition?", # labels = c("No", "Yes")) + # theme_minimal() + # theme(axis.title = element_blank(), # axis.text = element_blank(), # panel.grid.major = element_line(color = "white"), # legend.text = element_text(size = 13), # legend.title = element_text(size = 13, face = "bold")) + # guides(shape = guide_legend(override.aes = list(size = 4))) limed_plot <- ggplot() + geom_point(aes(lon, lat, shape = "limed"), color = "midnightblue", size = 1.5, data = lime_ctds) + scale_shape_manual(values = c(17), name = "Liming Site", labels = "") + theme_minimal() + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = 
element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold")) + guides(shape = guide_legend(override.aes = list(size = 4))) limed_legend <- get_legend(limed_plot) # plot_map <- ggplot() + # geom_sf(aes(fill = NULL), alpha = 0, color = "black", mon_sf) + # # geom_raster(data = relief_df, aes(x = x, # # y = y, # # alpha = relief)) + # # use the "alpha hack" # scale_alpha(name = "", range = c(0.6, 0), guide = F) + # geom_sf(aes(fill = sens), color = NA, acid_sf) + # geom_sf(aes(fill = NULL), alpha = 0, color = "black", size = 1.5, mon_c1_sf) + # scale_fill_manual(values = alpha(c("red", "yellow", "green", "grey85"), 0.3), # na.translate = FALSE, # name = "Acid Sensitive Geology", # guide = FALSE) + # new_scale_fill() + # geom_point(aes(Longitude, Latitude, fill = anc_buf_fac), # shape = 21, # color = "grey15", # size = 2.3, # data = spring_dat) + # scale_fill_manual(values = c("darkred", "tomato3", "darkolivegreen3", "darkgreen", "midnightblue"), # name = expression(bold(Acid~Neutralizing~Capacity~(mu*eq~L^-1))), # guide = FALSE) + # theme_minimal() + # scale_x_continuous(limits = c(-80, -79.1)) + # scale_y_continuous(limits = c(38.8, 39.3)) + # theme(axis.title = element_blank(), # axis.text = element_blank(), # panel.grid.major = element_line(color = "white"), # legend.text = element_text(size = 13), # legend.title = element_text(size = 13, face = "bold"), # plot.margin = margin(0,0,0,0)) plot_map <- ggplot() + geom_sf(aes(fill = NULL), alpha = 0, color = "black", mon_sf) + # geom_raster(data = relief_df, aes(x = x, # y = y, # alpha = relief)) + # use the "alpha hack" scale_alpha(name = "", range = c(0.6, 0), guide = F) + geom_sf(aes(fill = sens), color = NA, acid_sf) + geom_sf(aes(fill = NULL), alpha = 0, color = "black", size = 1.5, mon_c1_sf) + scale_fill_manual(values = alpha(c("red", "yellow", "green", "grey85"), 0.3), na.translate = FALSE, name = "Acid Sensitive Geology", guide = FALSE) + 
new_scale_fill() + geom_point(aes(Longitude, Latitude, fill = anc_fac), shape = 21, color = "grey15", size = 3.5, data = fall_dat) + geom_point(aes(lon, lat, shape = "limed"), color = "midnightblue", size = 3.5, data = lime_ctds) + scale_shape_manual(values = c(17), name = "Liming Site", guide = FALSE) + scale_fill_manual(values = c("darkred", "tomato3", "darkolivegreen3", "darkgreen"), name = expression(bold(Acid~Neutralizing~Capacity~(mu*eq~L^-1))), guide = FALSE) + theme_minimal() + scale_x_continuous(limits = c(-80, -79.1)) + scale_y_continuous(limits = c(38.8, 39.3)) + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold"), plot.margin = margin(0,0,0,0)) bbox_df <- tibble(x = c(-80, -80, -79.1, -79.1, -80), y = c(39.3, 38.8, 38.8, 39.3, 39.3)) inset_map <- ggplot() + geom_sf(fill = NA, color = "black", data = mon_sf) + geom_path(data=bbox_df, aes(x,y), color="red", lwd=1) + theme_void() plot_inset_map <- ggdraw() + draw_plot(plot_map) + draw_plot(inset_map, x = 0.65, y = 0.65, width = 0.3, height = 0.3) # create a blank plot for legend alignment blank_p <- plot_spacer() + theme_void() # combine legend 1 & 2 leg12 <- plot_grid(blank_p, anc_legend, limed_legend, sens_legend, blank_p, ncol = 5) # # combine legend 3 & blank plot # leg30 <- plot_grid(leg3, blank_p, # blank_p, # nrow = 3 # ) # # # combine all legends # leg123 <- plot_grid(leg12, leg30, # ncol = 2 # ) final_p <- plot_grid(plot_inset_map, leg12, nrow = 2, align = "hv", axis = "l", # hjust = 0.5, rel_heights = c(4, 1) ) save_plot(filename = "figures/anc_3yr_avg_map_fall.pdf", plot = final_p, nrow = 1, base_height = 8, base_width = 11)
/code/anc_map.R
no_license
jeremyash/mon_nf
R
false
false
11,697
r
## SPATIAL library(sp) library(rgeos) library(raster) library(rgdal) library(maptools) library(sf) ## DATA MANAGEMENT library(tidyverse) library(skimr) library(patchwork) library(readxl) # library(zoo) library(lubridate) ## PLOTTING library(scales) library(units) library(viridis) library(extrafont) library(gtable) library(grid) library(ggnewscale) library(ggpubr) library(cowplot) library(patchwork) #---------------------------------------------------------------------------- # water chem shapefile wat_shp <- readOGR("../appa_CL_model/GIS/waterchemsites_master_1213_n83") wat_shp <- spTransform(wat_shp, CRS("+proj=longlat +datum=WGS84")) wat_points <- data.frame(Latitude = wat_shp@coords[,2], Longitude = wat_shp@coords[,1], wat_shp@data) wat_points$ID <- as.character(wat_points$ID) crs_new <- proj4string(wat_shp) mon <- readOGR("gis/mon_nf") mon <- spTransform(mon, crs_new) mon_sf <- st_as_sf(mon) mon_c1 <- readOGR("../fs_admin/data/mon_c1") mon_c1 <- spTransform(mon_c1, crs_new) mon_c1_sf <- st_as_sf(mon_c1) lime_wv <- readOGR("gis/WV_LimestoneSites_06162017") lime_wv <- spTransform(lime_wv, crs_new) lime_wv <- lime_wv[lime_wv$Active == "active",] lime_mon <- over(lime_wv, mon) %>% bind_cols(., lime_wv@data) %>% filter(!(is.na(NAME))) %>% pull(OBJECTID1) lime_wv <- lime_wv[lime_wv$OBJECTID %in% lime_mon, ] lime_ctds <- as.data.frame(lime_wv@coords) lime_ctds$limed <- rep("active", dim(lime_ctds)[1]) colnames(lime_ctds)[1:2] <- c("lon", "lat") # relief map mon_relief <- readRDS("../appa_CL_model/GIS/mon_relief.RDS") # relief relief_df <- as.data.frame(mon_relief, xy = TRUE) %>% dplyr::select(x, y, relief = srgr48i0100a_Value) %>% filter(!(is.na(relief))) rm(mon_relief) acid <- readOGR("gis/mon_acid_sens_soils") acid <- spTransform(acid, crs_new) acid_sf <- st_as_sf(acid) %>% rename(sens = SENSITIVIT) %>% mutate(sens = factor(sens, levels = c("H", "M", "L", "NA"), labels = c("High", "Moderate", "Low", "NA"))) water_chem <- 
read_excel("raw_data/WaterChem_MasterResults_031320.xlsx", na = c(".", "NS", "ND", "<0.04", "<0.03")) %>% # select(ID, Waterbody, Collection, area, ANC_ueq_L) %>% mutate(year = year(Collection), month = month(Collection)) %>% mutate(ID = as.character(ID), season = if_else(month %in% c(9,10,11), "Fall", "Spring")) %>% rename(anc = ANC_ueq_L) %>% group_by(ID, season) %>% arrange(desc(year)) %>% slice(1:3) %>% summarise(mean_anc = mean(anc)) %>% ungroup() # join the two together to explore liming and water trends water_dat <- wat_points %>% select(ID, Latitude, Longitude, Water_Unit_Type = Site_Locat, Monitoring_Site_Name = Waterbody, Limed) %>% left_join(water_chem, by = "ID") %>% mutate(limed = str_sub(Limed, 1, 1)) %>% select(-Limed) # create breaks in data for plotting brks <- c(-Inf, 0, 50, 200, Inf) labels <- c("Acidified", "Acid sensitive", "Buffered", "Well buffered") water_dat$anc_fac <- cut(water_dat$mean_anc, breaks = brks, include.lowest = TRUE, labels = labels) # water_dat$anc_fac_ch <- as.character(water_dat$anc_fac) water_dat$buffered <- if_else(water_dat$limed == "Y" & water_dat$anc_fac %in% c("Buffered", "Well buffered"), "Buffered from liming", as.character(water_dat$anc_fac)) water_dat <- water_dat %>% mutate(anc_ch = coalesce(buffered, as.character(anc_fac))) %>% mutate(anc_buf_fac = factor(anc_ch, levels = c("Acidified", "Acid sensitive", "Buffered", "Well buffered", "Buffered from liming"))) %>% filter(!(is.na(anc_buf_fac))) water_dat$limed[water_dat$limed == "?"] <- "N" water_dat$limed[is.na(water_dat$limed)] <- "N" fall_dat <- water_dat %>% filter(season == "Fall") spring_dat <- water_dat %>% filter(season == "Spring") acidic_limed <- water_dat %>% filter(limed == "Y") %>% filter(anc_buf_fac != "Buffered from liming") %>% select(1:8) write_csv(acidic_limed, "data/acidified_limed_sites.csv") #---------------------------------------------------------------------------- # plot ANC points to get legend anc_plot <- ggplot() + 
geom_point(aes(Longitude, Latitude, fill = anc_fac), shape = 21, color = "grey15", size = 1.5, data = fall_dat) + scale_fill_manual(values = c("darkred", "tomato3", "darkolivegreen3", "darkgreen"), name = expression(bold(Acid~Neutralizing~Capacity~(mu*eq~L^-1)))) + theme_minimal() + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold")) + guides(fill = guide_legend(override.aes = list(size = 4))) anc_legend <- get_legend(anc_plot) # plot ANC points to get legend sens_plot <- ggplot() + geom_sf(aes(fill = NULL), color = "black", mon_sf) + geom_sf(aes(fill = sens), color = NA, acid_sf) + scale_fill_manual(values = alpha(c("red", "yellow", "green", "grey85"), 0.3), na.translate = FALSE, name = "Acid Sensitive Geology") + theme_minimal() + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold")) sens_legend <- get_legend(sens_plot) # limed_plot <- ggplot() + # geom_point(aes(Longitude, Latitude, shape = limed), # # shape = 21, # color = "grey15", # size = 1.5, # data = spring_dat) + # scale_shape_manual(values = c(21, 24), # name = "Lime Addition?", # labels = c("No", "Yes")) + # theme_minimal() + # theme(axis.title = element_blank(), # axis.text = element_blank(), # panel.grid.major = element_line(color = "white"), # legend.text = element_text(size = 13), # legend.title = element_text(size = 13, face = "bold")) + # guides(shape = guide_legend(override.aes = list(size = 4))) limed_plot <- ggplot() + geom_point(aes(lon, lat, shape = "limed"), color = "midnightblue", size = 1.5, data = lime_ctds) + scale_shape_manual(values = c(17), name = "Liming Site", labels = "") + theme_minimal() + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = 
element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold")) + guides(shape = guide_legend(override.aes = list(size = 4))) limed_legend <- get_legend(limed_plot) # plot_map <- ggplot() + # geom_sf(aes(fill = NULL), alpha = 0, color = "black", mon_sf) + # # geom_raster(data = relief_df, aes(x = x, # # y = y, # # alpha = relief)) + # # use the "alpha hack" # scale_alpha(name = "", range = c(0.6, 0), guide = F) + # geom_sf(aes(fill = sens), color = NA, acid_sf) + # geom_sf(aes(fill = NULL), alpha = 0, color = "black", size = 1.5, mon_c1_sf) + # scale_fill_manual(values = alpha(c("red", "yellow", "green", "grey85"), 0.3), # na.translate = FALSE, # name = "Acid Sensitive Geology", # guide = FALSE) + # new_scale_fill() + # geom_point(aes(Longitude, Latitude, fill = anc_buf_fac), # shape = 21, # color = "grey15", # size = 2.3, # data = spring_dat) + # scale_fill_manual(values = c("darkred", "tomato3", "darkolivegreen3", "darkgreen", "midnightblue"), # name = expression(bold(Acid~Neutralizing~Capacity~(mu*eq~L^-1))), # guide = FALSE) + # theme_minimal() + # scale_x_continuous(limits = c(-80, -79.1)) + # scale_y_continuous(limits = c(38.8, 39.3)) + # theme(axis.title = element_blank(), # axis.text = element_blank(), # panel.grid.major = element_line(color = "white"), # legend.text = element_text(size = 13), # legend.title = element_text(size = 13, face = "bold"), # plot.margin = margin(0,0,0,0)) plot_map <- ggplot() + geom_sf(aes(fill = NULL), alpha = 0, color = "black", mon_sf) + # geom_raster(data = relief_df, aes(x = x, # y = y, # alpha = relief)) + # use the "alpha hack" scale_alpha(name = "", range = c(0.6, 0), guide = F) + geom_sf(aes(fill = sens), color = NA, acid_sf) + geom_sf(aes(fill = NULL), alpha = 0, color = "black", size = 1.5, mon_c1_sf) + scale_fill_manual(values = alpha(c("red", "yellow", "green", "grey85"), 0.3), na.translate = FALSE, name = "Acid Sensitive Geology", guide = FALSE) + 
new_scale_fill() + geom_point(aes(Longitude, Latitude, fill = anc_fac), shape = 21, color = "grey15", size = 3.5, data = fall_dat) + geom_point(aes(lon, lat, shape = "limed"), color = "midnightblue", size = 3.5, data = lime_ctds) + scale_shape_manual(values = c(17), name = "Liming Site", guide = FALSE) + scale_fill_manual(values = c("darkred", "tomato3", "darkolivegreen3", "darkgreen"), name = expression(bold(Acid~Neutralizing~Capacity~(mu*eq~L^-1))), guide = FALSE) + theme_minimal() + scale_x_continuous(limits = c(-80, -79.1)) + scale_y_continuous(limits = c(38.8, 39.3)) + theme(axis.title = element_blank(), axis.text = element_blank(), panel.grid.major = element_line(color = "white"), legend.text = element_text(size = 13), legend.title = element_text(size = 13, face = "bold"), plot.margin = margin(0,0,0,0)) bbox_df <- tibble(x = c(-80, -80, -79.1, -79.1, -80), y = c(39.3, 38.8, 38.8, 39.3, 39.3)) inset_map <- ggplot() + geom_sf(fill = NA, color = "black", data = mon_sf) + geom_path(data=bbox_df, aes(x,y), color="red", lwd=1) + theme_void() plot_inset_map <- ggdraw() + draw_plot(plot_map) + draw_plot(inset_map, x = 0.65, y = 0.65, width = 0.3, height = 0.3) # create a blank plot for legend alignment blank_p <- plot_spacer() + theme_void() # combine legend 1 & 2 leg12 <- plot_grid(blank_p, anc_legend, limed_legend, sens_legend, blank_p, ncol = 5) # # combine legend 3 & blank plot # leg30 <- plot_grid(leg3, blank_p, # blank_p, # nrow = 3 # ) # # # combine all legends # leg123 <- plot_grid(leg12, leg30, # ncol = 2 # ) final_p <- plot_grid(plot_inset_map, leg12, nrow = 2, align = "hv", axis = "l", # hjust = 0.5, rel_heights = c(4, 1) ) save_plot(filename = "figures/anc_3yr_avg_map_fall.pdf", plot = final_p, nrow = 1, base_height = 8, base_width = 11)
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character") head(outcome) ncol(outcome) nrow(outcome) names(outcome) outcome[, 11] <- as.numeric(outcome[, 11]) hist(outcome[, 11]) best <- function(state, outcome) { ## Read outcome data data <- read.csv("outcome-of-care-measures.csv") ## Check that state and outcome are valid states <- levels(data[, 7])[data[, 7]] state_flag <- FALSE for (i in 1:length(states)) { if (state == states[i]) { state_flag <- TRUE } } if (!state_flag) { stop ("invalid state") } if (!((outcome == "heart attack") | (outcome == "heart failure") | (outcome == "pneumonia"))) { stop ("invalid outcome") } ## Return hospital name in that state with lowest 30-day death rate col <- if (outcome == "heart attack") { 11 } else if (outcome == "heart failure") { 17 } else { 23 } data[, col] <- suppressWarnings(as.numeric(levels(data[, col])[data[, col]])) data[, 2] <- as.character(data[, 2]) statedata <- data[grep(state, data$State), ] orderdata <- statedata[order(statedata[, col], statedata[, 2], na.last = NA), ] orderdata[1, 2] } source("best.R") best("TX", "heart attack") best("TX", "heart failure") best("MD", "heart attack") best("MD", "pneumonia") best("BB", "heart attack") best("NY", "hert attack") best("SC", "heart attack") best("NY", "pneumonia") best("AK", "pneumonia")
/2_RProgramming/ProgrammingAssignment3/best.R
no_license
cesarpbn1/Johns_Hopkins_University
R
false
false
1,691
r
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character") head(outcome) ncol(outcome) nrow(outcome) names(outcome) outcome[, 11] <- as.numeric(outcome[, 11]) hist(outcome[, 11]) best <- function(state, outcome) { ## Read outcome data data <- read.csv("outcome-of-care-measures.csv") ## Check that state and outcome are valid states <- levels(data[, 7])[data[, 7]] state_flag <- FALSE for (i in 1:length(states)) { if (state == states[i]) { state_flag <- TRUE } } if (!state_flag) { stop ("invalid state") } if (!((outcome == "heart attack") | (outcome == "heart failure") | (outcome == "pneumonia"))) { stop ("invalid outcome") } ## Return hospital name in that state with lowest 30-day death rate col <- if (outcome == "heart attack") { 11 } else if (outcome == "heart failure") { 17 } else { 23 } data[, col] <- suppressWarnings(as.numeric(levels(data[, col])[data[, col]])) data[, 2] <- as.character(data[, 2]) statedata <- data[grep(state, data$State), ] orderdata <- statedata[order(statedata[, col], statedata[, 2], na.last = NA), ] orderdata[1, 2] } source("best.R") best("TX", "heart attack") best("TX", "heart failure") best("MD", "heart attack") best("MD", "pneumonia") best("BB", "heart attack") best("NY", "hert attack") best("SC", "heart attack") best("NY", "pneumonia") best("AK", "pneumonia")
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584306637799e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615827325-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
361
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584306637799e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist) str(result)
## Libraries library(dplyr) library(lubridate) ## Read data consumption <- read.table("./Week1/household_power_consumption.txt", header = TRUE, sep = ";", colClasses = "character", na.strings = "?") consumption <- tbl_df(consumption) ## Tidying data names(consumption) <- as.vector(strsplit(names(consumption), "\\.")) consumption[,1] <- dmy(consumption$Date) consumption <- filter(consumption, Date ==dmy("01/02/2007") | Date == dmy("02/02/2007")) consumption[,3:9] <- sapply(consumption[,3:9],as.numeric) consumption <- mutate(consumption, Date = paste(consumption$Date,consumption$Time)) consumption <- select(consumption, -Time) consumption[,1] <- ymd_hms(consumption$Date) ## Graph dev.new(width = 480, height = 480, unit = "px") par(mfrow = c(2,2)) with(consumption,plot(Date, Global_active_power, type = "l", ylab = "Global Active Power")) with(consumption,plot(Date, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")) with(consumption,plot(Date, Sub_metering_1, type = "l", xlab ="", ylab = "Energy sub metering")) points(consumption$Date, consumption$Sub_metering_2, type = "l", col = "orange") points(consumption$Date, consumption$Sub_metering_3, type = "l", col = "blue") legend("topright", legend = names(consumption[,6:8]), col= c("black", "orange", "blue"), lty=1, cex = 0.5, bty = "n", x.intersp = 0.5) with(consumption,plot(Date, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")) dev.copy(png, file = "plot4.png") dev.off()
/plot4.R
no_license
abuitrago/ExData_Plotting1
R
false
false
1,517
r
## Libraries library(dplyr) library(lubridate) ## Read data consumption <- read.table("./Week1/household_power_consumption.txt", header = TRUE, sep = ";", colClasses = "character", na.strings = "?") consumption <- tbl_df(consumption) ## Tidying data names(consumption) <- as.vector(strsplit(names(consumption), "\\.")) consumption[,1] <- dmy(consumption$Date) consumption <- filter(consumption, Date ==dmy("01/02/2007") | Date == dmy("02/02/2007")) consumption[,3:9] <- sapply(consumption[,3:9],as.numeric) consumption <- mutate(consumption, Date = paste(consumption$Date,consumption$Time)) consumption <- select(consumption, -Time) consumption[,1] <- ymd_hms(consumption$Date) ## Graph dev.new(width = 480, height = 480, unit = "px") par(mfrow = c(2,2)) with(consumption,plot(Date, Global_active_power, type = "l", ylab = "Global Active Power")) with(consumption,plot(Date, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")) with(consumption,plot(Date, Sub_metering_1, type = "l", xlab ="", ylab = "Energy sub metering")) points(consumption$Date, consumption$Sub_metering_2, type = "l", col = "orange") points(consumption$Date, consumption$Sub_metering_3, type = "l", col = "blue") legend("topright", legend = names(consumption[,6:8]), col= c("black", "orange", "blue"), lty=1, cex = 0.5, bty = "n", x.intersp = 0.5) with(consumption,plot(Date, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")) dev.copy(png, file = "plot4.png") dev.off()
###########################################################################/** # @set "class=AffymetrixCelSet" # @RdocMethod extractAffyBatch # @alias extractAffyBatch.ChipEffectSet # @alias extractAffyBatch # # @title "Extracts an in-memory AffyBatch object from the CEL set" # # \description{ # @get "title". # Note that any modifications done to the extract object will \emph{not} # be reflected in the original CEL set. # } # # @synopsis # # \arguments{ # \item{...}{Argument passed to \code{ReadAffy()} # (@see "affy::read.affybatch").} # \item{verbose}{See @see "R.utils::Verbose".} # } # # \value{ # Returns an @see "affy::AffyBatch-class" object. # } # # \details{ # Since the \pkg{affy} package is making use of special CDF environment # packages, this method will warn if the needed package is missing and # explain that \pkg{affy} will later try to download and install it # automatically. # } # # @author "HB" # # \seealso{ # Internally @see "affy::read.affybatch" is used to read the data. # @seeclass # } # # @keyword IO # @keyword programming #*/########################################################################### setMethodS3("extractAffyBatch", "AffymetrixCelSet", function(this, ..., verbose=FALSE) { requireNamespace("affy") || throw("Package not loaded: affy") cleancdfname <- affy::cleancdfname ReadAffy <- affy::ReadAffy # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Validate arguments # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Argument 'verbose': verbose <- Arguments$getVerbose(verbose); if (verbose) { pushState(verbose); on.exit(popState(verbose)); } cdf <- getCdf(this); chipType <- getChipType(cdf, fullname=FALSE); cdfPkgName <- cleancdfname(chipType); suppressWarnings({ .require <- require # To please R CMD check res <- .require(cdfPkgName, character.only=TRUE); }); if (!res) { warning("CDF enviroment package '", cdfPkgName, "' not installed. 
The 'affy' package will later try to download it from Bioconductor and install it."); } filenames <- getPathnames(this); verbose && enter(verbose, "Creating AffyBatch from ", length(filenames), " CEL files"); verbose && cat(verbose, "Filenames: ", paste(filenames, collapse=", ")); sampleNames <- getFullNames(this); verbose && cat(verbose, "Sample names: ", paste(sampleNames, collapse=", ")); # Sanity check dups <- sort(sampleNames[duplicated(sampleNames)]); if (length(dups) > 0) { throw(sprintf("Cannot load %s as an AffyBatch. Detected %d files that share the same sample names: %s", class(this)[1], length(dups)+length(unique(dups)), paste(unique(dups), collapse=", "))); } # Specify ReadAffy() of 'affy' to avoid conflicts with the one # in 'oligo'. read.affybatch <- affy::read.affybatch; ReadAffy <- affy::ReadAffy; res <- ReadAffy(filenames=filenames, sampleNames=sampleNames, ..., verbose=as.logical(verbose)); verbose && exit(verbose); res; }) # extractAffyBatch() setMethodS3("extractAffyBatch", "ChipEffectSet", function(this, ...) { throw("Cannot extract AffyBatch from an ", class(this)[1], " object because it contains estimates that are summarized over sets of probes, whereas an AffyBatch should contain probe-level signals: ", getPath(this)); }, protected=TRUE) ############################################################################ # HISTORY: # 2010-11-17 # o ROBUSTNESS: Now extractAffyBatch() for AffymetrixCelSet asserts that # the sample names are unique, which affy::ReadAffy() requires. # Moreover, the sample names are now the fullnames not just the names. # 2010-09-06 # o ROBUSTNESS: Added extractAffyBatch() for ChipEffectSet that gives an # informative error message explaining why it doesn't make sense to do so. # 2006-10-02 # o Created. A first small step toward an interface to Bioconductor. ############################################################################
/R/AffymetrixCelSet.extractAffyBatch.R
no_license
microarray/aroma.affymetrix
R
false
false
4,105
r
###########################################################################/** # @set "class=AffymetrixCelSet" # @RdocMethod extractAffyBatch # @alias extractAffyBatch.ChipEffectSet # @alias extractAffyBatch # # @title "Extracts an in-memory AffyBatch object from the CEL set" # # \description{ # @get "title". # Note that any modifications done to the extract object will \emph{not} # be reflected in the original CEL set. # } # # @synopsis # # \arguments{ # \item{...}{Argument passed to \code{ReadAffy()} # (@see "affy::read.affybatch").} # \item{verbose}{See @see "R.utils::Verbose".} # } # # \value{ # Returns an @see "affy::AffyBatch-class" object. # } # # \details{ # Since the \pkg{affy} package is making use of special CDF environment # packages, this method will warn if the needed package is missing and # explain that \pkg{affy} will later try to download and install it # automatically. # } # # @author "HB" # # \seealso{ # Internally @see "affy::read.affybatch" is used to read the data. # @seeclass # } # # @keyword IO # @keyword programming #*/########################################################################### setMethodS3("extractAffyBatch", "AffymetrixCelSet", function(this, ..., verbose=FALSE) { requireNamespace("affy") || throw("Package not loaded: affy") cleancdfname <- affy::cleancdfname ReadAffy <- affy::ReadAffy # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Validate arguments # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Argument 'verbose': verbose <- Arguments$getVerbose(verbose); if (verbose) { pushState(verbose); on.exit(popState(verbose)); } cdf <- getCdf(this); chipType <- getChipType(cdf, fullname=FALSE); cdfPkgName <- cleancdfname(chipType); suppressWarnings({ .require <- require # To please R CMD check res <- .require(cdfPkgName, character.only=TRUE); }); if (!res) { warning("CDF enviroment package '", cdfPkgName, "' not installed. 
The 'affy' package will later try to download it from Bioconductor and install it."); } filenames <- getPathnames(this); verbose && enter(verbose, "Creating AffyBatch from ", length(filenames), " CEL files"); verbose && cat(verbose, "Filenames: ", paste(filenames, collapse=", ")); sampleNames <- getFullNames(this); verbose && cat(verbose, "Sample names: ", paste(sampleNames, collapse=", ")); # Sanity check dups <- sort(sampleNames[duplicated(sampleNames)]); if (length(dups) > 0) { throw(sprintf("Cannot load %s as an AffyBatch. Detected %d files that share the same sample names: %s", class(this)[1], length(dups)+length(unique(dups)), paste(unique(dups), collapse=", "))); } # Specify ReadAffy() of 'affy' to avoid conflicts with the one # in 'oligo'. read.affybatch <- affy::read.affybatch; ReadAffy <- affy::ReadAffy; res <- ReadAffy(filenames=filenames, sampleNames=sampleNames, ..., verbose=as.logical(verbose)); verbose && exit(verbose); res; }) # extractAffyBatch() setMethodS3("extractAffyBatch", "ChipEffectSet", function(this, ...) { throw("Cannot extract AffyBatch from an ", class(this)[1], " object because it contains estimates that are summarized over sets of probes, whereas an AffyBatch should contain probe-level signals: ", getPath(this)); }, protected=TRUE) ############################################################################ # HISTORY: # 2010-11-17 # o ROBUSTNESS: Now extractAffyBatch() for AffymetrixCelSet asserts that # the sample names are unique, which affy::ReadAffy() requires. # Moreover, the sample names are now the fullnames not just the names. # 2010-09-06 # o ROBUSTNESS: Added extractAffyBatch() for ChipEffectSet that gives an # informative error message explaining why it doesn't make sense to do so. # 2006-10-02 # o Created. A first small step toward an interface to Bioconductor. ############################################################################
percentiles <- numeric(10000) for(i in 1:length(percentiles)) { samps <- rnorm(714) percentiles[i] <- quantile(samps, probs = .975) } sd(percentiles)
/Homework 4/quantiles.R
no_license
IlgizMurzakhanov/BDA
R
false
false
153
r
percentiles <- numeric(10000) for(i in 1:length(percentiles)) { samps <- rnorm(714) percentiles[i] <- quantile(samps, probs = .975) } sd(percentiles)
library(RWeka) library(caret) data <- read.csv('../Datasets/training-data-14-tuples.csv') #data <- read.csv(file.choose()) kfolds <- createFolds(data$buys_computer, k = 2) sum = 0 for(i in kfolds){ train <- data[-i,] test <- data[i,] model <- J48(buys_computer~., data = train) prediction <- predict(model, test) cfMatrix <- confusionMatrix(data = prediction, test$buys_computer) sum <- sum + cfMatrix$overall[1] } accuracy <- sum/length(kfolds) accuracy
/R/dt-accuracy-with-cv-k=2.R
no_license
chitholian/Machine-Learning-Lab
R
false
false
467
r
library(RWeka) library(caret) data <- read.csv('../Datasets/training-data-14-tuples.csv') #data <- read.csv(file.choose()) kfolds <- createFolds(data$buys_computer, k = 2) sum = 0 for(i in kfolds){ train <- data[-i,] test <- data[i,] model <- J48(buys_computer~., data = train) prediction <- predict(model, test) cfMatrix <- confusionMatrix(data = prediction, test$buys_computer) sum <- sum + cfMatrix$overall[1] } accuracy <- sum/length(kfolds) accuracy
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/weights.R \name{queen_weights} \alias{queen_weights} \title{Queen Contiguity Spatial Weights} \usage{ queen_weights( sf_obj, order = 1, include_lower_order = FALSE, precision_threshold = 0 ) } \arguments{ \item{sf_obj}{An sf (simple feature) object} \item{order}{(Optional) Order of contiguity} \item{include_lower_order}{(Optional) Whether or not the lower order neighbors should be included in the weights structure} \item{precision_threshold}{(Optional) The precision of the underlying shape file is insufficient to allow for an exact match of coordinates to determine which polygons are neighbors} } \value{ An instance of Weight-class } \description{ Create a Queen contiguity weights with options of "order", "include lower order" and "precision threshold" } \examples{ library(sf) guerry_path <- system.file("extdata", "Guerry.shp", package = "rgeoda") guerry <- st_read(guerry_path) queen_w <- queen_weights(guerry) summary(queen_w) }
/man/queen_weights.Rd
no_license
whaibao/rgeoda
R
false
true
1,033
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/weights.R \name{queen_weights} \alias{queen_weights} \title{Queen Contiguity Spatial Weights} \usage{ queen_weights( sf_obj, order = 1, include_lower_order = FALSE, precision_threshold = 0 ) } \arguments{ \item{sf_obj}{An sf (simple feature) object} \item{order}{(Optional) Order of contiguity} \item{include_lower_order}{(Optional) Whether or not the lower order neighbors should be included in the weights structure} \item{precision_threshold}{(Optional) The precision of the underlying shape file is insufficient to allow for an exact match of coordinates to determine which polygons are neighbors} } \value{ An instance of Weight-class } \description{ Create a Queen contiguity weights with options of "order", "include lower order" and "precision threshold" } \examples{ library(sf) guerry_path <- system.file("extdata", "Guerry.shp", package = "rgeoda") guerry <- st_read(guerry_path) queen_w <- queen_weights(guerry) summary(queen_w) }
library(ggmap) library(ggplot2) library(animation) #Create a data frame countryDF<- data.frame(c("united states", "france", "india")) colnames(countryDF) <- "countryname" LatLon <- c(apply(countryDF, 1, geocode)) LatLonDF <- do.call(rbind.data.frame, LatLon) countryLatLonDF <- cbind(countryDF, LatLonDF) countryLatLonDF[,"myDate"]<- c("02/12/13", "03/16/14", "01/10/13") countryLatLonDF$myDate <- as.Date(countryLatLonDF$myDate , "%m/%d/%y") countryLatLonDF["counts"] <- as.numeric(c(10,20,30)) #Sort countryLatLonDF based on dates countryLatLonDF <- countryLatLonDF[ order(countryLatLonDF[,4]), ] #Create animation in HTML file saveHTML({ for (i in 1:nrow(countryLatLonDF)) { #Get the map myMap <- ggmap(get_map(location = c(lat=0, lon=0), color="color",source="google", maptype="terrain", zoom=2)) myMap <- myMap + geom_point(data = countryLatLonDF[i,], aes(x = lon, y = lat, color = countryname, alpha = 0.5, fill = "red"), size = 5, shape = 21) + geom_text(data = countryLatLonDF[i,], aes(x = lon, y = lat, label = countryname), size = 3, vjust = 0, hjust = -0.1, color = "blue") + scale_colour_discrete(name = "countryname") print(myMap) } }, img.name = "anim_plot", imgdir = "anim_dir", htmlfile = "anim.html", autobrowse = FALSE, title = "Country animation", verbose =FALSE, interval = 2) graphics.off()
/inst/examples/map4.R
no_license
COMHIS/estc
R
false
false
1,333
r
library(ggmap) library(ggplot2) library(animation) #Create a data frame countryDF<- data.frame(c("united states", "france", "india")) colnames(countryDF) <- "countryname" LatLon <- c(apply(countryDF, 1, geocode)) LatLonDF <- do.call(rbind.data.frame, LatLon) countryLatLonDF <- cbind(countryDF, LatLonDF) countryLatLonDF[,"myDate"]<- c("02/12/13", "03/16/14", "01/10/13") countryLatLonDF$myDate <- as.Date(countryLatLonDF$myDate , "%m/%d/%y") countryLatLonDF["counts"] <- as.numeric(c(10,20,30)) #Sort countryLatLonDF based on dates countryLatLonDF <- countryLatLonDF[ order(countryLatLonDF[,4]), ] #Create animation in HTML file saveHTML({ for (i in 1:nrow(countryLatLonDF)) { #Get the map myMap <- ggmap(get_map(location = c(lat=0, lon=0), color="color",source="google", maptype="terrain", zoom=2)) myMap <- myMap + geom_point(data = countryLatLonDF[i,], aes(x = lon, y = lat, color = countryname, alpha = 0.5, fill = "red"), size = 5, shape = 21) + geom_text(data = countryLatLonDF[i,], aes(x = lon, y = lat, label = countryname), size = 3, vjust = 0, hjust = -0.1, color = "blue") + scale_colour_discrete(name = "countryname") print(myMap) } }, img.name = "anim_plot", imgdir = "anim_dir", htmlfile = "anim.html", autobrowse = FALSE, title = "Country animation", verbose =FALSE, interval = 2) graphics.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/HMMutilities.r \name{local_decode} \alias{local_decode} \title{Local decoding of HMM} \usage{ local_decode(object, ddl = NULL, state.names = NULL) } \arguments{ \item{object}{fitted crm model (must be an HMM model)} \item{ddl}{design data list} \item{state.names}{names for states used to label output; if NULL uses strata.labels + Dead state} } \value{ matrix of state predictions } \description{ Computes state predictions one at a time for each occasion for each individual } \examples{ # \donttest{ # This example is excluded from testing to reduce package check time # cormack-jolly-seber model data(dipper) mod=crm(dipper,model="hmmcjs") local_decode(mod) } } \author{ Jeff Laake } \references{ Zucchini, W. and I.L. MacDonald. 2009. Hidden Markov Models for Time Series: An Introduction using R. Chapman and Hall, Boca Raton, FL. See page 80. } \keyword{utility}
/marked/man/local_decode.Rd
no_license
pconn/marked
R
false
true
990
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/HMMutilities.r \name{local_decode} \alias{local_decode} \title{Local decoding of HMM} \usage{ local_decode(object, ddl = NULL, state.names = NULL) } \arguments{ \item{object}{fitted crm model (must be an HMM model)} \item{ddl}{design data list} \item{state.names}{names for states used to label output; if NULL uses strata.labels + Dead state} } \value{ matrix of state predictions } \description{ Computes state predictions one at a time for each occasion for each individual } \examples{ # \donttest{ # This example is excluded from testing to reduce package check time # cormack-jolly-seber model data(dipper) mod=crm(dipper,model="hmmcjs") local_decode(mod) } } \author{ Jeff Laake } \references{ Zucchini, W. and I.L. MacDonald. 2009. Hidden Markov Models for Time Series: An Introduction using R. Chapman and Hall, Boca Raton, FL. See page 80. } \keyword{utility}
library(plyr) library(dplyr) library(reshape2) ## Load in data if (!file.exists("./data")) { dir.create("./data") } if (!file.exists("./data/UCI HAR Dataset.zip")) { download.file( url="https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", method="curl", destfile="./data/UCI HAR Dataset.zip") unzip("./data/UCI HAR Dataset.zip", exdir="./data") } ## Get List of all features. features<-read.table("data/UCI HAR Dataset/features.txt") # The mean and std subset targetFeatures<-filter(features, grepl("mean\\(\\)|std\\(\\)",features$V2)) ## Get activity Labels activityLabels<-read.table("data/UCI HAR Dataset/activity_labels.txt") ## Get the training data trainingSet<-read.table("data/UCI HAR Dataset/train/X_train.txt") trainingSubjects<-read.table("data/UCI HAR Dataset/train/subject_train.txt") names(trainingSubjects)<-"subjects" trainingLabels<-read.table("data/UCI HAR Dataset/train/y_train.txt") names(trainingLabels)<-"traininglabel" # Combine all training data fullTraining<-cbind( trainingSubjects, trainingLabels, trainingSet ) fullTraining<-merge(activityLabels, fullTraining, by.x="V1", by.y="traininglabel", all = TRUE) # match activity numbers to names fullTraining<-fullTraining[,-1] # remove redundant column ## Get the test data testSet<-read.table("data/UCI HAR Dataset/test/X_test.txt") testSubjects<-read.table("data/UCI HAR Dataset/test/subject_test.txt") names(testSubjects)<-"subjects" testLabels<-read.table("data/UCI HAR Dataset/test/y_test.txt") names(testLabels)<-"traininglabel" # Combine all test data fullTest<-cbind(testSubjects, testLabels, testSet) fullTest<-merge(activityLabels, fullTest, by.x="V1", by.y="traininglabel", all = TRUE) # match activity numbers to names fullTest<-fullTest[,-1] # remove redundant column ## Merge the training and the test sets to create one data set. fullData<-rbind(fullTraining, fullTest) # Extracts only the measurements on the mean and standard deviation for each measurement. 
fullData<-select(fullData, 2, 1, targetFeatures$V1+2) names(fullData)<-c("subject","activity", as.character(targetFeatures$V2)) # Clean up the workspace rm(list=c("features","fullTest","fullTraining","targetFeatures", "testLabels","testSet","testSubjects","trainingLabels", "trainingSet","trainingSubjects","activityLabels")) # Appropriately labels the data set with descriptive variable names (good luck with that!) currentNames<-names(fullData) currentNames<-sub("Acc", "Accelerometer", currentNames) currentNames<-sub("Gyro", "Gyroscope", currentNames) currentNames<-sub("Mag", "Magnitude", currentNames) currentNames<-sub("-mean\\(\\)", "Mean", currentNames) currentNames<-sub("-std\\(\\)", "Stdev", currentNames) currentNames<-gsub("-", "", currentNames) names(fullData)<-currentNames # Create a second, independent tidy data set with # the average of each variable for each activity and each subject. tidyData <- fullData %>% melt(id.vars=c("subject","activity")) %>% group_by(subject, activity, variable) %>% summarise(value=mean(value)) %>% arrange(subject, activity, variable) tidyData$variable<-paste0( "mean",tidyData$variable ) # Output the data write.csv(fullData, file="output/fullData.csv") write.csv(tidyData, file="output/tidyData.csv") write.table(tidyData, file="output/tidyData.txt", row.name=FALSE)
/run_analysis.R
no_license
Phippsy/Tidy-Galaxy
R
false
false
3,550
r
library(plyr) library(dplyr) library(reshape2) ## Load in data if (!file.exists("./data")) { dir.create("./data") } if (!file.exists("./data/UCI HAR Dataset.zip")) { download.file( url="https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", method="curl", destfile="./data/UCI HAR Dataset.zip") unzip("./data/UCI HAR Dataset.zip", exdir="./data") } ## Get List of all features. features<-read.table("data/UCI HAR Dataset/features.txt") # The mean and std subset targetFeatures<-filter(features, grepl("mean\\(\\)|std\\(\\)",features$V2)) ## Get activity Labels activityLabels<-read.table("data/UCI HAR Dataset/activity_labels.txt") ## Get the training data trainingSet<-read.table("data/UCI HAR Dataset/train/X_train.txt") trainingSubjects<-read.table("data/UCI HAR Dataset/train/subject_train.txt") names(trainingSubjects)<-"subjects" trainingLabels<-read.table("data/UCI HAR Dataset/train/y_train.txt") names(trainingLabels)<-"traininglabel" # Combine all training data fullTraining<-cbind( trainingSubjects, trainingLabels, trainingSet ) fullTraining<-merge(activityLabels, fullTraining, by.x="V1", by.y="traininglabel", all = TRUE) # match activity numbers to names fullTraining<-fullTraining[,-1] # remove redundant column ## Get the test data testSet<-read.table("data/UCI HAR Dataset/test/X_test.txt") testSubjects<-read.table("data/UCI HAR Dataset/test/subject_test.txt") names(testSubjects)<-"subjects" testLabels<-read.table("data/UCI HAR Dataset/test/y_test.txt") names(testLabels)<-"traininglabel" # Combine all test data fullTest<-cbind(testSubjects, testLabels, testSet) fullTest<-merge(activityLabels, fullTest, by.x="V1", by.y="traininglabel", all = TRUE) # match activity numbers to names fullTest<-fullTest[,-1] # remove redundant column ## Merge the training and the test sets to create one data set. fullData<-rbind(fullTraining, fullTest) # Extracts only the measurements on the mean and standard deviation for each measurement. 
fullData<-select(fullData, 2, 1, targetFeatures$V1+2) names(fullData)<-c("subject","activity", as.character(targetFeatures$V2)) # Clean up the workspace rm(list=c("features","fullTest","fullTraining","targetFeatures", "testLabels","testSet","testSubjects","trainingLabels", "trainingSet","trainingSubjects","activityLabels")) # Appropriately labels the data set with descriptive variable names (good luck with that!) currentNames<-names(fullData) currentNames<-sub("Acc", "Accelerometer", currentNames) currentNames<-sub("Gyro", "Gyroscope", currentNames) currentNames<-sub("Mag", "Magnitude", currentNames) currentNames<-sub("-mean\\(\\)", "Mean", currentNames) currentNames<-sub("-std\\(\\)", "Stdev", currentNames) currentNames<-gsub("-", "", currentNames) names(fullData)<-currentNames # Create a second, independent tidy data set with # the average of each variable for each activity and each subject. tidyData <- fullData %>% melt(id.vars=c("subject","activity")) %>% group_by(subject, activity, variable) %>% summarise(value=mean(value)) %>% arrange(subject, activity, variable) tidyData$variable<-paste0( "mean",tidyData$variable ) # Output the data write.csv(fullData, file="output/fullData.csv") write.csv(tidyData, file="output/tidyData.csv") write.table(tidyData, file="output/tidyData.txt", row.name=FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gradientInterpolation.R \name{covGradAtLocs} \alias{covGradAtLocs} \title{Gradient of covariate field} \usage{ covGradAtLocs(locs, cov_list = NULL, grad_fun = NULL) } \arguments{ \item{locs}{Matrix of locations where the gradient should be evaluated} \item{cov_list}{List of J (number of covariates) "raster like" elements. A raster like element is a 3 elements list with named elements 1) "x" a vector of increasing x locations (at which the covariate is sampled) 2) "y" a vector of increasing y locations (at which the covariate is sampled) 3) "z" a size(x)*size(y) matrix giving covariate values at location (x, y)} \item{grad_fun}{Optional list of functions taking a 2d vector and returning a two 2d vector for the gradient} } \value{ Three-dimensional array of gradients of covariate fields. The rows index time, the columns are the dimensions (x and y), and the layers index the covariates. } \description{ Gradient of covariate field }
/man/covGradAtLocs.Rd
no_license
papayoun/Rhabit
R
false
true
1,023
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gradientInterpolation.R \name{covGradAtLocs} \alias{covGradAtLocs} \title{Gradient of covariate field} \usage{ covGradAtLocs(locs, cov_list = NULL, grad_fun = NULL) } \arguments{ \item{locs}{Matrix of locations where the gradient should be evaluated} \item{cov_list}{List of J (number of covariates) "raster like" elements. A raster like element is a 3 elements list with named elements 1) "x" a vector of increasing x locations (at which the covariate is sampled) 2) "y" a vector of increasing y locations (at which the covariate is sampled) 3) "z" a size(x)*size(y) matrix giving covariate values at location (x, y)} \item{grad_fun}{Optional list of functions taking a 2d vector and returning a two 2d vector for the gradient} } \value{ Three-dimensional array of gradients of covariate fields. The rows index time, the columns are the dimensions (x and y), and the layers index the covariates. } \description{ Gradient of covariate field }
check.vars(c("uof.all")) title <- "Force by month" ######################################################################################################## ######################################################################################################## uof.for.year <- uof.all %>% filter(year.of.record == CURRENT.YEAR) ftn.for.year <- uof.for.year %>% distinct(FIT.Number, .keep_all = TRUE) months <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") uof.by.month <- uof.for.year %>% group_by(Month.occurred) %>% summarize(num.uof = n()) ftn.by.month <- ftn.for.year %>% filter(Month.occurred <= 12) %>% group_by(Month.occurred) %>% summarize(num.ftn = n()) force.by.month <- data.frame(month = months, uof.by.month = uof.by.month$num.uof, ftn.by.month = ftn.by.month$num.ftn) force.by.month <- force.by.month %>% mutate( uof.per.ftn = uof.by.month / ftn.by.month ) p.force.by.month <- plot_ly(force.by.month, x = ~month, y = ~ftn.by.month, name = 'Force Incident (FTN)', type = 'scatter', mode = 'lines+markers', line = list(color = 'rgb(22, 96, 167)', width = 2, dash = 'solid')) %>% add_trace(y = ~uof.by.month, name = 'Force Amount (UOF)', mode = 'lines+markers', line = list(color = 'rgb(205, 12, 24)', width = 2, dash = 'solid')) %>% add_trace(y = ~uof.per.ftn, name = "Force per incident (UOF/FTN)", yaxis = 'y2', mode = 'lines+markers', line = list(color = 'rgb(25, 12, 24)', width = 2, dash = 'dashdot')) %>% layout( hovermode = 'compare', margin = list(b = 150), xaxis = list(categoryorder = "array", categoryarray = months, title = paste("Month in", CURRENT.YEAR), showgrid = F), yaxis = list(title = 'Num incidents', showgrid = T), yaxis2 = list(side = 'right', overlaying = "y", title = "Force per incident", range = c(0, 10))) p.force.by.month gen.plotly.json(p.force.by.month, "force-by-month")
/data-analysis/force/time/force-by-month.R
permissive
marvinmarnold/oipm_annual_report_2019
R
false
false
2,117
r
check.vars(c("uof.all")) title <- "Force by month" ######################################################################################################## ######################################################################################################## uof.for.year <- uof.all %>% filter(year.of.record == CURRENT.YEAR) ftn.for.year <- uof.for.year %>% distinct(FIT.Number, .keep_all = TRUE) months <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") uof.by.month <- uof.for.year %>% group_by(Month.occurred) %>% summarize(num.uof = n()) ftn.by.month <- ftn.for.year %>% filter(Month.occurred <= 12) %>% group_by(Month.occurred) %>% summarize(num.ftn = n()) force.by.month <- data.frame(month = months, uof.by.month = uof.by.month$num.uof, ftn.by.month = ftn.by.month$num.ftn) force.by.month <- force.by.month %>% mutate( uof.per.ftn = uof.by.month / ftn.by.month ) p.force.by.month <- plot_ly(force.by.month, x = ~month, y = ~ftn.by.month, name = 'Force Incident (FTN)', type = 'scatter', mode = 'lines+markers', line = list(color = 'rgb(22, 96, 167)', width = 2, dash = 'solid')) %>% add_trace(y = ~uof.by.month, name = 'Force Amount (UOF)', mode = 'lines+markers', line = list(color = 'rgb(205, 12, 24)', width = 2, dash = 'solid')) %>% add_trace(y = ~uof.per.ftn, name = "Force per incident (UOF/FTN)", yaxis = 'y2', mode = 'lines+markers', line = list(color = 'rgb(25, 12, 24)', width = 2, dash = 'dashdot')) %>% layout( hovermode = 'compare', margin = list(b = 150), xaxis = list(categoryorder = "array", categoryarray = months, title = paste("Month in", CURRENT.YEAR), showgrid = F), yaxis = list(title = 'Num incidents', showgrid = T), yaxis2 = list(side = 'right', overlaying = "y", title = "Force per incident", range = c(0, 10))) p.force.by.month gen.plotly.json(p.force.by.month, "force-by-month")
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function # This function wraps your matrix in a structure that allows you to # recover the original matrix, and calculate and cache the inverted matrix. makeCacheMatrix <- function(x = matrix()) { cachedInverse <- NULL getMatrix <- function () x inverse <- function () cacheSolve(l) getInverse <- function () cachedInverse setInverse <- function (i) cachedInverse <<- i l <- list(getMatrix = getMatrix, inverse = inverse, setInverse = setInverse, getInverse = getInverse) l } ## Given a "cache matrix" structure as defined above, will # compute and cache a matrix inverse, or return a previously-cached # inverse matrix. cacheSolve <- function(x, ...) { if (is.null(x$getInverse())) { x$setInverse(solve(x$getMatrix(), ...)) } x$getInverse() } # test: # hilbert <- function(n) { i <- 1:n; 1 / outer(i - 1, i, "+") } # h8 <- hilbert(8) # cm <- makeCacheMatrix(h8)
/cachematrix.R
no_license
JoshuaGross/ProgrammingAssignment2
R
false
false
1,040
r
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function # This function wraps your matrix in a structure that allows you to # recover the original matrix, and calculate and cache the inverted matrix. makeCacheMatrix <- function(x = matrix()) { cachedInverse <- NULL getMatrix <- function () x inverse <- function () cacheSolve(l) getInverse <- function () cachedInverse setInverse <- function (i) cachedInverse <<- i l <- list(getMatrix = getMatrix, inverse = inverse, setInverse = setInverse, getInverse = getInverse) l } ## Given a "cache matrix" structure as defined above, will # compute and cache a matrix inverse, or return a previously-cached # inverse matrix. cacheSolve <- function(x, ...) { if (is.null(x$getInverse())) { x$setInverse(solve(x$getMatrix(), ...)) } x$getInverse() } # test: # hilbert <- function(n) { i <- 1:n; 1 / outer(i - 1, i, "+") } # h8 <- hilbert(8) # cm <- makeCacheMatrix(h8)
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common get_config new_operation new_request send_request #' @include cloudsearchdomain_service.R NULL #' Retrieves a list of documents that match the specified search criteria #' #' @description #' Retrieves a list of documents that match the specified search criteria. How you specify the search criteria depends on which query parser you use. Amazon CloudSearch supports four query parsers: #' #' See [https://www.paws-r-sdk.com/docs/cloudsearchdomain_search/](https://www.paws-r-sdk.com/docs/cloudsearchdomain_search/) for full documentation. #' #' @param cursor Retrieves a cursor value you can use to page through large result sets. #' Use the `size` parameter to control the number of hits to include in #' each response. You can specify either the `cursor` or `start` parameter #' in a request; they are mutually exclusive. To get the first cursor, set #' the cursor value to `initial`. In subsequent requests, specify the #' cursor value returned in the hits section of the response. #' #' For more information, see [Paginating #' Results](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param expr Defines one or more numeric expressions that can be used to sort results #' or specify search or filter criteria. You can also specify expressions #' as return fields. #' #' You specify the expressions in JSON using the form #' `{"EXPRESSIONNAME":"EXPRESSION"}`. You can define and use multiple #' expressions in a search request. For example: #' #' ` {"expression1":"_score*rating", "expression2":"(1/rank)*year"} ` #' #' For information about the variables, operators, and functions you can #' use in expressions, see [Writing #' Expressions](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html#writing-expressions) #' in the *Amazon CloudSearch Developer Guide*. 
#' @param facet Specifies one or more fields for which to get facet information, and #' options that control how the facet information is returned. Each #' specified field must be facet-enabled in the domain configuration. The #' fields and options are specified in JSON using the form #' `{\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}`. #' #' You can specify the following faceting options: #' #' - `buckets` specifies an array of the facet values or ranges to count. #' Ranges are specified using the same syntax that you use to search #' for a range of values. For more information, see [Searching for a #' Range of #' Values](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-ranges.html) #' in the *Amazon CloudSearch Developer Guide*. Buckets are returned in #' the order they are specified in the request. The `sort` and `size` #' options are not valid if you specify `buckets`. #' #' - `size` specifies the maximum number of facets to include in the #' results. By default, Amazon CloudSearch returns counts for the #' top 10. The `size` parameter is only valid when you specify the #' `sort` option; it cannot be used in conjunction with `buckets`. #' #' - `sort` specifies how you want to sort the facets in the results: #' `bucket` or `count`. Specify `bucket` to sort alphabetically or #' numerically by facet value (in ascending order). Specify `count` to #' sort by the facet counts computed for each facet value (in #' descending order). To retrieve facet counts for particular values or #' ranges of values, use the `buckets` option instead of `sort`. #' #' If no facet options are specified, facet counts are computed for all #' field values, the facets are sorted by facet count, and the top 10 #' facets are returned in the results. #' #' To count particular buckets of values, use the `buckets` option. 
For #' example, the following request uses the `buckets` option to calculate #' and return facet counts by decade. #' #' ` \{"year":\{"buckets":["[1970,1979]","[1980,1989]","[1990,1999]","[2000,2009]","[2010,\}"]\}\} ` #' #' To sort facets by facet count, use the `count` option. For example, the #' following request sets the `sort` option to `count` to sort the facet #' values by facet count, with the facet values that have the most matching #' documents listed first. Setting the `size` option to 3 returns only the #' top three facet values. #' #' ` {"year":{"sort":"count","size":3}} ` #' #' To sort the facets by value, use the `bucket` option. For example, the #' following request sets the `sort` option to `bucket` to sort the facet #' values numerically by year, with earliest year listed first. #' #' ` {"year":{"sort":"bucket"}} ` #' #' For more information, see [Getting and Using Facet #' Information](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/faceting.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param filterQuery Specifies a structured query that filters the results of a search #' without affecting how the results are scored and sorted. You use #' `filterQuery` in conjunction with the `query` parameter to filter the #' documents that match the constraints specified in the `query` parameter. #' Specifying a filter controls only which matching documents are included #' in the results, it has no effect on how they are scored and sorted. The #' `filterQuery` parameter supports the full structured query syntax. #' #' For more information about using filters, see [Filtering Matching #' Documents](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/filtering-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param highlight Retrieves highlights for matches in the specified `text` or `text-array` #' fields. Each specified field must be highlight enabled in the domain #' configuration. 
The fields and options are specified in JSON using the #' form #' `{\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}`. #' #' You can specify the following highlight options: #' #' - `format`: specifies the format of the data in the text field: `text` #' or `html`. When data is returned as HTML, all non-alphanumeric #' characters are encoded. The default is `html`. #' - `max_phrases`: specifies the maximum number of occurrences of the #' search term(s) you want to highlight. By default, the first #' occurrence is highlighted. #' - `pre_tag`: specifies the string to prepend to an occurrence of a #' search term. The default for HTML highlights is `&lt;em&gt;`. The #' default for text highlights is `*`. #' - `post_tag`: specifies the string to append to an occurrence of a #' search term. The default for HTML highlights is `&lt;/em&gt;`. The #' default for text highlights is `*`. #' #' If no highlight options are specified for a field, the returned field #' text is treated as HTML and the first match is highlighted with emphasis #' tags: `&lt;em>search-term&lt;/em&gt;`. #' #' For example, the following request retrieves highlights for the `actors` #' and `title` fields. #' #' `{ "actors": {}, "title": {"format": "text","max_phrases": 2,"pre_tag": "","post_tag": ""} }` #' @param partial Enables partial results to be returned if one or more index partitions #' are unavailable. When your search index is partitioned across multiple #' search instances, by default Amazon CloudSearch only returns results if #' every partition can be queried. This means that the failure of a single #' search instance can result in 5xx (internal server) errors. When you #' enable partial results, Amazon CloudSearch returns whatever results are #' available and includes the percentage of documents searched in the #' search results (percent-searched). This enables you to more gracefully #' degrade your users' search experience. 
For example, rather than #' displaying no results, you could display the partial results and a #' message indicating that the results might be incomplete due to a #' temporary system outage. #' @param query &#91;required&#93; Specifies the search criteria for the request. How you specify the #' search criteria depends on the query parser used for the request and the #' parser options specified in the `queryOptions` parameter. By default, #' the `simple` query parser is used to process requests. To use the #' `structured`, `lucene`, or `dismax` query parser, you must also specify #' the `queryParser` parameter. #' #' For more information about specifying search criteria, see [Searching #' Your #' Data](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param queryOptions Configures options for the query parser specified in the `queryParser` #' parameter. You specify the options in JSON using the following form #' `{\"OPTION1\":\"VALUE1\",\"OPTION2\":VALUE2\"...\"OPTIONN\":\"VALUEN\"}.` #' #' The options you can configure vary according to which parser you use: #' #' - `defaultOperator`: The default operator used to combine individual #' terms in the search string. For example: `defaultOperator: 'or'`. #' For the `dismax` parser, you specify a percentage that represents #' the percentage of terms in the search string (rounded down) that #' must match, rather than a default operator. A value of `0%` is the #' equivalent to OR, and a value of `100%` is equivalent to AND. The #' percentage must be specified as a value in the range 0-100 followed #' by the percent (%) symbol. For example, `defaultOperator: 50%`. #' Valid values: `and`, `or`, a percentage in the range 0%-100% #' (`dismax`). Default: `and` (`simple`, `structured`, `lucene`) or #' `100` (`dismax`). Valid for: `simple`, `structured`, `lucene`, and #' `dismax`. 
#' - `fields`: An array of the fields to search when no fields are #' specified in a search. If no fields are specified in a search and #' this option is not specified, all text and text-array fields are #' searched. You can specify a weight for each field to control the #' relative importance of each field when Amazon CloudSearch calculates #' relevance scores. To specify a field weight, append a caret (`^`) #' symbol and the weight to the field name. For example, to boost the #' importance of the `title` field over the `description` field you #' could specify: `"fields":["title^5","description"]`. Valid values: #' The name of any configured field and an optional numeric value #' greater than zero. Default: All `text` and `text-array` fields. #' Valid for: `simple`, `structured`, `lucene`, and `dismax`. #' - `operators`: An array of the operators or special characters you #' want to disable for the simple query parser. If you disable the #' `and`, `or`, or `not` operators, the corresponding operators (`+`, #' `|`, `-`) have no special meaning and are dropped from the search #' string. Similarly, disabling `prefix` disables the wildcard operator #' (`*`) and disabling `phrase` disables the ability to search for #' phrases by enclosing phrases in double quotes. Disabling precedence #' disables the ability to control order of precedence using #' parentheses. Disabling `near` disables the ability to use the ~ #' operator to perform a sloppy phrase search. Disabling the `fuzzy` #' operator disables the ability to use the ~ operator to perform a #' fuzzy search. `escape` disables the ability to use a backslash (`\`) #' to escape special characters within the search string. Disabling #' whitespace is an advanced option that prevents the parser from #' tokenizing on whitespace, which can be useful for Vietnamese. (It #' prevents Vietnamese words from being split incorrectly.) 
For #' example, you could disable all operators other than the phrase #' operator to support just simple term and phrase queries: #' `"operators":["and","not","or", "prefix"]`. Valid values: `and`, #' `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`, #' `prefix`, `whitespace`. Default: All operators and special #' characters are enabled. Valid for: `simple`. #' - `phraseFields`: An array of the `text` or `text-array` fields you #' want to use for phrase searches. When the terms in the search string #' appear in close proximity within a field, the field scores higher. #' You can specify a weight for each field to boost that score. The #' `phraseSlop` option controls how much the matches can deviate from #' the search string and still be boosted. To specify a field weight, #' append a caret (`^`) symbol and the weight to the field name. For #' example, to boost phrase matches in the `title` field over the #' `abstract` field, you could specify: #' `"phraseFields":["title^3", "plot"]` Valid values: The name of any #' `text` or `text-array` field and an optional numeric value greater #' than zero. Default: No fields. If you don't specify any fields with #' `phraseFields`, proximity scoring is disabled even if `phraseSlop` #' is specified. Valid for: `dismax`. #' - `phraseSlop`: An integer value that specifies how much matches can #' deviate from the search phrase and still be boosted according to the #' weights specified in the `phraseFields` option; for example, #' `phraseSlop: 2`. You must also specify `phraseFields` to enable #' proximity scoring. Valid values: positive integers. Default: 0. #' Valid for: `dismax`. #' - `explicitPhraseSlop`: An integer value that specifies how much a #' match can deviate from the search phrase when the phrase is enclosed #' in double quotes in the search string. (Phrases that exceed this #' proximity distance are not considered a match.) 
For example, to #' specify a slop of three for dismax phrase queries, you would specify #' `"explicitPhraseSlop":3`. Valid values: positive integers. #' Default: 0. Valid for: `dismax`. #' - `tieBreaker`: When a term in the search string is found in a #' document's field, a score is calculated for that field based on how #' common the word is in that field compared to other documents. If the #' term occurs in multiple fields within a document, by default only #' the highest scoring field contributes to the document's overall #' score. You can specify a `tieBreaker` value to enable the matches in #' lower-scoring fields to contribute to the document's score. That #' way, if two documents have the same max field score for a particular #' term, the score for the document that has matches in more fields #' will be higher. The formula for calculating the score with a #' tieBreaker is #' `(max field score) + (tieBreaker) * (sum of the scores for the rest of the matching fields)`. #' Set `tieBreaker` to 0 to disregard all but the highest scoring field #' (pure max): `"tieBreaker":0`. Set to 1 to sum the scores from all #' fields (pure sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. #' Default: 0.0. Valid for: `dismax`. #' @param queryParser Specifies which query parser to use to process the request. If #' `queryParser` is not specified, Amazon CloudSearch uses the `simple` #' query parser. #' #' Amazon CloudSearch supports four query parsers: #' #' - `simple`: perform simple searches of `text` and `text-array` fields. #' By default, the `simple` query parser searches all `text` and #' `text-array` fields. You can specify which fields to search by with #' the `queryOptions` parameter. If you prefix a search term with a #' plus sign (+) documents must contain the term to be considered a #' match. (This is the default, unless you configure the default #' operator with the `queryOptions` parameter.) 
You can use the `-` #' (NOT), `|` (OR), and `*` (wildcard) operators to exclude particular #' terms, find results that match any of the specified terms, or search #' for a prefix. To search for a phrase rather than individual terms, #' enclose the phrase in double quotes. For more information, see #' [Searching for #' Text](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-text.html) #' in the *Amazon CloudSearch Developer Guide*. #' - `structured`: perform advanced searches by combining multiple #' expressions to define the search criteria. You can also search #' within particular fields, search for values and ranges of values, #' and use advanced options such as term boosting, `matchall`, and #' `near`. For more information, see [Constructing Compound #' Queries](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-compound-queries.html) #' in the *Amazon CloudSearch Developer Guide*. #' - `lucene`: search using the Apache Lucene query parser syntax. For #' more information, see [Apache Lucene Query Parser #' Syntax](https://lucene.apache.org/core/4_6_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#package_description). #' - `dismax`: search using the simplified subset of the Apache Lucene #' query parser syntax defined by the DisMax query parser. For more #' information, see [DisMax Query Parser #' Syntax](https://cwiki.apache.org/confluence/display/solr/DisMaxQParserPlugin#Query_Syntax). #' @param return Specifies the field and expression values to include in the response. #' Multiple fields or expressions are specified as a comma-separated list. #' By default, a search response includes all return enabled fields #' (`_all_fields`). To return only the document IDs for the matching #' documents, specify `_no_fields`. To retrieve the relevance score #' calculated for each document, specify `_score`. #' @param size Specifies the maximum number of search hits to include in the response. 
#' @param sort Specifies the fields or custom expressions to use to sort the search #' results. Multiple fields or expressions are specified as a #' comma-separated list. You must specify the sort direction (`asc` or #' `desc`) for each field; for example, `year desc,title asc`. To use a #' field to sort results, the field must be sort-enabled in the domain #' configuration. Array type fields cannot be used for sorting. If no #' `sort` parameter is specified, results are sorted by their default #' relevance scores in descending order: `_score desc`. You can also sort #' by document ID (`_id asc`) and version (`_version desc`). #' #' For more information, see [Sorting #' Results](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/sorting-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param start Specifies the offset of the first search hit you want to return. Note #' that the result set is zero-based; the first result is at index 0. You #' can specify either the `start` or `cursor` parameter in a request, they #' are mutually exclusive. #' #' For more information, see [Paginating #' Results](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param stats Specifies one or more fields for which to get statistics information. #' Each specified field must be facet-enabled in the domain configuration. #' The fields are specified in JSON using the form: #' #' `{"FIELD-A":{},"FIELD-B":{}}` #' #' There are currently no options supported for statistics. 
#' #' @keywords internal #' #' @rdname cloudsearchdomain_search cloudsearchdomain_search <- function(cursor = NULL, expr = NULL, facet = NULL, filterQuery = NULL, highlight = NULL, partial = NULL, query, queryOptions = NULL, queryParser = NULL, return = NULL, size = NULL, sort = NULL, start = NULL, stats = NULL) { op <- new_operation( name = "Search", http_method = "GET", http_path = "/2013-01-01/search?format=sdk&pretty=true", paginator = list() ) input <- .cloudsearchdomain$search_input(cursor = cursor, expr = expr, facet = facet, filterQuery = filterQuery, highlight = highlight, partial = partial, query = query, queryOptions = queryOptions, queryParser = queryParser, return = return, size = size, sort = sort, start = start, stats = stats) output <- .cloudsearchdomain$search_output() config <- get_config() svc <- .cloudsearchdomain$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .cloudsearchdomain$operations$search <- cloudsearchdomain_search #' Retrieves autocomplete suggestions for a partial query string #' #' @description #' Retrieves autocomplete suggestions for a partial query string. You can use suggestions enable you to display likely matches before users finish typing. In Amazon CloudSearch, suggestions are based on the contents of a particular text field. When you request suggestions, Amazon CloudSearch finds all of the documents whose values in the suggester field start with the specified query string. The beginning of the field must match the query string to be considered a match. #' #' See [https://www.paws-r-sdk.com/docs/cloudsearchdomain_suggest/](https://www.paws-r-sdk.com/docs/cloudsearchdomain_suggest/) for full documentation. #' #' @param query &#91;required&#93; Specifies the string for which you want to get suggestions. #' @param suggester &#91;required&#93; Specifies the name of the suggester to use to find suggested matches. 
#' @param size Specifies the maximum number of suggestions to return. #' #' @keywords internal #' #' @rdname cloudsearchdomain_suggest cloudsearchdomain_suggest <- function(query, suggester, size = NULL) { op <- new_operation( name = "Suggest", http_method = "GET", http_path = "/2013-01-01/suggest?format=sdk&pretty=true", paginator = list() ) input <- .cloudsearchdomain$suggest_input(query = query, suggester = suggester, size = size) output <- .cloudsearchdomain$suggest_output() config <- get_config() svc <- .cloudsearchdomain$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .cloudsearchdomain$operations$suggest <- cloudsearchdomain_suggest #' Posts a batch of documents to a search domain for indexing #' #' @description #' Posts a batch of documents to a search domain for indexing. A document batch is a collection of add and delete operations that represent the documents you want to add, update, or delete from your domain. Batches can be described in either JSON or XML. Each item that you want Amazon CloudSearch to return as a search result (such as a product) is represented as a document. Every document has a unique ID and one or more fields that contain the data that you want to search and return in results. Individual documents cannot contain more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best possible upload performance, group add and delete operations in batches that are close the 5 MB limit. Submitting a large volume of single-document batches can overload a domain's document service. #' #' See [https://www.paws-r-sdk.com/docs/cloudsearchdomain_upload_documents/](https://www.paws-r-sdk.com/docs/cloudsearchdomain_upload_documents/) for full documentation. #' #' @param documents &#91;required&#93; A batch of documents formatted in JSON or HTML. #' @param contentType &#91;required&#93; The format of the batch you are uploading. 
Amazon CloudSearch supports #' two document batch formats: #' #' - application/json #' - application/xml #' #' @keywords internal #' #' @rdname cloudsearchdomain_upload_documents cloudsearchdomain_upload_documents <- function(documents, contentType) { op <- new_operation( name = "UploadDocuments", http_method = "POST", http_path = "/2013-01-01/documents/batch?format=sdk", paginator = list() ) input <- .cloudsearchdomain$upload_documents_input(documents = documents, contentType = contentType) output <- .cloudsearchdomain$upload_documents_output() config <- get_config() svc <- .cloudsearchdomain$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .cloudsearchdomain$operations$upload_documents <- cloudsearchdomain_upload_documents
/cran/paws.analytics/R/cloudsearchdomain_operations.R
permissive
paws-r/paws
R
false
false
24,482
r
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common get_config new_operation new_request send_request #' @include cloudsearchdomain_service.R NULL #' Retrieves a list of documents that match the specified search criteria #' #' @description #' Retrieves a list of documents that match the specified search criteria. How you specify the search criteria depends on which query parser you use. Amazon CloudSearch supports four query parsers: #' #' See [https://www.paws-r-sdk.com/docs/cloudsearchdomain_search/](https://www.paws-r-sdk.com/docs/cloudsearchdomain_search/) for full documentation. #' #' @param cursor Retrieves a cursor value you can use to page through large result sets. #' Use the `size` parameter to control the number of hits to include in #' each response. You can specify either the `cursor` or `start` parameter #' in a request; they are mutually exclusive. To get the first cursor, set #' the cursor value to `initial`. In subsequent requests, specify the #' cursor value returned in the hits section of the response. #' #' For more information, see [Paginating #' Results](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param expr Defines one or more numeric expressions that can be used to sort results #' or specify search or filter criteria. You can also specify expressions #' as return fields. #' #' You specify the expressions in JSON using the form #' `{"EXPRESSIONNAME":"EXPRESSION"}`. You can define and use multiple #' expressions in a search request. For example: #' #' ` {"expression1":"_score*rating", "expression2":"(1/rank)*year"} ` #' #' For information about the variables, operators, and functions you can #' use in expressions, see [Writing #' Expressions](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html#writing-expressions) #' in the *Amazon CloudSearch Developer Guide*. 
#' @param facet Specifies one or more fields for which to get facet information, and #' options that control how the facet information is returned. Each #' specified field must be facet-enabled in the domain configuration. The #' fields and options are specified in JSON using the form #' `{\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}`. #' #' You can specify the following faceting options: #' #' - `buckets` specifies an array of the facet values or ranges to count. #' Ranges are specified using the same syntax that you use to search #' for a range of values. For more information, see [Searching for a #' Range of #' Values](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-ranges.html) #' in the *Amazon CloudSearch Developer Guide*. Buckets are returned in #' the order they are specified in the request. The `sort` and `size` #' options are not valid if you specify `buckets`. #' #' - `size` specifies the maximum number of facets to include in the #' results. By default, Amazon CloudSearch returns counts for the #' top 10. The `size` parameter is only valid when you specify the #' `sort` option; it cannot be used in conjunction with `buckets`. #' #' - `sort` specifies how you want to sort the facets in the results: #' `bucket` or `count`. Specify `bucket` to sort alphabetically or #' numerically by facet value (in ascending order). Specify `count` to #' sort by the facet counts computed for each facet value (in #' descending order). To retrieve facet counts for particular values or #' ranges of values, use the `buckets` option instead of `sort`. #' #' If no facet options are specified, facet counts are computed for all #' field values, the facets are sorted by facet count, and the top 10 #' facets are returned in the results. #' #' To count particular buckets of values, use the `buckets` option. 
For #' example, the following request uses the `buckets` option to calculate #' and return facet counts by decade. #' #' ` \{"year":\{"buckets":["[1970,1979]","[1980,1989]","[1990,1999]","[2000,2009]","[2010,\}"]\}\} ` #' #' To sort facets by facet count, use the `count` option. For example, the #' following request sets the `sort` option to `count` to sort the facet #' values by facet count, with the facet values that have the most matching #' documents listed first. Setting the `size` option to 3 returns only the #' top three facet values. #' #' ` {"year":{"sort":"count","size":3}} ` #' #' To sort the facets by value, use the `bucket` option. For example, the #' following request sets the `sort` option to `bucket` to sort the facet #' values numerically by year, with earliest year listed first. #' #' ` {"year":{"sort":"bucket"}} ` #' #' For more information, see [Getting and Using Facet #' Information](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/faceting.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param filterQuery Specifies a structured query that filters the results of a search #' without affecting how the results are scored and sorted. You use #' `filterQuery` in conjunction with the `query` parameter to filter the #' documents that match the constraints specified in the `query` parameter. #' Specifying a filter controls only which matching documents are included #' in the results, it has no effect on how they are scored and sorted. The #' `filterQuery` parameter supports the full structured query syntax. #' #' For more information about using filters, see [Filtering Matching #' Documents](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/filtering-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param highlight Retrieves highlights for matches in the specified `text` or `text-array` #' fields. Each specified field must be highlight enabled in the domain #' configuration. 
The fields and options are specified in JSON using the #' form #' `{\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}`. #' #' You can specify the following highlight options: #' #' - `format`: specifies the format of the data in the text field: `text` #' or `html`. When data is returned as HTML, all non-alphanumeric #' characters are encoded. The default is `html`. #' - `max_phrases`: specifies the maximum number of occurrences of the #' search term(s) you want to highlight. By default, the first #' occurrence is highlighted. #' - `pre_tag`: specifies the string to prepend to an occurrence of a #' search term. The default for HTML highlights is `&lt;em&gt;`. The #' default for text highlights is `*`. #' - `post_tag`: specifies the string to append to an occurrence of a #' search term. The default for HTML highlights is `&lt;/em&gt;`. The #' default for text highlights is `*`. #' #' If no highlight options are specified for a field, the returned field #' text is treated as HTML and the first match is highlighted with emphasis #' tags: `&lt;em>search-term&lt;/em&gt;`. #' #' For example, the following request retrieves highlights for the `actors` #' and `title` fields. #' #' `{ "actors": {}, "title": {"format": "text","max_phrases": 2,"pre_tag": "","post_tag": ""} }` #' @param partial Enables partial results to be returned if one or more index partitions #' are unavailable. When your search index is partitioned across multiple #' search instances, by default Amazon CloudSearch only returns results if #' every partition can be queried. This means that the failure of a single #' search instance can result in 5xx (internal server) errors. When you #' enable partial results, Amazon CloudSearch returns whatever results are #' available and includes the percentage of documents searched in the #' search results (percent-searched). This enables you to more gracefully #' degrade your users' search experience. 
For example, rather than #' displaying no results, you could display the partial results and a #' message indicating that the results might be incomplete due to a #' temporary system outage. #' @param query &#91;required&#93; Specifies the search criteria for the request. How you specify the #' search criteria depends on the query parser used for the request and the #' parser options specified in the `queryOptions` parameter. By default, #' the `simple` query parser is used to process requests. To use the #' `structured`, `lucene`, or `dismax` query parser, you must also specify #' the `queryParser` parameter. #' #' For more information about specifying search criteria, see [Searching #' Your #' Data](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param queryOptions Configures options for the query parser specified in the `queryParser` #' parameter. You specify the options in JSON using the following form #' `{\"OPTION1\":\"VALUE1\",\"OPTION2\":VALUE2\"...\"OPTIONN\":\"VALUEN\"}.` #' #' The options you can configure vary according to which parser you use: #' #' - `defaultOperator`: The default operator used to combine individual #' terms in the search string. For example: `defaultOperator: 'or'`. #' For the `dismax` parser, you specify a percentage that represents #' the percentage of terms in the search string (rounded down) that #' must match, rather than a default operator. A value of `0%` is the #' equivalent to OR, and a value of `100%` is equivalent to AND. The #' percentage must be specified as a value in the range 0-100 followed #' by the percent (%) symbol. For example, `defaultOperator: 50%`. #' Valid values: `and`, `or`, a percentage in the range 0%-100% #' (`dismax`). Default: `and` (`simple`, `structured`, `lucene`) or #' `100` (`dismax`). Valid for: `simple`, `structured`, `lucene`, and #' `dismax`. 
#' - `fields`: An array of the fields to search when no fields are #' specified in a search. If no fields are specified in a search and #' this option is not specified, all text and text-array fields are #' searched. You can specify a weight for each field to control the #' relative importance of each field when Amazon CloudSearch calculates #' relevance scores. To specify a field weight, append a caret (`^`) #' symbol and the weight to the field name. For example, to boost the #' importance of the `title` field over the `description` field you #' could specify: `"fields":["title^5","description"]`. Valid values: #' The name of any configured field and an optional numeric value #' greater than zero. Default: All `text` and `text-array` fields. #' Valid for: `simple`, `structured`, `lucene`, and `dismax`. #' - `operators`: An array of the operators or special characters you #' want to disable for the simple query parser. If you disable the #' `and`, `or`, or `not` operators, the corresponding operators (`+`, #' `|`, `-`) have no special meaning and are dropped from the search #' string. Similarly, disabling `prefix` disables the wildcard operator #' (`*`) and disabling `phrase` disables the ability to search for #' phrases by enclosing phrases in double quotes. Disabling precedence #' disables the ability to control order of precedence using #' parentheses. Disabling `near` disables the ability to use the ~ #' operator to perform a sloppy phrase search. Disabling the `fuzzy` #' operator disables the ability to use the ~ operator to perform a #' fuzzy search. `escape` disables the ability to use a backslash (`\`) #' to escape special characters within the search string. Disabling #' whitespace is an advanced option that prevents the parser from #' tokenizing on whitespace, which can be useful for Vietnamese. (It #' prevents Vietnamese words from being split incorrectly.) 
For #' example, you could disable all operators other than the phrase #' operator to support just simple term and phrase queries: #' `"operators":["and","not","or", "prefix"]`. Valid values: `and`, #' `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`, #' `prefix`, `whitespace`. Default: All operators and special #' characters are enabled. Valid for: `simple`. #' - `phraseFields`: An array of the `text` or `text-array` fields you #' want to use for phrase searches. When the terms in the search string #' appear in close proximity within a field, the field scores higher. #' You can specify a weight for each field to boost that score. The #' `phraseSlop` option controls how much the matches can deviate from #' the search string and still be boosted. To specify a field weight, #' append a caret (`^`) symbol and the weight to the field name. For #' example, to boost phrase matches in the `title` field over the #' `abstract` field, you could specify: #' `"phraseFields":["title^3", "plot"]` Valid values: The name of any #' `text` or `text-array` field and an optional numeric value greater #' than zero. Default: No fields. If you don't specify any fields with #' `phraseFields`, proximity scoring is disabled even if `phraseSlop` #' is specified. Valid for: `dismax`. #' - `phraseSlop`: An integer value that specifies how much matches can #' deviate from the search phrase and still be boosted according to the #' weights specified in the `phraseFields` option; for example, #' `phraseSlop: 2`. You must also specify `phraseFields` to enable #' proximity scoring. Valid values: positive integers. Default: 0. #' Valid for: `dismax`. #' - `explicitPhraseSlop`: An integer value that specifies how much a #' match can deviate from the search phrase when the phrase is enclosed #' in double quotes in the search string. (Phrases that exceed this #' proximity distance are not considered a match.) 
For example, to #' specify a slop of three for dismax phrase queries, you would specify #' `"explicitPhraseSlop":3`. Valid values: positive integers. #' Default: 0. Valid for: `dismax`. #' - `tieBreaker`: When a term in the search string is found in a #' document's field, a score is calculated for that field based on how #' common the word is in that field compared to other documents. If the #' term occurs in multiple fields within a document, by default only #' the highest scoring field contributes to the document's overall #' score. You can specify a `tieBreaker` value to enable the matches in #' lower-scoring fields to contribute to the document's score. That #' way, if two documents have the same max field score for a particular #' term, the score for the document that has matches in more fields #' will be higher. The formula for calculating the score with a #' tieBreaker is #' `(max field score) + (tieBreaker) * (sum of the scores for the rest of the matching fields)`. #' Set `tieBreaker` to 0 to disregard all but the highest scoring field #' (pure max): `"tieBreaker":0`. Set to 1 to sum the scores from all #' fields (pure sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. #' Default: 0.0. Valid for: `dismax`. #' @param queryParser Specifies which query parser to use to process the request. If #' `queryParser` is not specified, Amazon CloudSearch uses the `simple` #' query parser. #' #' Amazon CloudSearch supports four query parsers: #' #' - `simple`: perform simple searches of `text` and `text-array` fields. #' By default, the `simple` query parser searches all `text` and #' `text-array` fields. You can specify which fields to search by with #' the `queryOptions` parameter. If you prefix a search term with a #' plus sign (+) documents must contain the term to be considered a #' match. (This is the default, unless you configure the default #' operator with the `queryOptions` parameter.) 
You can use the `-` #' (NOT), `|` (OR), and `*` (wildcard) operators to exclude particular #' terms, find results that match any of the specified terms, or search #' for a prefix. To search for a phrase rather than individual terms, #' enclose the phrase in double quotes. For more information, see #' [Searching for #' Text](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-text.html) #' in the *Amazon CloudSearch Developer Guide*. #' - `structured`: perform advanced searches by combining multiple #' expressions to define the search criteria. You can also search #' within particular fields, search for values and ranges of values, #' and use advanced options such as term boosting, `matchall`, and #' `near`. For more information, see [Constructing Compound #' Queries](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-compound-queries.html) #' in the *Amazon CloudSearch Developer Guide*. #' - `lucene`: search using the Apache Lucene query parser syntax. For #' more information, see [Apache Lucene Query Parser #' Syntax](https://lucene.apache.org/core/4_6_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#package_description). #' - `dismax`: search using the simplified subset of the Apache Lucene #' query parser syntax defined by the DisMax query parser. For more #' information, see [DisMax Query Parser #' Syntax](https://cwiki.apache.org/confluence/display/solr/DisMaxQParserPlugin#Query_Syntax). #' @param return Specifies the field and expression values to include in the response. #' Multiple fields or expressions are specified as a comma-separated list. #' By default, a search response includes all return enabled fields #' (`_all_fields`). To return only the document IDs for the matching #' documents, specify `_no_fields`. To retrieve the relevance score #' calculated for each document, specify `_score`. #' @param size Specifies the maximum number of search hits to include in the response. 
#' @param sort Specifies the fields or custom expressions to use to sort the search #' results. Multiple fields or expressions are specified as a #' comma-separated list. You must specify the sort direction (`asc` or #' `desc`) for each field; for example, `year desc,title asc`. To use a #' field to sort results, the field must be sort-enabled in the domain #' configuration. Array type fields cannot be used for sorting. If no #' `sort` parameter is specified, results are sorted by their default #' relevance scores in descending order: `_score desc`. You can also sort #' by document ID (`_id asc`) and version (`_version desc`). #' #' For more information, see [Sorting #' Results](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/sorting-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param start Specifies the offset of the first search hit you want to return. Note #' that the result set is zero-based; the first result is at index 0. You #' can specify either the `start` or `cursor` parameter in a request, they #' are mutually exclusive. #' #' For more information, see [Paginating #' Results](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) #' in the *Amazon CloudSearch Developer Guide*. #' @param stats Specifies one or more fields for which to get statistics information. #' Each specified field must be facet-enabled in the domain configuration. #' The fields are specified in JSON using the form: #' #' `{"FIELD-A":{},"FIELD-B":{}}` #' #' There are currently no options supported for statistics. 
#' #' @keywords internal #' #' @rdname cloudsearchdomain_search cloudsearchdomain_search <- function(cursor = NULL, expr = NULL, facet = NULL, filterQuery = NULL, highlight = NULL, partial = NULL, query, queryOptions = NULL, queryParser = NULL, return = NULL, size = NULL, sort = NULL, start = NULL, stats = NULL) { op <- new_operation( name = "Search", http_method = "GET", http_path = "/2013-01-01/search?format=sdk&pretty=true", paginator = list() ) input <- .cloudsearchdomain$search_input(cursor = cursor, expr = expr, facet = facet, filterQuery = filterQuery, highlight = highlight, partial = partial, query = query, queryOptions = queryOptions, queryParser = queryParser, return = return, size = size, sort = sort, start = start, stats = stats) output <- .cloudsearchdomain$search_output() config <- get_config() svc <- .cloudsearchdomain$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .cloudsearchdomain$operations$search <- cloudsearchdomain_search #' Retrieves autocomplete suggestions for a partial query string #' #' @description #' Retrieves autocomplete suggestions for a partial query string. You can use suggestions enable you to display likely matches before users finish typing. In Amazon CloudSearch, suggestions are based on the contents of a particular text field. When you request suggestions, Amazon CloudSearch finds all of the documents whose values in the suggester field start with the specified query string. The beginning of the field must match the query string to be considered a match. #' #' See [https://www.paws-r-sdk.com/docs/cloudsearchdomain_suggest/](https://www.paws-r-sdk.com/docs/cloudsearchdomain_suggest/) for full documentation. #' #' @param query &#91;required&#93; Specifies the string for which you want to get suggestions. #' @param suggester &#91;required&#93; Specifies the name of the suggester to use to find suggested matches. 
#' @param size Specifies the maximum number of suggestions to return. #' #' @keywords internal #' #' @rdname cloudsearchdomain_suggest cloudsearchdomain_suggest <- function(query, suggester, size = NULL) { op <- new_operation( name = "Suggest", http_method = "GET", http_path = "/2013-01-01/suggest?format=sdk&pretty=true", paginator = list() ) input <- .cloudsearchdomain$suggest_input(query = query, suggester = suggester, size = size) output <- .cloudsearchdomain$suggest_output() config <- get_config() svc <- .cloudsearchdomain$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .cloudsearchdomain$operations$suggest <- cloudsearchdomain_suggest #' Posts a batch of documents to a search domain for indexing #' #' @description #' Posts a batch of documents to a search domain for indexing. A document batch is a collection of add and delete operations that represent the documents you want to add, update, or delete from your domain. Batches can be described in either JSON or XML. Each item that you want Amazon CloudSearch to return as a search result (such as a product) is represented as a document. Every document has a unique ID and one or more fields that contain the data that you want to search and return in results. Individual documents cannot contain more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best possible upload performance, group add and delete operations in batches that are close the 5 MB limit. Submitting a large volume of single-document batches can overload a domain's document service. #' #' See [https://www.paws-r-sdk.com/docs/cloudsearchdomain_upload_documents/](https://www.paws-r-sdk.com/docs/cloudsearchdomain_upload_documents/) for full documentation. #' #' @param documents &#91;required&#93; A batch of documents formatted in JSON or HTML. #' @param contentType &#91;required&#93; The format of the batch you are uploading. 
Amazon CloudSearch supports #' two document batch formats: #' #' - application/json #' - application/xml #' #' @keywords internal #' #' @rdname cloudsearchdomain_upload_documents cloudsearchdomain_upload_documents <- function(documents, contentType) { op <- new_operation( name = "UploadDocuments", http_method = "POST", http_path = "/2013-01-01/documents/batch?format=sdk", paginator = list() ) input <- .cloudsearchdomain$upload_documents_input(documents = documents, contentType = contentType) output <- .cloudsearchdomain$upload_documents_output() config <- get_config() svc <- .cloudsearchdomain$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .cloudsearchdomain$operations$upload_documents <- cloudsearchdomain_upload_documents
\name{plot.powerTransform} \Rdversion{1.1} \alias{plot.powerTransform} \title{ plot Method for powerTransform Objects } \description{ This function provides a simple function for plotting data using power transformations. } \usage{ \method{plot}{powerTransform}(x, z = NULL, round = TRUE, plot = pairs, ...) } \arguments{ \item{x}{name of the power transformation object } \item{z}{ Additional variables of the same length as those used to get the transformation to be plotted, default is \code{NULL}. } \item{round}{ If \code{TRUE}, the default, use rounded transforms, if \code{FALSE} use the MLEs. } \item{plot}{ Plotting method. Default is \code{pairs}. Another possible choice is \code{scatterplot.matrix} from the \code{car} package. } \item{\dots}{ Optional arguments passed to the plotting method } } \details{ The data used to estimate transformations using \code{powerTransform} are plotted in the transformed scale. } \value{ None. Produces a graph as a side-effect. } \references{ Weisberg, S. (2014) \emph{Applied Linear Regression}, Fourth Edition, Wiley. Fox, J. and Weisberg, S. (2011) \emph{An R Companion to Applied Linear Regression}, Second Edition, Sage. } \author{ Sanford Weisberg, <sandy@umn.edu> } \seealso{ \code{\link{powerTransform}} } \examples{ summary(a3 <- powerTransform(cbind(len, adt, trks, shld, sigs1) ~ 1, Highway1)) with(Highway1, plot(a3, z=rate, col=as.numeric(htype))) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ hplot } \keyword{ regression }% __ONLY ONE__ keyword per line
/man/plot.powerTransform.Rd
no_license
jonathon-love/car3
R
false
false
1,609
rd
\name{plot.powerTransform} \Rdversion{1.1} \alias{plot.powerTransform} \title{ plot Method for powerTransform Objects } \description{ This function provides a simple function for plotting data using power transformations. } \usage{ \method{plot}{powerTransform}(x, z = NULL, round = TRUE, plot = pairs, ...) } \arguments{ \item{x}{name of the power transformation object } \item{z}{ Additional variables of the same length as those used to get the transformation to be plotted, default is \code{NULL}. } \item{round}{ If \code{TRUE}, the default, use rounded transforms, if \code{FALSE} use the MLEs. } \item{plot}{ Plotting method. Default is \code{pairs}. Another possible choice is \code{scatterplot.matrix} from the \code{car} package. } \item{\dots}{ Optional arguments passed to the plotting method } } \details{ The data used to estimate transformations using \code{powerTransform} are plotted in the transformed scale. } \value{ None. Produces a graph as a side-effect. } \references{ Weisberg, S. (2014) \emph{Applied Linear Regression}, Fourth Edition, Wiley. Fox, J. and Weisberg, S. (2011) \emph{An R Companion to Applied Linear Regression}, Second Edition, Sage. } \author{ Sanford Weisberg, <sandy@umn.edu> } \seealso{ \code{\link{powerTransform}} } \examples{ summary(a3 <- powerTransform(cbind(len, adt, trks, shld, sigs1) ~ 1, Highway1)) with(Highway1, plot(a3, z=rate, col=as.numeric(htype))) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ hplot } \keyword{ regression }% __ONLY ONE__ keyword per line
#' Get or set the file path to the CmdStan installation #' #' @description Use the `set_cmdstan_path()` function to tell CmdStanR where the #' CmdStan installation in located. Once the path has been set, #' `cmdstan_path()` will return the full path to the CmdStan installation and #' `cmdstan_version()` will return the CmdStan version number. See **Details** #' for how to avoid manually setting the path in each \R session. #' #' @export #' #' @param path The full file path to the CmdStan installation as a string. If #' `NULL` (the default) then the path is set to the default path used by #' [install_cmdstan()] if it exists. #' @return A string. Either the file path to the CmdStan installation or the #' CmdStan version number. #' #' @details #' Before the package can be used it needs to know where the CmdStan #' installation is located. When the package is loaded it tries to help automate #' this to avoid having to manually set the path every session: #' #' * If the [environment variable][Sys.setenv()] `"CMDSTAN"` exists at load time #' then its value will be automatically set as the default path to CmdStan for #' the \R session. #' * If no environment variable is found when loaded but any directory in the form #' `".cmdstanr/cmdstan-[version]"`, for example `".cmdstanr/cmdstan-2.23.0"`, #' exists in the user's home directory (`Sys.getenv("HOME")`, #' *not* the current working directory) then the path to the cmdstan with the largest #' version number will be set as the path to CmdStan for the \R session. #' This is the same as the default directory that [install_cmdstan()] would use to #' install the latest version of CmdStan. #' #' It is always possible to change the path after loading the package using #' `set_cmdstan_path(path)`. 
#'
set_cmdstan_path <- function(path = NULL) {
  if (is.null(path)) {
    # Fall back to the newest installation under the default install
    # directory; this may itself be NULL if nothing is installed.
    path <- cmdstan_default_path()
  }
  # Guard against NULL (no default installation found): the original
  # `dir.exists(NULL)` returned logical(0) and crashed the `if`.
  if (!is.null(path) && dir.exists(path)) {
    path <- absolute_path(path)
    .cmdstanr$PATH <- path
    .cmdstanr$VERSION <- read_cmdstan_version(path)
    message("CmdStan path set to: ", path)
  } else {
    warning("Path not set. Can't find directory: ", path, call. = FALSE)
  }
  invisible(path)
}

#' @rdname set_cmdstan_path
#' @export
cmdstan_path <- function() {
  path <- .cmdstanr$PATH %||% stop_no_path()
  path <- repair_path(path)
  if (is.null(.cmdstanr$VERSION)) {
    # Version can be missing when the path was set without reading the
    # makefile (e.g. via the CMDSTAN environment variable); read it lazily.
    .cmdstanr$VERSION <- read_cmdstan_version(path)
  }
  path
}

#' @rdname set_cmdstan_path
#' @export
cmdstan_version <- function() {
  .cmdstanr$VERSION %||% stop_no_path()
}

# internal ----------------------------------------------------------------

# Package-local mutable state: the CmdStan path, the CmdStan version string,
# and the session temp directory. Stored in an environment (not a list) so it
# can be updated after the package namespace is sealed.
.cmdstanr <- new.env(parent = emptyenv())
.cmdstanr$PATH <- NULL
.cmdstanr$VERSION <- NULL
.cmdstanr$TEMP_DIR <- NULL

# Path to the session-level temp directory created in cmdstanr_initialize().
cmdstan_tempdir <- function() {
  .cmdstanr$TEMP_DIR
}

# Error to signal whenever a CmdStan path is required but none has been set.
stop_no_path <- function() {
  stop("CmdStan path has not been set yet. See ?set_cmdstan_path.",
       call. = FALSE)
}

#' cmdstan_default_install_path
#'
#' Path to where [install_cmdstan()] with default settings installs CmdStan.
#'
#' @keywords internal
#' @return The installation path.
#' @export
cmdstan_default_install_path <- function() {
  file.path(Sys.getenv("HOME"), ".cmdstanr")
}

#' cmdstan_default_path
#'
#' Returns the path to the installation of cmdstan with the most recent
#' release version.
#'
#' @keywords internal
#' @return Path to the cmdstan installation with the most recent release
#'   version, NULL if no installation found.
#' @export
cmdstan_default_path <- function() {
  installs_path <- file.path(Sys.getenv("HOME"), ".cmdstanr")
  if (dir.exists(installs_path)) {
    cmdstan_installs <- list.dirs(path = installs_path, recursive = FALSE,
                                  full.names = FALSE)
    # Legacy layout: a bare "cmdstan" directory (no version suffix) is renamed
    # to "cmdstan-<version>" so it participates in the ordering below.
    if ("cmdstan" %in% cmdstan_installs) {
      ver <- read_cmdstan_version(file.path(installs_path, "cmdstan"))
      old_path <- file.path(installs_path, "cmdstan")
      new_path <- file.path(installs_path, paste0("cmdstan-", ver))
      file.rename(old_path, new_path)
      cmdstan_installs <- list.dirs(path = installs_path, recursive = FALSE,
                                    full.names = FALSE)
    }
    if (length(cmdstan_installs) > 0) {
      # NOTE(review): this is a lexicographic string sort, so e.g.
      # "cmdstan-2.9.0" would rank above "cmdstan-2.23.0". Confirm whether a
      # version-aware comparison (numeric_version) is needed here.
      latest_cmdstan <- sort(cmdstan_installs, decreasing = TRUE)[1]
      if (is_release_candidate(latest_cmdstan)) {
        # Prefer the final release over its release candidate when both
        # "cmdstan-X.Y.Z-rcN" and "cmdstan-X.Y.Z" are installed.
        non_rc_path <- strsplit(latest_cmdstan, "-rc")[[1]][1]
        if (dir.exists(file.path(installs_path, non_rc_path))) {
          latest_cmdstan <- non_rc_path
        }
      }
      return(file.path(installs_path, latest_cmdstan))
    }
  }
  return(NULL)
}

# Unset the path (only used in tests).
unset_cmdstan_path <- function() {
  .cmdstanr$PATH <- NULL
  .cmdstanr$VERSION <- NULL
}

# Called in .onLoad() in zzz.R: discover and set the CmdStan path, then set
# up the session temp directory.
cmdstanr_initialize <- function() {
  # The CMDSTAN environment variable takes precedence over the default
  # installation directory.
  path <- Sys.getenv("CMDSTAN")
  if (isTRUE(nzchar(path))) { # CMDSTAN environment variable found
    if (dir.exists(path)) {
      path <- absolute_path(path)
      suppressMessages(set_cmdstan_path(path))
    } else {
      warning("Can't find directory specified by environment variable",
              " 'CMDSTAN'. Path not set.", call. = FALSE)
      .cmdstanr$PATH <- NULL
    }
  } else { # environment variable not found; try the default install location
    path <- cmdstan_default_path()
    if (!is.null(path)) {
      suppressMessages(set_cmdstan_path(path))
    }
  }

  # tempdir(check = TRUE) only exists from R 3.5.0 onward.
  if (getRversion() < '3.5.0') {
    .cmdstanr$TEMP_DIR <- tempdir()
  } else {
    .cmdstanr$TEMP_DIR <- tempdir(check = TRUE)
  }
  invisible(TRUE)
}

#' Find the version of CmdStan from its makefile
#' @noRd
#' @param path Path to installation.
#' @return Version number as a string, or `NULL` (with a warning) when no
#'   makefile is found under `path`.
read_cmdstan_version <- function(path) {
  makefile_path <- file.path(path, "makefile")
  if (!file.exists(makefile_path)) {
    warning(
      "Can't find CmdStan makefile to detect version number. ",
      "Path may not point to valid installation.",
      call. = FALSE
    )
    return(NULL)
  }
  makefile <- readLines(makefile_path)
  version_line <- grep("^CMDSTAN_VERSION :=", makefile, value = TRUE)
  sub("CMDSTAN_VERSION := ", "", version_line)
}

#' Returns whether the supplied installation is a release candidate
#' @noRd
#' @param path Path to installation.
#' @return `TRUE` if the installation in the supplied path is a release
#'   candidate, `FALSE` otherwise.
is_release_candidate <- function(path) {
  # Drop any number of trailing slashes so "cmdstan-2.23.0-rc1/" (and
  # "cmdstan-2.23.0-rc1//") are recognized too; the original stripped only
  # a single trailing slash.
  path <- sub("/+$", "", path)
  grepl("-rc[0-9]*$", path)
}
/R/path.R
permissive
ssp3nc3r/cmdstanr
R
false
false
6,744
r
#' Get or set the file path to the CmdStan installation
#'
#' @description Use the `set_cmdstan_path()` function to tell CmdStanR where the
#'   CmdStan installation is located. Once the path has been set,
#'   `cmdstan_path()` will return the full path to the CmdStan installation and
#'   `cmdstan_version()` will return the CmdStan version number. See **Details**
#'   for how to avoid manually setting the path in each \R session.
#'
#' @export
#'
#' @param path The full file path to the CmdStan installation as a string. If
#'   `NULL` (the default) then the path is set to the default path used by
#'   [install_cmdstan()] if it exists.
#' @return A string. Either the file path to the CmdStan installation or the
#'   CmdStan version number.
#'
#' @details
#' Before the package can be used it needs to know where the CmdStan
#' installation is located. When the package is loaded it tries to help automate
#' this to avoid having to manually set the path every session:
#'
#' * If the [environment variable][Sys.setenv()] `"CMDSTAN"` exists at load time
#'   then its value will be automatically set as the default path to CmdStan for
#'   the \R session.
#' * If no environment variable is found when loaded but any directory in the form
#'   `".cmdstanr/cmdstan-[version]"`, for example `".cmdstanr/cmdstan-2.23.0"`,
#'   exists in the user's home directory (`Sys.getenv("HOME")`,
#'   *not* the current working directory) then the path to the cmdstan with the largest
#'   version number will be set as the path to CmdStan for the \R session.
#'   This is the same as the default directory that [install_cmdstan()] would use to
#'   install the latest version of CmdStan.
#'
#' It is always possible to change the path after loading the package using
#' `set_cmdstan_path(path)`.
#'
set_cmdstan_path <- function(path = NULL) {
  if (is.null(path)) {
    # Fall back to the newest installation under the default install
    # directory; this may itself be NULL if nothing is installed.
    path <- cmdstan_default_path()
  }
  # Guard against NULL (no default installation found): the original
  # `dir.exists(NULL)` returned logical(0) and crashed the `if`.
  if (!is.null(path) && dir.exists(path)) {
    path <- absolute_path(path)
    .cmdstanr$PATH <- path
    .cmdstanr$VERSION <- read_cmdstan_version(path)
    message("CmdStan path set to: ", path)
  } else {
    warning("Path not set. Can't find directory: ", path, call. = FALSE)
  }
  invisible(path)
}

#' @rdname set_cmdstan_path
#' @export
cmdstan_path <- function() {
  path <- .cmdstanr$PATH %||% stop_no_path()
  path <- repair_path(path)
  if (is.null(.cmdstanr$VERSION)) {
    # Version can be missing when the path was set without reading the
    # makefile (e.g. via the CMDSTAN environment variable); read it lazily.
    .cmdstanr$VERSION <- read_cmdstan_version(path)
  }
  path
}

#' @rdname set_cmdstan_path
#' @export
cmdstan_version <- function() {
  .cmdstanr$VERSION %||% stop_no_path()
}

# internal ----------------------------------------------------------------

# Package-local mutable state: the CmdStan path, the CmdStan version string,
# and the session temp directory. Stored in an environment (not a list) so it
# can be updated after the package namespace is sealed.
.cmdstanr <- new.env(parent = emptyenv())
.cmdstanr$PATH <- NULL
.cmdstanr$VERSION <- NULL
.cmdstanr$TEMP_DIR <- NULL

# Path to the session-level temp directory created in cmdstanr_initialize().
cmdstan_tempdir <- function() {
  .cmdstanr$TEMP_DIR
}

# Error to signal whenever a CmdStan path is required but none has been set.
stop_no_path <- function() {
  stop("CmdStan path has not been set yet. See ?set_cmdstan_path.",
       call. = FALSE)
}

#' cmdstan_default_install_path
#'
#' Path to where [install_cmdstan()] with default settings installs CmdStan.
#'
#' @keywords internal
#' @return The installation path.
#' @export
cmdstan_default_install_path <- function() {
  file.path(Sys.getenv("HOME"), ".cmdstanr")
}

#' cmdstan_default_path
#'
#' Returns the path to the installation of cmdstan with the most recent
#' release version.
#'
#' @keywords internal
#' @return Path to the cmdstan installation with the most recent release
#'   version, NULL if no installation found.
#' @export
cmdstan_default_path <- function() {
  installs_path <- file.path(Sys.getenv("HOME"), ".cmdstanr")
  if (dir.exists(installs_path)) {
    cmdstan_installs <- list.dirs(path = installs_path, recursive = FALSE,
                                  full.names = FALSE)
    # Legacy layout: a bare "cmdstan" directory (no version suffix) is renamed
    # to "cmdstan-<version>" so it participates in the ordering below.
    if ("cmdstan" %in% cmdstan_installs) {
      ver <- read_cmdstan_version(file.path(installs_path, "cmdstan"))
      old_path <- file.path(installs_path, "cmdstan")
      new_path <- file.path(installs_path, paste0("cmdstan-", ver))
      file.rename(old_path, new_path)
      cmdstan_installs <- list.dirs(path = installs_path, recursive = FALSE,
                                    full.names = FALSE)
    }
    if (length(cmdstan_installs) > 0) {
      # NOTE(review): this is a lexicographic string sort, so e.g.
      # "cmdstan-2.9.0" would rank above "cmdstan-2.23.0". Confirm whether a
      # version-aware comparison (numeric_version) is needed here.
      latest_cmdstan <- sort(cmdstan_installs, decreasing = TRUE)[1]
      if (is_release_candidate(latest_cmdstan)) {
        # Prefer the final release over its release candidate when both
        # "cmdstan-X.Y.Z-rcN" and "cmdstan-X.Y.Z" are installed.
        non_rc_path <- strsplit(latest_cmdstan, "-rc")[[1]][1]
        if (dir.exists(file.path(installs_path, non_rc_path))) {
          latest_cmdstan <- non_rc_path
        }
      }
      return(file.path(installs_path, latest_cmdstan))
    }
  }
  return(NULL)
}

# Unset the path (only used in tests).
unset_cmdstan_path <- function() {
  .cmdstanr$PATH <- NULL
  .cmdstanr$VERSION <- NULL
}

# Called in .onLoad() in zzz.R: discover and set the CmdStan path, then set
# up the session temp directory.
cmdstanr_initialize <- function() {
  # The CMDSTAN environment variable takes precedence over the default
  # installation directory.
  path <- Sys.getenv("CMDSTAN")
  if (isTRUE(nzchar(path))) { # CMDSTAN environment variable found
    if (dir.exists(path)) {
      path <- absolute_path(path)
      suppressMessages(set_cmdstan_path(path))
    } else {
      warning("Can't find directory specified by environment variable",
              " 'CMDSTAN'. Path not set.", call. = FALSE)
      .cmdstanr$PATH <- NULL
    }
  } else { # environment variable not found; try the default install location
    path <- cmdstan_default_path()
    if (!is.null(path)) {
      suppressMessages(set_cmdstan_path(path))
    }
  }

  # tempdir(check = TRUE) only exists from R 3.5.0 onward.
  if (getRversion() < '3.5.0') {
    .cmdstanr$TEMP_DIR <- tempdir()
  } else {
    .cmdstanr$TEMP_DIR <- tempdir(check = TRUE)
  }
  invisible(TRUE)
}

#' Find the version of CmdStan from its makefile
#' @noRd
#' @param path Path to installation.
#' @return Version number as a string, or `NULL` (with a warning) when no
#'   makefile is found under `path`.
read_cmdstan_version <- function(path) {
  makefile_path <- file.path(path, "makefile")
  if (!file.exists(makefile_path)) {
    warning(
      "Can't find CmdStan makefile to detect version number. ",
      "Path may not point to valid installation.",
      call. = FALSE
    )
    return(NULL)
  }
  makefile <- readLines(makefile_path)
  version_line <- grep("^CMDSTAN_VERSION :=", makefile, value = TRUE)
  sub("CMDSTAN_VERSION := ", "", version_line)
}

#' Returns whether the supplied installation is a release candidate
#' @noRd
#' @param path Path to installation.
#' @return `TRUE` if the installation in the supplied path is a release
#'   candidate, `FALSE` otherwise.
is_release_candidate <- function(path) {
  # Drop any number of trailing slashes so "cmdstan-2.23.0-rc1/" (and
  # "cmdstan-2.23.0-rc1//") are recognized too; the original stripped only
  # a single trailing slash.
  path <- sub("/+$", "", path)
  grepl("-rc[0-9]*$", path)
}
## Script: plot.R
##
## This script creates plot #1 in the Course Project 1 of the course
## Exploratory Data Analysis
##
## Author: Bruno Ascenso
## Date: 12.Sep.2018

## Read the raw measurements and derive a combined date-time column
consumption <- read.table("household_power_consumption.txt", sep = ";",
                          header = TRUE, stringsAsFactors = FALSE)
consumption$timestamp <- strptime(paste(consumption$Date, consumption$Time, sep = ";"),
                                  "%d/%m/%Y;%H:%M:%S")

## Restrict to the two target days (1-2 February 2007)
feb_days <- subset(consumption,
                   timestamp >= "2007-02-01 00:00:00" &
                     timestamp <= "2007-02-02 23:59:59")

## Create plot #1: histogram of global active power, written to a PNG device
png(file = "plot1.png")
hist(as.numeric(feb_days$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()

## Clean up the workspace
rm(consumption, feb_days)
/plot1.R
no_license
bascenso/ExData_Plotting1
R
false
false
704
r
## Script: plot.R
##
## This script creates plot #1 in the Course Project 1 of the course
## Exploratory Data Analysis
##
## Author: Bruno Ascenso
## Date: 12.Sep.2018

## Read the raw measurements and derive a combined date-time column
consumption <- read.table("household_power_consumption.txt", sep = ";",
                          header = TRUE, stringsAsFactors = FALSE)
consumption$timestamp <- strptime(paste(consumption$Date, consumption$Time, sep = ";"),
                                  "%d/%m/%Y;%H:%M:%S")

## Restrict to the two target days (1-2 February 2007)
feb_days <- subset(consumption,
                   timestamp >= "2007-02-01 00:00:00" &
                     timestamp <= "2007-02-02 23:59:59")

## Create plot #1: histogram of global active power, written to a PNG device
png(file = "plot1.png")
hist(as.numeric(feb_days$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()

## Clean up the workspace
rm(consumption, feb_days)
## Script: pred1_GP.R
## Fits and evaluates models that predict NHL skaters' games played (GP).
## Relies on project helpers sourced from setup.R (nhlClean, nhlShape,
## nhlAnalyze, nhlModel, nhlCorr, nhlPredict) plus the packages it loads
## (caret, ggplot2, gridExtra, RPostgreSQL -- presumably; confirm in setup.R).
## NOTE(review): run order matters -- objects built early (cols, controls,
## gpModel4) are reused by every later section.

## define custom functions, call data from the database
source("~/workspace/NHL_regression/R3/setup.R")

## clean the data
skaterstats <- nhlClean()
# attach columns 3:4 of `skaters` (created by setup.R); presumably this adds
# player_position keyed on a shared id column -- confirm against setup.R
skaterstats <- merge(skaterstats, skaters[, 3:4])
skaterstats$player_position <- as.factor(skaterstats$player_position)

## subset the data and build test models, then look at importance output
## (exploratory runs, kept commented out for reference)
# fitData <- nhlShape(2011, 2011, outcome = 3)
# factControl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
# gpFactors.rf <- nhlAnalyze(fitData, seed = 203622, importance = TRUE, trControl = factControl)
# gpFactors.gbm <- nhlAnalyze(fitData, method = "gbm", seed = 654566, trControl = factControl)
# gpFactors.pls <- nhlAnalyze(fitData, method = "pls", seed = 847244, trControl = factControl)
# gpFactors.knn <- nhlAnalyze(fitData, method = "knn", seed = 174633, trControl = factControl)
# gpFactors.svm <- nhlAnalyze(fitData, method = "svmLinear", seed = 287174, trControl = factControl)

## use the importance output to select factors
## (per-learner predictor column indices, chosen from the importance runs above)
cols <- list()
cols[["rf"]] <- c(1:6, 15, 24, 31:32, 38:42, 45:46)
cols[["gbm"]] <- c(1:3, 15, 24, 31:32, 38:42, 45:46)
cols[["pls"]] <- c(1:3, 16, 33:34, 38:41)
cols[["knn"]] <- c(1:6, 15:16, 24, 26, 30:32, 38:42, 45:46)
cols[["svmLinear"]] <- c(1:6, 15:16, 24:27, 30:32, 38:42, 45:46)

## build single models, ensemble the models, and look at correlations
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 10)
# one control object per candidate learner (same settings for all five)
controls <- list()
controls[[1]] <- fitControl
controls[[2]] <- fitControl
controls[[3]] <- fitControl
controls[[4]] <- fitControl
controls[[5]] <- fitControl
## earlier model candidates, kept commented out for the record:
# gpModel <- nhlModel(2011, 2011, outcome = 3, cols = cols, methods = c("rf", "gbm", "pls", "knn", "svmLinear"),
#                     controls = controls, seed = 714537)
# gpCorrs <- nhlCorr(2010, 2013, 3, gpModel)
# gpModel2 <- nhlModel(2010, 2010, outcome = 3, cols = cols, methods = c("rf", "gbm", "pls", "knn", "svmLinear"),
#                      controls = controls, seed = 845856)
# gpCorrs2 <- nhlCorr(2010, 2013, 3, gpModel2)
# gpModel3 <- nhlModel(2010, 2010, outcome = 3, cols = cols, methods = c("rf", "gbm", "svmLinear"),
#                      controls = controls, seed = 244416)
# gpCorrs3 <- nhlCorr(2010, 2013, 3, gpModel3)
## final model: gbm + svmLinear ensemble trained on the 2013 season
gpModel4 <- nhlModel(2013, 2013, outcome = 3, cols = cols, methods = c("gbm", "svmLinear"),
                     controls = controls, seed = 174896)
gpCorrs4 <- nhlCorr(2010, 2013, 3, gpModel4)

## prediction shaping
## Predictions are on a fraction-of-season scale: cap each at 1 and rescale
## by season length (48 games for 2013, 82 for 2014; column 1 is nhl_num).
## NOTE(review): the code below indexes $rf even though gpModel4 only uses
## gbm/svmLinear -- confirm nhlPredict always emits an rf column.
preds2013 <- nhlPredict(2012, 2012, gpModel4, outcome = 3)
preds2014 <- nhlPredict(2013, 2013, gpModel4, outcome = 3)
preds2013$rf[preds2013$rf > 1] <- 1
preds2013$gbm[preds2013$gbm > 1] <- 1
preds2013$svmLinear[preds2013$svmLinear > 1] <- 1
preds2013$cumulative[preds2013$cumulative > 1] <- 1
preds2013[, -1] <- preds2013[, -1] * 48
# simple (unweighted) average of the three single-learner predictions
preds2013$mean <- (preds2013$rf + preds2013$gbm + preds2013$svmLinear) / 3
preds2014$rf[preds2014$rf > 1] <- 1
preds2014$gbm[preds2014$gbm > 1] <- 1
preds2014$svmLinear[preds2014$svmLinear > 1] <- 1
preds2014$cumulative[preds2014$cumulative > 1] <- 1
preds2014[, -1] <- preds2014[, -1] * 82
preds2014$mean <- (preds2014$rf + preds2014$gbm + preds2014$svmLinear) / 3

## graphing
## For each model column: scatter of predicted vs. actual GP with an lm fit,
## titled with the correlation pulled from gpCorrs4; saved individually and
## then combined into one 2-column panel per season.
corr <- round(gpCorrs4["2014", "naive"], digits = 4)
plot14naive <- ggplot(preds2014, aes(x=naive, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("Naive Model: Games Played, 2013 vs. 2014 (r = ", corr, ")", sep = "")) +
    xlab("Games Played in 2013 (Scaled)") + ylab("Games Played in 2014")
corr <- round(gpCorrs4["2014", "rf"], digits = 4)
plot14rf <- ggplot(preds2014, aes(x=rf, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Random Forest Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "gbm"], digits = 4)
plot14gbm <- ggplot(preds2014, aes(x=gbm, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Random Boosting Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "svmLinear"], digits = 4)
plot14svm <- ggplot(preds2014, aes(x=svmLinear, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, SVM Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "cumulative"], digits = 4)
plot14cum <- ggplot(preds2014, aes(x=cumulative, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Regression Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "mean"], digits = 4)
plot14mean <- ggplot(preds2014, aes(x=mean, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Simple Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20141_naive.png")
print(plot14naive)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20142_rf.png")
print(plot14rf)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20143_gbm.png")
print(plot14gbm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20144_svm.png")
print(plot14svm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20145_cum.png")
print(plot14cum)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20146_mean.png")
print(plot14mean)
dev.off()
png(width = 960, height = 960, filename = "~/workspace/NHL_regression/graphics/GP/GP2014_full.png")
grid.arrange(plot14naive, plot14rf, plot14gbm, plot14svm, plot14cum, plot14mean, ncol = 2)
dev.off()
corr <- round(gpCorrs4["2013", "naive"], digits = 4)
plot13naive <- ggplot(preds2013, aes(x=naive, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("Naive Model: Games Played, 2012 vs. 2013 (r = ", corr, ")", sep = "")) +
    xlab("Games Played in 2012 (Scaled)") + ylab("Games Played in 2013")
corr <- round(gpCorrs4["2013", "rf"], digits = 4)
plot13rf <- ggplot(preds2013, aes(x=rf, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Random Forest Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "gbm"], digits = 4)
plot13gbm <- ggplot(preds2013, aes(x=gbm, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Random Boosting Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "svmLinear"], digits = 4)
plot13svm <- ggplot(preds2013, aes(x=svmLinear, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, SVM Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "cumulative"], digits = 4)
plot13cum <- ggplot(preds2013, aes(x=cumulative, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Regression Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "mean"], digits = 4)
plot13mean <- ggplot(preds2013, aes(x=mean, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Simple Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20131_naive.png")
print(plot13naive)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20132_rf.png")
print(plot13rf)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20133_gbm.png")
print(plot13gbm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20134_svm.png")
print(plot13svm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20135_cum.png")
print(plot13cum)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20136_mean.png")
print(plot13mean)
dev.off()
png(width = 960, height = 960, filename = "~/workspace/NHL_regression/graphics/GP/GP2013_full.png")
grid.arrange(plot13naive, plot13rf, plot13gbm, plot13svm, plot13cum, plot13mean, ncol = 2)
dev.off()

## printing output to database
## NOTE(review): unlike preds2013/preds2014 above, these predictions are NOT
## capped at 1 before scaling by 82, so games_played can exceed 82 (the
## commented-out lines below would have capped it) -- confirm intentional.
preds2015 <- nhlPredict(2014, 2014, gpModel4, outcome = 3)
preds2015$rf <- preds2015$rf * 82
preds2015$gbm <- preds2015$gbm * 82
preds2015$svmLinear <- preds2015$svmLinear * 82
preds2015$games_played <- (preds2015$rf + preds2015$gbm + preds2015$svmLinear) / 3
# preds2015$games_played <- preds2015$cumulative * 82
# preds2015$games_played[preds2015$games_played > 82] <- 82
output <- preds2015[, c("nhl_num", "games_played")]
# NOTE(review): plaintext database credentials committed in source -- consider
# moving user/password to environment variables or a config file.
conn <- dbConnect(driv, dbname = "nhltest", user = "postgres", password = "hollyleaf", host = "localhost")
dbWriteTable(conn, "newskatpred15", output, overwrite=TRUE, row.names = FALSE)
dbDisconnect(conn)
/R3/pred1_GP.R
permissive
kielejocain/NHL_regression
R
false
false
9,314
r
## Script: pred1_GP.R
## Fits and evaluates models that predict NHL skaters' games played (GP).
## Relies on project helpers sourced from setup.R (nhlClean, nhlShape,
## nhlAnalyze, nhlModel, nhlCorr, nhlPredict) plus the packages it loads
## (caret, ggplot2, gridExtra, RPostgreSQL -- presumably; confirm in setup.R).
## NOTE(review): run order matters -- objects built early (cols, controls,
## gpModel4) are reused by every later section.

## define custom functions, call data from the database
source("~/workspace/NHL_regression/R3/setup.R")

## clean the data
skaterstats <- nhlClean()
# attach columns 3:4 of `skaters` (created by setup.R); presumably this adds
# player_position keyed on a shared id column -- confirm against setup.R
skaterstats <- merge(skaterstats, skaters[, 3:4])
skaterstats$player_position <- as.factor(skaterstats$player_position)

## subset the data and build test models, then look at importance output
## (exploratory runs, kept commented out for reference)
# fitData <- nhlShape(2011, 2011, outcome = 3)
# factControl <- trainControl(method = "repeatedcv", number = 10, repeats = 5)
# gpFactors.rf <- nhlAnalyze(fitData, seed = 203622, importance = TRUE, trControl = factControl)
# gpFactors.gbm <- nhlAnalyze(fitData, method = "gbm", seed = 654566, trControl = factControl)
# gpFactors.pls <- nhlAnalyze(fitData, method = "pls", seed = 847244, trControl = factControl)
# gpFactors.knn <- nhlAnalyze(fitData, method = "knn", seed = 174633, trControl = factControl)
# gpFactors.svm <- nhlAnalyze(fitData, method = "svmLinear", seed = 287174, trControl = factControl)

## use the importance output to select factors
## (per-learner predictor column indices, chosen from the importance runs above)
cols <- list()
cols[["rf"]] <- c(1:6, 15, 24, 31:32, 38:42, 45:46)
cols[["gbm"]] <- c(1:3, 15, 24, 31:32, 38:42, 45:46)
cols[["pls"]] <- c(1:3, 16, 33:34, 38:41)
cols[["knn"]] <- c(1:6, 15:16, 24, 26, 30:32, 38:42, 45:46)
cols[["svmLinear"]] <- c(1:6, 15:16, 24:27, 30:32, 38:42, 45:46)

## build single models, ensemble the models, and look at correlations
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 10)
# one control object per candidate learner (same settings for all five)
controls <- list()
controls[[1]] <- fitControl
controls[[2]] <- fitControl
controls[[3]] <- fitControl
controls[[4]] <- fitControl
controls[[5]] <- fitControl
## earlier model candidates, kept commented out for the record:
# gpModel <- nhlModel(2011, 2011, outcome = 3, cols = cols, methods = c("rf", "gbm", "pls", "knn", "svmLinear"),
#                     controls = controls, seed = 714537)
# gpCorrs <- nhlCorr(2010, 2013, 3, gpModel)
# gpModel2 <- nhlModel(2010, 2010, outcome = 3, cols = cols, methods = c("rf", "gbm", "pls", "knn", "svmLinear"),
#                      controls = controls, seed = 845856)
# gpCorrs2 <- nhlCorr(2010, 2013, 3, gpModel2)
# gpModel3 <- nhlModel(2010, 2010, outcome = 3, cols = cols, methods = c("rf", "gbm", "svmLinear"),
#                      controls = controls, seed = 244416)
# gpCorrs3 <- nhlCorr(2010, 2013, 3, gpModel3)
## final model: gbm + svmLinear ensemble trained on the 2013 season
gpModel4 <- nhlModel(2013, 2013, outcome = 3, cols = cols, methods = c("gbm", "svmLinear"),
                     controls = controls, seed = 174896)
gpCorrs4 <- nhlCorr(2010, 2013, 3, gpModel4)

## prediction shaping
## Predictions are on a fraction-of-season scale: cap each at 1 and rescale
## by season length (48 games for 2013, 82 for 2014; column 1 is nhl_num).
## NOTE(review): the code below indexes $rf even though gpModel4 only uses
## gbm/svmLinear -- confirm nhlPredict always emits an rf column.
preds2013 <- nhlPredict(2012, 2012, gpModel4, outcome = 3)
preds2014 <- nhlPredict(2013, 2013, gpModel4, outcome = 3)
preds2013$rf[preds2013$rf > 1] <- 1
preds2013$gbm[preds2013$gbm > 1] <- 1
preds2013$svmLinear[preds2013$svmLinear > 1] <- 1
preds2013$cumulative[preds2013$cumulative > 1] <- 1
preds2013[, -1] <- preds2013[, -1] * 48
# simple (unweighted) average of the three single-learner predictions
preds2013$mean <- (preds2013$rf + preds2013$gbm + preds2013$svmLinear) / 3
preds2014$rf[preds2014$rf > 1] <- 1
preds2014$gbm[preds2014$gbm > 1] <- 1
preds2014$svmLinear[preds2014$svmLinear > 1] <- 1
preds2014$cumulative[preds2014$cumulative > 1] <- 1
preds2014[, -1] <- preds2014[, -1] * 82
preds2014$mean <- (preds2014$rf + preds2014$gbm + preds2014$svmLinear) / 3

## graphing
## For each model column: scatter of predicted vs. actual GP with an lm fit,
## titled with the correlation pulled from gpCorrs4; saved individually and
## then combined into one 2-column panel per season.
corr <- round(gpCorrs4["2014", "naive"], digits = 4)
plot14naive <- ggplot(preds2014, aes(x=naive, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("Naive Model: Games Played, 2013 vs. 2014 (r = ", corr, ")", sep = "")) +
    xlab("Games Played in 2013 (Scaled)") + ylab("Games Played in 2014")
corr <- round(gpCorrs4["2014", "rf"], digits = 4)
plot14rf <- ggplot(preds2014, aes(x=rf, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Random Forest Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "gbm"], digits = 4)
plot14gbm <- ggplot(preds2014, aes(x=gbm, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Random Boosting Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "svmLinear"], digits = 4)
plot14svm <- ggplot(preds2014, aes(x=svmLinear, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, SVM Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "cumulative"], digits = 4)
plot14cum <- ggplot(preds2014, aes(x=cumulative, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Regression Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2014", "mean"], digits = 4)
plot14mean <- ggplot(preds2014, aes(x=mean, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2014 Games Played, Simple Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20141_naive.png")
print(plot14naive)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20142_rf.png")
print(plot14rf)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20143_gbm.png")
print(plot14gbm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20144_svm.png")
print(plot14svm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20145_cum.png")
print(plot14cum)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20146_mean.png")
print(plot14mean)
dev.off()
png(width = 960, height = 960, filename = "~/workspace/NHL_regression/graphics/GP/GP2014_full.png")
grid.arrange(plot14naive, plot14rf, plot14gbm, plot14svm, plot14cum, plot14mean, ncol = 2)
dev.off()
corr <- round(gpCorrs4["2013", "naive"], digits = 4)
plot13naive <- ggplot(preds2013, aes(x=naive, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("Naive Model: Games Played, 2012 vs. 2013 (r = ", corr, ")", sep = "")) +
    xlab("Games Played in 2012 (Scaled)") + ylab("Games Played in 2013")
corr <- round(gpCorrs4["2013", "rf"], digits = 4)
plot13rf <- ggplot(preds2013, aes(x=rf, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Random Forest Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "gbm"], digits = 4)
plot13gbm <- ggplot(preds2013, aes(x=gbm, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Random Boosting Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "svmLinear"], digits = 4)
plot13svm <- ggplot(preds2013, aes(x=svmLinear, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, SVM Model (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "cumulative"], digits = 4)
plot13cum <- ggplot(preds2013, aes(x=cumulative, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Regression Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
corr <- round(gpCorrs4["2013", "mean"], digits = 4)
plot13mean <- ggplot(preds2013, aes(x=mean, y=outcome)) + geom_smooth(method="lm") + geom_point() +
    ggtitle(paste("2013 Games Played, Simple Ensembling (r = ", corr, ")", sep = "")) +
    xlab("Predicted Games Played") + ylab("Actual Games Played")
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20131_naive.png")
print(plot13naive)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20132_rf.png")
print(plot13rf)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20133_gbm.png")
print(plot13gbm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20134_svm.png")
print(plot13svm)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20135_cum.png")
print(plot13cum)
dev.off()
png(filename = "~/workspace/NHL_regression/graphics/GP/GP20136_mean.png")
print(plot13mean)
dev.off()
png(width = 960, height = 960, filename = "~/workspace/NHL_regression/graphics/GP/GP2013_full.png")
grid.arrange(plot13naive, plot13rf, plot13gbm, plot13svm, plot13cum, plot13mean, ncol = 2)
dev.off()

## printing output to database
## NOTE(review): unlike preds2013/preds2014 above, these predictions are NOT
## capped at 1 before scaling by 82, so games_played can exceed 82 (the
## commented-out lines below would have capped it) -- confirm intentional.
preds2015 <- nhlPredict(2014, 2014, gpModel4, outcome = 3)
preds2015$rf <- preds2015$rf * 82
preds2015$gbm <- preds2015$gbm * 82
preds2015$svmLinear <- preds2015$svmLinear * 82
preds2015$games_played <- (preds2015$rf + preds2015$gbm + preds2015$svmLinear) / 3
# preds2015$games_played <- preds2015$cumulative * 82
# preds2015$games_played[preds2015$games_played > 82] <- 82
output <- preds2015[, c("nhl_num", "games_played")]
# NOTE(review): plaintext database credentials committed in source -- consider
# moving user/password to environment variables or a config file.
conn <- dbConnect(driv, dbname = "nhltest", user = "postgres", password = "hollyleaf", host = "localhost")
dbWriteTable(conn, "newskatpred15", output, overwrite=TRUE, row.names = FALSE)
dbDisconnect(conn)
library(rhdf5) library(Matrix) library(data.table) library(ggplot2) library(RColorBrewer) library(dplyr) today <- format(Sys.Date(), '%y%m%d') ###### # limma results ###### #load differential expression results for mutant vs control file_path <- '~/Documents/Cooperations/Böttcher_IDR/10X_data/analysis/notebooks/table/' files <- list.files(path = file_path, pattern = 'mutant_vs_control_progen.csv') files <- files[grepl('limma_', files)] files <- files[grepl('181017', files)] files <- files[grepl('progeni|ISC', files)] #select only progenitor and stem cell populations, not mature cells limma_results <- data.frame(X='', logFC=numeric(1),AveExpr=0, adj.P.Val=0, cell_type='') for (filing in files){ cells_tested <- sapply(strsplit(filing, '_'),'[',3) limma_res <- read.csv(paste0(file_path, filing)) data_res <- limma_res %>% select(X, logFC,AveExpr, adj.P.Val) data_res$cell_type <- cells_tested limma_results <- rbind(limma_results, data_res) } limma_results <- limma_results[-1,] write.csv(x = limma_results, file = paste0(file_path, 'limma_results_',today,'_mutant_vs_control_progenitors.csv')) # load comparison between progenitors and mature cell types files <- list.files(path = file_path, pattern = '_controls.csv') files <- files[grepl('181017', files)] limma_results2 <- data.frame(X='', logFC=numeric(1),AveExpr=0, adj.P.Val=0, cell_type1='', cell_type2='') for (filing in files){ cells_tested1 <- sapply(strsplit(filing, '_'),'[',3) cells_tested2 <- sapply(strsplit(filing, '_'),'[',5) limma_res <- read.csv(paste0(file_path, filing)) data_res <- limma_res %>% select(X, logFC,AveExpr, adj.P.Val) data_res$cell_type1 <- cells_tested1 data_res$cell_type2 <- cells_tested2 limma_results2 <- rbind(limma_results2, data_res) } limma_results2 <- limma_results2[-1,] write.csv(x = limma_results2, file = paste0(file_path, 'limma_results_',today,'_controls_progen.csv')) a <- unique(limma_results2$X[abs(limma_results2$logFC)>0.1]) write.csv(a[order(a)], file = paste0(file_path, 
'limma_results_', today, '_differential_genes_lfc05.csv'), row.names = FALSE) # load comparison between ISCs and maturing Paneth cells files <- list.files(path = file_path, pattern = '_controls_PC.csv') files <- files[grepl('181017', files)] limma_results3 <- data.frame(X='', logFC=numeric(1),AveExpr=0, adj.P.Val=0, cell_type1='', cell_type2='') for (filing in files){ cells_tested1 <- sapply(strsplit(filing, '_'),'[',3) cells_tested2 <- sapply(strsplit(filing, '_'),'[',5) limma_res <- read.csv(paste0(file_path, filing)) data_res <- limma_res %>% select(X, logFC,AveExpr, adj.P.Val) data_res$cell_type1 <- cells_tested1 data_res$cell_type2 <- cells_tested2 limma_results3<- rbind(limma_results3, data_res) } limma_results3 <- limma_results3[-1,] write.csv(x = limma_results3, file = paste0(file_path, 'limma_results_',today,'_controls_PC_maturation.csv'), row.names = FALSE) ####### #check Wnt genes for significance ####### canonical_Wnt_genes <- c('Dvl2', 'Axin2', 'Ascl2', 'Lgr5', 'Myc', 'Hopx') some_more_wnt_genes <- c('Wnt4', 'Wnt5a','Wnt5b', 'Wnt9a','Wnt9b', 'Wnt6', 'Fzd2', 'Fzd3', 'Fzd6','Fzd7', 'Dvl1','Dvl2','Dvl3','Invs', 'Vangl1', 'Vangl2', 'Prickle1', 'Celsr1', 'Celsr2', 'Celsr3', 'Scrib', 'Fuz', 'Intu', 'Ror2', 'Ryk', 'Ptk7', 'Smurf1', 'Smurf2', 'Mapk8', 'Cdc42', 'Rhoa','Exoc3', 'Exoc4', 'Exoc5', 'Jun') #check receptors for significance receptors <- c( 'Fgfr1','Fgfr2','Fgfr3','Fgfr4', #FGF 'Egfr', 'Erbb2','Erbb3','Lrig1', #EGF 'Bmpr1a', 'Bmpr2','Id1','Id2','Id3', #BMP 'Notch1','Notch2','Notch3','Jag1','Dll1','Dll4','Hes1','Lfng', #Notch 'Sfrp5','Sfrp1','Fzd1','Fzd2','Fzd3','Fzd6','Fzd7','Fzd8','Lrp5','Lrp6', #Wnt 'Smo', 'Shh', 'Ihh', #Hedgehog 'Yap1','Tead2', 'Tead3', #Hippo 'Fzd3', 'Fzd6', 'Ror2', 'Ptk7', 'Celsr1', 'Vangl1', 'Vangl2', 'Prickle1', 'Jun', #Wnt/PCP 'Ephb2', 'Ephb3', 'Efnb1', #Ephrin signalling 'Itgb1', 'Itga1','Itga2', 'Itga3', 'Itga5', 'Itga6', 'Itga9' #integrin signalling ) #### # check some more genes (including TFs) for significance #### 
more_interesting_genes =c('Cdkn1a', "Lbh", 'Klf15', "Ier2", 'Klf3', 'Atoh1', "Spdef", "Btg2", "Insm1", "Neurog3", "Sox4" ) #differential TFs mutant vs control tf_mutant_control <- read.csv(paste0(file_path,'TFs_progenitors_mutant_control_Oct.csv')) #differential TFs ISC vs progenitor or Paneth progenitor vs Goblet progenitor tf_progen <- read.csv(paste0(file_path,'TFs_progenitors_Oct.csv')) tfs_differential = intersect(unlist(tf_mutant_control), unlist(tf_progen)) tfs_diffset = setdiff(unlist(tf_mutant_control), unlist(tf_progen)) write.csv(tfs_differential, file=paste0(file_path, 'TFs_diff_progenitors_mutant_control_Oct.csv'), row.names = FALSE) progenitors <- c('ISC','Paneth primed ISC', 'Paneth progenitor', "Sox4+ early EE progenitor" , "Ngn3 progenitor", 'EEC', 'Goblet progenitor', 'Tuft progenitor') write.csv(x = limma_results[limma_results$X %in% canonical_Wnt_genes & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_Wnt.csv')) write.csv(x = limma_results[limma_results$X %in% some_more_wnt_genes & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path,'limma_results_',today,'_mutant_control_Wnt-PCP.csv')) write.csv(x = limma_results[limma_results$X %in% more_interesting_genes & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_genes_of_interest.csv')) write.csv(x = limma_results[limma_results$X %in% tfs_differential & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_TFs_intersect.csv')) write.csv(x = limma_results[limma_results$X %in% tfs_diffset & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_TFs_diffset.csv')) write.csv(x = limma_results[limma_results$X %in% receptors& limma_results$cell_type %in% progenitors,], row.names 
=FALSE, file= paste0(file_path,'limma_results_',today,'_mutant_control_receptors.csv')) #progenitor tests write.csv(x = limma_results2[limma_results2$X %in% canonical_Wnt_genes ,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_Wnt.csv')) write.csv(x = limma_results2[limma_results2$X %in% some_more_wnt_genes,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_Wnt-PCP.csv')) write.csv(x = limma_results2[limma_results2$X %in% more_interesting_genes ,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_genes_of_interest.csv')) write.csv(x = limma_results2[limma_results2$X %in% tfs_differential ,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_TFs_intersect.csv')) write.csv(x = limma_results2[limma_results2$X %in% receptors,], row.names =FALSE, file= paste0(file_path,'limma_results_',today,'_controls_receptors.csv')) ######### #load data for summary plots ######## f_path <- '~/Documents/Collaborations/Böttcher_IDR/10X_data/analysis/notebooks/write/ISC_PC_EEC_GC_Tuft_ref.h5ad' dset <- h5read(f_path, '/',compoundAsDataFrame=FALSE) figure_path <- '~/Documents/Collaborations/Böttcher_IDR/10X_data/analysis/notebooks/figures/' f_path_mut <- '~/Documents/Collaborations/Böttcher_IDR/10X_data/analysis/notebooks/write/gut_AB_AL_annotated_mutants.h5ad' dset_mut <- h5read(f_path_mut, '/', compoundAsDataFrame = FALSE) #get genes/cell ID barcodes <- unlist(dset$obs$index) genes <- unlist(dset$var$index) sample_levels <- c( "Control_1", "Control_2" , "Control_6" , "Control_3_FVR" , "Control_4_FVR", "Control_5_FVR","Control_7_FVR_only","CD_1" ,"CD_2", "CD_3") new_sample_levels <- c("Control_1", "Control_2" , "Control_6" , "Control_3_FVR" , "Control_4_FVR", "Control_5_FVR","Control_7_FVR_only",'FVF', 'FVF', 'FVF') cell_type_levels <- c("ISC", "Enterocyte progenitor", "Enterocyte", "Goblet progenitor" , "early Goblet" , "Goblet 1" , "Goblet 2", "Paneth primed ISC", "Paneth 
progenitor" , "Paneth 1" , "Paneth 2", "Lgr5+ EEC" , "Sox4+ early EE progenitor" , "Ngn3 progenitor", "Isl1/Arx progenitor", "Pax4 progenitor" ,"Ghrl progenitor", "EC", "EC-Reg4", "SAKD", "SIA", "SIK", "SIL-P", "SILA", "SIN", "Tuft progenitor" , "Tuft 1" , "Tuft 2") split_Gpc_levels <- c("ISC", "Enterocyte progenitor", "Enterocyte", "Goblet progenitor" , "early Goblet" , "Goblet cell" , "Goblet cell", "ISC", "Paneth progenitor" , "Paneth cell" , "Paneth cell", "EE progenitor" , "EE progenitor" , "EE progenitor", "EEC", "EEC" ,"EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "Tuft progenitor" , "Tuft cell" , "Tuft cell") proma_ct_mutant_levels <- c("ISC","Enterocyte progenitor", "Enterocyte", "Goblet progenitor" , "Goblet cell" , "Goblet cell" , "Goblet cell", "ISC", "Paneth progenitor" , "Paneth cell" , "Paneth cell", "EE progenitor" , "EE progenitor" , "EE progenitor", "EEC", "EEC" ,"EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "Tuft progenitor" , "Tuft cell" , "Tuft cell") major_cell_type_levels = c( "ISC" , "Enterocyte", "Goblet cell" , "Paneth cell" ,"EEC" , "Tuft cell") proma_cell_type_levels = c( "ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet progenitor", "early Goblet" , "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell") genetics <- c('control', 'mutant') colors_proma <-c('#A30059', #ISC '#FF4A46', #Enterocyte progenitor '#5A0007', #Enterocyte '#006FA6', #Goblet progenitor '#0000A6', #Goblet '#fdb462', #Paneth progenitor '#ffd92f', #Paneth '#63FFAC', #EEprogenitor '#0A8201', #EEC '#9083CF', #Tuft progenitor '#304130' #Tuft ) colors_split_Gpc <-c('#A30059', #ISC '#FF4A46', #Enterocyte progenitor '#5A0007', #Enterocyte '#006FA6', #Goblet progenitor '#CB63CC', #early Goblet '#0000A6', #Goblet '#fdb462', #Paneth progenitor '#ffd92f', #Paneth '#63FFAC', #EEprogenitor '#0A8201', #EEC '#9083CF', #Tuft progenitor '#304130' #Tuft ) #get cell attributes for Mutants and 
Controls cellData_mut <- data.frame(mouse_line = factor(unlist(dset_mut$uns$genetics_categories)[unlist(dset_mut$obs$genetics)+1]), sample = factor(unlist(dset_mut$uns$sample_categories)[unlist(dset_mut$obs$sample)+1]), cc_score = factor(unlist(dset_mut$uns$phase_categories)[unlist(dset_mut$obs$phase)+1], levels=c('G1', 'G2M', 'S')), # cell_type = factor(unlist(dset_mut$uns$cell_type_test_categories)[unlist(dset_mut$obs$cell_type_test)+1], # levels = cell_type_levels), proma_cell_type = factor(unlist(dset_mut$uns$proma_split_Gpc_categories)[unlist(dset_mut$obs$proma_split_Gpc)+1], levels = proma_cell_type_levels) ) #levels(cellData_mut$proma_cell_type) <- proma_ct_mutant_levels #count cells per cell type cellData_mut_new <- cellData_mut cellData_mut.m <- melt(table(cellData_mut_new), id.vars=c('mouse_line', 'sample', 'cc_score', 'proma_cell_type')) cellData_mut.m <- cellData_mut.m[cellData_mut.m$value>0,] #select only non enriched samples cellData_mut.m <- cellData_mut.m[cellData_mut.m$sample %in% c('Control_1', 'Control_2', 'Mutant_1', 'Mutant_2'),] #get cell attributes for Controls only cellData <- data.frame( sample=factor(unlist(dset$uns$sample_categories)[unlist(dset$obs$sample)+1], levels=sample_levels), genetics = unlist(dset$uns$genetics_categories)[unlist(dset$obs$genetics)+1], cell_type = factor(unlist(dset$uns$refined_clustering_categories)[unlist(dset$obs$refined_clustering+1)], cell_type_levels), cc_score = factor(unlist(dset$uns$phase_categories)[unlist(dset$obs$phase)+1], levels=c('G1', 'G2M', 'S')), major_cell_type = factor(unlist(dset$uns$major_cell_type_categories)[unlist(dset$obs$major_cell_type+1)], major_cell_type_levels), proma_cell_type = factor(unlist(dset$uns$proma_cell_type_categories)[unlist(dset$obs$proma_cell_type+1)], proma_cell_type_levels), split_Gpc_type = factor(unlist(dset$uns$refined_clustering_categories)[unlist(dset$obs$refined_clustering+1)], cell_type_levels) ) #get sample sizes levels(cellData$split_Gpc_type) <- 
split_Gpc_levels sample_size <- table(cellData$sample) cellData_new <- cellData levels(cellData_new$sample) <- new_sample_levels cellData.m <- melt(table(cellData_new), id.vars=c('cell_type', 'major_cell_type', 'proma_cell_type', 'split_Gpc_type')) cellData.m <- cellData.m[cellData.m$value>0,] CD <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(cell_type) %>% summarise(total_count= sum(value)) CD_major <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(major_cell_type) %>% summarise(total_count= sum(value)) CD_proma<- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(proma_cell_type) %>% summarise(total_count= sum(value)) CD_split_Gpc<- cellData.m %>% select(sample, cell_type,split_Gpc_type,value) %>% group_by(split_Gpc_type) %>% summarise(total_count= sum(value)) CD_sample <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(sample, cell_type) %>% summarise(total_count= sum(value)) CD_sample_tot <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(sample) %>% summarise(total_count= sum(value)) CD_sample_tot2 <- data.frame(sample=names(sample_size), value=sample_size) CD_sample_M <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(sample, major_cell_type) %>% summarise(total_count= sum(value)) CD_sample_pro <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type, value) %>% group_by(sample, proma_cell_type) %>% summarise(total_count= sum(value)) CD_sample_pro_Gpc <- cellData.m %>% select(sample, cell_type,major_cell_type, split_Gpc_type, value) %>% group_by(sample, split_Gpc_type) %>% summarise(total_count= sum(value)) ################ # create plots # ################ g06 <- ggplot(cellData_mut.m[cellData_mut.m$proma_cell_type %in% c('ISC', 'Paneth progenitor', 'Paneth cell','EE 
progenitor', 'EEC'),], aes(mouse_line, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Mouse line', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + facet_grid(~proma_cell_type) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g06 ggsave(filename = paste0(figure_path,today, '_cellcycle_per_cell_type_mutants.pdf'), plot = g06, width = 10, height=7) g06 <- ggplot(cellData_mut.m[cellData_mut.m$proma_cell_type %in% c("ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet progenitor",'early Goblet', "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell"),], aes(mouse_line, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Mouse line', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + facet_grid(~proma_cell_type) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g06 ggsave(filename = paste0(figure_path,today, '_cellcycle_per_cell_type_mutants.pdf'), plot = g06, width = 15, height=7) g07 <- ggplot(cellData.m[cellData.m$proma_cell_type %in% c("ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet 
progenitor",'early Goblet', "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell"),], aes(proma_cell_type, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g07 ggsave(filename = paste0(figure_path, today, '_cellcycle_per_cell_type_progen.pdf'), plot = g07, width = 7, height=7) g08 <- ggplot(cellData.m[cellData.m$split_Gpc_type %in% c("ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet progenitor", "early Goblet", "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell"),], aes(split_Gpc_type, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g08 ggsave(filename = paste0(figure_path, today, '_cellcycle_per_cell_type_progen_Gpc.pdf'), plot = g08, width = 7, height=7) g01 <- ggplot(CD_sample_tot, aes(sample, total_count)) +# 
facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + lims(y=c(0,20000))+ #scale_y_sqrt(breaks=c(50, 500, 1000, 5000, 10000, 15000)) + scale_x_discrete(breaks= CD_sample_tot$sample, labels=c('Control 1', 'Control 2', 'Control 6', 'Control 3 (FVR)', 'Control 4 (FVR)', 'Control 5 (50% FVR)', 'Control 7 (90% FVR)', 'FVF (3 samples)') ) + theme_classic() + labs(title='Cell number per sample', x='Sample', y='Size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g01 ggsave(filename = paste0(figure_path, 'counts_per_sample_all_controls.pdf'), plot = g01, width = 5, height=7) g02 <- ggplot(CD_sample_tot2, aes(sample, value.Freq)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + lims(y=c(0,20000))+ #scale_y_sqrt(breaks=c(50, 500, 1000, 5000, 10000, 15000)) + scale_x_discrete(breaks= levels(CD_sample_tot2$sample), labels=c('FVF 1', 'FVF 2', 'FVF 3', 'Control 1', 'Control 2', 'Control 6', 'Control 3 (FVR)', 'Control 4 (FVR)', 'Control 5 (50% FVR)', 'Control 7 (90% FVR)' ) ) + theme_classic() + labs(title='Cell number per sample', x='Sample', y='Size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g02 ggsave(filename = paste0(figure_path, 'counts_per_sample_all_controls2.pdf'), plot = g02, width = 5, height=7) g0 <- ggplot(CD, aes(cell_type, total_count)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(50, 500, 1000, 5000, 10000, 15000)) + scale_x_discrete(breaks= cell_type_levels) + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + theme(axis.title.x = 
element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g0 ggsave(filename = paste0(figure_path, 'counts_per_refined_cell_type_all_controls.pdf'), plot = g0, width = 10, height=7) g1 <- ggplot(CD_major, aes(major_cell_type, total_count)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(1000,2000,3000,4000, 5000, 10000, 20000)) + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g1 ggsave(filename = paste0(figure_path, today, '_counts_per_major_cell_type_all_controls.pdf'), plot = g1, width = 4, height=7) g2 <- ggplot(CD_proma, aes(proma_cell_type, total_count)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(500, 1000,2000,3000,4000, 5000, 10000, 20000)) + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g2 ggsave(filename = paste0(figure_path, today, '_counts_per_main_progen_cell_type_all_controls.pdf'), plot = g2, width = 6, height=7) g21 <- ggplot(CD_split_Gpc, aes(split_Gpc_type, total_count, fill=split_Gpc_type)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + scale_fill_manual(values=colors_split_Gpc, guide=FALSE)+#scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(500, 1000,2000,3000,4000, 5000, 10000, 20000)) + #guide_legend() + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + 
theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g21 ggsave(filename = paste0(figure_path, today, '_counts_per_progen_Gpc_cell_type_all_controls.pdf'), plot = g21, width = 6, height=7) g2 <- ggplot(CD_sample[CD_sample$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'),], aes(cell_type, total_count, fill=sample)) + scale_fill_manual(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'), values= c('grey', 'firebrick1', 'cornflowerblue') ) + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Population size (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g2 ggsave(filename = paste0(figure_path, 'counts_per_refined_cell_type_c5-7.pdf'), plot = g2, width = 10, height=7) g2 <- ggplot(CD_sample_pro[CD_sample_pro$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'),], aes(proma_cell_type, total_count, fill=sample)) + scale_fill_manual(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'), values= c('grey', 'firebrick1', 'cornflowerblue') ) + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Population size (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g2 ggsave(filename = paste0(figure_path, 'counts_per_main_progen_cell_type_c5-7.pdf'), plot = g2, width = 10, height=7) g6 <- ggplot(CD_sample_pro[CD_sample_pro$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'),], 
aes(sample, total_count, fill=proma_cell_type)) + scale_fill_manual(breaks=proma_cell_type_levels, values=colors_proma, name='Cell type') + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Sample', y='Cell type frequency (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), labels=c('no enrichment', 'FVR enrichment (50%)', 'FVR enrichment (90%)','FVF enrichment (50%)')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g6 ggsave(filename = paste0(figure_path, today, '_counts_per_cell_type_FVF_c5-7_stacked.pdf'), plot = g6, width = 10, height=7) g6 <- ggplot(CD_sample_pro_Gpc[CD_sample_pro_Gpc$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'),], aes(sample, total_count, fill=split_Gpc_type)) + scale_fill_manual(breaks=levels(cellData$split_Gpc_type), values=colors_split_Gpc, name='Cell type') + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Sample', y='Cell type frequency (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), labels=c('no enrichment', 'FVR enrichment (50%)', 'FVR enrichment (90%)','FVF enrichment (50%)')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.y = element_text(size=15), axis.text.x = element_text(angle=45, hjust = 1, size=15)) g6 ggsave(filename = paste0(figure_path, today, '_counts_per_cell_type_split_Gpc_FVF_c5-7_stacked.pdf'), plot = g6, width = 6, height=10) ############################# # load Wnt/PCP score # ############################# wnt_data <- 
read.csv('~/Documents/Cooperations/Böttcher_IDR/10X_data/analysis/notebooks/table/Wnt_scale_per_cell_type.csv') wnt_data$refined_clustering <- factor(wnt_data$refined_clustering, levels = cell_type_levels) wnt_data2 <- read.csv('~/Documents/Cooperations/Böttcher_IDR/10X_data/analysis/notebooks/table/Wnt_scale_per_promain_cell_type.csv') wnt_data2$proma_cell_type <- factor(wnt_data2$proma_cell_type, levels = proma_cell_type_levels) wnt_data <- melt(wnt_data, id.vars='refined_clustering') wnt_size_new <- c(sum(wnt_data$value[wnt_data$variable=='none']), sum(wnt_data$value[wnt_data$variable=='Wnt.PCP'])) wnt_data$value_norm <- wnt_data$value/rep(wnt_size_new, each=dim(wnt_data)[1]/2) wnt_size_new2 <- c(sum(wnt_data2['none']), sum(wnt_data2['Wnt.PCP'])) wnt_data2 <- melt(wnt_data2, id.vars='proma_cell_type') wnt_data2$value_norm <- wnt_data2$value/rep(wnt_size_new2, each=dim(wnt_data2)[1]/2) wnt_data3 <- data.frame(type=rep('overall',2), variable=c('none', 'Wnt.PCP'), value=wnt_size_new ) wnt_frac <- wnt_data %>% select(refined_clustering, variable, value) %>% group_by(refined_clustering, variable) %>% summarise(total=sum(value)) wnt_frac2 <- wnt_data %>% select(refined_clustering, variable, value) %>% group_by(refined_clustering) %>% summarise(total=sum(value)) wnt_frac <- wnt_frac[wnt_frac$variable=='Wnt.PCP',] wnt_frac$total = wnt_frac$total/wnt_frac2$total wnt_fracM <- wnt_data2 %>% select(proma_cell_type, variable, value) %>% group_by(proma_cell_type, variable) %>% summarise(total=sum(value)) wnt_fracM2 <- wnt_data2 %>% select(proma_cell_type, variable, value) %>% group_by(proma_cell_type) %>% summarise(total=sum(value)) wnt_fracM <- wnt_fracM[wnt_fracM$variable=='Wnt.PCP',] wnt_fracM$total = wnt_fracM$total/wnt_fracM2$total g81 <- ggplot(wnt_frac, aes(x=refined_clustering, y=total, fill=variable)) + scale_fill_manual( values=c('#b82531'), name='Wnt/PCP score') + guides(fill=FALSE) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + 
labs(x='Cell type', y='Fraction Wnt/PCP\n positive cells (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1), labels=c(0,1, 10) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g81 ggsave(filename = paste0(figure_path, 'Wnt_score_per_cell_type_frac.pdf'), plot = g81, width = 10, height=7) g82 <- ggplot(wnt_fracM, aes(x=proma_cell_type, y=total, fill=variable)) + scale_fill_manual( values=c('#b82531'), name='Wnt/PCP score') + guides(fill=FALSE) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Cell type', y='Fraction Wnt/PCP\n positive cells (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1), labels=c(0,1, 10) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g82 ggsave(filename = paste0(figure_path, 'Wnt_score_per_progen_cell_type_frac.pdf'), plot = g82, width = 10, height=7) g8 <- ggplot(wnt_data, aes(x=refined_clustering, y=value_norm, fill=variable)) + scale_fill_manual( values=c('#aaaaaa', '#b82531'), name='Wnt/PCP score', breaks=c('none', 'Wnt.PCP'), labels=c('negative', 'positive') ) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Cell type', y='Population size (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1,0.5,0.75,1), labels=c(0,1, 10,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g8 ggsave(filename = paste0(figure_path, 'Wnt_score_per_cell_type_dodge.pdf'), plot = g8, width = 10, height=7) g9 <- ggplot(wnt_data2, aes(x=proma_cell_type, y=value_norm, fill=variable)) + scale_fill_manual( values=c('#aaaaaa', '#b82531'), name='Wnt/PCP score', breaks=c('none', 'Wnt.PCP'), labels=c('negative', 
'positive') ) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Cell type', y='Population size (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1,0.5,0.75,1), labels=c(0,1,10,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g9 ggsave(filename = paste0(figure_path, 'Wnt_score_per_progen_cell_type_dodge.pdf'), plot = g9, width = 10, height=7) g10 <- ggplot(wnt_data3, aes(x=type, y=value, fill=variable)) + scale_fill_manual( values=c('#aaaaaa', '#b82531'), name='Wnt/PCP score', breaks=c('none', 'Wnt.PCP'), labels=c('negative', 'positive') ) + scale_x_discrete(breaks = 'overall', labels='') + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Wnt/PCP signaling', y='Population size (total)') + scale_y_sqrt(breaks=c(1000, 10000, 60000) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g10 ggsave(filename = paste0(figure_path, 'Wnt_score_overall.pdf'), plot = g10, width = 10, height=7)
/gut_AB_AL_summary_statistics.R
no_license
sophietr/gut_lineage
R
false
false
36,646
r
library(rhdf5) library(Matrix) library(data.table) library(ggplot2) library(RColorBrewer) library(dplyr) today <- format(Sys.Date(), '%y%m%d') ###### # limma results ###### #load differential expression results for mutant vs control file_path <- '~/Documents/Cooperations/Böttcher_IDR/10X_data/analysis/notebooks/table/' files <- list.files(path = file_path, pattern = 'mutant_vs_control_progen.csv') files <- files[grepl('limma_', files)] files <- files[grepl('181017', files)] files <- files[grepl('progeni|ISC', files)] #select only progenitor and stem cell populations, not mature cells limma_results <- data.frame(X='', logFC=numeric(1),AveExpr=0, adj.P.Val=0, cell_type='') for (filing in files){ cells_tested <- sapply(strsplit(filing, '_'),'[',3) limma_res <- read.csv(paste0(file_path, filing)) data_res <- limma_res %>% select(X, logFC,AveExpr, adj.P.Val) data_res$cell_type <- cells_tested limma_results <- rbind(limma_results, data_res) } limma_results <- limma_results[-1,] write.csv(x = limma_results, file = paste0(file_path, 'limma_results_',today,'_mutant_vs_control_progenitors.csv')) # load comparison between progenitors and mature cell types files <- list.files(path = file_path, pattern = '_controls.csv') files <- files[grepl('181017', files)] limma_results2 <- data.frame(X='', logFC=numeric(1),AveExpr=0, adj.P.Val=0, cell_type1='', cell_type2='') for (filing in files){ cells_tested1 <- sapply(strsplit(filing, '_'),'[',3) cells_tested2 <- sapply(strsplit(filing, '_'),'[',5) limma_res <- read.csv(paste0(file_path, filing)) data_res <- limma_res %>% select(X, logFC,AveExpr, adj.P.Val) data_res$cell_type1 <- cells_tested1 data_res$cell_type2 <- cells_tested2 limma_results2 <- rbind(limma_results2, data_res) } limma_results2 <- limma_results2[-1,] write.csv(x = limma_results2, file = paste0(file_path, 'limma_results_',today,'_controls_progen.csv')) a <- unique(limma_results2$X[abs(limma_results2$logFC)>0.1]) write.csv(a[order(a)], file = paste0(file_path, 
'limma_results_', today, '_differential_genes_lfc05.csv'), row.names = FALSE) # load comparison between ISCs and maturing Paneth cells files <- list.files(path = file_path, pattern = '_controls_PC.csv') files <- files[grepl('181017', files)] limma_results3 <- data.frame(X='', logFC=numeric(1),AveExpr=0, adj.P.Val=0, cell_type1='', cell_type2='') for (filing in files){ cells_tested1 <- sapply(strsplit(filing, '_'),'[',3) cells_tested2 <- sapply(strsplit(filing, '_'),'[',5) limma_res <- read.csv(paste0(file_path, filing)) data_res <- limma_res %>% select(X, logFC,AveExpr, adj.P.Val) data_res$cell_type1 <- cells_tested1 data_res$cell_type2 <- cells_tested2 limma_results3<- rbind(limma_results3, data_res) } limma_results3 <- limma_results3[-1,] write.csv(x = limma_results3, file = paste0(file_path, 'limma_results_',today,'_controls_PC_maturation.csv'), row.names = FALSE) ####### #check Wnt genes for significance ####### canonical_Wnt_genes <- c('Dvl2', 'Axin2', 'Ascl2', 'Lgr5', 'Myc', 'Hopx') some_more_wnt_genes <- c('Wnt4', 'Wnt5a','Wnt5b', 'Wnt9a','Wnt9b', 'Wnt6', 'Fzd2', 'Fzd3', 'Fzd6','Fzd7', 'Dvl1','Dvl2','Dvl3','Invs', 'Vangl1', 'Vangl2', 'Prickle1', 'Celsr1', 'Celsr2', 'Celsr3', 'Scrib', 'Fuz', 'Intu', 'Ror2', 'Ryk', 'Ptk7', 'Smurf1', 'Smurf2', 'Mapk8', 'Cdc42', 'Rhoa','Exoc3', 'Exoc4', 'Exoc5', 'Jun') #check receptors for significance receptors <- c( 'Fgfr1','Fgfr2','Fgfr3','Fgfr4', #FGF 'Egfr', 'Erbb2','Erbb3','Lrig1', #EGF 'Bmpr1a', 'Bmpr2','Id1','Id2','Id3', #BMP 'Notch1','Notch2','Notch3','Jag1','Dll1','Dll4','Hes1','Lfng', #Notch 'Sfrp5','Sfrp1','Fzd1','Fzd2','Fzd3','Fzd6','Fzd7','Fzd8','Lrp5','Lrp6', #Wnt 'Smo', 'Shh', 'Ihh', #Hedgehog 'Yap1','Tead2', 'Tead3', #Hippo 'Fzd3', 'Fzd6', 'Ror2', 'Ptk7', 'Celsr1', 'Vangl1', 'Vangl2', 'Prickle1', 'Jun', #Wnt/PCP 'Ephb2', 'Ephb3', 'Efnb1', #Ephrin signalling 'Itgb1', 'Itga1','Itga2', 'Itga3', 'Itga5', 'Itga6', 'Itga9' #integrin signalling ) #### # check some more genes (including TFs) for significance #### 
more_interesting_genes =c('Cdkn1a', "Lbh", 'Klf15', "Ier2", 'Klf3', 'Atoh1', "Spdef", "Btg2", "Insm1", "Neurog3", "Sox4" ) #differential TFs mutant vs control tf_mutant_control <- read.csv(paste0(file_path,'TFs_progenitors_mutant_control_Oct.csv')) #differential TFs ISC vs progenitor or Paneth progenitor vs Goblet progenitor tf_progen <- read.csv(paste0(file_path,'TFs_progenitors_Oct.csv')) tfs_differential = intersect(unlist(tf_mutant_control), unlist(tf_progen)) tfs_diffset = setdiff(unlist(tf_mutant_control), unlist(tf_progen)) write.csv(tfs_differential, file=paste0(file_path, 'TFs_diff_progenitors_mutant_control_Oct.csv'), row.names = FALSE) progenitors <- c('ISC','Paneth primed ISC', 'Paneth progenitor', "Sox4+ early EE progenitor" , "Ngn3 progenitor", 'EEC', 'Goblet progenitor', 'Tuft progenitor') write.csv(x = limma_results[limma_results$X %in% canonical_Wnt_genes & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_Wnt.csv')) write.csv(x = limma_results[limma_results$X %in% some_more_wnt_genes & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path,'limma_results_',today,'_mutant_control_Wnt-PCP.csv')) write.csv(x = limma_results[limma_results$X %in% more_interesting_genes & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_genes_of_interest.csv')) write.csv(x = limma_results[limma_results$X %in% tfs_differential & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_TFs_intersect.csv')) write.csv(x = limma_results[limma_results$X %in% tfs_diffset & limma_results$cell_type %in% progenitors,], row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_mutant_control_TFs_diffset.csv')) write.csv(x = limma_results[limma_results$X %in% receptors& limma_results$cell_type %in% progenitors,], row.names 
=FALSE, file= paste0(file_path,'limma_results_',today,'_mutant_control_receptors.csv')) #progenitor tests write.csv(x = limma_results2[limma_results2$X %in% canonical_Wnt_genes ,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_Wnt.csv')) write.csv(x = limma_results2[limma_results2$X %in% some_more_wnt_genes,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_Wnt-PCP.csv')) write.csv(x = limma_results2[limma_results2$X %in% more_interesting_genes ,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_genes_of_interest.csv')) write.csv(x = limma_results2[limma_results2$X %in% tfs_differential ,],row.names =FALSE, file= paste0(file_path, 'limma_results_',today,'_controls_TFs_intersect.csv')) write.csv(x = limma_results2[limma_results2$X %in% receptors,], row.names =FALSE, file= paste0(file_path,'limma_results_',today,'_controls_receptors.csv')) ######### #load data for summary plots ######## f_path <- '~/Documents/Collaborations/Böttcher_IDR/10X_data/analysis/notebooks/write/ISC_PC_EEC_GC_Tuft_ref.h5ad' dset <- h5read(f_path, '/',compoundAsDataFrame=FALSE) figure_path <- '~/Documents/Collaborations/Böttcher_IDR/10X_data/analysis/notebooks/figures/' f_path_mut <- '~/Documents/Collaborations/Böttcher_IDR/10X_data/analysis/notebooks/write/gut_AB_AL_annotated_mutants.h5ad' dset_mut <- h5read(f_path_mut, '/', compoundAsDataFrame = FALSE) #get genes/cell ID barcodes <- unlist(dset$obs$index) genes <- unlist(dset$var$index) sample_levels <- c( "Control_1", "Control_2" , "Control_6" , "Control_3_FVR" , "Control_4_FVR", "Control_5_FVR","Control_7_FVR_only","CD_1" ,"CD_2", "CD_3") new_sample_levels <- c("Control_1", "Control_2" , "Control_6" , "Control_3_FVR" , "Control_4_FVR", "Control_5_FVR","Control_7_FVR_only",'FVF', 'FVF', 'FVF') cell_type_levels <- c("ISC", "Enterocyte progenitor", "Enterocyte", "Goblet progenitor" , "early Goblet" , "Goblet 1" , "Goblet 2", "Paneth primed ISC", "Paneth 
progenitor" , "Paneth 1" , "Paneth 2", "Lgr5+ EEC" , "Sox4+ early EE progenitor" , "Ngn3 progenitor", "Isl1/Arx progenitor", "Pax4 progenitor" ,"Ghrl progenitor", "EC", "EC-Reg4", "SAKD", "SIA", "SIK", "SIL-P", "SILA", "SIN", "Tuft progenitor" , "Tuft 1" , "Tuft 2") split_Gpc_levels <- c("ISC", "Enterocyte progenitor", "Enterocyte", "Goblet progenitor" , "early Goblet" , "Goblet cell" , "Goblet cell", "ISC", "Paneth progenitor" , "Paneth cell" , "Paneth cell", "EE progenitor" , "EE progenitor" , "EE progenitor", "EEC", "EEC" ,"EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "Tuft progenitor" , "Tuft cell" , "Tuft cell") proma_ct_mutant_levels <- c("ISC","Enterocyte progenitor", "Enterocyte", "Goblet progenitor" , "Goblet cell" , "Goblet cell" , "Goblet cell", "ISC", "Paneth progenitor" , "Paneth cell" , "Paneth cell", "EE progenitor" , "EE progenitor" , "EE progenitor", "EEC", "EEC" ,"EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "EEC", "Tuft progenitor" , "Tuft cell" , "Tuft cell") major_cell_type_levels = c( "ISC" , "Enterocyte", "Goblet cell" , "Paneth cell" ,"EEC" , "Tuft cell") proma_cell_type_levels = c( "ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet progenitor", "early Goblet" , "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell") genetics <- c('control', 'mutant') colors_proma <-c('#A30059', #ISC '#FF4A46', #Enterocyte progenitor '#5A0007', #Enterocyte '#006FA6', #Goblet progenitor '#0000A6', #Goblet '#fdb462', #Paneth progenitor '#ffd92f', #Paneth '#63FFAC', #EEprogenitor '#0A8201', #EEC '#9083CF', #Tuft progenitor '#304130' #Tuft ) colors_split_Gpc <-c('#A30059', #ISC '#FF4A46', #Enterocyte progenitor '#5A0007', #Enterocyte '#006FA6', #Goblet progenitor '#CB63CC', #early Goblet '#0000A6', #Goblet '#fdb462', #Paneth progenitor '#ffd92f', #Paneth '#63FFAC', #EEprogenitor '#0A8201', #EEC '#9083CF', #Tuft progenitor '#304130' #Tuft ) #get cell attributes for Mutants and 
Controls cellData_mut <- data.frame(mouse_line = factor(unlist(dset_mut$uns$genetics_categories)[unlist(dset_mut$obs$genetics)+1]), sample = factor(unlist(dset_mut$uns$sample_categories)[unlist(dset_mut$obs$sample)+1]), cc_score = factor(unlist(dset_mut$uns$phase_categories)[unlist(dset_mut$obs$phase)+1], levels=c('G1', 'G2M', 'S')), # cell_type = factor(unlist(dset_mut$uns$cell_type_test_categories)[unlist(dset_mut$obs$cell_type_test)+1], # levels = cell_type_levels), proma_cell_type = factor(unlist(dset_mut$uns$proma_split_Gpc_categories)[unlist(dset_mut$obs$proma_split_Gpc)+1], levels = proma_cell_type_levels) ) #levels(cellData_mut$proma_cell_type) <- proma_ct_mutant_levels #count cells per cell type cellData_mut_new <- cellData_mut cellData_mut.m <- melt(table(cellData_mut_new), id.vars=c('mouse_line', 'sample', 'cc_score', 'proma_cell_type')) cellData_mut.m <- cellData_mut.m[cellData_mut.m$value>0,] #select only non enriched samples cellData_mut.m <- cellData_mut.m[cellData_mut.m$sample %in% c('Control_1', 'Control_2', 'Mutant_1', 'Mutant_2'),] #get cell attributes for Controls only cellData <- data.frame( sample=factor(unlist(dset$uns$sample_categories)[unlist(dset$obs$sample)+1], levels=sample_levels), genetics = unlist(dset$uns$genetics_categories)[unlist(dset$obs$genetics)+1], cell_type = factor(unlist(dset$uns$refined_clustering_categories)[unlist(dset$obs$refined_clustering+1)], cell_type_levels), cc_score = factor(unlist(dset$uns$phase_categories)[unlist(dset$obs$phase)+1], levels=c('G1', 'G2M', 'S')), major_cell_type = factor(unlist(dset$uns$major_cell_type_categories)[unlist(dset$obs$major_cell_type+1)], major_cell_type_levels), proma_cell_type = factor(unlist(dset$uns$proma_cell_type_categories)[unlist(dset$obs$proma_cell_type+1)], proma_cell_type_levels), split_Gpc_type = factor(unlist(dset$uns$refined_clustering_categories)[unlist(dset$obs$refined_clustering+1)], cell_type_levels) ) #get sample sizes levels(cellData$split_Gpc_type) <- 
split_Gpc_levels sample_size <- table(cellData$sample) cellData_new <- cellData levels(cellData_new$sample) <- new_sample_levels cellData.m <- melt(table(cellData_new), id.vars=c('cell_type', 'major_cell_type', 'proma_cell_type', 'split_Gpc_type')) cellData.m <- cellData.m[cellData.m$value>0,] CD <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(cell_type) %>% summarise(total_count= sum(value)) CD_major <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(major_cell_type) %>% summarise(total_count= sum(value)) CD_proma<- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(proma_cell_type) %>% summarise(total_count= sum(value)) CD_split_Gpc<- cellData.m %>% select(sample, cell_type,split_Gpc_type,value) %>% group_by(split_Gpc_type) %>% summarise(total_count= sum(value)) CD_sample <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(sample, cell_type) %>% summarise(total_count= sum(value)) CD_sample_tot <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(sample) %>% summarise(total_count= sum(value)) CD_sample_tot2 <- data.frame(sample=names(sample_size), value=sample_size) CD_sample_M <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type,value) %>% group_by(sample, major_cell_type) %>% summarise(total_count= sum(value)) CD_sample_pro <- cellData.m %>% select(sample, cell_type,major_cell_type, proma_cell_type, value) %>% group_by(sample, proma_cell_type) %>% summarise(total_count= sum(value)) CD_sample_pro_Gpc <- cellData.m %>% select(sample, cell_type,major_cell_type, split_Gpc_type, value) %>% group_by(sample, split_Gpc_type) %>% summarise(total_count= sum(value)) ################ # create plots # ################ g06 <- ggplot(cellData_mut.m[cellData_mut.m$proma_cell_type %in% c('ISC', 'Paneth progenitor', 'Paneth cell','EE 
progenitor', 'EEC'),], aes(mouse_line, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Mouse line', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + facet_grid(~proma_cell_type) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g06 ggsave(filename = paste0(figure_path,today, '_cellcycle_per_cell_type_mutants.pdf'), plot = g06, width = 10, height=7) g06 <- ggplot(cellData_mut.m[cellData_mut.m$proma_cell_type %in% c("ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet progenitor",'early Goblet', "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell"),], aes(mouse_line, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Mouse line', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + facet_grid(~proma_cell_type) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g06 ggsave(filename = paste0(figure_path,today, '_cellcycle_per_cell_type_mutants.pdf'), plot = g06, width = 15, height=7) g07 <- ggplot(cellData.m[cellData.m$proma_cell_type %in% c("ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet 
progenitor",'early Goblet', "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell"),], aes(proma_cell_type, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g07 ggsave(filename = paste0(figure_path, today, '_cellcycle_per_cell_type_progen.pdf'), plot = g07, width = 7, height=7) g08 <- ggplot(cellData.m[cellData.m$split_Gpc_type %in% c("ISC" , "Enterocyte progenitor" ,"Enterocyte", "Goblet progenitor", "early Goblet", "Goblet cell" , "Paneth progenitor" , "Paneth cell" , "EE progenitor" ,"EEC" , "Tuft progenitor", "Tuft cell"),], aes(split_Gpc_type, value, fill=cc_score))+ scale_fill_manual(values =c('#1f77b4', '#ff7f0e', '#2ca02c'), name='Cell cycle\nphase') + geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Proliferation (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + #scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), # labels=c('Control 6', 'Control 5 FVR', 'Control 7 FVR only','FVF')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g08 ggsave(filename = paste0(figure_path, today, '_cellcycle_per_cell_type_progen_Gpc.pdf'), plot = g08, width = 7, height=7) g01 <- ggplot(CD_sample_tot, aes(sample, total_count)) +# 
facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + lims(y=c(0,20000))+ #scale_y_sqrt(breaks=c(50, 500, 1000, 5000, 10000, 15000)) + scale_x_discrete(breaks= CD_sample_tot$sample, labels=c('Control 1', 'Control 2', 'Control 6', 'Control 3 (FVR)', 'Control 4 (FVR)', 'Control 5 (50% FVR)', 'Control 7 (90% FVR)', 'FVF (3 samples)') ) + theme_classic() + labs(title='Cell number per sample', x='Sample', y='Size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g01 ggsave(filename = paste0(figure_path, 'counts_per_sample_all_controls.pdf'), plot = g01, width = 5, height=7) g02 <- ggplot(CD_sample_tot2, aes(sample, value.Freq)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + lims(y=c(0,20000))+ #scale_y_sqrt(breaks=c(50, 500, 1000, 5000, 10000, 15000)) + scale_x_discrete(breaks= levels(CD_sample_tot2$sample), labels=c('FVF 1', 'FVF 2', 'FVF 3', 'Control 1', 'Control 2', 'Control 6', 'Control 3 (FVR)', 'Control 4 (FVR)', 'Control 5 (50% FVR)', 'Control 7 (90% FVR)' ) ) + theme_classic() + labs(title='Cell number per sample', x='Sample', y='Size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g02 ggsave(filename = paste0(figure_path, 'counts_per_sample_all_controls2.pdf'), plot = g02, width = 5, height=7) g0 <- ggplot(CD, aes(cell_type, total_count)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(50, 500, 1000, 5000, 10000, 15000)) + scale_x_discrete(breaks= cell_type_levels) + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + theme(axis.title.x = 
element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g0 ggsave(filename = paste0(figure_path, 'counts_per_refined_cell_type_all_controls.pdf'), plot = g0, width = 10, height=7) g1 <- ggplot(CD_major, aes(major_cell_type, total_count)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(1000,2000,3000,4000, 5000, 10000, 20000)) + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g1 ggsave(filename = paste0(figure_path, today, '_counts_per_major_cell_type_all_controls.pdf'), plot = g1, width = 4, height=7) g2 <- ggplot(CD_proma, aes(proma_cell_type, total_count)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + #scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(500, 1000,2000,3000,4000, 5000, 10000, 20000)) + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g2 ggsave(filename = paste0(figure_path, today, '_counts_per_main_progen_cell_type_all_controls.pdf'), plot = g2, width = 6, height=7) g21 <- ggplot(CD_split_Gpc, aes(split_Gpc_type, total_count, fill=split_Gpc_type)) +# facet_wrap(~genetics)+ geom_col(position="dodge") + scale_fill_manual(values=colors_split_Gpc, guide=FALSE)+#scale_fill_brewer(type='qual', palette = 'Paired', guide=FALSE) + scale_y_sqrt(breaks=c(500, 1000,2000,3000,4000, 5000, 10000, 20000)) + #guide_legend() + theme_classic() + labs(title='Cell number per cell type', x='Cell type', y='Population size') + 
theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g21 ggsave(filename = paste0(figure_path, today, '_counts_per_progen_Gpc_cell_type_all_controls.pdf'), plot = g21, width = 6, height=7) g2 <- ggplot(CD_sample[CD_sample$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'),], aes(cell_type, total_count, fill=sample)) + scale_fill_manual(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'), values= c('grey', 'firebrick1', 'cornflowerblue') ) + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Population size (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g2 ggsave(filename = paste0(figure_path, 'counts_per_refined_cell_type_c5-7.pdf'), plot = g2, width = 10, height=7) g2 <- ggplot(CD_sample_pro[CD_sample_pro$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'),], aes(proma_cell_type, total_count, fill=sample)) + scale_fill_manual(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only'), values= c('grey', 'firebrick1', 'cornflowerblue') ) + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Cell type', y='Population size (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g2 ggsave(filename = paste0(figure_path, 'counts_per_main_progen_cell_type_c5-7.pdf'), plot = g2, width = 10, height=7) g6 <- ggplot(CD_sample_pro[CD_sample_pro$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'),], 
aes(sample, total_count, fill=proma_cell_type)) + scale_fill_manual(breaks=proma_cell_type_levels, values=colors_proma, name='Cell type') + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Sample', y='Cell type frequency (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), labels=c('no enrichment', 'FVR enrichment (50%)', 'FVR enrichment (90%)','FVF enrichment (50%)')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g6 ggsave(filename = paste0(figure_path, today, '_counts_per_cell_type_FVF_c5-7_stacked.pdf'), plot = g6, width = 10, height=7) g6 <- ggplot(CD_sample_pro_Gpc[CD_sample_pro_Gpc$sample %in% c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'),], aes(sample, total_count, fill=split_Gpc_type)) + scale_fill_manual(breaks=levels(cellData$split_Gpc_type), values=colors_split_Gpc, name='Cell type') + # facet_wrap(~genetics)+ geom_bar(position="fill", stat='identity') + labs(x='Sample', y='Cell type frequency (%)') + scale_y_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c(0, 25,50 ,75, 100) ) + scale_x_discrete(breaks=c('Control_6', 'Control_5_FVR', 'Control_7_FVR_only','FVF'), labels=c('no enrichment', 'FVR enrichment (50%)', 'FVR enrichment (90%)','FVF enrichment (50%)')) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.y = element_text(size=15), axis.text.x = element_text(angle=45, hjust = 1, size=15)) g6 ggsave(filename = paste0(figure_path, today, '_counts_per_cell_type_split_Gpc_FVF_c5-7_stacked.pdf'), plot = g6, width = 6, height=10) ############################# # load Wnt/PCP score # ############################# wnt_data <- 
read.csv('~/Documents/Cooperations/Böttcher_IDR/10X_data/analysis/notebooks/table/Wnt_scale_per_cell_type.csv') wnt_data$refined_clustering <- factor(wnt_data$refined_clustering, levels = cell_type_levels) wnt_data2 <- read.csv('~/Documents/Cooperations/Böttcher_IDR/10X_data/analysis/notebooks/table/Wnt_scale_per_promain_cell_type.csv') wnt_data2$proma_cell_type <- factor(wnt_data2$proma_cell_type, levels = proma_cell_type_levels) wnt_data <- melt(wnt_data, id.vars='refined_clustering') wnt_size_new <- c(sum(wnt_data$value[wnt_data$variable=='none']), sum(wnt_data$value[wnt_data$variable=='Wnt.PCP'])) wnt_data$value_norm <- wnt_data$value/rep(wnt_size_new, each=dim(wnt_data)[1]/2) wnt_size_new2 <- c(sum(wnt_data2['none']), sum(wnt_data2['Wnt.PCP'])) wnt_data2 <- melt(wnt_data2, id.vars='proma_cell_type') wnt_data2$value_norm <- wnt_data2$value/rep(wnt_size_new2, each=dim(wnt_data2)[1]/2) wnt_data3 <- data.frame(type=rep('overall',2), variable=c('none', 'Wnt.PCP'), value=wnt_size_new ) wnt_frac <- wnt_data %>% select(refined_clustering, variable, value) %>% group_by(refined_clustering, variable) %>% summarise(total=sum(value)) wnt_frac2 <- wnt_data %>% select(refined_clustering, variable, value) %>% group_by(refined_clustering) %>% summarise(total=sum(value)) wnt_frac <- wnt_frac[wnt_frac$variable=='Wnt.PCP',] wnt_frac$total = wnt_frac$total/wnt_frac2$total wnt_fracM <- wnt_data2 %>% select(proma_cell_type, variable, value) %>% group_by(proma_cell_type, variable) %>% summarise(total=sum(value)) wnt_fracM2 <- wnt_data2 %>% select(proma_cell_type, variable, value) %>% group_by(proma_cell_type) %>% summarise(total=sum(value)) wnt_fracM <- wnt_fracM[wnt_fracM$variable=='Wnt.PCP',] wnt_fracM$total = wnt_fracM$total/wnt_fracM2$total g81 <- ggplot(wnt_frac, aes(x=refined_clustering, y=total, fill=variable)) + scale_fill_manual( values=c('#b82531'), name='Wnt/PCP score') + guides(fill=FALSE) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + 
labs(x='Cell type', y='Fraction Wnt/PCP\n positive cells (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1), labels=c(0,1, 10) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g81 ggsave(filename = paste0(figure_path, 'Wnt_score_per_cell_type_frac.pdf'), plot = g81, width = 10, height=7) g82 <- ggplot(wnt_fracM, aes(x=proma_cell_type, y=total, fill=variable)) + scale_fill_manual( values=c('#b82531'), name='Wnt/PCP score') + guides(fill=FALSE) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Cell type', y='Fraction Wnt/PCP\n positive cells (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1), labels=c(0,1, 10) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g82 ggsave(filename = paste0(figure_path, 'Wnt_score_per_progen_cell_type_frac.pdf'), plot = g82, width = 10, height=7) g8 <- ggplot(wnt_data, aes(x=refined_clustering, y=value_norm, fill=variable)) + scale_fill_manual( values=c('#aaaaaa', '#b82531'), name='Wnt/PCP score', breaks=c('none', 'Wnt.PCP'), labels=c('negative', 'positive') ) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Cell type', y='Population size (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1,0.5,0.75,1), labels=c(0,1, 10,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g8 ggsave(filename = paste0(figure_path, 'Wnt_score_per_cell_type_dodge.pdf'), plot = g8, width = 10, height=7) g9 <- ggplot(wnt_data2, aes(x=proma_cell_type, y=value_norm, fill=variable)) + scale_fill_manual( values=c('#aaaaaa', '#b82531'), name='Wnt/PCP score', breaks=c('none', 'Wnt.PCP'), labels=c('negative', 
'positive') ) + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Cell type', y='Population size (%)') + scale_y_sqrt(breaks=c(0,0.01,0.1,0.5,0.75,1), labels=c(0,1,10,50 ,75, 100) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g9 ggsave(filename = paste0(figure_path, 'Wnt_score_per_progen_cell_type_dodge.pdf'), plot = g9, width = 10, height=7) g10 <- ggplot(wnt_data3, aes(x=type, y=value, fill=variable)) + scale_fill_manual( values=c('#aaaaaa', '#b82531'), name='Wnt/PCP score', breaks=c('none', 'Wnt.PCP'), labels=c('negative', 'positive') ) + scale_x_discrete(breaks = 'overall', labels='') + # facet_wrap(~genetics)+ geom_bar(position="dodge",stat="identity") + labs(x='Wnt/PCP signaling', y='Population size (total)') + scale_y_sqrt(breaks=c(1000, 10000, 60000) ) + theme_classic() + theme(axis.title.x = element_text(face="bold", size=15), axis.title.y = element_text(face="bold", size=15), axis.text.x = element_text(angle=45, hjust = 1, size=10)) g10 ggsave(filename = paste0(figure_path, 'Wnt_score_overall.pdf'), plot = g10, width = 10, height=7)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/8.2-ens.R \docType{methods} \name{buildEnsemble} \alias{buildEnsemble} \alias{buildEnsemble,ExprsModel-method} \alias{buildEnsemble,ExprsPipeline-method} \title{Build Ensemble} \usage{ buildEnsemble(object, ...) \S4method{buildEnsemble}{ExprsModel}(object, ...) \S4method{buildEnsemble}{ExprsPipeline}(object, colBy = 0, how = 0, gate = 0, top = 0) } \arguments{ \item{object}{An \code{\link{ExprsModel-class}} object.} \item{...}{Additional \code{ExprsModel} objects to use in the ensemble. Argument applies to \code{\link{ExprsModel-class}} method only.} \item{colBy}{A character vector or string. Specifies column(s) to use when filtering by classifier performance. Listing multiple columns will result in a filter based on a performance metric equal to the product of those listed columns.} \item{how}{A numeric scalar. Arguments between 0 and 1 will impose a threshold or ceiling filter, respectively, based on the raw value of \code{colBy}. Arguments between 1 and 100 will impose a filter based on the percentile of \code{colBy}. The user may also provide "midrange", "median", or "mean" as an argument for these filters. Set \code{how = 0} or \code{gate = 0}, to skip the threshold or ceiling filter, respectively.} \item{gate}{A numeric scalar. Arguments between 0 and 1 will impose a threshold or ceiling filter, respectively, based on the raw value of \code{colBy}. Arguments between 1 and 100 will impose a filter based on the percentile of \code{colBy}. The user may also provide "midrange", "median", or "mean" as an argument for these filters. Set \code{how = 0} or \code{gate = 0}, to skip the threshold or ceiling filter, respectively.} \item{top}{A numeric scalar. Determines the top N models based on \code{colBy} to include after the threshold and ceiling filters. In the case that the \code{@summary} slot contains the column "boot", this determines the top N models for each unique bootstrap. 
Set \code{top = 0} to skip this subset.} } \value{ An \code{\link{ExprsEnsemble-class}} object. } \description{ Aggregates multiple classifiers into a single ensemble classifier. } \details{ The \code{\link{ExprsModel-class}} method: Combine any number of \code{ExprsModel} objects into an ensemble. These models do not necessarily have to derive from the same \code{build} method. This method works identically to the \code{\link{conjoin}} \code{ExprsModel} method. The \code{\link{ExprsPipeline-class}} method: Build an ensemble from an \code{ExprsPipeline} object. This method works by calling \code{\link{pipeFilter}}, then aggregating those results into an ensemble. As an adjunct to this method, consider first combining multiple \code{ExprsPipeline} objects together with \code{\link{conjoin}}. } \section{Methods (by class)}{ \itemize{ \item \code{ExprsModel}: Method to build ensemble from \code{ExprsModel} objects. \item \code{ExprsPipeline}: Method to build ensemble from \code{ExprsPipeline} objects. }} \seealso{ \code{\link{pipeFilter}}\cr \code{\link{pipeUnboot}}\cr \code{\link{plCV}}\cr \code{\link{plGrid}}\cr \code{\link{plGridMulti}}\cr \code{\link{plMonteCarlo}}\cr \code{\link{plNested}} }
/man/buildEnsemble.Rd
no_license
lqcheng2017/exprso
R
false
true
3,225
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/8.2-ens.R \docType{methods} \name{buildEnsemble} \alias{buildEnsemble} \alias{buildEnsemble,ExprsModel-method} \alias{buildEnsemble,ExprsPipeline-method} \title{Build Ensemble} \usage{ buildEnsemble(object, ...) \S4method{buildEnsemble}{ExprsModel}(object, ...) \S4method{buildEnsemble}{ExprsPipeline}(object, colBy = 0, how = 0, gate = 0, top = 0) } \arguments{ \item{object}{An \code{\link{ExprsModel-class}} object.} \item{...}{Additional \code{ExprsModel} objects to use in the ensemble. Argument applies to \code{\link{ExprsModel-class}} method only.} \item{colBy}{A character vector or string. Specifies column(s) to use when filtering by classifier performance. Listing multiple columns will result in a filter based on a performance metric equal to the product of those listed columns.} \item{how}{A numeric scalar. Arguments between 0 and 1 will impose a threshold or ceiling filter, respectively, based on the raw value of \code{colBy}. Arguments between 1 and 100 will impose a filter based on the percentile of \code{colBy}. The user may also provide "midrange", "median", or "mean" as an argument for these filters. Set \code{how = 0} or \code{gate = 0}, to skip the threshold or ceiling filter, respectively.} \item{gate}{A numeric scalar. Arguments between 0 and 1 will impose a threshold or ceiling filter, respectively, based on the raw value of \code{colBy}. Arguments between 1 and 100 will impose a filter based on the percentile of \code{colBy}. The user may also provide "midrange", "median", or "mean" as an argument for these filters. Set \code{how = 0} or \code{gate = 0}, to skip the threshold or ceiling filter, respectively.} \item{top}{A numeric scalar. Determines the top N models based on \code{colBy} to include after the threshold and ceiling filters. In the case that the \code{@summary} slot contains the column "boot", this determines the top N models for each unique bootstrap. 
Set \code{top = 0} to skip this subset.} } \value{ An \code{\link{ExprsEnsemble-class}} object. } \description{ Aggregates multiple classifiers into a single ensemble classifier. } \details{ The \code{\link{ExprsModel-class}} method: Combine any number of \code{ExprsModel} objects into an ensemble. These models do not necessarily have to derive from the same \code{build} method. This method works identically to the \code{\link{conjoin}} \code{ExprsModel} method. The \code{\link{ExprsPipeline-class}} method: Build an ensemble from an \code{ExprsPipeline} object. This method works by calling \code{\link{pipeFilter}}, then aggregating those results into an ensemble. As an adjunct to this method, consider first combining multiple \code{ExprsPipeline} objects together with \code{\link{conjoin}}. } \section{Methods (by class)}{ \itemize{ \item \code{ExprsModel}: Method to build ensemble from \code{ExprsModel} objects. \item \code{ExprsPipeline}: Method to build ensemble from \code{ExprsPipeline} objects. }} \seealso{ \code{\link{pipeFilter}}\cr \code{\link{pipeUnboot}}\cr \code{\link{plCV}}\cr \code{\link{plGrid}}\cr \code{\link{plGridMulti}}\cr \code{\link{plMonteCarlo}}\cr \code{\link{plNested}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mia_2.R \name{gmdh.mia_2} \alias{gmdh.mia_2} \title{GMDH MIA auxiliar functions} \usage{ gmdh.mia_2(X, y, prune, x.test, y.test) } \description{ Performs auxiliar tasks to predict.mia } \keyword{internal}
/man/gmdh.mia_2.Rd
no_license
cran/GMDHreg
R
false
true
283
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mia_2.R \name{gmdh.mia_2} \alias{gmdh.mia_2} \title{GMDH MIA auxiliar functions} \usage{ gmdh.mia_2(X, y, prune, x.test, y.test) } \description{ Performs auxiliar tasks to predict.mia } \keyword{internal}
#' Aggregate #' #' Aggregate several Parquet tables as a single one. #' #' @param input.files List of Parquet files. #' @param FUNC Function to apply on each Parquet table. #' @param cols Columns to subset. #' @param file.out Output Parquet filename. #' @return The aggregated Parquet tables as a \code{data.table} if #' \code{file.out} is not NULL, otherwise \code{file.out}. #' @export Aggregate <- function(input.files, FUNC, cols=NULL, file.out=NULL) { log <- rbindlist(lapply(input.files, FUNC), fill=TRUE) if (!is.null(cols)) { log <- log %>% SubsetColumns(cols) } if (!is.null(file.out)) { WriteParquet(log, file.out) file.out } else log } #' Aggregate commits #' #' Aggregate commits for different repositories as a single table. #' #' @param gitlog Data.frame containing for each Git repository the #' Parquet filename of the commit log and the Parquet filename of #' the diff. #' @param FUNC Function to apply on each commit table. #' @param file.out Output Parquet filename. #' @return The aggregated Parquet tables as a \code{data.table} if #' \code{file.out} is not NULL, otherwise \code{file.out}. 
#' @export AggregateCommits <- function(gitlog, FUNC, file.out=NULL) { files <- with(gitlog, mapply(function(x, y) list(log=x, diff=y), log, diff, SIMPLIFY=FALSE)) Aggregate(files, FUNC, file.out=file.out) } issues.cols <- list(issues=c("source", "product", "issue.id", "issue.key", "created", "updated", "last.resolved", "summary", "description", "version", "milestone", "status", "severity", "priority", "issuetype", "resolution", "component", "votes", "product.name", "reporter.key", "reporter.name", "reporter.displayname", "reporter.email", "reporter.tz", "creator.key", "creator.name", "creator.displayname", "creator.email", "creator.tz", "assignee.key", "assignee.name", "assignee.displayname", "assignee.email", "assignee.tz"), comments=c("source", "product", "issue.id", "comment.id", "count", "author.key", "author.name", "author.displayname", "author.email", "author.tz", "update.author.key", "update.author.name", "update.author.displayname", "update.author.email", "update.author.tz", "created", "updated")) #' Aggregate issues #' #' Aggregate issue data as a single table. #' #' @param jiralog Data.frame containing all the Jira Parquet #' filenames. #' @param bugzillalog Data.frame containing all the Bugzilla Parquet #' filenames #' @param t Type of issue log to create (e.g. issue or comment). #' @param FUNC Function to apply on each table. #' @param file.out Output Parquet filename. #' @return The aggregated Parquet tables as a \code{data.table} if #' \code{file.out} is not NULL, otherwise \code{file.out}. #' @export AggregateIssues <- function(jiralog, bugzillalog, t, FUNC, file.out=NULL) { files <- rbind(jiralog, bugzillalog)[type == t, filename] log <- Aggregate(files, function(f) { res <- f %>% ReadParquet if (nrow(res)) res %>% FUNC }, cols=issues.cols[[t]], file.out=file.out) }
/R/aggregate.R
no_license
M3SOulu/MozillaApacheDataset-Rpackage
R
false
false
3,718
r
#' Aggregate #' #' Aggregate several Parquet tables as a single one. #' #' @param input.files List of Parquet files. #' @param FUNC Function to apply on each Parquet table. #' @param cols Columns to subset. #' @param file.out Output Parquet filename. #' @return The aggregated Parquet tables as a \code{data.table} if #' \code{file.out} is not NULL, otherwise \code{file.out}. #' @export Aggregate <- function(input.files, FUNC, cols=NULL, file.out=NULL) { log <- rbindlist(lapply(input.files, FUNC), fill=TRUE) if (!is.null(cols)) { log <- log %>% SubsetColumns(cols) } if (!is.null(file.out)) { WriteParquet(log, file.out) file.out } else log } #' Aggregate commits #' #' Aggregate commits for different repositories as a single table. #' #' @param gitlog Data.frame containing for each Git repository the #' Parquet filename of the commit log and the Parquet filename of #' the diff. #' @param FUNC Function to apply on each commit table. #' @param file.out Output Parquet filename. #' @return The aggregated Parquet tables as a \code{data.table} if #' \code{file.out} is not NULL, otherwise \code{file.out}. 
#' @export AggregateCommits <- function(gitlog, FUNC, file.out=NULL) { files <- with(gitlog, mapply(function(x, y) list(log=x, diff=y), log, diff, SIMPLIFY=FALSE)) Aggregate(files, FUNC, file.out=file.out) } issues.cols <- list(issues=c("source", "product", "issue.id", "issue.key", "created", "updated", "last.resolved", "summary", "description", "version", "milestone", "status", "severity", "priority", "issuetype", "resolution", "component", "votes", "product.name", "reporter.key", "reporter.name", "reporter.displayname", "reporter.email", "reporter.tz", "creator.key", "creator.name", "creator.displayname", "creator.email", "creator.tz", "assignee.key", "assignee.name", "assignee.displayname", "assignee.email", "assignee.tz"), comments=c("source", "product", "issue.id", "comment.id", "count", "author.key", "author.name", "author.displayname", "author.email", "author.tz", "update.author.key", "update.author.name", "update.author.displayname", "update.author.email", "update.author.tz", "created", "updated")) #' Aggregate issues #' #' Aggregate issue data as a single table. #' #' @param jiralog Data.frame containing all the Jira Parquet #' filenames. #' @param bugzillalog Data.frame containing all the Bugzilla Parquet #' filenames #' @param t Type of issue log to create (e.g. issue or comment). #' @param FUNC Function to apply on each table. #' @param file.out Output Parquet filename. #' @return The aggregated Parquet tables as a \code{data.table} if #' \code{file.out} is not NULL, otherwise \code{file.out}. #' @export AggregateIssues <- function(jiralog, bugzillalog, t, FUNC, file.out=NULL) { files <- rbind(jiralog, bugzillalog)[type == t, filename] log <- Aggregate(files, function(f) { res <- f %>% ReadParquet if (nrow(res)) res %>% FUNC }, cols=issues.cols[[t]], file.out=file.out) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{seedwords} \alias{seedwords} \title{Seed words for Latent Semantci Analysis} \usage{ seedwords(type) } \arguments{ \item{type}{type of seed words currently only for sentiment (\code{sentiment}) or political ideology (\code{ideology}).} } \description{ Seed words for Latent Semantci Analysis } \examples{ seedwords('sentiment') } \references{ Turney, P. D., & Littman, M. L. (2003). Measuring Praise and Criticism: Inference of Semantic Orientation from Association. ACM Trans. Inf. Syst., 21(4), 315–346. https://doi.org/10.1145/944012.944013 }
/man/seedwords.Rd
no_license
daiyamao/LSX
R
false
true
640
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{seedwords} \alias{seedwords} \title{Seed words for Latent Semantci Analysis} \usage{ seedwords(type) } \arguments{ \item{type}{type of seed words currently only for sentiment (\code{sentiment}) or political ideology (\code{ideology}).} } \description{ Seed words for Latent Semantci Analysis } \examples{ seedwords('sentiment') } \references{ Turney, P. D., & Littman, M. L. (2003). Measuring Praise and Criticism: Inference of Semantic Orientation from Association. ACM Trans. Inf. Syst., 21(4), 315–346. https://doi.org/10.1145/944012.944013 }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SigDFMethods.R \name{sdf_read_table} \alias{sdf_read_table} \title{read a table file to SigDF} \usage{ sdf_read_table(fname, platform = NULL, ...) } \arguments{ \item{fname}{file name} \item{platform}{array platform (will infer if not given)} \item{...}{additional argument to read.table} } \value{ read table file to SigDF } \description{ read a table file to SigDF } \examples{ sesameDataCache("EPIC") # if not done yet sdf = sesameDataGet('EPIC.1.SigDF') fname = sprintf("\%s/sigdf.txt", tempdir()) sdf_write_table(sdf, file=fname) sdf2 = sdf_read_table(fname) }
/man/sdf_read_table.Rd
permissive
a-augustin/sesame
R
false
true
646
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SigDFMethods.R \name{sdf_read_table} \alias{sdf_read_table} \title{read a table file to SigDF} \usage{ sdf_read_table(fname, platform = NULL, ...) } \arguments{ \item{fname}{file name} \item{platform}{array platform (will infer if not given)} \item{...}{additional argument to read.table} } \value{ read table file to SigDF } \description{ read a table file to SigDF } \examples{ sesameDataCache("EPIC") # if not done yet sdf = sesameDataGet('EPIC.1.SigDF') fname = sprintf("\%s/sigdf.txt", tempdir()) sdf_write_table(sdf, file=fname) sdf2 = sdf_read_table(fname) }
################################### # rm(list=ls(all=TRUE)) ################################## # Generate training and testing data similar to IRIS set.seed(100) numbers1 = rnorm(400) rows1 = 40 columns1 = 10 training = matrix(numbers1, rows1, columns1) ############## set.seed(100) #set.seed(0) numbers2 = rnorm(400) rows2 = 40 columns2 = 10 testing = matrix(numbers2, rows2, columns2) ############################### head(training) head(testing) ############################################### # Binary Response Variable for ROC Curve # # Generate class labels training data (cl_training <- rep(c(-1, 1), each=20)) length(cl_training) # Generate class labels testing data (cl_testing <- rep(c(-1,1),each=20)) length(cl_testing) ######################################## # Apply KNN Modeling method # m1 <- class::knn(training, testing, cl_training, k=2, prob=TRUE) m1 # Compute the probabilities # (prob1 <- attr(m1, "prob")) (prob2 <- 2*ifelse(m1 == "-1", 1-prob1, prob1) - 1) ######################################### library(ROCR) pred_knn <- prediction(prob2, cl_testing) pred_knn <- performance(pred_knn, "tpr", "fpr") plot(pred_knn, avg= "threshold", colorize=T, lwd=3, main="ROC curve") plot(pred_knn, avg= "threshold", lwd=3, main="ROC curve") abline(a=0,b=1) ############################################
/cs61/s8/FINAL ROC Curve.R
no_license
taecoding/data
R
false
false
1,384
r
################################### # rm(list=ls(all=TRUE)) ################################## # Generate training and testing data similar to IRIS set.seed(100) numbers1 = rnorm(400) rows1 = 40 columns1 = 10 training = matrix(numbers1, rows1, columns1) ############## set.seed(100) #set.seed(0) numbers2 = rnorm(400) rows2 = 40 columns2 = 10 testing = matrix(numbers2, rows2, columns2) ############################### head(training) head(testing) ############################################### # Binary Response Variable for ROC Curve # # Generate class labels training data (cl_training <- rep(c(-1, 1), each=20)) length(cl_training) # Generate class labels testing data (cl_testing <- rep(c(-1,1),each=20)) length(cl_testing) ######################################## # Apply KNN Modeling method # m1 <- class::knn(training, testing, cl_training, k=2, prob=TRUE) m1 # Compute the probabilities # (prob1 <- attr(m1, "prob")) (prob2 <- 2*ifelse(m1 == "-1", 1-prob1, prob1) - 1) ######################################### library(ROCR) pred_knn <- prediction(prob2, cl_testing) pred_knn <- performance(pred_knn, "tpr", "fpr") plot(pred_knn, avg= "threshold", colorize=T, lwd=3, main="ROC curve") plot(pred_knn, avg= "threshold", lwd=3, main="ROC curve") abline(a=0,b=1) ############################################
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cb_taxa_gene.R \name{cb_taxa_gene} \alias{cb_taxa_gene} \title{Concatenate taxa names and gene name to build a search term for entrez_search function} \usage{ cb_taxa_gene(tax, gene) } \arguments{ \item{tax}{a char vector, use c() for multiple names} \item{gene}{a char vector, currently accepts a single gene} } \value{ the search term that feeds the entrez_search function } \description{ takes one or multiple taxa namzs, concatenate them and with the name of the gene build a search term for entrez_search function } \examples{ cb_taxa_gene(c("Pomatoschistus","Gobius"),"COI") }
/man/cb_taxa_gene.Rd
permissive
jurenoult/fishphylo
R
false
true
664
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cb_taxa_gene.R \name{cb_taxa_gene} \alias{cb_taxa_gene} \title{Concatenate taxa names and gene name to build a search term for entrez_search function} \usage{ cb_taxa_gene(tax, gene) } \arguments{ \item{tax}{a char vector, use c() for multiple names} \item{gene}{a char vector, currently accepts a single gene} } \value{ the search term that feeds the entrez_search function } \description{ takes one or multiple taxa namzs, concatenate them and with the name of the gene build a search term for entrez_search function } \examples{ cb_taxa_gene(c("Pomatoschistus","Gobius"),"COI") }
turk4_s1028 <- read.table("turk4_s1028.txt", header=T) names(turk4_s1028) <- c("X1","X2","X3","C1","T1")
/data/turk4_s1028.R
no_license
ewan/dlp
R
false
false
105
r
turk4_s1028 <- read.table("turk4_s1028.txt", header=T) names(turk4_s1028) <- c("X1","X2","X3","C1","T1")
## Test the tiling coefficients. ## if one train is empty, we get NaN. expect_equal( tiling.corr(1:5, double(0)), NaN) context("Tiling coefficent should return 1 for autocorrelated trains.") poisson.train <- function(n=1000, rate=1, beg=0) { ## Generate a Poisson spike train with N spikes and firing rate RATE. ## BEG is time of start of recording ## Check that the histogram looks exponentially distributed. ## hist( diff(poisson.train())) x <- runif(n) isi <- log(x) / rate spikes <- beg - cumsum(isi) spikes } ## Auto-correlations should be 1. ## Poisson spike train generated using the rule: t_i+1 = t_i - ## ln(x)/r, where x is drawn from Uniform distribuition and r is the ## rate. ## So in each case lets draw n=200 spikes from varying a rates. n <- 5000 rates <- c(0.3, 1, 2, 5, 10) for (r in rates) { t <- poisson.train(n, r, beg=300) expect_equal( tiling.corr(t, t), 1) } context("Tiling coefficent should return 0 for two independent Poisson trains.") ## We look at the distribution of many trials. the tails of the ## distribution should still be close to zero. Here we check that the ## 5% and 95% bins are less than 0.02 away from zero, and that they ## are opposite signs, so mirrored around zero. tiling.ind <- function() { n <- 3000; r <- 0.2 ## Compute tiling for a pair of Poisson trains -- should be close to zero. a <- poisson.train(n, r, beg=300) b <- poisson.train(n, r, beg=300) tiling.corr(a, b) } coefs <- replicate(1000, tiling.ind()) hist(coefs) percentiles <- quantile(coefs, probs=c(0.05, 0.95)) expect_true(max(abs(percentiles)) < 0.02) expect_true( prod(percentiles) < 0) #check opposite signs. context("Introducing some correlation between pairs of trains.") tiling.shared <- function(p.shared=0.6, n=2000, r=1) { master <- poisson.train(n, r) p.own <- (1-p.shared)/2 p <- c( p.own, p.own, p.shared) ## Each spike is in one of three states with prob given by P vector above. ## 1: in train 1 only. ## 2: in train 2 only. 
## 3: in both trains state <- sample(1:3, n, replace=TRUE, prob=p) a <- master[state != 2] b <- master[state != 1] tiling.corr(a, b) } p.shared <- seq(from=0, to=1, length=100) coef <- sapply(p.shared, tiling.shared) plot(p.shared, coef, pch=20, main='This should monotonically increase') expect_true( abs(coef[1]) < 0.1) expect_equal( coef[length(coef)], 1) context("Anti-correlated trains.") tiling.anti <- function(dt=0.05, n=2000) { master <- seq(from=0, by=0.5, length=n) odd <- rep_len(c(TRUE, FALSE), n) a <- master[odd] b <- master[!odd] tiling.corr(a,b,dt) } dts <- seq(from=0.05, to=1.0, length=100) dts <- c(0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0) coeff <- sapply(dts, tiling.anti) plot(dts, coeff, pch=20, type='b') ## We get NaN for dt=0.5 and dt=1 second. context("Checking the rec.time works") ## Generate a pair of uncorrelated trains, a and b. ## Then make trains a' and b' by simply adding a constant Z to all times. ## then check that tiling(a,b) == tiling(a+z, b+z) beg <- 0; end <- 2000; n <- 3000; z <- 5000; #large offset for 2nd set of trains for (i in seq_len(10)) { a <- sort(runif(n, beg, end)) b <- sort(runif(n, beg, end)) c1 <- tiling.corr(a, b, rec.time=c(beg, end)) c2 <- tiling.corr(a+z, b+z, rec.time=z + c(beg, end)) all.equal(c1, c2) } context("Pathological corner case with synthetic trains") ## This is when Pa=Tb=1 so both numerator and denominator are zero. ## What should we do about this case? Unlikely to happen for ## realistic trains. a <- 1; b <- 2 # one spike in each time. expect_equal(tiling.corr(a, b, dt=1, rec.time=c(0, 3)), 1) expect_equal(tiling.corr(a, b, dt=1), NaN) #is this correct?!? 
context("Check the array wide computation") data.file <- system.file("examples", "P9_CTRL_MY1_1A.txt", package = "sjemea") s <- jay.read.spikes(data.file) system.time(t1 <- tiling.allpairwise.old(s)) system.time(t2 <- tiling.allpairwise(s)) require(lattice) levelplot(t1) levelplot(t2) u <- upper.tri(t1, diag=TRUE) expect_equal(t1[u], t2[u])
/inst/tests/test_tiling.R
no_license
sje30/sjemea
R
false
false
4,146
r
## Test the tiling coefficients. ## if one train is empty, we get NaN. expect_equal( tiling.corr(1:5, double(0)), NaN) context("Tiling coefficent should return 1 for autocorrelated trains.") poisson.train <- function(n=1000, rate=1, beg=0) { ## Generate a Poisson spike train with N spikes and firing rate RATE. ## BEG is time of start of recording ## Check that the histogram looks exponentially distributed. ## hist( diff(poisson.train())) x <- runif(n) isi <- log(x) / rate spikes <- beg - cumsum(isi) spikes } ## Auto-correlations should be 1. ## Poisson spike train generated using the rule: t_i+1 = t_i - ## ln(x)/r, where x is drawn from Uniform distribuition and r is the ## rate. ## So in each case lets draw n=200 spikes from varying a rates. n <- 5000 rates <- c(0.3, 1, 2, 5, 10) for (r in rates) { t <- poisson.train(n, r, beg=300) expect_equal( tiling.corr(t, t), 1) } context("Tiling coefficent should return 0 for two independent Poisson trains.") ## We look at the distribution of many trials. the tails of the ## distribution should still be close to zero. Here we check that the ## 5% and 95% bins are less than 0.02 away from zero, and that they ## are opposite signs, so mirrored around zero. tiling.ind <- function() { n <- 3000; r <- 0.2 ## Compute tiling for a pair of Poisson trains -- should be close to zero. a <- poisson.train(n, r, beg=300) b <- poisson.train(n, r, beg=300) tiling.corr(a, b) } coefs <- replicate(1000, tiling.ind()) hist(coefs) percentiles <- quantile(coefs, probs=c(0.05, 0.95)) expect_true(max(abs(percentiles)) < 0.02) expect_true( prod(percentiles) < 0) #check opposite signs. context("Introducing some correlation between pairs of trains.") tiling.shared <- function(p.shared=0.6, n=2000, r=1) { master <- poisson.train(n, r) p.own <- (1-p.shared)/2 p <- c( p.own, p.own, p.shared) ## Each spike is in one of three states with prob given by P vector above. ## 1: in train 1 only. ## 2: in train 2 only. 
## 3: in both trains state <- sample(1:3, n, replace=TRUE, prob=p) a <- master[state != 2] b <- master[state != 1] tiling.corr(a, b) } p.shared <- seq(from=0, to=1, length=100) coef <- sapply(p.shared, tiling.shared) plot(p.shared, coef, pch=20, main='This should monotonically increase') expect_true( abs(coef[1]) < 0.1) expect_equal( coef[length(coef)], 1) context("Anti-correlated trains.") tiling.anti <- function(dt=0.05, n=2000) { master <- seq(from=0, by=0.5, length=n) odd <- rep_len(c(TRUE, FALSE), n) a <- master[odd] b <- master[!odd] tiling.corr(a,b,dt) } dts <- seq(from=0.05, to=1.0, length=100) dts <- c(0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0) coeff <- sapply(dts, tiling.anti) plot(dts, coeff, pch=20, type='b') ## We get NaN for dt=0.5 and dt=1 second. context("Checking the rec.time works") ## Generate a pair of uncorrelated trains, a and b. ## Then make trains a' and b' by simply adding a constant Z to all times. ## then check that tiling(a,b) == tiling(a+z, b+z) beg <- 0; end <- 2000; n <- 3000; z <- 5000; #large offset for 2nd set of trains for (i in seq_len(10)) { a <- sort(runif(n, beg, end)) b <- sort(runif(n, beg, end)) c1 <- tiling.corr(a, b, rec.time=c(beg, end)) c2 <- tiling.corr(a+z, b+z, rec.time=z + c(beg, end)) all.equal(c1, c2) } context("Pathological corner case with synthetic trains") ## This is when Pa=Tb=1 so both numerator and denominator are zero. ## What should we do about this case? Unlikely to happen for ## realistic trains. a <- 1; b <- 2 # one spike in each time. expect_equal(tiling.corr(a, b, dt=1, rec.time=c(0, 3)), 1) expect_equal(tiling.corr(a, b, dt=1), NaN) #is this correct?!? 
context("Check the array wide computation") data.file <- system.file("examples", "P9_CTRL_MY1_1A.txt", package = "sjemea") s <- jay.read.spikes(data.file) system.time(t1 <- tiling.allpairwise.old(s)) system.time(t2 <- tiling.allpairwise(s)) require(lattice) levelplot(t1) levelplot(t2) u <- upper.tri(t1, diag=TRUE) expect_equal(t1[u], t2[u])
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/other_plots.R \name{plot_timeline} \alias{plot_timeline} \title{Plot timeline as Gantt Plot} \usage{ plot_timeline( event, start, end = start + 1, label = NA, group = NA, title = "Curriculum Vitae Timeline", subtitle = "Bernardo Lares", interactive = FALSE, save = FALSE, subdir = NA ) } \arguments{ \item{event}{Vector. Event, role, label, or row.} \item{start}{Vector. Start date.} \item{end}{Vector. End date. Only one day be default if not defined} \item{label}{Vector. Place, institution, or label.} \item{group}{Vector. Academic, Work, Extracurricular... Pass as factor to keep a specific order} \item{title}{Character. Title for the plot} \item{subtitle}{Character. Subtitle for the plot} \item{interactive}{Boolean. Run with plotly?} \item{save}{Boolean. Save the output plot in our working directory} \item{subdir}{Character. Into which subdirectory do you wish to save the plot to?} } \value{ ggplot2 object } \description{ This function plots groups of observartions with timelines in a Gantt Plot way. Only works if start and end are date format values. 
} \examples{ Sys.unsetenv("LARES_FONT") # Temporal cols <- c("Role", "Place", "Type", "Start", "End") today <- as.character(Sys.Date()) cv <- data.frame(rbind( c("Marketing Science Partner", "Facebook", "Work Experience", "2019-12-09", today), c("Data Scientist Consultant", "MatrixDS", "Work Experience", "2018-09-01", today), c("R Community Contributor", "lares library", "Extra", "2018-07-18", today), c("Lead Data Scientist", "MEG", "Work Experience", "2019-01-15", "2019-12-09"), c("Head of Analytics", "Comparamejor/R5", "Work Experience", "2016-08-01", "2019-01-15"), c("Big Data & Data Science Programme", "UdC", "Academic", "2017-09-01", "2018-02-28"), c("Project Engineer", "Polytex", "Work Experience", "2016-05-15", "2016-09-01"), c("Big Data Analyst", "MEG", "Work Experience", "2016-01-01", "2016-04-30"), c("Advanced Excel Instructor", "ARTS", "Work Experience", "2015-11-01", "2016-04-30"), c("Continuous Improvement Intern", "PAVCO", "Work Experience", "2015-04-01", "2015-08-30"), c("Mechanical Design Intern", "SIGALCA", "Work Experience", "2013-07-01", "2013-09-30"), c("DJs Online Community Owner", "LaresDJ.com / SoloParaDJs", "Extra", "2010-01-05", "2020-05-20"), c("Mechanical Engineer Degree", "USB", "Academic", "2009-09-15", "2015-11-20"), c("DJ and Composer/Producer", "Legacy Discplay", "Extra", "2009-05-01", "2015-04-30") )) colnames(cv) <- cols plot_timeline( event = cv$Role, start = cv$Start, end = cv$End, label = cv$Place, # Simple trick to re-arrange the grids group = factor(cv$Type, levels = c("Work Experience", "Academic", "Extra")) ) } \seealso{ Other Visualization: \code{\link{distr}()}, \code{\link{freqs_df}()}, \code{\link{freqs_list}()}, \code{\link{freqs_plot}()}, \code{\link{freqs}()}, \code{\link{noPlot}()}, \code{\link{plot_chord}()}, \code{\link{plot_survey}()}, \code{\link{tree_var}()} } \concept{Visualization}
/man/plot_timeline.Rd
no_license
laresbernardo/lares
R
false
true
3,083
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/other_plots.R \name{plot_timeline} \alias{plot_timeline} \title{Plot timeline as Gantt Plot} \usage{ plot_timeline( event, start, end = start + 1, label = NA, group = NA, title = "Curriculum Vitae Timeline", subtitle = "Bernardo Lares", interactive = FALSE, save = FALSE, subdir = NA ) } \arguments{ \item{event}{Vector. Event, role, label, or row.} \item{start}{Vector. Start date.} \item{end}{Vector. End date. Only one day be default if not defined} \item{label}{Vector. Place, institution, or label.} \item{group}{Vector. Academic, Work, Extracurricular... Pass as factor to keep a specific order} \item{title}{Character. Title for the plot} \item{subtitle}{Character. Subtitle for the plot} \item{interactive}{Boolean. Run with plotly?} \item{save}{Boolean. Save the output plot in our working directory} \item{subdir}{Character. Into which subdirectory do you wish to save the plot to?} } \value{ ggplot2 object } \description{ This function plots groups of observartions with timelines in a Gantt Plot way. Only works if start and end are date format values. 
} \examples{ Sys.unsetenv("LARES_FONT") # Temporal cols <- c("Role", "Place", "Type", "Start", "End") today <- as.character(Sys.Date()) cv <- data.frame(rbind( c("Marketing Science Partner", "Facebook", "Work Experience", "2019-12-09", today), c("Data Scientist Consultant", "MatrixDS", "Work Experience", "2018-09-01", today), c("R Community Contributor", "lares library", "Extra", "2018-07-18", today), c("Lead Data Scientist", "MEG", "Work Experience", "2019-01-15", "2019-12-09"), c("Head of Analytics", "Comparamejor/R5", "Work Experience", "2016-08-01", "2019-01-15"), c("Big Data & Data Science Programme", "UdC", "Academic", "2017-09-01", "2018-02-28"), c("Project Engineer", "Polytex", "Work Experience", "2016-05-15", "2016-09-01"), c("Big Data Analyst", "MEG", "Work Experience", "2016-01-01", "2016-04-30"), c("Advanced Excel Instructor", "ARTS", "Work Experience", "2015-11-01", "2016-04-30"), c("Continuous Improvement Intern", "PAVCO", "Work Experience", "2015-04-01", "2015-08-30"), c("Mechanical Design Intern", "SIGALCA", "Work Experience", "2013-07-01", "2013-09-30"), c("DJs Online Community Owner", "LaresDJ.com / SoloParaDJs", "Extra", "2010-01-05", "2020-05-20"), c("Mechanical Engineer Degree", "USB", "Academic", "2009-09-15", "2015-11-20"), c("DJ and Composer/Producer", "Legacy Discplay", "Extra", "2009-05-01", "2015-04-30") )) colnames(cv) <- cols plot_timeline( event = cv$Role, start = cv$Start, end = cv$End, label = cv$Place, # Simple trick to re-arrange the grids group = factor(cv$Type, levels = c("Work Experience", "Academic", "Extra")) ) } \seealso{ Other Visualization: \code{\link{distr}()}, \code{\link{freqs_df}()}, \code{\link{freqs_list}()}, \code{\link{freqs_plot}()}, \code{\link{freqs}()}, \code{\link{noPlot}()}, \code{\link{plot_chord}()}, \code{\link{plot_survey}()}, \code{\link{tree_var}()} } \concept{Visualization}
# This file contains the functionality that wraps API archivefile services #' Get a list of files for a given location code and device category code, and #' filtered by others optional parameters. #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) file list obtained .getListByLocation = function(self, filters = list(), allPages = FALSE) { return(.getList(self, filters = filters, by = "location", allPages = allPages)) } #' Get a list of files available in Oceans 2.0 Archiving System for a given #' device code. The list of filenames can be filtered by time range. #' filtered by others optional parameters. #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) file list obtained .getListByDevice = function(self, filters = list(), allPages = FALSE) { return(.getList(self, filters, by = "device", allPages = allPages)) } #' Download the archive file with filename #' @keywords internal #' #' @param self Calling object #' @param filename Archive file filename #' @param overwrite When TRUE, downloaded files will overwrite any file with the #' same filename, otherwise file will be skipped #' #' @return (named list) Information on the download result .getFile = function(self, filename = "", overwrite = FALSE) { url <- .serviceUrl(self, "archivefiles") filters <- list( "token" = self$token, "method" = "getFile", "filename" = filename ) r <- .doRequest(self, url = url, filters = filters, getInfo = TRUE, rawResponse = TRUE) if (.respFailed(r)) return(r) response <- r$response outPath <- self$outPath saveStatus <- 
.saveAsFile(response, outPath, filename, overwrite) txtStatus <- "error" if (r$status == 200) { if (saveStatus == 0) txtStatus <- "completed" else if (saveStatus == -2) txtStatus <- "skipped" return(list( "url" = .getDownloadUrl(self, filename), "status" = txtStatus, "size" = length(response$content), "downloadTime" = round(r$duration, digits = 3), "file" = filename )) } return(list( "url" = "", "status" = txtStatus, "size" = 0, "downloadTime" = 0, "file" = "" )) } #' Downloads all archive files that match the filters #' Internally will use geListByDevice or getListByLocation and getFile all files #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' @param overwrite When TRUE, downloaded files will overwrite any file with the #' same filename, otherwise file will be skipped #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) Information on the results of the operation, with "downloadResults" #' for each file downloaded and general "stats" .getDirectFiles = function(self, filters = list(), overwrite = FALSE, allPages = FALSE) { filNames <- names(filters) # make sure we only get a simple list of files if ("returnOptions" %in% filNames) { filters[["returnOptions"]] <- NULL } # Get a list of files if (("locationCode" %in% filNames) && ("deviceCategoryCode" %in% filNames)) { dataRows <- .getListByLocation(self, filters = filters, allPages = allPages) } else if ("deviceCode" %in% filNames) { dataRows <- .getListByDevice(self, filters = filters, allPages = allPages) } else { stop("getDirectFiles filters require either a combination of (locationCode) and (deviceCategoryCode), or a (deviceCode) present.") } n <- length(dataRows$files) cat(sprintf("Obtained a list of %d files to download.\n", n)) # Download the files obtained tries <- 1 successes <- 0 size <- 0 time <- 0 downInfos <- list() for 
(filename in dataRows$files) { # only download if file doesn"t exist (or overwrite is True) outPath <- self$outPath filePath <- sprintf("%s/%s", outPath, filename) fileExists <- file.exists(filePath) if (!fileExists || (fileExists && overwrite)) { cat(sprintf(" (%d of %d) Downloading file: \"%s\"\n", tries, n, filename)) downInfo <- .getFile(self, filename, overwrite) # Skip this file if the request failed if (.respFailed(downInfo)) { cat(sprintf(" Skipping \"%s\" due to an error.\n", filename)) tries <- tries + 1 errorInfo <- list( "url" = .getDownloadUrl(self, filename), "status" = "error", "size" = 0, "downloadTime" = 0, "file" = "" ) downInfos <- .appendList(downInfos, errorInfo) next } size <- size + downInfo$size time <- time + downInfo$downloadTime tries <- tries + 1 if (downInfo$status == "completed") { successes <- successes + 1 } downInfos <- .appendList(downInfos, downInfo) } else { cat(sprintf(" Skipping \"%s\": File already exists.\n", filename)) downInfo <- list( "url" = .getDownloadUrl(self, filename), "status" = "skipped", "size" = 0, "downloadTime" = 0, "file" = filename ) downInfos <- .appendList(downInfos, downInfo) } } cat(sprintf("%d files (%s) downloaded\n", successes, .formatSize(size))) cat(sprintf("Total Download Time: %s\n", .formatDuration(time))) return(list( "downloadResults" = downInfos, "stats" = list( "totalSize" = size, "downloadTime" = time, "fileCount" = successes ) )) } #' Given a filename, returns an archivefile absolute download URL #' @keywords internal #' #' @param self Calling object #' @param filename (character) archive file name #' #' @return (character) download URL .getDownloadUrl = function(self, filename = "") { url <- .serviceUrl(self, "archivefiles") return(sprintf("%s?method=getFile&filename=%s&token=%s", url, filename, self$token)) } #' A generic wrapper for getListByLocation() and getListByDevice() #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' 
@param by One of: "location", "device" #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) Information on the list of files obtained .getList = function(self, filters = list(), by = "location", allPages = FALSE) { url <- .serviceUrl(self, "archivefiles") filters[["token"]] <- self$token filters[["method"]] <- ifelse((by == "location"), "getListByLocation", "getListByDevice") # parse and remove the artificial parameter extension extension <- "" if ("extension" %in% names(filters)) { extension <- filters$extension # Don"t remove yet } if (allPages) { mp <- MultiPage$new(self$showInfo, self$timeout) result <- mp$getAllPages("archivefiles", url, filters) } else { if ("extension" %in% names(filters)) { filters[["extension"]] <- NULL } result <- .doRequest(self, url, filters) result <- .filterByExtension(self, result, extension) } return(result) } #' Filter file list results to only those where the filename ends with the extension #' If extension is empty no change will be made #' @keywords internal #' #' @param self Calling object #' @param results Results as otained by getListByLocation() or getListByDevice() #' @param extension (character) Extension to search for (i.e. 
"txt") #' #' @return Filtered list .filterByExtension = function(self, results = list(), extension = "") { if (extension == "") { return(results) } extension <- sprintf(".%s", extension) # match the dot to avoid matching substrings n <- stringi::stri_length(extension) filtered <- list() # appending is faster than deleting # determine the row structure rowFormat <- "filename" if (length(results$files) > 0) { if (typeof(results$files[[1]]) == "list") { rowFormat <- "list" } } # filter for (file in results$files) { if (rowFormat == "filename") { fileExt <- stringi::stri_sub(file, from = -n) if (fileExt == extension) filtered <- append(filtered, file) } else { fileExt <- stringi::stri_sub(file$filename, from = -n) if (fileExt == extension) filtered <- .appendList(filtered, file) } } results$files <- filtered return(results) }
/R/onc_archive.R
permissive
OceanNetworksCanada/api-r-client
R
false
false
9,719
r
# This file contains the functionality that wraps API archivefile services #' Get a list of files for a given location code and device category code, and #' filtered by others optional parameters. #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) file list obtained .getListByLocation = function(self, filters = list(), allPages = FALSE) { return(.getList(self, filters = filters, by = "location", allPages = allPages)) } #' Get a list of files available in Oceans 2.0 Archiving System for a given #' device code. The list of filenames can be filtered by time range. #' filtered by others optional parameters. #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) file list obtained .getListByDevice = function(self, filters = list(), allPages = FALSE) { return(.getList(self, filters, by = "device", allPages = allPages)) } #' Download the archive file with filename #' @keywords internal #' #' @param self Calling object #' @param filename Archive file filename #' @param overwrite When TRUE, downloaded files will overwrite any file with the #' same filename, otherwise file will be skipped #' #' @return (named list) Information on the download result .getFile = function(self, filename = "", overwrite = FALSE) { url <- .serviceUrl(self, "archivefiles") filters <- list( "token" = self$token, "method" = "getFile", "filename" = filename ) r <- .doRequest(self, url = url, filters = filters, getInfo = TRUE, rawResponse = TRUE) if (.respFailed(r)) return(r) response <- r$response outPath <- self$outPath saveStatus <- 
.saveAsFile(response, outPath, filename, overwrite) txtStatus <- "error" if (r$status == 200) { if (saveStatus == 0) txtStatus <- "completed" else if (saveStatus == -2) txtStatus <- "skipped" return(list( "url" = .getDownloadUrl(self, filename), "status" = txtStatus, "size" = length(response$content), "downloadTime" = round(r$duration, digits = 3), "file" = filename )) } return(list( "url" = "", "status" = txtStatus, "size" = 0, "downloadTime" = 0, "file" = "" )) } #' Downloads all archive files that match the filters #' Internally will use geListByDevice or getListByLocation and getFile all files #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' @param overwrite When TRUE, downloaded files will overwrite any file with the #' same filename, otherwise file will be skipped #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) Information on the results of the operation, with "downloadResults" #' for each file downloaded and general "stats" .getDirectFiles = function(self, filters = list(), overwrite = FALSE, allPages = FALSE) { filNames <- names(filters) # make sure we only get a simple list of files if ("returnOptions" %in% filNames) { filters[["returnOptions"]] <- NULL } # Get a list of files if (("locationCode" %in% filNames) && ("deviceCategoryCode" %in% filNames)) { dataRows <- .getListByLocation(self, filters = filters, allPages = allPages) } else if ("deviceCode" %in% filNames) { dataRows <- .getListByDevice(self, filters = filters, allPages = allPages) } else { stop("getDirectFiles filters require either a combination of (locationCode) and (deviceCategoryCode), or a (deviceCode) present.") } n <- length(dataRows$files) cat(sprintf("Obtained a list of %d files to download.\n", n)) # Download the files obtained tries <- 1 successes <- 0 size <- 0 time <- 0 downInfos <- list() for 
(filename in dataRows$files) { # only download if file doesn"t exist (or overwrite is True) outPath <- self$outPath filePath <- sprintf("%s/%s", outPath, filename) fileExists <- file.exists(filePath) if (!fileExists || (fileExists && overwrite)) { cat(sprintf(" (%d of %d) Downloading file: \"%s\"\n", tries, n, filename)) downInfo <- .getFile(self, filename, overwrite) # Skip this file if the request failed if (.respFailed(downInfo)) { cat(sprintf(" Skipping \"%s\" due to an error.\n", filename)) tries <- tries + 1 errorInfo <- list( "url" = .getDownloadUrl(self, filename), "status" = "error", "size" = 0, "downloadTime" = 0, "file" = "" ) downInfos <- .appendList(downInfos, errorInfo) next } size <- size + downInfo$size time <- time + downInfo$downloadTime tries <- tries + 1 if (downInfo$status == "completed") { successes <- successes + 1 } downInfos <- .appendList(downInfos, downInfo) } else { cat(sprintf(" Skipping \"%s\": File already exists.\n", filename)) downInfo <- list( "url" = .getDownloadUrl(self, filename), "status" = "skipped", "size" = 0, "downloadTime" = 0, "file" = filename ) downInfos <- .appendList(downInfos, downInfo) } } cat(sprintf("%d files (%s) downloaded\n", successes, .formatSize(size))) cat(sprintf("Total Download Time: %s\n", .formatDuration(time))) return(list( "downloadResults" = downInfos, "stats" = list( "totalSize" = size, "downloadTime" = time, "fileCount" = successes ) )) } #' Given a filename, returns an archivefile absolute download URL #' @keywords internal #' #' @param self Calling object #' @param filename (character) archive file name #' #' @return (character) download URL .getDownloadUrl = function(self, filename = "") { url <- .serviceUrl(self, "archivefiles") return(sprintf("%s?method=getFile&filename=%s&token=%s", url, filename, self$token)) } #' A generic wrapper for getListByLocation() and getListByDevice() #' @keywords internal #' #' @param self Calling object #' @param filters (named list) describe the data origin #' 
@param by One of: "location", "device" #' @param allPages When TRUE, if the data is too long to fit a single request, #' multiple pages will be requested until all data is obatined #' #' @return (named list) Information on the list of files obtained .getList = function(self, filters = list(), by = "location", allPages = FALSE) { url <- .serviceUrl(self, "archivefiles") filters[["token"]] <- self$token filters[["method"]] <- ifelse((by == "location"), "getListByLocation", "getListByDevice") # parse and remove the artificial parameter extension extension <- "" if ("extension" %in% names(filters)) { extension <- filters$extension # Don"t remove yet } if (allPages) { mp <- MultiPage$new(self$showInfo, self$timeout) result <- mp$getAllPages("archivefiles", url, filters) } else { if ("extension" %in% names(filters)) { filters[["extension"]] <- NULL } result <- .doRequest(self, url, filters) result <- .filterByExtension(self, result, extension) } return(result) } #' Filter file list results to only those where the filename ends with the extension #' If extension is empty no change will be made #' @keywords internal #' #' @param self Calling object #' @param results Results as otained by getListByLocation() or getListByDevice() #' @param extension (character) Extension to search for (i.e. 
"txt") #' #' @return Filtered list .filterByExtension = function(self, results = list(), extension = "") { if (extension == "") { return(results) } extension <- sprintf(".%s", extension) # match the dot to avoid matching substrings n <- stringi::stri_length(extension) filtered <- list() # appending is faster than deleting # determine the row structure rowFormat <- "filename" if (length(results$files) > 0) { if (typeof(results$files[[1]]) == "list") { rowFormat <- "list" } } # filter for (file in results$files) { if (rowFormat == "filename") { fileExt <- stringi::stri_sub(file, from = -n) if (fileExt == extension) filtered <- append(filtered, file) } else { fileExt <- stringi::stri_sub(file$filename, from = -n) if (fileExt == extension) filtered <- .appendList(filtered, file) } } results$files <- filtered return(results) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/days_in_yyyymm.R \name{days_in_yyyymm} \alias{days_in_yyyymm} \title{Return the number of days in a given month} \usage{ days_in_yyyymm(yyyymm) } \arguments{ \item{yyyymm}{Year/month in YYYYMM format} } \value{ number of days in month } \description{ Return the number of days in a given month }
/wsim.lsm/man/days_in_yyyymm.Rd
permissive
isciences/wsim
R
false
true
374
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/days_in_yyyymm.R \name{days_in_yyyymm} \alias{days_in_yyyymm} \title{Return the number of days in a given month} \usage{ days_in_yyyymm(yyyymm) } \arguments{ \item{yyyymm}{Year/month in YYYYMM format} } \value{ number of days in month } \description{ Return the number of days in a given month }
#'Calculate trend correlations based on mvgam latent factor loadings #' #'This function uses samples of latent trends for each series from a fitted #'mvgam model to calculates correlations among series' trends #' #'@importFrom stats cov2cor cov #'@param object \code{list} object returned from \code{mvgam} #'@return A \code{list} object containing the mean posterior correlations and the full array of posterior correlations #'@export lv_correlations = function(object){ # Check arguments if (!(inherits(object, "mvgam"))) { stop('argument "object" must be of class "mvgam"') } # Series start and end indices ends <- seq(0, dim(mcmc_chains(object$model_output, 'ypred'))[2], length.out = NCOL(object$ytimes) + 1) starts <- ends + 1 starts <- c(1, starts[-c(1, (NCOL(object$ytimes)+1))]) ends <- ends[-1] # Total number of MCMC samples n_preds <- dim(mcmc_chains(object$model_output, 'trend')[,starts[1]:ends[1]])[1] data_train <- object$obs_data # Total number of observations per series if(inherits(data_train, 'list')){ n_obs <- length(data_train$y) / NCOL(object$ytimes) } else { n_obs <- NROW(data_train) / NCOL(object$ytimes) } # Extract series trends series_trends <- lapply(seq_len(length(ends)), function(y){ if(object$fit_engine == 'stan'){ # For stan objects, trend is stored as a vector in column-major order mcmc_chains(object$model_output, 'trend')[,seq(y, dim(mcmc_chains(object$model_output, 'trend'))[2], by = NCOL(object$ytimes))] } else { mcmc_chains(object$model_output, 'trend')[,starts[y]:ends[y]][,1:n_obs] } }) # Get list of trend correlation estimates all_trend_cors <- lapply(seq_len(n_preds), function(x){ cov2cor(cov(do.call(cbind, lapply(series_trends, function(y){ y[x,] })))) }) # Calculate posterior mean correlations mean_correlations <- Reduce(`+`, all_trend_cors) / length(all_trend_cors) rownames(mean_correlations) <- colnames(mean_correlations) <- levels(data_train$series) list(mean_correlations = mean_correlations, posterior_correlations = all_trend_cors) }
/R/lv_correlations.R
permissive
nicholasjclark/mvgam
R
false
false
2,236
r
#'Calculate trend correlations based on mvgam latent factor loadings #' #'This function uses samples of latent trends for each series from a fitted #'mvgam model to calculates correlations among series' trends #' #'@importFrom stats cov2cor cov #'@param object \code{list} object returned from \code{mvgam} #'@return A \code{list} object containing the mean posterior correlations and the full array of posterior correlations #'@export lv_correlations = function(object){ # Check arguments if (!(inherits(object, "mvgam"))) { stop('argument "object" must be of class "mvgam"') } # Series start and end indices ends <- seq(0, dim(mcmc_chains(object$model_output, 'ypred'))[2], length.out = NCOL(object$ytimes) + 1) starts <- ends + 1 starts <- c(1, starts[-c(1, (NCOL(object$ytimes)+1))]) ends <- ends[-1] # Total number of MCMC samples n_preds <- dim(mcmc_chains(object$model_output, 'trend')[,starts[1]:ends[1]])[1] data_train <- object$obs_data # Total number of observations per series if(inherits(data_train, 'list')){ n_obs <- length(data_train$y) / NCOL(object$ytimes) } else { n_obs <- NROW(data_train) / NCOL(object$ytimes) } # Extract series trends series_trends <- lapply(seq_len(length(ends)), function(y){ if(object$fit_engine == 'stan'){ # For stan objects, trend is stored as a vector in column-major order mcmc_chains(object$model_output, 'trend')[,seq(y, dim(mcmc_chains(object$model_output, 'trend'))[2], by = NCOL(object$ytimes))] } else { mcmc_chains(object$model_output, 'trend')[,starts[y]:ends[y]][,1:n_obs] } }) # Get list of trend correlation estimates all_trend_cors <- lapply(seq_len(n_preds), function(x){ cov2cor(cov(do.call(cbind, lapply(series_trends, function(y){ y[x,] })))) }) # Calculate posterior mean correlations mean_correlations <- Reduce(`+`, all_trend_cors) / length(all_trend_cors) rownames(mean_correlations) <- colnames(mean_correlations) <- levels(data_train$series) list(mean_correlations = mean_correlations, posterior_correlations = all_trend_cors) }
# read the dataset monsterdata <- read.csv("household_power_consumption.txt", header = T, sep = ";", na.strings = "?", stringsAsFactors = F) # format the dates monsterdata$Date <- as.Date(monsterdata$Date, "%d/%m/%Y") # select the data required data <- monsterdata[monsterdata$Date >= "2007-02-01" & monsterdata$Date <= "2007-02-02",] # Convert dates datetime <- paste(as.Date(data$Date), data$Time) data$datetime <- as.POSIXct(datetime) # Create plot hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red") # Save to file dev.copy(png, file = "plot1.png", height = 480, width = 480) dev.off()
/plot1.R
no_license
sequeiraneil/EDA
R
false
false
724
r
# read the dataset monsterdata <- read.csv("household_power_consumption.txt", header = T, sep = ";", na.strings = "?", stringsAsFactors = F) # format the dates monsterdata$Date <- as.Date(monsterdata$Date, "%d/%m/%Y") # select the data required data <- monsterdata[monsterdata$Date >= "2007-02-01" & monsterdata$Date <= "2007-02-02",] # Convert dates datetime <- paste(as.Date(data$Date), data$Time) data$datetime <- as.POSIXct(datetime) # Create plot hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red") # Save to file dev.copy(png, file = "plot1.png", height = 480, width = 480) dev.off()
#' A sample dataset for demostration purposes. #' #' @details #' A data frame containing unreal sample data for demostration purposes only. It also serves as an example for a template of how user collected data should look. #' Use this dataset to learn about the functions on this package. #' #' @format Data frame with 13 variables and 20 observations: #' \describe{ #' \item{Athlete}{A character string. Name of the subjects.} #' \item{`Date of Birth`}{A date object referring to the DOB for each athlete.} #' \item{`Testing Date`}{A date object referring to the date of testing of each athlete.} #' \item{Gender}{A character string. Refers to Male or Female.} #' \item{`Weight1 (KG)`}{A number. Weight in kgs. Measurement 1.} #' \item{`Weight2 (KG)`}{A number. Weight in kgs. Measurement 2.} #' \item{`Height1 (CM)`}{A number. Height in cms. Measurement 1.} #' \item{`Height2 (CM)`}{A number. Height in cms. Measurement 2.} #' \item{`Sitting Height1 (CM)`}{A number. Length of the trunk in cms for a seated measurement. Measurement 1.} #' \item{`Sitting Height2 (CM)`}{A number. Length of the trunk in cms for a seated measurement. Measurement 2.} #' \item{`Bench Height2 (CM)`}{A number. If the sitting height is done using a chair or a bench, indicate its length, otherwise use 0.} #' \item{`Mothers Height (CM)`}{A number. The standing height of the athlete's mother in cms.} #' \item{`Fathers Height (CM)`}{A number. The standing height of the athlete's father in cms.} #' #' } #' @usage data_sample #' "data_sample"
/R/data_sample.R
permissive
CFPC-performance/matuR
R
false
false
1,564
r
#' A sample dataset for demostration purposes. #' #' @details #' A data frame containing unreal sample data for demostration purposes only. It also serves as an example for a template of how user collected data should look. #' Use this dataset to learn about the functions on this package. #' #' @format Data frame with 13 variables and 20 observations: #' \describe{ #' \item{Athlete}{A character string. Name of the subjects.} #' \item{`Date of Birth`}{A date object referring to the DOB for each athlete.} #' \item{`Testing Date`}{A date object referring to the date of testing of each athlete.} #' \item{Gender}{A character string. Refers to Male or Female.} #' \item{`Weight1 (KG)`}{A number. Weight in kgs. Measurement 1.} #' \item{`Weight2 (KG)`}{A number. Weight in kgs. Measurement 2.} #' \item{`Height1 (CM)`}{A number. Height in cms. Measurement 1.} #' \item{`Height2 (CM)`}{A number. Height in cms. Measurement 2.} #' \item{`Sitting Height1 (CM)`}{A number. Length of the trunk in cms for a seated measurement. Measurement 1.} #' \item{`Sitting Height2 (CM)`}{A number. Length of the trunk in cms for a seated measurement. Measurement 2.} #' \item{`Bench Height2 (CM)`}{A number. If the sitting height is done using a chair or a bench, indicate its length, otherwise use 0.} #' \item{`Mothers Height (CM)`}{A number. The standing height of the athlete's mother in cms.} #' \item{`Fathers Height (CM)`}{A number. The standing height of the athlete's father in cms.} #' #' } #' @usage data_sample #' "data_sample"
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/swan_reports.R \name{ann_rep_grp2_s} \alias{ann_rep_grp2_s} \title{Function for annual Swan River report plots and tables for group 2 metrics} \usage{ ann_rep_grp2_s(outpath, data, surface) } \arguments{ \item{outpath}{filepath to desired export location.} \item{data}{the data object created from running `swan_WIN_report_data`.} \item{surface}{colour for surface plots. Can be named colour or hex format.} } \value{ a separate panel plot for each metric and a csv of metrics for inclusion to a table. } \description{ \code{ann_rep_grp2_s} takes an export filepath, data, and surface plot colours and produces panel plots and tables for group 2 metrics. } \details{ Group 2 metrics are a grouping based on a common collection method, sample type and display parameters and in this case includes, alkalinity, dissolved organic carbon and TSS. Outputs will be exported to two folders at the outpath location. `s_panels/` for plots and `s_tables/` for data tables. These will need to be created if they don't exist. } \examples{ \dontrun{ ann_rep_grp2_s(outpath, data, surface = "blue")} } \author{ Bart Huntley, \email{bart.huntley@dbca.wa.gov.au} For more details see \url{https://dbca-wa.github.io/rivRmon/index.html} {the rivRmon website} }
/man/ann_rep_grp2_s.Rd
permissive
dbca-wa/rivRmon
R
false
true
1,354
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/swan_reports.R \name{ann_rep_grp2_s} \alias{ann_rep_grp2_s} \title{Function for annual Swan River report plots and tables for group 2 metrics} \usage{ ann_rep_grp2_s(outpath, data, surface) } \arguments{ \item{outpath}{filepath to desired export location.} \item{data}{the data object created from running `swan_WIN_report_data`.} \item{surface}{colour for surface plots. Can be named colour or hex format.} } \value{ a separate panel plot for each metric and a csv of metrics for inclusion to a table. } \description{ \code{ann_rep_grp2_s} takes an export filepath, data, and surface plot colours and produces panel plots and tables for group 2 metrics. } \details{ Group 2 metrics are a grouping based on a common collection method, sample type and display parameters and in this case includes, alkalinity, dissolved organic carbon and TSS. Outputs will be exported to two folders at the outpath location. `s_panels/` for plots and `s_tables/` for data tables. These will need to be created if they don't exist. } \examples{ \dontrun{ ann_rep_grp2_s(outpath, data, surface = "blue")} } \author{ Bart Huntley, \email{bart.huntley@dbca.wa.gov.au} For more details see \url{https://dbca-wa.github.io/rivRmon/index.html} {the rivRmon website} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/drop_delta.R \name{drop_delta} \alias{drop_delta} \title{Get a list of Deltas} \usage{ drop_delta(cursor = NULL, locale = NULL, path_prefix = NULL, include_media_info = NULL, dtoken = get_dropbox_token()) } \arguments{ \item{cursor}{The last cursor} \item{locale}{Dropbox uses the locale parameter to specify language settings of content responses. If your app supports any language other than English, insert the appropriate IETF language tag. When a supported language is specified, Dropbox will returned translated size and/or user_error fields (where applicable)} \item{path_prefix}{The path to subset} \item{include_media_info}{Set to \code{TRUE}} \item{dtoken}{The Dropbox token generated by \code{\link{drop_auth}}. rdrop2 will try to automatically locate your local credential cache and use them. However, if the credentials are not found, the function will initiate a new authentication request. You can override this in \code{\link{drop_auth}} by pointing to a different location where your credentials are stored.} } \description{ Get a list of Deltas } \examples{ \dontrun{ z <- drop_delta(path_prefix = "/Public") # If no files have changed during this time, entries will be NULL drop_delta(cursor = z$cursor, path_prefix = "/Public") } }
/man/drop_delta.Rd
no_license
scheidec/rdrop2
R
false
true
1,336
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/drop_delta.R \name{drop_delta} \alias{drop_delta} \title{Get a list of Deltas} \usage{ drop_delta(cursor = NULL, locale = NULL, path_prefix = NULL, include_media_info = NULL, dtoken = get_dropbox_token()) } \arguments{ \item{cursor}{The last cursor} \item{locale}{Dropbox uses the locale parameter to specify language settings of content responses. If your app supports any language other than English, insert the appropriate IETF language tag. When a supported language is specified, Dropbox will returned translated size and/or user_error fields (where applicable)} \item{path_prefix}{The path to subset} \item{include_media_info}{Set to \code{TRUE}} \item{dtoken}{The Dropbox token generated by \code{\link{drop_auth}}. rdrop2 will try to automatically locate your local credential cache and use them. However, if the credentials are not found, the function will initiate a new authentication request. You can override this in \code{\link{drop_auth}} by pointing to a different location where your credentials are stored.} } \description{ Get a list of Deltas } \examples{ \dontrun{ z <- drop_delta(path_prefix = "/Public") # If no files have changed during this time, entries will be NULL drop_delta(cursor = z$cursor, path_prefix = "/Public") } }
#FIGURE OUT IF THE CIRCLES ARE 1 STANDARD DEV #DO SOMETHING WITH THE TRUE LABELS #MAKE SURE DATA.KEEP SPECIFIES IT IS AFTER NORMALIZATION #MAKE SURE IT'S POSITIVE DEF AND SYMMETRIC #if variance parameter is provided we need to check some things to make sure it's valid #CAN ONLY INITIALIZE ALL THE CLUSTERS TO THE SAME VARIANCE library(glmnet) library(mclust) setwd("C:/Users/UikosPC/Dropbox/Han Liu - private/package - sml/SML/R") source("standard.R") source("error_handling.R") source("helper_func.R") source("em_gaussian.R") x = faithful res = sml_em_gaussian(x) res summary(res) summary(res,show.param=FALSE) #EM clustering selected the Mixture of Spherical Gaussians (VII) model #according to BIC with 4 clusters achieving a log-likelihood of -384.54. plot(res) plot(res,plot.type="classification") plot(res,plot.type=c("classification","uncertainty")) plot(res,plot.type=c("classification","uncertainty"),plot.pca=TRUE) plot(res,plot.type=c("classification","uncertainty"),plot.pca=TRUE,asp=FALSE) plot(res,plot.type=c("classification","uncertainty"),plot.pca=TRUE,asp=FALSE,pty="m") plot(res,plot.type=c("classification","uncertainty"),asp=FALSE,pty="m") plot(res,plot.type="classification",asp=FALSE,pty="m") plot(res,plot.type=c("classification","uncertainty"),asp=FALSE,pty="m",show.title=FALSE) plot(res,plot.type=c("classification","uncertainty"),asp=FALSE,pty="m",show.more=TRUE) plot(res,plot.type=c("classification","uncertainty"),plot.minUncer = 0.8,plot.quantiles=c(0.3,0.5)) plot(res,plot.type=c("classification","uncertainty"),plot.minUncer = 0.8,plot.quantiles=c(0.1,0.2)) plot(res,plot.type=c("classification","uncertainty"),plot.minUncer = 0.8,plot.quantiles=c(0.1,0.2),cex=2) plot(res) plot(res,plot.type="BIC") x = iris res = sml_em_gaussian(x,data.normalize=TRUE) plot(res,plot.type=c("density","perspective","classification","uncertainty")) plot(res,plot.type=c("density","perspective","classification","uncertainty"),mfrow=c(1,4)) res = 
sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,plot.type=c("classification","uncertainty")) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,plot.pca=TRUE) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,data.normalize=TRUE) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,data.normalize=TRUE,plot.type=c("classification","uncertainty","ranked uncertainty")) res = sml_em_gaussian(x,data.normalize=TRUE,k=3) plot(res) res = sml_em_gaussian(x,demo.show=TRUE,demo.ani=TRUE,plot.pca=TRUE,plot.type="classification") plot(res) plot(res,mfrow=c(5,1)) plot(res,mfrow=c(1,5),show.more=TRUE) summary(res) reset_plot() res = sml_em_gaussian(x,data.normalize=TRUE) summary(res) res = sml_em_gaussian(x,data.normalize=TRUE,k=3:5) plot(res,ask=TRUE) res = sml_em_gaussian(x,mean=x[1:3,1:4],demo.show=TRUE) res = sml_em_gaussian(x,mean=as.matrix(x[1:3,1:4]),demo.show=TRUE,k=3) res = sml_em_gaussian(x,mean=as.matrix(x[1:3,1:4]),demo.show=TRUE,k=3,plot.type=c("classification","perspective")) res = sml_em_gaussian(x,demo.show=TRUE,iter.max=2) #test the various mixture gaussians res = sml_em_gaussian(x,model=c("spherical","diagonal","ellipsoidal")) res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,k=4) res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,k=4,plot.type=c("classification","perspective")) res = sml_em_gaussian(x,model=c("EII"),demo.show=TRUE,k=4) res = sml_em_gaussian(x,model=c("EII","VVV")) #trying various variances res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,proportion=c(.1,.9)) res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,proportion=c(.1,.9),k=2,plot.type=c("classification","density")) res = sml_em_gaussian(x,model=c("VII"),demo.show=TRUE,proportion=c(.1,.9),k=2,plot.type=c("classification","density"),variance=5,mean=x[1:2,1:4]) res = sml_em_gaussian(x,model=c("VVI"),demo.show=TRUE,k=2,plot.type=c("classification","density"),variance=c(1,5,2,3),mean=x[1:2,1:4]) tmp = 
matrix(c(2,4,2,6, 7,1,2,3, 5,7,6,5, 1,1,2,3),ncol=4,nrow=4) tmp = t(tmp)%*%tmp res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,k=2,plot.type=c("classification","density"),variance=tmp,mean=x[1:2,1:4]) res = sml_em_gaussian(x,k=1) summary(res) plot(res)
/testing/script_test_em_gaussian.R
no_license
linnykos/SML_package
R
false
false
4,329
r
#FIGURE OUT IF THE CIRCLES ARE 1 STANDARD DEV #DO SOMETHING WITH THE TRUE LABELS #MAKE SURE DATA.KEEP SPECIFIES IT IS AFTER NORMALIZATION #MAKE SURE IT'S POSITIVE DEF AND SYMMETRIC #if variance parameter is provided we need to check some things to make sure it's valid #CAN ONLY INITIALIZE ALL THE CLUSTERS TO THE SAME VARIANCE library(glmnet) library(mclust) setwd("C:/Users/UikosPC/Dropbox/Han Liu - private/package - sml/SML/R") source("standard.R") source("error_handling.R") source("helper_func.R") source("em_gaussian.R") x = faithful res = sml_em_gaussian(x) res summary(res) summary(res,show.param=FALSE) #EM clustering selected the Mixture of Spherical Gaussians (VII) model #according to BIC with 4 clusters achieving a log-likelihood of -384.54. plot(res) plot(res,plot.type="classification") plot(res,plot.type=c("classification","uncertainty")) plot(res,plot.type=c("classification","uncertainty"),plot.pca=TRUE) plot(res,plot.type=c("classification","uncertainty"),plot.pca=TRUE,asp=FALSE) plot(res,plot.type=c("classification","uncertainty"),plot.pca=TRUE,asp=FALSE,pty="m") plot(res,plot.type=c("classification","uncertainty"),asp=FALSE,pty="m") plot(res,plot.type="classification",asp=FALSE,pty="m") plot(res,plot.type=c("classification","uncertainty"),asp=FALSE,pty="m",show.title=FALSE) plot(res,plot.type=c("classification","uncertainty"),asp=FALSE,pty="m",show.more=TRUE) plot(res,plot.type=c("classification","uncertainty"),plot.minUncer = 0.8,plot.quantiles=c(0.3,0.5)) plot(res,plot.type=c("classification","uncertainty"),plot.minUncer = 0.8,plot.quantiles=c(0.1,0.2)) plot(res,plot.type=c("classification","uncertainty"),plot.minUncer = 0.8,plot.quantiles=c(0.1,0.2),cex=2) plot(res) plot(res,plot.type="BIC") x = iris res = sml_em_gaussian(x,data.normalize=TRUE) plot(res,plot.type=c("density","perspective","classification","uncertainty")) plot(res,plot.type=c("density","perspective","classification","uncertainty"),mfrow=c(1,4)) res = 
sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,plot.type=c("classification","uncertainty")) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,plot.pca=TRUE) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,data.normalize=TRUE) res = sml_em_gaussian(x,demo.show=TRUE,plot.speed=0.25,data.normalize=TRUE,plot.type=c("classification","uncertainty","ranked uncertainty")) res = sml_em_gaussian(x,data.normalize=TRUE,k=3) plot(res) res = sml_em_gaussian(x,demo.show=TRUE,demo.ani=TRUE,plot.pca=TRUE,plot.type="classification") plot(res) plot(res,mfrow=c(5,1)) plot(res,mfrow=c(1,5),show.more=TRUE) summary(res) reset_plot() res = sml_em_gaussian(x,data.normalize=TRUE) summary(res) res = sml_em_gaussian(x,data.normalize=TRUE,k=3:5) plot(res,ask=TRUE) res = sml_em_gaussian(x,mean=x[1:3,1:4],demo.show=TRUE) res = sml_em_gaussian(x,mean=as.matrix(x[1:3,1:4]),demo.show=TRUE,k=3) res = sml_em_gaussian(x,mean=as.matrix(x[1:3,1:4]),demo.show=TRUE,k=3,plot.type=c("classification","perspective")) res = sml_em_gaussian(x,demo.show=TRUE,iter.max=2) #test the various mixture gaussians res = sml_em_gaussian(x,model=c("spherical","diagonal","ellipsoidal")) res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,k=4) res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,k=4,plot.type=c("classification","perspective")) res = sml_em_gaussian(x,model=c("EII"),demo.show=TRUE,k=4) res = sml_em_gaussian(x,model=c("EII","VVV")) #trying various variances res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,proportion=c(.1,.9)) res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,proportion=c(.1,.9),k=2,plot.type=c("classification","density")) res = sml_em_gaussian(x,model=c("VII"),demo.show=TRUE,proportion=c(.1,.9),k=2,plot.type=c("classification","density"),variance=5,mean=x[1:2,1:4]) res = sml_em_gaussian(x,model=c("VVI"),demo.show=TRUE,k=2,plot.type=c("classification","density"),variance=c(1,5,2,3),mean=x[1:2,1:4]) tmp = 
matrix(c(2,4,2,6, 7,1,2,3, 5,7,6,5, 1,1,2,3),ncol=4,nrow=4) tmp = t(tmp)%*%tmp res = sml_em_gaussian(x,model=c("VVV"),demo.show=TRUE,k=2,plot.type=c("classification","density"),variance=tmp,mean=x[1:2,1:4]) res = sml_em_gaussian(x,k=1) summary(res) plot(res)
# 0. Getting the data > setwd("C:/Users/Smilla") > url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" > download.file(url,destfile="data.zip") > unzip("data.zip") > setwd("C:/Users/Smilla/UCI HAR Dataset") > trainfile<-list.files("train,full.names=TRUE")[-1] > trainfile<-list.files("train",full.names=TRUE)[-1] > testfile<-list.files("test",full.names=TRUE)[-1] # In order to read in all six files > file<-c(trainfile,testfile) > data <- lapply( file, read.table, stringsAsFactors = FALSE, header = FALSE ) # 1. Merges the training and the test sets to create one data set. # Binding the test and train file by each variable > data_1<-mapply(rbind,data[ c(1:3) ],data[ c(4:6) ]) > data_2<-do.call(cbind,data_1) # 2. Extracts only the measurements on the mean and standard deviation for each measurement. > feature<-fread(list.files()[2],header=FALSE,stringsAsFactors = FALSE) # Setting the labels for the data (required in Step 4) > setnames(data_2,c(1:563),c("subject",feature$V2,"activity")) # 3. Uses descriptive activity names to name the activities in the data set > measurement<-grep("std|mean\\(\\)",feature$V2)+1 > data_3<-data_2[,c(1,measurement,563)] > activitynames<-fread(list.files()[1],header=FALSE,stringsAsFactors = FALSE) > data_3$activity<-activitynames$V2[ match(data_3$activity,activitynames$V1)] # 4. Appropriately labels the data set with descriptive variable names. # 5. Creating an independent data set with the average of each variable for each subject and activity. > data_4<-aggregate(.~subject+activity, data=data_3,FUN=mean) > setwd("C:/Users/Smilla") > write.table(data_4,"averagedata.txt",row.names=FALSE)
/run_analysis.R
no_license
SmillaTx/GettingAndCleaningData
R
false
false
1,736
r
# 0. Getting the data > setwd("C:/Users/Smilla") > url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" > download.file(url,destfile="data.zip") > unzip("data.zip") > setwd("C:/Users/Smilla/UCI HAR Dataset") > trainfile<-list.files("train,full.names=TRUE")[-1] > trainfile<-list.files("train",full.names=TRUE)[-1] > testfile<-list.files("test",full.names=TRUE)[-1] # In order to read in all six files > file<-c(trainfile,testfile) > data <- lapply( file, read.table, stringsAsFactors = FALSE, header = FALSE ) # 1. Merges the training and the test sets to create one data set. # Binding the test and train file by each variable > data_1<-mapply(rbind,data[ c(1:3) ],data[ c(4:6) ]) > data_2<-do.call(cbind,data_1) # 2. Extracts only the measurements on the mean and standard deviation for each measurement. > feature<-fread(list.files()[2],header=FALSE,stringsAsFactors = FALSE) # Setting the labels for the data (required in Step 4) > setnames(data_2,c(1:563),c("subject",feature$V2,"activity")) # 3. Uses descriptive activity names to name the activities in the data set > measurement<-grep("std|mean\\(\\)",feature$V2)+1 > data_3<-data_2[,c(1,measurement,563)] > activitynames<-fread(list.files()[1],header=FALSE,stringsAsFactors = FALSE) > data_3$activity<-activitynames$V2[ match(data_3$activity,activitynames$V1)] # 4. Appropriately labels the data set with descriptive variable names. # 5. Creating an independent data set with the average of each variable for each subject and activity. > data_4<-aggregate(.~subject+activity, data=data_3,FUN=mean) > setwd("C:/Users/Smilla") > write.table(data_4,"averagedata.txt",row.names=FALSE)
# Produce plot of data and quantiles for electricity example # To save passing large objects as arguments, this treats DT and qdemand # as global variables # Arguments: # id - the smartmetre id to plot # showquantiles - which quantiles to plot. By default, it plots the deciles qdemandplot <- function(id, showquantiles=seq(0.1,0.9,by=0.1)) { library(data.table) library(ggplot2) idlist <- unique(DT[,id]) if(id <= 500) id <- idlist[id] prob <- sort(unique(qdemand[,prob])) # Subset of DT j <- (DT[,id]==id) z <- DT[j, ] z[, tod:=z[,period]/2] z$dow <- factor(z$dow,levels=1:7, labels=c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")) p1 <- ggplot(aes(y=demand, x=tod), data=z) + geom_point(shape=".") + facet_grid(~dow) + ylab("Demand (kWh)") + xlab("") + ggtitle(paste("Demand for ID:",id)) + guides(fill=FALSE) + scale_x_continuous(breaks=c(0,6,12,18,24)) # Subset of qdemand j <- (qdemand[,id]==id) z <- qdemand[j, ] z[, tod:= ((z[,tow]-1) %% 48)/2 +1] z[, dow:= trunc((z[,tow]-1)/48) + 1] z$dow <- factor(z$dow, levels=1:7, labels=c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")) if(!missing(showquantiles)) { j <- z[,prob] %in% showquantiles z <- z[j,] } p2 <- ggplot(aes(y=demand, x=tod, colour=prob, group=prob), data=z) + geom_line() + facet_grid(~dow) + xlab("Time of day") + ylab("Quantiles") + scale_colour_gradientn(colours = rainbow(8), name="Probability", breaks=seq(0.1,0.9,by=0.2)) + theme(legend.position="bottom", legend.direction="horizontal", legend.key.width=unit(1,"cm"), legend.key.height=unit(.3,"cm"), strip.background = element_blank(), strip.text.x = element_blank()) + scale_x_continuous(breaks=c(0,6,12,18,24)) return(gridExtra::grid.arrange(p1,p2,ncol=1, heights=c(0.5,0.55))) } # Compute Jensen-Shannon distance # based on quantiles q and p at probabilities prob JS <- function(prob,q,p) { # Compute approximate densities x <- seq(min(q,p),max(q,p), l=201) qpmf <- pmf(x,prob,q) ppmf <- pmf(x,prob,p) m <- 0.5 * 
(ppmf + qpmf) JS <- suppressWarnings(0.5*(sum(na.omit(ppmf*log(ppmf/m))) + sum(na.omit(qpmf*log(qpmf/m))))) return(JS) } # Compute approximate discretized density (like a probability mass function) # at each x (equally spaced) given quantiles q with probabilities p pmf <- function(x, p, q) { qcdf <- approx(q,p,xout=x,yleft=0,yright=1)$y qpmf <- c(0,diff(qcdf)/ (x[2]-x[1])) return(qpmf / sum(qpmf)) } jsd <- function(qdemand) { idlist <- unique(qdemand[,id]) probs <- sort(unique(qdemand[,prob])) nid <- length(idlist) dmat <- matrix(0, nrow=nid, ncol=nid) rownames(dmat) <- colnames(dmat) <- idlist x <- seq(0, max(qdemand[,demand]), l=51) for(i in 2:nid) for(j in 1:(i-1)) { tmp <- qdemand[id==idlist[i],] tmp[, demand2:=qdemand[id==idlist[j],demand]] dmat[i,j] <- sum(tmp[, JS(prob,demand,demand2), by=.(tow)]$V1) # for(dow in 1:7) # for(period in 1:48) # { # tmp2 <- subset(tmp, period==period, dow==dow) # js <- JS(prob,) # } } # Create object of class "dist" return(as.dist(dmat + t(dmat))) } # Compute similarity matrix based on pairwise distances # 3 different kernels are possible # The h parameter is selected to be very large if the argument is omitted similarity <- function(distances, h, kernel=c("Gaussian","Epanechnikov","Triangle")) { if(class(distances) != "dist") stop("distances should be of class 'dist'") kernel <- match.arg(kernel) if(missing(h)) h <- 1000*max(distances) distances <- as.matrix(distances) if(kernel=="Gaussian") sims <- exp(-(distances/h)^2) else if(kernel=="Epanechnikov") sims <- pmax(1-(distances/h)^2, 0) else sims <- pmax(1-distances/h, 0) return(sims) } # Compute similarity matrix based on pairwise distances # Only implements Epanechnikov kernel # distances must be a dist class. 
# returns sparse object sparsesimilarity <- function(distances, h) { if(!is.element("dist",class(distances))) stop("distances must be of class 'dist'") if(missing(h)) h <- 1000*max(distances) n <- attr(distances,"Size") k <- distances < h col <- rep(1:(n-1),(n-1):1)[k] row <- matrix(rep(1:n,n), n,n)[lower.tri(matrix(0,n,n))] row <- row[k] v <- 1-(distances[k]/h)^2 sims <- Rcsdp::simple_triplet_sym_matrix(row,col,v,n=n) return(sims) } # Main function for finding embedding based on distances # Arguments: # distances - an object of class "dist" (essential the lower triangle of distances matrix) # m - embedding dimension # method - which dimension reduction method to use. Default is LaplacianMDS which # uses Laplacian but with very large h. This is equivalent to "MDSiso" but much # faster. # h - bandwidth for computing the similarity matrix. Only used for Laplacian # methods, apart from LaplacianMDS where hs is set to a large h. # ... - any other arguments are passed to the function implementing the embedding method. embedding <- function(distances, m=2, method=c("LaplacianMDS","Laplacian","Lrw","Lsym","Lsym2", "MDS","MDSiso","monoMDS","DPM","Rtsne"), h = median(distances), ...) { method <- match.arg(method) if(class(distances)!="dist") stop("distances should be of class dist") if(m>6) stop("Maximum embedding is 6 dimensions") if(method == "LaplacianMDS") { #Laplacian eigenmap with large h w <- similarity(distances, ...) n <- NROW(w) D <- diag(rowSums(w)) ei <- geigen::geigen(D-w,D,symmetric=TRUE)$vectors[,2:(m+1),drop=FALSE] } else if(method == "Laplacian") { #Laplacian eigenmap with regular h w <- similarity(distances, h=h, ...) n <- NROW(w) D <- diag(rowSums(w)) ei <- geigen::geigen(D-w,D,symmetric=TRUE) ei <- ei$vectors[,2:(m+1),drop=FALSE] } else if(method=="Lrw") { #Laplacian eigenmap. normalized Lrw w <- similarity(distances, h=h, ...) 
n <- NROW(w) D <- diag(1/rowSums(w)) ei <- geigen::geigen(diag(n) - D %*% w,D,symmetric=TRUE) ei <- ei$vectors[,2:(m+1),drop=FALSE] } else if(method=="Lsym") { #Laplacian eigenmap. normalized Lsym w <- similarity(distances, h=h, ...) n <- NROW(w) wden <- rowSums(w) D <- diag(1/wden) Dhalf <- diag(1/sqrt(wden)) ei <- geigen::geigen(diag(n) - Dhalf %*% w %*% Dhalf,D,symmetric=TRUE) ei <- ei$vectors[,2:(m+1),drop=FALSE] } else if(method=="Lsym2") { #Laplacian eigenmap. normalized Lsym w <- similarity(distances, h=h, ...) n <- NROW(w) wden <- rowSums(w) D <- diag(1/wden) Dhalf <- diag(1/sqrt(wden)) ei <- eigen(Dhalf %*% w %*% Dhalf,symmetric=TRUE) ei <- ei$vectors[,1:m,drop=FALSE] } else if(method=="Rtsne") { ei <- Rtsne::Rtsne(distances, dims=m, perplexity=9)$Y } else if(method=="MDS") ei <- mds(distances, ndim=m)$conf else if(method=="MDSiso") { # Multidimensional scaling mds <- MASS::isoMDS(distances, k=m) ei <- mds$points } else if(method=="monoMDS") { # Multidimensional scaling mds <- vegan::monoMDS(distances, k=m, model="local") ei <- mds$points } else if(method=="DPM") { # Density preserving map ei <- dpm(distances, m=m, ...) 
colnames(ei) <- paste("Comp",1:m, sep="") rownames(ei) <- attr(distances, "Labels") return(structure(scale(ei),class="embedding")) } else stop("Not implemented") colnames(ei) <- paste("Comp",1:m, sep="") rownames(ei) <- attr(distances,"Labels") # Scale embedding ei <- scale(ei) # Then take signs so medians are positive # Only purpose of this is to avoid arbitrary changes in sign for a component med_ei <- apply(ei, 2, median) j <- med_ei < 0 ei[,j] <- -ei[,j] return(structure(list(y=ei,method=method,distances=distances),class="embedding")) } # Find outliers in matrix x # embedded indicates if we look for outliers in embedded space # or original space outliers <- function(x, embedded=FALSE, method=c("kde","HDoutliers"), noutliers=NULL, pcoutliers=1, bandwidth=1e6) { method <- match.arg(method) if(class(x)!="embedding") stop("This function is for objects of class 'embedding'") if(embedded) { # Extract embedded points x <- x$y m <- NCOL(x) # Check inputs if(m>3 & method=='kde') { warning("kde can't find outliers in more than 3d space. Switching to HDoutliers") method <- "HDoutliers" } } else if(method=="HDoutliers") { warning("HDoutliers only works on embedded space. 
Switching to kde") method <- "kde" } if(method=="kde") { # Work in original space if(!embedded) fxy <- kdedist(x$distances, bandwidth) else fxy <- kdeobs(x) return(kdeoutliers(fxy, noutliers=noutliers, pcoutliers=pcoutliers)) } else { outliers <- HDoutliers::HDoutliers(x) names(outliers) <- rownames(x)[outliers] return(outliers) } } kdeoutliers <- function(fxy, noutliers=NULL, pcoutliers=1) { if(is.null(noutliers)) { ql <- quantile(fxy, prob=pcoutliers/100) noutliers <- sum(fxy < ql) } if(noutliers > 0) { outliers <- order(fxy)[seq(noutliers)] names(outliers) <- names(fxy)[outliers] return(outliers) } else return(NULL) } # Scatterplots of Laplacian eigenmaps # If embedded=TRUE, then show HDRs and outliers from embedded space # Else show from original space plot.embedding <- function(embed, embedded=TRUE, m=NCOL(embed$y), noutliers=NULL, pcoutliers=1, outliermethod=c("kde","HDoutliers"), levels=c(1,50,99), showhdr=TRUE, kde.package=c("ash","ks"), main=paste("Embedding:",embed$method), bandwidth=1e6, labels=c("metres","rank"), ...) 
{ outliermethod <- match.arg(outliermethod) kde.package <- match.arg(kde.package) labels <- match.arg(labels) if(!embedded & outliermethod=="HDoutliers") { outliermethod <- "kde" warning("Using kde outlier method on original space") } data <- embed$y[,1:m] region <- NULL if(!showhdr) levels <- NULL # Find outliers and HDRs if(!is.null(levels) | outliermethod=="kde") { m <- NCOL(data) if(m > 2) kde.package <- "ks" if(embedded) fxy <- kdeobs(data, use_ash=(kde.package=='ash')) else fxy <- kdedist(embed$distances, bandwidth) if(!is.null(levels)) { levels <- sort(levels) if(max(levels) < 1) levels <- levels*100 ql <- quantile(fxy, prob=1-levels/100) region <- numeric(NROW(data)) + 100 for(i in rev(seq_along(levels))) region[fxy > ql[i]] <- levels[i] } if(outliermethod=="kde") outliers <- kdeoutliers(fxy, noutliers,pcoutliers) } if(outliermethod=="HDoutliers") { outliers <- HDoutliers::HDoutliers(data) names(outliers) <- rownames(x)[outliers] } data <- as.data.frame(data) varnames <- colnames(data) if(length(outliers) > 0) { if(labels=="metres") labs <- rownames(data)[outliers] else labs <- 1:length(outliers) } if(m==1) { p <- ggplot2::ggplot(data,ggplot2::aes_string(varnames[1])) + ggplot2::geom_density(bw="SJ", fill="salmon", col=FALSE) + ggplot2::geom_rug() if(!is.null(outliers)) { p <- p + ggplot2::annotate("text", x = data[[1]][outliers], y=rep(-max(fxy)/50,length(outliers)), label=labs, col='blue', cex=2.5) } if(!is.null(main)) p <- p + ggplot2::ggtitle(main) } else if(m==2) { p <- annotatedplot(data, ggplot2::aes_string(x=varnames[1],y=varnames[2]), outliers=outliers, labels=labs, region=region) if(!is.null(main)) p <- p + ggplot2::ggtitle(main) } else p <- GGally::ggpairs(data, title=main, lower=list(continuous=GGally::wrap(annotatedplot, outliers=outliers, region=region, labels=labs, textsize=2.5)), diag=list(continuous=mydensitydiag), upper=list(continuous=GGally::wrap(annotatedplot, outliers=outliers, region=region, labels=labs, textsize=2.5))) + 
ggplot2::theme(text = ggplot2::element_text(size=10)) return(p) } annotatedplot <- function(data, mapping, outliers=NULL, labels=NULL, region=NULL, textsize=2.5,...) { xvar <- rlang::quo_text(mapping$x) yvar <- rlang::quo_text(mapping$y) xlim <- diff(range(data[,xvar])) ylim <- diff(range(data[,yvar])) if(!is.null(region)) { # Construct region factor levels <- sort(unique(region[region < 100]), decreasing=TRUE) levels <- c(levels, 100) data$Region <- factor(region, levels=levels, labels=c(paste(head(levels,-1)), ">99")) # Sort data so the larger regions go first (other than outliers) k <- region k[region==100] <- 0 ord <- order(k, decreasing=TRUE) p <- ggplot2::ggplot(data[ord,], mapping) + ggplot2::geom_point(ggplot2::aes(col=data$Region[ord])) p <- p + ggplot2::scale_colour_manual( name="HDRs", breaks=c(paste(head(sort(levels),-1)), ">99"), values=c(RColorBrewer::brewer.pal(length(levels),"YlOrRd")[-1],"#000000")) } else p <- ggplot2::ggplot(data, mapping) + ggplot2::geom_point() if(!is.null(outliers)) { if(is.null(labels)) labels <- rownames(data)[outliers] p <- p + ggplot2::annotate("text", x = data[outliers,xvar]+xlim/50, y=data[outliers,yvar]+ylim/50, label=labels, col='blue', cex=textsize) } return(p) } mydensitydiag <- function (data, mapping, ..., rescale = FALSE) { p <- ggplot2::ggplot(data, mapping) + ggplot2::scale_y_continuous() if (identical(rescale, TRUE)) { p <- p + ggplot2::stat_density(aes(y = ..scaled.. * diff(range(x, na.rm = TRUE)) + min(x, na.rm = TRUE)), position = "identity", geom = "line", bw="SJ", , fill="salmon", col=FALSE, ...) 
} else { p <- p + ggplot2::geom_density(..., bw="SJ", fill="salmon", col=FALSE) } p$type <- "diag" p$subType <- "density" p } # Compute row means of sparse symmetric matrix rowMeansSparse <- function(x) { result <- numeric(x$n) for(i in seq(x$n)) { k <- (x$i==i | x$j==i) result[i] <- sum(x$v[k]) } return(result/x$n) } # Return number of non-zeros in each row of sparse symmetric matrix rowNonZero <- function(x) { result <- numeric(x$n) for(i in seq(x$n)) { k <- (x$i==i | x$j==i) result[i] <- sum(k) } return(result) } # Add two sparse symmetric matrices of equal size addSparse <- function(x,y) { if(x$n != y$n) stop("Matrices not the same size") # Combine rows, colums and non-zero elements i <- c(x$i,y$i) j <- c(x$j,y$j) v <- c(x$v,y$v) # Find duplicates z <- duplicated(cbind(i,j)) if(any(z)) { #Split duplicates into separate vectors i2 <- i[z] j2 <- j[z] v2 <- v[z] i <- i[!z] j <- j[!z] v <- v[!z] # Add together any duplicate values for(k in seq_along(i2)) { l <- which(i==i2[k] & j==j2[k]) v[l] <- v[l] + v2[k] } } return(Rcsdp::simple_triplet_sym_matrix(i,j,v,n=x$n)) } dij <- function(i,j,n) { Rcsdp::simple_triplet_sym_matrix( i=c(i,j,i), j=c(i,j,j), v=c(1,1,-1),n=n) } dpm <- function(d, h, m) { n <- attr(d,"Size") w <- sparsesimilarity(d, h=h) f <- rowMeansSparse(w) - rowNonZero(w)/n b <- c(f[f<0],0) A <- list() nA <- 0 for(i in seq(n)) { k <- (w$i==i | w$j==i) if(any(k)) { idx <- which(k) i0 <- w$i[idx] j0 <- w$j[idx] tmp <- Rcsdp::.simple_triplet_zero_sym_matrix(w$n) if(length(idx)>0) { for(j in seq_along(idx)) tmp <- addSparse(tmp, dij(i0[j],j0[j],n)) } tmp$v <- -tmp$v/h^2/n nA <- nA+1 A[[nA]] <- list(tmp) } } A[[nA+1]] <- list(matrix(1,n,n)) C <- list(Rcsdp::.simple_triplet_diag_sym_matrix(1,n)) tmp <- Rcsdp::csdp(C, A, b, list(type="s", size=n)) if(tmp$status!=0) warning("Not converged") return(tmp$X[[1]][,1:m,drop=FALSE]) } # Create nice R figures with minimal margins # in landscape format suitable for slides and papers savepdf <- function(file, width=16, 
height=10) { fname <<- paste("figs/",file,".pdf",sep="") pdf(fname, width=width/2.54, height=height/2.54, pointsize=10) par(mgp=c(2.2,0.45,0), tcl=-0.4, mar=c(3.3,3.6,1.1,1.1)) } # Crop pdf to remove all white space endpdf <- function() { #dev.off() crop::dev.off.crop(fname) } # Compute kernel density estimate at observations kdeobs <- function(x, h=NULL, use_ash=TRUE) { m <- NCOL(x) if(m==1) { if(is.null(h)) h <- bw.SJ(x) den <- density(x, bw=h) fxy <- approx(den$x,den$y,xout=x)$y } else if(m==2) { kde.package <- ifelse(use_ash, "ash", "ks") den <- hdrcde::hdr.2d(x[,1], x[,2], prob=0.5, kde.package=kde.package, h=h) fxy <- den$fxy } else { if(is.null(h)) h <- ks::Hpi.diag(x, binned = TRUE, nstage=1, optim.fun="optim") fxy <- ks::kde(x, eval.points=x,H=h)$estimate } names(fxy) <- rownames(x) return(fxy) } # Compute kernel density estimate from pairwise distances on original space kdedist <- function(d, bandwidth) { d <- as.matrix(d) fxy <- rowSums(exp(-d/bandwidth)) return(fxy) }
/data/functions.R
no_license
faheemja/ftsviz
R
false
false
17,940
r
# Produce plot of data and quantiles for electricity example # To save passing large objects as arguments, this treats DT and qdemand # as global variables # Arguments: # id - the smartmetre id to plot # showquantiles - which quantiles to plot. By default, it plots the deciles qdemandplot <- function(id, showquantiles=seq(0.1,0.9,by=0.1)) { library(data.table) library(ggplot2) idlist <- unique(DT[,id]) if(id <= 500) id <- idlist[id] prob <- sort(unique(qdemand[,prob])) # Subset of DT j <- (DT[,id]==id) z <- DT[j, ] z[, tod:=z[,period]/2] z$dow <- factor(z$dow,levels=1:7, labels=c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")) p1 <- ggplot(aes(y=demand, x=tod), data=z) + geom_point(shape=".") + facet_grid(~dow) + ylab("Demand (kWh)") + xlab("") + ggtitle(paste("Demand for ID:",id)) + guides(fill=FALSE) + scale_x_continuous(breaks=c(0,6,12,18,24)) # Subset of qdemand j <- (qdemand[,id]==id) z <- qdemand[j, ] z[, tod:= ((z[,tow]-1) %% 48)/2 +1] z[, dow:= trunc((z[,tow]-1)/48) + 1] z$dow <- factor(z$dow, levels=1:7, labels=c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")) if(!missing(showquantiles)) { j <- z[,prob] %in% showquantiles z <- z[j,] } p2 <- ggplot(aes(y=demand, x=tod, colour=prob, group=prob), data=z) + geom_line() + facet_grid(~dow) + xlab("Time of day") + ylab("Quantiles") + scale_colour_gradientn(colours = rainbow(8), name="Probability", breaks=seq(0.1,0.9,by=0.2)) + theme(legend.position="bottom", legend.direction="horizontal", legend.key.width=unit(1,"cm"), legend.key.height=unit(.3,"cm"), strip.background = element_blank(), strip.text.x = element_blank()) + scale_x_continuous(breaks=c(0,6,12,18,24)) return(gridExtra::grid.arrange(p1,p2,ncol=1, heights=c(0.5,0.55))) } # Compute Jensen-Shannon distance # based on quantiles q and p at probabilities prob JS <- function(prob,q,p) { # Compute approximate densities x <- seq(min(q,p),max(q,p), l=201) qpmf <- pmf(x,prob,q) ppmf <- pmf(x,prob,p) m <- 0.5 * 
(ppmf + qpmf) JS <- suppressWarnings(0.5*(sum(na.omit(ppmf*log(ppmf/m))) + sum(na.omit(qpmf*log(qpmf/m))))) return(JS) } # Compute approximate discretized density (like a probability mass function) # at each x (equally spaced) given quantiles q with probabilities p pmf <- function(x, p, q) { qcdf <- approx(q,p,xout=x,yleft=0,yright=1)$y qpmf <- c(0,diff(qcdf)/ (x[2]-x[1])) return(qpmf / sum(qpmf)) } jsd <- function(qdemand) { idlist <- unique(qdemand[,id]) probs <- sort(unique(qdemand[,prob])) nid <- length(idlist) dmat <- matrix(0, nrow=nid, ncol=nid) rownames(dmat) <- colnames(dmat) <- idlist x <- seq(0, max(qdemand[,demand]), l=51) for(i in 2:nid) for(j in 1:(i-1)) { tmp <- qdemand[id==idlist[i],] tmp[, demand2:=qdemand[id==idlist[j],demand]] dmat[i,j] <- sum(tmp[, JS(prob,demand,demand2), by=.(tow)]$V1) # for(dow in 1:7) # for(period in 1:48) # { # tmp2 <- subset(tmp, period==period, dow==dow) # js <- JS(prob,) # } } # Create object of class "dist" return(as.dist(dmat + t(dmat))) } # Compute similarity matrix based on pairwise distances # 3 different kernels are possible # The h parameter is selected to be very large if the argument is omitted similarity <- function(distances, h, kernel=c("Gaussian","Epanechnikov","Triangle")) { if(class(distances) != "dist") stop("distances should be of class 'dist'") kernel <- match.arg(kernel) if(missing(h)) h <- 1000*max(distances) distances <- as.matrix(distances) if(kernel=="Gaussian") sims <- exp(-(distances/h)^2) else if(kernel=="Epanechnikov") sims <- pmax(1-(distances/h)^2, 0) else sims <- pmax(1-distances/h, 0) return(sims) } # Compute similarity matrix based on pairwise distances # Only implements Epanechnikov kernel # distances must be a dist class. 
# returns sparse object sparsesimilarity <- function(distances, h) { if(!is.element("dist",class(distances))) stop("distances must be of class 'dist'") if(missing(h)) h <- 1000*max(distances) n <- attr(distances,"Size") k <- distances < h col <- rep(1:(n-1),(n-1):1)[k] row <- matrix(rep(1:n,n), n,n)[lower.tri(matrix(0,n,n))] row <- row[k] v <- 1-(distances[k]/h)^2 sims <- Rcsdp::simple_triplet_sym_matrix(row,col,v,n=n) return(sims) } # Main function for finding embedding based on distances # Arguments: # distances - an object of class "dist" (essential the lower triangle of distances matrix) # m - embedding dimension # method - which dimension reduction method to use. Default is LaplacianMDS which # uses Laplacian but with very large h. This is equivalent to "MDSiso" but much # faster. # h - bandwidth for computing the similarity matrix. Only used for Laplacian # methods, apart from LaplacianMDS where hs is set to a large h. # ... - any other arguments are passed to the function implementing the embedding method. embedding <- function(distances, m=2, method=c("LaplacianMDS","Laplacian","Lrw","Lsym","Lsym2", "MDS","MDSiso","monoMDS","DPM","Rtsne"), h = median(distances), ...) { method <- match.arg(method) if(class(distances)!="dist") stop("distances should be of class dist") if(m>6) stop("Maximum embedding is 6 dimensions") if(method == "LaplacianMDS") { #Laplacian eigenmap with large h w <- similarity(distances, ...) n <- NROW(w) D <- diag(rowSums(w)) ei <- geigen::geigen(D-w,D,symmetric=TRUE)$vectors[,2:(m+1),drop=FALSE] } else if(method == "Laplacian") { #Laplacian eigenmap with regular h w <- similarity(distances, h=h, ...) n <- NROW(w) D <- diag(rowSums(w)) ei <- geigen::geigen(D-w,D,symmetric=TRUE) ei <- ei$vectors[,2:(m+1),drop=FALSE] } else if(method=="Lrw") { #Laplacian eigenmap. normalized Lrw w <- similarity(distances, h=h, ...) 
n <- NROW(w) D <- diag(1/rowSums(w)) ei <- geigen::geigen(diag(n) - D %*% w,D,symmetric=TRUE) ei <- ei$vectors[,2:(m+1),drop=FALSE] } else if(method=="Lsym") { #Laplacian eigenmap. normalized Lsym w <- similarity(distances, h=h, ...) n <- NROW(w) wden <- rowSums(w) D <- diag(1/wden) Dhalf <- diag(1/sqrt(wden)) ei <- geigen::geigen(diag(n) - Dhalf %*% w %*% Dhalf,D,symmetric=TRUE) ei <- ei$vectors[,2:(m+1),drop=FALSE] } else if(method=="Lsym2") { #Laplacian eigenmap. normalized Lsym w <- similarity(distances, h=h, ...) n <- NROW(w) wden <- rowSums(w) D <- diag(1/wden) Dhalf <- diag(1/sqrt(wden)) ei <- eigen(Dhalf %*% w %*% Dhalf,symmetric=TRUE) ei <- ei$vectors[,1:m,drop=FALSE] } else if(method=="Rtsne") { ei <- Rtsne::Rtsne(distances, dims=m, perplexity=9)$Y } else if(method=="MDS") ei <- mds(distances, ndim=m)$conf else if(method=="MDSiso") { # Multidimensional scaling mds <- MASS::isoMDS(distances, k=m) ei <- mds$points } else if(method=="monoMDS") { # Multidimensional scaling mds <- vegan::monoMDS(distances, k=m, model="local") ei <- mds$points } else if(method=="DPM") { # Density preserving map ei <- dpm(distances, m=m, ...) 
colnames(ei) <- paste("Comp",1:m, sep="") rownames(ei) <- attr(distances, "Labels") return(structure(scale(ei),class="embedding")) } else stop("Not implemented") colnames(ei) <- paste("Comp",1:m, sep="") rownames(ei) <- attr(distances,"Labels") # Scale embedding ei <- scale(ei) # Then take signs so medians are positive # Only purpose of this is to avoid arbitrary changes in sign for a component med_ei <- apply(ei, 2, median) j <- med_ei < 0 ei[,j] <- -ei[,j] return(structure(list(y=ei,method=method,distances=distances),class="embedding")) } # Find outliers in matrix x # embedded indicates if we look for outliers in embedded space # or original space outliers <- function(x, embedded=FALSE, method=c("kde","HDoutliers"), noutliers=NULL, pcoutliers=1, bandwidth=1e6) { method <- match.arg(method) if(class(x)!="embedding") stop("This function is for objects of class 'embedding'") if(embedded) { # Extract embedded points x <- x$y m <- NCOL(x) # Check inputs if(m>3 & method=='kde') { warning("kde can't find outliers in more than 3d space. Switching to HDoutliers") method <- "HDoutliers" } } else if(method=="HDoutliers") { warning("HDoutliers only works on embedded space. 
Switching to kde") method <- "kde" } if(method=="kde") { # Work in original space if(!embedded) fxy <- kdedist(x$distances, bandwidth) else fxy <- kdeobs(x) return(kdeoutliers(fxy, noutliers=noutliers, pcoutliers=pcoutliers)) } else { outliers <- HDoutliers::HDoutliers(x) names(outliers) <- rownames(x)[outliers] return(outliers) } } kdeoutliers <- function(fxy, noutliers=NULL, pcoutliers=1) { if(is.null(noutliers)) { ql <- quantile(fxy, prob=pcoutliers/100) noutliers <- sum(fxy < ql) } if(noutliers > 0) { outliers <- order(fxy)[seq(noutliers)] names(outliers) <- names(fxy)[outliers] return(outliers) } else return(NULL) } # Scatterplots of Laplacian eigenmaps # If embedded=TRUE, then show HDRs and outliers from embedded space # Else show from original space plot.embedding <- function(embed, embedded=TRUE, m=NCOL(embed$y), noutliers=NULL, pcoutliers=1, outliermethod=c("kde","HDoutliers"), levels=c(1,50,99), showhdr=TRUE, kde.package=c("ash","ks"), main=paste("Embedding:",embed$method), bandwidth=1e6, labels=c("metres","rank"), ...) 
{ outliermethod <- match.arg(outliermethod) kde.package <- match.arg(kde.package) labels <- match.arg(labels) if(!embedded & outliermethod=="HDoutliers") { outliermethod <- "kde" warning("Using kde outlier method on original space") } data <- embed$y[,1:m] region <- NULL if(!showhdr) levels <- NULL # Find outliers and HDRs if(!is.null(levels) | outliermethod=="kde") { m <- NCOL(data) if(m > 2) kde.package <- "ks" if(embedded) fxy <- kdeobs(data, use_ash=(kde.package=='ash')) else fxy <- kdedist(embed$distances, bandwidth) if(!is.null(levels)) { levels <- sort(levels) if(max(levels) < 1) levels <- levels*100 ql <- quantile(fxy, prob=1-levels/100) region <- numeric(NROW(data)) + 100 for(i in rev(seq_along(levels))) region[fxy > ql[i]] <- levels[i] } if(outliermethod=="kde") outliers <- kdeoutliers(fxy, noutliers,pcoutliers) } if(outliermethod=="HDoutliers") { outliers <- HDoutliers::HDoutliers(data) names(outliers) <- rownames(x)[outliers] } data <- as.data.frame(data) varnames <- colnames(data) if(length(outliers) > 0) { if(labels=="metres") labs <- rownames(data)[outliers] else labs <- 1:length(outliers) } if(m==1) { p <- ggplot2::ggplot(data,ggplot2::aes_string(varnames[1])) + ggplot2::geom_density(bw="SJ", fill="salmon", col=FALSE) + ggplot2::geom_rug() if(!is.null(outliers)) { p <- p + ggplot2::annotate("text", x = data[[1]][outliers], y=rep(-max(fxy)/50,length(outliers)), label=labs, col='blue', cex=2.5) } if(!is.null(main)) p <- p + ggplot2::ggtitle(main) } else if(m==2) { p <- annotatedplot(data, ggplot2::aes_string(x=varnames[1],y=varnames[2]), outliers=outliers, labels=labs, region=region) if(!is.null(main)) p <- p + ggplot2::ggtitle(main) } else p <- GGally::ggpairs(data, title=main, lower=list(continuous=GGally::wrap(annotatedplot, outliers=outliers, region=region, labels=labs, textsize=2.5)), diag=list(continuous=mydensitydiag), upper=list(continuous=GGally::wrap(annotatedplot, outliers=outliers, region=region, labels=labs, textsize=2.5))) + 
ggplot2::theme(text = ggplot2::element_text(size=10)) return(p) } annotatedplot <- function(data, mapping, outliers=NULL, labels=NULL, region=NULL, textsize=2.5,...) { xvar <- rlang::quo_text(mapping$x) yvar <- rlang::quo_text(mapping$y) xlim <- diff(range(data[,xvar])) ylim <- diff(range(data[,yvar])) if(!is.null(region)) { # Construct region factor levels <- sort(unique(region[region < 100]), decreasing=TRUE) levels <- c(levels, 100) data$Region <- factor(region, levels=levels, labels=c(paste(head(levels,-1)), ">99")) # Sort data so the larger regions go first (other than outliers) k <- region k[region==100] <- 0 ord <- order(k, decreasing=TRUE) p <- ggplot2::ggplot(data[ord,], mapping) + ggplot2::geom_point(ggplot2::aes(col=data$Region[ord])) p <- p + ggplot2::scale_colour_manual( name="HDRs", breaks=c(paste(head(sort(levels),-1)), ">99"), values=c(RColorBrewer::brewer.pal(length(levels),"YlOrRd")[-1],"#000000")) } else p <- ggplot2::ggplot(data, mapping) + ggplot2::geom_point() if(!is.null(outliers)) { if(is.null(labels)) labels <- rownames(data)[outliers] p <- p + ggplot2::annotate("text", x = data[outliers,xvar]+xlim/50, y=data[outliers,yvar]+ylim/50, label=labels, col='blue', cex=textsize) } return(p) } mydensitydiag <- function (data, mapping, ..., rescale = FALSE) { p <- ggplot2::ggplot(data, mapping) + ggplot2::scale_y_continuous() if (identical(rescale, TRUE)) { p <- p + ggplot2::stat_density(aes(y = ..scaled.. * diff(range(x, na.rm = TRUE)) + min(x, na.rm = TRUE)), position = "identity", geom = "line", bw="SJ", , fill="salmon", col=FALSE, ...) 
} else { p <- p + ggplot2::geom_density(..., bw="SJ", fill="salmon", col=FALSE) } p$type <- "diag" p$subType <- "density" p } # Compute row means of sparse symmetric matrix rowMeansSparse <- function(x) { result <- numeric(x$n) for(i in seq(x$n)) { k <- (x$i==i | x$j==i) result[i] <- sum(x$v[k]) } return(result/x$n) } # Return number of non-zeros in each row of sparse symmetric matrix rowNonZero <- function(x) { result <- numeric(x$n) for(i in seq(x$n)) { k <- (x$i==i | x$j==i) result[i] <- sum(k) } return(result) } # Add two sparse symmetric matrices of equal size addSparse <- function(x,y) { if(x$n != y$n) stop("Matrices not the same size") # Combine rows, colums and non-zero elements i <- c(x$i,y$i) j <- c(x$j,y$j) v <- c(x$v,y$v) # Find duplicates z <- duplicated(cbind(i,j)) if(any(z)) { #Split duplicates into separate vectors i2 <- i[z] j2 <- j[z] v2 <- v[z] i <- i[!z] j <- j[!z] v <- v[!z] # Add together any duplicate values for(k in seq_along(i2)) { l <- which(i==i2[k] & j==j2[k]) v[l] <- v[l] + v2[k] } } return(Rcsdp::simple_triplet_sym_matrix(i,j,v,n=x$n)) } dij <- function(i,j,n) { Rcsdp::simple_triplet_sym_matrix( i=c(i,j,i), j=c(i,j,j), v=c(1,1,-1),n=n) } dpm <- function(d, h, m) { n <- attr(d,"Size") w <- sparsesimilarity(d, h=h) f <- rowMeansSparse(w) - rowNonZero(w)/n b <- c(f[f<0],0) A <- list() nA <- 0 for(i in seq(n)) { k <- (w$i==i | w$j==i) if(any(k)) { idx <- which(k) i0 <- w$i[idx] j0 <- w$j[idx] tmp <- Rcsdp::.simple_triplet_zero_sym_matrix(w$n) if(length(idx)>0) { for(j in seq_along(idx)) tmp <- addSparse(tmp, dij(i0[j],j0[j],n)) } tmp$v <- -tmp$v/h^2/n nA <- nA+1 A[[nA]] <- list(tmp) } } A[[nA+1]] <- list(matrix(1,n,n)) C <- list(Rcsdp::.simple_triplet_diag_sym_matrix(1,n)) tmp <- Rcsdp::csdp(C, A, b, list(type="s", size=n)) if(tmp$status!=0) warning("Not converged") return(tmp$X[[1]][,1:m,drop=FALSE]) } # Create nice R figures with minimal margins # in landscape format suitable for slides and papers savepdf <- function(file, width=16, 
height=10) { fname <<- paste("figs/",file,".pdf",sep="") pdf(fname, width=width/2.54, height=height/2.54, pointsize=10) par(mgp=c(2.2,0.45,0), tcl=-0.4, mar=c(3.3,3.6,1.1,1.1)) } # Crop pdf to remove all white space endpdf <- function() { #dev.off() crop::dev.off.crop(fname) } # Compute kernel density estimate at observations kdeobs <- function(x, h=NULL, use_ash=TRUE) { m <- NCOL(x) if(m==1) { if(is.null(h)) h <- bw.SJ(x) den <- density(x, bw=h) fxy <- approx(den$x,den$y,xout=x)$y } else if(m==2) { kde.package <- ifelse(use_ash, "ash", "ks") den <- hdrcde::hdr.2d(x[,1], x[,2], prob=0.5, kde.package=kde.package, h=h) fxy <- den$fxy } else { if(is.null(h)) h <- ks::Hpi.diag(x, binned = TRUE, nstage=1, optim.fun="optim") fxy <- ks::kde(x, eval.points=x,H=h)$estimate } names(fxy) <- rownames(x) return(fxy) } # Compute kernel density estimate from pairwise distances on original space kdedist <- function(d, bandwidth) { d <- as.matrix(d) fxy <- rowSums(exp(-d/bandwidth)) return(fxy) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{ggld} \alias{ggld} \title{ggld} \usage{ ggld(data, summ) } \arguments{ \item{data}{object of class SnpMatrix from which LD will be calculated} \item{summ}{data.frame generated by \code{\link{guess.summ}()}} } \value{ ggplot output } \description{ Rotated LD plot } \details{ Generates a plot of r2, rotating through 45 degrees so it can be aligned with other results, similar to Haploview. The positions in the summ object are used to align the snps to the correct coordinates. } \seealso{ Other plotting GUESSFM results: \code{\link{addlines}}, \code{\link{ggbed}}, \code{\link{ggchr}}, \code{\link{pp.nsnp}}, \code{\link{scalepos}}, \code{\link{signal.plot}} } \author{ Chris Wallace }
/man/ggld.Rd
no_license
jinshaw16/GUESSFM
R
false
true
787
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{ggld} \alias{ggld} \title{ggld} \usage{ ggld(data, summ) } \arguments{ \item{data}{object of class SnpMatrix from which LD will be calculated} \item{summ}{data.frame generated by \code{\link{guess.summ}()}} } \value{ ggplot output } \description{ Rotated LD plot } \details{ Generates a plot of r2, rotating through 45 degrees so it can be aligned with other results, similar to Haploview. The positions in the summ object are used to align the snps to the correct coordinates. } \seealso{ Other plotting GUESSFM results: \code{\link{addlines}}, \code{\link{ggbed}}, \code{\link{ggchr}}, \code{\link{pp.nsnp}}, \code{\link{scalepos}}, \code{\link{signal.plot}} } \author{ Chris Wallace }
## Cache-aware matrix inversion (Coursera R Programming, Assignment 2).
##
## makeCacheMatrix() wraps an invertible matrix in a closure-based object
## whose list of accessor functions allows the inverse to be memoised;
## cacheSolve() computes the inverse on first use and returns the cached
## copy on every subsequent call.

## Wrap matrix `x` in an accessor list.  The cached inverse lives in
## `cached_inverse`, assigned with `<<-` so it persists in the enclosing
## environment across calls; set() invalidates it when the matrix changes.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinv <- function(inv) cached_inverse <<- inv
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix()
## object).  A previously computed inverse is returned from the cache;
## otherwise solve() is called and the result stored for next time.
## `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached matrix inverse")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
/cachematrix.R
no_license
macpoly/ProgrammingAssignment2
R
false
false
1,205
r
## Cache-aware matrix inversion (Coursera R Programming, Assignment 2).
##
## makeCacheMatrix() wraps an invertible matrix in a closure-based object
## whose list of accessor functions allows the inverse to be memoised;
## cacheSolve() computes the inverse on first use and returns the cached
## copy on every subsequent call.

## Wrap matrix `x` in an accessor list.  The cached inverse lives in
## `cached_inverse`, assigned with `<<-` so it persists in the enclosing
## environment across calls; set() invalidates it when the matrix changes.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinv <- function(inv) cached_inverse <<- inv
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix()
## object).  A previously computed inverse is returned from the cache;
## otherwise solve() is called and the result stored for next time.
## `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached matrix inverse")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
# Convert a temperature from degrees Fahrenheit to kelvin.
# Vectorised: works element-wise on numeric vectors.
fahrenheit_2_kelv <- function(temp_F) {
  # Same grouping as the classic formula: K = (F - 32) * 5/9 + 273.15
  (temp_F - 32) * (5 / 9) + 273.15
}
/Newfile.R
no_license
irochehurley/Hello_world
R
false
false
200
r
# Convert a temperature from degrees Fahrenheit to kelvin.
# Vectorised: works element-wise on numeric vectors.
fahrenheit_2_kelv <- function(temp_F) {
  # Same grouping as the classic formula: K = (F - 32) * 5/9 + 273.15
  (temp_F - 32) * (5 / 9) + 273.15
}
################################################################################
# Shiny module wordcloud
#
# Author: Simone Coscetti
# Created: 2021-05-25
################################################################################

# Module constants --------------------------------------------------------

# Module UI ---------------------------------------------------------------

# UI for the wordcloud module: three side-by-side echarts word clouds
# (all / good / bad reviews).  `icon` and `icon_text` are kept for
# signature compatibility with the other modules; they are not used here.
wordcloud_ui <- function(id, icon = "icon-wallet", icon_text = "") {
  ns <- NS(id)
  fluidRow(
    column(width = 4, echarts4rOutput(ns("wordcloud"), height = "500px")),
    column(width = 4, echarts4rOutput(ns("wordcloud_good"), height = "500px")),
    column(width = 4, echarts4rOutput(ns("wordcloud_bad"), height = "500px"))
  )
}

# Module logic ------------------------------------------------------------

# Module server.  `d_data_model` is a reactive returning a list with
# `all_reviews`, `good_reviews` and `bad_reviews` word-frequency data
# frames (columns: word, freq, color).
#
# The three outputs previously triplicated the same render pipeline; it is
# factored into a single local helper so the cloud styling stays in sync.
wordcloud <- function(input, output, session, d_data_model) {

  # Build one renderEcharts4r() for the data frame selected by `pick`
  # out of the reactive data model, titled with `subtitle`.
  render_cloud <- function(pick, subtitle) {
    renderEcharts4r({
      req(d_data_model())
      pick(d_data_model()) %>%
        e_color_range(freq, color) %>%
        e_charts() %>%
        e_cloud(word, freq,
                shape = "circle",
                sizeRange = c(10, 30),
                rotationRange = c(0, 0)) %>%
        e_title(subtext = subtitle, left = "center") %>%
        e_theme("walden")
    })
  }

  output$wordcloud <- render_cloud(function(m) m$all_reviews,
                                   "Word frequencies - all reviews")
  output$wordcloud_good <- render_cloud(function(m) m$good_reviews,
                                        "Word frequencies - good reviews")
  output$wordcloud_bad <- render_cloud(function(m) m$bad_reviews,
                                       "Word frequencies - bad reviews")
}
/modules/mod_wordcloud.R
no_license
coscetti/brazilian-ecommerce
R
false
false
2,257
r
################################################################################
# Shiny module wordcloud
#
# Author: Simone Coscetti
# Created: 2021-05-25
################################################################################

# Module constants --------------------------------------------------------

# Module UI ---------------------------------------------------------------

# UI for the wordcloud module: three side-by-side echarts word clouds
# (all / good / bad reviews).  `icon` and `icon_text` are kept for
# signature compatibility with the other modules; they are not used here.
wordcloud_ui <- function(id, icon = "icon-wallet", icon_text = "") {
  ns <- NS(id)
  fluidRow(
    column(width = 4, echarts4rOutput(ns("wordcloud"), height = "500px")),
    column(width = 4, echarts4rOutput(ns("wordcloud_good"), height = "500px")),
    column(width = 4, echarts4rOutput(ns("wordcloud_bad"), height = "500px"))
  )
}

# Module logic ------------------------------------------------------------

# Module server.  `d_data_model` is a reactive returning a list with
# `all_reviews`, `good_reviews` and `bad_reviews` word-frequency data
# frames (columns: word, freq, color).
#
# The three outputs previously triplicated the same render pipeline; it is
# factored into a single local helper so the cloud styling stays in sync.
wordcloud <- function(input, output, session, d_data_model) {

  # Build one renderEcharts4r() for the data frame selected by `pick`
  # out of the reactive data model, titled with `subtitle`.
  render_cloud <- function(pick, subtitle) {
    renderEcharts4r({
      req(d_data_model())
      pick(d_data_model()) %>%
        e_color_range(freq, color) %>%
        e_charts() %>%
        e_cloud(word, freq,
                shape = "circle",
                sizeRange = c(10, 30),
                rotationRange = c(0, 0)) %>%
        e_title(subtext = subtitle, left = "center") %>%
        e_theme("walden")
    })
  }

  output$wordcloud <- render_cloud(function(m) m$all_reviews,
                                   "Word frequencies - all reviews")
  output$wordcloud_good <- render_cloud(function(m) m$good_reviews,
                                        "Word frequencies - good reviews")
  output$wordcloud_bad <- render_cloud(function(m) m$bad_reviews,
                                       "Word frequencies - bad reviews")
}
# Build a size/diet-class trophic link graph from a species-level food web.
#
# NOTE(review): the original file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>>), which made it unparseable.  Resolved
# here by keeping the more complete branch version, whose class factor also
# includes "Mesoherbivore".
#
# `fw` is a data frame with `Predator` and `Prey` columns holding
# Binomial.1.2 species names.  Species are classified by the project helper
# trophic_levels(); each predator-prey pair is mapped to a pair of class
# indices and collapsed into a weighted directed igraph object (edge weight
# = number of species-level links between the two classes).
#
# Returns an igraph graph, or 0 when trophic_levels() fails (numeric
# sentinel preserved for existing callers).
get_TL_links <- function(fw) {
  phy <- read_csv("../../Data/PHYLACINE_1.2/Data/Traits/Trait_data.csv",
                  col_types = cols()) %>%
    transmute(Mass.g, Binomial.1.2, `Meat eater` = Diet.Vertebrate > 0)

  species <- append(fw$Predator, fw$Prey) %>% unique()

  TL <- trophic_levels(species)
  # trophic_levels() signals failure by returning a numeric instead of a
  # list; `is.numeric()` is more robust than comparing class() to a string.
  if (is.numeric(TL)) {
    return(0)
  }

  # Ordered size/diet classes shared by predator and prey columns.
  size_classes <- c("Microherbivore", "Mesoherbivore", "Megaherbivore",
                    "Microcarnivore", "Mesocarnivore", "Megacarnivore")

  TL_links <- fw %>%
    mutate(
      `Pred class` = map(Predator, function(x) {
        filter(TL$species_class, Binomial.1.2 == x)$Size
      }) %>%
        unlist() %>%
        factor(levels = size_classes),
      `Prey class` = map(Prey, function(x) {
        filter(TL$species_class, Binomial.1.2 == x)$Size
      }) %>%
        unlist() %>%
        factor(levels = size_classes)
    ) %>%
    dplyr::select(`Pred class`, `Prey class`) %>%
    transmute(
      `Pred edge` = map(`Pred class`, function(x) which(levels(x) == x)) %>%
        unlist(),
      `Prey edge` = map(`Prey class`, function(x) which(levels(x) == x)) %>%
        unlist()
    )

  # Graph ----
  # Edges point prey -> predator; parallel edges are collapsed with summed
  # weights, other edge attributes dropped.
  g <- graph(t(TL_links[, 2:1])) %>%
    set_edge_attr("weight", value = 1) %>%
    simplify(edge.attr.comb = list(weight = "sum", "ignore"))
  return(g)
}
/Code/R/get_TL_links.R
no_license
emilio-berti/rewiring-rewilding
R
false
false
2,697
r
# Build a size/diet-class trophic link graph from a species-level food web.
#
# NOTE(review): the original file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>>), which made it unparseable.  Resolved
# here by keeping the more complete branch version, whose class factor also
# includes "Mesoherbivore".
#
# `fw` is a data frame with `Predator` and `Prey` columns holding
# Binomial.1.2 species names.  Species are classified by the project helper
# trophic_levels(); each predator-prey pair is mapped to a pair of class
# indices and collapsed into a weighted directed igraph object (edge weight
# = number of species-level links between the two classes).
#
# Returns an igraph graph, or 0 when trophic_levels() fails (numeric
# sentinel preserved for existing callers).
get_TL_links <- function(fw) {
  phy <- read_csv("../../Data/PHYLACINE_1.2/Data/Traits/Trait_data.csv",
                  col_types = cols()) %>%
    transmute(Mass.g, Binomial.1.2, `Meat eater` = Diet.Vertebrate > 0)

  species <- append(fw$Predator, fw$Prey) %>% unique()

  TL <- trophic_levels(species)
  # trophic_levels() signals failure by returning a numeric instead of a
  # list; `is.numeric()` is more robust than comparing class() to a string.
  if (is.numeric(TL)) {
    return(0)
  }

  # Ordered size/diet classes shared by predator and prey columns.
  size_classes <- c("Microherbivore", "Mesoherbivore", "Megaherbivore",
                    "Microcarnivore", "Mesocarnivore", "Megacarnivore")

  TL_links <- fw %>%
    mutate(
      `Pred class` = map(Predator, function(x) {
        filter(TL$species_class, Binomial.1.2 == x)$Size
      }) %>%
        unlist() %>%
        factor(levels = size_classes),
      `Prey class` = map(Prey, function(x) {
        filter(TL$species_class, Binomial.1.2 == x)$Size
      }) %>%
        unlist() %>%
        factor(levels = size_classes)
    ) %>%
    dplyr::select(`Pred class`, `Prey class`) %>%
    transmute(
      `Pred edge` = map(`Pred class`, function(x) which(levels(x) == x)) %>%
        unlist(),
      `Prey edge` = map(`Prey class`, function(x) which(levels(x) == x)) %>%
        unlist()
    )

  # Graph ----
  # Edges point prey -> predator; parallel edges are collapsed with summed
  # weights, other edge attributes dropped.
  g <- graph(t(TL_links[, 2:1])) %>%
    set_edge_attr("weight", value = 1) %>%
    simplify(edge.attr.comb = list(weight = "sum", "ignore"))
  return(g)
}
## Text data analysis (comments translated from Korean)

# 1) Load the text-mining package
library(tm)

# 2) Load the built-in `crude` corpus and build a term-document matrix
data("crude")
tdm <- TermDocumentMatrix(crude)
tdm

# 3) Inspect the matrix entries
inspect(tdm)

# 4) Terms occurring at least 10 times
findFreqTerms(tdm, lowfreq = 10)

# 5) Terms strongly associated (correlation >= 0.7) with "oil"
findAssocs(tdm, "oil", 0.7)

# 6) Bar chart of term frequencies
freq <- sort(rowSums(as.matrix(tdm)), decreasing = TRUE)
freq
wf <- data.frame(word = names(freq), freq = freq)
wf

library(ggplot2)
ggplot(subset(wf, freq > 20), aes(word, freq)) +
  geom_bar(stat = "identity") +
  theme_bw()
/lession7/text_mining_basic.R
no_license
chunam76/RStudy
R
false
false
613
r
## Text data analysis (comments translated from Korean)

# 1) Load the text-mining package
library(tm)

# 2) Load the built-in `crude` corpus and build a term-document matrix
data("crude")
tdm <- TermDocumentMatrix(crude)
tdm

# 3) Inspect the matrix entries
inspect(tdm)

# 4) Terms occurring at least 10 times
findFreqTerms(tdm, lowfreq = 10)

# 5) Terms strongly associated (correlation >= 0.7) with "oil"
findAssocs(tdm, "oil", 0.7)

# 6) Bar chart of term frequencies
freq <- sort(rowSums(as.matrix(tdm)), decreasing = TRUE)
freq
wf <- data.frame(word = names(freq), freq = freq)
wf

library(ggplot2)
ggplot(subset(wf, freq > 20), aes(word, freq)) +
  geom_bar(stat = "identity") +
  theme_bw()
# plot4.R: 2x2 panel of household power-consumption plots, written to
# plot4.png.  Expects load_data.R to define `two_day_data` with a DateTime
# column plus the power/voltage/sub-metering columns used below.
source("load_data.R")

png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

with(two_day_data, {
  # Top-left: global active power over time
  plot(DateTime, Global_active_power, type = "l",
       xlab = "", ylab = "Global Active Power")

  # Top-right: voltage over time
  plot(DateTime, Voltage, type = "l",
       xlab = "datetime", ylab = "Voltage")

  # Bottom-left: the three sub-metering series with a legend
  plot(DateTime, Sub_metering_1, type = "l", col = "black",
       xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
  legend("topright", bty = "n", col = c("black", "red", "blue"),
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)

  # Bottom-right: global reactive power over time
  plot(DateTime, Global_reactive_power, type = "l", col = "black",
       xlab = "datetime", ylab = "Global_reactive_power")
})

dev.off()
/plot4.R
no_license
ychua9/ExData_Plotting1
R
false
false
954
r
# plot4.R: 2x2 panel of household power-consumption plots, written to
# plot4.png.  Expects load_data.R to define `two_day_data` with a DateTime
# column plus the power/voltage/sub-metering columns used below.
source("load_data.R")

png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

with(two_day_data, {
  # Top-left: global active power over time
  plot(DateTime, Global_active_power, type = "l",
       xlab = "", ylab = "Global Active Power")

  # Top-right: voltage over time
  plot(DateTime, Voltage, type = "l",
       xlab = "datetime", ylab = "Voltage")

  # Bottom-left: the three sub-metering series with a legend
  plot(DateTime, Sub_metering_1, type = "l", col = "black",
       xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
  legend("topright", bty = "n", col = c("black", "red", "blue"),
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)

  # Bottom-right: global reactive power over time
  plot(DateTime, Global_reactive_power, type = "l", col = "black",
       xlab = "datetime", ylab = "Global_reactive_power")
})

dev.off()
# ---------------------------------------------------Downloading and Installing Libraries --------------------------------------------------- install.packages("ff") install.packages("sqldf") install.packages("rpart") install.packages("Amelia") install.packages("class") install.packages("caret") install.packages("corrplot", dependencies = TRUE) install.packages("e1071") install.packages("truncnorm") install.packages("grDevices") install.packages("pca") install.packages("neuralnet") install.packages("devtools") install.packages("ggplot2") install.packages("ggvis") install.packages("pROC") install.packages("ROCR") install.packages("randomForest") install.packages("caTools") library(ff) library(sqldf) library(rpart) library(corrplot) library(Amelia) library(class) library(caret) library(e1071) library(truncnorm) library(grDevices) library(pca) library(neuralnet) library(devtools) install_github("ggbiplot", "vqv") library(ggbiplot) library(ggplot2) library(ggvis) library(pROC) library(ROCR) library(randomForest) library(caTools) args <- commandArgs(TRUE) # ----------------------------------------------- Load the tables into dataFrames -------------------------------------------- events <- read.table(file=args[1], nrows = 1000000, header = TRUE,sep = ",") page_views_sample <- read.table(file=args[2], header=TRUE, nrows = 1000000, sep = ",") clicks_train <- read.table(file=args[3],nrows = 1000000, header=TRUE, sep = ",") clicks_test <- read.table(file=args[4], header=TRUE, nrows = 1000000, sep = ",") promoted_content <- read.table(file=args[5], header=TRUE, nrows = 1000000, sep = ",") document_entities <- read.table(file=args[6], header=TRUE, nrows = 1000000, sep = ",") document_topics <- read.table(file=args[7], header=TRUE, nrows = 1000000, sep = ",") document_categories <- read.table(file=args[8], header=TRUE, nrows = 1000000, sep = ",") # --------------------------------------CHECK FOR NA AND INF VALUES, DATATYPES AND OUTLIERS------------------------------------------ 
# Report, for every column of `df`, whether it contains any NA / Inf / NaN /
# NULL values.  Prints the four logical vectors (mirroring the original
# per-table indx_* checks) and returns them invisibly.
check_missing <- function(df) {
  checks <- list(
    na   = apply(df, 2, function(x) any(is.na(x))),
    inf  = apply(df, 2, function(x) any(is.infinite(x))),
    nan  = apply(df, 2, function(x) any(is.nan(x))),
    null = apply(df, 2, function(x) any(is.null(x)))
  )
  print(checks)
  invisible(checks)
}

# -- first: clicks_test --------------------------------------------------------
typeof(clicks_test$display_id)
boxplot.stats(clicks_test$display_id)
typeof(clicks_test$ad_id)
boxplot.stats(clicks_test$ad_id)
check_missing(clicks_test)

# -- second: events ------------------------------------------------------------
typeof(events$display_id)
boxplot.stats(events$display_id)
typeof(events$uuid)
# boxplot cannot summarise uuid: it is not numeric/character-numeric.
boxplot.stats(events$uuid)
typeof(events$document_id)
# document_id values are independent identifiers; "outliers" are meaningless.
boxplot.stats(events$document_id)
typeof(events$timestamp)
boxplot.stats(events$timestamp)
typeof(events$platform)
typeof(events$geo_location)
check_missing(events)

# -- third: clicks_train -------------------------------------------------------
typeof(clicks_train$display_id)
boxplot.stats(clicks_train$display_id)
typeof(clicks_train$ad_id)
boxplot.stats(clicks_train$ad_id)
typeof(clicks_train$clicked)
check_missing(clicks_train)

# -- fourth: promoted_content (continued in the next section) ------------------
typeof(promoted_content$ad_id)
boxplot.stats(promoted_content$ad_id)
# ---------------- Data-quality checks (continued) and modelling pipeline ----------------

# Report, for every column of `df`, whether it contains any NA / Inf / NaN /
# NULL values.  Prints the four logical vectors and returns them invisibly.
check_missing <- function(df) {
  checks <- list(
    na   = apply(df, 2, function(x) any(is.na(x))),
    inf  = apply(df, 2, function(x) any(is.infinite(x))),
    nan  = apply(df, 2, function(x) any(is.nan(x))),
    null = apply(df, 2, function(x) any(is.null(x)))
  )
  print(checks)
  invisible(checks)
}

# Draw a TPR-vs-FPR ROC curve for a vector of 0/1 predictions against `labels`.
plot_roc <- function(preds, labels, title = NULL, col = "red", lwd = 3) {
  rocr_pred <- prediction(predictions = preds, labels = labels)
  rocr_perf <- performance(rocr_pred, measure = "tpr", x.measure = "fpr")
  plot(rocr_perf, col = col, lwd = lwd, main = title)
}

# -- promoted_content (remaining columns) --------------------------------------
typeof(promoted_content$document_id)
typeof(promoted_content$campaign_id)
# NOTE(review): the original re-ran boxplot.stats on ad_id here (already
# summarised above); campaign_id was almost certainly intended.
boxplot.stats(promoted_content$campaign_id)
typeof(promoted_content$advertiser_id)
boxplot.stats(promoted_content$advertiser_id)
check_missing(promoted_content)

# -- fifth: page_views_sample --------------------------------------------------
typeof(page_views_sample$uuid)
boxplot.stats(page_views_sample$uuid)
typeof(page_views_sample$document_id)
boxplot.stats(page_views_sample$document_id)
typeof(page_views_sample$timestamp)
boxplot.stats(page_views_sample$timestamp)
typeof(page_views_sample$platform)
boxplot.stats(page_views_sample$platform)
typeof(page_views_sample$geo_location)
typeof(page_views_sample$traffic_source)
boxplot.stats(page_views_sample$traffic_source)
check_missing(page_views_sample)

# -- sixth: document_entities --------------------------------------------------
typeof(document_entities$document_id)
boxplot.stats(document_entities$document_id)
typeof(document_entities$entity_id)
boxplot.stats(document_entities$entity_id)
typeof(document_entities$confidence_level)
boxplot.stats(document_entities$confidence_level)
check_missing(document_entities)

# -- seventh: document_topics --------------------------------------------------
typeof(document_topics$document_id)
boxplot.stats(document_topics$document_id)
typeof(document_topics$topic_id)
boxplot.stats(document_topics$topic_id)
typeof(document_topics$confidence_level)
boxplot(document_topics$confidence_level)
boxplot.stats(document_topics$confidence_level)
check_missing(document_topics)

# -- eighth: document_categories -----------------------------------------------
typeof(document_categories$document_id)
boxplot.stats(document_categories$document_id)
typeof(document_categories$category_id)
boxplot.stats(document_categories$category_id)
typeof(document_categories$confidence_level)
boxplot.stats(document_categories$confidence_level)
check_missing(document_categories)

# Missingness map of the page-views sample.
missmap(page_views_sample, legend = TRUE, col = c("wheat", "darkred"))

# ------------------- One-hot encode platform and traffic_source -------------------
# model.matrix(~ x - 1) yields one indicator column per factor level
# (platform1..platform3, traffic_source1..traffic_source3); the SQL joins
# below refer to those columns by name.
page_views_sample$platform <- as.factor(page_views_sample$platform)
page_views_sample$traffic_source <- as.factor(page_views_sample$traffic_source)
platform_dummies <- model.matrix(~ platform - 1, data = page_views_sample)
traffic_dummies  <- model.matrix(~ traffic_source - 1, data = page_views_sample)
page_views <- cbind(page_views_sample, platform_dummies, traffic_dummies)

# Sanity check: rows whose geo_location is the literal string 'NA'.
events_pageviews_na_val <- sqldf("select * from page_views as b")
View(events_pageviews_na_val)
events_pageviews_na <- sqldf("
  select a.display_id, a.uuid, a.document_id, b.geo_location,
         b.platform1, b.platform2, b.platform3,
         b.traffic_source1, b.traffic_source2, b.traffic_source3
  from events as a, page_views as b
  where a.uuid = b.uuid and b.geo_location = 'NA'")
# NOTE(review): the original had stray dashes appended to this call
# ("View(...)--------#error is coming"), a syntax error in R; fixed here.
View(events_pageviews_na)

# --------------------------------- Table joins ---------------------------------
# events + page_views on uuid.
events_pageviews <- sqldf("
  select a.display_id, a.uuid, a.document_id, a.geo_location,
         b.platform1, b.platform2, b.platform3,
         b.traffic_source1, b.traffic_source2, b.traffic_source3
  from events as a, page_views as b
  where a.uuid = b.uuid")

# ... + clicks_train on display_id (adds ad_id and the clicked label).
events_pageviews_clickstrain <- sqldf("
  select a.display_id, a.uuid, a.document_id, a.geo_location,
         a.platform1, a.platform2, a.platform3,
         a.traffic_source1, a.traffic_source2, a.traffic_source3,
         b.ad_id, b.clicked
  from events_pageviews as a, clicks_train as b
  where a.display_id = b.display_id")

# ... + promoted_content on ad_id (adds campaign and advertiser ids).
events_pageviews_clickstrain_promotedcontent <- sqldf("
  select a.display_id, a.uuid, a.document_id,
         a.platform1, a.platform2, a.platform3, a.geo_location,
         a.traffic_source1, a.traffic_source2, a.traffic_source3,
         a.ad_id, b.campaign_id, b.advertiser_id, a.clicked
  from events_pageviews_clickstrain as a, promoted_content as b
  where a.ad_id = b.ad_id")

# ... + document_topics on document_id.
final_table <- sqldf("
  select a.display_id, a.uuid, a.document_id,
         a.platform1, a.platform2, a.platform3, a.geo_location,
         a.traffic_source1, a.traffic_source2, a.traffic_source3,
         a.ad_id, b.topic_id, b.confidence_level as topic_conf_level, a.clicked
  from events_pageviews_clickstrain_promotedcontent as a, document_topics as b
  where a.document_id = b.document_id")

# ... + document_entities on document_id.
final_table1 <- sqldf("
  select a.display_id, a.uuid, a.document_id,
         a.platform1, a.platform2, a.platform3, a.geo_location,
         a.traffic_source1, a.traffic_source2, a.traffic_source3,
         a.ad_id, a.topic_id, a.topic_conf_level,
         b.entity_id, b.confidence_level as entity_confidence, a.clicked
  from final_table as a, document_entities as b
  where a.document_id = b.document_id")

# ... + document_categories on document_id -> the modelling table.
final_table2 <- sqldf("
  select a.display_id, a.uuid, a.document_id,
         a.platform1, a.platform2, a.platform3, a.geo_location,
         a.traffic_source1, a.traffic_source2, a.traffic_source3,
         a.ad_id, a.topic_id, a.topic_conf_level,
         a.entity_id, a.entity_confidence,
         b.category_id, b.confidence_level as category_confidence, a.clicked
  from final_table1 as a, document_categories as b
  where a.document_id = b.document_id")

# ----------------- Correlation matrix of the numeric features -----------------
correlationMatrix <- cor(final_table2[, c(1, 3:6, 8:13, 15:18)])
print(correlationMatrix)
corrplot(correlationMatrix, method = "number")

# --------------- Feature selection: rank features by importance ---------------
final_table2$clicked <- as.factor(final_table2$clicked)
# 10-fold CV; summary() of the GBM fit reports relative variable influence.
# (An unused repeated-cv trainControl object in the original was dropped.)
cv_control <- trainControl(method = "cv", number = 10)
model_rank <- train(
  clicked ~ display_id + document_id + ad_id + topic_id + category_id +
    platform1 + platform2 + platform3 +
    traffic_source1 + traffic_source2 + traffic_source3 +
    topic_conf_level + entity_confidence + category_confidence,
  data = final_table2[, c(1, 3:6, 8:13, 15:18)],
  method = "gbm", preProcess = "scale", trControl = cv_control)
summary(model_rank)

# ------------------------------ Train / test split ------------------------------
d <- final_table2
# NOTE(review): the original ran these summaries *before* `d` was created,
# which errors; moved here.  density()/sd() need a numeric vector, so the
# factor label is converted back to 0/1 first.
summary(d$clicked)
clicked_num <- as.numeric(as.character(d$clicked))
plot(density(clicked_num))
sd(clicked_num, na.rm = TRUE)

# Exploratory PCA scratchpad kept from the original -- NOT TO BE RUN:
# dim(d); str(d); sum(d); colnames(d); apply(d, 2, var)
# pca <- prcomp(d); par(mar = rep(2, 4)); plot(pca)
# pca$rotation <- -pca$rotation; pca$x <- -pca$x   # flip biplot orientation
# biplot(pca, scale = 0)

# 50% subsample, then a 90/10 train/test split of that subsample.
trainIndex <- sample(seq_len(nrow(d)), 0.5 * nrow(d))
temp <- d[trainIndex, ]
summary(d$clicked)
trainIndex <- sample(seq_len(nrow(temp)), 0.9 * nrow(temp))
train <- temp[trainIndex, ]
test <- temp[-trainIndex, ]

# Class balance of the original data vs the reduced splits.
table(final_table2$clicked) / nrow(final_table2)
table(train$clicked) / nrow(train)
table(test$clicked) / nrow(test)

# ----------------- PCA: variance of the entire dataset -----------------
d.data <- d[, c(1, 3:6, 8:13, 15:17)]
d.clicked <- d[, 18]
d.pca <- prcomp(d.data, center = TRUE, scale. = TRUE)
plot(d.pca, type = "l", main = "Variance of the attributes in the Dataset")
legend("topright",
       legend = c("1 - display_id", "2 - document_id", "3 - platform",
                  "4 - traffic_source", "5 - ad_id", "6 - topic_id",
                  "7 - topic_conf_level", "8 - entity_confidence",
                  "9 - category_id", "10 - category_confidence"),
       box.col = 1, bg = "white", bty = "o")

# Alternative ggbiplot styling kept from the original:
# g <- ggbiplot(d.pca, obs.scale = 1, var.scale = 1, groups = d.clicked,
#               ellipse = TRUE, circle = TRUE)
# g <- g + scale_color_discrete(name = "")
# g <- g + theme(legend.direction = "horizontal", legend.position = "top")
# print(g)

# BiPlot of the entire dataset.
# See https://www.r-bloggers.com/computing-and-visualizing-pca-in-r/
ggbiplot(d.pca, obs.scale = 0.001, var.scale = 0.001, groups = d.clicked,
         ellipse = TRUE, circle = TRUE, main = "BiPlot of Dataset")

# -------------------- Variable-separation test graph --------------------
plot(test[, c(1, 11)], test$clicked, pch = c(2, 3), cex.main = 1.5,
     frame.plot = FALSE, col = ifelse(test$clicked == 1, 2, 1),
     main = paste("Relationship between ad_id and display_id",
                  "\n for displaying separation",
                  "\n between two classes relative to clicked"))
legend("topleft",
       legend = c("ad_id(clicked)", "ad_id(non-clicked)",
                  "display_id(clicked)", "display_id(non-clicked)"),
       pch = c(2, 2, 3, 3), col = c(2, 1, 2, 1),
       box.col = 1, bg = "white", bty = "o")

# -------------------- Linearity test with the help of an SVM --------------------
# MODEL_1 clicked ~ ad_id + display_id
# MODEL_2 clicked ~ ad_id + display_id + document_id + category_id + platform2 + entity_confidence
# MODEL_3 clicked ~ MODEL_2 terms + traffic_source2 + traffic_source3   (used below)
svm.model <- svm(clicked ~ ad_id + display_id + document_id + category_id +
                   platform2 + entity_confidence + traffic_source2 + traffic_source3,
                 train, cost = 100, gamma = 1)
svm.pred <- predict(svm.model, test[, c(1, 3, 5, 9, 10, 11, 15, 16)])
# NOTE(review): the original stored this in a variable named `svm`,
# shadowing e1071::svm; renamed to avoid masking the function.
svm.class <- as.numeric(as.character(svm.pred))
accuracy_svm <- 100 * mean(svm.class == test[, 18])
# Confusion matrix.
table(pred = svm.class, true = test[, 18])
# plot(x = svm.pred, y = test$clicked, col = "red",
#      main = paste("Real vs predicted using SVM", "\n Linearity Test"),
#      pch = 18, cex = 1.5) + abline(0, 1, lwd = 2) +
#   legend("bottomright", legend = "NN", pch = 18, col = "red", bty = "n")
cat("Accuracy using a linear SVM is: ", accuracy_svm, "\n")
plot_roc(svm.class, test$clicked, title = "ROC Curve for SVM Model")

# ------------------------------- Naive Bayes -------------------------------
train$clicked <- as.factor(train$clicked)
test$clicked <- as.factor(test$clicked)
modelNB <- naiveBayes(clicked ~ ad_id + display_id + document_id + category_id +
                        platform2 + entity_confidence +
                        traffic_source2 + traffic_source3,
                      data = train)
predNB <- predict(modelNB, test[, c(1, 3, 5, 9, 10, 11, 15, 16)], type = "class")
resultsNB <- table(predNB, test[, 18])
# Confusion matrix.
table(pred = predNB, true = test[, 18])
accuracyNB <- sum(diag(resultsNB)) / sum(resultsNB)
cat("Accuracy using Naive Bayes is", accuracyNB * 100, "\n")
plot_roc(as.numeric(as.character(predNB)), test$clicked,
         title = "ROC Curve for Naive Bayes Model")

# ----------------------------------- kNN -----------------------------------
# final_table2$clicked <- factor(final_table2$clicked)
# Feature subset used as the kNN input space.
myvars <- c("document_id", "platform1", "platform2", "platform3",
            "traffic_source1", "traffic_source2", "traffic_source3",
            "ad_id", "topic_id", "topic_conf_level", "entity_confidence",
            "category_id", "category_confidence")
train.knn <- train[myvars]
test.knn <- test[myvars]
# summary(train.knn)

# Accuracy (percent) of a kNN prediction vector against the held-out labels.
# Equivalent to the original 100*sum(==)/nrow(test.knn) computation.
knn_accuracy <- function(pred) {
  100 * mean(test$clicked == pred)
}

knn.1  <- knn(train.knn, test.knn, train$clicked, k = 1)
knn.5  <- knn(train.knn, test.knn, train$clicked, k = 5)
knn.10 <- knn(train.knn, test.knn, train$clicked, k = 10)
knn.20 <- knn(train.knn, test.knn, train$clicked, k = 20)

cat("Accuracy using kNN when k=1 is: ", knn_accuracy(knn.1), "\n")
cat("Accuracy using kNN when k=5 is: ", knn_accuracy(knn.5), "\n")
cat("Accuracy using kNN when k=10 is: ", knn_accuracy(knn.10), "\n")
cat("Accuracy using kNN when k=20 is: ", knn_accuracy(knn.20), "\n")

# ROC curves; knn() returns a factor, so map the levels back to 0/1.
plot_roc(as.numeric(knn.1) - 1, test$clicked)
plot_roc(as.numeric(knn.5) - 1, test$clicked)
plot_roc(as.numeric(knn.10) - 1, test$clicked)
plot_roc(as.numeric(knn.20) - 1, test$clicked)

# ------------------------------- Random Forest -------------------------------
names(train)
rf <- randomForest(
  clicked ~ document_id + platform1 + platform2 + platform3 +
    traffic_source1 + traffic_source2 + traffic_source3 +
    ad_id + topic_id + topic_conf_level + entity_confidence +
    category_id + category_confidence,
  train, ntree = 500, importance = TRUE)
plot(rf)
varImpPlot(rf, sort = TRUE, main = "Variable Importance", n.var = 5)
# NOTE(review): the original named this variable `prediction`, shadowing
# ROCR::prediction; renamed for clarity.
prediction_rf <- predict(rf, test[, 1:17])
accuracy_rf <- 100 * mean(prediction_rf == test$clicked)
cat("Accuracy using Random Forest is: ", accuracy_rf, "\n")
plot_roc(as.numeric(as.character(prediction_rf)), test$clicked,
         col = "green", lwd = 2)

# --------------------------------- Boosting ---------------------------------
boosting <- train(
  clicked ~ ad_id + display_id + document_id + category_id + platform2 +
    entity_confidence + traffic_source2 + traffic_source3,
  method = "gbm", data = train, verbose = FALSE,
  trControl = trainControl(method = "cv", number = 10))
pred_boosting <- predict(boosting, test[, c(1, 3, 5, 9, 10, 11, 15, 16)])
pred_b <- as.numeric(as.character(pred_boosting))
# Confusion matrix.
table(pred = pred_b, true = test$clicked)
plot(boosting)
plot(boosting$finalModel, main = "Error in Boosting ")
print(varImp(boosting))
accuracy_boosting <- 100 * mean(pred_b == test$clicked)
cat("Accuracy using Boosting for the dataset is: ", accuracy_boosting, "\n")
plot(boosting, plotType = "level")   # accuracy across boosting iterations
resampleHist(boosting)               # RMSE / R-squared resampling histograms
plot_roc(pred_b, test$clicked, title = "ROC Curve for Boosting Model",
         col = "green", lwd = 2)

savehistory(file = "20_11_27-11.history")
# ------------------------------------ E N D ------------------------------------
/outbrains_final _v4-0.R
no_license
sunnyanand17/Machine-Learning
R
false
false
25,075
r
# ===========================================================================
# Outbrain click-through prediction: data QC, feature engineering (sqldf
# joins + one-hot encoding), feature ranking, and model comparison
# (SVM, Naive Bayes, kNN, Random Forest, Boosting) with ROC curves.
#
# Usage: Rscript <this_file> events.csv page_views_sample.csv
#        clicks_train.csv clicks_test.csv promoted_content.csv
#        documents_entities.csv documents_topics.csv documents_categories.csv
# ===========================================================================

# ---- Install (only if missing) and load required libraries ---------------
pkgs <- c("ff", "sqldf", "rpart", "Amelia", "class", "caret", "corrplot",
          "e1071", "truncnorm", "neuralnet", "devtools", "ggplot2",
          "ggvis", "pROC", "ROCR", "randomForest", "caTools")
# NOTE(review): the original also ran install.packages("pca") and
# library(pca); no such package exists on CRAN.  PCA below uses
# stats::prcomp() and ggbiplot, so the bogus dependency was dropped.
new_pkgs <- pkgs[!pkgs %in% rownames(installed.packages())]
if (length(new_pkgs) > 0) {
  install.packages(new_pkgs, dependencies = TRUE)
}
invisible(lapply(pkgs, library, character.only = TRUE))

# ggbiplot lives on GitHub; the legacy install_github("ggbiplot", "vqv")
# two-argument call is deprecated -- the "user/repo" form is required.
if (!requireNamespace("ggbiplot", quietly = TRUE)) {
  devtools::install_github("vqv/ggbiplot")
}
library(ggbiplot)

args <- commandArgs(TRUE)

# ---- Load the tables (each capped at 1e6 rows) ----------------------------
events              <- read.table(file = args[1], nrows = 1000000, header = TRUE, sep = ",")
page_views_sample   <- read.table(file = args[2], nrows = 1000000, header = TRUE, sep = ",")
clicks_train        <- read.table(file = args[3], nrows = 1000000, header = TRUE, sep = ",")
clicks_test         <- read.table(file = args[4], nrows = 1000000, header = TRUE, sep = ",")
promoted_content    <- read.table(file = args[5], nrows = 1000000, header = TRUE, sep = ",")
document_entities   <- read.table(file = args[6], nrows = 1000000, header = TRUE, sep = ",")
document_topics    <- read.table(file = args[7], nrows = 1000000, header = TRUE, sep = ",")
document_categories <- read.table(file = args[8], nrows = 1000000, header = TRUE, sep = ",")

# ---- Basic QC: types, outliers, NA / Inf / NaN / NULL ---------------------
# One helper replaces the original's eight near-identical copy/paste
# sections of typeof() / boxplot.stats() / is.na() / is.infinite() checks.
check_table <- function(df, tbl_name = deparse(substitute(df))) {
  cat("==== QC for table:", tbl_name, "====\n")
  for (cn in names(df)) {
    cat("column:", cn, " type:", typeof(df[[cn]]), "\n")
    if (is.numeric(df[[cn]])) {
      # boxplot.stats only makes sense on numeric columns (the original
      # called it on character columns too, which is a no-op at best)
      print(boxplot.stats(df[[cn]])$stats)
    }
  }
  print(apply(df, 2, function(x) any(is.na(x))))        # NA check
  print(apply(df, 2, function(x) any(is.infinite(x))))  # Inf check
  print(apply(df, 2, function(x) any(is.nan(x))))       # NaN check
  print(apply(df, 2, function(x) any(is.null(x))))      # NULL check
}
check_table(clicks_test)
check_table(events)
check_table(clicks_train)
check_table(promoted_content)
check_table(page_views_sample)
check_table(document_entities)
check_table(document_topics)
check_table(document_categories)

# Missingness map: visual audit of NA structure in the page views sample.
missmap(page_views_sample, legend = TRUE, col = c("wheat", "darkred"))

# ---- One-hot encode platform / traffic_source -----------------------------
page_views_sample$platform       <- as.factor(page_views_sample$platform)
page_views_sample$traffic_source <- as.factor(page_views_sample$traffic_source)
sample_m10 <- model.matrix(~platform - 1,       data = page_views_sample)
sample_m11 <- model.matrix(~traffic_source - 1, data = page_views_sample)
page_views <- cbind(page_views_sample, sample_m10, sample_m11)

# ---- Check for NA geo_location after joining (none were found) ------------
events_pageviews_na_val <- sqldf("select * from page_views as b")
View(events_pageviews_na_val)
events_pageviews_na <- sqldf("select a.display_id, a.uuid, a.document_id,
    b.geo_location, b.platform1, b.platform2, b.platform3,
    b.traffic_source1, b.traffic_source2, b.traffic_source3
  from events as a, page_views as b
  where a.uuid = b.uuid and b.geo_location = 'NA'")
# The original line had a stray '--------' appended after this call,
# which is a syntax error in R; it has been removed.
View(events_pageviews_na)

# ---- Join the tables into one modelling frame -----------------------------
# events + page_views (on user id)
events_pageviews <- sqldf("select a.display_id, a.uuid, a.document_id,
    a.geo_location, b.platform1, b.platform2, b.platform3,
    b.traffic_source1, b.traffic_source2, b.traffic_source3
  from events as a, page_views as b
  where a.uuid = b.uuid")

# + clicks_train (on display id) -> adds ad_id and the clicked label
events_pageviews_clickstrain <- sqldf("select a.display_id, a.uuid,
    a.document_id, a.geo_location, a.platform1, a.platform2, a.platform3,
    a.traffic_source1, a.traffic_source2, a.traffic_source3,
    b.ad_id, b.clicked
  from events_pageviews as a, clicks_train as b
  where a.display_id = b.display_id")

# + promoted_content (on ad id) -> adds campaign / advertiser
events_pageviews_clickstrain_promotedcontent <- sqldf("select a.display_id,
    a.uuid, a.document_id, a.platform1, a.platform2, a.platform3,
    a.geo_location, a.traffic_source1, a.traffic_source2, a.traffic_source3,
    a.ad_id, b.campaign_id, b.advertiser_id, a.clicked
  from events_pageviews_clickstrain as a, promoted_content as b
  where a.ad_id = b.ad_id")

# + document_topics (on document id)
final_table <- sqldf("select a.display_id, a.uuid, a.document_id,
    a.platform1, a.platform2, a.platform3, a.geo_location,
    a.traffic_source1, a.traffic_source2, a.traffic_source3, a.ad_id,
    b.topic_id, b.confidence_level as topic_conf_level, a.clicked
  from events_pageviews_clickstrain_promotedcontent as a, document_topics as b
  where a.document_id = b.document_id")

# + document_entities (on document id)
final_table1 <- sqldf("select a.display_id, a.uuid, a.document_id,
    a.platform1, a.platform2, a.platform3, a.geo_location,
    a.traffic_source1, a.traffic_source2, a.traffic_source3, a.ad_id,
    a.topic_id, a.topic_conf_level,
    b.entity_id, b.confidence_level as entity_confidence, a.clicked
  from final_table as a, document_entities as b
  where a.document_id = b.document_id")

# + document_categories (on document id) -> final modelling table
final_table2 <- sqldf("select a.display_id, a.uuid, a.document_id,
    a.platform1, a.platform2, a.platform3, a.geo_location,
    a.traffic_source1, a.traffic_source2, a.traffic_source3, a.ad_id,
    a.topic_id, a.topic_conf_level, a.entity_id, a.entity_confidence,
    b.category_id, b.confidence_level as category_confidence, a.clicked
  from final_table1 as a, document_categories as b
  where a.document_id = b.document_id")

# ---- Correlation matrix of the numeric columns ----------------------------
correlationMatrix <- cor(final_table2[, c(1, 3:6, 8:13, 15:18)])
print(correlationMatrix)
corrplot(correlationMatrix, method = "number")

# ---- Feature selection: rank features by importance (caret + gbm) ---------
final_table2$clicked <- as.factor(final_table2$clicked)
control  <- trainControl(method = "repeatedcv", number = 3, repeats = 10)
control2 <- trainControl(method = "cv", number = 10)
model_rank <- train(
  clicked ~ display_id + document_id + ad_id + topic_id + category_id +
    platform1 + platform2 + platform3 +
    traffic_source1 + traffic_source2 + traffic_source3 +
    topic_conf_level + entity_confidence + category_confidence,
  data = final_table2[, c(1, 3:6, 8:13, 15:18)],
  method = "gbm", preProcess = "scale", trControl = control2)
summary(model_rank)

# ---- Train / test split ---------------------------------------------------
d <- final_table2
# NOTE(review): the original called summary(d$clicked) BEFORE d was
# defined; those calls were moved here, after the assignment.
summary(d$clicked)
sd(as.numeric(as.character(d$clicked)), na.rm = TRUE)

# Exploratory PCA kept from the original (commented out there as well):
# pca <- prcomp(d); plot(pca); pca$rotation <- -pca$rotation;
# pca$x <- -pca$x; biplot(pca, scale = 0)

# 50% subsample, then a 90/10 train/test split of the subsample.
trainIndex <- sample(1:nrow(d), 0.5 * nrow(d))
temp       <- d[trainIndex, ]
trainIndex <- sample(1:nrow(temp), 0.9 * nrow(temp))
train      <- temp[trainIndex, ]
test       <- temp[-trainIndex, ]

# Verify the class ratio is preserved by the split.
table(final_table2$clicked) / nrow(final_table2)   # original data
table(train$clicked) / nrow(train)                 # training split
table(test$clicked) / nrow(test)                   # test split

# ---- PCA: variance of the attributes --------------------------------------
d.data    <- d[, c(1, 3:6, 8:13, 15:17)]
d.clicked <- d[, 18]
d.pca <- prcomp(d.data, center = TRUE, scale. = TRUE)
plot(d.pca, type = "l", main = "Variance of the attributes in the Dataset")
legend('topright',
       legend = c('1 - display_id', '2 - document_id', '3 - platform',
                  '4 - traffic_source', '5 - ad_id', '6 - topic_id',
                  '7 - topic_conf_level', '8 - entity_confidence',
                  '9 - category_id', '10 - category_confidence'),
       box.col = 1, bg = 'white', bty = 'o')

# BiPlot of the whole dataset, coloured by the click label.
ggbiplot(d.pca, obs.scale = 0.001, var.scale = 0.001, groups = d.clicked,
         ellipse = TRUE, circle = TRUE, main = "BiPlot of Dataset")

# ---- Variable-separation test graph ---------------------------------------
plot(test[, c(1, 11)], test$clicked, pch = c(2, 3), cex.main = 1.5,
     frame.plot = FALSE, col = ifelse(test$clicked == 1, 2, 1),
     main = paste("Relationship between ad_id and display_id",
                  "\n for displaying separation",
                  "\n between two classes relative to clicked"))
legend('topleft',
       legend = c('ad_id(clicked)', 'ad_id(non-clicked)',
                  'display_id(clicked)', 'display_id(non-clicked)'),
       pch = c(2, 2, 3, 3), col = c(2, 1, 2, 1),
       box.col = 1, bg = 'white', bty = 'o')

# ---- Shared ROC helper (replaces repeated prediction/performance code) ----
plot_roc <- function(pred_numeric, labels, title, colour = "red") {
  roc_pred <- prediction(predictions = pred_numeric, labels = labels)
  roc_perf <- performance(roc_pred, measure = "tpr", x.measure = "fpr")
  plot(roc_perf, col = colour, lwd = 3, main = title)
}

# ---- Linearity test with SVM ----------------------------------------------
# MODEL_3: clicked ~ ad_id + display_id + document_id + category_id +
#          platform2 + entity_confidence + traffic_source2 + traffic_source3
svm.model <- svm(clicked ~ ad_id + display_id + document_id + category_id +
                   platform2 + entity_confidence +
                   traffic_source2 + traffic_source3,
                 train, cost = 100, gamma = 1)
svm.pred <- predict(svm.model, test[, c(1, 3, 5, 9, 10, 11, 15, 16)])
svm_num  <- as.numeric(as.character(svm.pred))
accuracy_svm <- 100 * mean(svm_num == test[, 18])
table(pred = svm_num, true = test[, 18])   # confusion matrix
cat("Accuracy using a linear SVM is: ", accuracy_svm, "\n")
plot_roc(svm_num, test$clicked, "ROC Curve for SVM Model")

# ---- Naive Bayes ----------------------------------------------------------
train$clicked <- as.factor(train$clicked)
test$clicked  <- as.factor(test$clicked)
modelNB <- naiveBayes(clicked ~ ad_id + display_id + document_id +
                        category_id + platform2 + entity_confidence +
                        traffic_source2 + traffic_source3,
                      data = train)
predNB <- predict(modelNB, test[, c(1, 3, 5, 9, 10, 11, 15, 16)],
                  type = "class")
resultsNB <- table(predNB, test[, 18])
table(pred = predNB, true = test[, 18])    # confusion matrix
accuracyNB <- sum(diag(resultsNB)) / sum(resultsNB)
cat("Accuracy using Naive Bayes is", accuracyNB * 100, "\n")
plot_roc(as.numeric(as.character(predNB)), test$clicked,
         "ROC Curve for Naive Bayes Model")

# ---- kNN (k = 1, 5, 10, 20) -----------------------------------------------
myvars <- c("document_id", "platform1", "platform2", "platform3",
            "traffic_source1", "traffic_source2", "traffic_source3",
            "ad_id", "topic_id", "topic_conf_level", "entity_confidence",
            "category_id", "category_confidence")
train.knn <- train[myvars]
test.knn  <- test[myvars]
for (k in c(1, 5, 10, 20)) {
  knn_fit <- knn(train.knn, test.knn, train$clicked, k = k)
  # 100*sum(==)/100 * 100 / n in the original simplifies to 100 * mean(==)
  accuracy_k <- 100 * mean(test$clicked == knn_fit)
  cat("Accuracy using kNN when k=", k, " is: ", accuracy_k, "\n", sep = "")
  plot_roc(as.numeric(knn_fit) - 1, test$clicked,
           paste("ROC Curve for kNN, k =", k))
}

# ---- Random Forest --------------------------------------------------------
names(train)
rf <- randomForest(clicked ~ document_id + platform1 + platform2 +
                     platform3 + traffic_source1 + traffic_source2 +
                     traffic_source3 + ad_id + topic_id + topic_conf_level +
                     entity_confidence + category_id + category_confidence,
                   train, ntree = 500, importance = TRUE)
plot(rf)
varImpPlot(rf, sort = TRUE, main = "Variable Importance", n.var = 5)
prediction_rf <- predict(rf, test[, 1:17])
accuracy_rf <- 100 * mean(prediction_rf == test$clicked)
cat("Accuracy using Random Forest is: ", accuracy_rf, "\n")
plot_roc(as.numeric(as.character(prediction_rf)), test$clicked,
         "ROC Curve for Random Forest Model", colour = "green")

# ---- Boosting (gbm via caret) ---------------------------------------------
boosting <- train(clicked ~ ad_id + display_id + document_id + category_id +
                    platform2 + entity_confidence +
                    traffic_source2 + traffic_source3,
                  method = "gbm", data = train, verbose = FALSE,
                  trControl = trainControl(method = "cv", number = 10))
pred_boosting <- predict(boosting, test[, c(1, 3, 5, 9, 10, 11, 15, 16)])
pred_b <- as.numeric(as.character(pred_boosting))
table(pred = pred_b, true = test$clicked)  # confusion matrix
plot(boosting)
plot(boosting$finalModel, main = "Error in Boosting ")
print(varImp(boosting))
accuracy_boosting <- 100 * mean(pred_b == test$clicked)
cat("Accuracy using Boosting for the dataset is: ", accuracy_boosting, "\n")
plot(boosting, plotType = "level")  # boosting iterations
resampleHist(boosting)              # RMSE / Rsquared resampling graphs
plot_roc(pred_b, test$clicked, "ROC Curve for Boosting Model",
         colour = "green")

# ---------------------------------------------------------------------------
savehistory(file = "20_11_27-11.history")
# ------------------------------- E N D -------------------------------------
# DSAnalysis.R --------------------------------------------------------------
# Differential splicing (DS) analysis of simulated Normal vs Tumor RNA-seq
# data with four tools: SplicingCompass, DEXSeq, limma::diffSplice and
# Cufflinks (cuffdiff splicing output).
# NOTE(review): `iso_info` (the isoform/gene annotation table used for
# filtering throughout) is assumed to already be in the workspace before
# this script runs -- confirm it is loaded upstream.
library(SplicingCompass)
library(DEXSeq)
library(limma)
library(edgeR)  # DGEList / cpm / calcNormFactors are used below but
                # edgeR was never loaded in the original script

setwd("/path_to_DS_scenario/sim_i/")
conditions <- factor(c(rep("Normal", 4), rep("Tumor", 4)),
                     levels = c("Normal", "Tumor"))

# ---- SplicingCompass ------------------------------------------------------
expInf <- new("ExperimentInfo")
expInf <- setDescription(expInf, "NormalVsTumor")
expInf <- setGroupInfo(expInf,
                       groupName1 = "Normal", sampleNumsGroup1 = 1:4,
                       groupName2 = "Tumor",  sampleNumsGroup2 = 5:8)
covBedCountFilesNormal <- c(
  "/path_to_quantification_covBed_sim_scenario/SRR057649.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057650.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057651.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057652.covBed.counts")
covBedCountFilesTumor <- c(
  "/path_to_quantification_covBed_sim_scenario/SRR057631.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057643.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057645.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057648.covBed.counts")
# Samples 1-4 were declared "Normal" in setGroupInfo above, so the Normal
# files must come first.  The original passed c(Tumor, Normal), which
# silently swapped the group labels (the DEXSeq section below uses
# Normal-first order, confirming the intent).
expInf <- setCovBedCountFiles(expInf,
                              c(covBedCountFilesNormal, covBedCountFilesTumor))
junctionBedFilesNormal <- c(
  "/path_to_alignment_genome_sim_scenario/SRR057649/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057650/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057651/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057652/junctions.bed")
# The original wrapped this vector in a second, unclosed c( -- a syntax
# error; one c() is enough.
junctionBedFilesTumor <- c(
  "/path_to_alignment_genome_sim_scenario/SRR057631/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057643/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057645/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057648/junctions.bed")
expInf <- setJunctionBedFiles(expInf,
                              c(junctionBedFilesNormal, junctionBedFilesTumor))
# The original used typographic quotes here, which R cannot parse.
expInf <- setReferenceAnnotation(expInf,
                                 "/path_to_annotation_files/flattened.splcmp.gtf")
referenceAnnotationFormat <- list(IDFieldName = "geneSymbol", idValSep = " ")
expInf <- setReferenceAnnotationFormat(expInf, referenceAnnotationFormat)
checkExperimentInfo(expInf)

## Construct an object of class CountTable and run SplicingCompass.
mycountTable <- new("CountTable")
mycountTable <- constructCountTable(mycountTable, nCores = 20,
                                    printDotPerGene = TRUE)
sc <- new("SplicingCompass")
sc <- constructSplicingCompass(sc, mycountTable,
                               minOverallJunctionReadSupport = 5, nCores = 20)
# Significant DS genes (BH-adjusted p < 0.05).
sc <- initSigGenesFromResults(sc, adjusted = TRUE, threshold = 0.05)
sigGenes <- getSignificantGeneSymbols(sc)
# Data frame of tested genes and corresponding p-values.
resTab <- getResultTable(sc)
resTab <- resTab[resTab$gene_id %in% iso_info$gene_id, ]
# Restrict the annotation to genes SplicingCompass actually tested.
genesTested <- getAllTestedGenes(sc)
iso_info_SC <- iso_info[iso_info$gene_id %in% genesTested, ]
sigGenes <- getSignificantGeneSymbols(sc)
sigGenes <- sigGenes[sigGenes %in% iso_info_SC$gene_id]
save(resTab, file = "SCresultTableNOv.RData")
save(sc, file = "SCobjectNOv.RData")
save(mycountTable, file = "SCCountTableNov.RData")

# ---- DEXSeq ---------------------------------------------------------------
# The original was missing the closing paren of the outer paste() and
# used the default sep = " ", which would have inserted a space in the
# paths; paste0 builds the filenames correctly.
countfiles <- paste0("path_to_quantification_DEXSeq_sim_scenario/htseq_sim_",
                     "SRR0576", c(49:52, 31, 43, 45, 48), ".htseq.counts")
# Build the sample annotation.  The original had a stray `levels=` column,
# an extra closing paren, and assigned 16 row names to an 8-row frame.
sample_data <- data.frame(condition = conditions)
row.names(sample_data) <- c(paste0("C1R", 1:4), paste0("C2R", 1:4))
design <- ~sample + exon + condition:exon
# Build the DEXSeq count matrix from the HTSeq output.
count_matrix <- DEXSeqDataSetFromHTSeq(
  countfiles, sample_data, design,
  flattenedfile = "/path_to_annotation_files/flattened.dexseq.gtf")
count_matrix <- estimateSizeFactors(count_matrix)
count_matrix <- estimateDispersions(count_matrix, maxit = 500,
                                    BPPARAM = MulticoreParam(workers = 18))
fullModel    <- ~sample + exon + condition:exon
reducedModel <- ~sample + exon
count_matrix <- testForDEU(count_matrix, fullModel = fullModel,
                           reducedModel = reducedModel,
                           BPPARAM = MulticoreParam(workers = 20))
count_matrix <- estimateExonFoldChanges(count_matrix,
                                        fitExpToVar = "condition",
                                        BPPARAM = MulticoreParam(workers = 20),
                                        denominator = "Normal")
myresults <- DEXSeqResults(count_matrix)
perGeneQ <- perGeneQValue(myresults)
myresultsDF <- as.data.frame(myresults)
myresultsDF <- myresultsDF[!is.na(myresultsDF$padj), ]
# Attach the per-gene q-value to every exon row of that gene.
myresultsDF$qvalGene <- do.call(c, lapply(seq_len(nrow(myresultsDF)),
  function(i) {
    perGeneQ[names(perGeneQ) == myresultsDF$groupID[i]]
  }))
myresultsDF <- myresultsDF[myresultsDF[, "groupID"] %in%
                             unique(iso_info$gene_id), ]
iso_info_dexseq <- iso_info[iso_info$gene_id %in%
                              unique(myresultsDF[, "groupID"]), ]
DEXGenes <- unique(myresultsDF[myresultsDF$qvalGene < 0.05, "groupID"])
save(count_matrix, file = "DEXSeqCountMatrixSim.RData")

# ---- limma diffSplice -----------------------------------------------------
cm   <- counts(count_matrix)[, 1:8]
cm2  <- as.data.frame(rowRanges(count_matrix))
geneInfo <- cm2[, c(1:7)]
y.all <- DGEList(counts = cm, genes = geneInfo)
# Keep exons expressed (cpm > 1) in at least 3 samples.
isexpr <- rowSums(cpm(y.all) > 1) >= 3
y <- y.all[isexpr, , keep.lib.sizes = FALSE]
save(y, file = "expressCMNover.RData", compress = "xz")
y <- calcNormFactors(y)
design <- model.matrix(~conditions)
v <- voom(y, design, plot = FALSE)
fit <- lmFit(v, design)
fit.de <- eBayes(fit, robust = TRUE)
limmaResults <- data.frame(gene = fit.de$genes,
                           baseMean = exp(fit.de$coefficients[, 1]),
                           logFC = fit.de$coefficients[, 2],
                           pval = fit.de$p.value[, 2])
limmaResults$padj <- p.adjust(limmaResults$pval, method = "BH")
ex <- diffSplice(fit[, "conditionsTumor"], geneid = "groupID",
                 exonid = "start")
DSRes <- topSplice(ex, test = "simes", n = length(ex))
iso_info_limma <- iso_info[iso_info$gene_id %in% DSRes[, "groupID"], ]
DSRes <- DSRes[DSRes$groupID %in% unique(iso_info$gene_id), ]
DELimma <- DSRes[DSRes$FDR < 0.05, "groupID"]
save(ex, file = "limmaDSNOver.RData", compress = "xz")
save(fit.de, file = "fitdeLimmaNOver.RData", compress = "xz")
save(DSRes, file = "LimmaDSRes.RData", compress = "xz")

# ---- Cufflinks DS ---------------------------------------------------------
cuffresults <- read.delim("cufflinks_sim_scenario/splicing.diff")
cuffresults_exp <- cuffresults[cuffresults$status == "OK", ]
cuffresults_exp$test_id <- as.character(cuffresults_exp$test_id)
cuffresults_exp$gene <- as.character(cuffresults_exp$gene)
rownames(cuffresults_exp) <- cuffresults_exp$test_id
# Flag gene/locus pairs reported more than once.  The original mixed the
# names `duplIDs`, `duplicados` (undefined) and `duplIDs$duplIDs`
# (non-existent column); a single consistent flag column is used here.
dup_flag <- duplicated(paste(cuffresults_exp$gene, cuffresults_exp$locus,
                             sep = "_"))
duplIDs <- data.frame(test_id = cuffresults_exp$test_id,
                      gene = cuffresults_exp$gene,
                      locus = cuffresults_exp$locus,
                      duplicated = dup_flag)
duplIDs <- duplIDs[duplIDs$duplicated, ]
dim(duplIDs)
# [1] 1968 4
uniqID <- cuffresults_exp$test_id[
  !(paste(cuffresults_exp$gene, cuffresults_exp$locus, sep = "_") %in%
      paste(duplIDs$gene, duplIDs$locus, sep = "_"))]
# For each duplicated gene/locus, keep one test: a significant one if any,
# otherwise the one with the smallest q-value.
aux <- sapply(seq_len(nrow(duplIDs)), function(id) {
  dupl <- cuffresults_exp[(cuffresults_exp$locus == duplIDs[id, "locus"]) &
                            cuffresults_exp$gene == duplIDs[id, "gene"], ,
                          drop = FALSE]
  if (any(dupl$significant == "yes")) {
    id <- dupl$test_id[which(dupl$significant == "yes")[1]]
  } else {
    id <- dupl$test_id[which.min(abs(dupl$q_value))[1]]
  }
  as.character(id)
})
cuffresults_exp <- cuffresults_exp[cuffresults_exp$test_id %in%
                                     c(uniqID, aux), ]
# Convert gene names to Ensembl IDs.
conversion_info <- read.delim(
  "~/path_to_annotation_files/tableEntreZ2Ensembl.tab", header = TRUE)
rownames(conversion_info) <- conversion_info$gene_name
cuffresults_exp <- cuffresults_exp[
  cuffresults_exp$gene %in%
    cuffresults_exp$gene[cuffresults_exp$gene %in%
                           rownames(conversion_info)], ]
cuffresults_exp$gene_id <- as.character(
  conversion_info[cuffresults_exp$gene, "gene_id"])
table(cuffresults_exp$significant == "yes")
DEcuff <- unique(cuffresults_exp$gene_id[cuffresults_exp$q_value <= 0.05])
cuffresults_exp <- cuffresults_exp[cuffresults_exp$gene_id %in%
                                     unique(iso_info$gene_id), ]
iso_info_cuff <- iso_info[iso_info$gene_id %in% cuffresults_exp$gene_id, ]
save(cuffresults_exp, file = "cuffresults_expDS.RData", compress = "xz")
# The original `save("allDS.RData", compress="xz")` would have saved the
# literal string instead of the results -- save the per-tool DS gene lists.
save(sigGenes, DEXGenes, DELimma, DEcuff,
     file = "allDS.RData", compress = "xz")

sessionInfo()
# R version 3.2.5 (2016-04-14)
# Platform: x86_64-pc-linux-gnu (64-bit)
# Running under: Ubuntu 14.04.5 LTS
#
# locale:
# [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C LC_TIME=C
# [4] LC_COLLATE=en_US.UTF-8 LC_MONETARY=C LC_MESSAGES=en_US.UTF-8
# [7] LC_PAPER=C LC_NAME=C LC_ADDRESS=C
# [10] LC_TELEPHONE=C LC_MEASUREMENT=C LC_IDENTIFICATION=C
#
# attached base packages:
# [1] splines stats4 parallel stats graphics grDevices utils
# [8] datasets methods base
#
# other attached packages:
# [1] edgeR_3.12.1 EBSeq_1.10.0
# [3] testthat_1.0.2 gplots_3.0.1
# [5] blockmodeling_0.1.8 NOISeq_2.14.1
# [7] Matrix_1.2-7.1 limma_3.26.9
# [9] DEXSeq_1.16.10 DESeq2_1.10.1
# [11] RcppArmadillo_0.7.400.2.0 Rcpp_0.12.7
# [13] SummarizedExperiment_1.0.2 GenomicRanges_1.22.4
# [15] GenomeInfoDb_1.6.3 IRanges_2.4.8
# [17] S4Vectors_0.8.11 Biobase_2.30.0
# [19] BiocGenerics_0.16.1 SplicingCompass_1.0.1
# [21] BiocParallel_1.4.3 gridExtra_2.2.1
# [23] ggplot2_2.1.0
#
# loaded via a namespace (and not attached):
# [1] locfit_1.5-9.1 lattice_0.20-34 gtools_3.5.0
# [4] Rsamtools_1.22.0 Biostrings_2.38.4 digest_0.6.10
# [7] R6_2.1.3 plyr_1.8.4 chron_2.3-47
# [10] futile.options_1.0.0 acepack_1.3-3.3 RSQLite_1.0.0
# [13] zlibbioc_1.16.0 gdata_2.17.0 data.table_1.9.6
# [16] annotate_1.48.0 rpart_4.1-10 labeling_0.3
# [19] statmod_1.4.26 geneplotter_1.48.0 stringr_1.1.0
# [22] foreign_0.8-67 RCurl_1.95-4.8 biomaRt_2.26.1
# [25] munsell_0.4.3 nnet_7.3-12 Hmisc_3.17-4
# [28] XML_3.98-1.4 crayon_1.3.2 bitops_1.0-6
# [31] grid_3.2.5 xtable_1.8-2 gtable_0.2.0
# [34] DBI_0.5-1 magrittr_1.5 scales_0.4.0
# [37] KernSmooth_2.23-15 stringi_1.1.2 XVector_0.10.0
# [40] hwriter_1.3.2 genefilter_1.52.1 latticeExtra_0.6-28
# [43] futile.logger_1.4.3 Formula_1.2-1 lambda.r_1.1.9
# [46] RColorBrewer_1.1-2 tools_3.2.5 survival_2.39-5
# [49] AnnotationDbi_1.32.3 colorspace_1.2-6 cluster_2.0.4
# [52] caTools_1.17.1
/SoftwareCodes/R_scripts/DSAnalysis.R
no_license
gamerino/benchmarkingDiffExprAndSpl
R
false
false
10,155
r
# DSAnalysis.R
#
# Differential-splicing (DS) analysis of one simulated scenario with four
# approaches: SplicingCompass, DEXSeq, limma diffSplice, and post-processing
# of Cuffdiff "splicing.diff" output.  Results are saved as .RData files in
# the scenario working directory.
#
# NOTE(review): `iso_info` (a data frame with a `gene_id` column describing
# the simulated genes) is read but never created here -- it must be loaded
# into the workspace upstream; confirm against the calling pipeline.
library(SplicingCompass)
library(DEXSeq)
library(limma)
# FIX: DGEList(), cpm() and calcNormFactors() below come from edgeR, which
# the original script never attached.
library(edgeR)

setwd("/path_to_DS_scenario/sim_i/")

# Two groups, four replicates each; "Normal" is the reference level.
conditions <- factor(c(rep("Normal", 4), rep("Tumor", 4)),
                     levels = c("Normal", "Tumor"))

## ---- SplicingCompass ---------------------------------------------------
expInf <- new("ExperimentInfo")
expInf <- setDescription(expInf, "NormalVsTumor")
expInf <- setGroupInfo(expInf,
                       groupName1 = "Normal", sampleNumsGroup1 = 1:4,
                       groupName2 = "Tumor",  sampleNumsGroup2 = 5:8)

covBedCountFilesNormal <- c(
  "/path_to_quantification_covBed_sim_scenario/SRR057649.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057650.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057651.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057652.covBed.counts")
covBedCountFilesTumor <- c(
  "/path_to_quantification_covBed_sim_scenario/SRR057631.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057643.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057645.covBed.counts",
  "/path_to_quantification_covBed_sim_scenario/SRR057648.covBed.counts")
# FIX: files must follow the sample numbering declared in setGroupInfo()
# (samples 1-4 = Normal, 5-8 = Tumor).  The original passed the Tumor files
# first, mislabelling the two groups.
expInf <- setCovBedCountFiles(expInf,
                              c(covBedCountFilesNormal, covBedCountFilesTumor))

junctionBedFilesNormal <- c(
  "/path_to_alignment_genome_sim_scenario/SRR057649/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057650/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057651/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057652/junctions.bed")
# FIX: removed a stray nested c( that left an unbalanced parenthesis.
junctionBedFilesTumor <- c(
  "/path_to_alignment_genome_sim_scenario/SRR057631/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057643/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057645/junctions.bed",
  "/path_to_alignment_genome_sim_scenario/SRR057648/junctions.bed")
# Same ordering fix as for the covBed count files above.
expInf <- setJunctionBedFiles(expInf,
                              c(junctionBedFilesNormal, junctionBedFilesTumor))

# FIX: the annotation path was wrapped in typographic quotes, a parse error.
expInf <- setReferenceAnnotation(expInf,
                                 "/path_to_annotation_files/flattened.splcmp.gtf")
referenceAnnotationFormat <- list(IDFieldName = "geneSymbol", idValSep = " ")
expInf <- setReferenceAnnotationFormat(expInf, referenceAnnotationFormat)
checkExperimentInfo(expInf)

## Constructing an object of class CountTable
mycountTable <- new("CountTable")
# NOTE(review): the SplicingCompass workflow attaches the experiment info to
# the count table before counting; without this, constructCountTable() has no
# file information.  Confirm against the installed SplicingCompass version.
mycountTable <- setExperimentInfo(mycountTable, expInf)
mycountTable <- constructCountTable(mycountTable, nCores = 20,
                                    printDotPerGene = TRUE)

sc <- new("SplicingCompass")
sc <- constructSplicingCompass(sc, mycountTable,
                               minOverallJunctionReadSupport = 5, nCores = 20)
# significant DS genes at BH-adjusted p < 0.05
sc <- initSigGenesFromResults(sc, adjusted = TRUE, threshold = 0.05)
# data frame with tested genes and corresponding p-values,
# restricted to the simulated genes
resTab <- getResultTable(sc)
resTab <- resTab[resTab$gene_id %in% iso_info$gene_id, ]
# restrict significant genes to those both simulated and tested
genesTested <- getAllTestedGenes(sc)
iso_info_SC <- iso_info[iso_info$gene_id %in% genesTested, ]
sigGenes <- getSignificantGeneSymbols(sc)
sigGenes <- sigGenes[sigGenes %in% iso_info_SC$gene_id]
save(resTab, file = "SCresultTableNOv.RData")
save(sc, file = "SCobjectNOv.RData")
save(mycountTable, file = "SCCountTableNov.RData")

## ---- DEXSeq ------------------------------------------------------------
# FIX: the original nested paste() was missing its closing parenthesis;
# paste0() vectorises over the run accessions and yields the same file names.
countfiles <- paste0("path_to_quantification_DEXSeq_sim_scenario/htseq_sim_",
                     "SRR0576", c(49:52, 31, 43, 45, 48), ".htseq.counts")

# Design matrix for DEXSeq.
# FIX: `levels=` is not a data.frame() argument (the factor already carries
# its levels) and the original call had an extra closing parenthesis.
sample_data <- data.frame(condition = conditions)
# FIX: 8 samples need 8 row names; the original generated 16.
row.names(sample_data) <- c(paste0("C1R", 1:4), paste0("C2R", 1:4))
design <- ~ sample + exon + condition:exon

# Build the DEXSeq count object from the HTSeq output
count_matrix <- DEXSeqDataSetFromHTSeq(
  countfiles, sample_data, design,
  flattenedfile = "/path_to_annotation_files/flattened.dexseq.gtf")
count_matrix <- estimateSizeFactors(count_matrix)
count_matrix <- estimateDispersions(count_matrix, maxit = 500,
                                    BPPARAM = MulticoreParam(workers = 18))
fullModel    <- ~ sample + exon + condition:exon
reducedModel <- ~ sample + exon
count_matrix <- testForDEU(count_matrix,
                           fullModel = fullModel, reducedModel = reducedModel,
                           BPPARAM = MulticoreParam(workers = 20))
count_matrix <- estimateExonFoldChanges(count_matrix,
                                        fitExpToVar = "condition",
                                        BPPARAM = MulticoreParam(workers = 20),
                                        denominator = "Normal")
myresults <- DEXSeqResults(count_matrix)
perGeneQ  <- perGeneQValue(myresults)
myresultsDF <- as.data.frame(myresults)
myresultsDF <- myresultsDF[!is.na(myresultsDF$padj), ]
# per-gene q-value looked up by gene id (vectorised name matching replaces
# the original per-row lapply)
myresultsDF$qvalGene <- unname(perGeneQ[as.character(myresultsDF$groupID)])
myresultsDF <- myresultsDF[myresultsDF[, "groupID"] %in%
                             unique(iso_info$gene_id), ]
iso_info_dexseq <- iso_info[iso_info$gene_id %in%
                              unique(myresultsDF[, "groupID"]), ]
DEXGenes <- unique(myresultsDF[myresultsDF$qvalGene < 0.05, "groupID"])
save(count_matrix, file = "DEXSeqCountMatrixSim.RData")

## ---- limma diffSplice --------------------------------------------------
cm  <- counts(count_matrix)[, 1:8]             # exon counts for the 8 samples
cm2 <- as.data.frame(rowRanges(count_matrix))
geneInfo <- cm2[, c(1:7)]
y.all <- DGEList(counts = cm, genes = geneInfo)
# keep exons with CPM > 1 in at least 3 samples
isexpr <- rowSums(cpm(y.all) > 1) >= 3
y <- y.all[isexpr, , keep.lib.sizes = FALSE]
save(y, file = "expressCMNover.RData", compress = "xz")
y <- calcNormFactors(y)
design <- model.matrix(~ conditions)
v <- voom(y, design, plot = FALSE)
fit <- lmFit(v, design)
fit.de <- eBayes(fit, robust = TRUE)
limmaResults <- data.frame(gene = fit.de$genes,
                           baseMean = exp(fit.de$coefficients[, 1]),
                           logFC = fit.de$coefficients[, 2],
                           pval = fit.de$p.value[, 2])
limmaResults$padj <- p.adjust(limmaResults$pval, method = "BH")
# FIX: model.matrix(~ conditions) names the contrast column after the
# variable, i.e. "conditionsTumor", not "conditionTumor".
ex <- diffSplice(fit[, "conditionsTumor"], geneid = "groupID", exonid = "start")
DSRes <- topSplice(ex, test = "simes", n = length(ex))
iso_info_limma <- iso_info[iso_info$gene_id %in% DSRes[, "groupID"], ]
DSRes <- DSRes[DSRes$groupID %in% unique(iso_info$gene_id), ]
DELimma <- DSRes[DSRes$FDR < 0.05, "groupID"]
save(ex, file = "limmaDSNOver.RData", compress = "xz")
save(fit.de, file = "fitdeLimmaNOver.RData", compress = "xz")
save(DSRes, file = "LimmaDSRes.RData", compress = "xz")

## ---- Cuffdiff splicing.diff post-processing ----------------------------
cuffresults <- read.delim("cufflinks_sim_scenario/splicing.diff")
cuffresults_exp <- cuffresults[cuffresults$status == "OK", ]
cuffresults_exp$test_id <- as.character(cuffresults_exp$test_id)
cuffresults_exp$gene    <- as.character(cuffresults_exp$gene)
rownames(cuffresults_exp) <- cuffresults_exp$test_id

# flag gene/locus combinations reported more than once
is_dup <- duplicated(paste(cuffresults_exp$gene, cuffresults_exp$locus,
                           sep = "_"))
# FIX: the original referenced an undefined object (`duplicados`) and a
# non-existent column (`duplIDs$duplIDs`); the duplication flag computed
# above is what was intended.
duplIDs <- data.frame(test_id = cuffresults_exp$test_id,
                      gene    = cuffresults_exp$gene,
                      locus   = cuffresults_exp$locus,
                      is_dup  = is_dup)
duplIDs <- duplIDs[duplIDs$is_dup, ]
dim(duplIDs)
# [1] 1968 4

# tests whose gene/locus pair occurs only once
uniqID <- cuffresults_exp$test_id[
  !(paste(cuffresults_exp$gene, cuffresults_exp$locus, sep = "_") %in%
      paste(duplIDs$gene, duplIDs$locus, sep = "_"))]

# for duplicated gene/locus pairs keep one representative test:
# a significant one if present, otherwise the one with the lowest q-value
aux <- vapply(seq_len(nrow(duplIDs)), function(i) {
  dupl <- cuffresults_exp[cuffresults_exp$locus == duplIDs[i, "locus"] &
                            cuffresults_exp$gene == duplIDs[i, "gene"], ,
                          drop = FALSE]
  if (any(dupl$significant == "yes")) {
    keep <- dupl$test_id[which(dupl$significant == "yes")[1]]
  } else {
    keep <- dupl$test_id[which.min(abs(dupl$q_value))[1]]
  }
  as.character(keep)
}, character(1))
cuffresults_exp <- cuffresults_exp[cuffresults_exp$test_id %in%
                                     c(uniqID, aux), ]

# convert gene name to Ensembl id
conversion_info <- read.delim("~/path_to_annotation_files/tableEntreZ2Ensembl.tab",
                              header = TRUE)
rownames(conversion_info) <- conversion_info$gene_name
# (simplified from the original double-subset; same rows are kept)
cuffresults_exp <- cuffresults_exp[cuffresults_exp$gene %in%
                                     rownames(conversion_info), ]
cuffresults_exp$gene_id <- as.character(conversion_info[cuffresults_exp$gene,
                                                        "gene_id"])
table(cuffresults_exp$significant == "yes")
DEcuff <- unique(cuffresults_exp$gene_id[cuffresults_exp$q_value <= 0.05])
cuffresults_exp <- cuffresults_exp[cuffresults_exp$gene_id %in%
                                     unique(iso_info$gene_id), ]
iso_info_cuff <- iso_info[iso_info$gene_id %in% cuffresults_exp$gene_id, ]
save(cuffresults_exp, file = "cuffresults_expDS.RData", compress = "xz")
# FIX: save() needs the objects to store and a file= argument; the original
# passed only the file name.  The per-tool DS gene lists are presumably what
# was meant -- confirm against downstream consumers of allDS.RData.
save(sigGenes, DEXGenes, DELimma, DEcuff,
     file = "allDS.RData", compress = "xz")

sessionInfo()
# R version 3.2.5 (2016-04-14)
# Platform: x86_64-pc-linux-gnu (64-bit)
# Running under: Ubuntu 14.04.5 LTS
#
# locale:
# [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C LC_TIME=C
# [4] LC_COLLATE=en_US.UTF-8 LC_MONETARY=C LC_MESSAGES=en_US.UTF-8
# [7] LC_PAPER=C LC_NAME=C LC_ADDRESS=C
# [10] LC_TELEPHONE=C LC_MEASUREMENT=C LC_IDENTIFICATION=C
#
# attached base packages:
# [1] splines stats4 parallel stats graphics grDevices utils
# [8] datasets methods base
#
# other attached packages:
# [1] edgeR_3.12.1 EBSeq_1.10.0
# [3] testthat_1.0.2 gplots_3.0.1
# [5] blockmodeling_0.1.8 NOISeq_2.14.1
# [7] Matrix_1.2-7.1 limma_3.26.9
# [9] DEXSeq_1.16.10 DESeq2_1.10.1
# [11] RcppArmadillo_0.7.400.2.0 Rcpp_0.12.7
# [13] SummarizedExperiment_1.0.2 GenomicRanges_1.22.4
# [15] GenomeInfoDb_1.6.3 IRanges_2.4.8
# [17] S4Vectors_0.8.11 Biobase_2.30.0
# [19] BiocGenerics_0.16.1 SplicingCompass_1.0.1
# [21] BiocParallel_1.4.3 gridExtra_2.2.1
# [23] ggplot2_2.1.0
#
# loaded via a namespace (and not attached):
# [1] locfit_1.5-9.1 lattice_0.20-34 gtools_3.5.0
# [4] Rsamtools_1.22.0 Biostrings_2.38.4 digest_0.6.10
# [7] R6_2.1.3 plyr_1.8.4 chron_2.3-47
# [10] futile.options_1.0.0 acepack_1.3-3.3 RSQLite_1.0.0
# [13] zlibbioc_1.16.0 gdata_2.17.0 data.table_1.9.6
# [16] annotate_1.48.0 rpart_4.1-10 labeling_0.3
# [19] statmod_1.4.26 geneplotter_1.48.0 stringr_1.1.0
# [22] foreign_0.8-67 RCurl_1.95-4.8 biomaRt_2.26.1
# [25] munsell_0.4.3 nnet_7.3-12 Hmisc_3.17-4
# [28] XML_3.98-1.4 crayon_1.3.2 bitops_1.0-6
# [31] grid_3.2.5 xtable_1.8-2 gtable_0.2.0
# [34] DBI_0.5-1 magrittr_1.5 scales_0.4.0
# [37] KernSmooth_2.23-15 stringi_1.1.2 XVector_0.10.0
# [40] hwriter_1.3.2 genefilter_1.52.1 latticeExtra_0.6-28
# [43] futile.logger_1.4.3 Formula_1.2-1 lambda.r_1.1.9
# [46] RColorBrewer_1.1-2 tools_3.2.5 survival_2.39-5
# [49] AnnotationDbi_1.32.3 colorspace_1.2-6 cluster_2.0.4
# [52] caTools_1.17.1
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{replicate_summary_plot} \alias{replicate_summary_plot} \title{Plots a summary of deletion analysis replicates.} \usage{ replicate_summary_plot(del_result, outlier_threshold = NA) } \arguments{ \item{del_result}{Result from a call to deletion_analysis.} \item{outlier_threshold}{A numeric threshold for the outlier score, above which replicates will be colored differently.} } \value{ Returns a ggplot object with a summary of deletion analysis replicates. } \description{ Plots a summary of deletion analysis replicates. } \examples{ # Note: First run deletion_analysis() # mul1_del_results is a pre-loaded result replicate_summary_plot(mul1_del_results[[1]]) } \seealso{ \code{\link{deletion_analysis}} }
/man/replicate_summary_plot.Rd
no_license
cran/rgenie
R
false
true
800
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{replicate_summary_plot} \alias{replicate_summary_plot} \title{Plots a summary of deletion analysis replicates.} \usage{ replicate_summary_plot(del_result, outlier_threshold = NA) } \arguments{ \item{del_result}{Result from a call to deletion_analysis.} \item{outlier_threshold}{A numeric threshold for the outlier score, above which replicates will be colored differently.} } \value{ Returns a ggplot object with a summary of deletion analysis replicates. } \description{ Plots a summary of deletion analysis replicates. } \examples{ # Note: First run deletion_analysis() # mul1_del_results is a pre-loaded result replicate_summary_plot(mul1_del_results[[1]]) } \seealso{ \code{\link{deletion_analysis}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Tools.R \name{cybrowserHide} \alias{cybrowserHide} \title{Cybrowser Hide} \usage{ cybrowserHide(id = NULL, base.url = .defaultBaseUrl) } \arguments{ \item{id}{(optional) The identifier for the browser window to hide} \item{base.url}{(optional) Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of RCy3.} } \value{ None } \description{ Hide an existing browser, whether it's in the Results panel or a separate window. } \examples{ \donttest{ cybrowserHide() } } \seealso{ \link{cybrowserShow} \link{cybrowserDialog} }
/man/cybrowserHide.Rd
permissive
cytoscape/RCy3
R
false
true
746
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Tools.R \name{cybrowserHide} \alias{cybrowserHide} \title{Cybrowser Hide} \usage{ cybrowserHide(id = NULL, base.url = .defaultBaseUrl) } \arguments{ \item{id}{(optional) The identifier for the browser window to hide} \item{base.url}{(optional) Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of RCy3.} } \value{ None } \description{ Hide an existing browser, whether it's in the Results panel or a separate window. } \examples{ \donttest{ cybrowserHide() } } \seealso{ \link{cybrowserShow} \link{cybrowserDialog} }
Create_Household_Power_DataFrame <- function(HP_file = "household_power_consumption.txt") {
    ## Reads the household_power_consumption file and converts the columns to
    ## the appropriate data types.  Date and Time are combined into a single
    ## Date_Time column and the data are filtered to the window between
    ## 2007-02-01 00:00 and 2007-02-03 00:00 (inclusive) as required.
    ##
    ## HP_file : path to the raw text data file.
    ## Returns : the filtered table with added Date_Time (POSIXct) and
    ##           Time_Change (fractional days since 2007-02-01) columns.
    ##
    ## Use data.table for faster reading of large file
    ## Use dplyr for filter function
    library(data.table)
    library(dplyr)

    HP_df <- fread(HP_file)
    ## FIX: coerce to POSIXct -- strptime() returns POSIXlt, a list-like
    ## class that does not bind cleanly into a table as a column.
    Date_Time <- as.POSIXct(strptime(paste(HP_df$Date, HP_df$Time, sep = " "),
                                     format = "%d/%m/%Y %H:%M:%S"))
    HP_df <- cbind(HP_df, Date_Time)

    ## Set time limits
    time1 <- as.POSIXct(strptime("1/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S"))
    time2 <- as.POSIXct(strptime("3/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S"))
    ## Filter file to the two target days
    HP_df <- filter(HP_df, Date_Time >= time1, Date_Time <= time2)

    ## Convert measurement columns to numeric (they arrive as character;
    ## "?" entries become NA with a coercion warning)
    num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                  "Sub_metering_3")
    for (col in num_cols) {
        HP_df[[col]] <- as.numeric(HP_df[[col]])
    }

    ## Column with time differences in fractional days.
    ## FIX: request units explicitly -- difftime()'s automatic unit choice is
    ## not guaranteed to be seconds, so dividing by 60*60*24 was unreliable.
    Time_Change <- as.numeric(difftime(HP_df$Date_Time, time1, units = "days"))
    HP_df <- cbind(HP_df, Time_Change)
    ## explicit return (the original relied on the value of the final
    ## assignment, which is returned invisibly)
    HP_df
}

Make_plot3 <- function(HP_file = "household_power_consumption.txt") {
    ## Plots the three sub-metering series (Sub_metering_1/2/3) as lines with
    ## appropriate color differences and a legend (course plot 3).
    ##
    ## NOTE: as written, this reads HP_df from the calling environment;
    ## uncomment the next line to (re)build it from HP_file instead.
    ## HP_df <- Create_Household_Power_DataFrame(HP_file)
    plot(HP_df$Time_Change, HP_df$Sub_metering_1, type = "l",
         xlab = "", ylab = "Energy sub metering", cex.lab = 0.75,
         xaxt = "n", yaxt = "n")
    axis(side = 1, at = c(0, 1, 2), labels = c("Thu", "Fri", "Sat"),
         cex.axis = 0.75)
    axis(side = 2, at = c(0, 10, 20, 30), cex.axis = 0.75)
    lines(HP_df$Time_Change, HP_df$Sub_metering_2, col = "red")
    lines(HP_df$Time_Change, HP_df$Sub_metering_3, col = "blue")
    legend(x = "topright",
           c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           cex = 0.75, lty = c(1, 1, 1), col = c("black", "red", "blue"))
}

Make_plot3_png <- function() {
    ## Writes a 480 x 480 pixel PNG version of plot 3.
    ## (FIX: the original comment said "plot1"; dimensions are now explicit.)
    png("plot3.png", width = 480, height = 480)
    Make_plot3()
    dev.off()
}
/plot3.R
no_license
jeremymberg/ExData_Plotting1
R
false
false
2,961
r
Create_Household_Power_DataFrame <- function(HP_file = "household_power_consumption.txt") {
    ## Reads the household_power_consumption file and converts the columns to
    ## the appropriate data types.  Date and Time are combined into a single
    ## Date_Time column and the data are filtered to the window between
    ## 2007-02-01 00:00 and 2007-02-03 00:00 (inclusive) as required.
    ##
    ## HP_file : path to the raw text data file.
    ## Returns : the filtered table with added Date_Time (POSIXct) and
    ##           Time_Change (fractional days since 2007-02-01) columns.
    ##
    ## Use data.table for faster reading of large file
    ## Use dplyr for filter function
    library(data.table)
    library(dplyr)

    HP_df <- fread(HP_file)
    ## FIX: coerce to POSIXct -- strptime() returns POSIXlt, a list-like
    ## class that does not bind cleanly into a table as a column.
    Date_Time <- as.POSIXct(strptime(paste(HP_df$Date, HP_df$Time, sep = " "),
                                     format = "%d/%m/%Y %H:%M:%S"))
    HP_df <- cbind(HP_df, Date_Time)

    ## Set time limits
    time1 <- as.POSIXct(strptime("1/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S"))
    time2 <- as.POSIXct(strptime("3/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S"))
    ## Filter file to the two target days
    HP_df <- filter(HP_df, Date_Time >= time1, Date_Time <= time2)

    ## Convert measurement columns to numeric (they arrive as character;
    ## "?" entries become NA with a coercion warning)
    num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                  "Sub_metering_3")
    for (col in num_cols) {
        HP_df[[col]] <- as.numeric(HP_df[[col]])
    }

    ## Column with time differences in fractional days.
    ## FIX: request units explicitly -- difftime()'s automatic unit choice is
    ## not guaranteed to be seconds, so dividing by 60*60*24 was unreliable.
    Time_Change <- as.numeric(difftime(HP_df$Date_Time, time1, units = "days"))
    HP_df <- cbind(HP_df, Time_Change)
    ## explicit return (the original relied on the value of the final
    ## assignment, which is returned invisibly)
    HP_df
}

Make_plot3 <- function(HP_file = "household_power_consumption.txt") {
    ## Plots the three sub-metering series (Sub_metering_1/2/3) as lines with
    ## appropriate color differences and a legend (course plot 3).
    ##
    ## NOTE: as written, this reads HP_df from the calling environment;
    ## uncomment the next line to (re)build it from HP_file instead.
    ## HP_df <- Create_Household_Power_DataFrame(HP_file)
    plot(HP_df$Time_Change, HP_df$Sub_metering_1, type = "l",
         xlab = "", ylab = "Energy sub metering", cex.lab = 0.75,
         xaxt = "n", yaxt = "n")
    axis(side = 1, at = c(0, 1, 2), labels = c("Thu", "Fri", "Sat"),
         cex.axis = 0.75)
    axis(side = 2, at = c(0, 10, 20, 30), cex.axis = 0.75)
    lines(HP_df$Time_Change, HP_df$Sub_metering_2, col = "red")
    lines(HP_df$Time_Change, HP_df$Sub_metering_3, col = "blue")
    legend(x = "topright",
           c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           cex = 0.75, lty = c(1, 1, 1), col = c("black", "red", "blue"))
}

Make_plot3_png <- function() {
    ## Writes a 480 x 480 pixel PNG version of plot 3.
    ## (FIX: the original comment said "plot1"; dimensions are now explicit.)
    png("plot3.png", width = 480, height = 480)
    Make_plot3()
    dev.off()
}