content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Example usage for PKdata() from the PKreport package.
library(PKreport)

### Name: PKdata
### Title: Data input
### Aliases: PKdata
### Keywords: methods

### ** Examples

# Graphics configuration: bitmap output at 480x480 px, plotting package 2.
general.list <- list(save.format = "bmp", width = 480, height = 480, package = 2)
# Histogram settings: counts, single panel, 5x5 grid for individual plots.
hist.list <- list(type = c("count"), layout = c(1, 1), ind.layout = c(5, 5))
# Scatter settings: points + loess smooth (span 0.25), same layouts.
scatter.list <- list(span = 0.25, type = c("p", "smooth"), layout = c(1, 1), ind.layout = c(5, 5))
# Map NONMEM-style column names onto the roles PKreport expects.
var.name <- list(
  ID = "ID", DV = "CONC", TIME = "TIME", PRED = "PRED", RES = "RES",
  WRES = "WRES", IPRE = "IPRE", IDV = c("CLCR", "WT"), COV = c("WT", "AGE"),
  ETA = c("ETA1", "ETA2"), PARA = c("CL", "V")
)
# Load the example dataset shipped with the package.
data(pdata)
# Typical workflow (left commented so the example runs without side effects):
# PKdata(data=pdata, match.term=var.name)
# PKconfig(general.list, hist.list, scatter.list)
# PKfigure(pdata, 1)
# PKshow()
# PKclean()
| /data/genthat_extracted_code/PKreport/examples/PKdata.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 713 | r | library(PKreport)
### Name: PKdata
### Title: Data input
### Aliases: PKdata
### Keywords: methods

### ** Examples

# Example workflow for PKreport's PKdata(): build the configuration lists,
# load the demo dataset, then (commented out) run the report pipeline.
# setup configuration
# Graphics output: bitmap device, 480x480 px, plotting package 2.
general.list <- list(save.format="bmp", width = 480, height = 480, package=2)
# Histograms: counts, single panel, 5x5 grid for per-individual plots.
hist.list <- list(type=c("count"), layout=c(1,1), ind.layout=c(5,5))
# Scatter plots: points plus loess smooth with span 0.25.
scatter.list <- list(span=0.25, type=c("p", "smooth"), layout=c(1,1), ind.layout=c(5,5))
# Map dataset column names onto the roles PKreport expects (DV, PRED, etc.).
var.name <- list(ID="ID", DV="CONC", TIME="TIME", PRED="PRED", RES="RES",
WRES="WRES",IPRE="IPRE", IDV=c("CLCR", "WT"), COV=c("WT", "AGE"),
ETA=c("ETA1", "ETA2"), PARA=c("CL", "V"))
# Load the example dataset shipped with the package.
data(pdata)
# The actual calls are commented out so the example has no side effects.
# PKdata(data=pdata, match.term=var.name)
# PKconfig(general.list, hist.list, scatter.list)
# PKfigure(pdata, 1)
# PKshow()
# PKclean()
# Author: C Lynam, Cefas
# Contact: chris.lynam@cefas.co.uk
# Version: 1
# Date: May 2020
# Lynam_IND_script_FINALPLOTS.R
# Final figure for the fish-community indicators computed upstream.
# Expects in the workspace: indicator switches (LFI, MaxL, Loo, Lm,
# TyL_GeoM, MEANTLs), SPECIES, YRS, BOOTSTRAP, IND_OUT and the
# *_regional bootstrap lists, plus the plotting helper INDPLOTFN().

# One plot row per selected indicator, plus one extra row when TyL, MML and
# Lm are all on (the sum()==3 case) ...
PLOTROWN <- sum(TyL_GeoM,MaxL,Loo,Lm,MEANTLs,LFI) + ifelse(sum(TyL_GeoM,MaxL,Lm)==3,1,0)
# ... and another when a TyL/Lm or TyL/MML ratio row will be drawn.
# '&&'/'||' (scalar, short-circuiting) are the correct operators in 'if'.
if((Lm && TyL_GeoM) || (MaxL && TyL_GeoM && !Lm)) PLOTROWN <- PLOTROWN+1
PLOTCOLN <- length(SPECIES)  # one column per species group (e.g. ALL/PEL/DEM)
# Open an on-screen device sized to the panel grid (windows() is MS-Windows only).
windows(width=PLOTCOLN*8, height=4*PLOTROWN)
par(mfrow=c(PLOTROWN,PLOTCOLN),mar=c(2,4,2,2),oma=c(1,1,3,1))
# Annotation switches: with bootstrap output, show error bars only
# (no bootstrap trend line or CI band).
if(BOOTSTRAP){ ADDBOOTTREND<-FALSE; ADDBOOTTREND_CI<-FALSE; ADDBOOT_ERRBAR<-TRUE } else { ADDBOOT_ERRBAR<-FALSE; ADDBOOTTREND<-FALSE; ADDBOOTTREND_CI<-FALSE }
ADDGAM<-FALSE                # linear trend by default; TRUE adds a GAM smooth
BEST_AND_BOOT<- TRUE         # add crosses to plot
TITAdd<-TRUE; ADDLOESS<-TRUE; ADDLAST6LM<-TRUE  # column titles only on first row
if(LFI){
  #### Plot the LFI (Large Fish Indicator): one panel per species group.
  YLAB <- "Large Fish Indicator"
  for(plotgroups in seq_along(SPECIES)){
    # Select the regional LFI series and its bootstrap replicates for this
    # group. The YLIM set here is provisional; it is recomputed from the data
    # range before plotting.
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["LFI_by_sub_all"]][,"LFIregional"]; YLIM<- c(0,0.05); BOOTDATA2PLOT <- LFI_regional$all;if(TITAdd){ TITLE<-"All fish"} }
    # Pelagic LFI is deliberately not shown: draw an empty panel to keep the
    # grid aligned, then skip ('next' makes the rest of this branch unreachable).
    if(SPECIES[plotgroups]=="PEL"){ plot(1,1,col="white",axes=FALSE,ylab=""); next; DATA2PLOT <- IND_OUT[["LFI_by_sub_pel"]][,"LFIregional"]; YLIM<- c(0,0.05); BOOTDATA2PLOT <- LFI_regional$pel;if(TITAdd){ TITLE<-"Pelagic fish"} }
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["LFI_by_sub_dem"]][,"LFIregional"]; YLIM<- c(0.1,0.45);BOOTDATA2PLOT <- LFI_regional$dem;if(TITAdd){ TITLE<-"Demersal fish"} }
    print(DATA2PLOT)
    if(!is.null(DATA2PLOT)){
      # Linear trend (gam with a plain linear term); ADDGAM=TRUE replaces it
      # with a k=6 smooth.
      summary(lmseaLFI<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaLFI,se=TRUE)
      if(ADDGAM){ summary(gseaLFI<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaLFI,se=TRUE) }
      # Data-driven y-limits (5% margin) override the provisional YLIM above.
      YLIM<-c( (min(DATA2PLOT,na.rm=TRUE)*.95), (max(DATA2PLOT,na.rm=TRUE)*1.05) )
      # '&&' is the correct scalar operator in 'if'; it also skips the
      # BOOTDATA2PLOT test when no bootstrap output exists.
      if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){
        INDPLOTFN(DATA2PLOT[is.finite(DATA2PLOT)], BOOTDATA2PLOT[is.finite(DATA2PLOT)],YRS[is.finite(DATA2PLOT)],YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                  ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
      } else {#no boot
        # NOTE(review): this branch still passes BOOTDATA2PLOT and will error
        # if the bootstrap objects were never created -- confirm intended.
        INDPLOTFN(DATA2PLOT[is.finite(DATA2PLOT)], BOOTDATA2PLOT[is.finite(DATA2PLOT)],YRS[is.finite(DATA2PLOT)],YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
                  ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
      }
    }
  }
  # Only the first plotted row carries column titles.
  TITAdd<-FALSE; TITLE<-""
}
if(MaxL){
  #### Plot MML (mean maximum length), one panel per species group.
  YLAB <- "Mean Max Length (cm)"
  for(plotgroups in seq_along(SPECIES)){
    # Per-group series + bootstrap replicates; YLIM here is provisional and
    # recomputed from the data range below.
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["MaxLsea_all"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- MaxL_regional$all; if(TITAdd){TITLE<-"All fish"} }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["MaxLsea_pel"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- MaxL_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"}}
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["MaxLsea_dem"]][,"sea"]; YLIM<- c(45,125); BOOTDATA2PLOT <- MaxL_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmseaMML<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaMML,se=TRUE)
    if(ADDGAM){ summary(gseaMML<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaMML,se=TRUE) }
    # y-limits from the data, 5% margin, rounded outward to whole cm.
    YLIM<-c(floor(min(DATA2PLOT,na.rm=TRUE)*.95), ceiling(max(DATA2PLOT,na.rm=TRUE)*1.05) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
    }
  }
  TITAdd<-FALSE; TITLE<-""
}
if(Loo){
  #### Plot asymptotic length (L-infinity), one panel per species group.
  YLAB <- "Asymptotic Length (cm)"
  for(plotgroups in seq_along(SPECIES)){
    # Per-group series + bootstrap replicates; YLIM is provisional (recomputed below).
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["Loosea_all"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Loo_regional$all; if(TITAdd){TITLE<-"All fish"} }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["Loosea_pel"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Loo_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"}}
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["Loosea_dem"]][,"sea"]; YLIM<- c(45,125); BOOTDATA2PLOT <- Loo_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmseaMML<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaMML,se=TRUE)
    if(ADDGAM){ summary(gseaMML<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaMML,se=TRUE) }
    # y-limits from the data, 5% margin, rounded outward to whole cm.
    YLIM<-c(floor(min(DATA2PLOT,na.rm=TRUE)*.95), ceiling(max(DATA2PLOT,na.rm=TRUE)*1.05) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
    }
  }
  TITAdd<-FALSE; TITLE<-""
}
if(Lm){
  #### Plot mean length at maturity, one panel per species group.
  YLAB <- "Length at Maturity (cm)"
  for(plotgroups in seq_along(SPECIES)){
    # Per-group series + bootstrap replicates; YLIM is provisional (recomputed below).
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["Lmsea_all"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Lm_regional$all; if(TITAdd){TITLE<-"All fish"} }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["Lmsea_pel"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Lm_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"}}
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["Lmsea_dem"]][,"sea"]; YLIM<- c(45,125); BOOTDATA2PLOT <- Lm_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmseaMML<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaMML,se=TRUE)
    if(ADDGAM){ summary(gseaMML<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaMML,se=TRUE) }
    # y-limits from the data, 5% margin, rounded outward to whole cm.
    YLIM<-c(floor(min(DATA2PLOT,na.rm=TRUE)*.95), ceiling(max(DATA2PLOT,na.rm=TRUE)*1.05) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
    }
  }
  TITAdd<-FALSE; TITLE<-""
}
if(TyL_GeoM){
  #### Plot typical length (geometric mean), one panel per species group.
  YLAB <- "TyL geomean (cm)"
  for(plotgroups in seq_along(SPECIES)){
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_all"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$all; if(TITAdd){TITLE<-"All fish"} }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_pel"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"} }
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_dem"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmTyL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTyL,se=TRUE)
    if(ADDGAM){ summary(gTyL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTyL,se=TRUE) }
    # y-limits from the data, 5% margin, rounded outward to whole cm.
    YLIM<-c(floor(min(DATA2PLOT,na.rm=TRUE)*.95), ceiling(max(DATA2PLOT,na.rm=TRUE)*1.05) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
    }
  }
  TITAdd<-FALSE; TITLE<-""
}
if(MEANTLs){
  #### Plot mean trophic level, one panel per species group.
  YLAB <- "MTL"
  for(plotgroups in seq_along(SPECIES)){
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TLsea_all"]][,"sea"]; BOOTDATA2PLOT <- TL_regional$all; if(TITAdd){TITLE<-"All fish"} }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TLsea_pel"]][,"sea"]; BOOTDATA2PLOT <- TL_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"} }
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TLsea_dem"]][,"sea"]; BOOTDATA2PLOT <- TL_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmTL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTL,se=TRUE)
    if(ADDGAM){ summary(gTL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTL,se=TRUE) }
    # Tighter 2.5% margin here: trophic level varies over a narrow range.
    YLIM<-c((min(DATA2PLOT,na.rm=TRUE)*.975), (max(DATA2PLOT,na.rm=TRUE)*1.025) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
    }
  }
  TITAdd<-FALSE; TITLE<-""
}
if(MaxL && TyL_GeoM && !Lm){
  #### Ratio panel TyL/MML (only when Lm is off; otherwise TyL/Lm is used below).
  YLAB <- "TyL/MML"
  for(plotgroups in seq_along(SPECIES)){
    # Element-wise ratio of typical length to mean maximum length; the
    # bootstrap replicate matrices are divided the same way.
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_all"]][,"sea"]/IND_OUT[["MaxLsea_all"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$all/MaxL_regional$all }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_pel"]][,"sea"]/IND_OUT[["MaxLsea_pel"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$pel/MaxL_regional$pel }
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_dem"]][,"sea"]/IND_OUT[["MaxLsea_dem"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$dem/MaxL_regional$dem}
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmTyL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTyL,se=TRUE)
    if(ADDGAM){ summary(gTyL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTyL,se=TRUE) }
    YLIM<-c((min(DATA2PLOT,na.rm=TRUE)*.95), (max(DATA2PLOT,na.rm=TRUE)*1.05) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      # CI band explicitly suppressed (ADDBOOTTREND_CI=FALSE) for ratio panels.
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,
                BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=FALSE, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR,
                ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, BEST_AND_BOOT=BEST_AND_BOOT, GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM, BOOTSTRAP=FALSE,ADDGAM=ADDGAM)
    }
  }
}
if(Lm && TyL_GeoM){
  #### Ratio panel TyL/Lm: typical length relative to length at maturity.
  YLAB <- "TyL/Lm"
  for(plotgroups in seq_along(SPECIES)){
    # Element-wise ratio; bootstrap replicate matrices divided the same way.
    if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_all"]][,"sea"]/IND_OUT[["Lmsea_all"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$all/Lm_regional$all }
    if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_pel"]][,"sea"]/IND_OUT[["Lmsea_pel"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$pel/Lm_regional$pel }
    if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_dem"]][,"sea"]/IND_OUT[["Lmsea_dem"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$dem/Lm_regional$dem}
    # Linear trend by default; optional k=6 smooth when ADDGAM.
    summary(lmTyL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTyL,se=TRUE)
    if(ADDGAM){ summary(gTyL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTyL,se=TRUE) }
    YLIM<-c((min(DATA2PLOT,na.rm=TRUE)*.95), (max(DATA2PLOT,na.rm=TRUE)*1.05) )
    if(exists("BOOT_OUT") && !is.null(BOOTDATA2PLOT) ){  # scalar '&&' in 'if'
      # CI band explicitly suppressed (ADDBOOTTREND_CI=FALSE) for ratio panels.
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=TRUE,GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,
                BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=FALSE, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR,
                ADDGAM=ADDGAM)
    } else {#no boot
      INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, BEST_AND_BOOT=BEST_AND_BOOT, GAMMOD=GAMMOD, YLAB=YLAB
                ,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM, BOOTSTRAP=FALSE,ADDGAM=ADDGAM)
    }
    # Reference line at 1: fish typically caught at their size of maturation.
    abline(h=1,col=2)
  }
}
# Overall figure title: the survey name across the top of the panel grid.
mtext(paste(survey,sep=' '),line=-0.5,outer=TRUE)
# Build a filename tag listing which indicators were plotted, e.g. "_LFI.TyL.".
# paste0() is the idiomatic form of paste(..., sep='').
WHICHIND<- "_"
if(LFI) WHICHIND<- paste0(WHICHIND, "LFI.")
if(MaxL) WHICHIND<- paste0(WHICHIND, "MaxL.")
if(Loo) WHICHIND<- paste0(WHICHIND, "Loo.")
if(Lm) WHICHIND<- paste0(WHICHIND, "Lm.")
if(TyL_GeoM) WHICHIND<- paste0(WHICHIND, "TyL.")
if(MEANTLs) WHICHIND<- paste0(WHICHIND, "MTL.")
# Tag catchability-corrected runs; NULL contributes nothing to paste0().
if(CATCHABILITY_COR_WALKER){qcor<-"Qwalk"} else {qcor<-NULL}
# The trailing "." in WHICHIND supplies the extension separator before "bmp".
savePlot(filename= paste0(FILENAM,qcor,WHICHIND,"bmp"),type="bmp")
| /Lynam_IND_script_FINALPLOTS.R | no_license | JoeRibeiro/R_old | R | false | false | 13,705 | r | # Author: C Lynam, Cefas
# Contact: chris.lynam@cefas.co.uk
# Version: 1
# Date: May 2020
#Lynam_IND_script_FINALPLOTS.R
# Figure setup: size a panel grid with one row per selected indicator
# (plus extra rows for the TyL ratio panels) and one column per species group.
PLOTROWN <- sum(TyL_GeoM,MaxL,Loo,Lm,MEANTLs,LFI) + ifelse(sum(TyL_GeoM,MaxL,Lm)==3,1,0)
if((Lm & TyL_GeoM) | (MaxL & TyL_GeoM & !Lm) ) PLOTROWN<-PLOTROWN+1
PLOTCOLN <- length(SPECIES)
# Open an on-screen device sized to the grid (windows() is MS-Windows only).
windows(width=PLOTCOLN*8, height=4*PLOTROWN)
par(mfrow=c(PLOTROWN,PLOTCOLN),mar=c(2,4,2,2),oma=c(1,1,3,1))
# NOTE(review): self-assignment is a no-op; YRS must already exist upstream.
YRS<- YRS
# With bootstrap output show error bars only (no bootstrap trend or CI band).
if(BOOTSTRAP){ ADDBOOTTREND<-F; ADDBOOTTREND_CI<-F; ADDBOOT_ERRBAR<-T; } else { ADDBOOT_ERRBAR<-F; ADDBOOTTREND<-F; ADDBOOTTREND_CI<-F}
ADDGAM<-F
BEST_AND_BOOT<- T # add crosses to plot
# Column titles are added only on the first plotted row (TITAdd reset later).
TITAdd<-T; ADDLOESS=T; ADDLAST6LM=T
# Indicator panels, part 1: LFI, mean max length (MML), asymptotic length
# (Loo) and length at maturity (Lm). Each block selects the per-group series
# and bootstrap replicates, fits a linear trend via gam(), recomputes y-limits
# from the data, then delegates drawing to INDPLOTFN().
if(LFI){
#### plot LFI
YLAB <- "Large Fish Indicator"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["LFI_by_sub_all"]][,"LFIregional"]; YLIM<- c(0,0.05); BOOTDATA2PLOT <- LFI_regional$all;if(TITAdd){ TITLE<-"All fish"} }
# Pelagic LFI is skipped: an empty panel keeps the grid aligned; 'next'
# makes the rest of this line unreachable.
if(SPECIES[plotgroups]=="PEL"){ plot(1,1,col="white",axes=F,ylab=""); next; DATA2PLOT <- IND_OUT[["LFI_by_sub_pel"]][,"LFIregional"]; YLIM<- c(0,0.05); BOOTDATA2PLOT <- LFI_regional$pel;if(TITAdd){ TITLE<-"Pelagic fish"} }
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["LFI_by_sub_dem"]][,"LFIregional"]; YLIM<- c(0.1,0.45);BOOTDATA2PLOT <- LFI_regional$dem;if(TITAdd){ TITLE<-"Demersal fish"} }
print(DATA2PLOT)
if(!is.null(DATA2PLOT)){
summary(lmseaLFI<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaLFI,se=T)
if(ADDGAM){ summary(gseaLFI<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaLFI,se=T) }
# Data-driven y-limits (5% margin) override the provisional YLIM above.
YLIM<-c( (min(DATA2PLOT,na.rm=T)*.95), (max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT[is.finite(DATA2PLOT)], BOOTDATA2PLOT[is.finite(DATA2PLOT)],YRS[is.finite(DATA2PLOT)],YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT[is.finite(DATA2PLOT)], BOOTDATA2PLOT[is.finite(DATA2PLOT)],YRS[is.finite(DATA2PLOT)],YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
}
}
}
TITAdd<-F; TITLE<-""
}
if(MaxL){
#### plot MML
YLAB <- "Mean Max Length (cm)"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["MaxLsea_all"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- MaxL_regional$all; if(TITAdd){TITLE<-"All fish"} }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["MaxLsea_pel"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- MaxL_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"}}
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["MaxLsea_dem"]][,"sea"]; YLIM<- c(45,125); BOOTDATA2PLOT <- MaxL_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
summary(lmseaMML<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaMML,se=T)
if(ADDGAM){ summary(gseaMML<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaMML,se=T) }
# y-limits rounded outward to whole cm.
YLIM<-c(floor(min(DATA2PLOT,na.rm=T)*.95), ceiling(max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
}
}
TITAdd<-F; TITLE<-""
}
if(Loo){
YLAB <- "Asymptotic Length (cm)"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["Loosea_all"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Loo_regional$all; if(TITAdd){TITLE<-"All fish"} }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["Loosea_pel"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Loo_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"}}
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["Loosea_dem"]][,"sea"]; YLIM<- c(45,125); BOOTDATA2PLOT <- Loo_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
summary(lmseaMML<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaMML,se=T)
if(ADDGAM){ summary(gseaMML<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaMML,se=T) }
YLIM<-c(floor(min(DATA2PLOT,na.rm=T)*.95), ceiling(max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
}
}
TITAdd<-F; TITLE<-""
}
if(Lm){
YLAB <- "Length at Maturity (cm)"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["Lmsea_all"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Lm_regional$all; if(TITAdd){TITLE<-"All fish"} }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["Lmsea_pel"]][,"sea"]; YLIM<- c(35,50); BOOTDATA2PLOT <- Lm_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"}}
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["Lmsea_dem"]][,"sea"]; YLIM<- c(45,125); BOOTDATA2PLOT <- Lm_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
summary(lmseaMML<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmseaMML,se=T)
if(ADDGAM){ summary(gseaMML<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gseaMML,se=T) }
YLIM<-c(floor(min(DATA2PLOT,na.rm=T)*.95), ceiling(max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
}
}
TITAdd<-F; TITLE<-""
}
# Indicator panels, part 2: typical length (TyL, geometric mean), mean trophic
# level (MTL) and the TyL/MML or TyL/Lm ratio rows, followed by the figure
# title and save-to-bitmap step.
if(TyL_GeoM){
#### plot tyl geometric
YLAB <- "TyL geomean (cm)"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_all"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$all; if(TITAdd){TITLE<-"All fish"} }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_pel"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"} }
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_dem"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
summary(lmTyL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTyL,se=T)
if(ADDGAM){ summary(gTyL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTyL,se=T) }
YLIM<-c(floor(min(DATA2PLOT,na.rm=T)*.95), ceiling(max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
}
}
TITAdd<-F; TITLE<-""
}
if(MEANTLs){
#### plot tl
YLAB <- "MTL"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TLsea_all"]][,"sea"]; BOOTDATA2PLOT <- TL_regional$all; if(TITAdd){TITLE<-"All fish"} }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TLsea_pel"]][,"sea"]; BOOTDATA2PLOT <- TL_regional$pel; if(TITAdd){TITLE<-"Pelagic fish"} }
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TLsea_dem"]][,"sea"]; BOOTDATA2PLOT <- TL_regional$dem; if(TITAdd){TITLE<-"Demersal fish"} }
summary(lmTL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTL,se=T)
if(ADDGAM){ summary(gTL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTL,se=T) }
# Narrower 2.5% margin: trophic level varies over a small range.
YLIM<-c((min(DATA2PLOT,na.rm=T)*.975), (max(DATA2PLOT,na.rm=T)*1.025) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=ADDBOOTTREND_CI, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR, ADDGAM=ADDGAM, BEST_AND_BOOT=BEST_AND_BOOT)
}
}
TITAdd<-F; TITLE<-""
}
# Ratio row TyL/MML (only drawn when Lm is off; otherwise TyL/Lm below).
if(MaxL & TyL_GeoM & !Lm){
####plot TyL / MML
YLAB <- "TyL/MML"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_all"]][,"sea"]/IND_OUT[["MaxLsea_all"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$all/MaxL_regional$all }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_pel"]][,"sea"]/IND_OUT[["MaxLsea_pel"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$pel/MaxL_regional$pel }
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_dem"]][,"sea"]/IND_OUT[["MaxLsea_dem"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$dem/MaxL_regional$dem}
summary(lmTyL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTyL,se=T)
if(ADDGAM){ summary(gTyL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTyL,se=T) }
YLIM<-c((min(DATA2PLOT,na.rm=T)*.95), (max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,
BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=F, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR,
ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, BEST_AND_BOOT=BEST_AND_BOOT, GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM, BOOTSTRAP=F,ADDGAM=ADDGAM)
}
}
}
if(Lm & TyL_GeoM){
####plot TyL / Lm
YLAB <- "TyL/Lm"
for(plotgroups in 1:length(SPECIES)){
if(SPECIES[plotgroups]=="ALL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_all"]][,"sea"]/IND_OUT[["Lmsea_all"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$all/Lm_regional$all }
if(SPECIES[plotgroups]=="PEL"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_pel"]][,"sea"]/IND_OUT[["Lmsea_pel"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$pel/Lm_regional$pel }
if(SPECIES[plotgroups]=="DEM"){ DATA2PLOT <- IND_OUT[["TyL.cm.sea_dem"]][,"sea"]/IND_OUT[["Lmsea_dem"]][,"sea"]; BOOTDATA2PLOT <- TyLrect_regional$dem/Lm_regional$dem}
summary(lmTyL<-gam(DATA2PLOT ~ (YRS))); GAMMOD<-predict(lmTyL,se=T)
if(ADDGAM){ summary(gTyL<-gam(DATA2PLOT ~ s(YRS,k=6))); GAMMOD<-predict(gTyL,se=T) }
YLIM<-c((min(DATA2PLOT,na.rm=T)*.95), (max(DATA2PLOT,na.rm=T)*1.05) )
if(exists("BOOT_OUT") & !is.null(BOOTDATA2PLOT) ){
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE,BEST_AND_BOOT=T,GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM,
BOOTSTRAP=BOOTSTRAP, ADDBOOTTREND=ADDBOOTTREND, ADDBOOTTREND_CI=F, ADDBOOT_ERRBAR=ADDBOOT_ERRBAR,
ADDGAM=ADDGAM)
} else {#no boot
INDPLOTFN(DATA2PLOT, BOOTDATA2PLOT,YRS,YLIM=YLIM,TITLE=TITLE, BEST_AND_BOOT=BEST_AND_BOOT, GAMMOD=GAMMOD, YLAB=YLAB
,ADDLOESS=ADDLOESS,ADDLAST6LM=ADDLAST6LM, BOOTSTRAP=F,ADDGAM=ADDGAM)
}
# Reference line at ratio = 1.
abline(h=1,col=2)
}
}
# Figure title (survey name) and a filename tag listing the plotted indicators.
mtext(paste(survey,sep=' '),line=-0.5,outer=T)
WHICHIND<- "_"
if(LFI) WHICHIND<- paste(WHICHIND, "LFI.",sep='')
if(MaxL) WHICHIND<- paste(WHICHIND, "MaxL.",sep='')
if(Loo) WHICHIND<- paste(WHICHIND, "Loo.",sep='')
if(Lm) WHICHIND<- paste(WHICHIND, "Lm.",sep='')
if(TyL_GeoM) WHICHIND<- paste(WHICHIND, "TyL.",sep='')
if(MEANTLs) WHICHIND<- paste(WHICHIND, "MTL.",sep='')
# Tag catchability-corrected runs; NULL contributes nothing to paste().
if(CATCHABILITY_COR_WALKER){qcor<-"Qwalk"} else {qcor<-NULL}
savePlot(filename= paste(FILENAM,qcor,WHICHIND,"bmp",sep=''),type="bmp")
|
#-------------------------------------------------------------------#
#          Summary statistics for the full bee dataset              #
#                          2014-2018                                #
#-------------------------------------------------------------------#
# NOTE(review): clearing the workspace and hard-coding the working
# directory make this script non-portable; consider a project-relative
# path instead.
#Clear environment and set working directory
rm(list=ls())
setwd("~/ISU/Project/Data")
#Load libraries
library(lubridate)  # date parsing: mdy(), year()
library(dplyr)      # data-manipulation verbs
library(tidyr)      # reshaping: spread()
#Read in data: bee specimen records and vegetation quadrat surveys.
# Empty strings are treated as missing in the bee file.
Bees <- read.csv("Bees/Bee IDs.csv", header = T, na.strings = c("", "NA"))
Quadrats <- read.csv("Plants/Quadrats.csv")
#Format date with lubridate (month-day-year strings -> Date class)
Bees$Date <- mdy(Bees$Date)
Quadrats$Date <- mdy(Quadrats$Date)
#Derive the sampling year from the parsed dates.
Bees$Year <- year(Bees$Date)
Quadrats$Year <- year(Quadrats$Date)
# Keep only true, identifiable bee records with a known site: drop wasps and
# flies (by family or binomial), unidentifiable specimens, and rows lacking a
# site. Conditions in a single filter() are combined with AND, exactly as the
# equivalent chain of filter() calls would be.
bees <- Bees %>%
  filter(Family != "Wasp",
         Family != "Fly",
         Binomial != "Wasp",
         Binomial != "Unidentifiable",
         !is.na(Site))
# Coerce the bare-ground column to numeric (read in as character/factor).
Quadrats$BareGround <- as.numeric(Quadrats$BareGround)
# Bare ground per quadrat: one value per Site x Date x Quadrat (the first
# recorded measurement), then averaged up to the site level.
bareground <- Quadrats %>%
  filter(!is.na(BareGround)) %>%
  group_by(Site, Date, Quadrat) %>%
  summarise(total.bareground = BareGround[1])
avg.bareground <- bareground %>%
  group_by(Site) %>%
  summarise(avg.bareground = mean(total.bareground),
            number.quadrats = length(total.bareground))
# Mean floral cover per site across all quadrat records.
floral.cover <- Quadrats %>%
  group_by(Site) %>%
  summarise(avg.floralcover = mean(Cover))
#Determine species richness without nesting plots
# NOTE(review): this uses the raw Bees table (not the filtered 'bees') and
# does not exclude flies or unidentifiable records -- confirm intended.
beespp.noplot <- Bees %>%
  filter(!is.na(Binomial)) %>%
  filter(Family != "Wasp") %>%
  filter(Trap != "Plot") %>%
  count(Binomial)
#Bee species richness: specimen counts per species from the cleaned data.
beespp <- bees %>%
  count(Binomial)
#Export as .csv (left commented; run manually when an export is wanted)
#write.csv(beespp, file = "C:/Users/Morgan Mackert/Documents/ISU/Project/mmackert/Graphs/SummaryStats/BeeSpeciesRichness.csv")
#Determine abundance for each site: per-species counts, then site totals.
bees.site <- Bees %>%
  filter(Family != "Wasp") %>%
  filter(Family != "Fly") %>%
  filter(!is.na(Binomial)) %>%
  filter(!is.na(Site)) %>%
  group_by(Site) %>%
  count(Binomial)
bees.site <- bees.site %>%
  group_by(Site) %>%
  summarise(total.bees = sum(n))
#Check to make sure total matches original datafile
bees.site %>%
  summarise(sum(total.bees))
#15,904, good to go!
#Determine abundance by trap for each site/date (targeted sweeps excluded).
AbundTrap <- Bees %>%
  filter(Trap != "Target") %>%
  filter(Family != "Wasp") %>%
  group_by(Site, Date, Trap) %>%
  count(Binomial)
AbundTrap <- AbundTrap %>%
  group_by(Site, Date, Trap) %>%
  summarise(Bee.Abundance = sum(n))
# Reshape long -> wide: one column per trap type.
AbundTrapwide <- spread(AbundTrap, Trap, Bee.Abundance)
#Export as .csv
#write.csv(AbundTrapwide, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/AbundancebyTrap1234.csv")
#Determine species richness for each site (distinct identified species).
beespp.site <- Bees %>%
  filter(Family != "Wasp") %>%
  filter(Family != "Fly") %>%
  filter(Binomial != "Unidentifiable") %>%
  filter(!is.na(Site)) %>%
  group_by(Site) %>%
  summarise(no.beespp = n_distinct(Binomial))
#Determine species richness by trap for each site/date
SpecRichTrap <- Bees %>%
  filter(Trap != "Target") %>%
  filter(Family != "Wasp") %>%
  group_by(Site, Date, Trap) %>%
  summarise(Bee.Species.Richness = length(unique(Binomial)))
# Reshape long -> wide: one richness column per trap type.
SpecRichTrapwide <- spread(SpecRichTrap, Trap, Bee.Species.Richness)
#Export as .csv
#write.csv(SpecRichTrapwide, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/SpeciesRichnessbyTrap1234.csv")
#Determine genus richness for each site/date
GenusRich <- Bees %>%
  filter(Trap != "Target") %>%
  filter(Family != "Wasp") %>%
  group_by(Site, Date) %>%
  summarise(Bee.Genus.Richness = length(unique(Genus)))
#Export as .csv
#write.csv(GenusRich, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/GenusRichness1234.csv")
#Determine number of individuals of each species collected for each site/date
SpecRichAbund <- Bees %>%
  filter(Trap != "Target") %>%
  filter(Family != "Wasp") %>%
  group_by(Site, Date) %>%
  count(Binomial)
# Wide layout (one column per species); absent species become 0, not NA.
SpecRichAbundwide <- spread(SpecRichAbund, Binomial, n)
SpecRichAbundwide[is.na(SpecRichAbundwide)] <- 0
#Export as .csv
#write.csv(SpecRichAbundwide, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/SpeciesRichnessandAbundance1234.csv")
#Genus richness for 2014-2018: specimen counts per genus across the raw data.
genusrich <- Bees %>%
  group_by(Genus) %>%
  count(Genus)
#Determine the number of individuals collected in nesting plots by year
npbees <- bees %>%
  filter(Trap == "Plot") %>%
  group_by(Site, Year) %>%
  count(Binomial)
# Collapse per-species counts to total bees per Site x Year (summarise drops
# the last grouping level, leaving Site x Year rows).
npbees <- npbees %>%
  summarise(no.bees = sum(n))
#Determine the number of species collected in nesting plots by year
npbeespp <- bees %>%
filter(Trap == "Plot") %>%
group_by(Site, Year) %>%
summarise(no.npbeespp = n_distinct(Binomial))
#Explore number of bees and number of species collected in emergence traps by year to check for differences in emergence trap deployment timing (2014 in late May, 2015-2016 in early May, 2017-2018 in late April)
etrap.bees <- bees %>%
filter(Trap == "Emergence") %>%
group_by(Date) %>%
count(Binomial)
#Format from long to wide for easier interpretation
etrap.bees.wide <- spread(etrap.bees, Date, n)
#Export as .csv
#write.csv(etrap.bees.wide, file = "C:/Users/Morgan Mackert/Documents/ISU/Project/mmackert/Graphs/SummaryStats/EmergenceTrapBeesbyDate.csv")
#Floral resources
#Determine relative abundance of each floral species during each sampling event
Quadrats <- read.csv("Plants/Quadrats.csv", na.strings = c("", "NA"))
Quadrats$Date <- mdy(Quadrats$Date)
#Filter 2016-2017
quads34 <- Quadrats %>%
filter(Year == "3" | Year == "4")
#Mutate flower names
quads34 <- quads34 %>%
mutate(Species = case_when(
Species == "Alfalfa" ~ "Medicago sativa",
Species == "Bee balm" ~ "Monarda fistulosa",
Species == "Birdsfoot trefoil" ~ "Lotus corniculatus",
Species == "Black-eyed Susan" ~ "Rudbeckia hirta",
Species == "Black medic" ~ "Medicago lupulina",
Species == "Bull thistle" ~ "Cirsium vulgare",
Species == "Canada anemone" ~ "Anemone canadensis",
Species == "Canada goldenrod" ~ "Solidago canadensis",
Species == "Canada thistle" ~ "Cirsium arvense",
Species == "Carolina horsenettle" ~ "Solanum carolinense",
Species == "Cleavers" ~ "Galium aparine",
Species == "Common daisy" ~ "Bellis perennis",
Species == "Common daylily" ~ "Hemerocallis fulva",
Species == "Common milkweed" ~ "Asclepias syriaca",
Species == "Common mullein" ~ "Verbascum thapsus",
Species == "Common yellow wood sorrel" ~ "Oxalis stricta",
Species == "Cup plant" ~ "Silphium perfoliatum",
Species == "Curly dock" ~ "Rumex crispus",
Species == "Daisy fleabane" ~ "Erigeron annuus",
Species == "Dandelion" ~ "Taraxacum officinale",
Species == "Deptford pink" ~ "Dianthus armeria",
Species == "Dodder" ~ "Cuscuta gronovii",
Species == "Dogbane" ~ "Apocynum cannabinum",
Species == "Dotted smartweed" ~ "Polygonum punctatum",
Species == "False white indigo" ~ "Baptisia alba",
Species == "Field bindweed" ~ "Convolvulus arvensis",
Species == "Field pennycress" ~ "Thlaspi arvense",
Species == "Golden Alexander" ~ "Zizia aurea",
Species == "Gray-headed coneflower" ~ "Ratibida pinnata",
Species == "Ground cherry" ~ "Physalis virginiana",
Species == "Hairy vetch" ~ "Vicia villosa",
Species == "Hoary vervain" ~ "Verbena stricta",
Species == "Japanese honeysuckle" ~ "Lonicera japonica",
Species == "Marestail" ~ "Conyza canadensis",
Species == "Mock strawberry" ~ "Duchesnea indica",
Species == "Musk thistle" ~ "Carduus nutans",
Species == "Oxeye sunflower" ~ "Heliopsis helianthoides",
Species == "Pennsylvania smartweed" ~ "Polygonum pensylvanicum",
Species == "Pineapple weed" ~ "Matricaria discoidea",
Species == "Prairie ironweed" ~ "Vernonia fasciculata",
Species == "Prickly lettuce" ~ "Lactuca canadensis",
Species == "Purple coneflower" ~ "Echinacea purpurea",
Species == "Purple prairie clover" ~ "Dalea purpurea",
Species == "Queen Anne's lace" ~ "Daucus carota",
Species == "Rattlesnake master" ~ "Eryngium yuccifolium",
Species == "Red clover" ~ "Trifolium pratense",
Species == "Red raspberry" ~ "Rubus idaeus",
Species == "Sawtooth sunflower" ~ "Helianthus grosseserratus",
Species == "Showy tick trefoil" ~ "Desmodium canadense",
Species == "Soapwort" ~ "Saponaria officinalis",
Species == "Sow thistle" ~ "Sonchus arvensis",
Species == "Star of Bethlehem" ~ "Ornithogalum umbellatum",
Species == "Stiff goldenrod" ~ "Solidago rigida",
Species == "Velvet leaf" ~ "Abutilon theophrasti",
Species == "White campion" ~ "Silene latifolia",
Species == "White clover" ~ "Trifolium repens",
Species == "White sweet clover" ~ "Melilotus albus",
Species == "Whorled milkweed" ~ "Asclepias verticillata",
Species == "Wild cucumber" ~ "Echinocystis lobata",
Species == "Wild mustard" ~ "Sinapis arvensis",
Species == "Wild parsnip" ~ "Pastinaca sativa",
Species == "Yarrow" ~ "Achillea millefolium",
Species == "Yellow sweet clover" ~ "Melilotus officinalis"
))
#Determine total number of plants in bloom in quadrats during 2016-2017
bloom.plants <- quads34 %>%
filter(!is.na(Species)) %>%
group_by(Species) %>%
count()
#Convert percent coverage to square meters
plantcover.sqm <- quads34 %>%
filter(!is.na(Species)) %>%
mutate(cover.sqm = Cover/100)
#Calculate total amount of coverage for each species
plantcover.sqm <- plantcover.sqm %>%
group_by(Species) %>%
summarise(Total.Cover = sum(cover.sqm))
#Divide Total Cover by 800 sq. m. to determine total relative abundance of each floral species over 2016-2017
plantcover.sqm <- plantcover.sqm %>%
group_by(Species) %>%
mutate(rel.abun = Total.Cover/800)
#Determine percentage of relative abundance
plantcover.sqm <- plantcover.sqm %>%
group_by(Species) %>%
mutate(rel.abun.per = rel.abun*100)
#Old code ####
#Reformat dataset from long to wide
quads34.wide <- spread(quads34.long, Species, Total.Cover)
#Fill NAs with 0
quads34.wide[is.na(quads34.wide)] <- 0
#Remove V1 column
quads34.wide <- quads34.wide %>%
select(-V1)
#Combine Site and Date columns
quads34.wide <- quads34.wide %>%
unite(SiteDate, c(Site, Date), sep = " ", remove = TRUE)
#Change column to rownames
quads34.wide <- quads34.wide %>%
remove_rownames %>%
column_to_rownames(var = "SiteDate")
#Determine relative abundance of blooming plants in quadrats with vegan
quads.rel <- decostand(quads34.wide, method = "total", na.rm = FALSE)
#Export as .csv
write.csv(quads.rel, file = "C:/Users/Morgan Mackert/Documents/ISU/Project/mmackert/Graphs/SummaryStats/Plant Abundance.csv")
| /AnalysisScripts/SummaryStats.R | no_license | morganmackert/mmackert | R | false | false | 10,864 | r | #-------------------------------------------------------------------#
# Summary Statistics for Full Dataset #
# 2014-2018 #
#-------------------------------------------------------------------#
#Clear environment and set working directory
rm(list=ls())
setwd("~/ISU/Project/Data")
#Load libraries
library(lubridate)
library(dplyr)
library(tidyr)
#Read in data
Bees <- read.csv("Bees/Bee IDs.csv", header = T, na.strings = c("", "NA"))
Quadrats <- read.csv("Plants/Quadrats.csv")
#Format date with lubridate
Bees$Date <- mdy(Bees$Date)
Quadrats$Date <- mdy(Quadrats$Date)
#Change Year from number to year
Bees$Year <- year(Bees$Date)
Quadrats$Year <- year(Quadrats$Date)
#Filter out bad stuff from Bees
bees <- Bees %>%
filter(Family != "Wasp") %>%
filter(Family != "Fly") %>%
filter(Binomial != "Wasp") %>%
filter(Binomial != "Unidentifiable") %>%
filter(!is.na(Site))
#Format Bare.Ground column as numeric
Quadrats$BareGround <- as.numeric(Quadrats$BareGround)
#Average bare ground cover for each site
bareground <- Quadrats %>%
filter(!is.na(BareGround)) %>%
group_by(Site, Date, Quadrat) %>%
summarise(total.bareground = BareGround[1])
avg.bareground <- bareground %>%
group_by(Site) %>%
summarise(avg.bareground = mean(total.bareground),
number.quadrats = length(total.bareground))
#Average floral cover for each site
floral.cover <- Quadrats %>%
group_by(Site) %>%
summarise(avg.floralcover = mean(Cover))
#Determine species richness without nesting plots
beespp.noplot <- Bees %>%
filter(!is.na(Binomial)) %>%
filter(Family != "Wasp") %>%
filter(Trap != "Plot") %>%
count(Binomial)
#Bee species richness
beespp <- bees %>%
count(Binomial)
#Export as .csv
#write.csv(beespp, file = "C:/Users/Morgan Mackert/Documents/ISU/Project/mmackert/Graphs/SummaryStats/BeeSpeciesRichness.csv")
#Determine abundance for each site
bees.site <- Bees %>%
filter(Family != "Wasp") %>%
filter(Family != "Fly") %>%
filter(!is.na(Binomial)) %>%
filter(!is.na(Site)) %>%
group_by(Site) %>%
count(Binomial)
bees.site <- bees.site %>%
group_by(Site) %>%
summarise(total.bees = sum(n))
#Check to make sure total matches original datafile
bees.site %>%
summarise(sum(total.bees))
#15,904, good to go!
#Determine abundance by trap for each site/date
AbundTrap <- Bees %>%
filter(Trap != "Target") %>%
filter(Family != "Wasp") %>%
group_by(Site, Date, Trap) %>%
count(Binomial)
AbundTrap <- AbundTrap %>%
group_by(Site, Date, Trap) %>%
summarise(Bee.Abundance = sum(n))
AbundTrapwide <- spread(AbundTrap, Trap, Bee.Abundance)
#Export as .csv
#write.csv(AbundTrapwide, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/AbundancebyTrap1234.csv")
#Determine species richness for each site
beespp.site <- Bees %>%
filter(Family != "Wasp") %>%
filter(Family != "Fly") %>%
filter(Binomial != "Unidentifiable") %>%
filter(!is.na(Site)) %>%
group_by(Site) %>%
summarise(no.beespp = n_distinct(Binomial))
#Determine species richness by trap for each site/date
SpecRichTrap <- Bees %>%
filter(Trap != "Target") %>%
filter(Family != "Wasp") %>%
group_by(Site, Date, Trap) %>%
summarise(Bee.Species.Richness = length(unique(Binomial)))
SpecRichTrapwide <- spread(SpecRichTrap, Trap, Bee.Species.Richness)
#Export as .csv
#write.csv(SpecRichTrapwide, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/SpeciesRichnessbyTrap1234.csv")
#Determine genus richness for each site/date
GenusRich <- Bees %>%
filter(Trap != "Target") %>%
filter(Family != "Wasp") %>%
group_by(Site, Date) %>%
summarise(Bee.Genus.Richness = length(unique(Genus)))
#Export as .csv
#write.csv(GenusRich, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/GenusRichness1234.csv")
#Determine number of individuals of each species collected for each site/date
SpecRichAbund <- Bees %>%
filter(Trap != "Target") %>%
filter(Family != "Wasp") %>%
group_by(Site, Date) %>%
count(Binomial)
SpecRichAbundwide <- spread(SpecRichAbund, Binomial, n)
SpecRichAbundwide[is.na(SpecRichAbundwide)] <- 0
#Export as .csv
#write.csv(SpecRichAbundwide, file = "C:/Users/morga/Documents/ISU/Project/mmackert/Graphs/SummaryStats/SpeciesRichnessandAbundance1234.csv")
#Genus richness for 2014-2018
genusrich <- Bees %>%
group_by(Genus) %>%
count(Genus)
#Determine the number of individuals collected in nesting plots by year
npbees <- bees %>%
filter(Trap == "Plot") %>%
group_by(Site, Year) %>%
count(Binomial)
npbees <- npbees %>%
summarise(no.bees = sum(n))
#Determine the number of species collected in nesting plots by year
npbeespp <- bees %>%
filter(Trap == "Plot") %>%
group_by(Site, Year) %>%
summarise(no.npbeespp = n_distinct(Binomial))
#Explore number of bees and number of species collected in emergence traps by year to check for differences in emergence trap deployment timing (2014 in late May, 2015-2016 in early May, 2017-2018 in late April)
etrap.bees <- bees %>%
filter(Trap == "Emergence") %>%
group_by(Date) %>%
count(Binomial)
#Format from long to wide for easier interpretation
etrap.bees.wide <- spread(etrap.bees, Date, n)
#Export as .csv
#write.csv(etrap.bees.wide, file = "C:/Users/Morgan Mackert/Documents/ISU/Project/mmackert/Graphs/SummaryStats/EmergenceTrapBeesbyDate.csv")
#Floral resources
#Determine relative abundance of each floral species during each sampling event
Quadrats <- read.csv("Plants/Quadrats.csv", na.strings = c("", "NA"))
Quadrats$Date <- mdy(Quadrats$Date)
#Filter 2016-2017
quads34 <- Quadrats %>%
filter(Year == "3" | Year == "4")
#Mutate flower names
quads34 <- quads34 %>%
mutate(Species = case_when(
Species == "Alfalfa" ~ "Medicago sativa",
Species == "Bee balm" ~ "Monarda fistulosa",
Species == "Birdsfoot trefoil" ~ "Lotus corniculatus",
Species == "Black-eyed Susan" ~ "Rudbeckia hirta",
Species == "Black medic" ~ "Medicago lupulina",
Species == "Bull thistle" ~ "Cirsium vulgare",
Species == "Canada anemone" ~ "Anemone canadensis",
Species == "Canada goldenrod" ~ "Solidago canadensis",
Species == "Canada thistle" ~ "Cirsium arvense",
Species == "Carolina horsenettle" ~ "Solanum carolinense",
Species == "Cleavers" ~ "Galium aparine",
Species == "Common daisy" ~ "Bellis perennis",
Species == "Common daylily" ~ "Hemerocallis fulva",
Species == "Common milkweed" ~ "Asclepias syriaca",
Species == "Common mullein" ~ "Verbascum thapsus",
Species == "Common yellow wood sorrel" ~ "Oxalis stricta",
Species == "Cup plant" ~ "Silphium perfoliatum",
Species == "Curly dock" ~ "Rumex crispus",
Species == "Daisy fleabane" ~ "Erigeron annuus",
Species == "Dandelion" ~ "Taraxacum officinale",
Species == "Deptford pink" ~ "Dianthus armeria",
Species == "Dodder" ~ "Cuscuta gronovii",
Species == "Dogbane" ~ "Apocynum cannabinum",
Species == "Dotted smartweed" ~ "Polygonum punctatum",
Species == "False white indigo" ~ "Baptisia alba",
Species == "Field bindweed" ~ "Convolvulus arvensis",
Species == "Field pennycress" ~ "Thlaspi arvense",
Species == "Golden Alexander" ~ "Zizia aurea",
Species == "Gray-headed coneflower" ~ "Ratibida pinnata",
Species == "Ground cherry" ~ "Physalis virginiana",
Species == "Hairy vetch" ~ "Vicia villosa",
Species == "Hoary vervain" ~ "Verbena stricta",
Species == "Japanese honeysuckle" ~ "Lonicera japonica",
Species == "Marestail" ~ "Conyza canadensis",
Species == "Mock strawberry" ~ "Duchesnea indica",
Species == "Musk thistle" ~ "Carduus nutans",
Species == "Oxeye sunflower" ~ "Heliopsis helianthoides",
Species == "Pennsylvania smartweed" ~ "Polygonum pensylvanicum",
Species == "Pineapple weed" ~ "Matricaria discoidea",
Species == "Prairie ironweed" ~ "Vernonia fasciculata",
Species == "Prickly lettuce" ~ "Lactuca canadensis",
Species == "Purple coneflower" ~ "Echinacea purpurea",
Species == "Purple prairie clover" ~ "Dalea purpurea",
Species == "Queen Anne's lace" ~ "Daucus carota",
Species == "Rattlesnake master" ~ "Eryngium yuccifolium",
Species == "Red clover" ~ "Trifolium pratense",
Species == "Red raspberry" ~ "Rubus idaeus",
Species == "Sawtooth sunflower" ~ "Helianthus grosseserratus",
Species == "Showy tick trefoil" ~ "Desmodium canadense",
Species == "Soapwort" ~ "Saponaria officinalis",
Species == "Sow thistle" ~ "Sonchus arvensis",
Species == "Star of Bethlehem" ~ "Ornithogalum umbellatum",
Species == "Stiff goldenrod" ~ "Solidago rigida",
Species == "Velvet leaf" ~ "Abutilon theophrasti",
Species == "White campion" ~ "Silene latifolia",
Species == "White clover" ~ "Trifolium repens",
Species == "White sweet clover" ~ "Melilotus albus",
Species == "Whorled milkweed" ~ "Asclepias verticillata",
Species == "Wild cucumber" ~ "Echinocystis lobata",
Species == "Wild mustard" ~ "Sinapis arvensis",
Species == "Wild parsnip" ~ "Pastinaca sativa",
Species == "Yarrow" ~ "Achillea millefolium",
Species == "Yellow sweet clover" ~ "Melilotus officinalis"
))
#Determine total number of plants in bloom in quadrats during 2016-2017
bloom.plants <- quads34 %>%
filter(!is.na(Species)) %>%
group_by(Species) %>%
count()
#Convert percent coverage to square meters
plantcover.sqm <- quads34 %>%
filter(!is.na(Species)) %>%
mutate(cover.sqm = Cover/100)
#Calculate total amount of coverage for each species
plantcover.sqm <- plantcover.sqm %>%
group_by(Species) %>%
summarise(Total.Cover = sum(cover.sqm))
#Divide Total Cover by 800 sq. m. to determine total relative abundance of each floral species over 2016-2017
plantcover.sqm <- plantcover.sqm %>%
group_by(Species) %>%
mutate(rel.abun = Total.Cover/800)
#Determine percentage of relative abundance
plantcover.sqm <- plantcover.sqm %>%
group_by(Species) %>%
mutate(rel.abun.per = rel.abun*100)
#Old code ####
#Reformat dataset from long to wide
quads34.wide <- spread(quads34.long, Species, Total.Cover)
#Fill NAs with 0
quads34.wide[is.na(quads34.wide)] <- 0
#Remove V1 column
quads34.wide <- quads34.wide %>%
select(-V1)
#Combine Site and Date columns
quads34.wide <- quads34.wide %>%
unite(SiteDate, c(Site, Date), sep = " ", remove = TRUE)
#Change column to rownames
quads34.wide <- quads34.wide %>%
remove_rownames %>%
column_to_rownames(var = "SiteDate")
#Determine relative abundance of blooming plants in quadrats with vegan
quads.rel <- decostand(quads34.wide, method = "total", na.rm = FALSE)
#Export as .csv
write.csv(quads.rel, file = "C:/Users/Morgan Mackert/Documents/ISU/Project/mmackert/Graphs/SummaryStats/Plant Abundance.csv")
|
#rm(list=ls(all=T))
library(inline)
txt<-'
#include<Eigen/Dense>
using namespace Rcpp;
using namespace Eigen;
MatrixXd m1=as<MatrixXd> (xs);
ColPivHouseholderQR<MatrixXd> dec(m1);
MatrixXd q1=dec.householderQ();
MatrixXd p1=dec.colsPermutation();
//MatrixXd r1=dec.matrixR().triangularView<Upper>()*p1.inverse();
MatrixXd r1=dec.matrixR().triangularView<Upper>();
return List::create(Named("Q")=q1, Named("R")=r1, Named("P")=p1);
'
qr1<-cxxfunction(signature(xs="numeric"),body=txt,plugin="RcppEigen")
#m1<-matrix(rnorm(9),nr=3)
#q1<-qr(m1)
#q2<-qr1(m1)
| /qr.R | no_license | Allisterh/High_Dimensional_Cointegration | R | false | false | 559 | r | #rm(list=ls(all=T))
library(inline)
txt<-'
#include<Eigen/Dense>
using namespace Rcpp;
using namespace Eigen;
MatrixXd m1=as<MatrixXd> (xs);
ColPivHouseholderQR<MatrixXd> dec(m1);
MatrixXd q1=dec.householderQ();
MatrixXd p1=dec.colsPermutation();
//MatrixXd r1=dec.matrixR().triangularView<Upper>()*p1.inverse();
MatrixXd r1=dec.matrixR().triangularView<Upper>();
return List::create(Named("Q")=q1, Named("R")=r1, Named("P")=p1);
'
qr1<-cxxfunction(signature(xs="numeric"),body=txt,plugin="RcppEigen")
#m1<-matrix(rnorm(9),nr=3)
#q1<-qr(m1)
#q2<-qr1(m1)
|
## System packages == installed packages with a non-NA priority
## Returns TRUE/FALSE, indicating whether the symlinking was successful
symlinkSystemPackages <- function(project = NULL) {
project <- getProjectDir(project)
# Get the path to the base R library installation
sysLibPath <- normalizePath(R.home("library"), winslash = "/", mustWork = TRUE)
## Get the system packages
sysPkgs <- utils::installed.packages(sysLibPath)
sysPkgsBase <- sysPkgs[!is.na(sysPkgs[, "Priority"]), ]
sysPkgNames <- rownames(sysPkgsBase)
## Make a directory where we can symlink these libraries
libRdir <- libRdir(project = project)
if (!file.exists(libRdir))
if (!dir.create(libRdir, recursive = TRUE))
return(FALSE)
## Generate symlinks for each package
for (pkg in sysPkgNames) {
source <- file.path(sysLibPath, pkg)
target <- file.path(libRdir, pkg)
if (!ensurePackageSymlink(source, target))
return(FALSE)
}
TRUE
}
isPathToSamePackage <- function(source, target) {
# When not on Windows, we can just check that the normalized
# paths resolve to the same location.
if (!is.windows())
return(normalizePath(source) == normalizePath(target))
# On Windows, junction points are not resolved by 'normalizePath()',
# so we need an alternate strategy for determining if the junction
# point is up to date. We ensure that the 'DESCRIPTION' files at
# both locations are equivalent.
lhsPath <- file.path(source, "DESCRIPTION")
rhsPath <- file.path(target, "DESCRIPTION")
lhsContents <- readChar(lhsPath, file.info(lhsPath)$size, TRUE)
rhsContents <- readChar(rhsPath, file.info(rhsPath)$size, TRUE)
identical(lhsContents, rhsContents)
}
# Clean up recursive symlinks erroneously generated by
# older versions of packrat. This code can probably be
# removed in a future release of packrat.
cleanRecursivePackageSymlinks <- function(source) {
target <- file.path(source, basename(source))
if (file.exists(target)) {
sourceFiles <- list.files(source)
targetFiles <- list.files(target)
if (identical(sourceFiles, targetFiles))
unlink(target)
}
}
ensurePackageSymlink <- function(source, target) {
cleanRecursivePackageSymlinks(source)
# If we have a symlink already active in the
# target location, check that it points to the
# library corresponding to the current running
# R session.
if (file.exists(target)) {
if (isPathToSamePackage(source, target))
return(TRUE)
# Remove the old symlink. Both junction points and symlinks
# are safely removed with a simple, non-recursive unlink.
unlink(target)
}
# If, for some reason, the target directory
# still exists, bail as otherwise symlinking
# will not work as desired.
if (file.exists(target))
stop("Target '", target, "' already exists and is not a symlink")
# Perform the symlink.
symlink(source, target)
# Success if the file now exists
file.exists(target)
}
symlinkExternalPackages <- function(project = NULL) {
project <- getProjectDir(project)
# Bash any old symlinks that might exist
unlink(libExtDir(project), recursive = TRUE)
dir.create(libExtDir(project), recursive = TRUE)
# Find the user libraries -- if packrat mode is off, this is presumedly
# just the .libPaths(); if we're in packrat mode we have to ask packrat
# for those libraries
lib.loc <- NULL
if (isPackratModeOn())
lib.loc <- .packrat_mutables$get("origLibPaths")
## Although this shouldn't occur in practice, there can be intermediate states
## where e.g. packrat mode is 'on' but this state has been lost -- .libPaths()
## is usually where we want to look for external packages, anyhow
if (!length(lib.loc))
lib.loc <- .libPaths()
# Get the external packages as well as their dependencies (these need
# to be symlinked in so that imports and so on can be correctly resolved)
external.packages <- opts$external.packages()
if (!length(external.packages)) return(invisible(NULL))
pkgDeps <- recursivePackageDependencies(
external.packages,
lib.loc = lib.loc,
available.packages = NULL
)
allPkgs <- union(external.packages, pkgDeps)
# Get the locations of these packages within the supplied lib.loc
loc <- lapply(allPkgs, function(x) {
find.package(x, lib.loc = lib.loc, quiet = TRUE)
})
names(loc) <- allPkgs
# Warn about missing packages
notFound <- loc[sapply(loc, function(x) {
!length(x)
})]
if (length(notFound)) {
warning("The following external packages could not be located:\n- ",
paste(shQuote(names(notFound)), collapse = ", "))
}
# Symlink the packages that were found
loc <- loc[sapply(loc, function(x) length(x) > 0)]
results <- lapply(loc, function(x) {
symlink(
x,
file.path(libExtDir(project), basename(x))
)
})
failedSymlinks <- results[sapply(results, Negate(isTRUE))]
if (length(failedSymlinks)) {
warning("The following external packages could not be linked into ",
"the packrat private library:\n- ",
paste(shQuote(names(failedSymlinks)), collapse = ", "))
}
}
is.symlink <- function(path) {
## Strip trailing '/'
path <- gsub("/*$", "", path)
## Sys.readlink returns NA for error, "" for 'not a symlink', and <path> for symlink
## return false for first two cases, true for second
result <- Sys.readlink(path)
if (is.na(result)) FALSE
else nzchar(result)
}
useSymlinkedSystemLibrary <- function(project = NULL) {
project <- getProjectDir(project)
replaceLibrary(".Library", libRdir(project = project))
}
| /R/library-support.R | no_license | astraadria4ari/packrat | R | false | false | 5,604 | r | ## System packages == installed packages with a non-NA priority
## Returns TRUE/FALSE, indicating whether the symlinking was successful
symlinkSystemPackages <- function(project = NULL) {
project <- getProjectDir(project)
# Get the path to the base R library installation
sysLibPath <- normalizePath(R.home("library"), winslash = "/", mustWork = TRUE)
## Get the system packages
sysPkgs <- utils::installed.packages(sysLibPath)
sysPkgsBase <- sysPkgs[!is.na(sysPkgs[, "Priority"]), ]
sysPkgNames <- rownames(sysPkgsBase)
## Make a directory where we can symlink these libraries
libRdir <- libRdir(project = project)
if (!file.exists(libRdir))
if (!dir.create(libRdir, recursive = TRUE))
return(FALSE)
## Generate symlinks for each package
for (pkg in sysPkgNames) {
source <- file.path(sysLibPath, pkg)
target <- file.path(libRdir, pkg)
if (!ensurePackageSymlink(source, target))
return(FALSE)
}
TRUE
}
isPathToSamePackage <- function(source, target) {
# When not on Windows, we can just check that the normalized
# paths resolve to the same location.
if (!is.windows())
return(normalizePath(source) == normalizePath(target))
# On Windows, junction points are not resolved by 'normalizePath()',
# so we need an alternate strategy for determining if the junction
# point is up to date. We ensure that the 'DESCRIPTION' files at
# both locations are equivalent.
lhsPath <- file.path(source, "DESCRIPTION")
rhsPath <- file.path(target, "DESCRIPTION")
lhsContents <- readChar(lhsPath, file.info(lhsPath)$size, TRUE)
rhsContents <- readChar(rhsPath, file.info(rhsPath)$size, TRUE)
identical(lhsContents, rhsContents)
}
# Clean up recursive symlinks erroneously generated by
# older versions of packrat. This code can probably be
# removed in a future release of packrat.
cleanRecursivePackageSymlinks <- function(source) {
target <- file.path(source, basename(source))
if (file.exists(target)) {
sourceFiles <- list.files(source)
targetFiles <- list.files(target)
if (identical(sourceFiles, targetFiles))
unlink(target)
}
}
ensurePackageSymlink <- function(source, target) {
cleanRecursivePackageSymlinks(source)
# If we have a symlink already active in the
# target location, check that it points to the
# library corresponding to the current running
# R session.
if (file.exists(target)) {
if (isPathToSamePackage(source, target))
return(TRUE)
# Remove the old symlink. Both junction points and symlinks
# are safely removed with a simple, non-recursive unlink.
unlink(target)
}
# If, for some reason, the target directory
# still exists, bail as otherwise symlinking
# will not work as desired.
if (file.exists(target))
stop("Target '", target, "' already exists and is not a symlink")
# Perform the symlink.
symlink(source, target)
# Success if the file now exists
file.exists(target)
}
symlinkExternalPackages <- function(project = NULL) {
project <- getProjectDir(project)
# Bash any old symlinks that might exist
unlink(libExtDir(project), recursive = TRUE)
dir.create(libExtDir(project), recursive = TRUE)
# Find the user libraries -- if packrat mode is off, this is presumedly
# just the .libPaths(); if we're in packrat mode we have to ask packrat
# for those libraries
lib.loc <- NULL
if (isPackratModeOn())
lib.loc <- .packrat_mutables$get("origLibPaths")
## Although this shouldn't occur in practice, there can be intermediate states
## where e.g. packrat mode is 'on' but this state has been lost -- .libPaths()
## is usually where we want to look for external packages, anyhow
if (!length(lib.loc))
lib.loc <- .libPaths()
# Get the external packages as well as their dependencies (these need
# to be symlinked in so that imports and so on can be correctly resolved)
external.packages <- opts$external.packages()
if (!length(external.packages)) return(invisible(NULL))
pkgDeps <- recursivePackageDependencies(
external.packages,
lib.loc = lib.loc,
available.packages = NULL
)
allPkgs <- union(external.packages, pkgDeps)
# Get the locations of these packages within the supplied lib.loc
loc <- lapply(allPkgs, function(x) {
find.package(x, lib.loc = lib.loc, quiet = TRUE)
})
names(loc) <- allPkgs
# Warn about missing packages
notFound <- loc[sapply(loc, function(x) {
!length(x)
})]
if (length(notFound)) {
warning("The following external packages could not be located:\n- ",
paste(shQuote(names(notFound)), collapse = ", "))
}
# Symlink the packages that were found
loc <- loc[sapply(loc, function(x) length(x) > 0)]
results <- lapply(loc, function(x) {
symlink(
x,
file.path(libExtDir(project), basename(x))
)
})
failedSymlinks <- results[sapply(results, Negate(isTRUE))]
if (length(failedSymlinks)) {
warning("The following external packages could not be linked into ",
"the packrat private library:\n- ",
paste(shQuote(names(failedSymlinks)), collapse = ", "))
}
}
is.symlink <- function(path) {
## Strip trailing '/'
path <- gsub("/*$", "", path)
## Sys.readlink returns NA for error, "" for 'not a symlink', and <path> for symlink
## return false for first two cases, true for second
result <- Sys.readlink(path)
if (is.na(result)) FALSE
else nzchar(result)
}
useSymlinkedSystemLibrary <- function(project = NULL) {
project <- getProjectDir(project)
replaceLibrary(".Library", libRdir(project = project))
}
|
write("<html>","index.html")
write("<body>","index.html",append=T)
write("<h1>Wheat exchanges visualization</h1>","index.html",append=T)
write("<h2>click on a year</h2>","index.html",append=T)
for(period in 1:(length(years.considered)-1)){
linkName<-paste("year_",years.considered[period],".html",sep="")
linkHtmlCode<-paste("<a href=\"",linkName,"\">",years.considered[period],"</a>, ")
write(linkHtmlCode,"index.html",append=T)
}
linkName<-paste("year_",(years.considered[period+1]),".html",sep="")
linkHtmlCode<-paste("<a href=\"",linkName,"\">",years.considered[period+1],"</a> ")
write(linkHtmlCode,"index.html",append=T)
write("<h2>Some images to watch the dynamics of commercial relationships</h2>","index.html",append=T)
write("<h3>All the relationships</h3>","index.html",append=T)
write("<img src=\"wheat_world.gif\" width=\"500\" alt=\"wheat world exchanges\">","index.html",append=T)
write("<h3>Southern Europe</h3>","index.html",append=T)
write("<img src=\"wheat_southern_europe.gif\" width=\"500\" alt=\"wheat Southern Europe exchanges\">","index.html",append=T)
write("</body>","index.html",append=T)
write("</html>","index.html",append=T)
system("mv index.html html/")
| /scripts/map/r_create_html_index_annual.R | no_license | gfgprojects/cms_wheat | R | false | false | 1,192 | r | write("<html>","index.html")
write("<body>","index.html",append=T)
write("<h1>Wheat exchanges visualization</h1>","index.html",append=T)
write("<h2>click on a year</h2>","index.html",append=T)
for(period in 1:(length(years.considered)-1)){
linkName<-paste("year_",years.considered[period],".html",sep="")
linkHtmlCode<-paste("<a href=\"",linkName,"\">",years.considered[period],"</a>, ")
write(linkHtmlCode,"index.html",append=T)
}
linkName<-paste("year_",(years.considered[period+1]),".html",sep="")
linkHtmlCode<-paste("<a href=\"",linkName,"\">",years.considered[period+1],"</a> ")
write(linkHtmlCode,"index.html",append=T)
write("<h2>Some images to watch the dynamics of commercial relationships</h2>","index.html",append=T)
write("<h3>All the relationships</h3>","index.html",append=T)
write("<img src=\"wheat_world.gif\" width=\"500\" alt=\"wheat world exchanges\">","index.html",append=T)
write("<h3>Southern Europe</h3>","index.html",append=T)
write("<img src=\"wheat_southern_europe.gif\" width=\"500\" alt=\"wheat Southern Europe exchanges\">","index.html",append=T)
write("</body>","index.html",append=T)
write("</html>","index.html",append=T)
system("mv index.html html/")
|
plotSol <- function (x, yLoglikFunc, resMLE,
xlabel, ylabel, subtitle, title,
zeroLine=TRUE) {
# Variables Configurations
y <- apply(as.matrix(x), 1, function(v) yLoglikFunc(v))
xIters <- resMLE$iterations
yIters <- apply(as.matrix(xIters), 1, function(v) yLoglikFunc(v))
labelIters <- cbind(1:length(xIters), xIters)
namebank <- apply(as.matrix(labelIters), 1,
function(v) sprintf("iter %d: %f", v[1], v[2]))
# Plot derivative of loglikelihood function
plot(x, y, type="l", xlab=xlabel, ylab=ylabel, sub=subtitle, main=title)
if (zeroLine) {
abline(h = 0, lty = 2)
}
points(xIters, yIters, pch=16, col="blue")
points(resMLE$rootApproximation, # Root x
yLoglikFunc(resMLE$rootApproximation),
pch=20, col="red")
text(resMLE$rootApproximation, # Root x
yLoglikFunc(resMLE$rootApproximation),
labels=sprintf("iter %d: %f\ntime: %f", length(xIters), resMLE$rootApproximation, resMLE$time[1]),
cex= 0.7, pos=1)
}
# Draw a 2x2 panel comparing four root-finding methods (Newton, bisection,
# fixed-point, secant) on the same (derivative of the) log-likelihood curve.
# Each *MLE argument is a result list as consumed by plotSol(), carrying a
# $methodName used as the panel title.
plotMLE <- function (f, newtonMLE, bisecMLE, fixedPoiMLE, secantMLE,
                     step=1/50, xrange=c(-30, 30),
                     xlabel="theta", ylabel="log likelihood",
                     subtitle="Normal Log likelihood (std=1.)",
                     zeroLine=TRUE) {
  par(mfrow=c(2,2))
  xs <- seq(xrange[1], xrange[2], step) # Range of means to plot
  # One panel per method; the calls were identical except for the result
  # object, so a loop replaces the fourfold copy-paste.
  for (res in list(newtonMLE, bisecMLE, fixedPoiMLE, secantMLE)) {
    plotSol(xs, f, res,
            xlabel=xlabel, ylabel=ylabel, subtitle=subtitle, title=res$methodName,
            zeroLine=zeroLine)
  }
}
# Contour plot of a 2-D objective `f` over [xlim] x [ylim], overlaid with the
# optimizer's iterate path (blue) and the final approximation (red).
# `resMLE$iterations` is assumed to be a 2-column matrix of (x, y) iterates
# -- TODO confirm against the solver that produces it.
plot2DMLE <- function(f, resMLE, subtitle, xlim=c(-5, 10), ylim=c(-5, 10), step=1/50) {
  x <- seq(xlim[1], xlim[2], step)
  y <- seq(ylim[1], ylim[2], step)
  u <- as.matrix(expand.grid(x, y)) # Points to evaluate
  # expand.grid varies x fastest, so nrow=length(x) reshapes the evaluated
  # values into the orientation contour() expects.
  z <- matrix(apply(u, 1, function(v) f(v)), nrow=length(x))
  contour(x, y, z, nlevels=30, sub=subtitle)
  # points(resMLE$iterations[,1], resMLE$iterations[,2], pch=16, col="blue")
  lines(resMLE$iterations[,1], resMLE$iterations[,2], type="o", col="blue")
  points(resMLE$rootApproximation[1], resMLE$rootApproximation[2],
         pch=20, col="red")
  # NOTE(review): sprintf() is handed the full length-2 rootApproximation for
  # a single %f, so it vectorizes and emits one label per coordinate --
  # confirm this is intended.
  text(resMLE$rootApproximation[1], resMLE$rootApproximation[2],
       labels=sprintf("iter %d: %f\ntime: %f", length(resMLE$iterations[,1]), resMLE$rootApproximation, resMLE$time[1]),
       cex= 0.7, pos=1)
} | /hw2-optimization/plotter.R | no_license | jcbozonier/Gatech-ISYE-6416 | R | false | false | 2,813 | r | plotSol <- function (x, yLoglikFunc, resMLE,
                    xlabel, ylabel, subtitle, title,
                    zeroLine=TRUE) {
  # Variables Configurations
  # Evaluate the curve at the requested points and at the method's iterates.
  y <- apply(as.matrix(x), 1, function(v) yLoglikFunc(v))
  xIters <- resMLE$iterations
  yIters <- apply(as.matrix(xIters), 1, function(v) yLoglikFunc(v))
  # NOTE(review): labelIters and namebank are computed but never used below
  # -- dead-code candidates.
  labelIters <- cbind(1:length(xIters), xIters)
  namebank <- apply(as.matrix(labelIters), 1,
                     function(v) sprintf("iter %d: %f", v[1], v[2]))
  # Plot derivative of loglikelihood function
  plot(x, y, type="l", xlab=xlabel, ylab=ylabel, sub=subtitle, main=title)
  if (zeroLine) {
    abline(h = 0, lty = 2)  # reference line at zero (the root level)
  }
  points(xIters, yIters, pch=16, col="blue")
  points(resMLE$rootApproximation, # Root x
         yLoglikFunc(resMLE$rootApproximation),
         pch=20, col="red")
  # Annotate the final approximation with iteration count and elapsed time.
  text(resMLE$rootApproximation, # Root x
       yLoglikFunc(resMLE$rootApproximation),
       labels=sprintf("iter %d: %f\ntime: %f", length(xIters), resMLE$rootApproximation, resMLE$time[1]),
       cex= 0.7, pos=1)
}
# 2x2 panel comparing the four root-finding methods (Newton, bisection,
# fixed-point, secant) on the same curve; one plotSol() panel per method.
plotMLE <- function (f, newtonMLE, bisecMLE, fixedPoiMLE, secantMLE,
                     step=1/50, xrange=c(-30, 30),
                     xlabel="theta", ylabel="log likelihood",
                     subtitle="Normal Log likelihood (std=1.)",
                     zeroLine=TRUE) {
  par(mfrow=c(2,2))
  xs <- seq(xrange[1], xrange[2], step) # Range of means to plot
  plotSol(xs, f, newtonMLE,
          xlabel=xlabel, ylabel=ylabel, subtitle=subtitle, title=newtonMLE$methodName,
          zeroLine=zeroLine)
  plotSol(xs, f, bisecMLE,
          xlabel=xlabel, ylabel=ylabel, subtitle=subtitle, title=bisecMLE$methodName,
          zeroLine=zeroLine)
  plotSol(xs, f, fixedPoiMLE,
          xlabel=xlabel, ylabel=ylabel, subtitle=subtitle, title=fixedPoiMLE$methodName,
          zeroLine=zeroLine)
  plotSol(xs, f, secantMLE,
          xlabel=xlabel, ylabel=ylabel, subtitle=subtitle, title=secantMLE$methodName,
          zeroLine=zeroLine)
}
# Contour plot of a 2-D objective with the optimizer's iterate path (blue)
# and the final approximation (red) overlaid.
plot2DMLE <- function(f, resMLE, subtitle, xlim=c(-5, 10), ylim=c(-5, 10), step=1/50) {
  x <- seq(xlim[1], xlim[2], step)
  y <- seq(ylim[1], ylim[2], step)
  u <- as.matrix(expand.grid(x, y)) # Points to evaluate
  # Reshape the evaluated values for contour(); expand.grid varies x fastest.
  z <- matrix(apply(u, 1, function(v) f(v)), nrow=length(x))
  contour(x, y, z, nlevels=30, sub=subtitle)
  # points(resMLE$iterations[,1], resMLE$iterations[,2], pch=16, col="blue")
  lines(resMLE$iterations[,1], resMLE$iterations[,2], type="o", col="blue")
  points(resMLE$rootApproximation[1], resMLE$rootApproximation[2],
         pch=20, col="red")
  # Label the final point with iteration count and elapsed time.
  text(resMLE$rootApproximation[1], resMLE$rootApproximation[2],
       labels=sprintf("iter %d: %f\ntime: %f", length(resMLE$iterations[,1]), resMLE$rootApproximation, resMLE$time[1]),
       cex= 0.7, pos=1)
} |
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
# Use TRUE rather than the reassignable shorthand T.
lapply(package_names, require, character.only=TRUE)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
# NOTE(review): ".yy." / ".year." look like template placeholders substituted
# by surrounding tooling before this script runs -- it is not runnable as-is.
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize design/weight variable names across older survey years.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# AGELAST = first non-missing age, preferring the latest round; negative
# codes are treated as missing.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind <- 1
# Add aggregate event variables
FYC <- FYC %>% mutate(
  HHTEXP.yy. = HHAEXP.yy. + HHNEXP.yy., # Home Health Agency + Independent providers
  ERTEXP.yy. = ERFEXP.yy. + ERDEXP.yy., # Doctor + Facility Expenses for OP, ER, IP events
  IPTEXP.yy. = IPFEXP.yy. + IPDEXP.yy.,
  OPTEXP.yy. = OPFEXP.yy. + OPDEXP.yy., # All Outpatient
  OPYEXP.yy. = OPVEXP.yy. + OPSEXP.yy., # Physician only
  OPZEXP.yy. = OPOEXP.yy. + OPPEXP.yy., # Non-physician only
  OMAEXP.yy. = VISEXP.yy. + OTHEXP.yy.) # Other medical equipment and services
# Count how many distinct event types each person used (sum of logicals).
FYC <- FYC %>% mutate(
  TOTUSE.yy. = ((DVTOT.yy. > 0) + (RXTOT.yy. > 0) + (OBTOTV.yy. > 0) +
                  (OPTOTV.yy. > 0) + (ERTOT.yy. > 0) + (IPDIS.yy. > 0) +
                  (HHTOTD.yy. > 0) + (OMAEXP.yy. > 0))
)
# Sex
FYC <- FYC %>%
  mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
    "1" = "Male",
    "2" = "Female"))
# Complex-survey design object (PSU / stratum / person-level weight).
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Loop over event types
events <- c("TOT", "DVT", "RX", "OBV", "OBD", "OBO",
            "OPT", "OPY", "OPZ", "ERT", "IPT", "HHT", "OMA")
results <- list()
# Mean expenditure per event type, by sex, among persons with any such
# expenditure (design subset to positive spenders).
for(ev in events) {
  key <- paste0(ev, "EXP", ".yy.")
  formula <- as.formula(sprintf("~%s", key))
  results[[key]] <- svyby(formula, FUN = svymean, by = ~sex, design = subset(FYCdsgn, FYC[[key]] > 0))
}
print(results)
| /mepstrends/hc_use/json/code/r/meanEXP__sex__event__.r | permissive | RandomCriticalAnalysis/MEPS-summary-tables | R | false | false | 2,222 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Add aggregate event variables
FYC <- FYC %>% mutate(
HHTEXP.yy. = HHAEXP.yy. + HHNEXP.yy., # Home Health Agency + Independent providers
ERTEXP.yy. = ERFEXP.yy. + ERDEXP.yy., # Doctor + Facility Expenses for OP, ER, IP events
IPTEXP.yy. = IPFEXP.yy. + IPDEXP.yy.,
OPTEXP.yy. = OPFEXP.yy. + OPDEXP.yy., # All Outpatient
OPYEXP.yy. = OPVEXP.yy. + OPSEXP.yy., # Physician only
OPZEXP.yy. = OPOEXP.yy. + OPPEXP.yy., # Non-physician only
OMAEXP.yy. = VISEXP.yy. + OTHEXP.yy.) # Other medical equipment and services
FYC <- FYC %>% mutate(
TOTUSE.yy. = ((DVTOT.yy. > 0) + (RXTOT.yy. > 0) + (OBTOTV.yy. > 0) +
(OPTOTV.yy. > 0) + (ERTOT.yy. > 0) + (IPDIS.yy. > 0) +
(HHTOTD.yy. > 0) + (OMAEXP.yy. > 0))
)
# Sex
FYC <- FYC %>%
mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
"1" = "Male",
"2" = "Female"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
# Loop over event types
events <- c("TOT", "DVT", "RX", "OBV", "OBD", "OBO",
"OPT", "OPY", "OPZ", "ERT", "IPT", "HHT", "OMA")
results <- list()
for(ev in events) {
key <- paste0(ev, "EXP", ".yy.")
formula <- as.formula(sprintf("~%s", key))
results[[key]] <- svyby(formula, FUN = svymean, by = ~sex, design = subset(FYCdsgn, FYC[[key]] > 0))
}
print(results)
|
shinyServer(function(input, output,session) {
## set up input menu in sidebar
output$sb <- renderUI({
if (input$sbMenu=="stations") {
inputPanel(id="ip",
selectInput("country","Select Country",countryChoice, selected="Canada"),
radioButtons("tempScale","",c("Celsius","Fahrenheit"),inline= TRUE)
)
} else if (input$sbMenu=="statetemps") {
radioButtons("tempScale2","Select Scale",c("Celsius","Fahrenheit"),selected="Fahrenheit",inline= TRUE)
} else if (input$sbMenu=="earthquakes") {
inputPanel(id="ip2",
sliderInput("mag","Enter Magnitude Range",min=2,max=10,value=c(4,10),step=0.5)
)
}
})
source("code/locations.R", local=TRUE)
source("code/stateTemps.R", local=TRUE)
source("code/earthquakes.R", local=TRUE)
})
| /server.R | no_license | JT85/climate | R | false | false | 858 | r |
shinyServer(function(input, output,session) {
## set up input menu in sidebar
output$sb <- renderUI({
if (input$sbMenu=="stations") {
inputPanel(id="ip",
selectInput("country","Select Country",countryChoice, selected="Canada"),
radioButtons("tempScale","",c("Celsius","Fahrenheit"),inline= TRUE)
)
} else if (input$sbMenu=="statetemps") {
radioButtons("tempScale2","Select Scale",c("Celsius","Fahrenheit"),selected="Fahrenheit",inline= TRUE)
} else if (input$sbMenu=="earthquakes") {
inputPanel(id="ip2",
sliderInput("mag","Enter Magnitude Range",min=2,max=10,value=c(4,10),step=0.5)
)
}
})
source("code/locations.R", local=TRUE)
source("code/stateTemps.R", local=TRUE)
source("code/earthquakes.R", local=TRUE)
})
|
# nodes <- 1
# cores <- 20
# timeLimit <- "01:00:00"
# partition <- 'shas'
# qos <- 'long'
# output <- 'job-%j.out'
# Assemble a SLURM job-array submission script line by line.
# `timeLimit`, `partition`, `output`, `acct`, `jobs` and `maxArray` are
# expected to be defined by the caller before this script runs; the array
# has one task per row of `jobs`, throttled to `maxArray` concurrent tasks.
string <- c(
'#!/bin/bash',
'',
#paste0('#SBATCH --nodes=', nodes),
#paste0('#SBATCH --ntasks=', cores),
paste0('#SBATCH --time=', timeLimit),
paste0('#SBATCH --partition=', partition),
#paste0('#SBATCH --qos=', qos),
paste0('#SBATCH --output=', output),
paste0('#SBATCH --account=', acct),
paste0('#SBATCH --array=1-', nrow(jobs),'%',maxArray),
# The remainder is one multi-line string: job diagnostics, module loads,
# and the per-task Rscript invocation.
'
echo "SLURM_JOBID: " $SLURM_JOBID
echo "SLURM_ARRAY_TASK_ID: " $SLURM_ARRAY_TASK_ID
echo "SLURM_ARRAY_JOB_ID: " $SLURM_ARRAY_JOB_ID
echo "Scratch: " $GLOBAL_SCRATCH
module load R/3.6.1-gcc7.1.0
module load gdal/2.2.2-gcc proj/5.0.1-gcc-7.1.0 gcc/7.1.0
module load gis/geos-3.5.0
echo $(date)
Rscript loadBalance.R $SLURM_ARRAY_TASK_ID
echo $(date)
echo "== End of Job =="
')
# Write the submission script to disk under `outdir`.
cat(string, sep = '\n', file = file.path(outdir, 'loadBalance.sh'))
cat(
'#!/bin/bash
ls -l ', dataDir, ' | wc -l
echo "out of"
echo "', nrow(RUN), '"', file = file.path(outdir, 'check.sh')) | /code/sbatch.R | no_license | fickse/ssim | R | false | false | 1,043 | r |
# nodes <- 1
# cores <- 20
# timeLimit <- "01:00:00"
# partition <- 'shas'
# qos <- 'long'
# output <- 'job-%j.out'
string <- c(
'#!/bin/bash',
'',
#paste0('#SBATCH --nodes=', nodes),
#paste0('#SBATCH --ntasks=', cores),
paste0('#SBATCH --time=', timeLimit),
paste0('#SBATCH --partition=', partition),
#paste0('#SBATCH --qos=', qos),
paste0('#SBATCH --output=', output),
paste0('#SBATCH --account=', acct),
paste0('#SBATCH --array=1-', nrow(jobs),'%',maxArray),
'
echo "SLURM_JOBID: " $SLURM_JOBID
echo "SLURM_ARRAY_TASK_ID: " $SLURM_ARRAY_TASK_ID
echo "SLURM_ARRAY_JOB_ID: " $SLURM_ARRAY_JOB_ID
echo "Scratch: " $GLOBAL_SCRATCH
module load R/3.6.1-gcc7.1.0
module load gdal/2.2.2-gcc proj/5.0.1-gcc-7.1.0 gcc/7.1.0
module load gis/geos-3.5.0
echo $(date)
Rscript loadBalance.R $SLURM_ARRAY_TASK_ID
echo $(date)
echo "== End of Job =="
')
cat(string, sep = '\n', file = file.path(outdir, 'loadBalance.sh'))
cat(
'#!/bin/bash
ls -l ', dataDir, ' | wc -l
echo "out of"
echo "', nrow(RUN), '"', file = file.path(outdir, 'check.sh')) |
### data are xts objects -----
library(xts)
library(quantmod)
### Aggregate all SB loans into one series ------
# Load the two small-business loan series saved by an earlier step:
# CI and NNRE -- presumably commercial & industrial and nonfarm
# nonresidential real estate loans; names inferred from the file names,
# verify against the upstream script.
CI <- readRDS("./totSB_CI_loans.rds")
NNRE <- readRDS("./totSB_NN_RE_loans.rds")
## from here: https://stackoverflow.com/questions/42628385/sum-list-of-matrices-with-nas
# Element-wise sum of two objects in which NA entries are treated as zero,
# so a value missing from one series does not poison the combined total.
modifiedSum <- function(x, y) {
  x[is.na(x)] <- 0
  y[is.na(y)] <- 0
  x + y
}
### create total SB loans xts object-----
totSBloans <- modifiedSum(CI, NNRE)
## lag
# k = 4 periods; calling this a 1-year lag assumes the series is quarterly
# -- TODO confirm the frequency of the input data.
totSBloans_lagged_1_year <- lag(totSBloans, k = 4)
## % change
# Year-over-year growth rate: current / year-ago - 1.
totSBloans_Delt <- totSBloans / totSBloans_lagged_1_year - 1
# Persist the aggregate, its lag, and the growth series for later analyses.
saveRDS(totSBloans, "totSBloans.rds")
saveRDS(totSBloans_lagged_1_year, "totSBloans_lagged_1_year.rds")
saveRDS(totSBloans_Delt, "totSBloans_Delt.rds")
| /1_querying_data_and_analysis/analyses/panel_data_analysis/get_panel_data/pulling_all_firms/data_small_business_loans/create_totSBloans_vars.R | no_license | Matt-Brigida/FFIEC_Call_Reports | R | false | false | 740 | r | ### data are xts objects -----
library(xts)
library(quantmod)
### Aggregate all SB loans into one series ------
CI <- readRDS("./totSB_CI_loans.rds")
NNRE <- readRDS("./totSB_NN_RE_loans.rds")
## from here: https://stackoverflow.com/questions/42628385/sum-list-of-matrices-with-nas
modifiedSum <- function(x, y) {
replace(x, is.na(x), 0) + replace(y, is.na(y), 0)
}
### create total SB loans xts object-----
totSBloans <- modifiedSum(CI, NNRE)
## lag
totSBloans_lagged_1_year <- lag(totSBloans, k = 4)
## % change
totSBloans_Delt <- totSBloans / totSBloans_lagged_1_year - 1
saveRDS(totSBloans, "totSBloans.rds")
saveRDS(totSBloans_lagged_1_year, "totSBloans_lagged_1_year.rds")
saveRDS(totSBloans_Delt, "totSBloans_Delt.rds")
|
# calendar example
# Inspired by: https://www.tatvic.com/blog/calender-heatmap-with-google-analytics-data/
# install libraries
# install.packages("googleAuthR")
# install.packages("googleAnalyticsR")
# install.packages("quantmod")
# install.packages("ggplot2")
# install.packages("reshape2")
# install.packages("plyr")
# install.packages("scales")
# install.packages("zoo")
# load libraries
library("googleAuthR")
library("googleAnalyticsR")
library("quantmod")
library("ggplot2")
library("reshape2")
library("plyr")
library("scales")
library("zoo")
# optional - add your own Google Developers Console key
#options(googleAuthR.client_id = "uxxxxxxx2fd4kesu6.apps.googleusercontent.com")
#options(googleAuthR.client_secret = "3JhLa_GxxxxxCQYLe31c64")
#options(googleAuthR.scopes.selected = "https://www.googleapis.com/auth/analytics")
# authorize connection with Google Analytics servers
ga_auth()
## get your accounts
account_list <- google_analytics_account_list()
## pick a profile with data to query
#ga_id <- account_list[275,'viewId']
# or give it explicite using tool http://michalbrys.github.io/ga-tools/table-id.html in format 99999999
ga_id <- 00000000
# get data from Goolgle Analytics account
gadata <- google_analytics(id = ga_id,
start="2015-01-01", end="2016-12-31",
metrics = "sessions",
dimensions = "date",
max = 5000)
head(gadata)
# Run commands listed below - data wrangling
# Derive calendar components (year, month, weekday, week-of-month) from the
# daily `date` column so each day can be placed in the heatmap grid.
gadata$year <- as.numeric(format(as.Date(gadata$date),"%Y"))
gadata$month <- as.numeric(format(as.Date(gadata$date),"%m"))
gadata$monthf <- factor(gadata$month,levels=as.character(1:12),
                        labels=c("Jan","Feb","Mar","Apr","May","Jun",
                                 "Jul","Aug","Sep","Oct","Nov","Dec"),
                        ordered=TRUE)
# POSIXlt wday is 0..6 with Sunday = 0; remap Sunday to 7 so the week runs
# Monday (1) through Sunday (7).
gadata$weekday <- as.POSIXlt(gadata$date)$wday
gadata$weekday[gadata$weekday==0] <- 7
# Reversed levels place Monday at the top of the heatmap's y-axis.
gadata$weekdayf <- factor(gadata$weekday,levels=rev(1:7),
                          labels=rev(c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")),
                          ordered=TRUE)
gadata$yearmonth <- as.numeric(format(as.Date(gadata$date),"%Y%m"))
gadata$yearmonthf <- factor(gadata$yearmonth)
# %W = Monday-based week-of-year; monthweek rebases it to week-of-month
# within each year-month group.
gadata$week <- as.numeric(format(as.Date(gadata$date,format="%Y%m%d"),"%W"))
gadata <- ddply(gadata,.(yearmonthf),transform,monthweek=1+week-min(week))
# Plot for calendar
# Tile heatmap: one cell per day, weeks across, weekdays down, faceted by
# year x month, shaded by session count.
calendar_sessions <- ggplot(gadata, aes(monthweek, weekdayf, fill = sessions)) +
  geom_tile(colour = "white") +
  facet_grid(year~monthf) +
  scale_fill_gradient(high="#3182bd",low="#deebf7") +
  labs(title = "Calendar") +
  xlab("Week of month") +
  ylab("")
# View calendar
calendar_sessions
| /9_calendar.R | no_license | mjimcua/R-Google-Analytics | R | false | false | 2,722 | r | # calendar example
# Inspired by: https://www.tatvic.com/blog/calender-heatmap-with-google-analytics-data/
# install libraries
# install.packages("googleAuthR")
# install.packages("googleAnalyticsR")
# install.packages("quantmod")
# install.packages("ggplot2")
# install.packages("reshape2")
# install.packages("plyr")
# install.packages("scales")
# install.packages("zoo")
# load libraries
library("googleAuthR")
library("googleAnalyticsR")
library("quantmod")
library("ggplot2")
library("reshape2")
library("plyr")
library("scales")
library("zoo")
# optional - add your own Google Developers Console key
#options(googleAuthR.client_id = "uxxxxxxx2fd4kesu6.apps.googleusercontent.com")
#options(googleAuthR.client_secret = "3JhLa_GxxxxxCQYLe31c64")
#options(googleAuthR.scopes.selected = "https://www.googleapis.com/auth/analytics")
# authorize connection with Google Analytics servers
ga_auth()
## get your accounts
account_list <- google_analytics_account_list()
## pick a profile with data to query
#ga_id <- account_list[275,'viewId']
# or give it explicite using tool http://michalbrys.github.io/ga-tools/table-id.html in format 99999999
ga_id <- 00000000
# get data from Goolgle Analytics account
gadata <- google_analytics(id = ga_id,
start="2015-01-01", end="2016-12-31",
metrics = "sessions",
dimensions = "date",
max = 5000)
head(gadata)
# Run commands listed below - data wrangling
gadata$year <- as.numeric(format(as.Date(gadata$date),"%Y"))
gadata$month <- as.numeric(format(as.Date(gadata$date),"%m"))
gadata$monthf <- factor(gadata$month,levels=as.character(1:12),
labels=c("Jan","Feb","Mar","Apr","May","Jun",
"Jul","Aug","Sep","Oct","Nov","Dec"),
ordered=TRUE)
gadata$weekday <- as.POSIXlt(gadata$date)$wday
gadata$weekday[gadata$weekday==0] <- 7
gadata$weekdayf <- factor(gadata$weekday,levels=rev(1:7),
labels=rev(c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")),
ordered=TRUE)
gadata$yearmonth <- as.numeric(format(as.Date(gadata$date),"%Y%m"))
gadata$yearmonthf <- factor(gadata$yearmonth)
gadata$week <- as.numeric(format(as.Date(gadata$date,format="%Y%m%d"),"%W"))
gadata <- ddply(gadata,.(yearmonthf),transform,monthweek=1+week-min(week))
# Plot for calendar
calendar_sessions <- ggplot(gadata, aes(monthweek, weekdayf, fill = sessions)) +
geom_tile(colour = "white") +
facet_grid(year~monthf) +
scale_fill_gradient(high="#3182bd",low="#deebf7") +
labs(title = "Calendar") +
xlab("Week of month") +
ylab("")
# View calendar
calendar_sessions
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_survey_id_by_name}
\alias{get_survey_id_by_name}
\title{get_survey_id_by_name}
\usage{
get_survey_id_by_name(name, match.exact = TRUE, surveys_df = NULL)
}
\arguments{
\item{name}{Survey name}
\item{match.exact}{Search for exact name? (or partial)}
\item{surveys_df}{Dataframe of available surveys. If NULL, will load search surveys loaded from Qualtrics API}
}
\value{
Survey ID
}
\description{
Get a survey ID by name
}
| /man/get_survey_id_by_name.Rd | permissive | jlpalomino/qtoolkit | R | false | true | 519 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_survey_id_by_name}
\alias{get_survey_id_by_name}
\title{get_survey_id_by_name}
\usage{
get_survey_id_by_name(name, match.exact = TRUE, surveys_df = NULL)
}
\arguments{
\item{name}{Survey name}
\item{match.exact}{Search for exact name? (or partial)}
\item{surveys_df}{Dataframe of available surveys. If NULL, will load search surveys loaded from Qualtrics API}
}
\value{
Survey ID
}
\description{
Get a survey ID by name
}
|
# Predict mpg for one car from three models fit on mtcars:
#   Basic: mpg ~ am
#   Best:  mpg ~ wt + qsec + am
#   Int:   mpg ~ am:wt + am:qsec (interaction-only)
# trans: 0 (automatic) or 1 (manual); weight: wt; qtrsec: qsec.
# Returns a named numeric vector with one prediction per model.
# NOTE(review): datasets::mtcars$am is numeric 0/1, so recoding trans to
# "Automatic"/"Manual" only works if a version of mtcars with am as a factor
# holding those levels is in scope when the models are fit -- verify.
pred <- function(trans, weight, qtrsec) {
  if(trans==0) {trans <- "Automatic"}
  else if(trans==1) {trans <- "Manual"}
  mydf <- data.frame(am=trans,wt=weight,qsec=qtrsec)
  # The three models are refit on every call -- consider caching (review note).
  basic <- lm(formula = mpg ~ am, data = mtcars)
  best <- lm(formula = mpg ~ wt + qsec + am, data = mtcars)
  int <- lm(formula = mpg ~ am:wt + am:qsec, data = mtcars)
  a <- predict(basic, mydf)
  b <- predict(best, mydf)
  c <- predict(int, mydf)
  p <- c(a,b,c)
  names(p) <- c("Basic", "Best", "Int")
  p
} | /Regression_Models/Transmission_Efficiency/pred.R | no_license | fentontaylor/CourseraDataScience | R | false | false | 532 | r | pred <- function(trans, weight, qtrsec) {
if(trans==0) {trans <- "Automatic"}
else if(trans==1) {trans <- "Manual"}
mydf <- data.frame(am=trans,wt=weight,qsec=qtrsec)
basic <- lm(formula = mpg ~ am, data = mtcars)
best <- lm(formula = mpg ~ wt + qsec + am, data = mtcars)
int <- lm(formula = mpg ~ am:wt + am:qsec, data = mtcars)
a <- predict(basic, mydf)
b <- predict(best, mydf)
c <- predict(int, mydf)
p <- c(a,b,c)
names(p) <- c("Basic", "Best", "Int")
p
} |
library(ggplot2)
library(dplyr)
library(fasttime)
library(readr)
infile <- "../../data/serverLoad.txt"
# Centered moving average of x over a window of n points; the result is a
# stats::filter object with NA padding at both ends of the series.
calculateMovingAverage <- function(x, n) {
  window.weights <- rep(1 / n, n)
  stats::filter(x, window.weights, sides = 2)
}
# Single-page UI: a date-range picker, an optional moving-average toggle with
# its smoothing slider, the load plot, and a paged data table.
ui <- shinyUI(
  fluidPage(
    titlePanel("Server Load"),
    tags$hr(),
    # Both endpoints default to today; the server filters rows to this range.
    dateRangeInput(
      inputId = "dateRange",
      label = "Select a date range",
      start = Sys.Date(),
      end = Sys.Date(),
      separator = 'through'
    ),
    checkboxInput("ma", "Fit a moving average", FALSE),
    # The slider is only shown while the checkbox above is ticked.
    conditionalPanel(
      condition = "input.ma == true",
      sliderInput(
        inputId = "ma_adjust",
        label = "Moving average adjustment",
        min = 1, max = 100, value = 5, step = 1.0,ticks = FALSE
      )
    ),
    plotOutput("plot"),
    dataTableOutput("data")
  )
)
# Server logic: re-reads the load file twice a second, filters it to the
# requested date range, optionally smooths it, and renders plot + table.
server <- function(input, output, session) {
  # Poll `infile` every 500 ms and re-parse it whenever it changes.
  fileReaderData <- reactiveFileReader(
    intervalMillis = 500,
    session = session,
    filePath = infile,
    readFunc = read_csv,
    col_names = c('dte', 'Load')
  )
  # Parse the timestamp column once, up front.
  allData <- reactive({
    fileReaderData() %>%
      mutate(Date = fastPOSIXct(dte))
  })
  # Keep rows within the selected range; the +1 day on the upper bound makes
  # the end date inclusive.
  filterData <- reactive({
    allData() %>%
      filter(Date >= fastPOSIXct(input$dateRange[1])) %>%
      filter(Date < fastPOSIXct(input$dateRange[2]+1))
  })
  # Optionally add a moving-average column whose window is ma_adjust percent
  # of the filtered row count.
  # NOTE(review): `adj` can be fractional; calculateMovingAverage hands it to
  # rep(), which truncates -- confirm that is acceptable.
  plotData <- reactive({
    if(input$ma){
      adj <- nrow(filterData()) * input$ma_adjust / 100
      filterData() %>%
        mutate(MA = calculateMovingAverage(Load, adj))
    }
    else
      filterData()
  })
  # Vertical segment per observation on a log10 y-axis, with the smoothed
  # line overlaid when requested.
  output$plot <- renderPlot({
    p1 <- ggplot(plotData(), aes(x=Date, xend=Date, y=0, yend=Load)) +
      geom_segment(color='darkblue', alpha = 0.25) +
      xlab("") +
      ylab("Log10 Load") +
      scale_y_log10()
    if(input$ma)
      p1 + geom_line(aes(Date, MA), size = 1.5, color = 'tomato', alpha = 0.75)
    else
      p1
  })
  # Most recent observations first, five rows per page.
  output$data <- renderDataTable(
    select(filterData(), Date, Load),
    options = list(pageLength = 5, order = list(0, 'desc'))
  )
}
shinyApp(ui, server) | /02-NewTopics/05-reactivepoll/apps/02a-withWidgets/app.R | no_license | nwstephens/shiny-day-2016 | R | false | false | 2,052 | r | library(ggplot2)
library(dplyr)
library(fasttime)
library(readr)
infile <- "../../data/serverLoad.txt"
calculateMovingAverage <- function(x, n)
stats::filter(x, rep(1/n, n), sides = 2)
ui <- shinyUI(
fluidPage(
titlePanel("Server Load"),
tags$hr(),
dateRangeInput(
inputId = "dateRange",
label = "Select a date range",
start = Sys.Date(),
end = Sys.Date(),
separator = 'through'
),
checkboxInput("ma", "Fit a moving average", FALSE),
conditionalPanel(
condition = "input.ma == true",
sliderInput(
inputId = "ma_adjust",
label = "Moving average adjustment",
min = 1, max = 100, value = 5, step = 1.0,ticks = FALSE
)
),
plotOutput("plot"),
dataTableOutput("data")
)
)
server <- function(input, output, session) {
fileReaderData <- reactiveFileReader(
intervalMillis = 500,
session = session,
filePath = infile,
readFunc = read_csv,
col_names = c('dte', 'Load')
)
allData <- reactive({
fileReaderData() %>%
mutate(Date = fastPOSIXct(dte))
})
filterData <- reactive({
allData() %>%
filter(Date >= fastPOSIXct(input$dateRange[1])) %>%
filter(Date < fastPOSIXct(input$dateRange[2]+1))
})
plotData <- reactive({
if(input$ma){
adj <- nrow(filterData()) * input$ma_adjust / 100
filterData() %>%
mutate(MA = calculateMovingAverage(Load, adj))
}
else
filterData()
})
output$plot <- renderPlot({
p1 <- ggplot(plotData(), aes(x=Date, xend=Date, y=0, yend=Load)) +
geom_segment(color='darkblue', alpha = 0.25) +
xlab("") +
ylab("Log10 Load") +
scale_y_log10()
if(input$ma)
p1 + geom_line(aes(Date, MA), size = 1.5, color = 'tomato', alpha = 0.75)
else
p1
})
output$data <- renderDataTable(
select(filterData(), Date, Load),
options = list(pageLength = 5, order = list(0, 'desc'))
)
}
shinyApp(ui, server) |
#' Calculate Cell cycle phase prediction
#'
#' This function takes an object of class iCellR and assignes cell cycle stage for the cells.
#' @param object A data frame containing gene counts for cells.
#' @param s.genes Genes that are used as a marker for S phase.
#' @param g2m.genes Genes that are used as a marker for G2 and M phase.
#' @return The data frame object
#' @importFrom Hmisc cut2
#' @export
cc <- function (object = NULL,
                s.genes = s.phase,
                g2m.genes = g2m.phase) {
  # Only iCellR objects carry the @raw.data / @stats slots used below.
  if ("iCellR" != class(object)[1]) {
    stop("object should be an object of class iCellR")
  }
  ##### get genes case insensetive
  # Resolve the supplied marker symbols against the row names of the raw
  # count matrix with whole-name, case-insensitive matching: each symbol is
  # anchored as ^symbol$ and the set is OR-ed into one regular expression.
  ALLgenes = row.names(object@raw.data)
  s.phase.genes <- s.genes
  s.phase.genes <- paste("^",s.phase.genes,"$", sep="")
  s.phase.genes <- paste(s.phase.genes,collapse="|")
  s.phase.genes <- grep(s.phase.genes, x = ALLgenes, value = TRUE, ignore.case = TRUE)
  s.genes <- s.phase.genes
  #
  g2m.phase.genes <- g2m.genes
  g2m.phase.genes <- paste("^",g2m.phase.genes,"$", sep="")
  g2m.phase.genes <- paste(g2m.phase.genes,collapse="|")
  g2m.phase.genes <- grep(g2m.phase.genes, x = ALLgenes, value = TRUE, ignore.case = TRUE)
  g2m.genes <- g2m.phase.genes
  #####
  # Key the per-cell stats table by cell id so the metadata joins performed
  # by the helpers below can match on row names.
  Table <- object@stats
  row.names(Table) <- Table$CellIds
  attributes(object)$stats <- Table
  # head(object@stats)
  ########## 1
LengthCheck <- function(values, cutoff = 0) {
return(vapply(
X = values,
FUN = function(x) {
return(length(x = x) > cutoff)
},
FUN.VALUE = logical(1)
))
}
########## 2
####
  # Attach the columns of `metadata` to object@stats, aligning rows by cell
  # id (row names). Non-list metadata is coerced to a data.frame and must be
  # given a column name via `col.name`.
  AddMetaDatame <- function (object, metadata, col.name = NULL)
  {
    if (typeof(x = metadata) != "list") {
      metadata <- as.data.frame(x = metadata)
      if (is.null(x = col.name)) {
        stop("Please provide a name for provided metadata")
      }
      colnames(x = metadata) <- col.name
    }
    cols.add <- colnames(x = metadata)
    # Reorder metadata rows to follow the cell order of object@stats.
    meta.order <- match(rownames(object@stats), rownames(metadata))
    meta.add <- metadata[meta.order, ]
    # NOTE(review): is.null() returns a single flag, so all(is.null(...))
    # cannot detect partially unmatched cells -- confirm intended.
    if (all(is.null(x = meta.add))) {
      stop("Metadata provided doesn't match the cells in this object")
    }
    object@stats[, cols.add] <- meta.add
    return(object)
  }
  ######### func 2
  # Module scoring: genes are binned by their average expression across the
  # pool (n.bin bins via Hmisc::cut2), `ctrl.size` control genes are sampled
  # from each module gene's bin, and the per-cell score is
  # mean(module genes) - mean(control genes). Scores are appended to
  # object@stats as "<enrich.name><i>" columns via AddMetaDatame.
  AddModuleScoreme <- function (object, genes.list = NULL, genes.pool = NULL, n.bin = 25,
                                seed.use = 1, ctrl.size = 100, use.k = FALSE, enrich.name = "Cluster",
                                random.seed = 1)
  {
    # Seed the RNG so the control-gene sampling below is reproducible.
    set.seed(seed = random.seed)
genes.old <- genes.list
if (use.k) {
genes.list <- list()
for (i in as.numeric(x = names(x = table(object@kmeans.obj[[1]]$cluster)))) {
genes.list[[i]] <- names(x = which(x = object@kmeans.obj[[1]]$cluster ==
i))
}
cluster.length <- length(x = genes.list)
}
else {
if (is.null(x = genes.list)) {
stop("Missing input gene list")
}
genes.list <- lapply(X = genes.list, FUN = function(x) {
return(intersect(x = x, y = rownames(x = object@raw.data)))
})
cluster.length <- length(x = genes.list)
}
if (!all(LengthCheck(values = genes.list))) {
warning(paste("Could not find enough genes in the object from the following gene lists:",
paste(names(x = which(x = !LengthCheck(values = genes.list)))),
"Attempting to match case..."))
genes.list <- lapply(X = genes.old, FUN = CaseMatch,
match = rownames(x = object@raw.data))
}
if (!all(LengthCheck(values = genes.list))) {
stop(paste("The following gene lists do not have enough genes present in the object:",
paste(names(x = which(x = !LengthCheck(values = genes.list)))),
"exiting..."))
}
if (is.null(x = genes.pool)) {
genes.pool = rownames(x = object@raw.data)
}
data.avg <- Matrix::rowMeans(x = object@raw.data[genes.pool,
])
data.avg <- data.avg[order(data.avg)]
#
data.cut <- as.numeric(x = Hmisc::cut2(x = data.avg, m = round(x = length(x = data.avg)/n.bin)))
names(x = data.cut) <- names(x = data.avg)
ctrl.use <- vector(mode = "list", length = cluster.length)
for (i in 1:cluster.length) {
genes.use <- genes.list[[i]]
for (j in 1:length(x = genes.use)) {
ctrl.use[[i]] <- c(ctrl.use[[i]], names(x = sample(x = data.cut[which(x = data.cut ==
data.cut[genes.use[j]])], size = ctrl.size, replace = FALSE)))
}
}
ctrl.use <- lapply(X = ctrl.use, FUN = unique)
ctrl.scores <- matrix(data = numeric(length = 1L), nrow = length(x = ctrl.use),
ncol = ncol(x = object@raw.data))
for (i in 1:length(ctrl.use)) {
genes.use <- ctrl.use[[i]]
ctrl.scores[i, ] <- Matrix::colMeans(x = object@raw.data[genes.use,
])
}
genes.scores <- matrix(data = numeric(length = 1L), nrow = cluster.length,
ncol = ncol(x = object@raw.data))
for (i in 1:cluster.length) {
genes.use <- genes.list[[i]]
data.use <- object@raw.data[genes.use, , drop = FALSE]
genes.scores[i, ] <- Matrix::colMeans(x = data.use)
}
genes.scores.use <- genes.scores - ctrl.scores
rownames(x = genes.scores.use) <- paste0(enrich.name, 1:cluster.length)
genes.scores.use <- as.data.frame(x = t(x = genes.scores.use))
rownames(x = genes.scores.use) <- colnames(x = object@raw.data)
object <- AddMetaDatame(object = object, metadata = genes.scores.use,
col.name = colnames(x = genes.scores.use))
gc(verbose = FALSE)
return(object)
}
######## 3
  # Score every cell for the S and G2/M programs via AddModuleScoreme and
  # call a phase per cell: "G1" when both scores are negative, otherwise the
  # phase with the higher score ("Undecided" on an exact tie). Scores and
  # the Phase label are written back into object@stats.
  CellCycleScoringme <- function (object, g2m.genes, s.genes, set.ident = FALSE)
  {
    enrich.name <- "Cell Cycle"
    genes.list <- list(S.Score = s.genes, G2M.Score = g2m.genes)
    # Control-set size is capped by the smaller of the two marker lists.
    object.cc <- AddModuleScoreme(object = object, genes.list = genes.list,
                                  enrich.name = enrich.name, ctrl.size = min(vapply(X = genes.list,
                                                                                    FUN = length, FUN.VALUE = numeric(1))))
    cc.columns <- grep(pattern = enrich.name, x = colnames(x = object.cc@stats))
    cc.scores <- object.cc@stats[, cc.columns]
    # head(cc.scores)
    rm(object.cc)
    gc(verbose = FALSE)
    # Row-wise phase call from the (S, G2M) score pair.
    assignments <- apply(X = cc.scores, MARGIN = 1, FUN = function(scores,
                                                                   first = "S", second = "G2M", null = "G1") {
      if (all(scores < 0)) {
        return(null)
      }
      else {
        if (length(which(x = scores == max(scores))) > 1) {
          return("Undecided")
        }
        else {
          return(c(first, second)[which(x = scores == max(scores))])
        }
      }
    })
    # merge(by = 0) joins on row names and emits a row-name key column.
    cc.scores <- merge(x = cc.scores, y = data.frame(assignments),
                       by = 0)
    colnames(x = cc.scores) <- c("rownames", "S.Score", "G2M.Score",
                                 "Phase")
    rownames(x = cc.scores) <- cc.scores$rownames
    cc.scores <- cc.scores[, c("S.Score", "G2M.Score", "Phase")]
    # cc.scores
    object <- AddMetaDatame(object = object, metadata = cc.scores)
    if (set.ident) {
      # StashIdent / SetAllIdent are not defined in this file; presumably
      # supplied by the surrounding package -- verify before using
      # set.ident = TRUE.
      object <- StashIdent(object = object, save.name = "old.ident")
      object <- SetAllIdent(object = object, id = "Phase")
    }
    return(object)
  }
#############
#############
  ############# How to run
  # Drive the pipeline: score + phase-call, then re-attach the updated stats
  # table and return the modified iCellR object.
  object <- CellCycleScoringme(object = object, s.genes = s.genes, g2m.genes = g2m.genes)
  STATS <- object@stats
  attributes(object)$stats <- STATS
  return(object)
}
| /iCellR/R/F0045.R | no_license | akhikolla/InformationHouse | R | false | false | 8,033 | r | #' Calculate Cell cycle phase prediction
#'
#' This function takes an object of class iCellR and assigns a cell cycle
#' phase to every cell. Each cell gets an S score and a G2M score (mean
#' expression of the marker set minus mean expression of an
#' expression-matched control gene set); the phase is "G1" when both scores
#' are negative, "Undecided" on ties, otherwise the phase with the higher score.
#' @param object An object of class iCellR.
#' @param s.genes Genes that are used as a marker for S phase.
#' @param g2m.genes Genes that are used as a marker for G2 and M phase.
#' @return The input iCellR object with S.Score, G2M.Score and Phase columns
#'   appended to its stats slot.
#' @importFrom Hmisc cut2
#' @export
cc <- function (object = NULL,
                s.genes = s.phase,
                g2m.genes = g2m.phase) {
  # Defaults s.phase / g2m.phase are marker-gene vectors expected to be in
  # scope at call time (presumably package data objects) -- TODO confirm.
  if ("iCellR" != class(object)[1]) {
    stop("object should be an object of class iCellR")
  }
  ##### get genes case insensetive
  # Anchor each marker symbol as ^symbol$ and grep it case-insensitively
  # against the row names of the raw data, so marker lists written with a
  # different capitalization than the data still match. The matched names
  # (as spelled in the data) replace the user-supplied lists.
  ALLgenes = row.names(object@raw.data)
  s.phase.genes <- s.genes
  s.phase.genes <- paste("^",s.phase.genes,"$", sep="")
  s.phase.genes <- paste(s.phase.genes,collapse="|")
  s.phase.genes <- grep(s.phase.genes, x = ALLgenes, value = TRUE, ignore.case = TRUE)
  s.genes <- s.phase.genes
  #
  g2m.phase.genes <- g2m.genes
  g2m.phase.genes <- paste("^",g2m.phase.genes,"$", sep="")
  g2m.phase.genes <- paste(g2m.phase.genes,collapse="|")
  g2m.phase.genes <- grep(g2m.phase.genes, x = ALLgenes, value = TRUE, ignore.case = TRUE)
  g2m.genes <- g2m.phase.genes
  #####
  # Key the stats table by cell id so later metadata merges can align rows
  # by name. NOTE(review): the slot is written back via attributes() rather
  # than the @ operator; this bypasses S4 validity checks -- confirm intended.
  Table <- object@stats
  row.names(Table) <- Table$CellIds
  attributes(object)$stats <- Table
  # head(object@stats)
  ########## 1
  # LengthCheck: TRUE for each list element with more than `cutoff` entries;
  # used below to detect marker lists that matched (almost) no genes.
  LengthCheck <- function(values, cutoff = 0) {
    return(vapply(
      X = values,
      FUN = function(x) {
        return(length(x = x) > cutoff)
      },
      FUN.VALUE = logical(1)
    ))
  }
  ########## 2
  ####
  # AddMetaDatame: append the columns of `metadata` to object@stats,
  # aligning metadata rows to stats rows by row name (cell id).
  AddMetaDatame <- function (object, metadata, col.name = NULL)
  {
    if (typeof(x = metadata) != "list") {
      metadata <- as.data.frame(x = metadata)
      if (is.null(x = col.name)) {
        stop("Please provide a name for provided metadata")
      }
      colnames(x = metadata) <- col.name
    }
    cols.add <- colnames(x = metadata)
    meta.order <- match(rownames(object@stats), rownames(metadata))
    meta.add <- metadata[meta.order, ]
    if (all(is.null(x = meta.add))) {
      stop("Metadata provided doesn't match the cells in this object")
    }
    object@stats[, cols.add] <- meta.add
    return(object)
  }
  ######### func 2
  # AddModuleScoreme: per-cell module score for each gene set in genes.list.
  # For every gene in a set, ctrl.size control genes are sampled from the
  # same average-expression bin (genes binned into n.bin groups with
  # Hmisc::cut2); the score is mean(set expression) - mean(control expression)
  # per cell. Sampling is seeded with random.seed for reproducibility.
  AddModuleScoreme <- function (object, genes.list = NULL, genes.pool = NULL, n.bin = 25,
                                seed.use = 1, ctrl.size = 100, use.k = FALSE, enrich.name = "Cluster",
                                random.seed = 1)
  {
    set.seed(seed = random.seed)
    genes.old <- genes.list
    if (use.k) {
      # Use k-means cluster memberships as the gene sets instead.
      genes.list <- list()
      for (i in as.numeric(x = names(x = table(object@kmeans.obj[[1]]$cluster)))) {
        genes.list[[i]] <- names(x = which(x = object@kmeans.obj[[1]]$cluster ==
                                             i))
      }
      cluster.length <- length(x = genes.list)
    }
    else {
      if (is.null(x = genes.list)) {
        stop("Missing input gene list")
      }
      # Keep only marker genes actually present in the expression matrix.
      genes.list <- lapply(X = genes.list, FUN = function(x) {
        return(intersect(x = x, y = rownames(x = object@raw.data)))
      })
      cluster.length <- length(x = genes.list)
    }
    if (!all(LengthCheck(values = genes.list))) {
      warning(paste("Could not find enough genes in the object from the following gene lists:",
                    paste(names(x = which(x = !LengthCheck(values = genes.list)))),
                    "Attempting to match case..."))
      # NOTE(review): CaseMatch is not defined in this file; presumably it is
      # expected from an attached package (e.g. Seurat) -- confirm, otherwise
      # this fallback branch errors at run time.
      genes.list <- lapply(X = genes.old, FUN = CaseMatch,
                           match = rownames(x = object@raw.data))
    }
    if (!all(LengthCheck(values = genes.list))) {
      stop(paste("The following gene lists do not have enough genes present in the object:",
                 paste(names(x = which(x = !LengthCheck(values = genes.list)))),
                 "exiting..."))
    }
    if (is.null(x = genes.pool)) {
      genes.pool = rownames(x = object@raw.data)
    }
    # Bin all pool genes by average expression across cells, ~equal-sized
    # bins of round(length/n.bin) genes each (Hmisc::cut2 with m=).
    data.avg <- Matrix::rowMeans(x = object@raw.data[genes.pool,
                                                     ])
    data.avg <- data.avg[order(data.avg)]
    data.cut <- as.numeric(x = Hmisc::cut2(x = data.avg, m = round(x = length(x = data.avg)/n.bin)))
    names(x = data.cut) <- names(x = data.avg)
    ctrl.use <- vector(mode = "list", length = cluster.length)
    for (i in 1:cluster.length) {
      genes.use <- genes.list[[i]]
      for (j in 1:length(x = genes.use)) {
        # Sample ctrl.size control genes from the same expression bin as
        # gene j of set i.
        ctrl.use[[i]] <- c(ctrl.use[[i]], names(x = sample(x = data.cut[which(x = data.cut ==
                                                                                data.cut[genes.use[j]])], size = ctrl.size, replace = FALSE)))
      }
    }
    ctrl.use <- lapply(X = ctrl.use, FUN = unique)
    # Per-cell mean expression of the control genes (rows = gene sets).
    ctrl.scores <- matrix(data = numeric(length = 1L), nrow = length(x = ctrl.use),
                          ncol = ncol(x = object@raw.data))
    for (i in 1:length(ctrl.use)) {
      genes.use <- ctrl.use[[i]]
      ctrl.scores[i, ] <- Matrix::colMeans(x = object@raw.data[genes.use,
                                                               ])
    }
    # Per-cell mean expression of the marker genes themselves.
    genes.scores <- matrix(data = numeric(length = 1L), nrow = cluster.length,
                           ncol = ncol(x = object@raw.data))
    for (i in 1:cluster.length) {
      genes.use <- genes.list[[i]]
      data.use <- object@raw.data[genes.use, , drop = FALSE]
      genes.scores[i, ] <- Matrix::colMeans(x = data.use)
    }
    # Module score = marker mean minus matched-control mean, one column per
    # set named "<enrich.name><i>", one row per cell.
    genes.scores.use <- genes.scores - ctrl.scores
    rownames(x = genes.scores.use) <- paste0(enrich.name, 1:cluster.length)
    genes.scores.use <- as.data.frame(x = t(x = genes.scores.use))
    rownames(x = genes.scores.use) <- colnames(x = object@raw.data)
    object <- AddMetaDatame(object = object, metadata = genes.scores.use,
                            col.name = colnames(x = genes.scores.use))
    gc(verbose = FALSE)
    return(object)
  }
  ######## 3
  # CellCycleScoringme: score the S and G2M sets (control-set size = size of
  # the smaller marker set), then call one phase per cell from the scores.
  CellCycleScoringme <- function (object, g2m.genes, s.genes, set.ident = FALSE)
  {
    enrich.name <- "Cell Cycle"
    genes.list <- list(S.Score = s.genes, G2M.Score = g2m.genes)
    object.cc <- AddModuleScoreme(object = object, genes.list = genes.list,
                                  enrich.name = enrich.name, ctrl.size = min(vapply(X = genes.list,
                                                                                   FUN = length, FUN.VALUE = numeric(1))))
    # Pull the two "Cell Cycle1"/"Cell Cycle2" score columns just added.
    cc.columns <- grep(pattern = enrich.name, x = colnames(x = object.cc@stats))
    cc.scores <- object.cc@stats[, cc.columns]
    # head(cc.scores)
    rm(object.cc)
    gc(verbose = FALSE)
    # Phase call per cell: G1 if both scores negative, Undecided on a tie,
    # otherwise the set ("S" or "G2M") with the maximum score.
    assignments <- apply(X = cc.scores, MARGIN = 1, FUN = function(scores,
                                                                   first = "S", second = "G2M", null = "G1") {
      if (all(scores < 0)) {
        return(null)
      }
      else {
        if (length(which(x = scores == max(scores))) > 1) {
          return("Undecided")
        }
        else {
          return(c(first, second)[which(x = scores == max(scores))])
        }
      }
    })
    # merge(by = 0) joins on row names; restore them afterwards.
    cc.scores <- merge(x = cc.scores, y = data.frame(assignments),
                       by = 0)
    colnames(x = cc.scores) <- c("rownames", "S.Score", "G2M.Score",
                                 "Phase")
    rownames(x = cc.scores) <- cc.scores$rownames
    cc.scores <- cc.scores[, c("S.Score", "G2M.Score", "Phase")]
    # cc.scores
    object <- AddMetaDatame(object = object, metadata = cc.scores)
    if (set.ident) {
      # NOTE(review): StashIdent/SetAllIdent are not defined in this file.
      # This branch is unreachable from cc() below (set.ident defaults to
      # FALSE and is never passed), but would error if ever enabled.
      object <- StashIdent(object = object, save.name = "old.ident")
      object <- SetAllIdent(object = object, id = "Phase")
    }
    return(object)
  }
  #############
  #############
  ############# How to run
  object <- CellCycleScoringme(object = object, s.genes = s.genes, g2m.genes = g2m.genes)
  STATS <- object@stats
  attributes(object)$stats <- STATS
  return(object)
}
|
# Copyright (c) 2016, Universitat Rovira i Virgili (Spain), Aarhus University
# (Denmark) and University of Aveiro (Portugal)
#
# Written by Carlos P. Roca
# as Research funded by the European Union
# for the research paper by Roca, Gomes, Amorim & Scott-Fordsmand: "Variation-
# preserving normalization unveils blind spots in gene expression profiling".
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Implements Watson U2 statistic
# Computes the two-sample Watson U^2 statistic, a measure of discrepancy
# between the empirical distribution functions of two samples.
#
# Args:
#   x: non-empty numeric vector, first sample.
#   y: non-empty numeric vector, second sample.
# Returns:
#   The Watson U^2 statistic (single numeric value).
watson.u2 <- function( x, y )
{
    # see section 6.5 of Durbin, Distribution Theory for Tests Based on the
    # Sample Distribution Function, SIAM, Philadelphia (1973)
    stopifnot( is.numeric( x ), is.numeric( y ),
        length( x ) > 0, length( y ) > 0 )
    n <- length( x )
    m <- length( y )
    # rank the pooled samples, averaging ranks over ties
    r <- c( sort( x ), sort( y ) )
    r.rank <- rank( r, ties.method="average" )
    # difference between the two empirical distribution functions,
    # evaluated at the ordered values of x
    idx <- seq_len( n )
    z <- ( r.rank[ idx ] - idx ) / m - ( idx - 1/2 ) / n
    ( m / (n+m) ) * sum( ( z - mean( z ) )^2 ) +
        ( m*(m+2*n) ) / ( 12*n*m*(n+m) )
}
| /watson_u2.r | permissive | carlosproca/gene-expr-norm-paper | R | false | false | 1,922 | r | # Copyright (c) 2016, Universitat Rovira i Virgili (Spain), Aarhus University
# (Denmark) and University of Aveiro (Portugal)
#
# Written by Carlos P. Roca
# as Research funded by the European Union
# for the research paper by Roca, Gomes, Amorim & Scott-Fordsmand: "Variation-
# preserving normalization unveils blind spots in gene expression profiling".
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Implements Watson U2 statistic
# Two-sample Watson U^2 statistic (Durbin 1973, section 6.5): quantifies the
# discrepancy between the empirical distribution functions of x and y.
watson.u2 <- function( x, y )
{
    nx <- length( x )
    ny <- length( y )
    # rank the pooled, per-sample-sorted observations; ties get average ranks
    pooled <- c( sort( x ), sort( y ) )
    rk <- rank( pooled, ties.method = "average" )
    # gap between the two empirical distribution functions at the ordered x
    ecdf.gap <- ( rk[ 1:nx ] - 1:nx ) / ny - ( 1:nx - 1/2 ) / nx
    centered <- ecdf.gap - mean( ecdf.gap )
    ( ny / ( nx + ny ) ) * sum( centered^2 ) +
        ( ny * ( ny + 2 * nx ) ) / ( 12 * nx * ny * ( nx + ny ) )
}
|
#
# Anaquin - Sequin statistical analysis. Version 1.1.1.
#
# This R script was generated at %1%.
#
# %2%
#
# NOTE: this file is a code-generation template, not runnable R as-is.
# The %N% tokens (%1%-%11%) are placeholders substituted by the Anaquin
# generator before the script is written out.
library(Anaquin)
# Load the tab-separated sequin table; first column supplies the row names.
data <- read.csv('%3%/%4%', row.names=1, sep='\t')
title <- '%5%'
xlab <- '%6%'
ylab <- '%7%'
# Expected log-fold (x-axis)
expected <- %8%
# Measured log-fold (y-axis)
measured <- %9%
# Create Anaquin data for PlotLinear
# NOTE(review): %11% is spliced verbatim into the call -- presumably extra
# named arguments; confirm against the C++ generator that emits this file.
anaquin <- AnaquinData(analysis='PlotLinear', seqs=row.names(data), input=expected, measured=measured%11%)
# Linear fit of measured vs expected fold changes; LOQ estimation disabled.
plotLinear(anaquin, title=title, xlab=xlab, ylab=ylab, showAxis=%10%, showLOQ=FALSE)
| /src/r/plotFold.R | permissive | wangzhennan14/Anaquin | R | false | false | 550 | r | #
# Anaquin - Sequin statistical analysis. Version 1.1.1.
#
# This R script was generated at %1%.
#
# %2%
#
# NOTE: this file is a code-generation template, not runnable R as-is.
# The %N% tokens (%1%-%11%) are placeholders substituted by the Anaquin
# generator before the script is written out.
library(Anaquin)
# Load the tab-separated sequin table; first column supplies the row names.
data <- read.csv('%3%/%4%', row.names=1, sep='\t')
title <- '%5%'
xlab <- '%6%'
ylab <- '%7%'
# Expected log-fold (x-axis)
expected <- %8%
# Measured log-fold (y-axis)
measured <- %9%
# Create Anaquin data for PlotLinear
# NOTE(review): %11% is spliced verbatim into the call -- presumably extra
# named arguments; confirm against the C++ generator that emits this file.
anaquin <- AnaquinData(analysis='PlotLinear', seqs=row.names(data), input=expected, measured=measured%11%)
# Linear fit of measured vs expected fold changes; LOQ estimation disabled.
plotLinear(anaquin, title=title, xlab=xlab, ylab=ylab, showAxis=%10%, showLOQ=FALSE)
|
#### libraries used #####
library(ggplot2)
library(tidyverse)
library(readxl)
library(ggthemes)
theme_set(theme_minimal())
##### working directory #######
# NOTE(review): setwd() hard-codes a user-local path; prefer project-relative
# paths (e.g. an RStudio project or the here package).
setwd("~/graficas macro")
##### data #######
# First column of the spreadsheet = period; remaining columns = debt/GDP series.
df <- read_xlsx("datos_ratio.xlsx")
view(df)
##### plot #######
#----- tidyverse version
# Reshape wide -> long: every ratio column becomes a (Tasa, valor) pair,
# keyed by the first column, which is renamed to Periodo.
# NOTE(review): the name df_wider is misleading -- this is the LONG format.
df_wider = df %>%
  pivot_longer(cols = colnames(df)[2:ncol(df)],
               names_to = 'Tasa',
               values_to='valor') %>%
  rename(Periodo=colnames(df)[1])
###theme_tufte()
# Line-and-point chart of the debt/GDP ratio per series, values shown in %.
df_wider %>%
  ggplot(aes(x=Periodo, y= valor*100, color = Tasa))+
  geom_line(size=1)+
  geom_point()+
  ylab('%')+
  ggtitle("Ratio Deuda/PBI", "Evolución del Ratio Deuda/PBI en %")+
  theme_hc()+
  scale_x_continuous(breaks = unique(df_wider$Periodo))+
  theme(legend.title = element_blank())
| /Dinámica de deuda pública en economía cerrada/Grafica_ratio_deuda_PBI.R | no_license | lucassebaord29/Trabajo-Pr-ctico-1-MACROECONOM-A-GRUPO-2- | R | false | false | 863 | r | ####librerias utilizadas#####
library(ggplot2)
library(tidyverse)
library(readxl)
library(ggthemes)
theme_set(theme_minimal())
##### working directory #######
# NOTE(review): setwd() hard-codes a user-local path; prefer project-relative
# paths (e.g. an RStudio project or the here package).
setwd("~/graficas macro")
##### data #######
# First column of the spreadsheet = period; remaining columns = debt/GDP series.
df <- read_xlsx("datos_ratio.xlsx")
view(df)
##### plot #######
#----- tidyverse version
# Reshape wide -> long: every ratio column becomes a (Tasa, valor) pair,
# keyed by the first column, which is renamed to Periodo.
# NOTE(review): the name df_wider is misleading -- this is the LONG format.
df_wider = df %>%
  pivot_longer(cols = colnames(df)[2:ncol(df)],
               names_to = 'Tasa',
               values_to='valor') %>%
  rename(Periodo=colnames(df)[1])
###theme_tufte()
# Line-and-point chart of the debt/GDP ratio per series, values shown in %.
df_wider %>%
  ggplot(aes(x=Periodo, y= valor*100, color = Tasa))+
  geom_line(size=1)+
  geom_point()+
  ylab('%')+
  ggtitle("Ratio Deuda/PBI", "Evolución del Ratio Deuda/PBI en %")+
  theme_hc()+
  scale_x_continuous(breaks = unique(df_wider$Periodo))+
  theme(legend.title = element_blank())
|
library(flexdashboard) # Dashboard package
library(highcharter) # Interactive data visualizations
library(plotly) # Interactive data visualizations
library(ggplot2)
library(viridis) # Color gradients
library(tidyverse) # Metapackge
library(countrycode) # Converts country names/codes
library(rjson) # JSON reader
library(crosstalk) # Provides interactivity for HTML widgets
library(DT) # Displaying data tables
library(tidyr)
library(dplyr)
library(readr)
library(readxl)
library(janitor)
library(leaflet)
library(geojsonio)
library(tigris)
library(RColorBrewer)
library(rvest)
library(ggplot2)
library(purrr)
library(tidyverse)
library(stringr)
library(lubridate)
library(gganimate)
library(plotly)
| /scripts/library.R | no_license | rugnepal/nepal_tourism_dashboard | R | false | false | 700 | r | library(flexdashboard) # Dashboard package
library(highcharter) # Interactive data visualizations
library(plotly) # Interactive data visualizations
library(ggplot2)
library(viridis) # Color gradients
library(tidyverse) # Metapackge
library(countrycode) # Converts country names/codes
library(rjson) # JSON reader
library(crosstalk) # Provides interactivity for HTML widgets
library(DT) # Displaying data tables
library(tidyr)
library(dplyr)
library(readr)
library(readxl)
library(janitor)
library(leaflet)
library(geojsonio)
library(tigris)
library(RColorBrewer)
library(rvest)
library(ggplot2)
library(purrr)
library(tidyverse)
library(stringr)
library(lubridate)
library(gganimate)
library(plotly)
|
# ---
# title: "Intro to R"
# author: Gibran Hemani
# output:
# html_document:
# theme: united
# highlight: tango
# ---
# ## Basic calculations
# What is the probability of winning the lottery? Assume 49 balls, and 6 balls chosen without replacement. This is how many unique combinations there are.
# $$
# \frac{49!}
# {6!(49-6)!}
# =
# \frac{49 \times 48 \times 47 \times 46 \times 45 \times 44} {6 \times 5 \times 4 \times 3 \times 2 \times 1}
# $$
# In R it can be calculated like this:
49 * 48 * 47 * 46 * 45 * 44 / (6 * 5 * 4 * 3 * 2 * 1)
# $$
# 49 \times 48 \times 47 \times 46 \times 45 \times 44 / (6 \times 5 \times 4 \times 3 \times 2 \times 1)
# $$
# Or we can use built in functions:
factorial(49) / (factorial(6) * factorial(49-6))
# We can find out about the `factorial` function like this:
?factorial
# We want to store the result from our calculation. This is done like so:
lottery <- factorial(49) / (factorial(6) * factorial(49-6))
# What happens now if you type
lottery
# This retrieves the stored value.
# What is the probability of winning the lottery if you buy 1 ticket?
1 / lottery
# How about 10 tickets?
10 / lottery
# What is the chance of winning the lottery twice?
(1 / lottery)^2
# In R we can make vectors of numbers instead of dealing in single elements. For example
n <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
# Here the `c()` command tells R to string the numbers 1 to 10 into an array. Another way to do this is:
n <- 1:10
# Now we can calculate the chances of winning for 1, 2, 3, ..., 10 tickets in one command:
n / lottery
# This prints out 10 values, each one is for a different value in `n`.
# Notice that we can overwrite a value in a variable, for example
n <- 30:40
# Now type `n` - it has changed because R has replaced the original value with the new value.
# It's possible to only extract a single value from an array, using square brackets:
n[1]
n[5:8]
# will extract only the first value of `n` for the first command; or the 5th, 6th, 7th and 8th values in the second command.
# How many values are in `n`?
length(n)
# What is the sum of all the values in `n`?
sum(n)
# What is the median value of `n`?
median(n)
# If you type `ls()` you can see your **workspace**. This is the list of all the objects that you have created. Notice that there is one object there called `lottery`, and one called `n`. We can remove objects like this:
rm(lottery)
# Type `ls()` again. Type `lottery`, what happens?
# ## Data
# R comes pre-loaded with some example datasets, one of which we will use here as an example of some basic data manipulation. We will be using the US States Facts and Figures dataset, which is stored as the `state.x77` R object. There is a help file available with background information on this dataset.
# The dataset itself is quite large: typing `state.x77` into the R console to look at it results in the output running off the screen.
# Instead we can use the `head` function in R to look at the first few rows of the dataset.
head(state.x77)
# or the `tail` function to see the last few rows
tail(state.x77)
# Use the `dim` function to see how many rows and columns it has.
dim(state.x77)
# Type and run the following portion of R code
Alaska_Life_Exp <- state.x77[2, 4]
ffrc <- state.x77[1:4, 1:4]
Population <- state.x77[ , 1]
# This portion of R code uses square brackets to extract data from the `state.x77` R object. Being a table (or matrix) the entries of `state.x77` are indexed by two indices that refer to the row and column. So `state.x77[2, 4]` gives the entry in the second row and fourth column (the Alaskan life expectancy, 69.05 years). Also s`tate.x77[1:4, 1:4]` gives the first four rows and columns of the table. Finally, `state.x77[ , 1]` gives the first column (the population of all the states). Note that the first row displayed in the R console gives the column headings and the first column displayed in the R console gives the row headings.
# Can you use this data to calculate:
# 1. The total area of the US?
# 2. The total population US?
# 3. The average illiteracy US?
# ## Plotting
# What is the relationship between income and literacy amongst the US states? What is the correlation?
cor(state.x77[, "Income"], state.x77[, "Illiteracy"])
# There seems to be an inverse proportional relationship. We can visualise this:
plot(Income ~ Illiteracy, state.x77)
# What is the distribution of Income?
hist(state.x77[, "Income"])
# You can save a plot like this:
pdf("test.pdf")
hist(state.x77[, "Income"])
dev.off()
# ## Reading and writing data
# It is important to be able to get data into R, and back out again. Here we will look at two examples - Excel files, and Stata files.
# ### Excel
# In Excel it is possible to save spreadsheet data as `.csv` files - "comma separated values". R can read `.csv` files using the `read.csv()` function. Have a look at the documentation:
?read.csv
# Notice that there are a lot of options here to be as flexible as possible for reading in data that has been formatted in different ways. The default options for `read.csv` are usually suitable for reading in a file that has just been experted from Excel.
# Let's try reading in a csv file...
phen <- read.csv("../data/example_data/phen.csv", stringsAsFactors=FALSE)
# What does this data look like?
head(phen)
# What are the dimensions?
dim(phen)
# Note, the `phen` object and the `state.x77` object are actually different **data types**. Look:
class(phen)
class(state.x77)
# The difference between a **data.frame** and a **matrix** is that in a matrix every element must be the same type of data. In the examlpe of `state.x77`, every element is a numeric value. A **data.frame** on the other hand allows each column to be a different type of data. You can access a particular column using the `$` operator like this:
class(phen$IID)
class(phen$BMI)
# Let's calculate the mean value of DBP (diastolic blood pressure):
mean(phen$DBP)
mean(phen[,"DBP"])
mean(phen[,4])
mean(phen[["DBP"]])
mean(phen[[4]])
# All the above are different ways of accessing the same thing - data frames are quite methods of storing data.
# Can you use this data to:
# 1. Draw a histogram of BMI values?
# 2. Plot the relationship between DBP and SBP?
# ### Stata
# We can also read in files that are in Stata format. But first we need to install a library that will provide the necessary functions.
install.packages("readstata13")
# Once the library is installed we can load it
library(readstata13)
# And now we can use the functions that are provided by this package. Let's read in a Stata file:
phen <- read.dta13("../data/example_data/phen.dta")
# ## Monty Hall problem
# Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No. 1, and the host, who knows what's behind the doors, opens another door, say No. 3, which has a goat. He then says to you, "Do you want to pick door No. 2?" Is it to your advantage to switch your choice?
# Let's simulate this scenario to check!
# We will make a function which simulates one game:
monty <- function()
{
doors <- 1:3 # initialize the doors behind one of which is a good prize
win <- 0 # to keep track of number of wins
prize <- sample(1:3, 1) # randomize which door has the good prize
guess <- sample(1:3, 1) # guess a door at random
## Reveal one of the doors you didn't pick which has a goat
if(prize != guess) {
reveal <- doors[-c(prize,guess)]
} else {
reveal <- sample(doors[-c(prize,guess)], 1)
}
## Stay with your initial guess or switch
switch_guess <- doors[-c(reveal,guess)]
stay_guess <- guess
## Did you win?
win <- ifelse(switch_guess == prize, "switch", "stay")
## return results
result <- data.frame(
prize = prize,
guess = guess,
win = win,
stringsAsFactors=FALSE
)
return(result)
}
# This function requires drawing random numbers (using the `sample` function). To make the results reproducible we should set the "random seed". This means that each time you run the result you will get the same answer.
set.seed(12345)
# Here's how it works:
monty()
# Let's see what happens if we do this multiple times...
n_simulations <- 10
all_results <- list()
for(i in 1:n_simulations)
{
message(i)
all_results[[i]] <- monty()
}
all_results <- do.call(rbind, all_results)
# Let's see what the results look like! We want to know the proportion of wins for the 'stay' strategy, and the proportion of wins for the 'switch' strategy
table(all_results$win)
# Perhaps this was just chance? Let's plot what it looks like. We will need to install a new library, `ggplot2`. This library is fantastic for making fairly complex plots very quickly.
install.packages("ggplot2")
library(ggplot2)
# Here's the plot
# Label the simulations
all_results$simulation <- 1:n_simulations
# Get the cumulative sum of wins
all_results$cumulative <- NA
stay_index <- all_results$win == "stay"
all_results$cumulative[stay_index] <- 1:sum(stay_index)
switch_index <- all_results$win == "switch"
all_results$cumulative[switch_index] <- 1:sum(switch_index)
# Make the plot
ggplot(all_results, aes(x=simulation, y=cumulative)) +
geom_point(aes(colour=win)) +
geom_line(aes(colour=win)) +
labs(y="Cumulative sum of successes", x="Simulations number")
# We need more simulations to be sure about this.
# 1. Run the simulations again, but this time do 1000 simulations instead of just 10.
# 2. If you were playing the game, would you stick with your initial choise or switch?
# ## Packages
# We have already installed two packages. For the remainder of the course we are going to need some more. There are three main sources to get packages
# - **CRAN** This is the main R package repository. It has over 8000 packages for a huge variaty of things. [https://cran.r-project.org](https://cran.r-project.org)
# - **Bioconductor** This is another repository which has packages that are mostly focused on genomic data. [http://bioconductor.org](http://bioconductor.org)
# - **GitHub** A lot of people publish packages, or updates to packages, on GitHub before they are released to the CRAN or Bioconductor.
# We need to install the following packages from CRAN:
install.packages("CpGassoc")
install.packages("GenABEL")
# And the following package from Bioconductor:
source("http://bioconductor.org/biocLite.R")
biocLite("minfi")
| /worksheets_mac/intro_to_r.R | no_license | explodecomputer/EEPE_2016 | R | false | false | 11,106 | r | # ---
# title: "Intro to R"
# author: Gibran Hemani
# output:
# html_document:
# theme: united
# highlight: tango
# ---
# ## Basic calculations
# What is the probability of winning the lottery? Assume 49 balls, and 6 balls chosen without replacement. This is how many unique combinations there are.
# $$
# \frac{49!}
# {6!(49-6)!}
# =
# \frac{49 \times 48 \times 47 \times 46 \times 45 \times 44} {6 \times 5 \times 4 \times 3 \times 2 \times 1}
# $$
# In R it can be calculated like this:
49 * 48 * 47 * 46 * 45 * 44 / (6 * 5 * 4 * 3 * 2 * 1)
# $$
# 49 \times 48 \times 47 \times 46 \times 45 \times 44 / (6 \times 5 \times 4 \times 3 \times 2 \times 1)
# $$
# Or we can use built in functions:
factorial(49) / (factorial(6) * factorial(49-6))
# We can find out about the `factorial` function like this:
?factorial
# We want to store the result from our calculation. This is done like so:
lottery <- factorial(49) / (factorial(6) * factorial(49-6))
# What happens now if you type
lottery
# This retrieves the stored value.
# What is the probability of winning the lottery if you buy 1 ticket?
1 / lottery
# How about 10 tickets?
10 / lottery
# What is the chance of winning the lottery twice?
(1 / lottery)^2
# In R we can make vectors of numbers instead of dealing in single elements. For example
n <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
# Here the `c()` command tells R to string the numbers 1 to 10 into an array. Another way to do this is:
n <- 1:10
# Now we can calculate the chances of winning for 1, 2, 3, ..., 10 tickets in one command:
n / lottery
# This prints out 10 values, each one is for a different value in `n`.
# Notice that we can overwrite a value in a variable, for example
n <- 30:40
# Now type `n` - it has changed because R has replaced the original value with the new value.
# It's possible to only extract a single value from an array, using square brackets:
n[1]
n[5:8]
# will extract only the first value of `n` for the first command; or the 5th, 6th, 7th and 8th values in the second command.
# How many values are in `n`?
length(n)
# What is the sum of all the values in `n`?
sum(n)
# What is the median value of `n`?
median(n)
# If you type `ls()` you can see your **workspace**. This is the list of all the objects that you have created. Notice that there is one object there called `lottery`, and one called `n`. We can remove objects like this:
rm(lottery)
# Type `ls()` again. Type `lottery`, what happens?
# ## Data
# R comes pre-loaded with some example datasets, one of which we will use here as an example of some basic data manipulation. We will be using the US States Facts and Figures dataset, which is stored as the `state.x77` R object. There is a help file available with background information on this dataset.
# The dataset itself is quite large: typing `state.x77` into the R console to look at it results in the output running off the screen.
# Instead we can use the `head` function in R to look at the first few rows of the dataset.
head(state.x77)
# or the `tail` function to see the last few rows
tail(state.x77)
# Use the `dim` function to see how many rows and columns it has.
dim(state.x77)
# Type and run the following portion of R code
Alaska_Life_Exp <- state.x77[2, 4]
ffrc <- state.x77[1:4, 1:4]
Population <- state.x77[ , 1]
# This portion of R code uses square brackets to extract data from the `state.x77` R object. Being a table (or matrix) the entries of `state.x77` are indexed by two indices that refer to the row and column. So `state.x77[2, 4]` gives the entry in the second row and fourth column (the Alaskan life expectancy, 69.05 years). Also s`tate.x77[1:4, 1:4]` gives the first four rows and columns of the table. Finally, `state.x77[ , 1]` gives the first column (the population of all the states). Note that the first row displayed in the R console gives the column headings and the first column displayed in the R console gives the row headings.
# Can you use this data to calculate:
# 1. The total area of the US?
# 2. The total population US?
# 3. The average illiteracy US?
# ## Plotting
# What is the relationship between income and literacy amongst the US states? What is the correlation?
cor(state.x77[, "Income"], state.x77[, "Illiteracy"])
# There seems to be an inverse proportional relationship. We can visualise this:
plot(Income ~ Illiteracy, state.x77)
# What is the distribution of Income?
hist(state.x77[, "Income"])
# You can save a plot like this:
pdf("test.pdf")
hist(state.x77[, "Income"])
dev.off()
# ## Reading and writing data
# It is important to be able to get data into R, and back out again. Here we will look at two examples - Excel files, and Stata files.
# ### Excel
# In Excel it is possible to save spreadsheet data as `.csv` files - "comma separated values". R can read `.csv` files using the `read.csv()` function. Have a look at the documentation:
?read.csv
# Notice that there are a lot of options here to be as flexible as possible for reading in data that has been formatted in different ways. The default options for `read.csv` are usually suitable for reading in a file that has just been experted from Excel.
# Let's try reading in a csv file...
phen <- read.csv("../data/example_data/phen.csv", stringsAsFactors=FALSE)
# What does this data look like?
head(phen)
# What are the dimensions?
dim(phen)
# Note, the `phen` object and the `state.x77` object are actually different **data types**. Look:
class(phen)
class(state.x77)
# The difference between a **data.frame** and a **matrix** is that in a matrix every element must be the same type of data. In the examlpe of `state.x77`, every element is a numeric value. A **data.frame** on the other hand allows each column to be a different type of data. You can access a particular column using the `$` operator like this:
class(phen$IID)
class(phen$BMI)
# Let's calculate the mean value of DBP (diastolic blood pressure):
mean(phen$DBP)
mean(phen[,"DBP"])
mean(phen[,4])
mean(phen[["DBP"]])
mean(phen[[4]])
# All the above are different ways of accessing the same thing - data frames are quite methods of storing data.
# Can you use this data to:
# 1. Draw a histogram of BMI values?
# 2. Plot the relationship between DBP and SBP?
# ### Stata
# We can also read in files that are in Stata format. But first we need to install a library that will provide the necessary functions.
install.packages("readstata13")
# Once the library is installed we can load it
library(readstata13)
# And now we can use the functions that are provided by this package. Let's read in a Stata file:
phen <- read.dta13("../data/example_data/phen.dta")
# ## Monty Hall problem
# Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No. 1, and the host, who knows what's behind the doors, opens another door, say No. 3, which has a goat. He then says to you, "Do you want to pick door No. 2?" Is it to your advantage to switch your choice?
# Let's simulate this scenario to check!
# We will make a function which simulates one game:
monty <- function()
{
doors <- 1:3 # initialize the doors behind one of which is a good prize
win <- 0 # to keep track of number of wins
prize <- sample(1:3, 1) # randomize which door has the good prize
guess <- sample(1:3, 1) # guess a door at random
## Reveal one of the doors you didn't pick which has a goat
if(prize != guess) {
reveal <- doors[-c(prize,guess)]
} else {
reveal <- sample(doors[-c(prize,guess)], 1)
}
## Stay with your initial guess or switch
switch_guess <- doors[-c(reveal,guess)]
stay_guess <- guess
## Did you win?
win <- ifelse(switch_guess == prize, "switch", "stay")
## return results
result <- data.frame(
prize = prize,
guess = guess,
win = win,
stringsAsFactors=FALSE
)
return(result)
}
# This function requires drawing random numbers (using the `sample` function). To make the results reproducible we should set the "random seed". This means that each time you run the result you will get the same answer.
set.seed(12345)
# Here's how it works:
monty()
# Let's see what happens if we do this multiple times...
n_simulations <- 10
all_results <- list()
for(i in 1:n_simulations)
{
message(i)
all_results[[i]] <- monty()
}
all_results <- do.call(rbind, all_results)
# Let's see what the results look like! We want to know the proportion of wins for the 'stay' strategy, and the proportion of wins for the 'switch' strategy
table(all_results$win)
# Perhaps this was just chance? Let's plot what it looks like. We will need to install a new library, `ggplot2`. This library is fantastic for making fairly complex plots very quickly.
install.packages("ggplot2")
library(ggplot2)
# Here's the plot.
# Label the simulations 1..n so we can put them on the x axis.
all_results$simulation <- seq_len(n_simulations)
# Within each strategy, number its wins 1, 2, 3, ... to get a running total.
all_results$cumulative <- NA
stay_index <- all_results$win == "stay"
# seq_len() (rather than 1:sum(...)) handles the edge case where a strategy
# never wins: 1:0 would yield c(1, 0) and the assignment would fail.
all_results$cumulative[stay_index] <- seq_len(sum(stay_index))
switch_index <- all_results$win == "switch"
all_results$cumulative[switch_index] <- seq_len(sum(switch_index))
# Make the plot: one line per winning strategy, cumulative wins vs simulation.
ggplot(all_results, aes(x = simulation, y = cumulative)) +
  geom_point(aes(colour = win)) +
  geom_line(aes(colour = win)) +
  labs(y = "Cumulative sum of successes", x = "Simulations number")
# We need more simulations to be sure about this. Exercises:
# 1. Run the simulations again, but this time do 1000 simulations instead of just 10.
# 2. If you were playing the game, would you stick with your initial choice or switch?
# ## Packages
# We have already installed two packages. For the remainder of the course we are going to need some more. There are three main sources to get packages
# - **CRAN** This is the main R package repository. It has over 8000 packages for a huge variety of things. [https://cran.r-project.org](https://cran.r-project.org)
# - **Bioconductor** This is another repository which has packages that are mostly focused on genomic data. [http://bioconductor.org](http://bioconductor.org)
# - **GitHub** A lot of people publish packages, or updates to packages, on GitHub before they are released to the CRAN or Bioconductor.
# We need to install the following packages from CRAN:
# NOTE(review): GenABEL was archived on CRAN in 2018, so this install may fail
# on current R versions -- verify an alternative source if it does.
install.packages("CpGassoc")
install.packages("GenABEL")
# And the following package from Bioconductor:
# NOTE(review): the biocLite() installer was retired with Bioconductor 3.8;
# on R >= 3.5 the supported route is install.packages("BiocManager") followed
# by BiocManager::install("minfi"). The lines below are kept for older setups.
source("http://bioconductor.org/biocLite.R")
biocLite("minfi")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UtilityFunctions.R
\name{zenith_angle}
\alias{zenith_angle}
\title{Zenith Angle}
\usage{
zenith_angle(doy, lat, lon, hour, offset = NA)
}
\arguments{
\item{doy}{\code{numeric} day of year (1-366). This can be obtained from standard date via \code{\link{day_of_year}}.}
\item{lat}{\code{numeric} latitude (decimal degrees).}
\item{lon}{\code{numeric} longitude (decimal degrees).}
\item{hour}{\code{numeric} hour of the day (0-24).}
\item{offset}{\code{numeric} the number of hours to add to UTC (Coordinated Universal Time) to get local time (improves accuracy but not always necessary). Optional. Defaults to NA.}
}
\value{
\code{numeric} zenith angle (degrees)
}
\description{
The function calculates the zenith angle, the location of the sun as an angle (in degrees) measured from vertical \insertCite{Campbell1998}{TrenchR}.
}
\examples{
zenith_angle(doy = 112,
lat = 47.61,
lon = -122.33,
hour = 12)
}
\references{
\insertAllCited{}
}
\seealso{
Other utility functions:
\code{\link{airpressure_from_elev}()},
\code{\link{azimuth_angle}()},
\code{\link{day_of_year}()},
\code{\link{daylength}()},
\code{\link{dec_angle}()},
\code{\link{solar_noon}()},
\code{\link{temperature conversions}}
}
\concept{utility functions}
| /man/zenith_angle.Rd | permissive | trenchproject/TrenchR | R | false | true | 1,361 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UtilityFunctions.R
\name{zenith_angle}
\alias{zenith_angle}
\title{Zenith Angle}
\usage{
zenith_angle(doy, lat, lon, hour, offset = NA)
}
\arguments{
\item{doy}{\code{numeric} day of year (1-366). This can be obtained from standard date via \code{\link{day_of_year}}.}
\item{lat}{\code{numeric} latitude (decimal degrees).}
\item{lon}{\code{numeric} longitude (decimal degrees).}
\item{hour}{\code{numeric} hour of the day (0-24).}
\item{offset}{\code{numeric} the number of hours to add to UTC (Coordinated Universal Time) to get local time (improves accuracy but not always necessary). Optional. Defaults to NA.}
}
\value{
\code{numeric} zenith angle (degrees)
}
\description{
The function calculates the zenith angle, the location of the sun as an angle (in degrees) measured from vertical \insertCite{Campbell1998}{TrenchR}.
}
\examples{
zenith_angle(doy = 112,
lat = 47.61,
lon = -122.33,
hour = 12)
}
\references{
\insertAllCited{}
}
\seealso{
Other utility functions:
\code{\link{airpressure_from_elev}()},
\code{\link{azimuth_angle}()},
\code{\link{day_of_year}()},
\code{\link{daylength}()},
\code{\link{dec_angle}()},
\code{\link{solar_noon}()},
\code{\link{temperature conversions}}
}
\concept{utility functions}
|
### Name: stackpoly
### Title: Display the columns of a matrix or data frame as stacked
###   polygons.
### Aliases: stackpoly
### Keywords: misc

### ** Examples

## Random non-negative test data: 10 rows by 10 columns.
testx <- matrix(abs(rnorm(100)), nrow = 10)

## Pre-accumulated columns drawn as overlapping polygons.
stackpoly(matrix(cumsum(testx), nrow = 10),
          main = "Test Stackpoly I",
          xaxlab = c("One", "Two", "Three", "Four", "Five",
                     "Six", "Seven", "Eight", "Nine", "Ten"),
          border = "black", staxx = TRUE)

## Same data, but let stackpoly() perform the stacking itself.
stackpoly(testx,
          main = "Test Stackpoly II",
          xaxlab = c("One", "Two", "Three", "Four", "Five",
                     "Six", "Seven", "Eight", "Nine", "Ten"),
          border = "black", staxx = TRUE, stack = TRUE)

## Centred, descending values give a waterfall-style plot.
stackpoly(rev(sort(testx - mean(testx))),
          main = "Test Waterfall Plot",
          col = "green", border = "black")
| /icfp09/lib/plotrix/R-ex/stackpoly.R | no_license | Jacob33123/narorumo | R | false | false | 650 | r | ### Name: stackpoly
### Title: Display the columns of a matrix or data frame as stacked
### polygons.
### Aliases: stackpoly
### Keywords: misc
### ** Examples
testx<-matrix(abs(rnorm(100)),nrow=10)
stackpoly(matrix(cumsum(testx),nrow=10),main="Test Stackpoly I",
xaxlab=c("One","Two","Three","Four","Five",
"Six","Seven","Eight","Nine","Ten"),border="black",staxx=TRUE)
stackpoly(testx,main="Test Stackpoly II",
xaxlab=c("One","Two","Three","Four","Five",
"Six","Seven","Eight","Nine","Ten"),border="black",
staxx=TRUE,stack=TRUE)
stackpoly(rev(sort(testx-mean(testx))),main="Test Waterfall Plot",
col="green",border="black")
|
context("ROC Curve/AUC")
library(dplyr)
# HPC_CV takes too long
# Restrict the hpc_cv fixture (presumably the package's bundled resampling
# results -- confirm against the package data) to five folds to keep tests fast.
hpc_cv2 <- filter(hpc_cv, Resample %in% c("Fold06", "Fold07", "Fold08", "Fold09", "Fold10"))
# ------------------------------------------------------------------------------
# Reference ROC objects computed directly with pROC; the tests below compare
# the package's roc_auc()/roc_curve() output against these.
roc_curv <- pROC::roc(two_class_example$truth,
                      two_class_example$Class1,
                      levels = rev(levels(two_class_example$truth)))
lvls <- levels(two_class_example$truth)
# AUC of the reference (unsmoothed) curve, as a plain numeric.
roc_val <- as.numeric(roc_curv$auc)
# Smoothed reference curve, used for the options = list(smooth = TRUE) tests.
smooth_curv <- pROC::roc(two_class_example$truth,
                         two_class_example$Class1,
                         levels = rev(levels(two_class_example$truth)),
                         smooth = TRUE)
test_that('Two class', {
  # roc_auc() should match pROC's AUC whichever class-probability column is
  # supplied (Class1 vs Class2 give the same two-class AUC for this fixture).
  expect_equal(
    roc_auc(two_class_example, truth, Class1)[[".estimate"]],
    roc_val
  )
  expect_equal(
    roc_auc(two_class_example, truth = "truth", Class2)[[".estimate"]],
    roc_val
  )
  # Smoothed AUC is compared with a tolerance since smoothing is approximate.
  # (Was `tol = 0.001`, which relied on partial argument matching; the
  # argument to expect_equal() is `tolerance`, so spell it out.)
  expect_equal(
    roc_auc(two_class_example, truth, Class1, options = list(smooth = TRUE))[[".estimate"]],
    as.numeric(smooth_curv$auc),
    tolerance = 0.001
  )
})
test_that('ROC Curve', {
  library(pROC)
  # Build the expected curve from pROC: coordinates at every observed
  # probability threshold (plus -Inf/Inf end points), transposed into a
  # tibble and sorted by threshold to match roc_curve()'s column layout.
  points <- coords(roc_curv, x = unique(c(-Inf, two_class_example$Class1, Inf)), input = "threshold")
  points <- dplyr::as_tibble(t(points)) %>% dplyr::arrange(threshold) %>% dplyr::rename(.threshold = threshold)
  # The smoothed curve is queried by specificity instead of threshold.
  s_points <- coords(smooth_curv, x = unique(c(0, smooth_curv$specificities, 1)), input = "specificity")
  s_points <- dplyr::as_tibble(t(s_points)) %>% dplyr::arrange(specificity)
  # Compare as plain data.frames so tibble-specific attributes are ignored.
  expect_equal(
    as.data.frame(roc_curve(two_class_example, truth, Class1)),
    as.data.frame(points)
  )
  expect_equal(
    as.data.frame(roc_curve(two_class_example, truth, Class1, options = list(smooth = TRUE))),
    as.data.frame(s_points)
  )
})
test_that("Multiclass ROC Curve", {
  res <- roc_curve(hpc_cv2, obs, VF:L)
  # structural tests
  # One-vs-all curves are stacked by class, with the class name in `.level`.
  expect_equal(colnames(res), c(".level", ".threshold", "specificity", "sensitivity"))
  expect_equal(unique(res$.level), levels(hpc_cv2$obs))
  res_g <- roc_curve(group_by(hpc_cv2, Resample), obs, VF:L)
  # structural tests
  # Grouped input should prepend the grouping column (Resample) to the output.
  expect_equal(colnames(res_g), c("Resample", ".level", ".threshold", "specificity", "sensitivity"))
})
# ------------------------------------------------------------------------------
# Reference value below was computed manually with the HandTill2001 package:
# HandTill2001::auc(HandTill2001::multcap(hpc_cv2$obs, as.matrix(select(hpc_cv2, VF:L))))
test_that("Hand Till multiclass", {
  # With no estimator specified, multiclass roc_auc() should reproduce the
  # Hand & Till (2001) value hard-coded from the reference computation above.
  expect_equal(
    roc_auc(hpc_cv2, obs, VF:L)[[".estimate"]],
    0.827387699597311
  )
})
# ------------------------------------------------------------------------------
# Fold-1 fixture; data_hpc_fold1() is a test helper defined elsewhere in the
# package's testthat setup files.
hpc_f1 <- data_hpc_fold1()
test_that("Multiclass ROC AUC", {
  # Macro and macro-weighted averaging should equal manually averaging the
  # one-vs-all binary AUCs (prob_macro_metric / prob_macro_weighted_metric
  # are test helpers, presumably defined in the shared test setup -- confirm).
  expect_equal(
    roc_auc(hpc_f1, obs, VF:L, estimator = "macro")[[".estimate"]],
    prob_macro_metric(roc_auc_binary, options = list())
  )
  expect_equal(
    roc_auc(hpc_f1, obs, VF:L, estimator = "macro_weighted")[[".estimate"]],
    prob_macro_weighted_metric(roc_auc_binary, options = list())
  )
})
| /tests/testthat/test-prob-roc.R | no_license | alexhallam/yardstick | R | false | false | 2,964 | r | context("ROC Curve/AUC")
library(dplyr)
# HPC_CV takes too long
hpc_cv2 <- filter(hpc_cv, Resample %in% c("Fold06", "Fold07", "Fold08", "Fold09", "Fold10"))
# ------------------------------------------------------------------------------
roc_curv <- pROC::roc(two_class_example$truth,
two_class_example$Class1,
levels = rev(levels(two_class_example$truth)))
lvls <- levels(two_class_example$truth)
roc_val <- as.numeric(roc_curv$auc)
smooth_curv <- pROC::roc(two_class_example$truth,
two_class_example$Class1,
levels = rev(levels(two_class_example$truth)),
smooth = TRUE)
test_that('Two class', {
expect_equal(
roc_auc(two_class_example, truth, Class1)[[".estimate"]],
roc_val
)
expect_equal(
roc_auc(two_class_example, truth = "truth", Class2)[[".estimate"]],
roc_val
)
expect_equal(
roc_auc(two_class_example, truth, Class1, options = list(smooth = TRUE))[[".estimate"]],
as.numeric(smooth_curv$auc),
tol = 0.001
)
})
test_that('ROC Curve', {
library(pROC)
points <- coords(roc_curv, x = unique(c(-Inf, two_class_example$Class1, Inf)), input = "threshold")
points <- dplyr::as_tibble(t(points)) %>% dplyr::arrange(threshold) %>% dplyr::rename(.threshold = threshold)
s_points <- coords(smooth_curv, x = unique(c(0, smooth_curv$specificities, 1)), input = "specificity")
s_points <- dplyr::as_tibble(t(s_points)) %>% dplyr::arrange(specificity)
expect_equal(
as.data.frame(roc_curve(two_class_example, truth, Class1)),
as.data.frame(points)
)
expect_equal(
as.data.frame(roc_curve(two_class_example, truth, Class1, options = list(smooth = TRUE))),
as.data.frame(s_points)
)
})
test_that("Multiclass ROC Curve", {
res <- roc_curve(hpc_cv2, obs, VF:L)
# structural tests
expect_equal(colnames(res), c(".level", ".threshold", "specificity", "sensitivity"))
expect_equal(unique(res$.level), levels(hpc_cv2$obs))
res_g <- roc_curve(group_by(hpc_cv2, Resample), obs, VF:L)
# structural tests
expect_equal(colnames(res_g), c("Resample", ".level", ".threshold", "specificity", "sensitivity"))
})
# ------------------------------------------------------------------------------
# HandTill2001::auc(HandTill2001::multcap(hpc_cv2$obs, as.matrix(select(hpc_cv2, VF:L))))
test_that("Hand Till multiclass", {
expect_equal(
roc_auc(hpc_cv2, obs, VF:L)[[".estimate"]],
0.827387699597311
)
})
# ------------------------------------------------------------------------------
hpc_f1 <- data_hpc_fold1()
test_that("Multiclass ROC AUC", {
expect_equal(
roc_auc(hpc_f1, obs, VF:L, estimator = "macro")[[".estimate"]],
prob_macro_metric(roc_auc_binary, options = list())
)
expect_equal(
roc_auc(hpc_f1, obs, VF:L, estimator = "macro_weighted")[[".estimate"]],
prob_macro_weighted_metric(roc_auc_binary, options = list())
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{brcaMethylData1}
\alias{brcaMethylData1}
\title{A matrix with DNA methylation levels
from some CpGs on chromosome 1}
\format{
A matrix object
}
\usage{
data(brcaMethylData1)
}
\description{
This object contains methylation levels (0 to 1)
for select cytosines in chromosome 1
for TCGA breast cancer patients from
a DNA methylation microarray (Illumina 450k microarray).
Each row corresponds to one cytosine and
the coordinates for these cytosines are
in the object brcaMCoord1, (data("brcaMCoord1"), hg38 genome).
Only select cytosines on chr1 are included to keep
the example data small. Columns are patients,
with TCGA patient identifiers as column names.
6004 CpGs and 300 patients are included.
DNA methylation data is Illumina 450k
microarray data from breast cancer
patients from The Cancer Genome Atlas
(TCGA-BRCA, https://portal.gdc.cancer.gov/).
}
\keyword{datasets}
| /man/brcaMethylData1.Rd | permissive | databio/COCOA | R | false | true | 986 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{brcaMethylData1}
\alias{brcaMethylData1}
\title{A matrix with DNA methylation levels
from some CpGs on chromosome 1}
\format{
A matrix object
}
\usage{
data(brcaMethylData1)
}
\description{
This object contains methylation levels (0 to 1)
for select cytosines in chromosome 1
for TCGA breast cancer patients from
a DNA methylation microarray (Illumina 450k microarray).
Each row corresponds to one cytosine and
the coordinates for these cytosines are
in the object brcaMCoord1, (data("brcaMCoord1"), hg38 genome).
Only select cytosines on chr1 are included to keep
the example data small. Columns are patients,
with TCGA patient identifiers as column names.
6004 CpGs and 300 patients are included.
DNA methylation data is Illumina 450k
microarray data from breast cancer
patients from The Cancer Genome Atlas
(TCGA-BRCA, https://portal.gdc.cancer.gov/).
}
\keyword{datasets}
|
library(Frames2)
### Name: MLDW
### Title: Multinomial logistic estimator under dual frame approach with
###   auxiliary information from the whole population
### Aliases: MLDW
### ** Examples
# Load the example dual-frame survey datasets shipped with Frames2.
data(DatMA)
data(DatMB)
data(DatPopM)
# Population indices of the units sampled in either frame.
IndSample <- c(DatMA$Id_Pop, DatMB$Id_Pop)
# Estimate the proportions of the categories of variable Prog with the MLDW
# estimator, using Read as the auxiliary variable.
MLDW(DatMA$Prog, DatMB$Prog, DatMA$ProbA, DatMB$ProbB, DatMA$Domain, DatMB$Domain,
DatMA$Read, DatMB$Read, DatPopM$Read, IndSample)
# Repeat, also requesting 95% confidence intervals for the estimates.
MLDW(DatMA$Prog, DatMB$Prog, DatMA$ProbA, DatMB$ProbB, DatMA$Domain, DatMB$Domain,
DatMA$Read, DatMB$Read, DatPopM$Read, IndSample, 0.95)
| /data/genthat_extracted_code/Frames2/examples/MLDW.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 736 | r | library(Frames2)
### Name: MLDW
### Title: Multinomial logistic estimator under dual frame approach with
### auxiliary information from the whole population
### Aliases: MLDW
### ** Examples
data(DatMA)
data(DatMB)
data(DatPopM)
IndSample <- c(DatMA$Id_Pop, DatMB$Id_Pop)
#Let calculate proportions of categories of variable Prog using MLDW estimator
#using Read as auxiliary variable
MLDW(DatMA$Prog, DatMB$Prog, DatMA$ProbA, DatMB$ProbB, DatMA$Domain, DatMB$Domain,
DatMA$Read, DatMB$Read, DatPopM$Read, IndSample)
#Let obtain 95% confidence intervals together with the estimations
MLDW(DatMA$Prog, DatMB$Prog, DatMA$ProbA, DatMB$ProbB, DatMA$Domain, DatMB$Domain,
DatMA$Read, DatMB$Read, DatPopM$Read, IndSample, 0.95)
|
\name{ldtagr}
\alias{ldtagr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
expand a list of variants by including those in a VCF with LD exceeding
some threshold
}
\description{
expand a list of variants by including those in a VCF with LD exceeding
some threshold
}
\usage{
ldtagr(snprng, tf, samples, genome = "hg19", lbmaf = 0.05, lbR2 = 0.8, radius = 1e+05)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{snprng}{
a named GRanges for a single SNP. The name must correspond to
the name that will be assigned by \code{\link[VariantAnnotation]{genotypeToSnpMatrix}} to the corresponding column of a SnpMatrix.
}
\item{tf}{
TabixFile instance pointing to a bgzipped tabix-indexed VCF file
}
\item{samples}{
a vector of sample identifiers; if omitted, all samples are used
}
\item{genome}{
tag like 'hg19'
}
\item{lbmaf}{
lower bound on variant MAF to allow consideration
}
\item{lbR2}{
lower bound on R squared for regarding SNP to be incorporated
}
\item{radius}{
radius of search in bp around the input range
}
}
\details{
uses snpStats ld()
}
\value{
a GRanges with names corresponding to 'new' variants
and mcols fields 'paramRangeID' (base variant input)
and 'R2'
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
VJ Carey
}
\note{
slow but safe approach. probably a matrix method could
be substituted using the nice sparse approach already in snpStats
}
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
require(GenomicRanges)
if (requireNamespace("gQTLstats")) {
# install gQTLstats to test this function
cand = GRanges("1", IRanges(113038694, width=1))
names(cand) = "rs883593"
require(VariantAnnotation)
expath = dir(system.file("vcf", package="gwascat"), patt=".*exon.*gz$", full=TRUE)
tf = TabixFile(expath)
ldtagr( cand, tf, lbR2 = .8)
}
# should do with 1000 genomes in S3 bucket and gwascat
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
| /man/ldtagr.Rd | no_license | stankiewicz565/gwascat | R | false | false | 2,056 | rd | \name{ldtagr}
\alias{ldtagr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
expand a list of variants by including those in a VCF with LD exceeding
some threshold
}
\description{
expand a list of variants by including those in a VCF with LD exceeding
some threshold
}
\usage{
ldtagr(snprng, tf, samples, genome = "hg19", lbmaf = 0.05, lbR2 = 0.8, radius = 1e+05)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{snprng}{
a named GRanges for a single SNP. The name must correspond to
the name that will be assigned by \code{\link[VariantAnnotation]{genotypeToSnpMatrix}} to the corresponding column of a SnpMatrix.
}
\item{tf}{
TabixFile instance pointing to a bgzipped tabix-indexed VCF file
}
\item{samples}{
a vector of sample identifiers, if excluded, all samples used
}
\item{genome}{
tag like 'hg19'
}
\item{lbmaf}{
lower bound on variant MAF to allow consideration
}
\item{lbR2}{
lower bound on R squared for regarding SNP to be incorporated
}
\item{radius}{
radius of search in bp around the input range
}
}
\details{
uses snpStats ld()
}
\value{
a GRanges with names corresponding to 'new' variants
and mcols fields 'paramRangeID' (base variant input)
and 'R2'
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
VJ Carey
}
\note{
slow but safe approach. probably a matrix method could
be substituted using the nice sparse approach already in snpStats
}
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
require(GenomicRanges)
if (requireNamespace("gQTLstats")) {
# install gQTLstats to test this function
cand = GRanges("1", IRanges(113038694, width=1))
names(cand) = "rs883593"
require(VariantAnnotation)
expath = dir(system.file("vcf", package="gwascat"), patt=".*exon.*gz$", full=TRUE)
tf = TabixFile(expath)
ldtagr( cand, tf, lbR2 = .8)
}
# should do with 1000 genomes in S3 bucket and gwascat
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
|
importModuleUI <- function(id) {
ns <- NS(id)
exampleDatasets <- c() ## Need to add final small example data here
if ("TENxPBMCData" %in% rownames(installed.packages())){
exampleDatasets <- c(exampleDatasets,
"PBMC 3K (10X)" = "pbmc3k",
"PBMC 4K (10X)" = "pbmc4k",
"PBMC 6K (10X)" = "pbmc6k",
"PBMC 8K (10X)" = "pbmc8k",
"PBMC 33K (10X)" = "pbmc33k",
"PBMC 68K (10X)" = "pbmc68k")
}
if ("scRNAseq" %in% rownames(installed.packages())){
exampleDatasets <- c(exampleDatasets,
"Fluidigm (Pollen et al, 2014)" = "fluidigm_pollen",
"Mouse Brain (Tasic et al, 2016)" = "allen_tasic")
}
tagList(
useShinyjs(),
tags$style(appCSS),
tags$div(
class = "jumbotron", style = "background-color:#ededed",
tags$div(
class = "container",
h1("Single Cell Toolkit"),
p("Filter, cluster, and analyze single cell RNA-Seq data"),
p(
"Need help?",
tags$a(href = "https://compbiomed.github.io/sctk_docs/",
"Read the docs.", target = "_blank")
)
)
),
tags$br(),
tags$div(
class = "container",
h1("Upload"),
h5(tags$a(href = "https://compbiomed.github.io/sctk_docs/articles/v03-tab01_Upload.html",
"(help)", target = "_blank")),
tags$hr(),
hidden(wellPanel(id = ns("annotationData"),
h3("Data summary"),
tableOutput(ns("summarycontents")))),
h3("Choose data source:"),
radioButtons(ns("uploadChoice"), label = NULL, c("Import from a preprocessing tool" = 'directory',
"Upload files" = "files",
"Upload SingleCellExperiment or Seurat object stored in an RDS File" = "rds",
"Use example data" = "example")
),
tags$hr(),
conditionalPanel(condition = sprintf("input['%s'] == 'files'", ns("uploadChoice")),
h3("Upload data in tab separated text format:"),
fluidRow(
column(width = 4,
wellPanel(
h4("Example count file:"),
HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
<th>Cell1</th><th>Cell2</th><th>…</th><th>CellN</th>
</tr></thead><tbody><tr class="odd"><td>Gene1</td><td>0</td>
<td>0</td><td>…</td><td>0</td></tr><tr class="even">
<td>Gene2</td><td>5</td><td>6</td><td>…</td><td>0</td>
</tr><tr class="odd"><td>Gene3</td><td>4</td><td>3</td>
<td>…</td><td>8</td></tr><tr class="even">
<td>…</td><td>…</td><td>…</td>
<td>…</td><td>…</td></tr><tr class="odd">
<td>GeneM</td><td>10</td><td>10</td><td>…</td><td>10</td>
</tr></tbody></table>'),
tags$a(href = "https://drive.google.com/open?id=1n0CtM6phfkWX0O6xRtgPPg6QuPFP6pY8",
"Download an example count file here.", target = "_blank"),
tags$br(),
tags$br(),
fileInput(
"countsfile",
HTML(
paste("Input assay (eg. counts, required):",
tags$span(style = "color:red", "*", sep = ""))
),
accept = c(
"text/csv", "text/comma-separated-values", "mtx",
"text/tab-separated-values", "text/plain", ".csv", ".tsv"
)
)
),
h4("Input Assay Type:"),
selectInput("inputAssayType", label = NULL,
c("counts", "normcounts", "logcounts", "cpm",
"logcpm", "tpm", "logtpm")
)
),
column(width = 4,
wellPanel(
h4("Example cell annotation file:"),
HTML('<table class="table"><thead><tr class="header"><th>Cell</th>
<th>Annot1</th><th>…</th></tr></thead><tbody><tr class="odd">
<td>Cell1</td><td>a</td><td>…</td></tr><tr class="even">
<td>Cell2</td><td>a</td><td>…</td></tr><tr class="odd">
<td>Cell3</td><td>b</td><td>…</td></tr><tr class="even">
<td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>CellN</td>
<td>b</td><td>…</td></tr></tbody></table>'),
tags$a(href = "https://drive.google.com/open?id=10IDmZQUiASN4wnzO4-WRJQopKvxCNu6J",
"Download an example annotation file here.", target = "_blank"),
tags$br(),
tags$br(),
fileInput(
"annotFile", "Cell annotations (optional):",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain", ".csv", ".tsv"
)
)
)
),
column(width = 4,
wellPanel(
h4("Example feature file:"),
HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
<th>Annot2</th><th>…</th></tr></thead><tbody><tr class="odd">
<td>Gene1</td><td>a</td><td>…</td></tr><tr class="even">
<td>Gene2</td><td>a</td><td>…</td></tr><tr class="odd">
<td>Gene3</td><td>b</td><td>…</td></tr><tr class="even">
<td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>GeneM</td>
<td>b</td><td>…</td></tr></tbody></table>'),
tags$a(href = "https://drive.google.com/open?id=1gxXaZPq5Wrn2lNHacEVaCN2a_FHNvs4O",
"Download an example feature file here.", target = "_blank"),
tags$br(),
tags$br(),
fileInput(
"featureFile", "Feature annotations (optional):",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain", ".csv", ".tsv"
)
)
)
)
),
actionButton(ns("addFilesImport"), "Add To Sample List")
),
conditionalPanel(condition = sprintf("input['%s'] == 'example'", ns("uploadChoice")),
h3("Choose Example Dataset:"),
selectInput(ns("selectExampleData"), label = NULL, exampleDatasets),
conditionalPanel(
condition = sprintf("input['%s'] == 'fluidigm_pollen'", ns("selectExampleData")),
h3(tags$a(href = "http://dx.doi.org/10.1038/nbt.2967", "130 cells from (Pollen et al. 2014), 65 at high coverage and 65 at low coverage", target = "_blank")),
"Transcriptomes of cell populations in both of low-coverage (~0.27 million reads per cell) and high-coverage (~5 million reads per cell) to identify cell-type-specific biomarkers, and to compare gene expression across samples specifically for cells of a given type as well as to reconstruct developmental lineages of related cell types. Data was loaded from the 'scRNASeq' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'allen_tasic'", ns("selectExampleData")),
h3(tags$a(href = "http://dx.doi.org/10.1038/nn.4216", "Mouse visual cortex cells from (Tasic et al. 2016)", target = "_blank")),
"Subset of 379 cells from the mouse visual cortex. Data was loaded from the 'scRNASeq' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc3k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "2,700 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc4k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "4,430 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc6k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "5,419 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc8k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "8,381 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc33k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "33,148 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc68k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "68,579 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
actionButton(ns("addExampleImport"), "Add To Sample List")
),
conditionalPanel(condition = sprintf("input['%s'] == 'rds'", ns("uploadChoice")),
h3("Choose an RDS file that contains a SingleCellExperiment or Seurat object:"),
fileInput(
ns("rdsFile"), "SingleCellExperiment RDS file:", accept = c(".rds", ".RDS")
),
actionButton(ns("addRDSImport"), "Add To Sample List")
),
conditionalPanel(
condition = sprintf("input['%s'] == 'directory'", ns("uploadChoice")),
tags$style(HTML("
div {
word-wrap: break-word;
}
")),
h3("Choose a Preprocessing Tool:"),
radioButtons(ns("algoChoice"), label = NULL, c("Cell Ranger v2" = "cellRanger2",
"Cell Ranger v3" = "cellRanger3",
"STARsolo" = "starSolo",
"BUStools" = "busTools",
"SEQC" = "seqc",
"Optimus" = "optimus")
),
tags$br(),
conditionalPanel( condition = sprintf("input['%s'] == 'cellRanger2'", ns("algoChoice")),
actionButton(ns("addCR2Sample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'cellRanger3'", ns("algoChoice")),
actionButton(ns("addCR3Sample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'starSolo'", ns("algoChoice")),
wellPanel(
h5("Please select the directory that contains your /Gene directory as your base directory. ")
),
actionButton(ns("addSSSample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'busTools'", ns("algoChoice")),
wellPanel(
h5("Please select your /genecount directory as your base directory.")
),
actionButton(ns("addBUSSample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'seqc'", ns("algoChoice")),
wellPanel(
h5("Please select the directory that contains your sample files as your base directory.")
),
actionButton(ns("addSEQSample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'optimus'", ns("algoChoice")),
wellPanel(
h5("Please select the directory that contains the following four directories - call-MergeCountFiles, call-MergeCellMetrics, call-MergeGeneMetrics, call-RunEmptyDrops - as your base directory.")
),
actionButton(ns("addOptSample"), "Add a Sample"),
)
),
tags$hr(),
wellPanel(
h4("Samples to Import:"),
fluidRow(
column(3, tags$b("Type")),
column(3, tags$b("Location")),
column(3, tags$b("Sample Name")),
column(3, tags$b("Remove"))
),
tags$div(id = ns("newSampleImport")),
tags$br(),
tags$br(),
actionButton(ns("clearAllImport"), "Clear Samples")
),
radioButtons(ns("combineSCEChoice"), label = NULL, c("Add to existing SCE object" = 'addToExistingSCE',
"Overwrite existing SCE object" = "overwriteSCE")
),
withBusyIndicatorUI(
actionButton(ns("uploadData"), "Import")
),
tags$div(
class = "container",
p("")
)
)
)
} | /inst/shiny/ui_01_import.R | permissive | vidyaap/singleCellTK | R | false | false | 14,108 | r | importModuleUI <- function(id) {
ns <- NS(id)
exampleDatasets <- c() ## Need to add final small example data here
if ("TENxPBMCData" %in% rownames(installed.packages())){
exampleDatasets <- c(exampleDatasets,
"PBMC 3K (10X)" = "pbmc3k",
"PBMC 4K (10X)" = "pbmc4k",
"PBMC 6K (10X)" = "pbmc6k",
"PBMC 8K (10X)" = "pbmc8k",
"PBMC 33K (10X)" = "pbmc33k",
"PBMC 68K (10X)" = "pbmc68k")
}
if ("scRNAseq" %in% rownames(installed.packages())){
exampleDatasets <- c(exampleDatasets,
"Fluidigm (Pollen et al, 2014)" = "fluidigm_pollen",
"Mouse Brain (Tasic et al, 2016)" = "allen_tasic")
}
tagList(
useShinyjs(),
tags$style(appCSS),
tags$div(
class = "jumbotron", style = "background-color:#ededed",
tags$div(
class = "container",
h1("Single Cell Toolkit"),
p("Filter, cluster, and analyze single cell RNA-Seq data"),
p(
"Need help?",
tags$a(href = "https://compbiomed.github.io/sctk_docs/",
"Read the docs.", target = "_blank")
)
)
),
tags$br(),
tags$div(
class = "container",
h1("Upload"),
h5(tags$a(href = "https://compbiomed.github.io/sctk_docs/articles/v03-tab01_Upload.html",
"(help)", target = "_blank")),
tags$hr(),
hidden(wellPanel(id = ns("annotationData"),
h3("Data summary"),
tableOutput(ns("summarycontents")))),
h3("Choose data source:"),
radioButtons(ns("uploadChoice"), label = NULL, c("Import from a preprocessing tool" = 'directory',
"Upload files" = "files",
"Upload SingleCellExperiment or Seurat object stored in an RDS File" = "rds",
"Use example data" = "example")
),
tags$hr(),
conditionalPanel(condition = sprintf("input['%s'] == 'files'", ns("uploadChoice")),
h3("Upload data in tab separated text format:"),
fluidRow(
column(width = 4,
wellPanel(
h4("Example count file:"),
HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
<th>Cell1</th><th>Cell2</th><th>…</th><th>CellN</th>
</tr></thead><tbody><tr class="odd"><td>Gene1</td><td>0</td>
<td>0</td><td>…</td><td>0</td></tr><tr class="even">
<td>Gene2</td><td>5</td><td>6</td><td>…</td><td>0</td>
</tr><tr class="odd"><td>Gene3</td><td>4</td><td>3</td>
<td>…</td><td>8</td></tr><tr class="even">
<td>…</td><td>…</td><td>…</td>
<td>…</td><td>…</td></tr><tr class="odd">
<td>GeneM</td><td>10</td><td>10</td><td>…</td><td>10</td>
</tr></tbody></table>'),
tags$a(href = "https://drive.google.com/open?id=1n0CtM6phfkWX0O6xRtgPPg6QuPFP6pY8",
"Download an example count file here.", target = "_blank"),
tags$br(),
tags$br(),
fileInput(
"countsfile",
HTML(
paste("Input assay (eg. counts, required):",
tags$span(style = "color:red", "*", sep = ""))
),
accept = c(
"text/csv", "text/comma-separated-values", "mtx",
"text/tab-separated-values", "text/plain", ".csv", ".tsv"
)
)
),
h4("Input Assay Type:"),
selectInput("inputAssayType", label = NULL,
c("counts", "normcounts", "logcounts", "cpm",
"logcpm", "tpm", "logtpm")
)
),
column(width = 4,
wellPanel(
h4("Example cell annotation file:"),
HTML('<table class="table"><thead><tr class="header"><th>Cell</th>
<th>Annot1</th><th>…</th></tr></thead><tbody><tr class="odd">
<td>Cell1</td><td>a</td><td>…</td></tr><tr class="even">
<td>Cell2</td><td>a</td><td>…</td></tr><tr class="odd">
<td>Cell3</td><td>b</td><td>…</td></tr><tr class="even">
<td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>CellN</td>
<td>b</td><td>…</td></tr></tbody></table>'),
tags$a(href = "https://drive.google.com/open?id=10IDmZQUiASN4wnzO4-WRJQopKvxCNu6J",
"Download an example annotation file here.", target = "_blank"),
tags$br(),
tags$br(),
fileInput(
"annotFile", "Cell annotations (optional):",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain", ".csv", ".tsv"
)
)
)
),
column(width = 4,
wellPanel(
h4("Example feature file:"),
HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
<th>Annot2</th><th>…</th></tr></thead><tbody><tr class="odd">
<td>Gene1</td><td>a</td><td>…</td></tr><tr class="even">
<td>Gene2</td><td>a</td><td>…</td></tr><tr class="odd">
<td>Gene3</td><td>b</td><td>…</td></tr><tr class="even">
<td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>GeneM</td>
<td>b</td><td>…</td></tr></tbody></table>'),
tags$a(href = "https://drive.google.com/open?id=1gxXaZPq5Wrn2lNHacEVaCN2a_FHNvs4O",
"Download an example feature file here.", target = "_blank"),
tags$br(),
tags$br(),
fileInput(
"featureFile", "Feature annotations (optional):",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain", ".csv", ".tsv"
)
)
)
)
),
actionButton(ns("addFilesImport"), "Add To Sample List")
),
conditionalPanel(condition = sprintf("input['%s'] == 'example'", ns("uploadChoice")),
h3("Choose Example Dataset:"),
selectInput(ns("selectExampleData"), label = NULL, exampleDatasets),
conditionalPanel(
condition = sprintf("input['%s'] == 'fluidigm_pollen'", ns("selectExampleData")),
h3(tags$a(href = "http://dx.doi.org/10.1038/nbt.2967", "130 cells from (Pollen et al. 2014), 65 at high coverage and 65 at low coverage", target = "_blank")),
"Transcriptomes of cell populations in both of low-coverage (~0.27 million reads per cell) and high-coverage (~5 million reads per cell) to identify cell-type-specific biomarkers, and to compare gene expression across samples specifically for cells of a given type as well as to reconstruct developmental lineages of related cell types. Data was loaded from the 'scRNASeq' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'allen_tasic'", ns("selectExampleData")),
h3(tags$a(href = "http://dx.doi.org/10.1038/nn.4216", "Mouse visual cortex cells from (Tasic et al. 2016)", target = "_blank")),
"Subset of 379 cells from the mouse visual cortex. Data was loaded from the 'scRNASeq' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc3k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "2,700 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc4k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "4,430 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc6k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "5,419 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc8k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "8,381 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc33k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "33,148 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
conditionalPanel(condition = sprintf("input['%s'] == 'pbmc68k'", ns("selectExampleData")),
h3(tags$a(href = "https://doi.org/10.1038/ncomms14049", "68,579 peripheral blood mononuclear cells (PBMCs) from 10X Genomics", target = "_blank")),
"Data was loaded with the 'TENxPBMCData' package.",
tags$br(),
tags$br()
),
actionButton(ns("addExampleImport"), "Add To Sample List")
),
conditionalPanel(condition = sprintf("input['%s'] == 'rds'", ns("uploadChoice")),
h3("Choose an RDS file that contains a SingleCellExperiment or Seurat object:"),
fileInput(
ns("rdsFile"), "SingleCellExperiment RDS file:", accept = c(".rds", ".RDS")
),
actionButton(ns("addRDSImport"), "Add To Sample List")
),
conditionalPanel(
condition = sprintf("input['%s'] == 'directory'", ns("uploadChoice")),
tags$style(HTML("
div {
word-wrap: break-word;
}
")),
h3("Choose a Preprocessing Tool:"),
radioButtons(ns("algoChoice"), label = NULL, c("Cell Ranger v2" = "cellRanger2",
"Cell Ranger v3" = "cellRanger3",
"STARsolo" = "starSolo",
"BUStools" = "busTools",
"SEQC" = "seqc",
"Optimus" = "optimus")
),
tags$br(),
conditionalPanel( condition = sprintf("input['%s'] == 'cellRanger2'", ns("algoChoice")),
actionButton(ns("addCR2Sample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'cellRanger3'", ns("algoChoice")),
actionButton(ns("addCR3Sample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'starSolo'", ns("algoChoice")),
wellPanel(
h5("Please select the directory that contains your /Gene directory as your base directory. ")
),
actionButton(ns("addSSSample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'busTools'", ns("algoChoice")),
wellPanel(
h5("Please select your /genecount directory as your base directory.")
),
actionButton(ns("addBUSSample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'seqc'", ns("algoChoice")),
wellPanel(
h5("Please select the directory that contains your sample files as your base directory.")
),
actionButton(ns("addSEQSample"), "Add a Sample"),
),
conditionalPanel(condition = sprintf("input['%s'] == 'optimus'", ns("algoChoice")),
wellPanel(
h5("Please select the directory that contains the following four directories - call-MergeCountFiles, call-MergeCellMetrics, call-MergeGeneMetrics, call-RunEmptyDrops - as your base directory.")
),
actionButton(ns("addOptSample"), "Add a Sample"),
)
),
tags$hr(),
wellPanel(
h4("Samples to Import:"),
fluidRow(
column(3, tags$b("Type")),
column(3, tags$b("Location")),
column(3, tags$b("Sample Name")),
column(3, tags$b("Remove"))
),
tags$div(id = ns("newSampleImport")),
tags$br(),
tags$br(),
actionButton(ns("clearAllImport"), "Clear Samples")
),
radioButtons(ns("combineSCEChoice"), label = NULL, c("Add to existing SCE object" = 'addToExistingSCE',
"Overwrite existing SCE object" = "overwriteSCE")
),
withBusyIndicatorUI(
actionButton(ns("uploadData"), "Import")
),
tags$div(
class = "container",
p("")
)
)
)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/korsoer-age-gender.R
\docType{data}
\name{korsoer_age_gender}
\alias{korsoer_age_gender}
\title{Cholera in Korsør by age and gender}
\format{A data frame with 7 rows and 7 variables:
\describe{
\item{\bold{age_grop}}{Character vector of the age grouping}
\item{\bold{male_sick}}{Integer vector of the number of males diagnosed
with cholera}
\item{\bold{male_dead}}{Integer vector of the number of males recorded as
dying due to cholera}
\item{\bold{female_sick}}{Integer vector of the number of females diagnosed
with cholera}
\item{\bold{female_dead}}{Integer vector of the number of females recorded as
dying due to cholera}
\item{\bold{total_sick}}{Integer vector of the total number of people
diagnosed with cholera}
\item{\bold{total_dead}}{Integer vector of the total number of people recorded as
dying due to cholera}
}}
\source{
{Mads to tell}
}
\usage{
korsoer_age_gender
}
\description{
A dataset containing the number of cholera cases and deaths broken down by
age and gender in Korsør in 1853. Data comes from...
}
\keyword{datasets}
| /man/korsoer_age_gender.Rd | no_license | matthew-phelps/CholeraDataDK | R | false | true | 1,154 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/korsoer-age-gender.R
\docType{data}
\name{korsoer_age_gender}
\alias{korsoer_age_gender}
\title{Cholera in Korsør by age and gender}
\format{A data frame with 7 rows and 7 variables:
\describe{
\item{\bold{age_grop}}{Character vector of the age grouping}
\item{\bold{male_sick}}{Integer vector of the number of males diagnosed
with cholera}
\item{\bold{male_dead}}{Integer vector of the number of males recorded as
dying due to cholera}
\item{\bold{female_sick}}{Integer vector of the number of females diagnosed
with cholera}
\item{\bold{female_dead}}{Integer vector of the number of females recorded as
dying due to cholera}
\item{\bold{total_sick}}{Integer vector of the total number of people
diagnosed with cholera}
\item{\bold{total_dead}}{Integer vector of the total number of people recorded as
dying due to cholera}
}}
\source{
{Mads to tell}
}
\usage{
korsoer_age_gender
}
\description{
A dataset containing the number of cholera cases and deaths broken down by
age and gender in Korsør in 1853. Data comes from...
}
\keyword{datasets}
|
source("ona_package.R") ## this is a copy paste of the onaR package source file
library(tidyverse)
library(splitstackshape)
library(shinydashboard)
library(DT)
library(shinyWidgets)
library(future)
## targets using the bens listing file
target <- readxl::read_excel("R5 bens lookup file_updated.xlsx")%>%
group_by(Member_id, District_Name,Community_Name)%>%
dplyr::summarise(Target = n())
## variables to be used -- just for initializing the dashboard
varshown <- c("today", "ONA_username","e.Interview_criteria","Which.is.the.agency.supporting.the.beneficiary.","District_Name","Community_Name","start", "end","_duration")
# Initialize a empty dataframe
listData <- function() {
  # Zero-row data frame whose columns mirror the dashboard variables held in
  # the global `varshown` vector; used to initialise the tables before any
  # ONA data has been downloaded.
  template <- as.data.frame(matrix(nrow = 0, ncol = length(varshown)))
  colnames(template) <- varshown
  # Display the verbose ONA agency question under the short name "Member_id"
  # (match() keeps the original lookup behaviour).
  agency_idx <- match("Which.is.the.agency.supporting.the.beneficiary.", names(template))
  names(template)[agency_idx] <- "Member_id"
  return(template)
}
## function to format the data
## Clean and standardise a raw ONA survey export:
## - adds `today` (GMT date of the `end` timestamp), `interviewDuration`
##   (minutes) and the `long` (> 120 min) / `short` (0-12 min) flags
## - renames ONA question columns to short display names
## - strips the ONA group prefixes from the remaining column names
## - drops the first record (kept from the original pipeline; presumably a
##   test submission -- TODO confirm with the survey team)
## - recodes `Interview_criteria` to "1" (criteria met / missing) or "0"
## Returns the cleaned data frame; the caller's object is not modified.
prepare_data <- function(data){
  # Rename a column only when it exists: the original code indexed names()
  # with match(), which errors on the NA subscript produced by a missing
  # column. Renaming conditionally makes the cleaner robust to exports that
  # lack one of the optional columns.
  rename_col <- function(df, from, to) {
    idx <- match(from, names(df))
    if (!is.na(idx)) names(df)[idx] <- to
    df
  }
  data$today <- as.character(as.Date(data$end, tz = "GMT"))
  data$end <- as.POSIXct(data$end)
  data$start <- as.POSIXct(data$start)
  data$interviewDuration <- difftime(data$end, data$start, units = "mins")
  # Quality flags: suspiciously long (> 2 h) or short (<= 12 min) interviews.
  data$long <- data$interviewDuration > 120
  data$short <- data$interviewDuration >= 0 & data$interviewDuration <= 12
  data <- rename_col(data, "a.Member_id", "Member_id")
  data <- rename_col(data, "e.Interview_criteria", "Interview_criteria")
  data <- rename_col(data, "d.consensus", "consent")
  data <- rename_col(data, "consensus", "consent")
  # Strip the ONA group prefixes. The patterns are regexes (dots match any
  # character), matching the behaviour of the original str_remove_all() calls.
  names(data) <- gsub("g.HH_confirmation.", "", names(data))
  names(data) <- gsub("q.q2.COVID.19.", "", names(data))
  # Drop the first record, as in the original pipeline.
  data <- data[-1, ]
  # "1" when the criteria answer is missing or confirms the right household.
  data$Interview_criteria <- ifelse(
    is.na(data$Interview_criteria) |
      data$Interview_criteria == "If most information is matching and respondent confirms that you are speaking with the selected household, please proceed with the survey",
    "1", "0")
  return(data)
}
# Retrieve data from ona
## Download the round-5 PDM survey from the ONA/BRCiS account and clean it
## with prepare_data(). Returns NULL instead of crashing when the download
## fails (bad credentials, no network, ...), so callers can test for failure.
get_data <- function(login, password ){
  sn_r5 <- tryCatch(
    onaDownload("Condensed_PDM_2020_Survey_R5_L",
                "BRCiS", login, password, keepGroupNames = FALSE),
    error = function(e) {
      message("can't access data")
      NULL  # explicit failure sentinel
    }
  )
  # Bug fix: previously a failed download fell through to prepare_data(NULL),
  # which errored; bail out cleanly instead.
  if (is.null(sn_r5)) {
    return(NULL)
  }
  return(prepare_data(sn_r5))
}
| /global.R | no_license | faithmusili/SN_data_check | R | false | false | 2,313 | r | source("ona_package.R") ## this is a copy paste of the onaR package source file
library(tidyverse)
library(splitstackshape)
library(shinydashboard)
library(DT)
library(shinyWidgets)
library(future)
## targets using the bens listing file
target <- readxl::read_excel("R5 bens lookup file_updated.xlsx")%>%
group_by(Member_id, District_Name,Community_Name)%>%
dplyr::summarise(Target = n())
## variables to be used -- just for initializing the dashboard
varshown <- c("today", "ONA_username","e.Interview_criteria","Which.is.the.agency.supporting.the.beneficiary.","District_Name","Community_Name","start", "end","_duration")
# Initialize a empty dataframe
# Create the empty placeholder data frame that initialises the dashboard
# tables before any ONA data has been downloaded. Relies on the global
# `varshown` vector for the column set.
listData <- function(){
  # matrix(ncol = ..., nrow = 0) yields a zero-row frame with one column per
  # entry of `varshown`.
  d <- data.frame(matrix(ncol=length(varshown), nrow = 0))
  colnames(d)<-varshown
  # Display the verbose ONA agency question under the short name "Member_id".
  # NOTE(review): match() returns NA if the column is absent, and assigning
  # at an NA index is an error -- this assumes the column is always present.
  names(d)[match("Which.is.the.agency.supporting.the.beneficiary.",names(d))] <- "Member_id"
  return(d)
}
## function to format the data
## Clean and standardise a raw ONA survey export: compute submission date and
## interview duration, flag long/short interviews, rename ONA question
## columns to short display names, drop the first record, and recode the
## interview criteria to "1"/"0". Requires the tidyverse (dplyr/stringr) to
## be attached. Returns the cleaned data frame.
prepare_data <- function(data){
  # Submission date (GMT) taken from the interview end timestamp.
  data$today <- as.character(as.Date(data$end, tz = "GMT"))
  data$end <- as.POSIXct(data$end)
  data$start <- as.POSIXct(data$start)
  data$interviewDuration <- difftime(data$end, data$start, units='mins')
  # Quality flags: suspiciously long (> 2 h) or short (0-12 min) interviews.
  data$long <- data$interviewDuration>120
  data$short <- between(data$interviewDuration, 0, 12)
  # NOTE(review): match() yields NA when a column is absent, and assigning at
  # an NA index is an error -- these renames assume the columns always exist.
  names(data)[match("a.Member_id",names(data))] <- "Member_id"
  names(data)[match("e.Interview_criteria",names(data))] <- "Interview_criteria"
  names(data)[match("d.consensus",names(data))] <- "consent"
  names(data)[match("consensus",names(data))] <- "consent"
  # Strip ONA group prefixes; the dots in the patterns act as regex wildcards.
  names(data) <- str_remove_all(names(data),"g.HH_confirmation.")
  names(data) <- str_remove_all(names(data),"q.q2.COVID.19.")
  # Drop the first record (presumably a test submission -- TODO confirm).
  data <- data[-1,]
  # "1" when the criteria answer is missing or confirms the right household.
  data <- data%>%
    mutate(Interview_criteria = ifelse(is.na(Interview_criteria) |
                                         Interview_criteria == "If most information is matching and respondent confirms that you are speaking with the selected household, please proceed with the survey","1","0"))
  return(data)
}
# Retrieve data from ona
# Download the round-5 PDM survey from the ONA/BRCiS account and clean it
# with prepare_data().
# NOTE(review): on download failure the error handler only prints a message
# and returns NULL, so prepare_data(NULL) below still errors -- consider
# returning early when the download fails.
get_data <- function(login, password ){
  sn_r5 <- tryCatch(onaDownload("Condensed_PDM_2020_Survey_R5_L",
                    "BRCiS",login,password, keepGroupNames=FALSE),
           error=function(e){message("can't access data")})
  return(prepare_data(sn_r5))
}
|
####################################################################
#' Plot Target's Distribution vs Another Variable
#'
#' Study the distribution of a target variable vs another variable, using a
#' counts panel (dodged bars per category) stacked over a proportions panel.
#' This function is quite similar to the funModeling's corrplot function.
#'
#' @param data Dataframe containing both variables.
#' @param target Character. Name of the main -target- variable (max 9 levels).
#' @param values Character. Name of the secondary variable to split by.
#' @param top Integer. Filter and plot the most n frequent for categorical values
#' @param breaks Integer. Number of splits for numerical values
#' @param custom_colours Boolean. Use custom colours function?
#' @param abc Boolean. Do you wish to sort by alphabetical order?
#' @param na.rm Boolean. Ignore NAs if needed
#' @param print Boolean. Print the table's result
#' @param save Boolean. Save the output plot in our working directory
#' @param subdir Character. Into which subdirectory do you wish to save the plot to?
#' @return The combined gtable from gridExtra::grid.arrange (drawn as a side
#'   effect).
#' @export
plot_distr <- function(data, target, values,
                       top = 10,
                       breaks = 10,
                       custom_colours = FALSE,
                       abc = FALSE,
                       na.rm = FALSE,
                       print = FALSE,
                       save = FALSE,
                       subdir = NA) {
  require(ggplot2)
  require(gridExtra)
  require(dplyr)
  # Pull the two columns by name; keep the original names for labelling.
  targets <- data[[target]]
  value <- data[[values]]
  targets_name <- colnames(data[target[1]])
  variable_name <- colnames(data[values[1]])
  # NOTE(review): message() returns NULL, so stop(message(...)) raises an
  # error with an empty message; the details are only shown via message().
  if (length(targets) != length(value)) {
    message("The targets and value vectors should be the same length.")
    stop(message(paste("Currently, targets has", length(targets),
                       "rows and value has", length(value))))
  }
  # More categories than requested splits: fall back to one group per level.
  if (length(unique(value)) > breaks & !is.numeric(value)) {
    message(paste0("You can't split ", values, " in ", breaks,
                   "! Trying with less instead..."))
    breaks <- length(unique(value))
  }
  if (length(unique(targets)) > 9) {
    stop("You should use a 'target' variable with max 9 different value!")
  }
  # Numeric secondary variable: bin it into quantile intervals.
  if (is.numeric(value)) {
    value <- cut(value, quantile(value, prob = seq(0, 1, length = breaks), type = 7, na.rm = T))
  }
  # Categorical with too many levels: lump the infrequent ones together
  # (lares::categ_reducer is a package helper).
  if (length(unique(value)) > top & (is.character(value) | is.factor(value))) {
    value <- lares::categ_reducer(value, top = min(rbind(top, breaks)))
  }
  df <- data.frame(targets = targets, value = value)
  if (na.rm == TRUE) {
    df <- df[complete.cases(df), ]
    # NOTE(review): dead branch -- complete.cases() has just removed every
    # NA, so this condition can never be TRUE here.
    if (sum(is.na(df$value)) > 0) {
      breaks <- breaks + 1
    }
  }
  # Counts and within-category percentages; `order` sorts quantile bins like
  # "(a,b]" by their numeric lower bound, everything else by frequency.
  freqs <- df %>% group_by(value, targets) %>%
    tally() %>% arrange(desc(n)) %>%
    mutate(p = round(100*n/sum(n),2)) %>% ungroup() %>%
    mutate(row = row_number(),
           order = ifelse(grepl("\\(|\\)", value),
                          as.numeric(as.character(substr(gsub(",.*", "", value), 2, 100))), row))
  if (abc == TRUE) {
    freqs <- freqs %>% mutate(order = rank(as.character(value)))
  }
  # Overall target distribution; pcum drives the reference lines below.
  distr <- df %>% group_by(targets) %>%
    tally() %>% arrange(n) %>%
    mutate(p = round(100*n/sum(n),2),
           pcum = cumsum(p))
  # Top panel: absolute counts, dodged by target level.
  count <- ggplot(freqs, aes(x=reorder(as.character(value), order), y=n,
                             fill=tolower(as.character(targets)),
                             label=n, ymax=max(n)*1.1)) +
    geom_col(position = "dodge") +
    geom_text(check_overlap = TRUE, position = position_dodge(0.9), size=3, vjust = -0.15) +
    labs(x = "", y = "Counter") + theme_minimal() +
    theme(legend.position="top", legend.title=element_blank())
  # Rotate x labels when there are many categories.
  if (length(unique(value)) >= 10) {
    count <- count + theme(axis.text.x = element_text(angle = 45, hjust=1))
  }
  # Bottom panel: within-category proportions, with dotted reference lines at
  # the overall target distribution for comparison.
  prop <- ggplot(freqs,
                 aes(x = value,
                     y = as.numeric(p/100),
                     fill=tolower(as.character(targets)),
                     label = p)) +
    geom_col(position = "fill") +
    geom_text(check_overlap = TRUE, size = 3.2,
              position = position_stack(vjust = 0.5)) +
    geom_hline(yintercept = distr$pcum[1:(nrow(distr)-1)]/100,
               colour = "purple", linetype = "dotted", alpha = 0.8) +
    theme_minimal() + coord_flip() + guides(fill=FALSE) +
    labs(x = "Proportions", y = "") +
    labs(caption = paste("Variables:", targets_name, "vs.", variable_name))
  if (length(unique(value)) > top) {
    showed <- max(c(length(unique(value)), top))
    count <- count + labs(caption = paste("Showing only the top", showed, "frequent values"))
  }
  # gg_fill_customs() is a package-internal palette helper.
  if (custom_colours == TRUE) {
    count <- count + gg_fill_customs()
    prop <- prop + gg_fill_customs()
  } else {
    count <- count + scale_fill_brewer(palette = "Blues")
    prop <- prop + scale_fill_brewer(palette = "Blues")
  }
  if (print == TRUE) {
    print(freqs %>% select(-order))
  }
  # Optionally save to <subdir>/viz_distr_<target>.vs.<variable>.png.
  if (save == TRUE) {
    file_name <- paste0("viz_distr_", targets_name, ".vs.", variable_name, ".png")
    if (!is.na(subdir)) {
      dir.create(file.path(getwd(), subdir))
      file_name <- paste(subdir, file_name, sep="/")
    }
    png(file_name, height = 1500, width = 2000, res = 300)
    grid.arrange(count, prop, ncol = 1, nrow = 2)
    dev.off()
  }
  # Plot the result
  return(grid.arrange(count, prop, ncol = 1, nrow = 2))
}
| /R/plot_distr.R | no_license | fxcebx/lares | R | false | false | 5,315 | r | ####################################################################
#' Plot Target's Distribution vs Another Variable
#'
#' Study the distribution of a target variable vs another variable. This
#' function is quite similar to the funModeling's corrplot function.
#'
#' @param data Dataframe
#' @param target Character. Name of the Main -target- variable
#' @param values Character. Name of the Secondary variable
#' @param top Integer. Filter and plot the most n frequent for categorical values
#' @param breaks Integer. Number of splits for numerical values
#' @param custom_colours Boolean. Use custom colours function?
#' @param abc Boolean. Do you wish to sort by alphabetical order?
#' @param na.rm Boolean. Ignore NAs if needed
#' @param print Boolean. Print the table's result
#' @param save Boolean. Save the output plot in our working directory
#' @param subdir Character. Into which subdirectory do you wish to save the plot to?
#' @export
plot_distr <- function(data, target, values,
top = 10,
breaks = 10,
custom_colours = FALSE,
abc = FALSE,
na.rm = FALSE,
print = FALSE,
save = FALSE,
subdir = NA) {
require(ggplot2)
require(gridExtra)
require(dplyr)
targets <- data[[target]]
value <- data[[values]]
targets_name <- colnames(data[target[1]])
variable_name <- colnames(data[values[1]])
if (length(targets) != length(value)) {
message("The targets and value vectors should be the same length.")
stop(message(paste("Currently, targets has", length(targets),
"rows and value has", length(value))))
}
if (length(unique(value)) > breaks & !is.numeric(value)) {
message(paste0("You can't split ", values, " in ", breaks,
"! Trying with less instead..."))
breaks <- length(unique(value))
}
if (length(unique(targets)) > 9) {
stop("You should use a 'target' variable with max 9 different value!")
}
if (is.numeric(value)) {
value <- cut(value, quantile(value, prob = seq(0, 1, length = breaks), type = 7, na.rm = T))
}
if (length(unique(value)) > top & (is.character(value) | is.factor(value))) {
value <- lares::categ_reducer(value, top = min(rbind(top, breaks)))
}
df <- data.frame(targets = targets, value = value)
if (na.rm == TRUE) {
df <- df[complete.cases(df), ]
if (sum(is.na(df$value)) > 0) {
breaks <- breaks + 1
}
}
freqs <- df %>% group_by(value, targets) %>%
tally() %>% arrange(desc(n)) %>%
mutate(p = round(100*n/sum(n),2)) %>% ungroup() %>%
mutate(row = row_number(),
order = ifelse(grepl("\\(|\\)", value),
as.numeric(as.character(substr(gsub(",.*", "", value), 2, 100))), row))
if (abc == TRUE) {
freqs <- freqs %>% mutate(order = rank(as.character(value)))
}
distr <- df %>% group_by(targets) %>%
tally() %>% arrange(n) %>%
mutate(p = round(100*n/sum(n),2),
pcum = cumsum(p))
count <- ggplot(freqs, aes(x=reorder(as.character(value), order), y=n,
fill=tolower(as.character(targets)),
label=n, ymax=max(n)*1.1)) +
geom_col(position = "dodge") +
geom_text(check_overlap = TRUE, position = position_dodge(0.9), size=3, vjust = -0.15) +
labs(x = "", y = "Counter") + theme_minimal() +
theme(legend.position="top", legend.title=element_blank())
if (length(unique(value)) >= 10) {
count <- count + theme(axis.text.x = element_text(angle = 45, hjust=1))
}
prop <- ggplot(freqs,
aes(x = value,
y = as.numeric(p/100),
fill=tolower(as.character(targets)),
label = p)) +
geom_col(position = "fill") +
geom_text(check_overlap = TRUE, size = 3.2,
position = position_stack(vjust = 0.5)) +
geom_hline(yintercept = distr$pcum[1:(nrow(distr)-1)]/100,
colour = "purple", linetype = "dotted", alpha = 0.8) +
theme_minimal() + coord_flip() + guides(fill=FALSE) +
labs(x = "Proportions", y = "") +
labs(caption = paste("Variables:", targets_name, "vs.", variable_name))
if (length(unique(value)) > top) {
showed <- max(c(length(unique(value)), top))
count <- count + labs(caption = paste("Showing only the top", showed, "frequent values"))
}
if (custom_colours == TRUE) {
count <- count + gg_fill_customs()
prop <- prop + gg_fill_customs()
} else {
count <- count + scale_fill_brewer(palette = "Blues")
prop <- prop + scale_fill_brewer(palette = "Blues")
}
if (print == TRUE) {
print(freqs %>% select(-order))
}
if (save == TRUE) {
file_name <- paste0("viz_distr_", targets_name, ".vs.", variable_name, ".png")
if (!is.na(subdir)) {
dir.create(file.path(getwd(), subdir))
file_name <- paste(subdir, file_name, sep="/")
}
png(file_name, height = 1500, width = 2000, res = 300)
grid.arrange(count, prop, ncol = 1, nrow = 2)
dev.off()
}
# Plot the result
return(grid.arrange(count, prop, ncol = 1, nrow = 2))
}
|
/Courseworks/Stat133_Statistical_Data_Computation/Lab/lab06/code/lab06-script-Dui-Lee.R | no_license | duilee/workspace | R | false | false | 5,864 | r | ||
# SJDM 20/06/2014
# A function to map the stock assessment boundaries for BET/YFT and SKJ
# The corners of all the subregions (as specified in mufdager) are listed below
# and the function call determines which ones are displayed.
#
# Arguments:
#   reg.keep      character vector of region codes whose boundaries (and,
#                 where defined in reg.txt, numeric labels) are drawn
#   reg.highlight region codes to shade in red; pass NULL or the string
#                 'NULL' (kept for backward compatibility) to disable
#   linesize      length-2 numeric: boundary line width and label text size
#   cols          colour used for the boundary lines and region labels
# Returns the ggplot/ggmap object (drawn when auto-printed).
plot.map.region.boundaries <- function(reg.keep, reg.highlight, linesize = c(2,6), cols = "white")
{
  require(scales)
  require(ggplot2)
  require(ggmap)
  # Bug fix: setInternet2() is Windows-only and defunct in modern R, where
  # calling it aborts the function. Keep it for legacy setups but never let
  # it error out.
  try(setInternet2(TRUE), silent = TRUE)
  # Currently unused; retained as reference label positions.
  reg.names <- data.frame(x=c(145,180,160,180), y=c(30,30,-25,-25), r=c('1','2','5','6'))
  # Longitudes (x) and latitudes (y) of each subregion polygon's corners, in
  # drawing order with the first point repeated to close the polygon.
  reg.bounds.x <- list(BR1 = c(120,170,170,120,120),
                       BR2 = c(170,210,210,170,170),
                       BR3 = c(120,170,170,120,120),
                       BR4 = c(170,210,210,170,170),
                       BR5 = c(140,170,170,140,140),
                       BR6 = c(170,210,210,170,170),
                       BR7 = c(110,140,140,110,110),
                       BR8 = c(140,160,160,155,155,140,140),
                       BR9 = c(140,150,150,140,140),
                       BR31 = c(140,155,155,160,160,170,170,140,140),
                       BR72 = c(130,140,140,130,130),
                       BR3A = c(110,130,130,110,110),
                       BR37 = c(130,140,140,130,130),
                       BR5A = c(140,170,170,140,140,150,150,140,140),
                       SR1 = c(120,210,210,120,120),
                       SR2 = c(110,170,170,110,110),
                       SR3 = c(170,210,210,170,170),
                       SR21 = c(140,155,155,160,160,170,170,140,140),
                       SR5 = c(140,160,160,155,155,140,140),
                       SR4 = c(110,140,140,110,110),
                       SR41 = c(110,140,140,130,130,110,110),
                       SR42 = c(130,140,140,130,130),
                       SR22 = c(120,170,170,120,120))
  reg.bounds.y <- list(BR1 = c(20,20,50,50,20),
                       BR2 = c(20,20,50,50,20),
                       BR3 = c(-10,-10,20,20,-10),
                       BR4 = c(-10,-10,20,20,-10),
                       BR5 = c(-40,-40,-10,-10,-40),
                       BR6 = c(-40,-40,-10,-10,-40),
                       BR7 = c(-10,-10,20,20,-10),
                       BR8 = c(-10,-10,-5,-5,0,0,-10),
                       BR9 = c(-20,-20,-15,-15,-20),
                       BR31 = c(0,0,-5,-5,-10,-10,20,20,0),
                       BR72 = c(-10,-10,0,0,-10),
                       BR3A = c(-10,-10,20,20,-10),
                       BR37 = c(0,0,20,20,0),
                       BR5A = c(-40,-40,-10,-10,-15,-15,-20,-20,-40),
                       SR1 = c(20,20,50,50,20),
                       SR2 = c(-20,-20,20,20,-20),
                       SR3 = c(-20,-20,20,20,-20),
                       SR21 = c(0,0,-5,-5,-20,-20,20,20,0),
                       SR5 = c(-20,-20,-5,-5,0,0,-20),
                       SR4 = c(-20,-20,20,20,-20),
                       SR41 = c(-20,-20,0,0,20,20,-20),
                       SR42 = c(0,0,20,20,0),
                       SR22 = c(-20,-20,20,20,-20))
  # Label text and placement for the regions that get a printed number.
  reg.txt <- data.frame(reg = c('BR1','BR2','BR31','BR4','BR5','BR6','BR7','BR8','BR9','SR1','SR2','SR3','SR4','SR5', 'SR22', 'BR3'),
                        x = c( 160, 200, 160, 200, 160, 200, 130, 150, 145, 170, 160, 200, 120, 150, 160, 160),
                        y = c( 40, 40, 15, 15, -20, -20, 15, -5, -19.5, 40, 10, 10, 10, -15, 10, 15),
                        r = c( '1', '2', '3', '4', '5', '6', '7', '8', '9', '1', '2', '3', '4', '5', '2', '3'))
  # Satellite basemap of the western/central Pacific (needs internet access).
  reg.map <- get_map(location = c(160,5), zoom = 3, maptype = 'satellite')# maptype = 'roadmap')
  # p = ggmap(reg.map, fullpage=TRUE)
  p <- ggmap(reg.map)
  # Accept either a real NULL or the legacy string 'NULL' to disable
  # highlighting (the old `reg.highlight == 'NULL'` test failed on NULL and
  # on vectors of length > 1).
  if (is.null(reg.highlight) || identical(reg.highlight, 'NULL'))
  {
    print('Example Plot')
  } else
  {
    for(i in reg.highlight)
    {
      dat <- data.frame(x=reg.bounds.x[[i]], y=reg.bounds.y[[i]])
      # Bug fix: fill/alpha are constants, so they belong outside aes();
      # inside aes() they were mapped through scales rather than rendered as
      # 50%-transparent red.
      p <- p + geom_polygon(data=dat, mapping=aes(x=x, y=y), fill='red', alpha=1/2)
    }
  }
  # Draw the requested region boundaries and their labels.
  for(i in reg.keep)
  {
    dat <- data.frame(x=reg.bounds.x[[i]], y=reg.bounds.y[[i]])
    dat.txt <- reg.txt[reg.txt$reg == i,]
    p <- p + geom_path(data=dat, mapping=aes(x=x, y=y), colour=cols, size=linesize[1])
    p <- p + geom_text(data=dat.txt, aes(x=x, y=y, label=r), colour=cols, hjust=0, vjust=0, size=linesize[2])
  }
  p <- p + theme(legend.position="none")
  p <- p + xlab("") + ylab("")
  p
}
| /R4MFCL.src/R4MFCL/R/plot.map.region.boundaries.r | no_license | hoyles/r4mfcl_git | R | false | false | 4,621 | r | # SJDM 20/06/2014
# A function to map the stock assessment boundaries for BET/YFT and SKJ
# The corners of all the subregions (as specified in mufdager) are listed below
# and the function call determines which ones are displayed
plot.map.region.boundaries = function(reg.keep, reg.highlight, linesize = c(2,6), cols = "white")
{
require(scales)
require(ggplot2)
require(ggmap)
setInternet2(TRUE)
reg.names = data.frame(x=c(145,180,160,180), y=c(30,30,-25,-25), r=c('1','2','5','6'))
reg.bounds.x = list(BR1 = c(120,170,170,120,120),
BR2 = c(170,210,210,170,170),
BR3 = c(120,170,170,120,120),
BR4 = c(170,210,210,170,170),
BR5 = c(140,170,170,140,140),
BR6 = c(170,210,210,170,170),
BR7 = c(110,140,140,110,110),
BR8 = c(140,160,160,155,155,140,140),
BR9 = c(140,150,150,140,140),
BR31 = c(140,155,155,160,160,170,170,140,140),
BR72 = c(130,140,140,130,130),
BR3A = c(110,130,130,110,110),
BR37 = c(130,140,140,130,130),
BR5A = c(140,170,170,140,140,150,150,140,140),
SR1 = c(120,210,210,120,120),
SR2 = c(110,170,170,110,110),
SR3 = c(170,210,210,170,170),
SR21 = c(140,155,155,160,160,170,170,140,140),
SR5 = c(140,160,160,155,155,140,140),
SR4 = c(110,140,140,110,110),
SR41 = c(110,140,140,130,130,110,110),
SR42 = c(130,140,140,130,130),
SR22 = c(120,170,170,120,120))
reg.bounds.y = list(BR1 = c(20,20,50,50,20),
BR2 = c(20,20,50,50,20),
BR3 = c(-10,-10,20,20,-10),
BR4 = c(-10,-10,20,20,-10),
BR5 = c(-40,-40,-10,-10,-40),
BR6 = c(-40,-40,-10,-10,-40),
BR7 = c(-10,-10,20,20,-10),
BR8 = c(-10,-10,-5,-5,0,0,-10),
BR9 = c(-20,-20,-15,-15,-20),
BR31 = c(0,0,-5,-5,-10,-10,20,20,0),
BR72 = c(-10,-10,0,0,-10),
BR3A = c(-10,-10,20,20,-10),
BR37 = c(0,0,20,20,0),
BR5A = c(-40,-40,-10,-10,-15,-15,-20,-20,-40),
SR1 = c(20,20,50,50,20),
SR2 = c(-20,-20,20,20,-20),
SR3 = c(-20,-20,20,20,-20),
SR21 = c(0,0,-5,-5,-20,-20,20,20,0),
SR5 = c(-20,-20,-5,-5,0,0,-20),
SR4 = c(-20,-20,20,20,-20),
SR41 = c(-20,-20,0,0,20,20,-20),
SR42 = c(0,0,20,20,0),
SR22 = c(-20,-20,20,20,-20))
reg.txt = data.frame(reg = c('BR1','BR2','BR31','BR4','BR5','BR6','BR7','BR8','BR9','SR1','SR2','SR3','SR4','SR5', 'SR22', 'BR3'),
x = c( 160, 200, 160, 200, 160, 200, 130, 150, 145, 170, 160, 200, 120, 150, 160, 160),
y = c( 40, 40, 15, 15, -20, -20, 15, -5, -19.5, 40, 10, 10, 10, -15, 10, 15),
r = c( '1', '2', '3', '4', '5', '6', '7', '8', '9', '1', '2', '3', '4', '5', '2', '3'))
reg.map = get_map(location = c(160,5), zoom = 3, maptype = 'satellite')# maptype = 'roadmap')
# p = ggmap(reg.map, fullpage=TRUE)
p = ggmap(reg.map)
if(reg.highlight == 'NULL')
{
print('Example Plot')
} else
{
for(i in reg.highlight)
{
dat = data.frame(x=reg.bounds.x[[i]],y=reg.bounds.y[[i]])
p = p + geom_polygon(data=dat, mapping=aes(x=x, y=y, fill='red', alpha=1/2))
}
}
for(i in reg.keep)
{
dat = data.frame(x=reg.bounds.x[[i]],y=reg.bounds.y[[i]])
dat.txt = reg.txt[reg.txt$reg == i,]
p = p + geom_path(data=dat, mapping=aes(x=x, y=y), colour=cols, size=linesize[1])
p = p + geom_text(data=dat.txt, aes(x=x, y=y, label=r), colour=cols, hjust=0, vjust=0, size=linesize[2])
}
p = p + theme(legend.position="none")
p = p + xlab("") + ylab("")
p
}
|
# Tutorial: plotting several time series on one set of axes, in four passes
# that each improve on the previous plot.
# Data: brittleness index from three series (TK104, TK105, TK107).
brittle <- read.csv('http://datasets.connectmv.com/file/brittleness-index.csv')
summary(brittle)
# NOTE(review): type="png256" writes PNG data even though the filenames end
# in ".jpg" -- possibly intentional, but worth confirming.
# Pass 1: a single series with default axes.
bitmap(file='../images/brittleness-single.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red")
dev.off()
# Pass 2: all three series, but the axes were set up from TK104 alone, so
# the other two series may fall outside the plotting region.
bitmap(file='../images/brittleness-default.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red")
lines(brittle$TK105, type="l", col="black")
lines(brittle$TK107, type="l", col="darkgreen")
dev.off()
# Pass 3: compute a common y-range across all three series first so nothing
# is clipped, and add axis labels.
rng.104 <- range(brittle$TK104, na.rm=TRUE)
rng.105 <- range(brittle$TK105, na.rm=TRUE)
rng.107 <- range(brittle$TK107, na.rm=TRUE)
ylim <- c(min(rng.104, rng.105, rng.107), max(rng.104, rng.105, rng.107))
bitmap(file='../images/brittleness-better.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red", ylim=ylim, ylab="Brittleness values", xlab="Sequence order of batches")
lines(brittle$TK105, type="l", col="black")
lines(brittle$TK107, type="l", col="darkgreen")
dev.off()
# Add legend
# ----------
# Pass 4: the same plot, plus a legend placed at data coordinates (15, 720).
rng.104 <- range(brittle$TK104, na.rm=TRUE)
rng.105 <- range(brittle$TK105, na.rm=TRUE)
rng.107 <- range(brittle$TK107, na.rm=TRUE)
ylim = c(min(rng.104, rng.105, rng.107), max(rng.104, rng.105, rng.107))
bitmap(file='../images/brittleness-best.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red", ylim=ylim, ylab="Brittleness values", xlab="Sequence order")
lines(brittle$TK105, type="l", col="black")
lines(brittle$TK107, type="l", col="darkgreen")
legend(x=15, y=720, legend=c("TK104", "TK105", "TK107"), lwd = c(2, 2, 2), col=c("red", "black", "darkgreen"))
dev.off()
summary(brittle)
bitmap(file='../images/brittleness-single.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red")
dev.off()
bitmap(file='../images/brittleness-default.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red")
lines(brittle$TK105, type="l", col="black")
lines(brittle$TK107, type="l", col="darkgreen")
dev.off()
rng.104 <- range(brittle$TK104, na.rm=TRUE)
rng.105 <- range(brittle$TK105, na.rm=TRUE)
rng.107 <- range(brittle$TK107, na.rm=TRUE)
ylim <- c(min(rng.104, rng.105, rng.107), max(rng.104, rng.105, rng.107))
bitmap(file='../images/brittleness-better.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red", ylim=ylim, ylab="Brittleness values", xlab="Sequence order of batches")
lines(brittle$TK105, type="l", col="black")
lines(brittle$TK107, type="l", col="darkgreen")
dev.off()
# Add legend
# ----------
rng.104 <- range(brittle$TK104, na.rm=TRUE)
rng.105 <- range(brittle$TK105, na.rm=TRUE)
rng.107 <- range(brittle$TK107, na.rm=TRUE)
ylim = c(min(rng.104, rng.105, rng.107), max(rng.104, rng.105, rng.107))
bitmap(file='../images/brittleness-best.jpg', type="png256", res=300, height = 5, width = 9, pointsize=12)
plot(brittle$TK104, type="l", col="red", ylim=ylim, ylab="Brittleness values", xlab="Sequence order")
lines(brittle$TK105, type="l", col="black")
lines(brittle$TK107, type="l", col="darkgreen")
legend(x=15, y=720, legend=c("TK104", "TK105", "TK107"), lwd = c(2, 2, 2), col=c("red", "black", "darkgreen"))
dev.off() |
library(SI)
### Name: SI.SPM
### Title: Stochastic Point Method
### Aliases: SI.SPM
### Keywords: SPM
### ** Examples
## To integrate exp(x) from -1 to 1
## (exact value is e - 1/e, approx. 2.3504)
set.seed(0)
h <- function(x){
  exp(x)
}
N <- 100000
## exp(1) is the maximum of h on [-1, 1]; presumably the bounding value
## required by the stochastic point method -- see ?SI.SPM to confirm.
## I1 = Monte Carlo estimate of the integral, VarI1 = its variance estimate.
SPMresult <- SI.SPM(h,-1,1,exp(1),N)
I1 <- SPMresult[[1]]
VarI1 <- SPMresult[[2]]
| /data/genthat_extracted_code/SI/examples/SI.SPM.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 298 | r | library(SI)
### Name: SI.SPM
### Title: Stochastic Point Method
### Aliases: SI.SPM
### Keywords: SPM
### ** Examples
## To integrate exp(x) from -1 to 1
set.seed(0)
h <- function(x){
exp(x)
}
N <- 100000
SPMresult <- SI.SPM(h,-1,1,exp(1),N)
I1 <- SPMresult[[1]]
VarI1 <- SPMresult[[2]]
|
#' List of "control" mouse gene IDs
#'
#' A list of ensembl mouse gene IDs to serve as
#' an example control gene set.
#'
#' @format A character list with 7872 entries:
#' \describe{
#'   \item{geneID}{ensembl gene ID}
#' }
#' @source Taliaferro Lab FMR1 localization
#' analysis. These genes are expressed in CAD
#' cells and do not depend on FMR1 for
#' efficient localization to neuronal
#' projections. This gene list excludes all
#' genes in case_genes.
"ctrl_genes"
| /R/ctrl_genes.R | no_license | TaliaferroLab/FeatureReachR | R | false | false | 478 | r | #' List of "control" mouse gene IDs
#'
#' A list of ensembl mouse gene IDs to serve as
#' an example control gene set.
#'
#' @format A character list with 7872 entries:
#' \describe{
#' \item{geneID}{ensembl gene ID}
#' }
#' @source Taliaferro Lab FMR1 localization
#' analysis. These genes are expessed in CAD
#' cells and do not depend on FMR1 for
#' efficient localization to neuronal
#' projections. This genes list excludes all
#' genes in case_genes.
"ctrl_genes"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncvxclustr.R
\docType{package}
\name{ncvxclustr}
\alias{ncvxclustr}
\alias{ncvxclustr-package}
\title{ncvxclustr: non-convex clustering}
\description{
algorithm to solve the clustering problem by non-convex penalization
}
\section{Functions}{
}
| /man/ncvxclustr.Rd | permissive | wenshuoliu/ncvxclustr | R | false | true | 325 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncvxclustr.R
\docType{package}
\name{ncvxclustr}
\alias{ncvxclustr}
\alias{ncvxclustr-package}
\title{ncvxclustr: non-convex clustering}
\description{
algorithm to solve the clustering problem by non-convex penalization
}
\section{Functions}{
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utils.R
\name{.splitIndex}
\alias{.splitIndex}
\title{Split a vector into groups}
\usage{
.splitIndex(index, nGroups, randomize = FALSE)
}
\arguments{
\item{index}{the vector of index values to be split into groups}
\item{nGroups}{number of groups to be generated}
\item{randomize}{should the groups contain a random or ordered sampling from
the index vector}
}
\value{
returns a list containing `groups` and `groupIndex` both of which are
lists of length nGroups
}
\description{
Split a vector into groups
}
| /man/dot-splitIndex.Rd | no_license | pmbrophy/mspReader | R | false | true | 591 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utils.R
\name{.splitIndex}
\alias{.splitIndex}
\title{Split a vector into groups}
\usage{
.splitIndex(index, nGroups, randomize = FALSE)
}
\arguments{
\item{index}{the vector of index values to be split into groups}
\item{nGroups}{number of groups to be generated}
\item{randomize}{should the groups contain a random or ordered sampling from
the index vector}
}
\value{
returns a list containing `groups` and `groupIndex` both of which are
lists of length nGroups
}
\description{
Split a vector into groups
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.R
\name{\%find\%}
\alias{\%find\%}
\title{Magically grepl without NAs}
\usage{
x \%find\% y
}
\arguments{
\item{y}{}
}
\description{
Magically grepl without NAs
}
| /man/grapes-find-grapes.Rd | no_license | blueprint-ade/bputils | R | false | true | 246 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.R
\name{\%find\%}
\alias{\%find\%}
\title{Magically grepl without NAs}
\usage{
x \%find\% y
}
\arguments{
\item{y}{}
}
\description{
Magically grepl without NAs
}
|
## Build the list of random-effects design-matrix row blocks (rows of t(Z))
## for an nlme::lme fit, mimicking lme4's Ztlist component.
## Relies on extract.lmeDesign() for the Z matrix and on model$dims for the
## number of grouping factors (Q), groups (ngrps) and REs per factor (qvec).
Ztlist.lme <- function(model) {
  Z <- Matrix(extract.lmeDesign(model)$Z)
  Zt <- t(Z)
  ## For grouping factor i, compute which rows of Zt belong to each of its
  ## random effects (effects are interleaved, hence the by = nt stride).
  getInds <- function(i) {
    n <- model$dims$ngrps[i] * model$dims$qvec[i] ## number of elements in this block
    # n <- diff(object@Gp)[i] ## number of elements in this block
    nt <- model$dims$qvec[i] ## number of REs
    inds <- lapply(seq(nt),seq, to = n, by = nt) ## pull out individual RE indices
    ## NOTE(review): this lapply is a no-op even though the trailing comment
    ## says "add group offset" -- the lme4 counterpart adds object@Gp[i]
    ## here.  Verify whether an offset is required when model$dims$Q > 1.
    inds <- lapply(inds, function(x) x ) ## add group offset
  }
  inds <- do.call(c,lapply(seq_len(model$dims$Q),getInds))
  Ztlist <- lapply(inds,function(i) Zt[i,])
  return(Ztlist)
}
## for lme4
## Assemble names for all variance-covariance parameters of a fitted mixed
## model: one set of labels per random-effects term found in object@cnms.
## diag.only/old/prefix are forwarded to mkPfun(), which builds the labels
## for a single term.
tnames <- function(object,diag.only = FALSE,old = TRUE,prefix = NULL) {
  label_term <- mkPfun(diag.only = diag.only, old = old, prefix = prefix)
  per_term <- mapply(label_term, names(object@cnms), object@cnms)
  c(unlist(per_term))
}
## Factory for a labelling function used to name variance components.
## The returned function(g, e) builds labels for grouping factor `g` with
## coefficient names `e`:
##   old = TRUE : classic labels "g.e1", "g.e2.e1", ... (diagonal kept bare)
##   old = FALSE: "e|g"-style labels, optionally prefixed via `prefix`
##                (prefix[1] for diagonal entries, prefix[2] for covariances)
##   diag.only  : keep only the diagonal (variance) labels
mkPfun <- function(diag.only = FALSE, old = TRUE, prefix = NULL){
  function(g, e) {
    lbl <- outer(e, e, paste, sep = ".")
    if (old) {
      diag(lbl) <- e
    } else {
      lbl[] <- paste(lbl, g, sep = "|")
      if (!is.null(prefix)) lbl[] <- paste(prefix[2], lbl, sep = "_")
      diag(lbl) <- paste(e, g, sep = "|")
      if (!is.null(prefix)) diag(lbl) <- paste(prefix[1], diag(lbl), sep = "_")
    }
    # lower triangle (incl. diagonal) in column-major order, or just the diagonal
    out <- if (diag.only) diag(lbl) else lbl[lower.tri(lbl, diag = TRUE)]
    if (old) paste(g, out, sep = ".") else out
  }
}
## NOTE(review): copy-pasted fragment of lme4's getME machinery (an element
## of a surrounding switch/list call); not runnable on its own -- `object`,
## `cnms`, `PR` and `inds` are free variables here, and the trailing comma
## belongs to the enclosing call.  Also note the apparent mismatch: `inds2`
## is computed but `inds` is what the setNames()/lapply() below consumes.
"Ztlist" =
{
  getInds <- function(i) {
    n2 <- diff(object@Gp)[i] ## number of elements in this block
    nt2 <- length(cnms[[i]]) ## number of REs
    inds2 <- lapply(seq(nt2),seq,to = n2,by = nt2) ## pull out individual RE indices
    inds2 <- lapply(inds2,function(x) x + object@Gp[i]) ## add group offset
  }
  inds2 <- do.call(c,lapply(seq_along(cnms),getInds))
  setNames(lapply(inds,function(i) PR$Zt[i,]),
           tnames(object,diag.only = TRUE))
},
Z <- Matrix(extract.lmeDesign(model)$Z)
Zt <- t(Z)
getInds <- function(i) {
n <- model$dims$ngrps[i] * model$dims$qvec[i] ## number of elements in this block
# n <- diff(object@Gp)[i] ## number of elements in this block
nt <- model$dims$qvec[i] ## number of REs
inds <- lapply(seq(nt),seq, to = n, by = nt) ## pull out individual RE indices
inds <- lapply(inds, function(x) x ) ## add group offset
}
inds <- do.call(c,lapply(seq_len(model$dims$Q),getInds))
Ztlist <- lapply(inds,function(i) Zt[i,])
return(Ztlist)
}
## for lme4
tnames <- function(object,diag.only = FALSE,old = TRUE,prefix = NULL) {
pfun <- mkPfun(diag.only = diag.only, old = old, prefix = prefix)
c(unlist(mapply(pfun, names(object@cnms), object@cnms)))
}
mkPfun <- function(diag.only = FALSE, old = TRUE, prefix = NULL){
local({
function(g,e) {
mm <- outer(e,e,paste,sep = ".")
if(old) {
diag(mm) <- e
} else {
mm[] <- paste(mm,g,sep = "|")
if (!is.null(prefix)) mm[] <- paste(prefix[2],mm,sep = "_")
diag(mm) <- paste(e,g,sep = "|")
if (!is.null(prefix)) diag(mm) <- paste(prefix[1],diag(mm),sep = "_")
}
mm <- if (diag.only) diag(mm) else mm[lower.tri(mm,diag = TRUE)]
if(old) paste(g,mm,sep = ".") else mm
}
})
}
"Ztlist" =
{
getInds <- function(i) {
n2 <- diff(object@Gp)[i] ## number of elements in this block
nt2 <- length(cnms[[i]]) ## number of REs
inds2 <- lapply(seq(nt2),seq,to = n2,by = nt2) ## pull out individual RE indices
inds2 <- lapply(inds2,function(x) x + object@Gp[i]) ## add group offset
}
inds2 <- do.call(c,lapply(seq_along(cnms),getInds))
setNames(lapply(inds,function(i) PR$Zt[i,]),
tnames(object,diag.only = TRUE))
}, |
## Load the household power consumption data for the plotting assignment:
## download and unzip the archive if it is not already present, read the
## full table, and return only the observations for 1-2 Feb 2007 with a
## parsed POSIXct DateTime column.
## Side effects: may download a zip into the working directory, unzip it
## there, and attaches the lubridate package.
loadData <- function() {
# download zip file containing data if it hasn't already been downloaded
    zipUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    zipFile <- "exdata_data_household_power_consumption.zip"
    if (!file.exists(zipFile)) {
        download.file(zipUrl, zipFile, mode = "wb")
    }
    # unzip zip file containing data if data directory doesn't already exist
    fileName <- "household_power_consumption.txt"
    if (!file.exists(fileName)) {
        unzip(zipFile)
    }
    library(lubridate)
    data <- read.table(fileName, header = TRUE, sep = ";", dec = ".", stringsAsFactors = FALSE)
    # keep only the two days of interest (Date values are d/m/Y strings)
    sub_data <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
    rm(data)
    # combine the Date and Time strings into a single POSIXct column
    sub_data$DateTime <- paste(sub_data$Date, sub_data$Time, sep = " ")
    sub_data$DateTime <- dmy_hms(sub_data$DateTime)
    sub_data$Global_active_power <- as.numeric(sub_data$Global_active_power)
    sub_data$Sub_metering_1 <- as.numeric(sub_data$Sub_metering_1)
    # NOTE(review): sub-metering 2/3 are kept as character while 1 is made
    # numeric; confirm downstream plotting code expects this asymmetry.
    sub_data$Sub_metering_2 <- as.character(sub_data$Sub_metering_2)
    sub_data$Sub_metering_3 <- as.character(sub_data$Sub_metering_3)
    return (sub_data)
    # (translated) I have not tested how to use this file
    # (translated) I have not checked how to read sub_data from the caller
} | /Analise Exploratoria de Dados/Semana 1/ExData_Plotting1/loadData.R | no_license | vcoluci/Coursera | R | false | false | 1,242 | r | loadData <- function() {
# download zip file containing data if it hasn't already been downloaded
zipUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipFile <- "exdata_data_household_power_consumption.zip"
if (!file.exists(zipFile)) {
download.file(zipUrl, zipFile, mode = "wb")
}
# unzip zip file containing data if data directory doesn't already exist
fileName <- "household_power_consumption.txt"
if (!file.exists(fileName)) {
unzip(zipFile)
}
library(lubridate)
data <- read.table(fileName, header = TRUE, sep = ";", dec = ".", stringsAsFactors = FALSE)
sub_data <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
rm(data)
sub_data$DateTime <- paste(sub_data$Date, sub_data$Time, sep = " ")
sub_data$DateTime <- dmy_hms(sub_data$DateTime)
sub_data$Global_active_power <- as.numeric(sub_data$Global_active_power)
sub_data$Sub_metering_1 <- as.numeric(sub_data$Sub_metering_1)
sub_data$Sub_metering_2 <- as.character(sub_data$Sub_metering_2)
sub_data$Sub_metering_3 <- as.character(sub_data$Sub_metering_3)
return (sub_data)
#nao testei como usar este arquivo
#nao verifiquei como posso ler o sub_data no arquivo chamador
} |
## Put comments here that give an overall description of what your
## functions do
## The following 2 functions calculate the inverse of a matrix, if needed,
## and then stores the inserve matrix.
## Write a short comment describing this function
## The below function creates a special "matrix" object
## that can cache its inverse.
##makeCacheMatrix <- function(x = matrix()) {
##}
## Wrap a matrix together with a cache for its inverse.
## Returns a list of four accessors:
##   set(y)          replace the stored matrix (and drop any cached inverse)
##   get()           retrieve the stored matrix
##   setinverse(inv) store a computed inverse in the cache
##   getinverse()    retrieve the cached inverse, or NULL if not set yet
makeCacheMatrix <- function(m=matrix()) {
    cached <- NULL
    list(
        set = function(y) {
            # replacing the matrix invalidates whatever inverse was cached
            m <<- y
            cached <<- NULL
        },
        get = function() m,
        setinverse = function(inverse) cached <<- inverse,
        getinverse = function() cached
    )
}
## Write a short comment describing this function
## The below function computes the inverse of the special "matrix"
## returned by makeCacheMatrix. If the inverse has already been calculated
## (and the matrix has not changed), then the cacheSolve will retrieve
## the inverse from the cache.
## cacheSolve <- function(x, ...) {}
## Return a matrix that is the inverse of 'x'
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not
## changed), the cached value is returned instead of being recomputed.
## Extra arguments in ... are passed on to solve().
cacheSolve <- function(m, ...) {
    i <- m$getinverse()
    if(!is.null(i)) {
        message( "getting cached data" )
        return(i)
    }
    ## Compute, cache, then return the inverse explicitly.  The original
    ## returned the value of m$setinverse(), i.e. the result of an
    ## invisible `<<-`, so a first call returned its result invisibly.
    inv <- solve(m$get(), ...)
    m$setinverse(inv)
    inv
}
| /cachematrix.R | no_license | daviddewitt36/ProgrammingAssignment2 | R | false | false | 1,365 | r | ## Put comments here that give an overall description of what your
## functions do
## The following 2 functions calculate the inverse of a matrix, if needed,
## and then stores the inserve matrix.
## Write a short comment describing this function
## The below function creates a special "matrix" object
## that can cache its inverse.
##makeCacheMatrix <- function(x = matrix()) {
##}
makeCacheMatrix <- function(m=matrix()) {
i <- NULL
set <- function(y) {
m <<- y
i <<- NULL
}
get <- function() m
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## The below function computes the inverse of the special "matrix"
## returned by makeCacheMatrix. If the inverse has already been calculated
## (and the matrix has not changed), then the cacheSolve will retrieve
## the inverse from the cache.
## cacheSolve <- function(x, ...) {}
## Return a matrix that is the inverse of 'x'
cacheSolve <- function(m, ...) {
i <- m$getinverse()
if(!is.null(i)) {
message( "getting cached data" )
return(i)
}
m$setinverse( solve( m$get(),...))
}
|
# Practical Machine Learning quiz worked answers.
# NOTE(review): the answer notes below ("Try:", accuracy summaries, "96%",
# bare symbols like "Cement") were un-commented text, which made the whole
# script fail to parse when source()d; they are now commented out.  All
# executable statements are unchanged.
library(caret)
# 1.
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
#Set the variable y to be a factor variable in both the training and test set.
#Then set the seed to 33833. Fit (1) a random forest predictor relating the factor
#variable y to the remaining variables and (2) a boosted predictor using the "gbm" method.
#Fit these both with the train() command in the caret package.
vowel.train$y <- factor(vowel.train$y)
vowel.test$y <- factor(vowel.test$y)
set.seed(33833)
mod1 <- train(y ~ ., method = "rf", data = vowel.train)
mod2 <- train(y ~., method="gbm", data = vowel.train)
pred1 <- predict(mod1, vowel.test)
pred2 <- predict(mod2, vowel.test)
a1 <- sum( pred1 == vowel.test$y) / length(pred1)
a2 <- sum( pred2 == vowel.test$y) / length(pred2)
sum( pred1 == vowel.test$y & pred1 == pred2) / sum (pred1 == pred2)
#What are the accuracies for the two approaches on the test data set?
#What is the accuracy among the test set samples where the two methods agree?
# Try:
#RF Accuracy = 0.6082, GBM Accuracy = 0.5152, Agreement Accuracy = 0.6361
# 2.
library(caret)
library(gbm)
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
#Set the seed to 62433 and predict diagnosis with all the other variables using a random forest ("rf"),
#boosted trees ("gbm") and linear discriminant analysis ("lda") model.
#Stack the predictions together using random forests ("rf").
set.seed(62433)
mod1 <- train(diagnosis ~ ., method = "rf", data = training)
mod2 <- train(diagnosis ~ ., method = "gbm", data = training)
mod3 <- train(diagnosis ~ ., method = "lda", data = training)
pred1 <- predict(mod1, testing)
pred2 <- predict(mod2, testing)
pred3 <- predict(mod3, testing)
predDF <- data.frame (pred1, pred2, pred3, diagnosis = testing$diagnosis)
modC <- train(diagnosis ~ ., method = "rf", data = predDF)
pred <- predict(modC, testing)
sum( pred1 == testing$diagnosis) / dim(testing)[1]
sum( pred2 == testing$diagnosis) / dim(testing)[1]
sum( pred3 == testing$diagnosis) / dim(testing)[1]
sum( pred == testing$diagnosis) / dim(testing)[1]
#What is the resulting accuracy on the test set?
#Is it better or worse than each of the individual predictions?
# Try:
# Stacked Accuracy: 0.80 is better than all three other methods
# Stacked Accuracy: 0.88 is better than all three other methods
# 3.
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
#Set the seed to 233 and fit a lasso model to predict Compressive Strength.
#Which variable is the last coefficient to be set to zero as the penalty increases?
#(Hint: it may be useful to look up ?plot.enet).
set.seed(233)
mod <- train(CompressiveStrength ~ ., method = "lasso", data = training)
pred <- predict(mod, testing)
object <- enet(x=testing$Cement, y=pred, lambda = 0)
plot(mod)
# try:
# Cement
# 4.
library(lubridate) # For year() function below
setwd("E:/My Documents/R/C8") # NOTE(review): machine-specific path; adjust before running
dat = read.csv("gaData.csv")
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
#Fit a model using the bats() function in the forecast package to the training time series.
#Then forecast this model for the remaining time points.
#For how many of the testing points is the true value within the 95% prediction
#interval bounds?
library(forecast)
fit <- bats(training$visitsTumblr)
pred <- forecast(fit, data=testing)
(dim(testing)[1] - length(pred)) / dim(testing)[1]
# Try:
# 96%
# 5.
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
#Set the seed to 325 and fit a support vector machine using the e1071 package
#to predict Compressive Strength using the default settings.
#Predict on the testing set. What is the RMSE?
set.seed(325)
library(e1071)
library(ModelMetrics)
mod <- svm(CompressiveStrength ~ ., training)
pred <- predict(mod, testing)
rmse (testing$CompressiveStrength, pred)
# Try:
# 6.72
| /Q3.R | no_license | bzhtapp/LiftingWeights | R | false | false | 4,333 | r | library(caret)
1.
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
#Set the variable y to be a factor variable in both the training and test set.
#Then set the seed to 33833. Fit (1) a random forest predictor relating the factor
#variable y to the remaining variables and (2) a boosted predictor using the "gbm" method.
#Fit these both with the train() command in the caret package.
vowel.train$y <- factor(vowel.train$y)
vowel.test$y <- factor(vowel.test$y)
set.seed(33833)
mod1 <- train(y ~ ., method = "rf", data = vowel.train)
mod2 <- train(y ~., method="gbm", data = vowel.train)
pred1 <- predict(mod1, vowel.test)
pred2 <- predict(mod2, vowel.test)
a1 <- sum( pred1 == vowel.test$y) / length(pred1)
a2 <- sum( pred2 == vowel.test$y) / length(pred2)
sum( pred1 == vowel.test$y & pred1 == pred2) / sum (pred1 == pred2)
#What are the accuracies for the two approaches on the test data set?
#What is the accuracy among the test set samples where the two methods agree?
Try:
#RF Accuracy = 0.6082, GBM Accuracy = 0.5152, Agreement Accuracy = 0.6361
2.
library(caret)
library(gbm)
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
#Set the seed to 62433 and predict diagnosis with all the other variables using a random forest ("rf"),
#boosted trees ("gbm") and linear discriminant analysis ("lda") model.
#Stack the predictions together using random forests ("rf").
set.seed(62433)
mod1 <- train(diagnosis ~ ., method = "rf", data = training)
mod2 <- train(diagnosis ~ ., method = "gbm", data = training)
mod3 <- train(diagnosis ~ ., method = "lda", data = training)
pred1 <- predict(mod1, testing)
pred2 <- predict(mod2, testing)
pred3 <- predict(mod3, testing)
predDF <- data.frame (pred1, pred2, pred3, diagnosis = testing$diagnosis)
modC <- train(diagnosis ~ ., method = "rf", data = predDF)
pred <- predict(modC, testing)
sum( pred1 == testing$diagnosis) / dim(testing)[1]
sum( pred2 == testing$diagnosis) / dim(testing)[1]
sum( pred3 == testing$diagnosis) / dim(testing)[1]
sum( pred == testing$diagnosis) / dim(testing)[1]
#What is the resulting accuracy on the test set?
#Is it better or worse than each of the individual predictions?
Try:
Stacked Accuracy: 0.80 is better than all three other methods
Stacked Accuracy: 0.88 is better than all three other methods
3.
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
#Set the seed to 233 and fit a lasso model to predict Compressive Strength.
#Which variable is the last coefficient to be set to zero as the penalty increases?
#(Hint: it may be useful to look up ?plot.enet).
set.seed(233)
mod <- train(CompressiveStrength ~ ., method = "lasso", data = training)
pred <- predict(mod, testing)
object <- enet(x=testing$Cement, y=pred, lambda = 0)
plot(mod)
try:
Cement
4.
library(lubridate) # For year() function below
setwd("E:/My Documents/R/C8")
dat = read.csv("gaData.csv")
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
#Fit a model using the bats() function in the forecast package to the training time series.
#Then forecast this model for the remaining time points.
#For how many of the testing points is the true value within the 95% prediction
#interval bounds?
library(forecast)
fit <- bats(training$visitsTumblr)
pred <- forecast(fit, data=testing)
(dim(testing)[1] - length(pred)) / dim(testing)[1]
Try:
96%
5.
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
#Set the seed to 325 and fit a support vector machine using the e1071 package
#to predict Compressive Strength using the default settings.
#Predict on the testing set. What is the RMSE?
set.seed(325)
library(e1071)
library(ModelMetrics)
mod <- svm(CompressiveStrength ~ ., training)
pred <- predict(mod, testing)
rmse (testing$CompressiveStrength, pred)
Try:
6.72
|
library(MASS)
library(ggplot2)
library(gridExtra)
library(cowplot)
gamma=.9
gp.th=.1
gp.r=100
gp.sigma=1e-5
testpar = c(sigma2=.02,q=0.1,A=1.5,c0=0.1,b=1)
## Simulate n time points of calcium-like fluorescence data.
## par: named vector with sigma2 (observation noise variance), q (spike
##      probability), A (spike amplitude), c0 (initial level), b (baseline).
## Relies on the globals gamma (decay rate), gp.th/gp.r/gp.sigma (GP kernel
## parameters) and on MASS::mvrnorm for the multivariate normal draw.
## Returns a data.frame with columns y (observed trace), B (GP baseline
## draw), spikes (logical spike train) and time.
gen <- function(par,n){
    # Bernoulli(q) spike train
    spikes = runif(n) < par['q']
    # G[i,j] = gamma^(i-j) for i >= j: causal exponential-decay kernel
    G = outer(1:n,1:n,function(x,y) gamma^(x-y))
    G[upper.tri(G)]=0
    # squared-exponential GP covariance for the slow baseline drift
    gp.C = outer(1:n,1:n,function(x,y) {
                     gp.th*exp(-(x-y)^2/(4*gp.r^2))
    })
    gp.B = mvrnorm(n=1,mu=rep(0,n),Sigma=gp.C+diag(gp.sigma,n))
    # decay of the initial level c0 over time
    V = gamma^(seq(0,n-1))
    res = data.frame(y= par['c0']*V+par['A']*G%*%spikes +par['b']+ gp.B +rnorm(n,0,sqrt(par['sigma2'])),
                     B=gp.B,
                     spikes=spikes,
                     time=1:n)
    return(res)
}
## Gibbs sampler for the spike-inference model: each sweep alternates
## (1) Metropolis flips of the binary spike train, (2) a Gaussian draw of
## the slow baseline B from its full conditional, (3) Gaussian updates of
## theta = (A, b, c0), an inverse-gamma update of sigma2 and a beta update
## of q.  Uses the globals gamma, gp.th, gp.r, gp.sigma, testpar and
## MASS::mvrnorm.  Returns a list with the parameter trace (one row per
## sweep) plus the per-sweep spike and baseline samples.
gibbs_sampler <- function(data,niter=100){
    # hyperparameters
    empirical_baseline.mean = mean(data[1:20])
    empirical_baseline.sd = sd(data[1:20])
    theta.mu = c(1,empirical_baseline.mean,0)
    theta.Sigma = diag(c(1,1,1))
    theta.Sigma.inv = solve(theta.Sigma)
    alpha.sigma2=1
    beta.sigma2 = .1
    alpha.q=2
    beta.q=100
    data.len = length(data)
    # Gaussian process covariance matrix
    gp.C = outer(1:data.len,1:data.len,function(x,y) {
                     gp.th*exp(-(x-y)^2/(4*gp.r^2))
    })
    gp.C.inv = solve(gp.C+diag(gp.sigma,data.len))
    ## draw the initial state from the priors
    par = testpar
    par['sigma2'] = 1/rgamma(n=1,shape=alpha.sigma2,rate=beta.sigma2)
    theta = mvrnorm(n=1,mu=theta.mu, Sigma=theta.Sigma)
    #while(any(theta<0)) {
    #    theta = mvrnorm(n=1,mu=theta.mu, Sigma=theta.Sigma)
    #}
    par[c('A','b','c0')] = theta
    par['q'] = rbeta(1,alpha.q,beta.q)
    spikes = (runif(n=data.len)<alpha.q/(alpha.q+beta.q))
    B = rep(0,data.len)
    par.dataframe = data.frame(as.list(par))
    spikes.mat = matrix(0,data.len,niter)
    B.mat = matrix(0,data.len,niter)
    ## causal decay kernel and its Gram matrix (constant across sweeps)
    G = outer(1:data.len,1:data.len,function(x,y) gamma^(x-y))
    G[upper.tri(G)]=0
    V = gamma^(seq(0,data.len-1))
    W0 = t(G)%*%G
    for( i in 1:niter){
        ## flip spikes: Metropolis acceptance of toggling spike at time t
        yt = data-par['b']-par['c0']*V-B
        for(t in 1:data.len){
            logalphaMH = (1-2*spikes[t])*(-par['A']^2/par['sigma2']*sum(W0[t,spikes==1]) - (1-2*spikes[t])*par['A']^2/par['sigma2']*W0[t,t]/2+par['A']/par['sigma2']*sum(G[,t]*yt)+log(par['q']/(1-par['q'])))
            if(runif(1) < exp(logalphaMH)){
                spikes[t] = 1-spikes[t]
            }
        }
        ## sample B
        S = cbind(rowSums(G[,spikes==1]),rep(1,data.len),V)
        gp.Lambda = solve(gp.C.inv+diag(1/par['sigma2'],data.len))
        gp.Mu = gp.Lambda%*%(data-S%*%par[c('A','b','c0')])/par['sigma2']
        B = mvrnorm(1,mu=gp.Mu,Sigma=gp.Lambda)
        ## sample theta = (A, b, c0) from its Gaussian full conditional
        Lambda = solve(theta.Sigma.inv+t(S)%*%S/par['sigma2'])
        Mu = Lambda%*%(theta.Sigma.inv%*%theta.mu+t(S)%*%(data-B)/par['sigma2'])
        newpar = par
        newpar[c('A','b','c0')] = mvrnorm(n=1,mu=Mu,Sigma=Lambda)
        #while(any(newpar[c('A','b','c0')]<0)){
        #    newpar[c('A','b','c0')] = mvrnorm(n=1,mu=Mu,Sigma=Lambda)
        #}
        ## inverse-gamma update for sigma2, beta update for q
        newpar['sigma2'] = 1/rgamma(n=1,alpha.sigma2+data.len/2,
                                    beta.sigma2+0.5*sum((data-B-S%*%newpar[c('A','b','c0')])^2)
                         )
        newpar['q'] = rbeta(n=1,alpha.q+sum(spikes),beta.q+sum(1-spikes))
        par.dataframe = rbind(par.dataframe,newpar)
        spikes.mat[,i] = spikes
        B.mat[,i] = B
        par=newpar
        if(i%%10==0) cat(i," \r")
    }
    cat("\n")
    return(list(par=par.dataframe,spikes=spikes.mat,B=B.mat))
}
## Diagnostic plot comparing posterior summaries (averaged over the last
## `keep` samples) with the ground truth `gt` produced by gen(): the
## observed trace with true spikes/baseline, the posterior spike
## probability per time point, and individual posterior baseline draws
## overlaid on the true baseline.
## Side effects: draws the figure and writes it to "validation.png".
## Requires ggplot2, cowplot (plot_grid) and the global testpar.
validation <- function(samples,gt,keep=20){
    nsamples = ncol(samples$spikes)
    prob_spikes = rowMeans(samples$spikes[,(nsamples-keep+1):nsamples])
    baseline = rowMeans(samples$B[,(nsamples-keep+1):nsamples])
    time = 1:length(prob_spikes)
    ## posterior mean of the cumulative spike count up to each time point
    nspc = rowMeans(apply(samples$spikes[,(nsamples-keep+1):nsamples],2,cumsum))
    df=data.frame(time=time,
                  prob=prob_spikes,
                  nspc=nspc,
                  true_spikes=gt[,'spikes'],
                  true_B = gt[,'B'],
                  B=baseline,
                  Y=gt[,'y'])
    g_data=ggplot(df) + geom_line(aes(time,Y))+
        geom_vline(xintercept=time[gt$spikes==1],col="green")+
        geom_line(aes(time,true_B+testpar['b']),col="green")
    g_prob_spike = ggplot(df)+geom_line(aes(time,prob),col="red")
    #g_nspc = ggplot(df)+geom_line(aes(time,nspc)) + geom_line(aes(time,cumsum(true_spikes)),col="green")
    ## overlay each retained posterior baseline sample in orange
    g_baseline = ggplot(df)
    for( i in 1:keep){
        g_baseline = g_baseline + geom_line(data=data.frame(time=time,baseline=samples$B[,(nsamples-keep+i)]),
                                            mapping=aes(time,baseline),col="orange")
    }
    g_baseline = g_baseline + geom_line(aes(time,true_B))
    g = plot_grid(g_data,g_prob_spike,g_baseline,ncol=1,align="v")
    plot(g)
    ggsave("validation.png",g)
}
## Quick-look plot of Gibbs output: the raw trace, the posterior spike
## probability per time point, and the posterior mean cumulative spike
## count, all averaged over the last `keep` MCMC samples.
## Requires ggplot2 and cowplot (plot_grid) to be attached.
gibbs.show <- function(samples,data,keep=20){
    nsamples = ncol(samples$spikes)
    keep.idx = (nsamples-keep+1):nsamples
    prob_spikes = rowMeans(samples$spikes[,keep.idx])
    ## BUGFIX: `time` was previously computed from prob_spikes *before*
    ## prob_spikes was assigned, so it silently picked up a global (or
    ## errored).  Compute it after prob_spikes exists.
    time = seq_along(prob_spikes)
    nspc = rowMeans(apply(samples$spikes[,keep.idx],2,cumsum))
    df=data.frame(time=time,
                  prob=prob_spikes,
                  nspc=nspc,
                  ## BUGFIX: was gt$y, but `gt` is not defined in this
                  ## function; the trace comes in through the previously
                  ## unused `data` argument (a gen()-style data.frame).
                  Y=data$y)
    g_data=ggplot(df) + geom_line(aes(time,Y))
    g_prob_spike = ggplot(df)+geom_line(aes(time,prob),col="red")
    g_nspc = ggplot(df)+geom_line(aes(time,nspc))
    g = plot_grid(g_data,g_prob_spike,g_nspc,ncol=1,align="v")
    plot(g)
}
| /R/sampler_gp.R | no_license | giovannidiana/pnevmatikakis2013 | R | false | false | 5,576 | r | library(MASS)
library(ggplot2)
library(gridExtra)
library(cowplot)
gamma=.9
gp.th=.1
gp.r=100
gp.sigma=1e-5
testpar = c(sigma2=.02,q=0.1,A=1.5,c0=0.1,b=1)
gen <- function(par,n){
spikes = runif(n) < par['q']
G = outer(1:n,1:n,function(x,y) gamma^(x-y))
G[upper.tri(G)]=0
gp.C = outer(1:n,1:n,function(x,y) {
gp.th*exp(-(x-y)^2/(4*gp.r^2))
})
gp.B = mvrnorm(n=1,mu=rep(0,n),Sigma=gp.C+diag(gp.sigma,n))
V = gamma^(seq(0,n-1))
res = data.frame(y= par['c0']*V+par['A']*G%*%spikes +par['b']+ gp.B +rnorm(n,0,sqrt(par['sigma2'])),
B=gp.B,
spikes=spikes,
time=1:n)
return(res)
}
gibbs_sampler <- function(data,niter=100){
# hyperparameters
empirical_baseline.mean = mean(data[1:20])
empirical_baseline.sd = sd(data[1:20])
theta.mu = c(1,empirical_baseline.mean,0)
theta.Sigma = diag(c(1,1,1))
theta.Sigma.inv = solve(theta.Sigma)
alpha.sigma2=1
beta.sigma2 = .1
alpha.q=2
beta.q=100
data.len = length(data)
# Gaussian process covariance matrix
gp.C = outer(1:data.len,1:data.len,function(x,y) {
gp.th*exp(-(x-y)^2/(4*gp.r^2))
})
gp.C.inv = solve(gp.C+diag(gp.sigma,data.len))
par = testpar
par['sigma2'] = 1/rgamma(n=1,shape=alpha.sigma2,rate=beta.sigma2)
theta = mvrnorm(n=1,mu=theta.mu, Sigma=theta.Sigma)
#while(any(theta<0)) {
# theta = mvrnorm(n=1,mu=theta.mu, Sigma=theta.Sigma)
#}
par[c('A','b','c0')] = theta
par['q'] = rbeta(1,alpha.q,beta.q)
spikes = (runif(n=data.len)<alpha.q/(alpha.q+beta.q))
B = rep(0,data.len)
par.dataframe = data.frame(as.list(par))
spikes.mat = matrix(0,data.len,niter)
B.mat = matrix(0,data.len,niter)
G = outer(1:data.len,1:data.len,function(x,y) gamma^(x-y))
G[upper.tri(G)]=0
V = gamma^(seq(0,data.len-1))
W0 = t(G)%*%G
for( i in 1:niter){
## flip spikes
yt = data-par['b']-par['c0']*V-B
for(t in 1:data.len){
logalphaMH = (1-2*spikes[t])*(-par['A']^2/par['sigma2']*sum(W0[t,spikes==1]) - (1-2*spikes[t])*par['A']^2/par['sigma2']*W0[t,t]/2+par['A']/par['sigma2']*sum(G[,t]*yt)+log(par['q']/(1-par['q'])))
if(runif(1) < exp(logalphaMH)){
spikes[t] = 1-spikes[t]
}
}
## sample B
S = cbind(rowSums(G[,spikes==1]),rep(1,data.len),V)
gp.Lambda = solve(gp.C.inv+diag(1/par['sigma2'],data.len))
gp.Mu = gp.Lambda%*%(data-S%*%par[c('A','b','c0')])/par['sigma2']
B = mvrnorm(1,mu=gp.Mu,Sigma=gp.Lambda)
Lambda = solve(theta.Sigma.inv+t(S)%*%S/par['sigma2'])
Mu = Lambda%*%(theta.Sigma.inv%*%theta.mu+t(S)%*%(data-B)/par['sigma2'])
newpar = par
newpar[c('A','b','c0')] = mvrnorm(n=1,mu=Mu,Sigma=Lambda)
#while(any(newpar[c('A','b','c0')]<0)){
# newpar[c('A','b','c0')] = mvrnorm(n=1,mu=Mu,Sigma=Lambda)
#}
newpar['sigma2'] = 1/rgamma(n=1,alpha.sigma2+data.len/2,
beta.sigma2+0.5*sum((data-B-S%*%newpar[c('A','b','c0')])^2)
)
newpar['q'] = rbeta(n=1,alpha.q+sum(spikes),beta.q+sum(1-spikes))
par.dataframe = rbind(par.dataframe,newpar)
spikes.mat[,i] = spikes
B.mat[,i] = B
par=newpar
if(i%%10==0) cat(i," \r")
}
cat("\n")
return(list(par=par.dataframe,spikes=spikes.mat,B=B.mat))
}
validation <- function(samples, gt, keep = 20) {
  # Compare Gibbs-sampler output against ground truth `gt` (a data frame with
  # columns y, spikes, B) and write a three-panel diagnostic figure to
  # "validation.png": raw trace, posterior spike probability, and baseline draws.
  # NOTE(review): reads the global `testpar` for the offset 'b' -- confirm it
  # is defined wherever this is called.
  n_draws <- ncol(samples$spikes)
  kept <- (n_draws - keep + 1):n_draws             # indices of the retained draws
  prob_spikes <- rowMeans(samples$spikes[, kept])  # posterior spike probability per time point
  baseline <- rowMeans(samples$B[, kept])          # posterior mean baseline
  time <- seq_along(prob_spikes)
  nspc <- rowMeans(apply(samples$spikes[, kept], 2, cumsum))  # mean cumulative spike count
  df <- data.frame(time = time,
                   prob = prob_spikes,
                   nspc = nspc,
                   true_spikes = gt[, "spikes"],
                   true_B = gt[, "B"],
                   B = baseline,
                   Y = gt[, "y"])
  g_data <- ggplot(df) +
    geom_line(aes(time, Y)) +
    geom_vline(xintercept = time[gt$spikes == 1], col = "green") +
    geom_line(aes(time, true_B + testpar["b"]), col = "green")
  g_prob_spike <- ggplot(df) + geom_line(aes(time, prob), col = "red")
  # (cumulative spike-count panel intentionally disabled)
  # g_nspc <- ggplot(df) + geom_line(aes(time, nspc)) +
  #   geom_line(aes(time, cumsum(true_spikes)), col = "green")
  g_baseline <- ggplot(df)
  for (j in seq_len(keep)) {
    # One orange line per retained posterior draw of the baseline.
    draw <- data.frame(time = time, baseline = samples$B[, n_draws - keep + j])
    g_baseline <- g_baseline +
      geom_line(data = draw, mapping = aes(time, baseline), col = "orange")
  }
  g_baseline <- g_baseline + geom_line(aes(time, true_B))
  g <- plot_grid(g_data, g_prob_spike, g_baseline, ncol = 1, align = "v")
  plot(g)
  ggsave("validation.png", g)
}
gibbs.show <- function(samples, data, keep = 20) {
  # Plot the observed trace alongside the posterior spike probability and
  # mean cumulative spike count, averaged over the last `keep` Gibbs draws.
  # Used on real data where no ground truth is available (cf. validation()).
  nsamples <- ncol(samples$spikes)
  prob_spikes <- rowMeans(samples$spikes[, (nsamples - keep + 1):nsamples])
  # BUG FIX: `time` was previously computed from `prob_spikes` before it was
  # defined, and `Y` read the undefined global `gt$y` instead of the `data`
  # argument passed by the caller.
  time <- seq_along(prob_spikes)
  nspc <- rowMeans(apply(samples$spikes[, (nsamples - keep + 1):nsamples], 2, cumsum))
  df <- data.frame(time = time,
                   prob = prob_spikes,
                   nspc = nspc,
                   Y = data)
  g_data <- ggplot(df) + geom_line(aes(time, Y))
  g_prob_spike <- ggplot(df) + geom_line(aes(time, prob), col = "red")
  g_nspc <- ggplot(df) + geom_line(aes(time, nspc))
  g <- plot_grid(g_data, g_prob_spike, g_nspc, ncol = 1, align = "v")
  plot(g)
}
|
#--- Compute a "net social signal" per product by scaling raw social-signal
#--- counts onto the review scale, then save the result for downstream steps.
#--- Load input files
Social_Signals <- read.csv(file="/home/mono/Documents/Assortment/Top 25/Data/Inputs/Social_Signals.csv")
load(file="/home/mono/Documents/Assortment/Top 25/Data/ProcessedData/1_All_Review_Processed.RData")
#--- Prepare data: coerce every retailer column to numeric
for (i in seq_len(ncol(Social_Signals))) {
  Social_Signals[, i] <- as.numeric(Social_Signals[, i])
}
#--- Track retailer columns (captured before derived columns are appended)
Retailers <- colnames(Social_Signals)
#--- Calculate ratio of total reviews to total social signals
# System command: progress message for the calling GUI/driver
system('echo "# Calculating ratio..."; echo "65"')
#---------
Total_Reviews <- nrow(All_Review)
Total_Social_Signals <- sum(Social_Signals, na.rm = TRUE)
Review_Social_Signal_Ratio <- Total_Reviews / Total_Social_Signals
#--- Calculate social signals
# System command: progress message
system('echo "# Calculating social signals..."; echo "70"')
#---------
# Row totals over the original retailer columns only.
Social_Signals$Total_SS <- rowSums(Social_Signals[, seq_along(Retailers)], na.rm = TRUE)
#--- Calculate Net Social Signals: total signals expressed on the review scale
Social_Signals$Net_Social_Signal <- Social_Signals$Total_SS / Review_Social_Signal_Ratio
#--- Save data
# System command: progress message
system('echo "# Saving data..."; echo "75"')
#---------
save(Social_Signals,file="/home/mono/Documents/Assortment/Top 25/Data/ProcessedData/4_Social_Signal_Data.RData")
# Deliberate workspace reset between pipeline stages.
rm(list=ls())
| /SA/2. Anu_R_Scripts/Backup/4_Calculate_Social_Signals.R | no_license | haoybl/Data-Analysis | R | false | false | 1,211 | r |
#--- Derive a net social signal per product from raw social-signal counts,
#--- scaled onto the review scale, and persist it for the next pipeline step.
#--- Load input files
Social_Signals <- read.csv(file = "/home/mono/Documents/Assortment/Top 25/Data/Inputs/Social_Signals.csv")
load(file = "/home/mono/Documents/Assortment/Top 25/Data/ProcessedData/1_All_Review_Processed.RData")
#--- Prepare data: every column becomes numeric
Social_Signals[] <- lapply(Social_Signals, as.numeric)
#--- Remember the original retailer columns before derived ones are added
Retailers <- colnames(Social_Signals)
#--- Calculate ratio
# Progress message for the calling process
system('echo "# Calculating ratio..."; echo "65"')
#---------
Total_Reviews <- nrow(All_Review)
Total_Social_Signals <- sum(Social_Signals, na.rm = TRUE)
Review_Social_Signal_Ratio <- Total_Reviews / Total_Social_Signals
#--- Calculate social signals
# Progress message
system('echo "# Calculating social signals..."; echo "70"')
#---------
# Sum across the retailer columns, selected by name.
Social_Signals$Total_SS <- rowSums(Social_Signals[, Retailers], na.rm = TRUE)
#--- Net social signal: total signals expressed on the review scale
Social_Signals$Net_Social_Signal <- Social_Signals$Total_SS / Review_Social_Signal_Ratio
#--- Save data
# Progress message
system('echo "# Saving data..."; echo "75"')
#---------
save(Social_Signals, file = "/home/mono/Documents/Assortment/Top 25/Data/ProcessedData/4_Social_Signal_Data.RData")
rm(list = ls())
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STAR.R
\name{STAR.align.folder}
\alias{STAR.align.folder}
\title{Align all libraries in folder with STAR}
\usage{
STAR.align.folder(
input.dir,
output.dir,
index.dir,
star.path = STAR.install(),
fastp = install.fastp(),
paired.end = FALSE,
steps = "tr-ge",
adapter.sequence = "auto",
quality.filtering = FALSE,
min.length = 20,
mismatches = 3,
trim.front = 0,
max.multimap = 10,
alignment.type = "Local",
allow.introns = TRUE,
max.cpus = min(90, BiocParallel::bpparam()$workers),
wait = TRUE,
include.subfolders = "n",
resume = NULL,
multiQC = TRUE,
keep.contaminants = FALSE,
keep.unaligned.genome = FALSE,
script.folder = system.file("STAR_Aligner", "RNA_Align_pipeline_folder.sh", package =
"ORFik"),
script.single = system.file("STAR_Aligner", "RNA_Align_pipeline.sh", package = "ORFik")
)
}
\arguments{
\item{input.dir}{path to fast files to align, the valid input files will be search for from formats:
(".fasta", ".fastq", ".fq", or ".fa") with or without compression of .gz.
Also either paired end or single end reads. Pairs will automatically be detected from
similarity of naming, separated by something as .1 and .2 in the end. If files are renamed, where pairs
are not similarily named, this process will fail to find correct pairs!}
\item{output.dir}{directory to save indices, default:
paste0(dirname(arguments[1]), "/STAR_index/"), where arguments is the
arguments input for this function.}
\item{index.dir}{path to STAR index folder. Path returned from ORFik function
STAR.index, when you created the index folders.}
\item{star.path}{path to STAR, default: STAR.install(),
if you don't have STAR installed at default location, it will install it there,
set path to a runnable star if you already have it.}
\item{fastp}{path to fastp trimmer, default: install.fastp(), if you
have it somewhere else already installed, give the path. Only works for
unix (linux or Mac OS), if not on unix, use your favorite trimmer and
give the output files from that trimmer as input.dir here.}
\item{paired.end}{a logical: default FALSE, alternative TRUE. If TRUE, will auto detect
pairs by names. Can not be a combination of both TRUE and FALSE!\cr
If running in folder mode:
The folder must then contain an even number of files
and they must be named with the same prefix and sufix of either
_1 and _2, 1 and 2, etc. If SRR numbers are used, it will start on lowest and
match with second lowest etc.}
\item{steps}{a character, default: "tr-ge", trimming then genome alignment\cr
steps of depletion and alignment wanted:
The possible candidates you can use are:\cr
\itemize{
\item{tr : }{trim reads}
\item{co : }{contamination merged depletion}
\item{ph : }{phix depletion}
\item{rR : }{rrna depletion}
\item{nc : }{ncrna depletion}
\item{tR : }{trna depletion (Mature tRNA, so no intron checks done)}
\item{ge : }{genome alignment}
\item{all: }{run steps: "tr-co-ge" or "tr-ph-rR-nc-tR-ge", depending on if you
have merged contaminants or not}
}
If not "all", a subset of these ("tr-co-ph-rR-nc-tR-ge")\cr
If co (merged contaminants) is used, none of the specific contaminants can be specified,
since they should be a subset of co.\cr
The step where you align to the genome is usually always included, unless you
are doing pure contaminant analysis or only trimming.
For Ribo-seq and TCP(RCP-seq) you should do rR (ribosomal RNA depletion),
so when you made the
STAR index you need the rRNA step, either use rRNA from .gtf or manual download.
(usually just download a Silva rRNA database
for SSU&LSU at: https://www.arb-silva.de/) for your species.}
\item{adapter.sequence}{character, default: "auto". Auto detect adapter using fastp
adapter auto detection, checking first 1.5M reads. (Auto detection of adapter will
not work 100\% of the time (if the library is of low quality), then you must rerun
this function with specified adapter from fastp adapter analysis.
, using FASTQC or other adapter detection tools, else alignment will most likely fail!).
If already trimmed or trimming not wanted:
adapter.sequence = "disable" .You can manually assign adapter like:
"ATCTCGTATGCCGTCTTCTGCTTG" or "AAAAAAAAAAAAA". You can also specify one of the three
presets:\cr
\itemize{
\item{illumina (TrueSeq ~75/100 bp sequencing): }{AGATCGGAAGAGC}
\item{small_RNA (standard for ~50 bp sequencing): }{TGGAATTCTCGG}
\item{nextera: }{CTGTCTCTTATA}
}
Paired end auto detection uses overlap sequence of pairs, to use the slower
more secure paired end adapter detection, specify as: "autoPE".}
\item{quality.filtering}{logical, default FALSE. Not needed for modern
library prep of RNA-seq, Ribo-seq etc (usually < ~ 0.5\% of reads are removed).
If you are aligning bad quality data, set this to TRUE.\cr
These filters will then be applied (default of fastp), filter if:
\itemize{
\item{Number of N bases in read: }{> 5}
\item{Read quality: }{> 40\% of bases in the read are <Q15}
}}
\item{min.length}{20, minimum length of aligned read without mismatches
to pass filter. Anything under 20 is dangerous, as chance of random hits will
become high!}
\item{mismatches}{3, max non matched bases. Excludes soft-clipping, this only
filters reads that have defined mismatches in STAR.
Only applies for genome alignment step.}
\item{trim.front}{0, default trim 0 bases 5'. For Ribo-seq use default 0.
Ignored if tr (trim) is not one of the arguments in "steps"}
\item{max.multimap}{numeric, default 10. If a read maps to more locations than specified,
will skip the read. Set to 1 to only get unique mapping reads. Only applies for
genome alignment step. The depletions are allowing for multimapping.}
\item{alignment.type}{default: "Local": standard local alignment with soft-clipping allowed,
"EndToEnd" (global): force end-to-end read alignment, does not soft-clip.}
\item{allow.introns}{logical, default TRUE. Allow large gaps of N in reads
during genome alignment, if FALSE:
sets --alignIntronMax to 1 (no introns). NOTE: You will still get some spliced reads
if you assigned a gtf at the index step.}
\item{max.cpus}{integer, default: \code{min(90, BiocParallel:::bpparam()$workers)},
number of threads to use. Default is minimum of 90 and maximum cores - 2. So if you
have 8 cores it will use 6.}
\item{wait}{a logical (not \code{NA}) indicating whether the \R
interpreter should wait for the command to finish, or run it
asynchronously. This will be ignored (and the interpreter will
always wait) if \code{intern = TRUE}. When running the command
asynchronously, no output will be displayed on the \code{Rgui}
console in Windows (it will be dropped, instead).}
\item{include.subfolders}{"n" (no), do recursive search downwards for fast files if "y".}
\item{resume}{default: NULL, continue from step, lets say steps are "tr-ph-ge":
(trim, phix depletion, genome alignment) and resume is "ge", you will then use
the assumed already trimmed and phix depleted data and start at genome alignment,
useful if something crashed. Like if you specified wrong STAR version, but the trimming
step was completed. Resume mode can only run 1 step at the time.}
\item{multiQC}{logical, default TRUE. Do multiQC comparison of STAR
alignment between all the samples. Outputted in aligned/LOGS folder.
See ?STAR.multiQC}
\item{keep.contaminants}{logical, default FALSE. Create and keep
contaminant aligning bam files, default is to only keep unaligned fastq reads,
which will be further processed in "ge" genome alignment step. Useful if you
want to do further processing on contaminants, like specific coverage of
specific tRNAs etc.}
\item{keep.unaligned.genome}{logical, default FALSE. Create and keep
reads that did not align at the genome alignment step,
default is to only keep the aligned bam file. Useful if you
want to do further processing on plasmids/custom sequences.}
\item{script.folder}{location of STAR index script,
default internal ORFik file. You can change it and give your own if you
need special alignments.}
\item{script.single}{location of STAR single file alignment script,
default internal ORFik file. You can change it and give your own if you
need special alignments.}
}
\value{
output.dir, can be used as as input in ORFik::create.experiment
}
\description{
Does either all files as paired end or single end,
so if you have mix, split them in two different folders.\cr
If STAR halts at .... loading genome, it means the STAR
index was aborted early, then you need to run:
STAR.remove.crashed.genome(), with the genome that crashed, and rerun.
}
\details{
Can only run on unix systems (Linux, Mac and WSL (Windows Subsystem Linux)),
and requires a minimum of 30GB memory on genomes like human, rat, zebrafish etc.\cr
If for some reason the internal STAR alignment bash script will not work for you,
like if you want more customization of the STAR/fastp arguments.
You can copy the internal alignment script,
edit it and give that as the script used for this function.\cr
The trimmer used is fastp (the fastest I could find), also works on
(Linux, Mac and WSL (Windows Subsystem Linux)).
If you want to use your own trimmer set file1/file2 to the location of
the trimmed files from your program.\cr
A note on trimming from creator of STAR about trimming:
"adapter trimming it definitely needed for short RNA sequencing.
For long RNA-seq, I would agree with Devon that in most cases adapter trimming
is not advantageous, since, by default, STAR performs local (not end-to-end) alignment,
i.e. it auto-trims." So trimming can be skipped for longer reads.
}
\examples{
# First specify directories wanted
annotation.dir <- "~/Bio_data/references/Human"
fastq.input.dir <- "~/Bio_data/raw_data/Ribo_seq_subtelny/"
bam.output.dir <- "~/Bio_data/processed_data/Ribo_seq_subtelny_2014/"
## Download some SRA data and metadata
# info <- download.SRA.metadata("DRR041459", fastq.input.dir)
# download.SRA(info, fastq.input.dir, rename = FALSE)
## Now align 2 different ways, without and with contaminant depletion
## No contaminant depletion:
# annotation <- getGenomeAndAnnotation("Homo sapiens", annotation.dir)
# index <- STAR.index(annotation)
# STAR.align.folder(fastq.input.dir, bam.output.dir,
# index, paired.end = FALSE)
## All contaminants merged:
# annotation <- getGenomeAndAnnotation(
# organism = "Homo_sapiens",
# phix = TRUE, ncRNA = TRUE, tRNA = TRUE, rRNA = TRUE,
# output.dir = annotation.dir
# )
# index <- STAR.index(annotation)
# STAR.align.folder(fastq.input.dir, bam.output.dir,
# index, paired.end = FALSE,
# steps = "tr-ge")
}
\seealso{
Other STAR:
\code{\link{STAR.align.single}()},
\code{\link{STAR.allsteps.multiQC}()},
\code{\link{STAR.index}()},
\code{\link{STAR.install}()},
\code{\link{STAR.multiQC}()},
\code{\link{STAR.remove.crashed.genome}()},
\code{\link{getGenomeAndAnnotation}()},
\code{\link{install.fastp}()}
}
\concept{STAR}
| /man/STAR.align.folder.Rd | permissive | Roleren/ORFik | R | false | true | 11,017 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STAR.R
\name{STAR.align.folder}
\alias{STAR.align.folder}
\title{Align all libraries in folder with STAR}
\usage{
STAR.align.folder(
input.dir,
output.dir,
index.dir,
star.path = STAR.install(),
fastp = install.fastp(),
paired.end = FALSE,
steps = "tr-ge",
adapter.sequence = "auto",
quality.filtering = FALSE,
min.length = 20,
mismatches = 3,
trim.front = 0,
max.multimap = 10,
alignment.type = "Local",
allow.introns = TRUE,
max.cpus = min(90, BiocParallel::bpparam()$workers),
wait = TRUE,
include.subfolders = "n",
resume = NULL,
multiQC = TRUE,
keep.contaminants = FALSE,
keep.unaligned.genome = FALSE,
script.folder = system.file("STAR_Aligner", "RNA_Align_pipeline_folder.sh", package =
"ORFik"),
script.single = system.file("STAR_Aligner", "RNA_Align_pipeline.sh", package = "ORFik")
)
}
\arguments{
\item{input.dir}{path to fast files to align, the valid input files will be search for from formats:
(".fasta", ".fastq", ".fq", or ".fa") with or without compression of .gz.
Also either paired end or single end reads. Pairs will automatically be detected from
similarity of naming, separated by something as .1 and .2 in the end. If files are renamed, where pairs
are not similarily named, this process will fail to find correct pairs!}
\item{output.dir}{directory to save indices, default:
paste0(dirname(arguments[1]), "/STAR_index/"), where arguments is the
arguments input for this function.}
\item{index.dir}{path to STAR index folder. Path returned from ORFik function
STAR.index, when you created the index folders.}
\item{star.path}{path to STAR, default: STAR.install(),
if you don't have STAR installed at default location, it will install it there,
set path to a runnable star if you already have it.}
\item{fastp}{path to fastp trimmer, default: install.fastp(), if you
have it somewhere else already installed, give the path. Only works for
unix (linux or Mac OS), if not on unix, use your favorite trimmer and
give the output files from that trimmer as input.dir here.}
\item{paired.end}{a logical: default FALSE, alternative TRUE. If TRUE, will auto detect
pairs by names. Can not be a combination of both TRUE and FALSE!\cr
If running in folder mode:
The folder must then contain an even number of files
and they must be named with the same prefix and sufix of either
_1 and _2, 1 and 2, etc. If SRR numbers are used, it will start on lowest and
match with second lowest etc.}
\item{steps}{a character, default: "tr-ge", trimming then genome alignment\cr
steps of depletion and alignment wanted:
The posible candidates you can use are:\cr
\itemize{
\item{tr : }{trim reads}
\item{co : }{contamination merged depletion}
\item{ph : }{phix depletion}
\item{rR : }{rrna depletion}
\item{nc : }{ncrna depletion}
\item{tR : }{trna depletion (Mature tRNA, so no intron checks done)}
\item{ge : }{genome alignment}
\item{all: }{run steps: "tr-co-ge" or "tr-ph-rR-nc-tR-ge", depending on if you
have merged contaminants or not}
}
If not "all", a subset of these ("tr-co-ph-rR-nc-tR-ge")\cr
If co (merged contaminants) is used, non of the specific contaminants can be specified,
since they should be a subset of co.\cr
The step where you align to the genome is usually always included, unless you
are doing pure contaminant analysis or only trimming.
For Ribo-seq and TCP(RCP-seq) you should do rR (ribosomal RNA depletion),
so when you made the
STAR index you need the rRNA step, either use rRNA from .gtf or manual download.
(usually just download a Silva rRNA database
for SSU&LSU at: https://www.arb-silva.de/) for your species.}
\item{adapter.sequence}{character, default: "auto". Auto detect adapter using fastp
adapter auto detection, checking first 1.5M reads. (Auto detection of adapter will
not work 100\% of the time (if the library is of low quality), then you must rerun
this function with specified adapter from fastp adapter analysis.
, using FASTQC or other adapter detection tools, else alignment will most likely fail!).
If already trimmed or trimming not wanted:
adapter.sequence = "disable" .You can manually assign adapter like:
"ATCTCGTATGCCGTCTTCTGCTTG" or "AAAAAAAAAAAAA". You can also specify one of the three
presets:\cr
\itemize{
\item{illumina (TrueSeq ~75/100 bp sequencing): }{AGATCGGAAGAGC}
\item{small_RNA (standard for ~50 bp sequencing): }{TGGAATTCTCGG}
\item{nextera: }{CTGTCTCTTATA}
}
Paired end auto detection uses overlap sequence of pairs, to use the slower
more secure paired end adapter detection, specify as: "autoPE".}
\item{quality.filtering}{logical, default FALSE. Not needed for modern
library prep of RNA-seq, Ribo-seq etc (usually < ~ 0.5\% of reads are removed).
If you are aligning bad quality data, set this to TRUE.\cr
These filters will then be applied (default of fastp), filter if:
\itemize{
\item{Number of N bases in read: }{> 5}
\item{Read quality: }{> 40\% of bases in the read are <Q15}
}}
\item{min.length}{20, minimum length of aligned read without mismatches
to pass filter. Anything under 20 is dangerous, as chance of random hits will
become high!}
\item{mismatches}{3, max non matched bases. Excludes soft-clipping, this only
filters reads that have defined mismatches in STAR.
Only applies for genome alignment step.}
\item{trim.front}{0, default trim 0 bases 5'. For Ribo-seq use default 0.
Ignored if tr (trim) is not one of the arguments in "steps"}
\item{max.multimap}{numeric, default 10. If a read maps to more locations than specified,
will skip the read. Set to 1 to only get unique mapping reads. Only applies for
genome alignment step. The depletions are allowing for multimapping.}
\item{alignment.type}{default: "Local": standard local alignment with soft-clipping allowed,
"EndToEnd" (global): force end-to-end read alignment, does not soft-clip.}
\item{allow.introns}{logical, default TRUE. Allow large gaps of N in reads
during genome alignment, if FALSE:
sets --alignIntronMax to 1 (no introns). NOTE: You will still get some spliced reads
if you assigned a gtf at the index step.}
\item{max.cpus}{integer, default: \code{min(90, BiocParallel:::bpparam()$workers)},
number of threads to use. Default is minimum of 90 and maximum cores - 2. So if you
have 8 cores it will use 6.}
\item{wait}{a logical (not \code{NA}) indicating whether the \R
interpreter should wait for the command to finish, or run it
asynchronously. This will be ignored (and the interpreter will
always wait) if \code{intern = TRUE}. When running the command
asynchronously, no output will be displayed on the \code{Rgui}
console in Windows (it will be dropped, instead).}
\item{include.subfolders}{"n" (no), do recursive search downwards for fast files if "y".}
\item{resume}{default: NULL, continue from step, lets say steps are "tr-ph-ge":
(trim, phix depletion, genome alignment) and resume is "ge", you will then use
the assumed already trimmed and phix depleted data and start at genome alignment,
useful if something crashed. Like if you specified wrong STAR version, but the trimming
step was completed. Resume mode can only run 1 step at the time.}
\item{multiQC}{logical, default TRUE. Do multiQC comparison of STAR
alignment between all the samples. Outputted in aligned/LOGS folder.
See ?STAR.multiQC}
\item{keep.contaminants}{logical, default FALSE. Create and keep
contaminant aligning bam files, default is to only keep unaligned fastq reads,
which will be further processed in "ge" genome alignment step. Useful if you
want to do further processing on contaminants, like specific coverage of
specific tRNAs etc.}
\item{keep.unaligned.genome}{logical, default FALSE. Create and keep
reads that did not align at the genome alignment step,
default is to only keep the aligned bam file. Useful if you
want to do further processing on plasmids/custom sequences.}
\item{script.folder}{location of STAR index script,
default internal ORFik file. You can change it and give your own if you
need special alignments.}
\item{script.single}{location of STAR single file alignment script,
default internal ORFik file. You can change it and give your own if you
need special alignments.}
}
\value{
output.dir, can be used as as input in ORFik::create.experiment
}
\description{
Does either all files as paired end or single end,
so if you have mix, split them in two different folders.\cr
If STAR halts at .... loading genome, it means the STAR
index was aborted early, then you need to run:
STAR.remove.crashed.genome(), with the genome that crashed, and rerun.
}
\details{
Can only run on unix systems (Linux, Mac and WSL (Windows Subsystem Linux)),
and requires a minimum of 30GB memory on genomes like human, rat, zebrafish etc.\cr
If for some reason the internal STAR alignment bash script will not work for you,
like if you want more customization of the STAR/fastp arguments.
You can copy the internal alignment script,
edit it and give that as the script used for this function.\cr
The trimmer used is fastp (the fastest I could find), also works on
(Linux, Mac and WSL (Windows Subsystem Linux)).
If you want to use your own trimmer set file1/file2 to the location of
the trimmed files from your program.\cr
A note on trimming from creator of STAR about trimming:
"adapter trimming it definitely needed for short RNA sequencing.
For long RNA-seq, I would agree with Devon that in most cases adapter trimming
is not advantageous, since, by default, STAR performs local (not end-to-end) alignment,
i.e. it auto-trims." So trimming can be skipped for longer reads.
}
\examples{
# First specify directories wanted
annotation.dir <- "~/Bio_data/references/Human"
fastq.input.dir <- "~/Bio_data/raw_data/Ribo_seq_subtelny/"
bam.output.dir <- "~/Bio_data/processed_data/Ribo_seq_subtelny_2014/"
## Download some SRA data and metadata
# info <- download.SRA.metadata("DRR041459", fastq.input.dir)
# download.SRA(info, fastq.input.dir, rename = FALSE)
## Now align 2 different ways, without and with contaminant depletion
## No contaminant depletion:
# annotation <- getGenomeAndAnnotation("Homo sapiens", annotation.dir)
# index <- STAR.index(annotation)
# STAR.align.folder(fastq.input.dir, bam.output.dir,
# index, paired.end = FALSE)
## All contaminants merged:
# annotation <- getGenomeAndAnnotation(
# organism = "Homo_sapiens",
# phix = TRUE, ncRNA = TRUE, tRNA = TRUE, rRNA = TRUE,
# output.dir = annotation.dir
# )
# index <- STAR.index(annotation)
# STAR.align.folder(fastq.input.dir, bam.output.dir,
# index, paired.end = FALSE,
# steps = "tr-ge")
}
\seealso{
Other STAR:
\code{\link{STAR.align.single}()},
\code{\link{STAR.allsteps.multiQC}()},
\code{\link{STAR.index}()},
\code{\link{STAR.install}()},
\code{\link{STAR.multiQC}()},
\code{\link{STAR.remove.crashed.genome}()},
\code{\link{getGenomeAndAnnotation}()},
\code{\link{install.fastp}()}
}
\concept{STAR}
|
# 10-fold cross-validation of a bagged decision-tree classifier on the
# thoracic-surgery risk dataset (target: Risk1Y, 1-year post-surgery risk).
dataset <- read.table("D:/Machine Learning/Assignment_4/dataset.txt",sep=",",stringsAsFactors = FALSE,header=FALSE)
names(dataset) <- c("DGN","PRE4", "PRE5", "PRE6", "PRE7", "PRE8", "PRE9", "PRE10", "PRE11","PRE14", "PRE17", "PRE19", "PRE25", "PRE30", "PRE32", "AGE", "Risk1Y")
# Convert logical (T/F) columns to numeric 0/1.
cols <- sapply(dataset, is.logical)
dataset[,cols] <- lapply(dataset[,cols], as.numeric)
dataset <- subset(dataset, select = -c(DGN, PRE6, PRE14))
dataset$Risk1Y <- factor(dataset$Risk1Y)
# Libraries hoisted out of the loop (they were re-attached every iteration).
library(ipred)
library(rpart)
library(e1071)
library(caret)
# BUG FIX: `folds` was used below but never defined anywhere; each of the 10
# consecutive test folds holds nrow(dataset)/10 rows.
# (The unused 80/20 random split that preceded the loop was dead code and has
# been removed -- it was overwritten on the first loop iteration.)
folds <- nrow(dataset) %/% 10
for (i in 0:9) {
  x <- i * folds + 1
  y <- x + folds - 1
  test <- dataset[x:y, ]
  train <- dataset[-(x:y), ]
  fit <- bagging(Risk1Y ~ ., data = train, mfinal = 15, control = rpart.control(maxdepth = 7, minsplit = 22))
  # Accuracy on training data
  train$pred.class <- predict(fit, train)
  t_pred_train <- confusionMatrix(data = factor(train$pred.class), reference = train$Risk1Y, positive = '1')
  conf_mat <- t_pred_train$table
  accuracy_train <- 100 * sum(diag(conf_mat)) / sum(conf_mat)
  cat(paste("\nAccuracy on Training dataset", accuracy_train))
  # Accuracy on test data
  test$pred.class <- predict(fit, test)
  t_pred_test <- confusionMatrix(data = factor(test$pred.class), reference = test$Risk1Y, positive = '1')
  conf_mat <- t_pred_test$table
  accuracy_test <- 100 * sum(diag(conf_mat)) / sum(conf_mat)
  cat(paste("\nAccuracy on Testing dataset", accuracy_test))
  # Precision and recall.
  # NOTE(review): this indexing treats cell [1,1] as true positives; with
  # positive = '1' and factor levels sorted c("0","1"), the positive class sits
  # in row/column 2 of caret's table -- verify which cell is intended.
  tp <- conf_mat[1, 1]
  tn <- conf_mat[2, 2]
  fn <- conf_mat[1, 2]
  fp <- conf_mat[2, 1]
  pr <- tp / (tp + fp)
  re <- tp / (tp + fn)
  cat(paste("\nPrecision:", pr))
  cat(paste("\nRecall :", re))
}
| /Assignments/Classifiers/BaggingAssignment4.R | no_license | rakeshBalasubramani/Machine-Learning | R | false | false | 1,788 | r | dataset <- read.table("D:/Machine Learning/Assignment_4/dataset.txt",sep=",",stringsAsFactors = FALSE,header=FALSE)
names(dataset) <- c("DGN","PRE4", "PRE5", "PRE6", "PRE7", "PRE8", "PRE9", "PRE10", "PRE11","PRE14", "PRE17", "PRE19", "PRE25", "PRE30", "PRE32", "AGE", "Risk1Y")
cols <- sapply(dataset, is.logical)
dataset[,cols] <- lapply(dataset[,cols], as.numeric)
dataset<-subset(dataset,select=-c(DGN,PRE6,PRE14))
dataset$Risk1Y <- factor(dataset$Risk1Y)
trainIndex <- sample(1:nrow(dataset), 0.8 * nrow(dataset))
train <- dataset[trainIndex, ]
test <- dataset[-trainIndex, ]
library(ipred)
library(rpart)
for(i in 0:9)
{
x=i*folds+1
y=x+folds-1
test<-dataset[x:y,]
train<-dataset[-(x:y),]
fit <- bagging(Risk1Y ~.,data=train,mfinal=15,control=rpart.control(maxdepth=7, minsplit=22))
library(e1071)
library(caret)
#Accuracy for training data
train$pred.class <- predict(fit,train)
t_pred_train<-confusionMatrix(data=factor(train$pred.class),reference=train$Risk1Y,positive='1')
conf_mat <- t_pred_train$table
accuracy_train <- sum(diag(conf_mat))/sum(conf_mat)
accuracy_train<-100*accuracy_train
cat(paste("\nAccuracy on Training dataset",accuracy_train))
#Accuracy for test data
test$pred.class <- predict(fit,test)
t_pred_test<-confusionMatrix(data=factor(test$pred.class),reference=test$Risk1Y,positive='1')
conf_mat <- t_pred_test$table
accuracy_test <- sum(diag(conf_mat))/sum(conf_mat)
accuracy_test<-100*accuracy_test
cat(paste("\nAccuracy on Testing dataset",accuracy_test))
#Precision and Recall
tp<-conf_mat[1,1]
tn<-conf_mat[2,2]
fn<-conf_mat[1,2]
fp<-conf_mat[2,1]
pr<-tp/(tp+fp)
re<-tp/(tp+fn)
cat(paste("\nPrecision:",pr))
cat(paste("\nRecall :",re))
}
|
#' findBranches
#'
#' Finds the branch (edge) numbers that descend from a given node of a tree.
#' @param tree An object of class phylo
#' @param node The node(s) whose descendent branches are of interest
#' @param tail Include the branch leading to the node of interest (TRUE),
#'   or only the branches emanating from it (FALSE)?
#' @return A vector of edge indices into \code{tree$edge}.
#' @name findBranches
#' @export
findBranches <- function(tree, node, tail = TRUE) {
  allbranches <- NULL
  ntips <- length(tree$tip.label)
  for (i in node) {
    descs <- getDescs(tree, i)
    # Tips are numbered 1..Ntip; larger numbers are internal nodes.
    # (The previous `internal` subset was computed but never used; removed.)
    tips <- descs[descs <= ntips]
    if (tail) {
      # Edge ending at node i itself, plus every edge on a path to its tips.
      allbranches <- c(allbranches, which(tree$edge[, 2] == i),
                       which.edge(tree, tree$tip.label[tips]))
    } else {
      # Edges starting at node i, plus every edge on a path to its tips.
      allbranches <- c(allbranches, which(tree$edge[, 1] == i),
                       which.edge(tree, tree$tip.label[tips]))
      # NOTE(review): unique() runs only in this branch; tail = TRUE can
      # return duplicate edge indices when clades in `node` overlap --
      # confirm whether that asymmetry is intended.
      allbranches <- unique(allbranches)
    }
  }
  return(allbranches)
}
| /R/findBranches.R | no_license | hferg/hfgr | R | false | false | 870 | r | #' findBranches
#' Finds the branch numbers (edge indices into tree$edge) that descend from a
#' given node of a tree.
#' @param tree An object of class phylo
#' @param node The node(s) the descendent branches of which are of interest
#' @param tail Include the branch leading to the node of interest, or not?
#' @name findBranches
#' @export
findBranches <- function(tree, node, tail = TRUE) {
  allbranches <- NULL  # accumulator of edge indices
  for (i in node) {
    descs <- getDescs(tree, i)
    # Tips are numbered 1..Ntip(tree); larger numbers are internal nodes.
    tips <- descs[which(descs <= length(tree$tip.label))]
    internal <- descs[which(descs > length(tree$tip.label))]  # NOTE(review): never used below
    if (tail) {
      # Edge ending at node i itself, plus every edge on a path to its tips.
      allbranches <- c(allbranches, which((tree$edge[ , 2] == i)), which.edge(tree, tree$tip.label[tips]))
    } else {
      # Edges starting at node i, plus every edge on a path to its tips.
      allbranches <- c(allbranches, which((tree$edge[ , 1] == i)), which.edge(tree, tree$tip.label[tips]))
      # NOTE(review): unique() is applied only in this branch; tail = TRUE
      # may return duplicates when clades in `node` overlap -- confirm intended.
      allbranches <- unique(allbranches)
    }
  }
  return(allbranches)
}
|
#' Fully work up a complete data set of integrating sphere data
#'
#' A fully-featured function that accepts a series of folders to automatically work up.
#' Pass in the FOLDER location of your baseline (no plant) reflectance, plant reflectance
#' and plant transmittance.
#' @name int_baseline_all
#' @param locationBaseline Folder containing the baseline (no plant) reflectance files.
#' @param locationReflectance Folder containing the plant reflectance files.
#' @param locationTransmittance Folder containing the plant transmittance files.
#' @param Averaging a variable that reflects how many wavelengths will be averaged together. Default: 1, so 1 point per wavelength
#' @param writeLoc If provided, will write out the collected data to the chosen directory. Provides csvs for reflectance, transmittance, baseline, and compiled.
#' @param writePrefix Standard text added to the front of filenames for written out files.
#' @param licordat Show or hide the Li-6800 LED spectra
#' @param useSharkeySpec Passed through to \code{int_graph} as \code{sharkeySpec}.
#' @return A list of two elements: the compiled, baseline-adjusted data frame
#'   and the corresponding plot object.
#' @export
int_baseline_all <- function(locationBaseline, locationReflectance, locationTransmittance, Averaging = 1, writeLoc = NULL, writePrefix = "", licordat = TRUE, useSharkeySpec = FALSE) {
  # Read the three measurement sets.
  baseline_data <- int_read_many(locationBaseline, Averaging = Averaging, checkTxt = "Reflection", label = "Reflectance")
  reflectance_data <- int_read_many(location = locationReflectance, Averaging = Averaging, checkTxt = "Reflection", label = "Reflectance")
  transmittance_data <- int_read_many(location = locationTransmittance, Averaging = Averaging, checkTxt = "Transmission", label = "Transmittance")
  # Baseline adjustment: light transmitted through the leaf, reflected by the
  # bottom sphere, transmitted back through the leaf, and then detected by the
  # top sphere.  Necessary for correcting apparent absorptance < 0% in the NIR.
  # NOTE(review): assumes both merges below yield rows in the same Wavelength
  # order so TTR can be column-bound directly -- confirm.
  suppressWarnings(adjustment_matrix <- merge(transmittance_data, baseline_data, by = "Wavelength"))
  adjvec <- (adjustment_matrix$Transmittance / 100)^2
  adjustment_matrix <- tibble::add_column(adjustment_matrix, TransTrans = adjvec)
  adj2 <- adjvec * baseline_data$Reflectance / 100
  adjustment_matrix <- tibble::add_column(adjustment_matrix, TTR = adj2)
  # Apply the baseline adjustment.
  suppressWarnings(plant_data_adj <- merge(transmittance_data, reflectance_data, by = "Wavelength"))
  plant_data_adj <- tibble::add_column(plant_data_adj, TTR = adjustment_matrix$TTR)
  plant_data_adj <- tibble::add_column(plant_data_adj, RefAdj = plant_data_adj$Reflectance - (plant_data_adj$TTR) * 100)
  plant_data_adj <- tibble::add_column(plant_data_adj, Absorptance = 100 - plant_data_adj$RefAdj - plant_data_adj$Transmittance)
  # Generate a graph of the corrected spectra.
  adj_data_plot <- int_graph(dplyr::select(plant_data_adj, "Wavelength", "Transmittance", "RefAdj", "Absorptance"), licordat = licordat, sharkeySpec = useSharkeySpec)
  # Optionally write data.  Paths are built with file.path() instead of
  # setwd(), so the caller's working directory is no longer silently changed
  # (the previous setwd() was never restored).
  if (!is.null(writeLoc)) {
    readr::write_csv(baseline_data, file.path(writeLoc, paste0(writePrefix, "Baseline reflectance.csv")))
    readr::write_csv(reflectance_data, file.path(writeLoc, paste0(writePrefix, "Plant reflectance.csv")))
    readr::write_csv(transmittance_data, file.path(writeLoc, paste0(writePrefix, "Plant transmittance.csv")))
    readr::write_csv(plant_data_adj, file.path(writeLoc, paste0(writePrefix, "Compiled plantdata.csv")))
  }
  # Return the compiled data and the plot.
  return(list(plant_data_adj, adj_data_plot))
}
| /R/int_baseline_all.R | permissive | poales/integratingSphere | R | false | false | 3,051 | r | #'Fully work up a complete data set of integrating sphere data
#'
#'A fully-featured function that accepts a series of folders to automatically work up.
#'Pass in the FOLDER location of your baseline (no plant) reflectance, plant reflectance
#'and plant transmittance.
#' @name int_baseline_all
#' @param Averaging a variable that reflects how many wavelengths will be averaged together. Default: 1, so 1 point per wavelength
#' @param writeLoc If provided, will write out the collected data to the chosen directory. Provides csvs for reflectance, transmittance, baseline, and compiled.
#' @param writePrefix Standard text added to the front of filenames for written out files.
#' @param licordat Show or hide the Li-6800 LED spectra
#' @export
int_baseline_all <- function(locationBaseline, locationReflectance, locationTransmittance, Averaging=1, writeLoc = NULL, writePrefix = "",licordat=T,useSharkeySpec=F) {
#read data
baseline_data <- int_read_many(locationBaseline,Averaging = Averaging,checkTxt = "Reflection",label="Reflectance")
reflectance_data <- int_read_many(location = locationReflectance, Averaging = Averaging,checkTxt = "Reflection",label="Reflectance")
transmittance_data <- int_read_many(location = locationTransmittance, Averaging=Averaging,checkTxt = "Transmission",label="Transmittance")
#Calculate baseline adjustment
#calculates the light that is transmitted through the leaf, is reflected by the bottom sphere, gets transmitted back
#through the plant, and then gets detected by the top sphere.
#necessary for correcting for A<0% in NIR.
suppressWarnings(adjustment_matrix <- merge(transmittance_data, baseline_data, by="Wavelength"))
adjvec <- (adjustment_matrix$Transmittance/100)^2
adjustment_matrix <- tibble::add_column(adjustment_matrix,TransTrans = adjvec)
adj2 <- adjvec * baseline_data$Reflectance/100
adjustment_matrix <- tibble::add_column(adjustment_matrix,TTR = adj2)
#Apply the baseline adjustment
suppressWarnings(plant_data_adj <- merge(transmittance_data,reflectance_data,by="Wavelength"))
plant_data_adj <- tibble::add_column(plant_data_adj,TTR=adjustment_matrix$TTR)
plant_data_adj <- tibble::add_column(plant_data_adj,RefAdj=plant_data_adj$Reflectance-(plant_data_adj$TTR)*100)
plant_data_adj <- tibble::add_column(plant_data_adj,Absorptance=100-plant_data_adj$RefAdj-plant_data_adj$Transmittance)
#generate a graph
adj_data_plot <- int_graph(dplyr::select(plant_data_adj,"Wavelength","Transmittance","RefAdj","Absorptance"),licordat=licordat,sharkeySpec=useSharkeySpec)
#optionally write data
if(!is.null(writeLoc)){
setwd(writeLoc)
readr::write_csv(baseline_data,paste0(writePrefix,"Baseline reflectance.csv"))
readr::write_csv(reflectance_data,paste0(writePrefix,"Plant reflectance.csv"))
readr::write_csv(transmittance_data,paste0(writePrefix,"Plant transmittance.csv"))
readr::write_csv(plant_data_adj, paste0(writePrefix,"Compiled plantdata.csv"))
}
#return everything you want
return(list(plant_data_adj,adj_data_plot))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cdf_check.R
\name{check_processed_cdf}
\alias{check_processed_cdf}
\title{check_processed_cdf}
\usage{
check_processed_cdf(processed_cdf)
}
\arguments{
\item{processed_cdf}{output of mapvizieR.default}
}
\value{
a named list. \code{$boolean} holds the TRUE/FALSE result; \code{$descriptive}
holds a string describing what happened.
}
\description{
the mapvizieR takes a cdf + a roster and does some grade level lookup.
this function is a wrapper around some tests that make sure that
the output conforms to expectations
}
| /man/check_processed_cdf.Rd | no_license | rabare/mapvizieR | R | false | false | 618 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cdf_check.R
\name{check_processed_cdf}
\alias{check_processed_cdf}
\title{check_processed_cdf}
\usage{
check_processed_cdf(processed_cdf)
}
\arguments{
\item{processed_cdf}{output of mapvizieR.default}
}
\value{
a named list. \code{$boolean} has true false result; \code{descriptive}
has a more descriptive string describing what happened.
}
\description{
the mapvizieR takes a cdf + a roster and does some grade level lookup.
this function is a wrapper around some tests that make sure that
the output conforms to expectations
}
|
\name{HPOTerms-class}
\docType{class}
\alias{class:HPOTerms}
\alias{HPOTerms-class}
\alias{HPOTerms}
\alias{initialize,HPOTerms-method}
\alias{HPOID}
\alias{HPOID,HPOTerms-method}
\alias{HPOID,HPOTermsAnnDbBimap-method}
\alias{HPOID,character-method}
\alias{Term}
\alias{Term,HPOTerms-method}
\alias{Term,HPOTermsAnnDbBimap-method}
\alias{Term,character-method}
\alias{Synonym}
\alias{Synonym,HPOTerms-method}
\alias{Synonym,HPOTermsAnnDbBimap-method}
\alias{Synonym,character-method}
\alias{Secondary}
\alias{Secondary,HPOTerms-method}
\alias{Secondary,HPOTermsAnnDbBimap-method}
\alias{Secondary,character-method}
\alias{show,HPOTerms-method}
\title{Class "HPOTerms"}
\description{A class to represent Human Phenotype Ontology nodes}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{HPOnode(HP, Term, synonym, secondary)}.
HP, Term are required.
}
\section{Slots}{
\describe{
\item{\code{HP}:}{Object of class \code{"character"} A character
string for the HPO id of a primary node.}
\item{\code{Term}:}{Object of class \code{"character"} A
character string that defines the role of gene product
corresponding to the primary HPO id.}
\item{\code{Synonym}:}{Object of class \code{"character"} other
ontology terms that are considered to be synonymous to the primary
term attached to the HPO id. Synonymous here can mean that the
synonym is an exact synonym of the primary term, is related to the
primary term, is broader than the primary term, is more precise
than the primary term, or name is related to the term, but is not
exact, broader or narrower.}
\item{\code{Secondary}:}{Object of class \code{"character"} HPO ids
      that are secondary to the primary HPO id as a result of merging HPO
      terms, so that one HPO id becomes the primary HPO id and the rest
      become secondary.}
}
}
\section{Methods}{
\describe{
\item{HP}{\code{signature(object = "HPOTerms")}:
The get method for slot HPOID.}
\item{Term}{\code{signature(object = "HPOTerms")}:
The get method for slot Term.}
\item{Synonym}{\code{signature(object = "HPOTerms")}:
The get method for slot Synonym.}
\item{Secondary}{\code{signature(object = "HPOTerms")}:
The get method for slot Secondary.}
\item{show}{\code{signature(x = "HPOTerms")}:
The method for pretty print.}
}
}
\note{HPOTerms objects are used to represent primary HPO nodes in the
SQLite-based annotation data package HPO.db}
\examples{
HPOnode <- new("HPOTerms", HP="HPOID:1234567", Term="Test")
HP(HPOnode)
Term(HPOnode)
require(HPO.db)
FirstTenHPOBimap <- HPOTERM[1:10]
Term(FirstTenHPOBimap)
ids = keys(FirstTenHPOBimap)
Term(ids)
}
\keyword{datasets}
| /man/HPOTerms-class.Rd | no_license | cran/HPO.db | R | false | false | 2,894 | rd | \name{HPOTerms-class}
\docType{class}
\alias{class:HPOTerms}
\alias{HPOTerms-class}
\alias{HPOTerms}
\alias{initialize,HPOTerms-method}
\alias{HPOID}
\alias{HPOID,HPOTerms-method}
\alias{HPOID,HPOTermsAnnDbBimap-method}
\alias{HPOID,character-method}
\alias{Term}
\alias{Term,HPOTerms-method}
\alias{Term,HPOTermsAnnDbBimap-method}
\alias{Term,character-method}
\alias{Synonym}
\alias{Synonym,HPOTerms-method}
\alias{Synonym,HPOTermsAnnDbBimap-method}
\alias{Synonym,character-method}
\alias{Secondary}
\alias{Secondary,HPOTerms-method}
\alias{Secondary,HPOTermsAnnDbBimap-method}
\alias{Secondary,character-method}
\alias{show,HPOTerms-method}
\title{Class "HPOTerms"}
\description{A class to represent Human Phenotype Ontology nodes}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{HPOnode(HP, Term, synonym, secondary)}.
HP, Term are required.
}
\section{Slots}{
\describe{
\item{\code{HP}:}{Object of class \code{"character"} A character
string for the HPO id of a primary node.}
\item{\code{Term}:}{Object of class \code{"character"} A
character string that defines the role of gene product
corresponding to the primary HPO id.}
\item{\code{Synonym}:}{Object of class \code{"character"} other
ontology terms that are considered to be synonymous to the primary
term attached to the HPO id. Synonymous here can mean that the
synonym is an exact synonym of the primary term, is related to the
primary term, is broader than the primary term, is more precise
than the primary term, or name is related to the term, but is not
exact, broader or narrower.}
\item{\code{Secondary}:}{Object of class \code{"character"} HPO ids
that are secondary to the primary HPO id as results of merging HPO
terms so that One HPO id becomes the primary HPO id and the rest
become the secondary.}
}
}
\section{Methods}{
\describe{
\item{HP}{\code{signature(object = "HPOTerms")}:
The get method for slot HPOID.}
\item{Term}{\code{signature(object = "HPOTerms")}:
The get method for slot Term.}
\item{Synonym}{\code{signature(object = "HPOTerms")}:
The get method for slot Synonym.}
\item{Secondary}{\code{signature(object = "HPOTerms")}:
The get method for slot Secondary.}
\item{show}{\code{signature(x = "HPOTerms")}:
The method for pretty print.}
}
}
\note{HPOTerms objects are used to represent primary HPO nodes in the
SQLite-based annotation data package HPO.db}
\examples{
HPOnode <- new("HPOTerms", HP="HPOID:1234567", Term="Test")
HP(HPOnode)
Term(HPOnode)
require(HPO.db)
FirstTenHPOBimap <- HPOTERM[1:10]
Term(FirstTenHPOBimap)
ids = keys(FirstTenHPOBimap)
Term(ids)
}
\keyword{datasets}
|
\name{naivedata}
\docType{data}
\alias{naivedata}
\title{Data generated by Monte Carlo}
\description{
Data generated by Monte Carlo, the first column is the response variable, the second column is the design matrix, and the rest are the instrumental variables
}
\usage{data(naivedata)}
\keyword{datasets}
| /man/naivedata.Rd | no_license | wangsanshui/naivereg | R | false | false | 318 | rd | \name{naivedata}
\docType{data}
\alias{naivedata}
\title{Data generated by Monte Carlo}
\description{
Data generated by Monte Carlo, the first column is the response variable, the second column is the design matrix, and the rest are the instrumental variables
}
\usage{data(naivedata)}
\keyword{datasets}
|
############################################
###### DFL decomposition function #########
############################################
## Version 2.0, 26. Juni 2019
## Note:
## Group 1 is the reference group. Its wage structure
# is used for the counterfactuals.
# In order to sequentially decompose the composition effect,
# variable have to be entered seperated by |
# If sequence="marginal" the marginal of the last variable entered
# is reweighted first.
# If firstrw="conditional" the conditional distribution of the first
# variable entered in the formula is reweigted first.
# library(AER)
# data("CPS1985")
# f <- wage ~ gender | experience | education | region
# result <- dfl_deco(f, data=CPS1985, group=union, tau=c(0.5,0.75,0.8), log.trans=FALSE, trim=TRUE)
# result$quantile
# result$other.stats
###########################################################
## Package to be loaded
require(Formula) #for extended formulas
#require(reldist) #for weighted distributional statistics (i.e. quantiles, gini, etc.)
require(Hmisc) #for weighted distributional statistics (i.e. quantiles, etc.)
require(survey) #for glm models with survey data
require(ggplot2) #for the plotting function
require(reshape2) #for reshaping the data.frames
require(fastglm) #for optimzed glm solving algorithm
###########################################################
# Actual decomposition function
#
# DFL (DiNardo/Fortin/Lemieux) reweighting decomposition.
#
# Args:
#   formula:    extended Formula, outcome ~ x1 | x2 | ... (sequential deco order)
#   data:       data.frame with all model variables
#   weights:    optional observation weights
#   group:      indicator defining the two distributions to be compared
#   ws_formula: optional formula for a detailed wage-structure effect
#   reference:  group whose wage structure is used (1 or 0); group `reference`
#               is reweighted to the other group's covariate distribution
#   tau:        quantiles at which the decomposition is evaluated
#   firstrw:    "marginal" (reweight marginal of last-entered variable first)
#               or "conditional" (conditional of first-entered variable first)
#   stats:      if FALSE, skip computing the decomposition statistics
#   all.stats:  passed through to stat()
#   log.trans:  passed through to stat()
#   fast:       estimation with fastglm
#   trim:       automatically trims observations with very large propensities
# Returns: a list with $quantile, $other.stats, the model frame, the
#          reweighting factors psi, and trimming information.
dfl_deco <- function(formula, data, weights, group,
                     ws_formula=NULL,
                     reference=c(1,0),
                     na.action = na.exclude,
                     tau=c(10,50,90,99)/100,
                     firstrw=c("marginal","conditional"),
                     stats=TRUE,
                     all.stats=FALSE,
                     log.trans=TRUE,
                     fast=TRUE,
                     trim=FALSE){
  ##########################################################
  ## 1) Set up data
  # Use match.call function to call data.vectors
  mf = match.call()
  m = match(c("formula", "data", "weights", "na.action","group"), names(mf), 0)
  mf = mf[c(1, m)]
  mf$drop.unused.levels = TRUE
  # Retrieve formula as.Formula and model.frame
  f <- as.Formula(formula)
  # Make sure that all variables (incl. for detailed WS effect) are selected
  if(!is.null(ws_formula)){
    fws <- Formula(ws_formula)
    f <- update(f,as.Formula(paste(". ~ . + ", as.character(fws, lhs=1)[2], "+", as.character(fws, rhs=1)[3])))
  }
  mf[[1]] = as.name("model.frame")
  mf$formula <- f
  mf = eval.parent(mf)
  mt = attr(mf, "terms")
  # Store "original" decomposition model again
  f <- as.Formula(formula)
  # Extract variables, weights and group identifier
  dep = model.response(mf, "numeric")
  weight = model.weights(mf)
  if (!is.null(weight) && !is.numeric(weight)) {
    stop("'weights' must be a numeric vector")
  }
  if (is.null(weight)) {
    weight = rep(1, length(dep))
  }
  groupN = mf[, ncol(mf)]
  mf$groupN <- groupN
  # Number of covariates for decomposition of composition effect
  nvar = length(f)[2]
  # Set reference group to 1 if not determined
  if(length(reference)==2){reference=1}
  cat("Reference group (to be reweighted): ", reference,"\n \n")
  # If first reweighting not specified choose marginal first as default
  firstrw = ifelse(length(firstrw)==2,"marginal",firstrw)
  # Set Progress Bar
  cat("Probit estimation...\n")
  pb <- txtProgressBar(min=0,max=1, style=3)
  nmods <- 1+nvar+ifelse(!is.null(ws_formula),4,0)
  ##########################################################
  ## 2) Fit probit models & estimate reweighting factors
  ### Unconditional/Sample probabilities
  mod <- groupN ~ 1
  p1 <- mean(pfit(mod,mf,weight))
  p0 <- 1-p1
  probs <- rep(p0/p1,nrow(mf))
  setTxtProgressBar(pb,1/nmods)
  ### Conditional probabilities, beginning with the LAST variable in the Formula
  m <- 0 # only for ProgressBar
  for(i in nvar:1){
    mod <- update(formula(f, rhs=nvar:i, collapse=TRUE), groupN ~ .)
    p1 <- pfit(mod,mf,weight)
    p0 <- 1-p1
    probs <- cbind(probs,p0/p1)
    m <- m + 1 # Progressbar
    setTxtProgressBar(pb,(1+m)/nmods)
  }
  ##########################################################
  ### Reweighting factor
  ### psi_s contains relative probabilities:
  ### first [P(t=1)/P(t=0)]*[P(t=1|X1)/P(t=0|X1)],
  ### second [P(t=0|X1)/P(t=1|X1)]*[P(t=1|X2,X1)/P(t=0|X2,X1)], etc.
  ### X1 is the variable entered last, X2 the variable entered second last.
  psi_s <- (probs[,1]^-1)*probs[,2]
  if(nvar==1){
    # if there is only one variable stick to psi_s and use it as weight
    psi <- as.matrix(psi_s)
  } else {
    # procedure to compute the weights if nvar>1
    for(i in 2:nvar){
      psi_s <- cbind(psi_s,(probs[,i]^-1)*probs[,i+1])
    }
    # Marginal of last entered variable or conditional of first entered
    # variable to be reweighted first?
    first <- ifelse(firstrw=="marginal",1,nvar)
    last <- ifelse(firstrw=="marginal",nvar,1)
    correct <- ifelse(firstrw=="marginal",1,-1)
    loopvals <- seq(first+correct,last)
    # psi contains the weights actually used to reweight the distribution;
    # first position holds the weight applied first.
    psi <- as.matrix(psi_s[,first])
    colnames(psi) <- paste("psi_X",first,sep="")
    for(i in loopvals){
      psi <- cbind(psi,apply(psi_s[,first:i],1,prod))
      colnames(psi)[length(colnames(psi))] <- paste("psi_X",first,"to",i,sep="")
    }
    # Remove unused object
    rm(psi_s)
  }
  ###########################################################
  ### Weights for decomposition of wage structure effect
  if(!is.null(ws_formula)){
    # What's the group variable value of the reference group?
    if(is.factor(groupN)){
      group_val <- levels(groupN)[1+reference]
    } else {
      group_val = reference
    }
    # Set up model for unconditional prob
    mod <- as.Formula(paste(as.character(fws, lhs=1)[2],"~ 1"))
    p_S_1 <- pfit(mod,mf[which(groupN==group_val),],weight[which(groupN==group_val)])
    p_S_0 <- pfit(mod,mf[which(groupN!=group_val),],weight[which(groupN!=group_val)])
    setTxtProgressBar(pb,(nmods-2)/nmods)
    # Model for conditional prob
    mod <- fws
    p_S_X_1 <- pfit(mod,mf[which(groupN==group_val),],weight[which(groupN==group_val)])
    p_S_X_0 <- pfit(mod,mf[which(groupN!=group_val),],weight[which(groupN!=group_val)])
    psi_S_1 <- (1-p_S_1)/(1-p_S_X_1)
    psi_S_0 <- (1-p_S_0)/(1-p_S_X_0)
    # Add wage structure weight to main data.frame
    psi_S <- rep(1,nrow(mf))
    select <- which(groupN==group_val)
    psi_S[select] <- psi_S_1
    select <- which(groupN!=group_val)
    psi_S[select] <- psi_S_0
    setTxtProgressBar(pb,1)
  }
  cat("\n\n")
  ###########################################################
  ### Compute decomposition terms
  untrimmed <- NULL
  trimshare <- 0
  if(stats==TRUE){
    ##########################################################
    # Trimming of weights being too large
    if(trim==TRUE){
      # trim main reweighting factor
      trimselect <- trimming(psi[,nvar],groupN,reference)
      # trim WS deco rw factor if required
      if(!is.null(ws_formula)){
        # Find WS variable
        ws_var <- as.character(fws, lhs=1)[2]
        select <- which(groupN==group_val)
        trimselect[select] <- trimming(psi_S[select],mf[select,ws_var],0)
        select <- which(groupN!=group_val)
        trimselect[select] <- trimming(psi_S[select],mf[select,ws_var],0)
      }
      # Compute trim stat:
      trimshare <- 1- sum(trimselect)/length(trimselect)
      if(trimshare>0){
        # Prepare to export untrimmed data
        untrimmed <- data.frame(trimselect,dep,weight,groupN,psi)
        # Create trimmed data
        dep <- dep[which(trimselect==1)]
        weight <- weight[which(trimselect==1)]
        groupN <- groupN[which(trimselect==1)]
        psi <- as.matrix(psi[which(trimselect==1),])
        if(!is.null(ws_formula)){
          untrimmed <- cbind(untrimmed,mf[,ws_var],psi_S)
          # FIX: was names(untrimmend) — undefined object, runtime error
          names(untrimmed)[ncol(untrimmed)-1] <- "ws_var"
          # FIX: was psi[which(...)] — must trim psi_S itself, not index psi
          psi_S <- psi_S[which(trimselect==1)]
        }
      }
    }
    ##########################################################
    ### Observed distributions
    F1 <- stat(dep,weight,groupN,group=1,rwfactor=rep(1,length(weight)),
               tau=tau, all.stats=all.stats, log.trans=log.trans)
    F0 <- stat(dep,weight,groupN,group=0,rwfactor=rep(1,length(weight)),
               tau=tau, all.stats=all.stats, log.trans=log.trans)
    ##########################################################
    # Counterfactual distribution(s)
    # if reference group==0 all rw factors are the inverse of the computed
    pow <- ifelse(reference==1,1,-1)
    first <- ifelse(reference==1,1,nvar)
    last <- ifelse(reference==1,nvar,1)
    loopvals <- seq(first,last)
    FC <- NULL
    for(i in 1:nvar){
      FC <- cbind(FC,stat(dep,weight,groupN,group=reference,rwfactor=psi[,i]^pow,
                          tau=tau,all.stats=all.stats, log.trans=log.trans))
    }
    if(nvar==1){
      FC <- as.matrix(FC)
    }
    ##########################################################
    # Decomposition of aggregate effects and detailed composition effects
    Delta <- cbind(F1 - F0, F1 - FC[,nvar], FC[,nvar] - F0)
    if(reference==1){
      colnames(Delta) <- c("Delta_O","Delta_X","Delta_S")
    } else {
      colnames(Delta) <- c("Delta_O","Delta_S","Delta_X")
    }
    if(nvar>1){
      if(reference==1){
        Delta <- cbind(Delta,F1-FC[,1])
        colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",1,sep="")
        for(i in 2:nvar){
          Delta <- cbind(Delta,FC[,i-1]-FC[,i])
          colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",i,sep="")
        }
      } else {
        for(i in nvar:2){
          Delta <- cbind(Delta,FC[,i]-FC[,i-1])
          colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",i,sep="")
        }
        Delta <- cbind(Delta,FC[,1]-F0)
        colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",1,sep="")
      }
    }
    ###########################################################
    ### Decomposition of wage structure effect
    if(!is.null(ws_formula)){
      ws_var <- as.character(fws, lhs=1)[2]
      ws_var_val <- 1
      if(is.factor(mf[,ws_var])){
        ws_var_val <- levels(mf[,ws_var])[2]
      }
      # Select only observations with WS group 0.
      # NOTE(review): mf is not trimmed above, so when trim=TRUE and rows were
      # dropped these indices assume row order is preserved — verify when
      # combining trim=TRUE with ws_formula.
      select <- which(mf[,ws_var]!=ws_var_val)
      # Compute counterfactual values
      FCW1 <- stat(dep[select],weight[select],groupN[select],group=1,rwfactor=psi_S[select],
                   tau=tau, all.stats=all.stats, log.trans=log.trans)
      FCW0 <- stat(dep[select],weight[select],groupN[select],group=0,rwfactor=psi_S[select],
                   tau=tau, all.stats=all.stats, log.trans=log.trans)
      Delta_WS_X1 <- (F1-FCW1) - (F0-FCW0)
      Delta_WS_other <- Delta[,ifelse(reference==1,3,2)] - Delta_WS_X1
      Delta <- cbind(Delta,Delta_WS_X1,Delta_WS_other)
    }
    ##########################################################
    # Prepare results of decomposition for export
    quantile=cbind(tau,Delta[1:length(tau),])
    other.stats=Delta[(length(tau)+1):nrow(Delta),]
  } else {
    # if no stats return empty objects
    quantile=NULL
    other.stats=NULL
  }
  ##########################################################
  ### Export results
  res <- list(quantile=quantile,
              other.stats=other.stats,
              formula=formula,
              mf=mf,
              weight=weight,
              psi=psi,
              reference=reference,
              tau=tau,
              firstrw=firstrw,
              all.stats=all.stats,
              log.trans=log.trans,
              untrimmed=untrimmed,
              trimshare=trimshare)
  if(!is.null(ws_formula)){
    # Add WS weights to the weight matrix for export
    psi <- cbind(psi, psi_S)
    res <- list(quantile=quantile,
                other.stats=other.stats,
                formula=formula,
                mf=mf,
                weight=weight,
                psi=psi,
                reference=reference,
                tau=tau,
                firstrw=firstrw,
                formula.rw=fws,
                all.stats=all.stats,
                log.trans=log.trans,
                untrimmed=untrimmed,
                trimshare=trimshare)
  }
  return(res)
}
#############################################################
### Function that decomposes the wage structure effect of
### DiNardo/Lemieux 1997
#
# Args:
#   res: the list returned by dfl_deco() with a wage-structure formula
#        (must contain $mf, $weight, $reference, $formula.rw, $psi, $tau,
#         $all.stats, $log.trans).
# Returns: a list with $quantile and $other.stats (marginal, sorting and
#          wage-structure components) plus the reweighting factors used.
dfl_deco_ws <- function(res){
  # Extract results
  mf <- res$mf
  weight <- res$weight
  groupN <- mf[,"groupN"]
  reference <- res$reference
  # What's the group variable value of the reference group?
  if(is.factor(groupN)){
    group_val <- levels(groupN)[1+reference]
  } else {
    group_val = reference
  }
  # Wage structure effect variable
  fws <- res$formula.rw
  if(is.null(fws)){stop("No wage structure decomposition in first place")}
  ws_var <- as.character(fws, lhs=1)[2]
  ws_var_val <- 1
  if(is.factor(mf[,ws_var])){
    ws_var_val <- levels(mf[,ws_var])[2]
  }
  # Retrieve rw factors (last two columns of psi: composition and WS factors)
  psi_X <- res$psi[,ncol(res$psi)-1]
  psi_S <- res$psi[,ncol(res$psi)]
  # Other parameters
  tau <- res$tau
  all.stats=res$all.stats
  log.trans=res$log.trans
  ###########################################
  # Select group 1 and 0 observations as well as observations which are
  # NOT in the group for which the detailed WS effect is estimated
  select1 <- which(groupN==group_val)
  select0 <- which(groupN!=group_val)
  selectWS <- which(mf[,ws_var]!=ws_var_val)
  selectWS1 <- intersect(selectWS,select1)
  selectWS0 <- intersect(selectWS,select0)
  selectNonWS1 <- intersect(which(mf[,ws_var]==ws_var_val),select1)
  ###########################################
  # Estimations
  # Set up model for unconditional prob
  mod <- as.Formula(paste(as.character(fws, lhs=1)[2],"~ 1"))
  p_S_0 <- mean(pfit(mod,mf[select0,],weight[select0]))
  p_S_1 <- mean(pfit(mod,mf[select1,],weight[select1]))
  p_S_1.C <- mean(pfit(mod,mf[select1,],weight[select1]*psi_X[select1]))
  # Model for conditional prob
  mod <- fws
  p_S_X_1.C <- pfit(mod,mf[select1,],weight[select1]*psi_X[select1])
  # Marginal weights
  psi_S_1.M <- rep(1,nrow(mf))
  psi_S_1.M[selectNonWS1] <- p_S_0/p_S_1
  p_S_X_1 <- 1-(1-p_S_1)/psi_S[selectWS1] # can be retrieved from the WS factor for psi_1,S1
  #psi_S_1.M[selectWS1] <- psi_S[selectWS1]*(1 - (p_S_0/p_S_1)*p_S_X_1)
  psi_S_1.M[selectWS1] <- (psi_S[selectWS1]/(1-p_S_1))*(1 - (p_S_0/p_S_1)*p_S_X_1)
  psi_S_1.M.all <- psi_S_1.M
  # Sorting weights
  psi_S_1.J <- (1-p_S_1.C)/(1-p_S_X_1.C)
  #psi_S_0 <- (1-p_S_0)/(1-p_S_X_0)
  #psi_S_1.M.all <- rep(1,nrow(mf))
  #psi_S_1.M.all[select1] <- psi_S_1.M
  psi_S_1.J.all <- rep(1,nrow(mf))
  psi_S_1.J.all[select1] <- psi_S_1.J
  #select <- which(groupN!=group_val)
  #psi_S[select] <- psi_S_0
  ###########################################
  # Statistics
  pow <- ifelse(reference==1,1,-1)
  dep <- mf[,1]
  ### Observed distributions
  F1 <- stat(dep,weight,groupN,group=1,rwfactor=rep(1,length(weight)),
             tau=tau, all.stats=all.stats, log.trans=log.trans)
  F0 <- stat(dep,weight,groupN,group=0,rwfactor=rep(1,length(weight)),
             tau=tau, all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 1: Aggregate counterfactual
  FC1 <- stat(dep,weight,groupN,group=reference,rwfactor=psi_X^pow,
              tau=tau,all.stats=all.stats, log.trans=log.trans)
  ### Counterfactuals 2&3: Wage structure counterfactual as in DiNardo&Lemieux 1997
  FCW1 <- stat(dep[selectWS],weight[selectWS],groupN[selectWS],group=1,rwfactor=psi_S[selectWS],
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  FCW0 <- stat(dep[selectWS],weight[selectWS],groupN[selectWS],group=0,rwfactor=psi_S[selectWS],
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 4: FCW0 with wage structure of t=1
  FC1J <- stat(dep[selectWS],weight[selectWS],groupN[selectWS],group=1,rwfactor=psi_X[selectWS]*psi_S_1.J.all[selectWS],
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 5: Distribution in t=1 with marginal x1 like in t=0 but x2 like in t=1
  FC1M <- stat(dep,weight,groupN,group=1,rwfactor=psi_S_1.M.all,
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  ###########################################
  ### Decomposition:
  Delta_Marginal <- F1 - FC1M
  Delta_Sorting <- (FC1M - FCW1) - (FC1 - FC1J)
  Delta_WS <- (FC1 - FC1J) - (F0 - FCW0)
  l <- 1:length(tau)
  quantile <- data.frame(tau=tau,
                         marginal=Delta_Marginal[l],
                         sorting=Delta_Sorting[l],
                         wage_structure=Delta_WS[l])
  l <- (length(tau)+1):length(Delta_Sorting)
  other.stats <- data.frame(marginal=Delta_Marginal[l],
                            sorting=Delta_Sorting[l],
                            wage_structure=Delta_WS[l])
  ###########################################
  ### Return results
  res <- list(quantile=quantile,
              other.stats=other.stats,
              psi_S1.1=psi_S[selectWS1],
              psi_S1.0=psi_S[selectWS0],
              #psi_M=psi_S_1.M.all[selectWS1],
              psi_M=psi_S_1.M.all,
              psi_J=psi_S_1.J.all[selectWS1]
              )
  # FIX: previously the assignment above was the last expression, so the
  # result was returned invisibly; return it explicitly.
  return(res)
}
#############################################################
### Plot function for composition effect results
#
# Args:
#   result: list returned by dfl_deco() (uses its $quantile element)
#   type:   1 = observed difference and main terms, 2 = all terms besides
#           the observed difference, 3 = detailed terms only
# Returns: a ggplot object (one line per decomposition term over tau).
dfl_deco_plot <- function(result,type=c(1,2,3)){
  # FIX: take the first element so the vector default c(1,2,3) (or any
  # vector input) yields a scalar condition; in R >= 4.2 a length > 1
  # condition in if() is an error.
  type <- type[1]
  result <- result[["quantile"]]
  if (type == 1 || ncol(result) == 4) {
    ## type 1: Observed difference and main decomposition terms (S,X)
    diff <- as.data.frame(result[, c(1:4)])
  } else if (type == 2) {
    ## type 2: All individual terms besides observed difference
    diff <- as.data.frame(result[, -2])
  } else {
    ## type 3: Only detailed terms
    diff <- as.data.frame(result[, -c(2:4)])
  }
  diff <- melt(diff, id.vars = "tau", measure.vars = names(diff)[-1],
               variable.name = "effect", value.name = "delta")
  plot <- ggplot(diff, aes(tau, delta, colour = effect)) +
    geom_hline(yintercept = 0, colour = "grey") +
    geom_line() +
    geom_point(aes(shape = effect, color = effect)) +
    scale_shape_manual(values = c(15:20, 0:14, 15:20, 0:14))
  return(plot)
}
#############################################################
### DFL deco: Counterfactual sorting condtional on x_1
counter_cond <- function(formula, data, weights, group,
na.action = na.exclude,
tau=c(10,50,90,99)/100,
all.stats=FALSE,
log.trans=TRUE){
##########################################################
##########################################################
## 1) Set up data
#Use match.call function to call data.vectors
mf = match.call()
m = match(c("formula", "data", "weights", "na.action","group"), names(mf), 0)
mf = mf[c(1, m)]
mf$drop.unused.levels = TRUE
# Retrieve formula as.Formula and model.frame
f <- as.Formula(formula)
if(length(f)[2] < 2) stop("Define a grouping variable!")
# Extract model.frame
mf[[1]] = as.name("model.frame")
mf$formula <- f
mf = eval.parent(mf)
mt = attr(mf, "terms")
# Extract variables, weights and group identifier
#reg = get_all_vars(mt, mf)
dep = model.response(mf, "numeric")
weight = model.weights(mf)
if (!is.null(weight) && !is.numeric(weight)) {
stop("'weights' must be a numeric vector")
}
if (!is.null(weight)) {
weight = weight
} else {
weight = rep(1, length(dep))
}
#weight = weight/sum(weight)
groupN = mf[, ncol(mf)]
#reg$groupN <- groupN
mf$groupN <- groupN
# Number of covariates for decomposition of composition effect
nvar = length(f)[2]
reference=1
##########################################################
##########################################################
## 2) Extracting reference group value of main decomposition variable
## and conditiong variable's levels
# What's the group variable value of reference group? [Maid decomposition]
if(is.factor(groupN)){
group_val <- levels(groupN)[1+reference]
} else {
group_val = reference
}
# What is the name of the conditioning var?
cond_var <- as.character(update(formula(f, rhs=nvar, collapse=TRUE), . ~ .))[3]
mf$cond_var <- mf[,cond_var]
if(is.factor(mf[,cond_var])){
group_val_cond <- levels(mf[,cond_var])[2]
} else {
group_val_cond = 1
}
##########################################################
##########################################################
## 3) Compute reweighting factor for scenario:
## What would distribution look like if distribution of x_2|x_1
## was like in t' but marginal distribution of x_1 and x_2 was like in t?
##################################
## Set the progress bar
cat("Probit estimation...\n")
pb <- txtProgressBar(min=0,max=1, style=3)
nmods <- 12
##################################
## 1) RW factor for x1=1: Main decomposition only in group x_1=1
mod <- update(formula(f, rhs=1:(nvar-1), collapse=TRUE), groupN ~ .)
select <- which(mf[,"cond_var"]==group_val_cond)
p_t_x2x1.0 <- pfit(mod,mf[select,],
weight[select],
newdata = mf)
mod <- groupN ~ 1
p_t_x1.0 <- mean(pfit(mod,mf[select,],
weight[select]))
PsiA <- ((1-p_t_x2x1.0)/p_t_x2x1.0)*((p_t_x1.0)/(1-p_t_x1.0))
# PB
setTxtProgressBar(pb,2/nmods)
##################################
## 2) Reweigting factor B for x1=0
#P(t)
mod <- groupN ~ 1
p_t.1 <- mean(pfit(mod,mf,weight))
#P(t|x'1)
mod <- groupN ~ 1
select <- which(mf[,"cond_var"]!=group_val_cond)
p_t.1_x1.0 <- mean(pfit(mod,mf[select,],weight[select]))
#P(t|x2)
mod <- update(formula(f, rhs=1:(nvar-1), collapse=TRUE), groupN ~ .)
p_t.1_x2 <- pfit(mod,mf,
weight,
newdata = mf)
# PB
setTxtProgressBar(pb,5/nmods)
#P(t|x2,x'1)
mod <- update(formula(f, rhs=1:(nvar-1), collapse=TRUE), groupN ~ .)
select <- which(mf[,"cond_var"]!=group_val_cond)
p_t.1_x2x1.0 <- pfit(mod,mf[select,],
weight[select],
newdata = mf)
# PB
setTxtProgressBar(pb,6/nmods)
#P(x'1)
mod <- cond_var ~ 1
p_x1.0 <- 1-mean(pfit(mod,mf,weight))
#P(x'1|x2)
mod <- as.character(update(formula(f, rhs=1:(nvar-1), collapse=TRUE), . ~ .))[3]
mod <- as.formula(paste(cond_var,"~",mod,sep=""))
p_x1.0_x2 <- 1-pfit(mod,mf,
weight,
newdata = mf)
# PB
setTxtProgressBar(pb,8/nmods)
# The factor
# P(t|x2) P(t|x'1) P(x'1)
# PsiB = ----------- ---------- ----------
# P(t|x2,x'1) P(t) P(x'1|x2)
PsiB = (p_t.1_x2/p_t.1_x2x1.0)*
(p_t.1_x1.0/p_t.1)*
(p_x1.0/p_x1.0_x2)
##################################
## 3) Reweigting factor C for x1=0
#P(t) (computed above: p_t.1)
#P(t')
#P(x1|t)
#P(x'1|t)
mod <- cond_var ~ 1
select <- which(groupN==group_val)
p_x1.1_t.1 <- mean(pfit(mod,mf[select,],weight[select]))
#P(x1|t')
mod <- cond_var ~ 1
select <- which(groupN!=group_val)
p_x1.1_t.0 <- mean(pfit(mod,mf[select,],weight[select]))
# PB
setTxtProgressBar(pb,10/nmods)
#P(t|x2) (computed above: p_t.1_x2)
#P(t'|x2)
#P(x1|x2,t')
mod <- as.character(update(formula(f, rhs=1:(nvar-1), collapse=TRUE), . ~ .))[3]
mod <- as.formula(paste(cond_var,"~",mod,sep=""))
select <- which(groupN!=group_val)
p_x1.1_x2t.0 <- pfit(mod,mf[select,],weight[select],
newdata = mf)
#P(x'1|x2,t)
mod <- as.character(update(formula(f, rhs=1:(nvar-1), collapse=TRUE), . ~ .))[3]
mod <- as.formula(paste(cond_var,"~",mod,sep=""))
select <- which(groupN==group_val)
p_x1.0_x2t.1 <-1-pfit(mod,mf[select,],weight[select],
newdata = mf)
# PB
setTxtProgressBar(pb,12/nmods)
cat("\n")
# The factor
# P(x1|x2,t') P(x'1|t) P(t'|x2) P(t)
# PsiC = P(x1|t) ----------- -------- -------- ----
# P(x'1|x2,t) P(x1|t') P(t|x2) P(t')
PsiC <- p_x1.1_t.1*(p_x1.1_x2t.0/p_x1.0_x2t.1)*
((1-p_t.1_x2)/p_t.1_x2)*
((1-p_x1.1_t.1)/p_x1.1_t.0)*
(p_t.1/(1-p_t.1))
##########################################################
##########################################################
## 4) Contruct weights for second counterfactual:
## What is the wage distribution if not only x_2|x_1 was like in t' but
## also the marginal of x_1 while the marginal of x_2 was still like in t.
PsiA.2 <- (p_x1.1_t.0/p_x1.1_t.1) * PsiA
PsiB.2 <- PsiB
PsiC.2 <- (p_x1.1_t.0/p_x1.1_t.1) * PsiC
##################################
## 4) Add weights two mf
mf$psi <- NA
mf$psi.2 <- NA
select <- which(mf$groupN==group_val&mf$cond_var==group_val_cond)
mf[select,"psi"] <- PsiA[select]
mf[select,"psi.2"] <- PsiA.2[select]
select <- which(mf$groupN==group_val&mf$cond_var!=group_val_cond)
mf[select,"psi"] <- PsiB[select]-PsiC[select]
mf[select,"psi.2"] <- PsiB.2[select]-PsiC.2[select]
#Return stats about psi
cat("Summary of psi:\n")
print(summary(mf$psi))
cat("\nShare of negative weights:\n")
cat(length(mf$psi[which(mf$psi<0)])/length(mf$psi[which(is.na(mf$psi)==FALSE)]),"\n")
cat("\nSummary of psi.2:\n")
print(summary(mf$psi.2))
cat("\nShare of negative weights:\n")
cat(length(mf$psi.2[which(mf$psi.2<0)])/length(mf$psi.2[which(is.na(mf$psi.2)==FALSE)]),"\n")
##########################################################
##########################################################
## 5) Create reweighting factor for group x=0 in t=1
F1 <- stat(dep,weight,groupN,group=1,rwfactor=rep(1,length(weight)),
tau=tau, all.stats=all.stats, log.trans=log.trans)
FC.1 <- stat(dep,weight,groupN,group=1,rwfactor=mf$psi,
tau=tau, all.stats=all.stats, log.trans=log.trans)
FC.2 <- stat(dep,weight,groupN,group=1,rwfactor=mf$psi.2,
tau=tau, all.stats=all.stats, log.trans=log.trans)
Delta <- cbind(F1-FC.1,FC.1-FC.2)
colnames(Delta) <- c("Delta_Sorting_X1","Delta_Marginal_X1")
quantile=cbind(tau,Delta[1:length(tau),])
other.stats=Delta[(length(tau)+1):nrow(Delta),]
res <- list(quantile=quantile,
other.stats=other.stats,
formula=f,
mf=mf,
weight=weight,
psi=mf$psi,
psi.2=mf$psi.2,
reference=reference,
tau=tau)
return(res)
}
#############################################################
### dfl_diag():
### Diagnosis tool to compare covariates distribution
### of actual and reweighted distribution
#' Diagnosis tool: compare covariate distributions of the actual and the
#' reweighted distribution.
#'
#' @param result Result list from dfl_deco (with mf, weight, psi, formula).
#' @param compareRef If FALSE (default), compare the reweighted reference
#'   group against the comparison group; if TRUE, against the (unweighted)
#'   reference group itself.
#' @param psi.2 If TRUE, use the alternative factor matrix `psi.2`.
#' @return Matrix with observed/reweighted weighted means and SDs per
#'   covariate plus their differences.
dfl_diag <- function(result, compareRef=FALSE, psi.2=FALSE){
  # Decomposition model and reference group stored in the result object
  deco_formula <- as.Formula(result$formula)
  ref_group <- result$reference
  model_frame <- result$mf
  obs_weight <- result$weight
  # Pick the requested reweighting-factor matrix
  rw <- if (psi.2) as.matrix(result$psi.2) else as.matrix(result$psi)
  # Use the aggregate factor: the last column, unless the last column holds
  # the wage-structure factor ("psi_S"), in which case take the one before it.
  last_col <- ncol(rw)
  if (last_col == 1) {
    rw <- rw[, 1]
  } else if (colnames(rw)[last_col] == "psi_S") {
    rw <- rw[, last_col - 1]
  } else {
    rw <- rw[, last_col]
  }
  # Map a numeric reference index onto the factor level when needed
  if (is.factor(model_frame$groupN)) {
    ref_group <- levels(model_frame$groupN)[ref_group + 1]
  }
  idx_ref <- which(model_frame$groupN == ref_group)
  # Comparison sample: the other group, or the reference group itself
  idx_com <- if (compareRef) idx_ref else which(model_frame$groupN != ref_group)
  # Design matrices without the intercept column
  rhs <- formula(deco_formula, collapse = TRUE)
  X_ref <- model.matrix(rhs, model_frame)[idx_ref, -1]
  X_com <- model.matrix(rhs, model_frame)[idx_com, -1]
  w_ref <- obs_weight[idx_ref]
  w_com <- obs_weight[idx_com]
  rw <- rw[idx_ref]
  # Weighted means and variances, observed vs. reweighted
  obs_mu  <- apply(X_com, 2, function(col) wtd.mean(col, weights = w_com))
  rw_mu   <- apply(X_ref, 2, function(col) wtd.mean(col, weights = rw * w_ref))
  obs_var <- apply(X_com, 2, function(col) wtd.var(col, weights = w_com))
  rw_var  <- apply(X_ref, 2, function(col) wtd.var(col, weights = rw * w_ref))
  # Export table (column names must match the historical output)
  t(rbind(mean_obs  = obs_mu,
          mean_rw   = rw_mu,
          mean_diff = obs_mu - rw_mu,
          sd_ob     = sqrt(obs_var),
          sd_rw     = sqrt(rw_var),
          sd_diff   = sqrt(obs_var + rw_var)))
}
#############################################################
### dfl_stat():
### Returns descriptive statistics of covariates
#' Descriptive statistics of the covariates by group.
#'
#' Computes weighted means and standard deviations of all model-matrix
#' columns separately for the reference and the comparison group, plus
#' observation counts and sums of weights.
#'
#' @param formula Decomposition formula (outcome ~ covariates, possibly with
#'   several parts separated by |).
#' @param data Data frame.
#' @param weights Optional observation weights.
#' @param group Binary group indicator.
#' @param na.action NA handling passed to model.frame.
#' @param reference Which group is the reference group (default 1).
#' @param constant If FALSE (default), the intercept column is dropped.
#' @return List with `means` (weighted means/SDs and differences per
#'   covariate) and `N` (observations and sum of weights per group).
dfl_stat <- function(formula,
                     data,
                     weights,
                     group,
                     na.action = na.exclude,
                     reference=1,
                     constant=FALSE){
  # Build a model.frame call from the matched call (standard NSE pattern)
  mf = match.call()
  m = match(c("formula", "data", "weights", "na.action","group"), names(mf), 0)
  mf = mf[c(1, m)]
  mf$drop.unused.levels = TRUE
  # Retrieve formula as.Formula and model.frame
  f <- as.Formula(formula)
  mf[[1]] = as.name("model.frame")
  mf$formula <- f
  mf = eval.parent(mf)
  mt = attr(mf, "terms")
  # Store "original" decomposition model again
  f <- as.Formula(formula)
  # Extract variables, weights and group identifier
  #reg = get_all_vars(mt, mf)
  weight = model.weights(mf)
  if (!is.null(weight) && !is.numeric(weight)) {
    stop("'weights' must be a numeric vector")
  }
  if (!is.null(weight)) {
    weight = weight
  }
  else {
    # Default: unit weights
    weight = rep(1, nrow(mf))
  }
  #weight = weight/sum(weight)
  # The group indicator is the last column of the model frame
  groupN = mf[, ncol(mf)]
  #reg$groupN <- groupN
  mf$groupN <- groupN
  # Select observations of reference group
  comparison <- ifelse(reference==1,0,1)
  if(is.factor(mf$groupN)){
    # Map the numeric reference index onto the corresponding factor level
    reference <- levels(mf$groupN)[reference + 1]
    comparison <- levels(mf$groupN)[which(levels(mf$groupN)!=reference)]
  }
  selectRef <- which(mf$groupN==reference)
  selectCom <- which(mf$groupN!=reference)
  # Prepare per-group design matrices
  mod <- formula(f, collapse=TRUE) #include reference group of cat. variables by +0
  if(constant==FALSE){
    # Drop the intercept column
    mRef <- as.matrix(model.matrix(mod,mf)[selectRef,-1])
    mCom <- as.matrix(model.matrix(mod,mf)[selectCom,-1])
  }else{
    mRef <- as.matrix(model.matrix(mod,mf)[selectRef,])
    mCom <- as.matrix(model.matrix(mod,mf)[selectCom,])
  }
  wRef <- weight[selectRef]
  wCom <- weight[selectCom]
  # Weighted means, mean differences and variances/SDs (Hmisc wtd.* helpers)
  mean_Ref <- apply(mRef,2,function(x) wtd.mean(x, weights=wRef))
  mean_Com <- apply(mCom,2,function(x) wtd.mean(x, weights=wCom))
  sd_Ref <- apply(mRef,2,function(x) wtd.var(x, weights=wRef))
  sd_Com <- apply(mCom,2,function(x) wtd.var(x, weights=wCom))
  mean_diff <- mean_Ref - mean_Com
  # sd_Ref/sd_Com still hold variances here; sd_diff sums them before sqrt
  sd_diff <- sqrt(sd_Ref + sd_Com)
  sd_Ref <- sqrt(sd_Ref)
  sd_Com <- sqrt(sd_Com)
  # Observation counts and sums of weights per group
  N <- matrix(c(length(wRef),length(wCom),sum(wRef),sum(wCom)),ncol=2,byrow=TRUE)
  colnames(N) <- c(reference,comparison)
  rownames(N) <- c("Obs.","Sum of weights")
  # Export table
  res <- t(rbind(mean_Ref,mean_Com,mean_diff,sd_Ref, sd_Com,sd_diff))
  colnames(res) <- c(paste(rep("mean",3),c(reference,comparison,"diff"),sep="_"),paste(rep("sd",3),c(reference,comparison,"diff"),sep="_"))
  res <- list(means=res, N=N)
  return(res)
}
#############################################################
## Function for fitting and predicting Conditional Probabilities
#' Fit a binary-response model and return predicted probabilities.
#'
#' Despite the "Probit estimation" progress messages printed by the callers
#' in this file, the link actually used here is the LOGIT link, in both the
#' glm and the fastglm branch.
#'
#' @param mod Formula with a binary response (e.g. groupN ~ x1 + x2).
#' @param df Data frame containing the model variables.
#' @param w Numeric vector of observation weights, one per row of `df`.
#' @param newdata Optional data frame to predict on; NULL = estimation sample.
#' @param fast If TRUE (default), estimate via fastglm; otherwise stats::glm.
#' @return Numeric vector of predicted probabilities that the response takes
#'   its "second" value (see coding note below).
pfit <- function(mod,df,w, newdata=NULL, fast=TRUE){
  # Without survey package
  #dep <- model.frame(mod,df)[,1]
  #reg <- model.matrix(mod,df)
  #probit <- glm(dep~reg, weights=w, family = binomial(link = "probit"), na.action=na.exclude, y=FALSE, model=FALSE)
  # With survey package
  #design <- svydesign(~0, data=df, weights=~w)
  #m1 <- svyglm(mod, data=df, design=design,family=quasibinomial(link="probit"))
  # Bind the weights into df so the glm() weights argument can resolve
  # the column named "w" inside the data
  df <- cbind(df,w)
  if(fast==FALSE){
    # With glm
    m1 <- glm(mod, data=df, family=binomial(link="logit"),weights=w)
    p_X_1 <- predict.glm(m1, newdata=newdata, type="response", na.action = na.exclude)
  }else{
    ## With fastglm
    # model.frame() stores the weights in the "(weights)" column
    df <- model.frame(mod, data=df,weights=w)
    if(!is.numeric(df[,1])){
      # Code the response as 1 for the second unique value encountered.
      # NOTE(review): this depends on the data's row order, not on factor
      # levels — confirm it matches the intended group coding.
      df[,1] <- as.numeric(df[,1]==unique(df[,1])[2])
    }
    m1 <- fastglm(model.matrix(mod,df),df[,1],
                  family = binomial(link = "logit"),
                  weights=df$`(weights)`, fitted=FALSE)
    # Inverse-logit, used for manual prediction from the coefficients
    logit <- function(x){1/(1+exp(-x))}
    if(is.null(newdata)){
      p_X_1 <- logit(as.numeric(model.matrix(mod,df)%*%coef(m1)))
    }else{
      p_X_1 <- logit(as.numeric(model.matrix(mod,newdata)%*%coef(m1)))
    }
  }
  # Truncate weights
  #p_X_1[which(p_X_1 < 0.01)] <- 0.01
  #p_X_1[which(p_X_1 > 0.99)] <- 0.99
  return(p_X_1)
}
#############################################################
### Trimming function
# Adapted trimming function as suggested in Huber, Lechner, Wunsch (2013: 9f.)
#' Trim observations with excessive reweighting factors, following the rule
#' suggested in Huber, Lechner & Wunsch (2013: 9f.).
#'
#' @param rwfactor Vector of reweighting factors.
#' @param groupN Group indicator (numeric or factor).
#' @param group Value/index identifying the reweighted ("treatment") group.
#' @return Numeric keep-indicator per observation: 1 = keep, 0 = drop.
trimming <- function(rwfactor, groupN, group = c(0, 1)) {
  # Translate a numeric group index into the corresponding factor level
  if (is.factor(groupN)) {
    group <- levels(groupN)[1 + group]
  }
  n_obs <- length(groupN)
  # Trimming threshold 1/sqrt(n)
  cutoff <- sqrt(n_obs) / n_obs
  # Normalize the factors within each group so they sum to one
  sel_g <- which(groupN == group)
  sel_o <- which(groupN != group)
  rwfactor[sel_g] <- rwfactor[sel_g] / sum(rwfactor[sel_g])
  rwfactor[sel_o] <- rwfactor[sel_o] / sum(rwfactor[sel_o])
  # Treatment-group observations whose normalized weight exceeds the threshold
  drop_idx <- which(groupN == group & rwfactor > cutoff)
  if (length(drop_idx) > 0) {
    # Also drop control observations weighted at least as heavily as the
    # lightest dropped treatment observation
    drop_idx <- c(drop_idx,
                  which(groupN != group & rwfactor > min(rwfactor[drop_idx])))
  }
  # 1 = keep, 0 = drop
  if (length(drop_idx) > 0) {
    as.numeric(!is.element(seq_len(n_obs), drop_idx))
  } else {
    rep(1, n_obs)
  }
}
#############################################################
### Gini function (code by Rothe(2015))
#' Weighted Gini coefficient (after Rothe, 2015).
#'
#' @param x Numeric vector of outcomes (e.g. wages).
#' @param w Numeric vector of observation weights, same length as `x`.
#' @return The weighted Gini coefficient as a single numeric value.
Gini <- function (x, w) {
  n <- length(x)
  # Normalize weights to sum to one
  w <- w / sum(w)
  # Sort outcomes (and their weights) in increasing order once, instead of
  # recomputing order(x) three times as before
  o <- order(x)
  xs <- x[o]
  ws <- w[o]
  # Rank-weighted sum relative to the weighted mean, shifted so that
  # perfectly equal outcomes yield 0
  G <- sum(xs * seq_len(n) * ws)
  G <- 2 * G / (n * sum(xs * ws))
  G - 1 - (1 / n)
}
#############################################################
### Function for distributional statistics
#' Distributional statistics of a (reweighted) group distribution.
#'
#' Computes weighted quantiles, mean and SD (and, if `all.stats`, Gini,
#' decile ratios and the top-5% income share) for one group, with the
#' sampling weights multiplied by a reweighting factor.
#'
#' @param dep Outcome vector (possibly log-transformed, see `log.trans`).
#' @param weight Sampling weights.
#' @param groupN Group indicator (numeric or factor).
#' @param group Group value/index to evaluate.
#' @param rwfactor Reweighting factor per observation.
#' @param tau Quantiles to evaluate.
#' @param all.stats If TRUE, also compute Gini, decile ratios and the
#'   top-5% share (quantiles 0.1/0.5/0.9/0.95/0.99 are added to `tau`).
#' @param log.trans If TRUE, `dep` is exp()-transformed before computing
#'   level statistics (mean, sd, ratios).
#' @return Named numeric vector: quantiles first, then "mean","sd" (plus
#'   the additional statistics when `all.stats=TRUE`).
stat <- function(dep,weight,groupN,group=c(0,1),rwfactor,
                 tau=c(10,50,90,99)/100,
                 all.stats=FALSE,log.trans=FALSE){
  # Factor variables for group selection allowed
  if(is.factor(groupN)){
    group <- levels(groupN)[1+group]
  }
  # Select the observations of the requested group
  dep <- dep[which(groupN==group)]
  weight <- weight[which(groupN==group)]
  rwfactor <- rwfactor[which(groupN==group)]
  # Normalize weights
  #rwfactor <- rwfactor/sum(rwfactor)
  # Combine sampling weights with the reweighting factor
  w <- weight*rwfactor
  ### If all stats required: Are the required quantiles estimated?
  if(all.stats==TRUE){
    # make sure all relevant quantiles are estimated
    tau <- union(c(0.1,0.5,0.9,0.95,0.99),tau)
    tau <- tau[order(tau)]
  }
  # Weighted quantiles (the `weight=` argument partial-matches
  # Hmisc::wtd.quantile's `weights` argument)
  quantile <- wtd.quantile(dep,weight=w,probs=tau)
  # Undo a log transformation for level statistics if requested
  if(log.trans==TRUE){
    dep1 <- exp(dep)
    quantile1 <- exp(quantile)
  } else {
    dep1 <- dep
    quantile1 <- quantile
  }
  # Weighted mean and standard deviation
  mu <- wtd.mean(dep1, weight=w)
  sd <- sqrt(wtd.var(dep1, weight=w))
  # Estimate additional stats if all stats required
  if(all.stats==TRUE){
    # Overall Gini and the share going to the top 5%
    gini <- Gini(dep1, w)
    # Observations at or above the 95% quantile (match() relies on 0.95
    # being present in tau, which the union() above guarantees)
    select <- which(dep>=quantile[match(0.95,tau)])
    #gini.top <- Gini(dep1[select], w[select])
    s_top05 <- (wtd.mean(dep1[select], weight=w[select])/mu)*0.05
    # Decile ratios
    p90p10 <- quantile1[match(0.9,tau)]/quantile1[match(0.1,tau)]
    p90p50 <- quantile1[match(0.9,tau)]/quantile1[match(0.5,tau)]
    p50p10 <- quantile1[match(0.5,tau)]/quantile1[match(0.1,tau)]
    p99p90 <- quantile1[match(0.99,tau)]/quantile1[match(0.9,tau)]
    res <- c(quantile,mu,sd,gini,
             p90p10,p90p50,p50p10,p99p90,
             s_top05)
    names(res)[(length(tau)+1):length(res)] <- c("mean","sd","gini",
                                                 "p90p10","p90p50","p50p10","p99p90",
                                                 "top 5% share")
    return(res)
  } else {
    # Return results if not all stats required
    res <- c(c(quantile,mu,sd))
    names(res)[(length(tau)+1):length(res)] <- c("mean","sd")
    return(res)
  }
}
#############################################################
### Function for kernel density estimates
#' Weighted kernel density estimate for one group of observations.
#'
#' @param dep Outcome vector.
#' @param weight Optional sampling weights (default: unit weights).
#' @param groupN Optional group indicator; if NULL the whole sample is used
#'   and the group label is "all".
#' @param group Group value/index to select when `groupN` is supplied.
#' @param rwfactor Optional reweighting factors (default: ones).
#' @param px Optional scaling total: the density is multiplied by
#'   sum(weights)/px (e.g. to scale a group density by its weight share).
#' @param bw,kernel,n Passed through to stats::density().
#' @param na.rm Drop missing outcomes (and their weights) before estimation.
#' @return data.frame with columns group, x and density.
kden <- function(dep,weight=NULL,
                 groupN=NULL,group=c(0,1),
                 rwfactor=NULL,
                 px=NULL,
                 bw = "nrd0",
                 kernel="gaussian",
                 n=512,
                 na.rm = TRUE){
  # Translate a numeric group index into the corresponding factor level
  if (is.factor(groupN)) {
    group <- levels(groupN)[1 + group]
  }
  if (is.null(groupN)) {
    group <- "all"
  }
  # Default weights / reweighting factors: one per observation
  if (is.null(weight))   weight   <- rep(1, length(dep))
  if (is.null(rwfactor)) rwfactor <- rep(1, length(dep))
  # Restrict to the requested group when a group variable is supplied
  if (!is.null(groupN)) {
    keep <- which(groupN == group)
    dep <- dep[keep]
    weight <- weight[keep]
    rwfactor <- rwfactor[keep]
  }
  # Combine sampling weights with reweighting factors and normalize to one
  w <- weight * rwfactor
  w_total <- sum(w)
  w <- w / w_total
  # Scaling of the density (e.g. by the group's share of the total weight)
  scale_fac <- if (is.null(px)) 1 else w_total / px
  # Drop missing outcomes together with their weights
  if (na.rm && anyNA(dep)) {
    miss <- which(is.na(dep))
    dep <- dep[-miss]
    w <- w[-miss]
  }
  # Weighted kernel density estimate
  dens <- density(dep, weights = w, kernel = kernel, bw = bw, n = n)
  data.frame(group = rep(group, n), x = dens$x, density = dens$y * scale_fac)
}
| /functions/r-DFL-deco-2-0.R | no_license | davidgallusser/Extended_DL_deco | R | false | false | 39,438 | r | ############################################
###### DFL decomposition function #########
############################################
## Version 2.0, 26. Juni 2019
## Note:
## Group 1 is the reference group. Its wage structure
# is used for the counterfactuals.
# In order to sequentially decompose the composition effect,
# variables have to be entered separated by |
# If firstrw="marginal" the marginal of the last variable entered
# is reweighted first.
# If firstrw="conditional" the conditional distribution of the first
# variable entered in the formula is reweighted first.
# library(AER)
# data("CPS1985")
# f <- wage ~ gender | experience | education | region
# result <- dfl_deco(f, data=CPS1985, group=union, tau=c(0.5,0.75,0.8), log.trans=FALSE, trim=TRUE)
# result$quantile
# result$other.stats
###########################################################
## Package to be loaded
require(Formula) #for extended formulas
#require(reldist) #for weighted distributional statistics (i.e. quantiles, gini, etc.)
require(Hmisc) #for weighted distributional statistics (i.e. quantiles, etc.)
require(survey) #for glm models with survey data
require(ggplot2) #for the plotting function
require(reshape2) #for reshaping the data.frames
require(fastglm) #for optimzed glm solving algorithm
###########################################################
# Actual decomposition function
#' DFL reweighting decomposition (DiNardo/Fortin/Lemieux).
#'
#' Decomposes the difference between two outcome distributions (defined by
#' `group`) into a wage structure effect and a (sequentially detailed)
#' composition effect by estimating logit-based reweighting factors.
#'
#' @param formula Outcome ~ covariates; covariate blocks separated by `|`
#'   define the sequence of the detailed composition decomposition.
#' @param data Data frame.
#' @param weights Optional observation weights.
#' @param group Binary indicator defining the two distributions compared.
#' @param ws_formula Optional formula for a detailed wage structure effect.
#' @param reference Group to be reweighted; its wage structure is used for
#'   the counterfactuals (defaults to 1).
#' @param na.action NA handling passed to model.frame.
#' @param tau Quantiles at which the decomposition is evaluated.
#' @param firstrw "marginal": reweight the marginal of the last entered
#'   covariate first; "conditional": reweight the conditional distribution
#'   of the first entered covariate first.
#' @param stats If FALSE, skip the statistics and only return the factors.
#' @param all.stats If TRUE, additional inequality statistics are computed.
#' @param log.trans If TRUE, the outcome is treated as log transformed.
#' @param fast Unused here directly; pfit() defaults to fastglm.
#' @param trim If TRUE, trim observations with very large weights
#'   (Huber/Lechner/Wunsch 2013).
#' @return List with the quantile and other-statistics decomposition
#'   tables, the model frame, weights, reweighting factors (`psi`), the
#'   untrimmed data (if trimming occurred) and the trimmed share.
dfl_deco <- function(formula, data, weights, group,
                     ws_formula=NULL,
                     reference=c(1,0),
                     na.action = na.exclude,
                     tau=c(10,50,90,99)/100,
                     firstrw=c("marginal","conditional"),
                     stats=TRUE,
                     all.stats=FALSE,
                     log.trans=TRUE,
                     fast=TRUE,
                     trim=FALSE){
  ##########################################################
  ##########################################################
  ## 1) Set up data
  # Use match.call to build a model.frame call (standard NSE pattern)
  mf = match.call()
  m = match(c("formula", "data", "weights", "na.action","group"), names(mf), 0)
  mf = mf[c(1, m)]
  mf$drop.unused.levels = TRUE
  # Retrieve formula as.Formula and model.frame
  f <- as.Formula(formula)
  # Make sure that all variables (incl. those for the detailed WS effect) are selected
  if(!is.null(ws_formula)){
    fws <- Formula(ws_formula)
    f <- update(f,as.Formula(paste(". ~ . + ", as.character(fws, lhs=1)[2], "+", as.character(fws, rhs=1)[3])))
  }
  mf[[1]] = as.name("model.frame")
  mf$formula <- f
  mf = eval.parent(mf)
  mt = attr(mf, "terms")
  # Store "original" decomposition model again
  f <- as.Formula(formula)
  # Extract variables, weights and group identifier
  #reg = get_all_vars(mt, mf)
  dep = model.response(mf, "numeric")
  weight = model.weights(mf)
  if (!is.null(weight) && !is.numeric(weight)) {
    stop("'weights' must be a numeric vector")
  }
  if (!is.null(weight)) {
    weight = weight
  }
  else {
    # Default: unit weights
    weight = rep(1, length(dep))
  }
  #weight = weight/sum(weight)
  # The group indicator is the last column of the model frame
  groupN = mf[, ncol(mf)]
  #reg$groupN <- groupN
  mf$groupN <- groupN
  # Number of covariates for decomposition of composition effect
  nvar = length(f)[2]
  # Set reference group to 1 if not determined
  if(length(reference)==2){reference=1}
  cat("Reference group (to be reweighted): ", reference,"\n \n")
  # If first reweighting not specified choose marginal first as default
  firstrw = ifelse(length(firstrw)==2,"marginal",firstrw)
  # Set Progress Bar
  cat("Probit estimation...\n")
  pb <- txtProgressBar(min=0,max=1, style=3)
  nmods <- 1+nvar+ifelse(!is.null(ws_formula),4,0)
  ##########################################################
  ##########################################################
  ### 2) Fit models & estimate reweighting factors
  ##########################################################
  ### Unconditional/Sample probabilities
  mod <- groupN ~ 1
  p1 <- mean(pfit(mod,mf,weight))
  p0 <- 1-p1
  probs <- rep(p0/p1,nrow(mf))
  setTxtProgressBar(pb,1/nmods)
  ##########################################################
  ### Conditional probabilities,
  ### beginning with the LAST variable entered in the Formula
  m <- 0 # only for ProgressBar
  for(i in nvar:1){
    mod <- update(formula(f, rhs=nvar:i, collapse=TRUE), groupN ~ .)
    p1 <- pfit(mod,mf,weight)
    p0 <- 1-p1
    probs <- cbind(probs,p0/p1)
    m <- m + 1 #Progressbar
    setTxtProgressBar(pb,(1+m)/nmods)
  }
  ##########################################################
  ### Reweighting factor
  ###
  ### psi_s contains relative probabilities:
  ### first [P(t=1)/P(t=0)]*[P(t=1|X1)/P(t=0|X1)],
  ### second [P(t=0|X1)/P(t=1|X1)]*[P(t=1|X2,X1)/P(t=0|X2,X1)]
  ### etc.
  ### X1 is the variable entered last, X2 the variable entered second last etc.
  psi_s <- (probs[,1]^-1)*probs[,2]
  if(nvar==1){
    # if there is only one variable stick to psi_s and use it as weight
    psi <- as.matrix(psi_s)
  }
  else{
    # procedure to compute the weights if nvar>1
    for(i in 2:nvar){
      psi_s <- cbind(psi_s,(probs[,i]^-1)*probs[,i+1])
    }
    # Marginal of last entered variable or conditional of first entered
    # variable to be reweighted first?
    first <- ifelse(firstrw=="marginal",1,nvar)
    last <- ifelse(firstrw=="marginal",nvar,1)
    correct <- ifelse(firstrw=="marginal",1,-1)
    loopvals <- seq(first+correct,last)
    # psi contains the weights actually used to reweight the distribution;
    # psi contains on the first position the weight used first
    # (i.e. [P(t=1)/P(t=0)]*[P(t=1|X1)/P(t=0|X1)] if "marginal" and
    # [P(t=1|X1,...,X(M-1))/P(t=0|X1,...,X(M-1))]*[P(t=1|X1,...,XM)/P(t=0|X1,...,XM)]
    # if "conditional")
    psi <- as.matrix(psi_s[,first])
    colnames(psi) <- paste("psi_X",first,sep="")
    for(i in loopvals){
      psi <- cbind(psi,apply(psi_s[,first:i],1,prod))
      colnames(psi)[length(colnames(psi))] <- paste("psi_X",first,"to",i,sep="")
    }
    # Remove unused object
    rm(psi_s)
    # end weight completion procedure if nvar>1
  }
  ###########################################################
  ### Weights for decomposition of wage structure effect
  if(!is.null(ws_formula)){
    # What's the group variable value of reference group?
    if(is.factor(groupN)){
      group_val <- levels(groupN)[1+reference]
    } else {
      group_val = reference
    }
    # Set up model for unconditional prob
    mod <- as.Formula(paste(as.character(fws, lhs=1)[2],"~ 1"))
    p_S_1 <- pfit(mod,mf[which(groupN==group_val),],weight[which(groupN==group_val)])
    p_S_0 <- pfit(mod,mf[which(groupN!=group_val),],weight[which(groupN!=group_val)])
    setTxtProgressBar(pb,(nmods-2)/nmods)
    # Model for conditional prob
    mod <- fws
    p_S_X_1 <- pfit(mod,mf[which(groupN==group_val),],weight[which(groupN==group_val)])
    p_S_X_0 <- pfit(mod,mf[which(groupN!=group_val),],weight[which(groupN!=group_val)])
    psi_S_1 <- (1-p_S_1)/(1-p_S_X_1)
    psi_S_0 <- (1-p_S_0)/(1-p_S_X_0)
    # Add wage structure weight to main data.frame
    psi_S <- rep(1,nrow(mf))
    select <- which(groupN==group_val)
    psi_S[select] <- psi_S_1
    select <- which(groupN!=group_val)
    psi_S[select] <- psi_S_0
    # set text bar
    setTxtProgressBar(pb,1)
  }
  cat("\n\n")
  ###########################################################
  ###########################################################
  ### Compute decomposition terms
  untrimmed <- NULL
  trimshare <- 0
  ##########################################################
  # compute decomposition if stats==TRUE
  if(stats==TRUE){
    ##########################################################
    # Trimming of weights being too large
    if(trim==TRUE){
      # trim main reweighting factor
      trimselect <- trimming(psi[,nvar],groupN,reference)
      # trim ws deco rw factor if required
      if(!is.null(ws_formula)){
        # Find ws variable
        ws_var <- as.character(fws, lhs=1)[2]
        select <- which(groupN==group_val)
        trimselect[select] <- trimming(psi_S[select],mf[select,ws_var],0)
        select <- which(groupN!=group_val)
        trimselect[select] <- trimming(psi_S[select],mf[select,ws_var],0)
      }
      # Compute trim stat:
      trimshare <- 1- sum(trimselect)/length(trimselect)
      if(trimshare>0){
        # Prepare to export untrimmed data
        untrimmed <- data.frame(trimselect,dep,weight,groupN,psi)
        # Create trimmed data
        dep <- dep[which(trimselect==1)]
        weight <- weight[which(trimselect==1)]
        groupN <- groupN[which(trimselect==1)]
        psi <- as.matrix(psi[which(trimselect==1),])
        if(!is.null(ws_formula)){
          untrimmed <- cbind(untrimmed,mf[,ws_var],psi_S)
          # Fix: was names(untrimmend) — a typo that errored whenever reached
          names(untrimmed)[ncol(untrimmed)-1] <- "ws_var"
          # Fix: subset the WS factor itself (was psi[which(...)], which
          # linearly indexed the already-trimmed psi matrix)
          psi_S <- psi_S[which(trimselect==1)]
          # NOTE(review): mf itself is not trimmed here, so later selections
          # via mf[,ws_var] may be inconsistent with the trimmed vectors —
          # confirm trimming + ws_formula is used together intentionally.
        }
      }
    }
    ##########################################################
    ### Observed distributions
    F1 <- stat(dep,weight,groupN,group=1,rwfactor=rep(1,length(weight)),
               tau=tau, all.stats=all.stats, log.trans=log.trans)
    F0 <- stat(dep,weight,groupN,group=0,rwfactor=rep(1,length(weight)),
               tau=tau, all.stats=all.stats, log.trans=log.trans)
    ##########################################################
    # Counterfactual distribution(s)
    # if reference group==0 all rw factors are the inverse of the computed
    pow <- ifelse(reference==1,1,-1)
    first <- ifelse(reference==1,1,nvar)
    last <- ifelse(reference==1,nvar,1)
    loopvals <- seq(first,last)
    FC <- NULL
    for(i in 1:nvar){
      FC <- cbind(FC,stat(dep,weight,groupN,group=reference,rwfactor=psi[,i]^pow,
                          tau=tau,all.stats=all.stats, log.trans=log.trans))
    }
    if(nvar==1){
      FC <- as.matrix(FC)
    }
    ##########################################################
    # Decomposition of aggregate effects and detailed composition effects
    Delta <- cbind(F1 - F0, F1 - FC[,nvar], FC[,nvar] - F0)
    if(reference==1){
      colnames(Delta) <- c("Delta_O","Delta_X","Delta_S")
    } else {
      colnames(Delta) <- c("Delta_O","Delta_S","Delta_X")
    }
    if(nvar>1){
      if(reference==1){
        Delta <- cbind(Delta,F1-FC[,1])
        colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",1,sep="")
        for(i in 2:nvar){
          Delta <- cbind(Delta,FC[,i-1]-FC[,i])
          colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",i,sep="")
        }
      } else {
        for(i in nvar:2){
          Delta <- cbind(Delta,FC[,i]-FC[,i-1])
          colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",i,sep="")
        }
        Delta <- cbind(Delta,FC[,1]-F0)
        colnames(Delta)[length(colnames(Delta))] <- paste("Delta_X",1,sep="")
      }
    }
    ###########################################################
    ### Decomposition of wage structure effect
    if(!is.null(ws_formula)){
      ws_var <- as.character(fws, lhs=1)[2]
      ws_var_val <- 1
      if(is.factor(mf[,ws_var])){
        ws_var_val <- levels(mf[,ws_var])[2]
      }
      # Select only observations with ws group 0
      select <- which(mf[,ws_var]!=ws_var_val)
      # Compute counterfactual values
      FCW1 <- stat(dep[select],weight[select],groupN[select],group=1,rwfactor=psi_S[select],
                   tau=tau, all.stats=all.stats, log.trans=log.trans)
      FCW0 <- stat(dep[select],weight[select],groupN[select],group=0,rwfactor=psi_S[select],
                   tau=tau, all.stats=all.stats, log.trans=log.trans)
      Delta_WS_X1 <- (F1-FCW1) - (F0-FCW0)
      Delta_WS_other <- Delta[,ifelse(reference==1,3,2)] - Delta_WS_X1
      Delta <- cbind(Delta,Delta_WS_X1,Delta_WS_other)
    }
    ##########################################################
    # Prepare results of decomposition for export
    quantile=cbind(tau,Delta[1:length(tau),])
    other.stats=Delta[(length(tau)+1):nrow(Delta),]
  } else {
    ##########################################################
    # if no stats return empty objects
    quantile=NULL
    other.stats=NULL
  }
  ##########################################################
  ### Export results
  res <- list(quantile=quantile,
              other.stats=other.stats,
              formula=formula,
              mf=mf,
              weight=weight,
              psi=psi,
              reference=reference,
              tau=tau,
              firstrw=firstrw,
              all.stats=all.stats,
              log.trans=log.trans,
              untrimmed=untrimmed,
              trimshare=trimshare)
  if(!is.null(ws_formula)){
    # Add WS weights to the weight matrix for export
    psi <- cbind(psi, psi_S)
    res <- list(quantile=quantile,
                other.stats=other.stats,
                formula=formula,
                mf=mf,
                weight=weight,
                psi=psi,
                reference=reference,
                tau=tau,
                firstrw=firstrw,
                formula.rw=fws,
                all.stats=all.stats,
                log.trans=log.trans,
                untrimmed=untrimmed,
                trimshare=trimshare)
  }
  return(res)
}
#############################################################
### Function that decomposes the wage structure effect of
### DiNardo/Lemieux 1997
#' Decompose the wage structure effect following DiNardo & Lemieux (1997)
#' into a marginal, a sorting and a residual wage structure component.
#'
#' @param res Result list from dfl_deco() estimated WITH a `ws_formula`
#'   (i.e. containing `formula.rw` and the psi_S column in `psi`).
#' @return List with `quantile` and `other.stats` decomposition tables and
#'   the reweighting factors used (psi_S per group, psi_M, psi_J).
dfl_deco_ws <- function(res){
  # Extract results
  mf <- res$mf
  weight <- res$weight
  groupN <- mf[,"groupN"]
  reference <- res$reference
  # What's the group variable value of reference group?
  if(is.factor(groupN)){
    group_val <- levels(groupN)[1+reference]
  } else {
    group_val = reference
  }
  # Wage structure effect variable
  fws <- res$formula.rw
  if(is.null(fws)){stop("No wage structure decomposition in first place")}
  ws_var <- as.character(fws, lhs=1)[2]
  ws_var_val <- 1
  if(is.factor(mf[,ws_var])){
    ws_var_val <- levels(mf[,ws_var])[2]
  }
  # Retrieve rw factors: psi's last column is the WS factor, the one before
  # it the aggregate composition factor
  psi_X <- res$psi[,ncol(res$psi)-1]
  psi_S <- res$psi[,ncol(res$psi)]
  # Other parameters carried over from the original decomposition
  tau <- res$tau
  all.stats <- res$all.stats
  log.trans <- res$log.trans
  ###########################################
  # Select group 1 and group 0 observations as well as the observations
  # which are NOT in the group for which the detailed ws effect is estimated
  select1 <- which(groupN==group_val)
  select0 <- which(groupN!=group_val)
  selectWS <- which(mf[,ws_var]!=ws_var_val)
  selectWS1 <- intersect(selectWS,select1)
  selectWS0 <- intersect(selectWS,select0)
  selectNonWS1 <- intersect(which(mf[,ws_var]==ws_var_val),select1)
  ###########################################
  # Estimations
  # Set up model for unconditional prob
  mod <- as.Formula(paste(as.character(fws, lhs=1)[2],"~ 1"))
  p_S_0 <- mean(pfit(mod,mf[select0,],weight[select0]))
  p_S_1 <- mean(pfit(mod,mf[select1,],weight[select1]))
  p_S_1.C <- mean(pfit(mod,mf[select1,],weight[select1]*psi_X[select1]))
  # Model for conditional prob
  mod <- fws
  p_S_X_1.C <- pfit(mod,mf[select1,],weight[select1]*psi_X[select1])
  # Marginal weights
  psi_S_1.M <- rep(1,nrow(mf))
  psi_S_1.M[selectNonWS1] <- p_S_0/p_S_1
  p_S_X_1 <- 1-(1-p_S_1)/psi_S[selectWS1] #can be retrieved from the WS factor for psi_1,S1
  #psi_S_1.M[selectWS1] <- psi_S[selectWS1]*(1 - (p_S_0/p_S_1)*p_S_X_1)
  psi_S_1.M[selectWS1] <- (psi_S[selectWS1]/(1-p_S_1))*(1 - (p_S_0/p_S_1)*p_S_X_1)
  psi_S_1.M.all <- psi_S_1.M
  # Sorting weights
  psi_S_1.J <- (1-p_S_1.C)/(1-p_S_X_1.C)
  #psi_S_0 <- (1-p_S_0)/(1-p_S_X_0)
  # Add wage structure weight to main data.frame
  #psi_S_1.M.all <- rep(1,nrow(mf))
  #psi_S_1.M.all[select1] <- psi_S_1.M
  psi_S_1.J.all <- rep(1,nrow(mf))
  psi_S_1.J.all[select1] <- psi_S_1.J
  #select <- which(groupN!=group_val)
  #psi_S[select] <- psi_S_0
  ###########################################
  # Statistics
  # Reference group: if reference==0 the rw factors enter inverted
  pow <- ifelse(reference==1,1,-1)
  # Outcome is the first column of the model frame
  dep <- mf[,1]
  ### Observed distributions
  F1 <- stat(dep,weight,groupN,group=1,rwfactor=rep(1,length(weight)),
             tau=tau, all.stats=all.stats, log.trans=log.trans)
  F0 <- stat(dep,weight,groupN,group=0,rwfactor=rep(1,length(weight)),
             tau=tau, all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 1: Aggregate counterfactual
  FC1 <- stat(dep,weight,groupN,group=reference,rwfactor=psi_X^pow,
              tau=tau,all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 2&3: Wage structure counterfactual like in DiNardo&Lemieux 1997
  FCW1 <- stat(dep[selectWS],weight[selectWS],groupN[selectWS],group=1,rwfactor=psi_S[selectWS],
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  FCW0 <- stat(dep[selectWS],weight[selectWS],groupN[selectWS],group=0,rwfactor=psi_S[selectWS],
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 4: FCW0 with wage structure of t=1
  FC1J <- stat(dep[selectWS],weight[selectWS],groupN[selectWS],group=1,rwfactor=psi_X[selectWS]*psi_S_1.J.all[selectWS],
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  ### Counterfactual 5: Distribution in t=1 with marginal x1 like in t=0 but x2 like in t=1
  FC1M <- stat(dep,weight,groupN,group=1,rwfactor=psi_S_1.M.all,
               tau=tau, all.stats=all.stats, log.trans=log.trans)
  ###########################################
  ### Decomposition into marginal, sorting and wage structure components
  Delta_Marginal <- F1 - FC1M
  Delta_Sorting <- (FC1M - FCW1) - (FC1 - FC1J)
  Delta_WS <- (FC1 - FC1J) - (F0 - FCW0)
  l <- 1:length(tau)
  quantile <- data.frame(tau=tau,
                         marginal=Delta_Marginal[l],
                         sorting=Delta_Sorting[l],
                         wage_structure=Delta_WS[l])
  l <- (length(tau)+1):length(Delta_Sorting)
  other.stats <- data.frame(marginal=Delta_Marginal[l],
                            sorting=Delta_Sorting[l],
                            wage_structure=Delta_WS[l])
  ###########################################
  ### Return results
  res <- list(quantile=quantile,
              other.stats=other.stats,
              psi_S1.1=psi_S[selectWS1],
              psi_S1.0=psi_S[selectWS0],
              #psi_M=psi_S_1.M.all[selectWS1],
              psi_M=psi_S_1.M.all,
              psi_J=psi_S_1.J.all[selectWS1]
              )
  # Explicit return (previously the function ended on the assignment, so
  # the list was only returned invisibly)
  return(res)
}
#############################################################
### Plot function for composition effect results
#' Plot decomposition results across quantiles.
#'
#' @param result Decomposition result list (from dfl_deco) with a
#'   `quantile` matrix whose first column is `tau`.
#' @param type Which terms to plot: 1 = observed difference plus the main
#'   decomposition terms, 2 = all individual terms except the observed
#'   difference, 3 = detailed terms only. Defaults to 1.
#' @return A ggplot object (one line + point series per effect over tau).
dfl_deco_plot <- function(result,type=c(1,2,3)){
  # Use the first element so the vector default c(1,2,3) behaves like
  # type=1: a length>1 condition in if() is an error since R 4.2, and the
  # scalar comparisons below need || rather than the elementwise |.
  type <- type[1]
  result <- result[["quantile"]]
  if(type==1||ncol(result)==4){
    ## type 1: Observed difference and main decomposition terms (S,X)
    diff <- as.data.frame(result[,c(1:4)])
  } else if(type==2){
    ## type 2: All individual terms besides observed difference
    diff <- as.data.frame(result[,-2])
  } else{
    ## type 3: Only detailed terms
    diff <- as.data.frame(result[,-c(2:4)])
  }
  # Long format: one row per (tau, effect) pair for ggplot
  diff <- melt(diff, id.vars="tau", measure.vars = names(diff)[-1], variable.name= "effect", value.name="delta")
  plot <- ggplot(diff, aes(tau,delta, colour = effect)) + geom_hline(yintercept = 0, colour="grey") + geom_line()+geom_point(aes(shape=effect, color=effect)) + scale_shape_manual(values=c(15:20,0:14,15:20,0:14))
  return(plot)
}
#############################################################
### DFL deco: Counterfactual sorting condtional on x_1
### DFL decomposition: counterfactual sorting conditional on x_1.
### Builds reweighting factors answering: what would the reference-group
### outcome distribution look like if the conditional distribution of
### x_2 | x_1 were as in the other group (psi), and additionally if the
### marginal of x_1 were as in the other group while the marginal of x_2
### stayed fixed (psi.2)?
###
### Arguments:
###   formula   multi-part Formula (package Formula); the LAST right-hand
###             part defines the conditioning variable x_1
###   data      data frame
###   weights   optional observation weights (default 1)
###   group     grouping variable (e.g. period t vs t'); passed via formula part
###   tau       quantiles at which the decomposition is evaluated
###   all.stats if TRUE, additional distributional statistics are computed
###   log.trans if TRUE, the dependent variable is treated as log-transformed
### Returns a list: quantile-level results, other stats, the formula, the
### model frame with psi/psi.2 attached, weights, reference and tau.
counter_cond <- function(formula, data, weights, group,
                         na.action = na.exclude,
                         tau=c(10,50,90,99)/100,
                         all.stats=FALSE,
                         log.trans=TRUE){
##########################################################
##########################################################
## 1) Set up data
# Use match.call to rebuild the call and extract the data vectors
mf = match.call()
m = match(c("formula", "data", "weights", "na.action","group"), names(mf), 0)
mf = mf[c(1, m)]
mf$drop.unused.levels = TRUE
# Retrieve formula as.Formula and model.frame
f <- as.Formula(formula)
if(length(f)[2] < 2) stop("Define a grouping variable!")
# Extract model.frame (evaluated in the caller's environment)
mf[[1]] = as.name("model.frame")
mf$formula <- f
mf = eval.parent(mf)
mt = attr(mf, "terms")
# Extract variables, weights and group identifier
#reg = get_all_vars(mt, mf)
dep = model.response(mf, "numeric")
weight = model.weights(mf)
if (!is.null(weight) && !is.numeric(weight)) {
stop("'weights' must be a numeric vector")
}
if (!is.null(weight)) {
weight = weight
} else {
# default to unit weights when none supplied
weight = rep(1, length(dep))
}
#weight = weight/sum(weight)
groupN = mf[, ncol(mf)]
#reg$groupN <- groupN
mf$groupN <- groupN
# Number of covariate sets (formula parts) for the composition effect
nvar = length(f)[2]
reference=1
##########################################################
##########################################################
## 2) Extracting reference group value of main decomposition variable
## and conditioning variable's levels
# What's the group variable value of reference group? [Main decomposition]
if(is.factor(groupN)){
group_val <- levels(groupN)[1+reference]
} else {
group_val = reference
}
# What is the name of the conditioning var? (last RHS part of the formula)
cond_var <- as.character(update(formula(f, rhs=nvar, collapse=TRUE), . ~ .))[3]
mf$cond_var <- mf[,cond_var]
if(is.factor(mf[,cond_var])){
group_val_cond <- levels(mf[,cond_var])[2]
} else {
group_val_cond = 1
}
##########################################################
##########################################################
## 3) Compute reweighting factor for scenario:
## What would distribution look like if distribution of x_2|x_1
## was like in t' but marginal distribution of x_1 and x_2 was like in t?
##################################
## Set the progress bar
# NOTE(review): message says "Probit" but pfit() fits a logit link by
# default -- confirm the intended wording/link.
cat("Probit estimation...\n")
pb <- txtProgressBar(min=0,max=1, style=3)
nmods <- 12
##################################
## 1) RW factor for x1=1: Main decomposition only in group x_1=1
mod <- update(formula(f, rhs=1:(nvar-1), collapse=TRUE), groupN ~ .)
select <- which(mf[,"cond_var"]==group_val_cond)
p_t_x2x1.0 <- pfit(mod,mf[select,],
                   weight[select],
                   newdata = mf)
mod <- groupN ~ 1
p_t_x1.0 <- mean(pfit(mod,mf[select,],
                      weight[select]))
# odds-ratio style reweighting factor for the x1=1 subsample
PsiA <- ((1-p_t_x2x1.0)/p_t_x2x1.0)*((p_t_x1.0)/(1-p_t_x1.0))
# PB
setTxtProgressBar(pb,2/nmods)
##################################
## 2) Reweighting factor B for x1=0
#P(t)
mod <- groupN ~ 1
p_t.1 <- mean(pfit(mod,mf,weight))
#P(t|x'1)
mod <- groupN ~ 1
select <- which(mf[,"cond_var"]!=group_val_cond)
p_t.1_x1.0 <- mean(pfit(mod,mf[select,],weight[select]))
#P(t|x2)
mod <- update(formula(f, rhs=1:(nvar-1), collapse=TRUE), groupN ~ .)
p_t.1_x2 <- pfit(mod,mf,
                 weight,
                 newdata = mf)
# PB
setTxtProgressBar(pb,5/nmods)
#P(t|x2,x'1)
mod <- update(formula(f, rhs=1:(nvar-1), collapse=TRUE), groupN ~ .)
select <- which(mf[,"cond_var"]!=group_val_cond)
p_t.1_x2x1.0 <- pfit(mod,mf[select,],
                     weight[select],
                     newdata = mf)
# PB
setTxtProgressBar(pb,6/nmods)
#P(x'1)
mod <- cond_var ~ 1
p_x1.0 <- 1-mean(pfit(mod,mf,weight))
#P(x'1|x2)
mod <- as.character(update(formula(f, rhs=1:(nvar-1), collapse=TRUE), . ~ .))[3]
mod <- as.formula(paste(cond_var,"~",mod,sep=""))
p_x1.0_x2 <- 1-pfit(mod,mf,
                    weight,
                    newdata = mf)
# PB
setTxtProgressBar(pb,8/nmods)
# The factor
#          P(t|x2)     P(t|x'1)   P(x'1)
# PsiB = ----------- ---------- ----------
#        P(t|x2,x'1)    P(t)    P(x'1|x2)
PsiB = (p_t.1_x2/p_t.1_x2x1.0)*
  (p_t.1_x1.0/p_t.1)*
  (p_x1.0/p_x1.0_x2)
##################################
## 3) Reweighting factor C for x1=0
#P(t) (computed above: p_t.1)
#P(t')
#P(x1|t)
#P(x'1|t)
mod <- cond_var ~ 1
select <- which(groupN==group_val)
p_x1.1_t.1 <- mean(pfit(mod,mf[select,],weight[select]))
#P(x1|t')
mod <- cond_var ~ 1
select <- which(groupN!=group_val)
p_x1.1_t.0 <- mean(pfit(mod,mf[select,],weight[select]))
# PB
setTxtProgressBar(pb,10/nmods)
#P(t|x2) (computed above: p_t.1_x2)
#P(t'|x2)
#P(x1|x2,t')
mod <- as.character(update(formula(f, rhs=1:(nvar-1), collapse=TRUE), . ~ .))[3]
mod <- as.formula(paste(cond_var,"~",mod,sep=""))
select <- which(groupN!=group_val)
p_x1.1_x2t.0 <- pfit(mod,mf[select,],weight[select],
                     newdata = mf)
#P(x'1|x2,t)
mod <- as.character(update(formula(f, rhs=1:(nvar-1), collapse=TRUE), . ~ .))[3]
mod <- as.formula(paste(cond_var,"~",mod,sep=""))
select <- which(groupN==group_val)
p_x1.0_x2t.1 <-1-pfit(mod,mf[select,],weight[select],
                      newdata = mf)
# PB
setTxtProgressBar(pb,12/nmods)
cat("\n")
# The factor
#                P(x1|x2,t')  P(x'1|t)  P(t'|x2)  P(t)
# PsiC = P(x1|t) ----------- --------  --------  ----
#                P(x'1|x2,t)  P(x1|t')  P(t|x2)   P(t')
PsiC <- p_x1.1_t.1*(p_x1.1_x2t.0/p_x1.0_x2t.1)*
  ((1-p_t.1_x2)/p_t.1_x2)*
  ((1-p_x1.1_t.1)/p_x1.1_t.0)*
  (p_t.1/(1-p_t.1))
##########################################################
##########################################################
## 4) Construct weights for second counterfactual:
## What is the wage distribution if not only x_2|x_1 was like in t' but
## also the marginal of x_1 while the marginal of x_2 was still like in t.
PsiA.2 <- (p_x1.1_t.0/p_x1.1_t.1) * PsiA
PsiB.2 <- PsiB
PsiC.2 <- (p_x1.1_t.0/p_x1.1_t.1) * PsiC
##################################
## 4) Add the weights to mf (psi for the first, psi.2 for the second
## counterfactual; rows split by the value of the conditioning variable)
mf$psi <- NA
mf$psi.2 <- NA
select <- which(mf$groupN==group_val&mf$cond_var==group_val_cond)
mf[select,"psi"] <- PsiA[select]
mf[select,"psi.2"] <- PsiA.2[select]
select <- which(mf$groupN==group_val&mf$cond_var!=group_val_cond)
mf[select,"psi"] <- PsiB[select]-PsiC[select]
mf[select,"psi.2"] <- PsiB.2[select]-PsiC.2[select]
# Report summary stats about psi (negative weights are possible by
# construction; their share is printed as a diagnostic)
cat("Summary of psi:\n")
print(summary(mf$psi))
cat("\nShare of negative weights:\n")
cat(length(mf$psi[which(mf$psi<0)])/length(mf$psi[which(is.na(mf$psi)==FALSE)]),"\n")
cat("\nSummary of psi.2:\n")
print(summary(mf$psi.2))
cat("\nShare of negative weights:\n")
cat(length(mf$psi.2[which(mf$psi.2<0)])/length(mf$psi.2[which(is.na(mf$psi.2)==FALSE)]),"\n")
##########################################################
##########################################################
## 5) Evaluate the observed and the two counterfactual distributions
## for the reference group and difference them
F1 <- stat(dep,weight,groupN,group=1,rwfactor=rep(1,length(weight)),
           tau=tau, all.stats=all.stats, log.trans=log.trans)
FC.1 <- stat(dep,weight,groupN,group=1,rwfactor=mf$psi,
             tau=tau, all.stats=all.stats, log.trans=log.trans)
FC.2 <- stat(dep,weight,groupN,group=1,rwfactor=mf$psi.2,
             tau=tau, all.stats=all.stats, log.trans=log.trans)
Delta <- cbind(F1-FC.1,FC.1-FC.2)
colnames(Delta) <- c("Delta_Sorting_X1","Delta_Marginal_X1")
quantile=cbind(tau,Delta[1:length(tau),])
other.stats=Delta[(length(tau)+1):nrow(Delta),]
res <- list(quantile=quantile,
            other.stats=other.stats,
            formula=f,
            mf=mf,
            weight=weight,
            psi=mf$psi,
            psi.2=mf$psi.2,
            reference=reference,
            tau=tau)
return(res)
}
#############################################################
### dfl_diag():
### Diagnosis tool to compare covariates distribution
### of actual and reweighted distribution
dfl_diag <- function(result, compareRef=FALSE, psi.2=FALSE){
  # Diagnostic: compare the covariate distribution (weighted means/SDs)
  # of the reweighted reference sample with the actual distribution of
  # the comparison group (or, if compareRef=TRUE, of the reference group
  # itself). Returns a matrix with one row per covariate.
  fml <- as.Formula(result$formula)
  refgrp <- result$reference
  dat <- result$mf
  wts <- result$weight
  # Pick the requested reweighting factor (psi or psi.2)
  if(psi.2 == FALSE){
    psiMat <- as.matrix(result$psi)
  } else {
    psiMat <- as.matrix(result$psi.2)
  }
  # Reduce a multi-column psi matrix to the relevant single column
  lastCol <- ncol(psiMat)
  if(lastCol == 1){
    psiVec <- psiMat[, 1]
  } else if(colnames(psiMat)[lastCol] == "psi_S"){
    psiVec <- psiMat[, lastCol - 1]
  } else {
    psiVec <- psiMat[, lastCol]
  }
  # Translate a numeric reference index into the factor level, if needed
  if(is.factor(dat$groupN)){
    refgrp <- levels(dat$groupN)[refgrp + 1]
  }
  idxRef <- which(dat$groupN == refgrp)
  # Comparison sample: the other group by default, the reference group
  # itself when compareRef==TRUE
  if(compareRef == FALSE){
    idxCom <- which(dat$groupN != refgrp)
  } else {
    idxCom <- idxRef
  }
  # Model matrices without the intercept column
  mm <- formula(fml, collapse=TRUE)
  xRef <- model.matrix(mm, dat)[idxRef, -1]
  xCom <- model.matrix(mm, dat)[idxCom, -1]
  wRef <- wts[idxRef]
  wCom <- wts[idxCom]
  psiVec <- psiVec[idxRef]
  # Weighted means and variances, column by column; the reweighted
  # moments use psi * weight
  mean_obs <- apply(xCom, 2, function(v) wtd.mean(v, weights=wCom))
  mean_rw <- apply(xRef, 2, function(v) wtd.mean(v, weights=psiVec*wRef))
  sd_ob <- apply(xCom, 2, function(v) wtd.var(v, weights=wCom))
  sd_rw <- apply(xRef, 2, function(v) wtd.var(v, weights=psiVec*wRef))
  mean_diff <- mean_obs - mean_rw
  sd_diff <- sqrt(sd_ob + sd_rw)
  sd_ob <- sqrt(sd_ob)
  sd_rw <- sqrt(sd_rw)
  # One row per covariate, columns = the six statistics above
  t(rbind(mean_obs, mean_rw, mean_diff, sd_ob, sd_rw, sd_diff))
}
#############################################################
### dfl_stat():
### Returns decripitive statistics of covariates
### Descriptive statistics of the covariates by group.
###
### Arguments:
###   formula   multi-part Formula; last part defines the grouping variable
###   data      data frame
###   weights   optional observation weights (default 1)
###   group     grouping variable
###   reference index of the reference group (default 1)
###   constant  if TRUE, keep the intercept column of the model matrix
### Returns a list with:
###   means  matrix of weighted means/SDs per covariate for reference,
###          comparison group and their difference
###   N      2x2 matrix of observation counts and sums of weights
dfl_stat <- function(formula,
                     data,
                     weights,
                     group,
                     na.action = na.exclude,
                     reference=1,
                     constant=FALSE){
# Rebuild the call and evaluate model.frame in the caller's environment
mf = match.call()
m = match(c("formula", "data", "weights", "na.action","group"), names(mf), 0)
mf = mf[c(1, m)]
mf$drop.unused.levels = TRUE
# Retrieve formula as.Formula and model.frame
f <- as.Formula(formula)
mf[[1]] = as.name("model.frame")
mf$formula <- f
mf = eval.parent(mf)
mt = attr(mf, "terms")
# Store "original" decomposition model again
f <- as.Formula(formula)
# Extract variables, weights and group identifier
#reg = get_all_vars(mt, mf)
weight = model.weights(mf)
if (!is.null(weight) && !is.numeric(weight)) {
stop("'weights' must be a numeric vector")
}
if (!is.null(weight)) {
weight = weight
}
else {
# default to unit weights when none supplied
weight = rep(1, nrow(mf))
}
#weight = weight/sum(weight)
groupN = mf[, ncol(mf)]
#reg$groupN <- groupN
mf$groupN <- groupN
# Select observations of reference group
comparison <- ifelse(reference==1,0,1)
if(is.factor(mf$groupN)){
reference <- levels(mf$groupN)[reference + 1]
comparison <- levels(mf$groupN)[which(levels(mf$groupN)!=reference)]
}
selectRef <- which(mf$groupN==reference)
selectCom <- which(mf$groupN!=reference)
# Prepare model matrices; drop the intercept column unless constant=TRUE
mod <- formula(f, collapse=TRUE) #include reference group of cat. variables by +0
if(constant==FALSE){
mRef <- as.matrix(model.matrix(mod,mf)[selectRef,-1])
mCom <- as.matrix(model.matrix(mod,mf)[selectCom,-1])
}else{
mRef <- as.matrix(model.matrix(mod,mf)[selectRef,])
mCom <- as.matrix(model.matrix(mod,mf)[selectCom,])
}
wRef <- weight[selectRef]
wCom <- weight[selectCom]
# Weighted means, differences in means, and variances/SDs per covariate
mean_Ref <- apply(mRef,2,function(x) wtd.mean(x, weights=wRef))
mean_Com <- apply(mCom,2,function(x) wtd.mean(x, weights=wCom))
sd_Ref <- apply(mRef,2,function(x) wtd.var(x, weights=wRef))
sd_Com <- apply(mCom,2,function(x) wtd.var(x, weights=wCom))
mean_diff <- mean_Ref - mean_Com
sd_diff <- sqrt(sd_Ref + sd_Com)
sd_Ref <- sqrt(sd_Ref)
sd_Com <- sqrt(sd_Com)
# Observation counts and sums of weights per group
N <- matrix(c(length(wRef),length(wCom),sum(wRef),sum(wCom)),ncol=2,byrow=TRUE)
colnames(N) <- c(reference,comparison)
rownames(N) <- c("Obs.","Sum of weights")
# Export table: one row per covariate, labeled columns
res <- t(rbind(mean_Ref,mean_Com,mean_diff,sd_Ref, sd_Com,sd_diff))
colnames(res) <- c(paste(rep("mean",3),c(reference,comparison,"diff"),sep="_"),paste(rep("sd",3),c(reference,comparison,"diff"),sep="_"))
res <- list(means=res, N=N)
return(res)
}
#############################################################
## Function for fitting and predicting Conditional Probabilities
## Fit a weighted binary-response GLM and return predicted probabilities.
##
##   mod     model formula (response must be binary or a 2-level factor)
##   df      data frame with the model variables
##   w       observation weights
##   newdata optional data frame for out-of-sample prediction; when NULL,
##           in-sample fitted probabilities are returned
##   fast    if TRUE (default), use fastglm on the model matrix; else glm
##
## NOTE(review): despite the "probit" name in old commented-out code (and
## the "Probit estimation..." message printed by callers), both branches
## fit a LOGIT link -- confirm which link is intended.
pfit <- function(mod,df,w, newdata=NULL, fast=TRUE){
# Without survey package
#dep <- model.frame(mod,df)[,1]
#reg <- model.matrix(mod,df)
#probit <- glm(dep~reg, weights=w, family = binomial(link = "probit"), na.action=na.exclude, y=FALSE, model=FALSE)
# With survey package
#design <- svydesign(~0, data=df, weights=~w)
#m1 <- svyglm(mod, data=df, design=design,family=quasibinomial(link="probit"))
df <- cbind(df,w)
if(fast==FALSE){
# With glm (slower, but uses standard predict machinery)
m1 <- glm(mod, data=df, family=binomial(link="logit"),weights=w)
p_X_1 <- predict.glm(m1, newdata=newdata, type="response", na.action = na.exclude)
}else{
## With fastglm: build the model matrix and response by hand
df <- model.frame(mod, data=df,weights=w)
# Binarize a non-numeric response: the SECOND distinct value (in order
# of appearance) is coded 1.
# NOTE(review): this depends on row order of df, not on factor levels --
# verify that it matches the coding assumed by callers.
if(!is.numeric(df[,1])){
df[,1] <- as.numeric(df[,1]==unique(df[,1])[2])
}
m1 <- fastglm(model.matrix(mod,df),df[,1],
              family = binomial(link = "logit"),
              weights=df$`(weights)`, fitted=FALSE)
# NOTE(review): this local helper is named "logit" but computes the
# INVERSE logit (logistic CDF) -- the usage below is consistent with that.
logit <- function(x){1/(1+exp(-x))}
if(is.null(newdata)){
p_X_1 <- logit(as.numeric(model.matrix(mod,df)%*%coef(m1)))
}else{
p_X_1 <- logit(as.numeric(model.matrix(mod,newdata)%*%coef(m1)))
}
}
# Truncate weights (disabled)
#p_X_1[which(p_X_1 < 0.01)] <- 0.01
#p_X_1[which(p_X_1 > 0.99)] <- 0.99
return(p_X_1)
}
#############################################################
### Trimming function
# Adapted trimming function as suggested in Huber, Lechner, Wunsch (2013: 9f.)
trimming <- function(rwfactor,groupN,group=c(0,1)){
  # Trim extreme reweighting factors, following the rule suggested in
  # Huber, Lechner & Wunsch (2013: 9f.).
  # Returns a 0/1 vector: 0 = observation dropped, 1 = observation kept.
  # Factor group identifiers are translated to the corresponding level
  if(is.factor(groupN)){
    group <- levels(groupN)[1+group]
  }
  nObs <- length(groupN)
  cutoff <- sqrt(nObs)/nObs  # trimming threshold
  # Normalize the factors within each group so they sum to one
  idxIn <- which(groupN == group)
  idxOut <- which(groupN != group)
  rwfactor[idxIn] <- rwfactor[idxIn]/sum(rwfactor[idxIn])
  rwfactor[idxOut] <- rwfactor[idxOut]/sum(rwfactor[idxOut])
  # Drop every treated observation whose normalized weight exceeds the
  # threshold ...
  dropIdx <- which(groupN == group & rwfactor > cutoff)
  if(length(dropIdx) > 0){
    # ... and every control observation weighted at least as heavily as
    # the smallest weight dropped in the treatment group
    dropIdx <- c(dropIdx, which(groupN != group & rwfactor > min(rwfactor[dropIdx])))
  }
  if(length(dropIdx) > 0){
    keep <- as.numeric(!is.element(seq_len(nObs), dropIdx))
  } else {
    keep <- rep(1, nObs)
  }
  return(keep)
}
#############################################################
### Gini function (code by Rothe(2015))
Gini <- function (x, w) {
  # Weighted Gini coefficient (adapted from code by Rothe (2015)).
  #
  # x: outcome vector; w: weights (normalized internally to sum to one).
  # With equal weights this reduces to the standard sample Gini
  # 2*sum(i*x_(i))/(n*sum(x)) - (n+1)/n.
  #
  # Fix: the original recomputed order(x) three times; sort once and reuse.
  n <- length(x)
  w <- w/sum(w)
  ord <- order(x)
  xs <- x[ord]
  ws <- w[ord]
  G <- sum(xs * seq_len(n) * ws)
  G <- 2 * G/(n * sum(xs * ws))
  G - 1 - (1/n)
}
#############################################################
### Function for distributional statistics
### Distributional statistics of dep for one group under reweighting.
###
###   dep       outcome vector (on the log scale if log.trans=TRUE)
###   weight    sampling weights
###   groupN    group identifier vector
###   group     group to evaluate (numeric index or factor offset)
###   rwfactor  reweighting factor multiplied into the weights
###   tau       quantiles to compute
###   all.stats if TRUE, also return Gini, decile ratios and top-5% share
###   log.trans if TRUE, dep is exponentiated before means/inequality stats
### Returns a named numeric vector: quantiles followed by summary stats.
stat <- function(dep,weight,groupN,group=c(0,1),rwfactor,
                 tau=c(10,50,90,99)/100,
                 all.stats=FALSE,log.trans=FALSE){
# Factor variables for group selection allowed
if(is.factor(groupN)){
group <- levels(groupN)[1+group]
}
# Restrict all vectors to the requested group
dep <- dep[which(groupN==group)]
weight <- weight[which(groupN==group)]
rwfactor <- rwfactor[which(groupN==group)]
# Normalize weights (disabled)
#rwfactor <- rwfactor/sum(rwfactor)
# Combine sampling weight and reweighting factor
w <- weight*rwfactor
### If all stats required: make sure the quantiles needed for the
### decile ratios and top-share statistics are estimated
if(all.stats==TRUE){
# make sure all relevant quantiles are estimated
tau <- union(c(0.1,0.5,0.9,0.95,0.99),tau)
tau <- tau[order(tau)]
}
# get quantiles statistics
quantile <- wtd.quantile(dep,weight=w,probs=tau)
# is dep variable log transformed? If so, back-transform to levels
if(log.trans==TRUE){
dep1 <- exp(dep)
quantile1 <- exp(quantile)
} else {
dep1 <- dep
quantile1 <- quantile
}
# Weighted mean and standard deviation (on the level scale)
mu <- wtd.mean(dep1, weight=w)
sd <- sqrt(wtd.var(dep1, weight=w))
# Estimate additional stats if all stats required
if(all.stats==TRUE){
# Overall Gini and income share of the top 5% (obs above the 95th pctile)
gini <- Gini(dep1, w)
select <- which(dep>=quantile[match(0.95,tau)])
#gini.top <- Gini(dep1[select], w[select])
s_top05 <- (wtd.mean(dep1[select], weight=w[select])/mu)*0.05
# Decile ratios (on the level scale)
p90p10 <- quantile1[match(0.9,tau)]/quantile1[match(0.1,tau)]
p90p50 <- quantile1[match(0.9,tau)]/quantile1[match(0.5,tau)]
p50p10 <- quantile1[match(0.5,tau)]/quantile1[match(0.1,tau)]
p99p90 <- quantile1[match(0.99,tau)]/quantile1[match(0.9,tau)]
res <- c(quantile,mu,sd,gini,
         p90p10,p90p50,p50p10,p99p90,
         s_top05)
names(res)[(length(tau)+1):length(res)] <- c("mean","sd","gini",
                                             "p90p10","p90p50","p50p10","p99p90",
                                             "top 5% share")
return(res)
} else {
# Return results if not all stats required
res <- c(c(quantile,mu,sd))
names(res)[(length(tau)+1):length(res)] <- c("mean","sd")
return(res)
}
}
#############################################################
### Function for kernel density estimates
kden <- function(dep,weight=NULL,
                 groupN=NULL,group=c(0,1),
                 rwfactor=NULL,
                 px=NULL,
                 bw = "nrd0",
                 kernel="gaussian",
                 n=512,
                 na.rm = TRUE){
  # Weighted kernel density estimate of dep, optionally restricted to one
  # group and rescaled by the group's share of total weight (via px).
  # Returns a data frame with columns group, x, density.
  # Translate a numeric group index into the factor level, if needed
  if(is.factor(groupN)){
    group <- levels(groupN)[1+group]
  }
  if(is.null(groupN)){
    group <- "all"
  }
  # Default to unit weights / unit reweighting factors
  if(is.null(weight)){
    weight <- rep(1, length(dep))
  }
  if(is.null(rwfactor)){
    rwfactor <- rep(1, length(dep))
  }
  # Keep only the requested group's observations
  if(!is.null(groupN)){
    keep <- which(groupN == group)
    dep <- dep[keep]
    weight <- weight[keep]
    rwfactor <- rwfactor[keep]
  }
  # Combine and normalize the weights
  w <- weight*rwfactor
  wsum <- sum(w)
  w <- w/wsum
  # Scaling factor applied to the density values
  if(is.null(px)){
    px <- 1
  } else {
    px <- wsum/px
  }
  # Optionally drop missing outcomes (their weights are dropped alongside)
  if(na.rm == TRUE && sum(is.na(dep)) != 0){
    miss <- which(is.na(dep))
    dep <- dep[-miss]
    w <- w[-miss]
  }
  # Kernel density on the (reweighted) sample
  est <- density(dep, weights=w,
                 kernel=kernel, bw=bw, n=n)
  # Assemble the result, rescaling the density by px
  data.frame(group=rep(group, n), x=est$x, density=est$y*px)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_samplesplits.R
\name{qgcomp.partials}
\alias{qgcomp.partials}
\title{Partial effect sizes, confidence intervals, hypothesis tests}
\usage{
qgcomp.partials(
fun = c("qgcomp.noboot", "qgcomp.cox.noboot", "qgcomp.zi.noboot"),
traindata = NULL,
validdata = NULL,
expnms = NULL,
...
)
}
\arguments{
\item{fun}{character variable in the set "qgcomp.noboot" (binary, count, continuous outcomes),
"qgcomp.cox.noboot" (survival outcomes),
"qgcomp.zi.noboot" (zero inflated outcomes). This describes which qgcomp package
function is used to fit the model. (default = "qgcomp.noboot")}
\item{traindata}{Data frame with training data}
\item{validdata}{Data frame with validation data}
\item{expnms}{Exposure mixture of interest}
\item{...}{Arguments to \code{\link[qgcomp]{qgcomp.noboot}},
\code{\link[qgcomp]{qgcomp.cox.noboot}}, or
\code{\link[qgcomp]{qgcomp.zi.noboot}}}
}
\value{
A 'qgcompmultifit' object, which inherits from \code{\link[base]{list}}, which contains
\describe{
\item{posmix}{character vector of variable names with positive coefficients in the qgcomp model
fit to the training data}
\item{negmix}{character vector of variable names with negative coefficients in the qgcomp model
fit to the training data}
\item{pos.fit}{a qgcompfit object fit to the validation data, in which the exposures of
interest are contained in 'posmix'}
\item{neg.fit}{a qgcompfit object fit to the validation data, in which the exposures of
interest are contained in 'negmix'}
}
}
\description{
Obtain effect estimates for "partial positive" and "partial
negative" effects using quantile g-computation. This approach uses sample
splitting to evaluate the overall impact of a set of variables with
effects in a single direction, where, using training data, all variables
with effects in the same direction are grouped.
}
\details{
In the basic (non bootstrapped) \code{qgcomp} functions, the positive and
negative "sums
of coefficients" or "partial effect sizes" are given, which equal the sum
of the negative and positive coefficients in the underlying model. Unfortunately,
these partial effects don't constitute variables for which we can derive confidence
intervals or hypothesis tests, so they are mainly for exploratory purposes. By employing
sample splitting, however, we can obtain better estimates of these partial effects.
Sample splitting proceeds by partitioning the data into two samples (40/60 training/validation
split seems acceptable in many circumstances). The "overall mixture effect" is then
estimated in the training data, and the mixture variables with positive and negative coefficients
are split into separate groups. These two different groups are then used as
"the mixture of interest" in two additional qgcomp fits, where the mixture of interest
is adjusted for the other exposure variables. For example, if the "positive partial effect"
is of interest, then this effect is equal to the sum of the coefficients in the
qgcomp model fit to the validation data, with the mixture of interest selected by the
original fit to the training data (note that some of these coefficients may be negative
in the fit to the validation data - this is expected and necessary for valid hypothesis tests).
The positive/negative partial effects are necessarily exploratory, but sample splitting preserves
the statistical properties at the expense of wider confidence intervals and larger variances. The
two resulting mixture groups should be inspected for plausibility.
}
\examples{
set.seed(123223)
dat = qgcomp:::.dgm_quantized(N=1000, coef=c(0.25,-0.25,0,0), ncor=1)
cor(dat)
# overall fit (more or less null due to counteracting exposures)
(overall <- qgcomp.noboot(f=y~., q=NULL, expnms=c("x1", "x2", "x3", "x4"), data=dat))
# partial effects using 40\% training/60\% validation split
trainidx <- sample(1:nrow(dat), round(nrow(dat)*0.4))
valididx <- setdiff(1:nrow(dat),trainidx)
traindata = dat[trainidx,]
validdata = dat[valididx,]
splitres <- qgcomp:::qgcomp.partials(fun="qgcomp.noboot", f=y~., q=NULL,
traindata=traindata,validdata=validdata, expnms=c("x1", "x2", "x3", "x4"))
splitres
\dontrun{
# under the null, both should give null results
set.seed(123223)
dat <- qgcomp:::.dgm_quantized(N=1000, coef=c(0,0,0,0), ncor=1)
# 40\% training/60\% validation
trainidx2 <- sample(1:nrow(dat), round(nrow(dat)*0.4))
valididx2 <- setdiff(1:nrow(dat),trainidx2)
traindata2 <- dat[trainidx2,]
validdata2 <- dat[valididx2,]
splitres2 <- qgcomp:::qgcomp.partials(fun="qgcomp.noboot", f=y~.,
q=NULL, traindata=traindata2,validdata=validdata2, expnms=c("x1", "x2", "x3", "x4"))
splitres2
# 60\% training/40\% validation
trainidx3 <- sample(1:nrow(dat), round(nrow(dat)*0.6))
valididx3 <- setdiff(1:nrow(dat),trainidx3)
traindata3 <- dat[trainidx3,]
validdata3 <- dat[valididx3,]
splitres3 <- qgcomp:::qgcomp.partials(fun="qgcomp.noboot", f=y~., q=NULL,
traindata=traindata3,validdata=validdata3, expnms=c("x1", "x2", "x3", "x4"))
splitres3
# survival outcome
set.seed(50)
N=1000
dat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))),
d=1.0*(tmg<0.1), x1=runif(N)+(tmg<0.1)*0.1, x2=runif(N)-(tmg<0.1)*0.1, x3=runif(N),
x4=runif(N), x5=runif(N) , z=runif(N))
trainidx4 <- sample(1:nrow(dat), round(nrow(dat)*0.6))
valididx4 <- setdiff(1:nrow(dat),trainidx4)
traindata4 <- dat[trainidx4,]
validdata4 <- dat[valididx4,]
expnms=paste0("x", 1:5)
f = survival::Surv(time, d)~x1 + x2 + x3 + x4 + x5 + z
(fit1 <- survival::coxph(f, data = dat))
(overall <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))
(splitres4 <- qgcomp:::qgcomp.partials(fun="qgcomp.cox.noboot", f=f, q=4,
traindata=traindata4,validdata=validdata4,
expnms=expnms))
# zero inflated count outcome
set.seed(50)
n=1000
dat <- data.frame(y= (yany <- rbinom(n, 1, 0.5))*(ycnt <- rpois(n, 1.2)), x1=runif(n)+ycnt*0.2,
x2=runif(n)-ycnt*0.2, x3=runif(n),
x4=runif(n) , z=runif(n))
# poisson count model, mixture in both portions, but note that the qgcomp.partials
# function defines the "positive" variables only by the count portion of the model
(overall5 <- qgcomp.zi.noboot(f=y ~ z + x1 + x2 + x3 + x4 | x1 + x2 + x3 + x4 + z,
expnms = c("x1", "x2", "x3", "x4"),
data=dat, q=4, dist="poisson"))
trainidx5 <- sample(1:nrow(dat), round(nrow(dat)*0.6))
valididx5 <- setdiff(1:nrow(dat),trainidx5)
traindata5 <- dat[trainidx5,]
validdata5 <- dat[valididx5,]
splitres5 <- qgcomp.partials(fun="qgcomp.zi.noboot",
f=y ~ x1 + x2 + x3 + x4 + z | x1 + x2 + x3 + x4 + z, q=4,
traindata=traindata5, validdata=validdata5,
expnms=c("x1", "x2", "x3", "x4"))
splitres5
}
}
| /man/qgcomp.partials.Rd | no_license | idblr/qgcomp | R | false | true | 6,812 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_samplesplits.R
\name{qgcomp.partials}
\alias{qgcomp.partials}
\title{Partial effect sizes, confidence intervals, hypothesis tests}
\usage{
qgcomp.partials(
fun = c("qgcomp.noboot", "qgcomp.cox.noboot", "qgcomp.zi.noboot"),
traindata = NULL,
validdata = NULL,
expnms = NULL,
...
)
}
\arguments{
\item{fun}{character variable in the set "qgcomp.noboot" (binary, count, continuous outcomes),
"qgcomp.cox.noboot" (survival outcomes),
"qgcomp.zi.noboot" (zero inflated outcomes). This describes which qgcomp package
function is used to fit the model. (default = "qgcomp.noboot")}
\item{traindata}{Data frame with training data}
\item{validdata}{Data frame with validation data}
\item{expnms}{Exposure mixture of interest}
\item{...}{Arguments to \code{\link[qgcomp]{qgcomp.noboot}},
\code{\link[qgcomp]{qgcomp.cox.noboot}}, or
\code{\link[qgcomp]{qgcomp.zi.noboot}}}
}
\value{
A 'qgcompmultifit' object, which inherits from \code{\link[base]{list}}, which contains
\describe{
\item{posmix}{character vector of variable names with positive coefficients in the qgcomp model
fit to the training data}
\item{negmix}{character vector of variable names with negative coefficients in the qgcomp model
fit to the training data}
\item{pos.fit}{a qgcompfit object fit to the validation data, in which the exposures of
interest are contained in 'posmix'}
\item{neg.fit}{a qgcompfit object fit to the validation data, in which the exposures of
interest are contained in 'negmix'}
}
}
\description{
Obtain effect estimates for "partial positive" and "partial
negative" effects using quantile g-computation. This approach uses sample
splitting to evaluate the overall impact of a set of variables with
effects in a single direction, where, using training data, all variables
with effects in the same direction are grouped.
}
\details{
In the basic (non bootstrapped) \code{qgcomp} functions, the positive and
negative "sums
of coefficients" or "partial effect sizes" are given, which equal the sum
of the negative and positive coefficients in the underlying model. Unfortunately,
these partial effects don't constitute variables for which we can derive confidence
intervals or hypothesis tests, so they are mainly for exploratory purposes. By employing
sample splitting, however, we can obtain better estimates of these partial effects.
Sample splitting proceeds by partitioning the data into two samples (40/60 training/validation
split seems acceptable in many circumstances). The "overall mixture effect" is then
estimated in the training data, and the mixture variables with positive and negative coefficients
are split into separate groups. These two different groups are then used as
"the mixture of interest" in two additional qgcomp fits, where the mixture of interest
is adjusted for the other exposure variables. For example, if the "positive partial effect"
is of interest, then this effect is equal to the sum of the coefficients in the
qgcomp model fit to the validation data, with the mixture of interest selected by the
original fit to the training data (note that some of these coefficients may be negative
in the fit to the validation data - this is expected and necessary for valid hypothesis tests).
The positive/negative partial effects are necessarily exploratory, but sample splitting preserves
the statistical properties at the expense of wider confidence intervals and larger variances. The
two resulting mixture groups should be inspected for plausibility.
}
\examples{
set.seed(123223)
dat = qgcomp:::.dgm_quantized(N=1000, coef=c(0.25,-0.25,0,0), ncor=1)
cor(dat)
# overall fit (more or less null due to counteracting exposures)
(overall <- qgcomp.noboot(f=y~., q=NULL, expnms=c("x1", "x2", "x3", "x4"), data=dat))
# partial effects using 40\% training/60\% validation split
trainidx <- sample(1:nrow(dat), round(nrow(dat)*0.4))
valididx <- setdiff(1:nrow(dat),trainidx)
traindata = dat[trainidx,]
validdata = dat[valididx,]
splitres <- qgcomp:::qgcomp.partials(fun="qgcomp.noboot", f=y~., q=NULL,
traindata=traindata,validdata=validdata, expnms=c("x1", "x2", "x3", "x4"))
splitres
\dontrun{
# under the null, both should give null results
set.seed(123223)
dat <- qgcomp:::.dgm_quantized(N=1000, coef=c(0,0,0,0), ncor=1)
# 40\% training/60\% validation
trainidx2 <- sample(1:nrow(dat), round(nrow(dat)*0.4))
valididx2 <- setdiff(1:nrow(dat),trainidx2)
traindata2 <- dat[trainidx2,]
validdata2 <- dat[valididx2,]
splitres2 <- qgcomp:::qgcomp.partials(fun="qgcomp.noboot", f=y~.,
q=NULL, traindata=traindata2,validdata=validdata2, expnms=c("x1", "x2", "x3", "x4"))
splitres2
# 60\% training/40\% validation
trainidx3 <- sample(1:nrow(dat), round(nrow(dat)*0.6))
valididx3 <- setdiff(1:nrow(dat),trainidx3)
traindata3 <- dat[trainidx3,]
validdata3 <- dat[valididx3,]
splitres3 <- qgcomp:::qgcomp.partials(fun="qgcomp.noboot", f=y~., q=NULL,
traindata=traindata3,validdata=validdata3, expnms=c("x1", "x2", "x3", "x4"))
splitres3
# survival outcome
set.seed(50)
N=1000
dat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))),
d=1.0*(tmg<0.1), x1=runif(N)+(tmg<0.1)*0.1, x2=runif(N)-(tmg<0.1)*0.1, x3=runif(N),
x4=runif(N), x5=runif(N) , z=runif(N))
trainidx4 <- sample(1:nrow(dat), round(nrow(dat)*0.6))
valididx4 <- setdiff(1:nrow(dat),trainidx4)
traindata4 <- dat[trainidx4,]
validdata4 <- dat[valididx4,]
expnms=paste0("x", 1:5)
f = survival::Surv(time, d)~x1 + x2 + x3 + x4 + x5 + z
(fit1 <- survival::coxph(f, data = dat))
(overall <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))
(splitres4 <- qgcomp:::qgcomp.partials(fun="qgcomp.cox.noboot", f=f, q=4,
traindata=traindata4,validdata=validdata4,
expnms=expnms))
# zero inflated count outcome
set.seed(50)
n=1000
dat <- data.frame(y= (yany <- rbinom(n, 1, 0.5))*(ycnt <- rpois(n, 1.2)), x1=runif(n)+ycnt*0.2,
x2=runif(n)-ycnt*0.2, x3=runif(n),
x4=runif(n) , z=runif(n))
# poisson count model, mixture in both portions, but note that the qgcomp.partials
# function defines the "positive" variables only by the count portion of the model
(overall5 <- qgcomp.zi.noboot(f=y ~ z + x1 + x2 + x3 + x4 | x1 + x2 + x3 + x4 + z,
expnms = c("x1", "x2", "x3", "x4"),
data=dat, q=4, dist="poisson"))
trainidx5 <- sample(1:nrow(dat), round(nrow(dat)*0.6))
valididx5 <- setdiff(1:nrow(dat),trainidx5)
traindata5 <- dat[trainidx5,]
validdata5 <- dat[valididx5,]
splitres5 <- qgcomp.partials(fun="qgcomp.zi.noboot",
f=y ~ x1 + x2 + x3 + x4 + z | x1 + x2 + x3 + x4 + z, q=4,
traindata=traindata5, validdata=validdata5,
expnms=c("x1", "x2", "x3", "x4"))
splitres5
}
}
|
\name{gintac}
\alias{gintac}
\title{ Initial values of theta, A and c_i,...,c_n }
\description{
See Marazzi A. (1993), p.292}
\usage{
gintac(x, y, ni, oi = 0, icase = .dFvGet()$ics, maxtt = .dFvGet()$mxt,
maxta = .dFvGet()$mxf, tolt = .dFvGet()$tlo, tola = .dFvGet()$tlo,
b = 1.1 * sqrt(np), c = 1.345)
}
\arguments{
\item{x}{ See reference}
\item{y}{ See reference}
\item{ni}{ See reference}
\item{oi}{ See reference}
\item{icase}{ See reference}
\item{maxtt}{ See reference}
\item{maxta}{ See reference}
\item{tolt}{ See reference}
\item{tola}{ See reference}
\item{b}{ See reference}
\item{c}{ See reference}
}
\value{
See reference
}
\references{
Marazzi A. (1993) \emph{Algorithms, Routines, and S Functions
for Robust Statistics}. Wadsworth & Brooks/cole, Pacific Grove,
California. p.292
}
\keyword{robust}
| /man/gintac.Rd | no_license | cran/robeth | R | false | false | 891 | rd | \name{gintac}
\alias{gintac}
\title{ Initial values of theta, A and c_i,...,c_n }
\description{
See Marazzi A. (1993), p.292}
\usage{
gintac(x, y, ni, oi = 0, icase = .dFvGet()$ics, maxtt = .dFvGet()$mxt,
maxta = .dFvGet()$mxf, tolt = .dFvGet()$tlo, tola = .dFvGet()$tlo,
b = 1.1 * sqrt(np), c = 1.345)
}
\arguments{
\item{x}{ See reference}
\item{y}{ See reference}
\item{ni}{ See reference}
\item{oi}{ See reference}
\item{icase}{ See reference}
\item{maxtt}{ See reference}
\item{maxta}{ See reference}
\item{tolt}{ See reference}
\item{tola}{ See reference}
\item{b}{ See reference}
\item{c}{ See reference}
}
\value{
See reference
}
\references{
Marazzi A. (1993) \emph{Algorithms, Routines, and S Functions
for Robust Statistics}. Wadsworth & Brooks/cole, Pacific Grove,
California. p.292
}
\keyword{robust}
|
## Title: Grass endophyte population model with a bayesian framework
## Purpose: Fits all vital rate models, written in STAN
## and does visualisation of posterior predictive checks
## Authors: Joshua and Tom
#############################################################
library(tidyverse)
library(rstan)
library(StanHeaders)
library(shinystan)
library(bayesplot)
library(devtools)
# Numerically stable inverse logit: 1/(1+exp(-x)) avoids the NaN the
# original form exp(x)/(1+exp(x)) returns when exp(x) overflows (x > ~710).
invlogit<-function(x){1/(1+exp(-x))}
# Log-odds (logit) transform; defined for x in (0, 1)
logit = function(x) { log(x/(1-x)) }
#############################################################################################
####### Data manipulation to prepare data as lists for Stan models------------------
#############################################################################################
# survival data lists are generated in the endodemog_data_processing.R file,
# within the section titled "Preparing datalists for Survival Kernel"
source("endodemog_data_processing.R")
#########################################################################################################
# GLMM for all Vital Rates ~ size_t + Endo + Origin with year and plot random effects------------------------------
#########################################################################################################
## Recommended settings to optimize this machine for MCMC sampling:
## cache compiled Stan models to disk and run chains on all cores.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
set.seed(123)  # reproducibility of the MCMC draws
## MCMC settings
ni <- 1000  # iterations per chain
nb <- 500   # warmup (burn-in) iterations
nc <- 3     # number of chains
# Stan model -------------
## here is the Stan model
sink("endodemog_all_vr.stan")
cat("
data {
// indices
int<lower=0> nYear; // number of years
int<lower=0> nPlot; // number of plots
int<lower=0> nEndo; // number of endo treatments
// surv data
int<lower=0> N_s; // number of observations for surv model
int<lower=0> K_s; // number of predictors for surv model
int<lower=0, upper=11> year_t_s[N_s]; // year of observation for surv model
int<lower=1, upper=2> endo_index_s[N_s]; // index for endophyte effect for surv model
int<lower=0> plot_s[N_s]; // plot of observation for surv model
int<lower=0, upper=1> surv_t1[N_s]; // plant survival at time t+1
vector<lower=0>[N_s] logsize_t_s; // plant size at time t for surv model
int<lower=0,upper=1> endo_01_s[N_s]; // plant endophyte status for surv model
int<lower=0,upper=1> origin_01_s[N_s]; // plant origin status for surv model
// growth data
int<lower=0> N_g; // number of observations for growth model
int<lower=0> K_g; // number of predictors for growth model
int<lower=0> lowerlimit_g; //lower limit for truncated negative binomial
int<lower=0, upper=11> year_t_g[N_g]; // year of observation for growth model
int<lower=1, upper=2> endo_index_g[N_g]; // index for endophyte effect for growth model
int<lower=0> plot_g[N_g]; // plot of observation for growth model
int<lower=lowerlimit_g> size_t1[N_g]; // plant size at time t+1
vector<lower=0>[N_g] logsize_t_g; // plant size at time t for growth model
int<lower=0,upper=1> endo_01_g[N_g]; // plant endophyte status for growth model
int<lower=0,upper=1> origin_01_g[N_g]; // plant origin status for growth model
// flowering data
int<lower=0> N_fl; // number of observations
int<lower=0> K_fl; // number of predictors
int<lower=0> year_t_fl[N_fl]; // year of observation
int<lower=0> plot_fl[N_fl]; // plot of observation
int<lower=0, upper=1> flw_t[N_fl]; // flowering status at time t
vector<lower=-1>[N_fl] logsize_t_fl; // log of plant size at time t
int<lower=0, upper=1> endo_01_fl[N_fl]; // endophyte status
int<lower=1, upper=2> endo_index_fl[N_fl]; // index for endophyte effect
int<lower=0, upper=1> origin_01_fl[N_fl]; // origin status
// # of flw tiller data
int<lower=0> N_ft; // number of observations
int<lower=0> K_ft; // number of predictors
int<lower=0> lowerlimit_ft; //lower limit for truncated negative binomial
int<lower=0> year_t_ft[N_ft]; // year of observation
int<lower=1, upper=2> endo_index_ft[N_ft]; // index for endophyte effect
int<lower=0> plot_ft[N_ft]; // plot of observation
int<lower=lowerlimit_ft> flw_count_t[N_ft]; // plant size at time t and target variable
vector<lower=0>[N_ft] logsize_t_ft; // plant size at time t
int<lower=0,upper=1> endo_01_ft[N_ft]; // plant endophyte status
int<lower=0,upper=1> origin_01_ft[N_ft]; // plant origin status
// spikelet/infl data
int<lower=0> N_sp; // number of observations
int<lower=0> K_sp; // number of predictors
int<lower=0> year_t_sp[N_sp]; // year of observation
int<lower=1, upper=2> endo_index_sp[N_sp]; // index for endophyte effect
int<lower=0> plot_sp[N_sp]; // plot of observation
int<lower=0> spike_t[N_sp]; // no. of spike per infl at time t
vector<lower=0>[N_sp] logsize_t_sp; // plant size at time t
int<lower=0,upper=1> endo_01_sp[N_sp]; // plant endophyte status
int<lower=0,upper=1> origin_01_sp[N_sp]; // plant origin status
// seed/spiklet data
int<lower=0> N_se; // number of observations of seed/spikelet
int<lower=0> K_se; // number of predictors
real<lower=0> seed[N_se]; // number of seeds per spikelet
int<lower=0,upper=1> endo_01_se[N_se]; // plant endophyte status
}
parameters {
// surv params
vector[K_s] beta_s; // predictor parameters
vector[nYear] tau_year_s[nEndo]; // random year effect
real<lower=0> sigma_e_s[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_s; // random plot effect
real<lower=0> sigma_p_s; // plot variance effect
// growth params
vector[K_g] beta_g; // predictor parameters
vector[nYear] tau_year_g[nEndo]; // random year effect
real<lower=0> sigma_e_g[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_g; // random plot effect
real<lower=0> sigma_p_g; // plot variance
real<lower=0> phi_g; // dispersion parameter
// flower params
vector[K_fl] beta_fl; // predictor parameters
vector[nYear] tau_year_fl[nEndo]; // random year effect
real<lower=0> sigma_e_fl[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_fl; // random plot effect
real<lower=0> sigma_p_fl; // plot variance
// fertility params
vector[K_ft] beta_ft; // predictor parameters
vector[nYear] tau_year_ft[nEndo]; // random year effect
real<lower=0> sigma_e_ft[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_ft; // random plot effect
real<lower=0> sigma_p_ft; // plot variance
real<lower=0> phi_ft; // negative binomial dispersion parameter
// spike/inf params
vector[K_sp] beta_sp; // predictor parameters
vector[nYear] tau_year_sp[nEndo]; // random year effect
real<lower=0> sigma_e_sp[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_sp; // random plot effect
real<lower=0> sigma_p_sp; // plot variance
real<lower=0> phi_sp; // negative binomial dispersion parameter
// seed/spike params
vector[K_se] beta_se; // predictor parameters
real<lower=0> sigma_se; // seed per spikelet variance
}
transformed parameters {
real mu_s[N_s]; // surv Linear Predictor
real mu_g[N_g]; // growth Linear Predictor
real mu_fl[N_fl]; // flowering Linear Predictor
real mu_ft[N_ft]; // # of flowering tillers Linear Predictor
real mu_sp[N_sp]; // # of spikelets/infl Linear Predictor
real mu_se[N_se]; // mean seed per spikelet
for(n in 1:N_s){
mu_s[n] = beta_s[1] + beta_s[2]*logsize_t_s[n] + beta_s[3]*endo_01_s[n] +beta_s[4]*origin_01_s[n]
+ tau_year_s[endo_index_s[n],year_t_s[n]]
+ tau_plot_s[plot_s[n]];
}
for(n in 1:N_g){
mu_g[n] = beta_g[1] + beta_g[2]*logsize_t_g[n] + beta_g[3]*endo_01_g[n] +beta_g[4]*origin_01_g[n]
+ tau_year_g[endo_index_g[n],year_t_g[n]]
+ tau_plot_g[plot_g[n]];
}
for(n in 1:N_fl){
mu_fl[n] = beta_fl[1] + beta_fl[2]*logsize_t_fl[n] + beta_fl[3]*endo_01_fl[n] +beta_fl[4]*origin_01_fl[n]
+ tau_year_fl[endo_index_fl[n],year_t_fl[n]]
+ tau_plot_fl[plot_fl[n]];
}
for(n in 1:N_ft){
mu_ft[n] = beta_ft[1] + beta_ft[2]*logsize_t_ft[n] + beta_ft[3]*endo_01_ft[n] +beta_ft[4]*origin_01_ft[n]
+ tau_year_ft[endo_index_ft[n],year_t_ft[n]]
+ tau_plot_ft[plot_ft[n]];
}
for(n in 1:N_sp){
mu_sp[n] = beta_sp[1] + beta_sp[2]*logsize_t_sp[n] + beta_sp[3]*endo_01_sp[n] +beta_sp[4]*origin_01_sp[n]
+ tau_year_sp[endo_index_sp[n],year_t_sp[n]]
+ tau_plot_sp[plot_sp[n]];
}
for(n in 1:N_se){
mu_se[n] = beta_se[1] + beta_se[2]*endo_01_se[n];
}
}
model {
// Priors
// surv priors
beta_s ~ normal(0,100); // prior for predictor intercepts
tau_plot_s ~ normal(0,sigma_p_s); // prior for plot random effects
to_vector(tau_year_s[1]) ~ normal(0,sigma_e_s[1]); // prior for E- year random effects
to_vector(tau_year_s[2]) ~ normal(0,sigma_e_s[2]); // prior for E+ year random effects
// growth priors
beta_g ~ normal(0,100); // prior for predictor intercepts
tau_plot_g ~ normal(0,sigma_p_g); // prior for plot random effects
to_vector(tau_year_g[1]) ~ normal(0,sigma_e_g[1]); // prior for E- year random effects
to_vector(tau_year_g[2]) ~ normal(0,sigma_e_g[2]); // prior for E+ year random effects
phi_g ~ cauchy(0., 5.);
// flw priors
beta_fl ~ normal(0,100); // prior for predictor intercepts
tau_plot_fl ~ normal(0,sigma_p_fl); // prior for plot random effects
to_vector(tau_year_fl[1]) ~ normal(0,sigma_e_fl[1]); // prior for E- year random effects
to_vector(tau_year_fl[2]) ~ normal(0,sigma_e_fl[2]); // prior for E+ year random effects
// # of flw priors
beta_ft ~ normal(0,100); // prior for predictor intercepts
tau_plot_ft ~ normal(0,sigma_p_ft); // prior for plot random effects
to_vector(tau_year_ft[1]) ~ normal(0,sigma_e_ft[1]); // prior for E- year random effects
to_vector(tau_year_ft[2]) ~ normal(0,sigma_e_ft[2]); // prior for E+ year random effects
phi_ft ~ cauchy(0., 5.);
// # of spike priors
beta_sp ~ normal(0,100);
tau_plot_sp ~ normal(0,sigma_p_sp); // prior for plot random effects
to_vector(tau_year_sp[1]) ~ normal(0,sigma_e_sp[1]); // prior for E- year random effects
to_vector(tau_year_sp[2]) ~ normal(0,sigma_e_sp[2]); // prior for E+ year random effects
phi_sp ~ cauchy(0., 5.);
// seed mean priors
beta_se ~ normal(0,100);
// Likelihoods
// surv
surv_t1 ~ bernoulli_logit(mu_s);
// growth
for(n in 1:N_g){
size_t1[n] ~ neg_binomial_2_log(mu_g[n],phi_g);
target += -log1m(neg_binomial_2_log_lpmf(lowerlimit_g | mu_g[n], phi_g)); // manually adjusting computation of likelihood because T[,] truncation syntax doesn't compile for neg binomial
}
//flowering
flw_t ~ bernoulli_logit(mu_fl);
// # of flow
for(n in 1:N_ft){
flw_count_t[n] ~ neg_binomial_2_log(mu_ft[n],phi_ft);
target += -log1m(neg_binomial_2_log_lpmf(lowerlimit_ft | mu_ft[n], phi_ft)); // manually adjusting computation of likelihood because T[,] truncation syntax doesn't compile for neg binomial
}
// # of spikelet/infl
spike_t ~ neg_binomial_2_log(mu_sp,phi_sp);
// mean seed/spike
seed ~ normal(mu_se,sigma_se);
}
generated quantities{
}
", fill = T)
sink()
stanmodel <- stanc("endodemog_all_vr.stan")
## Run the model by calling stan()
## and save the output to .rds files so that they can be called laters
smAGPE <- stan(file = "endodemog_all_vr.stan", data = AGPE_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 15))
saveRDS(smAGPE, file = "endodemog_all_vr_AGPE.rds")
# smELRI <- stan(file = "endodemog_all_vr.stan", data = ELRI_all_vr_data_list,
# iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
# # saveRDS(smELRI, file = "endodemog_all_vr_ELRI.rds")
#
# smELVI <- stan(file = "endodemog_all_vr.stan", data = ELVI_all_vr_data_list,
# iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
# # saveRDS(smELVI, file = "endodemog_all_vr_ELVI.rds")
smFESU <- stan(file = "endodemog_all_vr.stan", data = FESU_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 15))
saveRDS(smFESU, file = "endodemog_all_vr_FESU.rds")
smLOAR <- stan(file = "endodemog_all_vr.stan", data = LOAR_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
saveRDS(smLOAR, file = "endodemog_all_vr_LOAR.rds")
smPOAL <- stan(file = "endodemog_all_vr.stan", data = POAL_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
saveRDS(smPOAL, file = "endodemog_all_vr_POAL.rds")
smPOSY <- stan(file = "endodemog_all_vr.stan", data = POSY_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
saveRDS(smPOSY, file = "endodemog_all_vr_POSY.rds")
# Inspect a fitted model. The original referenced an undefined object `sm`
# (the fits are named smAGPE, smFESU, ...) and a non-existent parameter
# "sigma_e" (this model's year variances are sigma_e_s, sigma_e_g, ...).
print(smAGPE)
summary(smAGPE)
print(smAGPE, pars = c("sigma_e_s", "sigma_e_g", "sigma_e_fl",
                       "sigma_e_ft", "sigma_e_sp"))
## to read in model output without rerunning models
smAGPE <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_AGPE.rds")
smELRI <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_ELRI.rds")
smELVI <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_ELVI.rds")
smFESU <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_FESU.rds")
smLOAR <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_LOAR.rds")
smPOAL <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_POAL.rds")
smPOSY <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_POSY.rds")
| /endodemog_all_vr.R | no_license | joshuacfowler/Endodemog_2019 | R | false | false | 15,740 | r | ## Title: Grass endophyte population model with a bayesian framework
## Purpose: Fits all vital rate models, written in STAN
## and does visualisation of posterior predictive checks
## Authors: Joshua and Tom
#############################################################
library(tidyverse)
library(rstan)
library(StanHeaders)
library(shinystan)
library(bayesplot)
library(devtools)
invlogit<-function(x){exp(x)/(1+exp(x))}
logit = function(x) { log(x/(1-x)) }
#############################################################################################
####### Data manipulation to prepare data as lists for Stan models------------------
#############################################################################################
# survival data lists are generated in the endodemog_data_processing.R file,
# within the section titled "Preparing datalists for Survival Kernel"
source("endodemog_data_processing.R")
#########################################################################################################
# GLMM for all Vital Rates ~ size_t + Endo + Origin with year and plot random effects------------------------------
#########################################################################################################
## run this code recommended to optimize computer system settings for MCMC
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
set.seed(123)
## MCMC settings
ni <- 1000
nb <- 500
nc <- 3
# Stan model -------------
## here is the Stan model
sink("endodemog_all_vr.stan")
cat("
data {
// indices
int<lower=0> nYear; // number of years
int<lower=0> nPlot; // number of plots
int<lower=0> nEndo; // number of endo treatments
// surv data
int<lower=0> N_s; // number of observations for surv model
int<lower=0> K_s; // number of predictors for surv model
int<lower=0, upper=11> year_t_s[N_s]; // year of observation for surv model
int<lower=1, upper=2> endo_index_s[N_s]; // index for endophyte effect for surv model
int<lower=0> plot_s[N_s]; // plot of observation for surv model
int<lower=0, upper=1> surv_t1[N_s]; // plant survival at time t+1
vector<lower=0>[N_s] logsize_t_s; // plant size at time t for surv model
int<lower=0,upper=1> endo_01_s[N_s]; // plant endophyte status for surv model
int<lower=0,upper=1> origin_01_s[N_s]; // plant origin status for surv model
// growth data
int<lower=0> N_g; // number of observations for growth model
int<lower=0> K_g; // number of predictors for growth model
int<lower=0> lowerlimit_g; //lower limit for truncated negative binomial
int<lower=0, upper=11> year_t_g[N_g]; // year of observation for growth model
int<lower=1, upper=2> endo_index_g[N_g]; // index for endophyte effect for growth model
int<lower=0> plot_g[N_g]; // plot of observation for growth model
int<lower=lowerlimit_g> size_t1[N_g]; // plant size at time t+1
vector<lower=0>[N_g] logsize_t_g; // plant size at time t for growth model
int<lower=0,upper=1> endo_01_g[N_g]; // plant endophyte status for growth model
int<lower=0,upper=1> origin_01_g[N_g]; // plant origin status for growth model
// flowering data
int<lower=0> N_fl; // number of observations
int<lower=0> K_fl; // number of predictors
int<lower=0> year_t_fl[N_fl]; // year of observation
int<lower=0> plot_fl[N_fl]; // plot of observation
int<lower=0, upper=1> flw_t[N_fl]; // flowering status at time t
vector<lower=-1>[N_fl] logsize_t_fl; // log of plant size at time t
int<lower=0, upper=1> endo_01_fl[N_fl]; // endophyte status
int<lower=1, upper=2> endo_index_fl[N_fl]; // index for endophyte effect
int<lower=0, upper=1> origin_01_fl[N_fl]; // origin status
// # of flw tiller data
int<lower=0> N_ft; // number of observations
int<lower=0> K_ft; // number of predictors
int<lower=0> lowerlimit_ft; //lower limit for truncated negative binomial
int<lower=0> year_t_ft[N_ft]; // year of observation
int<lower=1, upper=2> endo_index_ft[N_ft]; // index for endophyte effect
int<lower=0> plot_ft[N_ft]; // plot of observation
int<lower=lowerlimit_ft> flw_count_t[N_ft]; // plant size at time t and target variable
vector<lower=0>[N_ft] logsize_t_ft; // plant size at time t
int<lower=0,upper=1> endo_01_ft[N_ft]; // plant endophyte status
int<lower=0,upper=1> origin_01_ft[N_ft]; // plant origin status
// spikelet/infl data
int<lower=0> N_sp; // number of observations
int<lower=0> K_sp; // number of predictors
int<lower=0> year_t_sp[N_sp]; // year of observation
int<lower=1, upper=2> endo_index_sp[N_sp]; // index for endophyte effect
int<lower=0> plot_sp[N_sp]; // plot of observation
int<lower=0> spike_t[N_sp]; // no. of spike per infl at time t
vector<lower=0>[N_sp] logsize_t_sp; // plant size at time t
int<lower=0,upper=1> endo_01_sp[N_sp]; // plant endophyte status
int<lower=0,upper=1> origin_01_sp[N_sp]; // plant origin status
// seed/spiklet data
int<lower=0> N_se; // number of observations of seed/spikelet
int<lower=0> K_se; // number of predictors
real<lower=0> seed[N_se]; // number of seeds per spikelet
int<lower=0,upper=1> endo_01_se[N_se]; // plant endophyte status
}
parameters {
// surv params
vector[K_s] beta_s; // predictor parameters
vector[nYear] tau_year_s[nEndo]; // random year effect
real<lower=0> sigma_e_s[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_s; // random plot effect
real<lower=0> sigma_p_s; // plot variance effect
// growth params
vector[K_g] beta_g; // predictor parameters
vector[nYear] tau_year_g[nEndo]; // random year effect
real<lower=0> sigma_e_g[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_g; // random plot effect
real<lower=0> sigma_p_g; // plot variance
real<lower=0> phi_g; // dispersion parameter
// flower params
vector[K_fl] beta_fl; // predictor parameters
vector[nYear] tau_year_fl[nEndo]; // random year effect
real<lower=0> sigma_e_fl[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_fl; // random plot effect
real<lower=0> sigma_p_fl; // plot variance
// fertility params
vector[K_ft] beta_ft; // predictor parameters
vector[nYear] tau_year_ft[nEndo]; // random year effect
real<lower=0> sigma_e_ft[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_ft; // random plot effect
real<lower=0> sigma_p_ft; // plot variance
real<lower=0> phi_ft; // negative binomial dispersion parameter
// spike/inf params
vector[K_sp] beta_sp; // predictor parameters
vector[nYear] tau_year_sp[nEndo]; // random year effect
real<lower=0> sigma_e_sp[nEndo]; //year variance by endophyte effect
vector[nPlot] tau_plot_sp; // random plot effect
real<lower=0> sigma_p_sp; // plot variance
real<lower=0> phi_sp; // negative binomial dispersion parameter
// seed/spike params
vector[K_se] beta_se; // predictor parameters
real<lower=0> sigma_se; // seed per spikelet variance
}
transformed parameters {
real mu_s[N_s]; // surv Linear Predictor
real mu_g[N_g]; // growth Linear Predictor
real mu_fl[N_fl]; // flowering Linear Predictor
real mu_ft[N_ft]; // # of flowering tillers Linear Predictor
real mu_sp[N_sp]; // # of spikelets/infl Linear Predictor
real mu_se[N_se]; // mean seed per spikelet
for(n in 1:N_s){
mu_s[n] = beta_s[1] + beta_s[2]*logsize_t_s[n] + beta_s[3]*endo_01_s[n] +beta_s[4]*origin_01_s[n]
+ tau_year_s[endo_index_s[n],year_t_s[n]]
+ tau_plot_s[plot_s[n]];
}
for(n in 1:N_g){
mu_g[n] = beta_g[1] + beta_g[2]*logsize_t_g[n] + beta_g[3]*endo_01_g[n] +beta_g[4]*origin_01_g[n]
+ tau_year_g[endo_index_g[n],year_t_g[n]]
+ tau_plot_g[plot_g[n]];
}
for(n in 1:N_fl){
mu_fl[n] = beta_fl[1] + beta_fl[2]*logsize_t_fl[n] + beta_fl[3]*endo_01_fl[n] +beta_fl[4]*origin_01_fl[n]
+ tau_year_fl[endo_index_fl[n],year_t_fl[n]]
+ tau_plot_fl[plot_fl[n]];
}
for(n in 1:N_ft){
mu_ft[n] = beta_ft[1] + beta_ft[2]*logsize_t_ft[n] + beta_ft[3]*endo_01_ft[n] +beta_ft[4]*origin_01_ft[n]
+ tau_year_ft[endo_index_ft[n],year_t_ft[n]]
+ tau_plot_ft[plot_ft[n]];
}
for(n in 1:N_sp){
mu_sp[n] = beta_sp[1] + beta_sp[2]*logsize_t_sp[n] + beta_sp[3]*endo_01_sp[n] +beta_sp[4]*origin_01_sp[n]
+ tau_year_sp[endo_index_sp[n],year_t_sp[n]]
+ tau_plot_sp[plot_sp[n]];
}
for(n in 1:N_se){
mu_se[n] = beta_se[1] + beta_se[2]*endo_01_se[n];
}
}
model {
// Priors
// surv priors
beta_s ~ normal(0,100); // prior for predictor intercepts
tau_plot_s ~ normal(0,sigma_p_s); // prior for plot random effects
to_vector(tau_year_s[1]) ~ normal(0,sigma_e_s[1]); // prior for E- year random effects
to_vector(tau_year_s[2]) ~ normal(0,sigma_e_s[2]); // prior for E+ year random effects
// growth priors
beta_g ~ normal(0,100); // prior for predictor intercepts
tau_plot_g ~ normal(0,sigma_p_g); // prior for plot random effects
to_vector(tau_year_g[1]) ~ normal(0,sigma_e_g[1]); // prior for E- year random effects
to_vector(tau_year_g[2]) ~ normal(0,sigma_e_g[2]); // prior for E+ year random effects
phi_g ~ cauchy(0., 5.);
// flw priors
beta_fl ~ normal(0,100); // prior for predictor intercepts
tau_plot_fl ~ normal(0,sigma_p_fl); // prior for plot random effects
to_vector(tau_year_fl[1]) ~ normal(0,sigma_e_fl[1]); // prior for E- year random effects
to_vector(tau_year_fl[2]) ~ normal(0,sigma_e_fl[2]); // prior for E+ year random effects
// # of flw priors
beta_ft ~ normal(0,100); // prior for predictor intercepts
tau_plot_ft ~ normal(0,sigma_p_ft); // prior for plot random effects
to_vector(tau_year_ft[1]) ~ normal(0,sigma_e_ft[1]); // prior for E- year random effects
to_vector(tau_year_ft[2]) ~ normal(0,sigma_e_ft[2]); // prior for E+ year random effects
phi_ft ~ cauchy(0., 5.);
// # of spike priors
beta_sp ~ normal(0,100);
tau_plot_sp ~ normal(0,sigma_p_sp); // prior for plot random effects
to_vector(tau_year_sp[1]) ~ normal(0,sigma_e_sp[1]); // prior for E- year random effects
to_vector(tau_year_sp[2]) ~ normal(0,sigma_e_sp[2]); // prior for E+ year random effects
phi_sp ~ cauchy(0., 5.);
// seed mean priors
beta_se ~ normal(0,100);
// Likelihoods
// surv
surv_t1 ~ bernoulli_logit(mu_s);
// growth
for(n in 1:N_g){
size_t1[n] ~ neg_binomial_2_log(mu_g[n],phi_g);
target += -log1m(neg_binomial_2_log_lpmf(lowerlimit_g | mu_g[n], phi_g)); // manually adjusting computation of likelihood because T[,] truncation syntax doesn't compile for neg binomial
}
//flowering
flw_t ~ bernoulli_logit(mu_fl);
// # of flow
for(n in 1:N_ft){
flw_count_t[n] ~ neg_binomial_2_log(mu_ft[n],phi_ft);
target += -log1m(neg_binomial_2_log_lpmf(lowerlimit_ft | mu_ft[n], phi_ft)); // manually adjusting computation of likelihood because T[,] truncation syntax doesn't compile for neg binomial
}
// # of spikelet/infl
spike_t ~ neg_binomial_2_log(mu_sp,phi_sp);
// mean seed/spike
seed ~ normal(mu_se,sigma_se);
}
generated quantities{
}
", fill = T)
sink()
stanmodel <- stanc("endodemog_all_vr.stan")
## Run the model by calling stan()
## and save the output to .rds files so that they can be called laters
smAGPE <- stan(file = "endodemog_all_vr.stan", data = AGPE_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 15))
saveRDS(smAGPE, file = "endodemog_all_vr_AGPE.rds")
# smELRI <- stan(file = "endodemog_all_vr.stan", data = ELRI_all_vr_data_list,
# iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
# # saveRDS(smELRI, file = "endodemog_all_vr_ELRI.rds")
#
# smELVI <- stan(file = "endodemog_all_vr.stan", data = ELVI_all_vr_data_list,
# iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
# # saveRDS(smELVI, file = "endodemog_all_vr_ELVI.rds")
smFESU <- stan(file = "endodemog_all_vr.stan", data = FESU_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 15))
saveRDS(smFESU, file = "endodemog_all_vr_FESU.rds")
smLOAR <- stan(file = "endodemog_all_vr.stan", data = LOAR_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
saveRDS(smLOAR, file = "endodemog_all_vr_LOAR.rds")
smPOAL <- stan(file = "endodemog_all_vr.stan", data = POAL_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
saveRDS(smPOAL, file = "endodemog_all_vr_POAL.rds")
smPOSY <- stan(file = "endodemog_all_vr.stan", data = POSY_all_vr_data_list,
iter = ni, warmup = nb, chains = nc, save_warmup = FALSE, control = list(adapt_delta = 0.99, max_treedepth = 20))
saveRDS(smPOSY, file = "endodemog_all_vr_POSY.rds")
print(sm)
summary(sm)
print(sm, pars = "sigma_e")
## to read in model output without rerunning models
smAGPE <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_AGPE.rds")
smELRI <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_ELRI.rds")
smELVI <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_ELVI.rds")
smFESU <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_FESU.rds")
smLOAR <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_LOAR.rds")
smPOAL <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_POAL.rds")
smPOSY <- readRDS(file = "/Users/joshuacfowler/Dropbox/EndodemogData/Model_Runs/endodemog_all_vr_POSY.rds")
|
library(stringr)
# Sum the December values of each input file per year (2007-2016) and store
# the total at slot 12*j -- the December column of year j -- in data_month[[k]].
# Vectorized replacement for the original element-by-element loop over
# 1:3469, which also crashed (`if (NA)`) whenever a date failed to parse.
# NOTE(review): assumes dataList[[k]][2][[1]][2][[1]] holds "YYYY-MM-..."
# date strings and dataList[[k]][2][[1]][4][[1]] the matching values --
# confirm against the upstream file reader.
data_month <- vector("list", length(fileName))
n_rows <- 3469  # observations used per file (hard-coded upstream)
for (k in seq_along(fileName)) {
  dates <- dataList[[k]][2][[1]][2][[1]][seq_len(n_rows)]
  vals <- as.numeric(dataList[[k]][2][[1]][4][[1]][seq_len(n_rows)])
  years <- as.numeric(substr(dates, 1, 4))
  months <- substr(dates, 6, 7)
  for (j in 1:10) {
    dec_sel <- !is.na(years) & years == (2006 + j) & months == "12"
    # sum() over an empty selection is 0, matching the original accumulator
    data_month[[k]][12 * (j - 1) + 12] <- sum(vals[dec_sel])
  }
}
write.csv(data_month, file = "C:/Users/Administrator/Desktop/baidu_index_month12.csv", row.names = F, quote = F) | /R/按月整理12月.R | permissive | booml247/ARRB | R | false | false | 522 | r | library(stringr)
data_month<-vector("list",length(fileName))
for(k in 1:length(fileName)){
for(j in 1:10){
s<-0
for(i in 1:3469){
if(as.numeric(substr(dataList[[k]][2][[1]][2][[1]][i],1,4))==(2006+j)){
if(substr(dataList[[k]][2][[1]][2][[1]][i],6,7)=="12") {
s<-s+as.numeric(dataList[[k]][2][[1]][4][[1]][i])
}
}
}
data_month[[k]][12*(j-1)+12]<-s
}
}
write.csv(data_month, file = "C:/Users/Administrator/Desktop/baidu_index_month12.csv", row.names = F, quote = F) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{sbp}
\alias{sbp}
\title{Systolic blood pressure (SBP) data}
\format{A data frame with 10 rows and 3 variables}
\usage{
data(sbp)
}
\description{
Riley et al. (2011) analyzed a hypothetical meta-analysis.
They generated a data set of 10 studies examining the same
antihypertensive drug.
Negative estimates suggested reduced blood pressure in the
treatment group.
}
\details{
\itemize{
\item \code{y}: Standardized mean difference
\item \code{sigmak}: Standard error
\item \code{label}: Labels for each generated study
}
}
\references{
Riley, R. D., Higgins, J. P. T, and Deeks, J. J. (2011).
Interpretation of random effects meta-analyses.
\emph{BMJ.}
\strong{342}: d549.
\url{https://doi.org/10.1136/bmj.d549}
}
\keyword{datasets}
| /man/sbp.Rd | no_license | cran/pimeta | R | false | true | 875 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{sbp}
\alias{sbp}
\title{Systolic blood pressure (SBP) data}
\format{A data frame with 10 rows and 3 variables}
\usage{
data(sbp)
}
\description{
Riley et al. (2011) analyzed a hypothetical meta-analysis.
They generated a data set of 10 studies examining the same
antihypertensive drug.
Negative estimates suggested reduced blood pressure in the
treatment group.
}
\details{
\itemize{
\item \code{y}: Standardized mean difference
\item \code{sigmak}: Standard error
\item \code{label}: Labels for each generated study
}
}
\references{
Riley, R. D., Higgins, J. P. T, and Deeks, J. J. (2011).
Interpretation of random effects meta-analyses.
\emph{BMJ.}
\strong{342}: d549.
\url{https://doi.org/10.1136/bmj.d549}
}
\keyword{datasets}
|
library(titanic) # loads titanic_train data frame
library(caret)
library(tidyverse)
library(rpart)
# 3 significant digits
options(digits = 3)
# clean the data - `titanic_train` is loaded with the titanic package
titanic_clean <- titanic_train %>%
mutate(Survived = factor(Survived),
Embarked = factor(Embarked),
Age = ifelse(is.na(Age), median(Age, na.rm = TRUE), Age), # NA age to median age
FamilySize = SibSp + Parch + 1) %>% # count family members
select(Survived, Sex, Pclass, Age, Fare, SibSp, Parch, FamilySize, Embarked)
# 80/20 stratified split on the outcome (sample.kind = "Rounding" reproduces
# the pre-R-3.6 RNG so results match the course reference values).
# Use TRUE/FALSE, never T/F (T and F are ordinary, reassignable bindings).
suppressWarnings(set.seed(42, sample.kind = "Rounding"))
i <- createDataPartition(y = titanic_clean$Survived, list = FALSE, p = 0.2)
train <- titanic_clean[-i, ]  # 80% training partition
test <- titanic_clean[i, ]    # 20% held-out test partition
mean(train$Survived == "1")
# Guessing the outcome
suppressWarnings(set.seed(3, sample.kind = "Rounding"))
y_hat_guess <- sample(c("0","1"), nrow(test), replace = T)
mean(y_hat_guess == test$Survived)
# Predicting survival by sex
mean(train$Survived == "1" & train$Sex == "female")/mean(train$Sex == "female")
mean(train$Survived == "1" & train$Sex == "male")/mean(train$Sex == "male")
y_hat_sex <- factor(ifelse(test$Sex == "female", "1", "0"), levels = levels(test$Survived))
mean(y_hat_sex == test$Survived)
confusionMatrix(y_hat_sex, test$Survived)
F_meas(y_hat_sex, test$Survived)
# Predicting survival by passenger class
train %>%
mutate(Pclass = factor(Pclass)) %>%
ggplot(aes(Pclass, fill = Survived)) +
geom_bar()
train %>%
group_by(Pclass) %>%
summarize(Survived = mean(Survived == 1))
y_hat_class <- factor(ifelse(test$Pclass == 1, "1", "0"))
mean(y_hat_class == test$Survived)
confusionMatrix(y_hat_class, test$Survived)
F_meas(y_hat_class, test$Survived)
# Both Sex and Class
train %>%
group_by(Pclass, Sex) %>%
summarise(Survived = mean(Survived == "1"))
y_hat_sex_class <- factor(ifelse(test$Pclass != 3 & test$Sex == "female", "1", "0"))
mean(y_hat_sex_class == test$Survived)
confusionMatrix(y_hat_sex_class, test$Survived)
F_meas(y_hat_sex_class, test$Survived)
# Survival by fare - LDA and QDA
# LDA
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_lda <- train(Survived~Fare, data = train, method = "lda")
y_hat_lda <- predict(fit_lda, test, type = "raw")
mean(y_hat_lda == test$Survived)
# QDA
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_qda <- train(Survived~Fare, data = train, method = "qda")
y_hat_qda <- predict(fit_qda, test, type = "raw")
mean(y_hat_qda == test$Survived)
# Logistic regression models
# Age only
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_glm_age <- train(Survived~Age, data = train, method = "glm")
y_hat_glm_age <- predict(fit_glm_age, newdata = test, type = "raw")
mean(y_hat_glm_age == test$Survived)
# sex, class, fare, and age.
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_glm <- train(Survived~Age+Sex+Fare+Pclass, data = train, method = "glm")
y_hat_glm <- predict(fit_glm, newdata = test, type = "raw")
mean(y_hat_glm == test$Survived)
# All predictors
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_glm_all <- train(Survived~., data = train, method = "glm")
y_hat_glm_all <- predict(fit_glm_all, newdata = test, type = "raw")
mean(y_hat_glm_all == test$Survived)
# KNN
suppressWarnings(set.seed(6, sample.kind = "Rounding"))
fit_knn <- train(Survived~.,data = train, method = "knn",
tuneGrid = data.frame(k = seq(3, 51, 2)))
fit_knn$bestTune
y_hat_knn <- predict(fit_knn, newdata = test, type = "raw")
mean(y_hat_knn == test$Survived)
plot(fit_knn)
# 10-fold Cross Validation
suppressWarnings(set.seed(8, sample.kind = "Rounding"))
trControl <- trainControl(method = "cv",number = 10, p = 0.9)
fit_knn <- train(Survived~.,data = train, method = "knn",
trControl = trControl,
tuneGrid = data.frame(k = seq(3, 51, 2)))
fit_knn$bestTune
y_hat_knn <- predict(fit_knn, newdata = test, type = "raw")
mean(y_hat_knn == test$Survived)
# Classification tree model
# Classification tree model: tune the complexity parameter cp via caret.
suppressWarnings(set.seed(10, sample.kind = "Rounding"))
fit_tree <- train(Survived ~ ., data = train, method = "rpart",
                  tuneGrid = data.frame(cp = seq(0, 0.05, 0.002)))
fit_tree$bestTune
# Predict with the caret wrapper. The original reassigned
# `fit_tree <- fit_tree$finalModel` first, but predict.rpart() has no
# type = "raw" (its type is matched against vector/prob/class/matrix),
# so that call errored. Keep the caret object for prediction and reach
# into $finalModel only for plotting/inspection.
y_hat_tree <- predict(fit_tree, newdata = test, type = "raw")
mean(y_hat_tree == test$Survived)
plot(fit_tree$finalModel, margin = 0.1)
text(fit_tree$finalModel)
fit_tree$finalModel
# Random Forest Model: tune mtry (variables tried per split) over 1..7, 100 trees.
suppressWarnings(set.seed(14, sample.kind = "Rounding"))
fit_forest <- train(Survived~., method = "rf", data = train,
                    tuneGrid = data.frame(mtry = c(1:7)), ntree = 100)
fit_forest$bestTune
y_hat_forest <- predict(fit_forest, newdata = test, type = "raw")
# Test-set accuracy, then variable importance of the tuned forest.
mean(y_hat_forest == test$Survived)
varImp(fit_forest)
| /machine_learning/titanic.R | no_license | azbenoit/EDX | R | false | false | 4,777 | r | library(titanic) # loads titanic_train data frame
# caret supplies train()/predict()/confusionMatrix(); rpart backs the tree model.
library(caret)
library(tidyverse)
library(rpart)
# 3 significant digits
options(digits = 3)
# clean the data - `titanic_train` is loaded with the titanic package
titanic_clean <- titanic_train %>%
  mutate(Survived = factor(Survived),
         Embarked = factor(Embarked),
         Age = ifelse(is.na(Age), median(Age, na.rm = TRUE), Age), # NA age to median age
         FamilySize = SibSp + Parch + 1) %>% # count family members
  select(Survived, Sex, Pclass, Age, Fare, SibSp, Parch, FamilySize, Embarked)
# Stratified split: 20% of rows (p = .2) go to the test set.
suppressWarnings(set.seed(42, sample.kind = "Rounding"))
i <- createDataPartition(y = titanic_clean$Survived, list = F, p = .2)
train <- titanic_clean[-i,]
test <- titanic_clean[i,]
# Overall survival rate in the training set (baseline).
mean(train$Survived == "1")
# Guessing the outcome at random (accuracy baseline).
suppressWarnings(set.seed(3, sample.kind = "Rounding"))
y_hat_guess <- sample(c("0","1"), nrow(test), replace = T)
mean(y_hat_guess == test$Survived)
# Predicting survival by sex: survival rates for females and males in training data.
mean(train$Survived == "1" & train$Sex == "female")/mean(train$Sex == "female")
mean(train$Survived == "1" & train$Sex == "male")/mean(train$Sex == "male")
# Rule: predict survival for females only.
y_hat_sex <- factor(ifelse(test$Sex == "female", "1", "0"), levels = levels(test$Survived))
mean(y_hat_sex == test$Survived)
confusionMatrix(y_hat_sex, test$Survived)
F_meas(y_hat_sex, test$Survived)
# Predicting survival by passenger class
train %>%
  mutate(Pclass = factor(Pclass)) %>%
  ggplot(aes(Pclass, fill = Survived)) +
  geom_bar()
train %>%
  group_by(Pclass) %>%
  summarize(Survived = mean(Survived == 1))
# Rule: predict survival for first class only.
y_hat_class <- factor(ifelse(test$Pclass == 1, "1", "0"))
mean(y_hat_class == test$Survived)
confusionMatrix(y_hat_class, test$Survived)
F_meas(y_hat_class, test$Survived)
# Both Sex and Class: survival rate per (class, sex) group.
train %>%
  group_by(Pclass, Sex) %>%
  summarise(Survived = mean(Survived == "1"))
# Rule: predict survival for women in 1st or 2nd class only.
y_hat_sex_class <- factor(ifelse(test$Pclass != 3 & test$Sex == "female", "1", "0"))
mean(y_hat_sex_class == test$Survived)
confusionMatrix(y_hat_sex_class, test$Survived)
F_meas(y_hat_sex_class, test$Survived)
# Survival by fare - LDA and QDA
# LDA on Fare only.
# sample.kind = "Rounding" reproduces pre-R-3.6 sampling; its warning is suppressed.
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_lda <- train(Survived~Fare, data = train, method = "lda")
y_hat_lda <- predict(fit_lda, test, type = "raw")
# Test-set accuracy of the LDA model.
mean(y_hat_lda == test$Survived)
# QDA on Fare only.
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_qda <- train(Survived~Fare, data = train, method = "qda")
y_hat_qda <- predict(fit_qda, test, type = "raw")
mean(y_hat_qda == test$Survived)
# Logistic regression models
# Age only
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_glm_age <- train(Survived~Age, data = train, method = "glm")
y_hat_glm_age <- predict(fit_glm_age, newdata = test, type = "raw")
mean(y_hat_glm_age == test$Survived)
# sex, class, fare, and age.
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_glm <- train(Survived~Age+Sex+Fare+Pclass, data = train, method = "glm")
y_hat_glm <- predict(fit_glm, newdata = test, type = "raw")
mean(y_hat_glm == test$Survived)
# All predictors
suppressWarnings(set.seed(1, sample.kind = "Rounding"))
fit_glm_all <- train(Survived~., data = train, method = "glm")
y_hat_glm_all <- predict(fit_glm_all, newdata = test, type = "raw")
mean(y_hat_glm_all == test$Survived)
# KNN, tuning k over the odd values 3..51.
suppressWarnings(set.seed(6, sample.kind = "Rounding"))
fit_knn <- train(Survived~.,data = train, method = "knn",
                 tuneGrid = data.frame(k = seq(3, 51, 2)))
fit_knn$bestTune
y_hat_knn <- predict(fit_knn, newdata = test, type = "raw")
mean(y_hat_knn == test$Survived)
plot(fit_knn)
# 10-fold Cross Validation (each fold trains on 90% of the data, p = 0.9).
suppressWarnings(set.seed(8, sample.kind = "Rounding"))
trControl <- trainControl(method = "cv",number = 10, p = 0.9)
fit_knn <- train(Survived~.,data = train, method = "knn",
                 trControl = trControl,
                 tuneGrid = data.frame(k = seq(3, 51, 2)))
fit_knn$bestTune
y_hat_knn <- predict(fit_knn, newdata = test, type = "raw")
mean(y_hat_knn == test$Survived)
# Classification tree model (rpart), tuning the complexity parameter cp.
suppressWarnings(set.seed(10, sample.kind = "Rounding"))
fit_tree <- train(Survived~., data = train, method = "rpart",
                  tuneGrid = data.frame(cp = seq(0, 0.05, 0.002)))
fit_tree$bestTune
# Predict from the caret object first: predict.train() accepts type = "raw",
# whereas predict.rpart() - used once finalModel has been extracted - only
# accepts "vector"/"prob"/"class"/"matrix" and would error on "raw".
y_hat_tree <- predict(fit_tree, newdata = test, type = "raw")
mean(y_hat_tree == test$Survived)
# Extract the fitted rpart tree only for inspection and plotting.
fit_tree <- fit_tree$finalModel
plot(fit_tree, margin = 0.1)
text(fit_tree)
fit_tree
# Random Forest Model: tune mtry (variables tried per split) over 1..7, 100 trees.
suppressWarnings(set.seed(14, sample.kind = "Rounding"))
fit_forest <- train(Survived~., method = "rf", data = train,
                    tuneGrid = data.frame(mtry = c(1:7)), ntree = 100)
fit_forest$bestTune
y_hat_forest <- predict(fit_forest, newdata = test, type = "raw")
# Test-set accuracy, then variable importance of the tuned forest.
mean(y_hat_forest == test$Survived)
varImp(fit_forest)
|
library(tidyverse)
## Merge
immigration <- read_csv("ImmigrationData.csv")
region <- read_csv("RegionData.csv")
# Left join keeps every immigration row; region info is attached by country code.
merged <- immigration %>% left_join(region, by = c("CountryCode" = "countrycode"))
# the number of rows should be the same before and after the merge
nrow(merged) == nrow(immigration) # TRUE
# the following should also be true
n_distinct(immigration$CountryCode) == n_distinct(region$countrycode) # TRUE
n_distinct(merged$CountryCode) == n_distinct(immigration$CountryCode) # TRUE
## Analysis
# 1. Filter all observations from countries in Africa collected in 1990.
# How many are there? (you can just look at the number of rows)
merged %>%
  select(region, year) %>%
  filter(region == "Africa" & year == 1990) %>%
  count(region)
# There are 57 observations in the year 1990.
# 2. Which countries have a number of female migrants between 1 and 2 million excluded?
merged %>%
  select(Country, FemaleMigrants) %>%
  filter(FemaleMigrants > 1000000, FemaleMigrants < 2000000) %>%
  arrange(FemaleMigrants) %>%
  distinct(Country)
# There are 21 such countries including Israel, Netherlands, Lebanon, Thailand, etc.
# across different years.
# 3. Filter countries located in Africa or Oceania.
# How many are there? (you can just look at the number of rows)
merged %>%
  select(Country, region) %>%
  filter(region == "Africa" | region == "Oceania") %>%
  distinct(Country)
# There are 67 countries located either in Africa or Oceania regions.
# 4. Which country has the highest number of migrants among “developed” countries in 2010?
# In answering these questions, keep only relevant columns in the output
merged %>%
  filter(region == "Developed countries" & year == 2010) %>%
  select(Country, Migrants, region, year) %>%
  arrange(desc(Migrants)) %>%
  filter(row_number()==1)
# United States had the most migrants amongst "developed" countries in the year
# 2010 with 44183643 migrants.
# 5. Reorganize the dataset so that all columns containing information
# about the immigrant population come right after the year column.
# All columns should be kept in the dataset.
reorganized <-
  merged %>%
  select(-contains(c("Migrants", "year")),
         "year",
         contains("Migrants"))
reorganized
# Should be true and it is
ncol(reorganized) == ncol(merged)
# 6. Filter observations from the following countries:
# Italy, Germany, Spain, France, Portugal, and Greece in 2015.
# Use %in% to write your filter function.
merged %>%
  filter(Country %in% c("Italy", "Germany", "Spain", "France", "Portugal", "Greece") &
           year == 2015)
| /week4_ex/week4_ex.r | no_license | alexkcode/pa-434 | R | false | false | 2,607 | r |
library(tidyverse)
## Merge
immigration <- read_csv("ImmigrationData.csv")
region <- read_csv("RegionData.csv")
# Left join keeps every immigration row; region info is attached by country code.
merged <- immigration %>% left_join(region, by = c("CountryCode" = "countrycode"))
# the number of rows should be the same before and after the merge
nrow(merged) == nrow(immigration) # TRUE
# the following should also be true
n_distinct(immigration$CountryCode) == n_distinct(region$countrycode) # TRUE
n_distinct(merged$CountryCode) == n_distinct(immigration$CountryCode) # TRUE
## Analysis
# 1. Filter all observations from countries in Africa collected in 1990.
# How many are there? (you can just look at the number of rows)
merged %>%
  select(region, year) %>%
  filter(region == "Africa" & year == 1990) %>%
  count(region)
# There are 57 observations in the year 1990.
# 2. Which countries have a number of female migrants between 1 and 2 million excluded?
merged %>%
  select(Country, FemaleMigrants) %>%
  filter(FemaleMigrants > 1000000, FemaleMigrants < 2000000) %>%
  arrange(FemaleMigrants) %>%
  distinct(Country)
# There are 21 such countries including Israel, Netherlands, Lebanon, Thailand, etc.
# across different years.
# 3. Filter countries located in Africa or Oceania.
# How many are there? (you can just look at the number of rows)
merged %>%
  select(Country, region) %>%
  filter(region == "Africa" | region == "Oceania") %>%
  distinct(Country)
# There are 67 countries located either in Africa or Oceania regions.
# 4. Which country has the highest number of migrants among “developed” countries in 2010?
# In answering these questions, keep only relevant columns in the output
merged %>%
  filter(region == "Developed countries" & year == 2010) %>%
  select(Country, Migrants, region, year) %>%
  arrange(desc(Migrants)) %>%
  filter(row_number()==1)
# United States had the most migrants amongst "developed" countries in the year
# 2010 with 44183643 migrants.
# 5. Reorganize the dataset so that all columns containing information
# about the immigrant population come right after the year column.
# All columns should be kept in the dataset.
reorganized <-
  merged %>%
  select(-contains(c("Migrants", "year")),
         "year",
         contains("Migrants"))
reorganized
# Should be true and it is
ncol(reorganized) == ncol(merged)
# 6. Filter observations from the following countries:
# Italy, Germany, Spain, France, Portugal, and Greece in 2015.
# Use %in% to write your filter function.
merged %>%
  filter(Country %in% c("Italy", "Germany", "Spain", "France", "Portugal", "Greece") &
           year == 2015)
|
#' Perform a rank-based single sample gene set analysis.
#'
#' This function performs a rank-based single sample gene set analysis for
#' a set of samples (\code{data}) and a list of gene sets (\code{geneset_list}).
#'
#' @param data Either a set of samples in \code{data.frame} format
#' or a sample in a named array format.
#' @param geneset_list a named list of gene sets; each list element is named
#' and is a list of genes.
#' @param row_names optional gene names passed to
#' 'a_sample_gene_set_rank_test' for a single-sample input; a data.frame
#' input uses its own row names instead.
#' @param alternative see 'a_sample_gene_set_rank_test' for detail.
#' @param test.method test method passed to 'a_sample_gene_set_rank_test';
#' defaults to 'ks'.
#' @return a list with one result per sample, named by sample for
#' data.frame input.
#' @export
#' @examples
#' gene_set_rank_test(data, a_geneset_list)
gene_set_rank_test <- function(data, geneset_list, row_names = NULL, alternative = "two.sided", test.method = "ks") {
  if (is.data.frame(data)) {
    n_samples <- ncol(data)
    # Preallocate one slot per sample.  (Was vector("list", length(n_samples)),
    # i.e. always length 1 since n_samples is a scalar, forcing the list to
    # grow on every loop iteration.)
    gsas_list <- vector("list", n_samples)
    row_names_x <- row.names(data)
    # seq_len() is safe for a 0-column data.frame, unlike 1:n_samples.
    for (ii in seq_len(n_samples)) {
      x <- data[, ii]
      a_gsa <- a_sample_gene_set_rank_test(x, geneset_list,
                                           row_names = row_names_x,
                                           alternative = alternative,
                                           test.method = test.method)
      gsas_list[[ii]] <- a_gsa
      print(sprintf("%s (%d / %d) processed", colnames(data)[ii], ii, n_samples))
    }
    names(gsas_list) <- colnames(data)
    gsas_list
  } else {
    # Single sample supplied as a (named) vector: return a one-element list
    # so the return type matches the data.frame branch.
    list(a_sample_gene_set_rank_test(data, geneset_list,
                                     row_names = row_names,
                                     alternative = alternative,
                                     test.method = test.method))
  }
}
| /R/ssgsrt.gene_set_rank_test.R | no_license | dolchan/ssgsrt | R | false | false | 1,677 | r | #' Perform a rank-based single sample gene set analysis.
#'
#' This function performs a rank-based single sample gene set analysis for
#' a set of samples (\code{data}) and a list of gene sets (\code{geneset_list}).
#'
#' @param data Either a set of samples in \code{data.frame} format
#' or a sample in a named array format.
#' @param geneset_list a named list of gene sets, each of list element is named
#' and a list ofgenes.
#' @param alternative see 'a_sample_gene_set_rank_test' for detail.
#' @param test.method \code{c('ks.test', 'wilcox.test')}. Defaults to 'ks'.
#' @export
#' @examples
#' gene_set_rank_test(data, a_geneset_list)
gene_set_rank_test <- function(data, geneset_list, row_names = NULL, alternative = "two.sided", test.method = "ks") {
  if (is.data.frame(data)) {
    n_samples <- ncol(data)
    # Preallocate one slot per sample.  (Was vector("list", length(n_samples)),
    # i.e. always length 1 since n_samples is a scalar, forcing the list to
    # grow on every loop iteration.)
    gsas_list <- vector("list", n_samples)
    row_names_x <- row.names(data)
    # seq_len() is safe for a 0-column data.frame, unlike 1:n_samples.
    for (ii in seq_len(n_samples)) {
      x <- data[, ii]
      a_gsa <- a_sample_gene_set_rank_test(x, geneset_list,
                                           row_names = row_names_x,
                                           alternative = alternative,
                                           test.method = test.method)
      gsas_list[[ii]] <- a_gsa
      print(sprintf("%s (%d / %d) processed", colnames(data)[ii], ii, n_samples))
    }
    names(gsas_list) <- colnames(data)
    gsas_list
  } else {
    # Single sample supplied as a (named) vector: return a one-element list
    # so the return type matches the data.frame branch.
    list(a_sample_gene_set_rank_test(data, geneset_list,
                                     row_names = row_names,
                                     alternative = alternative,
                                     test.method = test.method))
  }
}
|
# Build a tidy summary of the UCI HAR data set: merge train/test, keep the
# mean/std measurements, label activities, and write the per-subject,
# per-activity averages to "MeasureAvgTidySet.txt".
# NOTE(review): paths use Windows-style "\\" separators; consider file.path().
run_analysis <- function(){
  # Input file locations, relative to the working directory.
  fileNameY_test = "UCI HAR Dataset\\test\\y_test.txt"
  fileNameX_test = "UCI HAR Dataset\\test\\X_test.txt"
  fileNameSubjecttest = "UCI HAR Dataset\\test\\subject_test.txt"
  fileNameX_train = "UCI HAR Dataset\\train\\X_train.txt"
  fileNameY_train = "UCI HAR Dataset\\train\\y_train.txt"
  fileNameSubject_train = "UCI HAR Dataset\\train\\subject_train.txt"
  fileName_features = "UCI HAR Dataset\\features.txt"
  fileName_activityType = "UCI HAR Dataset\\activity_labels.txt"
  trainSet_X <- read.table(fileNameX_train, header=FALSE)
  trainSet_Y <- read.table(fileNameY_train, header=FALSE)
  set_activityType <- read.table(fileName_activityType, header=FALSE)
  trainSet_subject <- read.table(fileNameSubject_train, header=FALSE)
  testSet_subject <- read.table(fileNameSubjecttest, header=FALSE)
  testSet_X <- read.table(fileNameX_test, header=FALSE)
  testSet_Y <- read.table(fileNameY_test, header=FALSE)
  ##################################################################
  # 1. Merges the training and the test sets to create one data set.
  ##################################################################
  train_data <- cbind(trainSet_X, trainSet_Y, trainSet_subject)
  test_data <- cbind(testSet_X, testSet_Y, testSet_subject)
  data <- rbind(train_data,test_data)
  ############################################################################################
  # 2. Extracts only the measurements on the mean and standard deviation for each measurement.
  ############################################################################################
  features <- read.table(fileName_features, header=FALSE, stringsAsFactors = FALSE)
  meanStdFeatures <- features[grep("mean|std", features[,2]),]
  meanStdDataSet <- data[,meanStdFeatures[,1]]
  # Re-read the subject/activity files and stack them train-first, matching
  # the rbind() order used for `data` above.
  subjectTrainSet <- read.table(fileNameSubject_train, header=FALSE)
  subjectTestSet <- read.table(fileNameSubjecttest, header=FALSE)
  trainSet_subject <- rbind(subjectTrainSet, subjectTestSet)
  activityTrainSet <- read.table (fileNameY_train, header = FALSE)
  activityTestSet <- read.table (fileNameY_test, header = FALSE)
  set_activityType <- rbind(activityTrainSet, activityTestSet)
  meanStdDataSet <- cbind(meanStdDataSet, set_activityType)
  meanStdDataSet <- cbind(meanStdDataSet, trainSet_subject)
  names(meanStdDataSet) <- c(meanStdFeatures[,2], "Activity", "Subject")
  ###########################################################################
  # 3. Uses descriptive activity names to name the activities in the data set
  ###########################################################################
  library(plyr)
  activitiesIds <- meanStdDataSet[,"Activity"]
  activitiesFactors <- as.factor(activitiesIds)
  meanStdDataSet[,"Activity"] = activitiesFactors
  ###########################################################################
  # 4. Appropriately labels the data set with descriptive activity names.
  ###########################################################################
  names(meanStdDataSet) <- c(meanStdFeatures[,2], "Activity", "Subject")
  ######################################################################################################################
  # 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
  ######################################################################################################################
  # data.table is assumed to be installed; calling install.packages() inside a
  # function (a network side effect on every run) was removed.
  library(data.table)
  tidyDataTable <- data.table(meanStdDataSet)
  avgTidyDataTable <- tidyDataTable[, lapply(.SD,mean), by=c("Activity","Subject")]
  newColNames = sapply(names(avgTidyDataTable)[-(1:2)], function(name) paste("mean(", name, ")", sep=""))
  setnames(avgTidyDataTable, names(avgTidyDataTable), c("Activity", "Subject", newColNames))
  write.csv(avgTidyDataTable, file="MeasureAvgTidySet.txt", row.names = FALSE)
} | /run_analysis.R | no_license | antoniogiuzio/course-project | R | false | false | 3,846 | r | run_analysis <- function(){
fileNameY_test = "UCI HAR Dataset\\test\\y_test.txt"
fileNameX_test = "UCI HAR Dataset\\test\\X_test.txt"
fileNameSubjecttest = "UCI HAR Dataset\\test\\subject_test.txt"
fileNameX_train = "UCI HAR Dataset\\train\\X_train.txt"
fileNameY_train = "UCI HAR Dataset\\train\\y_train.txt"
fileNameSubject_train = "UCI HAR Dataset\\train\\subject_train.txt"
fileName_features = "UCI HAR Dataset\\features.txt"
fileName_activityType = "UCI HAR Dataset\\activity_labels.txt"
trainSet_X <- read.table(fileNameX_train, header=FALSE)
trainSet_Y <- read.table(fileNameY_train, header=FALSE)
set_activityType <- read.table(fileName_activityType, header=FALSE)
trainSet_subject <- read.table(fileNameSubject_train, header=FALSE)
testSet_subject <- read.table(fileNameSubjecttest, header=FALSE)
testSet_X <- read.table(fileNameX_test, header=FALSE)
testSet_Y <- read.table(fileNameY_test, header=FALSE)
##################################################################
# 1. Merges the training and the test sets to create one data set.
##################################################################
train_data <- cbind(trainSet_X, trainSet_Y, trainSet_subject)
test_data <- cbind(testSet_X, testSet_Y, testSet_subject)
data <- rbind(train_data,test_data)
############################################################################################
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
############################################################################################
features <- read.table(fileName_features, header=FALSE, stringsAsFactors = FALSE)
meanStdFeatures <- features[grep("mean|std", features[,2]),]
meanStdDataSet <- data[,meanStdFeatures[,1]]
subjectTrainSet <- read.table(fileNameSubject_train, header=FALSE)
subjectTestSet <- read.table(fileNameSubjecttest, header=FALSE)
trainSet_subject <- rbind(subjectTrainSet, subjectTestSet)
activityTrainSet <- read.table (fileNameY_train, header = FALSE)
activityTestSet <- read.table (fileNameY_test, header = FALSE)
set_activityType <- rbind(activityTrainSet, activityTestSet)
meanStdDataSet <- cbind(meanStdDataSet, set_activityType)
meanStdDataSet <- cbind(meanStdDataSet, trainSet_subject)
names(meanStdDataSet) <- c(meanStdFeatures[,2], "Activity", "Subject")
###########################################################################
# 3. Uses descriptive activity names to name the activities in the data set
###########################################################################
library(plyr)
activitiesIds <- meanStdDataSet[,"Activity"]
activitiesFactors <- as.factor(activitiesIds)
meanStdDataSet[,"Activity"] = activitiesFactors
###########################################################################
# 4. Appropriately labels the data set with descriptive activity names.
###########################################################################
names(meanStdDataSet) <- c(meanStdFeatures[,2], "Activity", "Subject")
######################################################################################################################
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
######################################################################################################################
install.packages("data.table")
library(data.table)
tidyDataTable <- data.table(meanStdDataSet)
avgTidyDataTable <- tidyDataTable[, lapply(.SD,mean), by=c("Activity","Subject")]
newColNames = sapply(names(avgTidyDataTable)[-(1:2)], function(name) paste("mean(", name, ")", sep=""))
setnames(avgTidyDataTable, names(avgTidyDataTable), c("Activity", "Subject", newColNames))
write.csv(avgTidyDataTable, file="MeasureAvgTidySet.txt", row.names = FALSE)
} |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1438
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1438
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipdiam/nusmv.dme1-16.B-d3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 623
c no.of clauses 1438
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1438
c
c QBFLIB/Biere/tipdiam/nusmv.dme1-16.B-d3.qdimacs 623 1438 E1 [] 0 48 575 1438 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Biere/tipdiam/nusmv.dme1-16.B-d3/nusmv.dme1-16.B-d3.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 622 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1438
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1438
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipdiam/nusmv.dme1-16.B-d3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 623
c no.of clauses 1438
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1438
c
c QBFLIB/Biere/tipdiam/nusmv.dme1-16.B-d3.qdimacs 623 1438 E1 [] 0 48 575 1438 NONE
|
# Simulate a two-class data set: n samples, p predictors, class-0 mean 0 and
# class-1 mean sigma %*% beta, both with covariance sigma.
# Returns the recoded labels y (and per-class y1/y2) plus the per-class
# predictor matrices x1/x2 and their row-bound combination x.sub.
# NOTE(review): mvrnorm() comes from MASS, which is not loaded here - confirm
# the caller attaches MASS.
gen.data<-function(n,beta,sigma,p)
{
 # Class labels: Bernoulli(0.5), sorted so class 0 comes first.
 y<-rbinom(n,size=1,prob=0.5)
 y<-sort(y)
 n1<-sum(y==0)
 n2<-n-n1
 # Recode labels to -n/n1 and n/n2.
 # NOTE(review): n1 or n2 can be 0 for small n, making these values infinite.
 y[y==0]<-(-n/n1)
 y[y==1]<-n/n2
 y1<-y[1:n1]
 y2<-y[(n1+1):n]
 mu1<-rep(0,p)
 mu2<-sigma%*%beta
 # NOTE(review): x1 is drawn with `sigma1`, a free variable not defined in this
 # function (the parameter is `sigma`, used for mu2 and x2). Looks like a typo
 # for `sigma`, or it relies on a global `sigma1` - confirm before changing.
 x1<-mvrnorm(n1,mu1,sigma1)
 x2<-mvrnorm(n2,mu2,sigma)
 x.sub<-rbind(x1,x2)
 return(list(y=y,y1=y1,y2=y2,x1=x1,x2=x2,x.sub=x.sub))
} | /gen.data.r | no_license | shuanggema/NetII-SLDA | R | false | false | 348 | r | gen.data<-function(n,beta,sigma,p)
{
 # Class labels: Bernoulli(0.5), sorted so class 0 comes first.
 y<-rbinom(n,size=1,prob=0.5)
 y<-sort(y)
 n1<-sum(y==0)
 n2<-n-n1
 # Recode labels to -n/n1 and n/n2.
 # NOTE(review): n1 or n2 can be 0 for small n, making these values infinite.
 y[y==0]<-(-n/n1)
 y[y==1]<-n/n2
 y1<-y[1:n1]
 y2<-y[(n1+1):n]
 mu1<-rep(0,p)
 mu2<-sigma%*%beta
 # NOTE(review): x1 is drawn with `sigma1`, a free variable not defined in this
 # function (the parameter is `sigma`, used for mu2 and x2). Looks like a typo
 # for `sigma`, or it relies on a global `sigma1` - confirm before changing.
 # NOTE(review): mvrnorm() comes from MASS, which is not loaded in this file.
 x1<-mvrnorm(n1,mu1,sigma1)
 x2<-mvrnorm(n2,mu2,sigma)
 x.sub<-rbind(x1,x2)
 return(list(y=y,y1=y1,y2=y2,x1=x1,x2=x2,x.sub=x.sub))
}
# Minimal shiny app: show the iris data set as an interactive DT table with
# export buttons.
library(shiny)
ui <- fluidPage(
  titlePanel("DaTa Table"),
  sidebarLayout(
    sidebarPanel(
    ),
    mainPanel(
      DT::dataTableOutput("iris")
    )
  )
)
# Plain server function: the shinyServer() wrapper is deprecated and is not
# needed when the app is launched with shinyApp(ui, server).
server <- function(input, output) {
  output$iris <- DT::renderDataTable(
    iris,
    # The 'Buttons' extension adds copy/pdf/csv/excel/print export buttons;
    # dom = 'Bfrtip' places them above the table.
    extensions = 'Buttons',
    options = list(dom = 'Bfrtip',
                   buttons = list('copy', 'pdf', 'csv', 'excel', 'print'))
  )
}
shinyApp(ui=ui,server=server) | /Datatable.R | no_license | kuldeepjha/R | R | false | false | 484 | r | library(shiny)
# Minimal shiny app: show the iris data set as an interactive DT table with
# export buttons.
ui <- fluidPage(
  titlePanel("DaTa Table"),
  sidebarLayout(
    sidebarPanel(
    ),
    mainPanel(
      DT::dataTableOutput("iris")
    )
  )
)
# Plain server function: the shinyServer() wrapper is deprecated and is not
# needed when the app is launched with shinyApp(ui, server).
server <- function(input, output) {
  output$iris <- DT::renderDataTable(
    iris,
    # The 'Buttons' extension adds copy/pdf/csv/excel/print export buttons;
    # dom = 'Bfrtip' places them above the table.
    extensions = 'Buttons',
    options = list(dom = 'Bfrtip',
                   buttons = list('copy', 'pdf', 'csv', 'excel', 'print'))
  )
}
shinyApp(ui=ui,server=server) |
#load the necessary packages
# Twitter API authentication happens in this external helper script.
source('/home/cris/mrcrstnherediagmez@gmail.com/Countwords/authenticate.R')
# Pull up to 2000 English tweets per cab company.
# NOTE(review): the argument is spelled `lan`; it appears to rely on partial
# matching of searchTwitter's `lang` argument - confirm and spell it out.
Meru_tweets=searchTwitter("MeruCabs",n=2000,lan="en")
Ola_tweets=searchTwitter("OlaCabs",n=2000,lan="en")
TaxiForSure_tweets=searchTwitter("TaxiForSure",n=2000,lan="en")
Uber_tweets=searchTwitter("Uber_Delhi",n=2000,lan="en")
# check length
length(Meru_tweets)
length(Ola_tweets)
length(TaxiForSure_tweets)
length(Uber_tweets)
#cleaning the corpus: keep only the text of each status object
Meru_tweets=sapply(Meru_tweets,function(x) x$getText())
Ola_tweets=sapply(Ola_tweets,function(x) x$getText())
TaxiForSure_tweets=sapply(TaxiForSure_tweets, function(x) x$getText())
Uber_tweets=sapply(Uber_tweets, function(x) x$getText())
# Lower-case `x`, returning NA instead of raising an error when tolower()
# fails (e.g. for input that cannot be coerced to character).
catch.error <- function(x) {
  tryCatch(tolower(x), error = function(e) NA)
}
# Normalise a tweet (or character vector of tweets) for sentiment analysis:
# strip links, retweet markers, hashtags, mentions, punctuation and digits,
# collapse whitespace, and lower-case the result.  Elements that cannot be
# lower-cased become NA via catch.error().
cleanTweets=function(tweet){
  # Remove http(s)/ftp links.  (The original pattern contained stray spaces,
  # "(f|ht) (tp) (s?) ...", so it could never match a real URL.)
  tweet=gsub("(f|ht)(tp)(s?)(://)(.*)[.|/](.*)"," ",tweet)
  # Remove retweet entities such as "RT @user" / "via @user" (same stray-space
  # fix as above).
  tweet=gsub("(RT|via)((?:\\b\\W*@\\w+)+)"," ",tweet)
  # Remove hashtags.
  tweet=gsub("#\\w+"," ",tweet)
  # Remove @mentions.
  tweet=gsub("@\\w+"," ",tweet)
  # Remove punctuation.
  tweet=gsub("[[:punct:]]"," ",tweet)
  # Remove numbers; only words matter for scoring.
  tweet=gsub("[[:digit:]]"," ",tweet)
  # Collapse runs of spaces/tabs, then trim the ends (trimming with "" rather
  # than " " so results do not keep a leading/trailing blank).
  tweet=gsub("[ \t]{2,}"," ",tweet)
  tweet=gsub("^\\s+|\\s+$","",tweet)
  # Lower-case; catch.error() yields NA when tolower() fails.
  tweet=catch.error(tweet)
  tweet
}
# Clean a collection of tweets, then drop failed (NA) results and keep each
# distinct cleaned tweet once.
cleanTweetsAndRemoveNAs <- function(Tweets) {
  cleaned <- sapply(Tweets, cleanTweets)
  # Drop tweets that failed cleaning ...
  cleaned <- cleaned[!is.na(cleaned)]
  names(cleaned) <- NULL
  # ... and de-duplicate the remainder.
  unique(cleaned)
}
Meru_tweetsCleaned=cleanTweetsAndRemoveNAs(Meru_tweets)
Ola_tweetsCleaned=cleanTweetsAndRemoveNAs(Ola_tweets)
TaxiForSure_tweetsCleaned=cleanTweetsAndRemoveNAs(TaxiForSure_tweets)
Uber_tweetsCleaned=cleanTweetsAndRemoveNAs(Uber_tweets)
#size for cleaned lists
length(Meru_tweetsCleaned)
length(Ola_tweetsCleaned)
length(TaxiForSure_tweetsCleaned)
length(Uber_tweetsCleaned)
#Estimating sentiment A
# Load the positive/negative opinion lexicons; ';' marks comment lines in the files.
opinion.lexicon.pos=scan('/home/cris/mrcrstnherediagmez@gmail.com/Countwords/data/opinion-lexicon-English/positive-words.txt',what = 'character',comment.char = ';')
opinion.lexicon.neg=scan('/home/cris/mrcrstnherediagmez@gmail.com/Countwords/data/opinion-lexicon-English/negative-words.txt',what = 'character',comment.char = ';')
#we can add other domain-specific terms (based on our requirements)
pos.words=c(opinion.lexicon.pos,'upgrade')
neg.words=c(opinion.lexicon.neg,'wait','waiting','wtf','cancellation')
#now we create a function to score a sentiment (computes raw sentiment based on simple matching algorithm)
# Score each sentence as (# positive word matches) - (# negative word matches)
# after stripping digits/punctuation/control characters and lower-casing.
# Returns a data.frame with columns `text` and `score`.
# NOTE: only base R is used; the original require(plyr)/require(stringr)
# calls loaded packages this function never touches, so they were dropped.
getSentimentScore=function(sentences,words.positive,words.negative){
  scores=sapply(sentences,function(sentence,words.positive,words.negative){
    #remove digits,punc and control chars
    sentence=gsub('[[:cntrl:]]','',gsub('[[:punct:]]','',gsub('\\d+','',sentence)))
    #convert all to lower case
    sentence=tolower(sentence)
    #split each sentence by space delimiter
    words=unlist(strsplit(sentence,'\\s+'))
    #get the boolean match of each words with the positive and negative opinion lexicon
    pos.matches=!is.na(match(words,words.positive))
    neg.matches=!is.na(match(words,words.negative))
    #get the score as total positive sentiment minus the total negatives
    score=sum(pos.matches)-sum(neg.matches)
    return(score)
  },words.positive,words.negative)
  #return data frame with respective sentence and score
  return(data.frame(text=sentences,score=scores))
}
#apply preceding to each corpus
MeruRes=getSentimentScore(Meru_tweetsCleaned,pos.words,neg.words)
OlaRes=getSentimentScore(Ola_tweetsCleaned,pos.words,neg.words)
TaxiForSureRes=getSentimentScore(TaxiForSure_tweetsCleaned,pos.words,neg.words)
UberRes=getSentimentScore(Uber_tweetsCleaned,pos.words,neg.words)
#since not all samples are the same size, lets compute mean and sd
# Per-company average sentiment ...
MeruMean=mean(MeruRes$score)
OlaMean=mean(OlaRes$score)
TaxiForSureMean=mean(TaxiForSureRes$score)
UberMean=mean(UberRes$score)
# ... and spread of the sentiment scores.
MeruSd=sd(MeruRes$score)
OlaSd=sd(OlaRes$score)
TaxiForSureSd=sd(TaxiForSureRes$score)
UberSd=sd(UberRes$score) | /cabsSentimentAnalysis.R | no_license | CristinaHG/Social-media-mining | R | false | false | 4,716 | r | #load the necessary packages
# Twitter API authentication happens in this external helper script.
source('/home/cris/mrcrstnherediagmez@gmail.com/Countwords/authenticate.R')
# Pull up to 2000 English tweets per cab company.
# NOTE(review): the argument is spelled `lan`; it appears to rely on partial
# matching of searchTwitter's `lang` argument - confirm and spell it out.
Meru_tweets=searchTwitter("MeruCabs",n=2000,lan="en")
Ola_tweets=searchTwitter("OlaCabs",n=2000,lan="en")
TaxiForSure_tweets=searchTwitter("TaxiForSure",n=2000,lan="en")
Uber_tweets=searchTwitter("Uber_Delhi",n=2000,lan="en")
# check length
length(Meru_tweets)
length(Ola_tweets)
length(TaxiForSure_tweets)
length(Uber_tweets)
#cleaning the corpus: keep only the text of each status object
Meru_tweets=sapply(Meru_tweets,function(x) x$getText())
Ola_tweets=sapply(Ola_tweets,function(x) x$getText())
TaxiForSure_tweets=sapply(TaxiForSure_tweets, function(x) x$getText())
Uber_tweets=sapply(Uber_tweets, function(x) x$getText())
# Lower-case `x`, returning NA instead of raising an error when tolower()
# fails (e.g. for input that cannot be coerced to character).
catch.error <- function(x) {
  tryCatch(tolower(x), error = function(e) NA)
}
# Normalise a tweet (or character vector of tweets) for sentiment analysis:
# strip links, retweet markers, hashtags, mentions, punctuation and digits,
# collapse whitespace, and lower-case the result.  Elements that cannot be
# lower-cased become NA via catch.error().
cleanTweets=function(tweet){
  # Remove http(s)/ftp links.  (The original pattern contained stray spaces,
  # "(f|ht) (tp) (s?) ...", so it could never match a real URL.)
  tweet=gsub("(f|ht)(tp)(s?)(://)(.*)[.|/](.*)"," ",tweet)
  # Remove retweet entities such as "RT @user" / "via @user" (same stray-space
  # fix as above).
  tweet=gsub("(RT|via)((?:\\b\\W*@\\w+)+)"," ",tweet)
  # Remove hashtags.
  tweet=gsub("#\\w+"," ",tweet)
  # Remove @mentions.
  tweet=gsub("@\\w+"," ",tweet)
  # Remove punctuation.
  tweet=gsub("[[:punct:]]"," ",tweet)
  # Remove numbers; only words matter for scoring.
  tweet=gsub("[[:digit:]]"," ",tweet)
  # Collapse runs of spaces/tabs, then trim the ends (trimming with "" rather
  # than " " so results do not keep a leading/trailing blank).
  tweet=gsub("[ \t]{2,}"," ",tweet)
  tweet=gsub("^\\s+|\\s+$","",tweet)
  # Lower-case; catch.error() yields NA when tolower() fails.
  tweet=catch.error(tweet)
  tweet
}
# Clean a collection of tweets, then drop failed (NA) results and keep each
# distinct cleaned tweet once.
cleanTweetsAndRemoveNAs <- function(Tweets) {
  cleaned <- sapply(Tweets, cleanTweets)
  # Drop tweets that failed cleaning ...
  cleaned <- cleaned[!is.na(cleaned)]
  names(cleaned) <- NULL
  # ... and de-duplicate the remainder.
  unique(cleaned)
}
Meru_tweetsCleaned=cleanTweetsAndRemoveNAs(Meru_tweets)
Ola_tweetsCleaned=cleanTweetsAndRemoveNAs(Ola_tweets)
TaxiForSure_tweetsCleaned=cleanTweetsAndRemoveNAs(TaxiForSure_tweets)
Uber_tweetsCleaned=cleanTweetsAndRemoveNAs(Uber_tweets)
#size for cleaned lists
length(Meru_tweetsCleaned)
length(Ola_tweetsCleaned)
length(TaxiForSure_tweetsCleaned)
length(Uber_tweetsCleaned)
#Estimating sentiment A
# Load the positive/negative opinion lexicons; ';' marks comment lines in the files.
opinion.lexicon.pos=scan('/home/cris/mrcrstnherediagmez@gmail.com/Countwords/data/opinion-lexicon-English/positive-words.txt',what = 'character',comment.char = ';')
opinion.lexicon.neg=scan('/home/cris/mrcrstnherediagmez@gmail.com/Countwords/data/opinion-lexicon-English/negative-words.txt',what = 'character',comment.char = ';')
#we can add other domain-specific terms (based on our requirements)
pos.words=c(opinion.lexicon.pos,'upgrade')
neg.words=c(opinion.lexicon.neg,'wait','waiting','wtf','cancellation')
#now we create a function to score a sentiment (computes raw sentiment based on simple matching algorithm)
# Score each sentence as (# positive word matches) - (# negative word matches)
# after stripping digits/punctuation/control characters and lower-casing.
# Returns a data.frame with columns `text` and `score`.
# NOTE: only base R is used; the original require(plyr)/require(stringr)
# calls loaded packages this function never touches, so they were dropped.
getSentimentScore=function(sentences,words.positive,words.negative){
  scores=sapply(sentences,function(sentence,words.positive,words.negative){
    #remove digits,punc and control chars
    sentence=gsub('[[:cntrl:]]','',gsub('[[:punct:]]','',gsub('\\d+','',sentence)))
    #convert all to lower case
    sentence=tolower(sentence)
    #split each sentence by space delimiter
    words=unlist(strsplit(sentence,'\\s+'))
    #get the boolean match of each words with the positive and negative opinion lexicon
    pos.matches=!is.na(match(words,words.positive))
    neg.matches=!is.na(match(words,words.negative))
    #get the score as total positive sentiment minus the total negatives
    score=sum(pos.matches)-sum(neg.matches)
    return(score)
  },words.positive,words.negative)
  #return data frame with respective sentence and score
  return(data.frame(text=sentences,score=scores))
}
#apply preceding to each corpus
MeruRes=getSentimentScore(Meru_tweetsCleaned,pos.words,neg.words)
OlaRes=getSentimentScore(Ola_tweetsCleaned,pos.words,neg.words)
TaxiForSureRes=getSentimentScore(TaxiForSure_tweetsCleaned,pos.words,neg.words)
UberRes=getSentimentScore(Uber_tweetsCleaned,pos.words,neg.words)
#since not all samples are the same size, lets compute mean and sd
# Average sentiment score per brand
MeruMean=mean(MeruRes$score)
OlaMean=mean(OlaRes$score)
TaxiForSureMean=mean(TaxiForSureRes$score)
UberMean=mean(UberRes$score)
# Spread of sentiment scores per brand
MeruSd=sd(MeruRes$score)
OlaSd=sd(OlaRes$score)
TaxiForSureSd=sd(TaxiForSureRes$score)
UberSd=sd(UberRes$score) |
library("DT")
library("shinyBS")
library("leaflet")
library("shinyjs")
library("highcharter")
# Inline CSS: keep leaflet controls under modals, and style the
# full-screen "loading" overlay shown while the database loads.
appCSS <- "
.leaflet-top { z-index: 999;}
#loading-content {
position: absolute;
background: #FFFFFF;
opacity: 0.9;
z-index: 100;
left: 0;
right: 0;
height: 100%;
text-align: center;
color: #000000;
}
"
# load.fontawesome()
# UI definition for the Oil & Wine Presses explorer app
shinyUI(
fluidPage(
inlineCSS(appCSS),
theme = "animate.min.css",
useShinyjs(),
#for the circles in the table
includeScript('www/fontawesome.js'),
#for the modal window size
tags$head(tags$style(HTML(
'.modal-lg {width: 85%;}'
))),
# Server-rendered controls: time-period filter and record count
uiOutput("timeperiod_main_DT_UI"),
p(),
uiOutput("text_total_nr"),
p(),
# Press-type bar filter (left) next to map-marker options (right)
fluidRow(
column(uiOutput("presstype_bar"), width = 8),
column(uiOutput("map_markers"), width = 4)
),
p(),
# Overlay shown until the main table is ready (hidden server-side)
div(id = "loading-main-table",
fluidPage(
h2(class = "animated infinite pulse", "Loading database...")
# HTML("<img src=images/cruk-logo.png width='50%'></img>")
)),
downloadButton("downloadData", "Download Table"),
p(),
# Centered leaflet map of the filtered records
fluidRow(
column(width = 1),
column(leafletOutput("map_view"), width = 10),
column(width = 1)
),
p(),
# Two views over the same data: a record table and summary charts
tabsetPanel(
tabPanel("Table of Oil & Wine Presses",
DT::dataTableOutput("main_DT"),
uiOutput("the_modal_call")
),
tabPanel("Summary Charts",
# Chart controls: grouping, count metric, stacking variable
fluidRow(
column(uiOutput("groupby"), width = 4),
column(uiOutput("countby"), width = 4),
column(uiOutput("stackby"), width = 4)
),
fluidRow(
column(width=1),
column(highchartOutput("chart",height = "600px"),width=10),
column(width=1)
)
)
)
)
)
| /oilwine/ui.R | no_license | jan2nov/oxrep | R | false | false | 1,900 | r | library("DT")
library("shinyBS")
library("leaflet")
library("shinyjs")
library("highcharter")
appCSS <- "
.leaflet-top { z-index: 999;}
#loading-content {
position: absolute;
background: #FFFFFF;
opacity: 0.9;
z-index: 100;
left: 0;
right: 0;
height: 100%;
text-align: center;
color: #000000;
}
"
# load.fontawesome()
# Define UI for application that draws a histogram
shinyUI(
fluidPage(
inlineCSS(appCSS),
theme = "animate.min.css",
useShinyjs(),
#for the circles in the table
includeScript('www/fontawesome.js'),
#for the modal window size
tags$head(tags$style(HTML(
'.modal-lg {width: 85%;}'
))),
uiOutput("timeperiod_main_DT_UI"),
p(),
uiOutput("text_total_nr"),
p(),
fluidRow(
column(uiOutput("presstype_bar"), width = 8),
column(uiOutput("map_markers"), width = 4)
),
p(),
div(id = "loading-main-table",
fluidPage(
h2(class = "animated infinite pulse", "Loading database...")
# HTML("<img src=images/cruk-logo.png width='50%'></img>")
)),
downloadButton("downloadData", "Download Table"),
p(),
fluidRow(
column(width = 1),
column(leafletOutput("map_view"), width = 10),
column(width = 1)
),
p(),
tabsetPanel(
tabPanel("Table of Oil & Wine Presses",
DT::dataTableOutput("main_DT"),
uiOutput("the_modal_call")
),
tabPanel("Summary Charts",
fluidRow(
column(uiOutput("groupby"), width = 4),
column(uiOutput("countby"), width = 4),
column(uiOutput("stackby"), width = 4)
),
fluidRow(
column(width=1),
column(highchartOutput("chart",height = "600px"),width=10),
column(width=1)
)
)
)
)
)
|
\name{plot.NMixPredCondDensJoint2}
\alias{plot.NMixPredCondDensJoint2}
\title{
Plot computed predictive pairwise bivariate conditional densities
}
\description{
This is a basic plotting tool to visualize computed
predictive pairwise bivariate conditional densities using the
\code{\link[graphics]{image}} or \code{\link[graphics]{contour}} plot.
See also \code{\link{NMixPredCondDensJoint2}}.
}
\usage{
\method{plot}{NMixPredCondDensJoint2}(x, ixcond, imargin,
contour=FALSE,
add.contour=TRUE, col.add.contour="brown",
auto.layout=TRUE,
col, lwd=1, main, xylab, \dots)
}
\arguments{
\item{x}{an object of class \code{NMixPredCondDensJoint2}.}
\item{ixcond}{if given then conditional densities of all pairs of margins given \code{x[[icond]][ixcond]} are plotted where
\code{icond} is taken from \code{x}.
}
\item{imargin}{vector of length 2.
if given then conditional densities of the (\code{imargin[1]},
\code{imargin[2]}) pair of margins given all values of \code{x[[icond]]}
are plotted.
}
\item{contour}{logical. If \code{TRUE} then contours are drawn,
otherwise image plot is created.
}
\item{add.contour}{logical. If \code{TRUE} and \code{contour} is
\code{FALSE} (i.e., image plot is drawn) then contours are added to
the image plots.}
\item{col.add.contour}{color of contours which are added to the image plot.}
\item{auto.layout}{if \code{TRUE} then the function determines itself
how to divide the plotting region to draw densities for all margins.
}
\item{col}{color used to draw the contours or images.}
\item{lwd}{line width.}
\item{main}{main title of the plot.}
\item{xylab}{optional character vector of the length equal to
the number of margins with labels used for x and y axes on the plots.}
\item{\dots}{additional arguments passed to the \code{plot} function.}
}
\value{
\code{invisible(x)}
}
\seealso{
\code{\link{NMixPredCondDensJoint2}}, \code{\link{NMixMCMC}}.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\keyword{dplot}
| /man/plot.NMixPredCondDensJoint2.Rd | no_license | cran/mixAK | R | false | false | 2,073 | rd | \name{plot.NMixPredCondDensJoint2}
\alias{plot.NMixPredCondDensJoint2}
\title{
Plot computed predictive pairwise bivariate conditional densities
}
\description{
This is a basic plotting tool to visualize computed
predictive pairwise bivariate conditional densities using the
\code{\link[graphics]{image}} or \code{\link[graphics]{contour}} plot.
See also \code{\link{NMixPredCondDensJoint2}}.
}
\usage{
\method{plot}{NMixPredCondDensJoint2}(x, ixcond, imargin,
contour=FALSE,
add.contour=TRUE, col.add.contour="brown",
auto.layout=TRUE,
col, lwd=1, main, xylab, \dots)
}
\arguments{
\item{x}{an object of class \code{NMixPredCondDensJoint2}.}
\item{ixcond}{if given then conditional densities of all pairs of margins given \code{x[[icond]][ixcond]} are plotted where
\code{icond} is taken from \code{x}.
}
\item{imargin}{vector of length 2.
if given then conditional densities of the (\code{imargin[1]},
\code{imargin[2]}) pair of margins given all values of \code{x[[icond]]}
are plotted.
}
\item{contour}{logical. If \code{TRUE} then contours are drawn,
otherwise image plot is created.
}
\item{add.contour}{logical. If \code{TRUE} and \code{contour} is
\code{FALSE} (i.e., image plot is drawn) then contours are added to
the image plots.}
\item{col.add.contour}{color of contours which are added to the image plot.}
\item{auto.layout}{if \code{TRUE} then the function determines itself
how to divide the plotting region to draw densities for all margins.
}
\item{col}{color used to draw the contours or images.}
\item{lwd}{line width.}
\item{main}{main title of the plot.}
\item{xylab}{optional character vector of the length equal to
the number of margins with labels used for x and y axes on the plots.}
\item{\dots}{additional arguments passed to the \code{plot} function.}
}
\value{
\code{invisible(x)}
}
\seealso{
\code{\link{NMixPredCondDensJoint2}}, \code{\link{NMixMCMC}}.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\keyword{dplot}
|
# Functions related to running Cox proportional hazards lasso/elastic net regression
# By Daniel Golden (dgolden1 at stanford dot edu) June 2013
# $Id: cox_lasso_fcn.R 347 2013-07-17 00:05:49Z dgolden $
## Setup
library(glmnet)
library(survival)
library(cvTools)
## FUNCTION: Fake some input data
# Generate a fake right-censored survival dataset for exercising the Cox
# lasso pipeline.
#
# Args:
#   num_patients: number of rows (patients) to simulate.
#   num_features: total number of covariates.
#   num_real_features: how many leading covariates actually drive the event time.
# Returns:
#   list with:
#     df           - data.frame of covariates (num_patients x num_features)
#     event_times  - numeric event/censoring times
#     b_censored   - logical vector; TRUE where the observation is censored
#     b_save_plots - convenience flag, always FALSE
make_fake_data = function(num_patients=50, num_features=5, num_real_features=3){
set.seed(0) # Make reproducible random values
warning('Random number generator seeded at 0 in make_fake_data()')
feature_names = paste("feature", seq_len(num_features))
patient_names = paste("patient", seq_len(num_patients))
x = matrix(runif(num_patients*num_features, min=0, max=20/num_real_features), num_patients, num_features, dimnames = list(patient_names, feature_names))
# Event time is the sum of the "real" covariates plus uniform noise
# (drop = FALSE keeps apply() working when num_real_features == 1)
event_times = apply(x[, seq_len(num_real_features), drop = FALSE], 1, sum) + runif(num_patients, min=0, max=5)
# Randomly censor some values. BUG FIX: this must be a logical mask; the
# original numeric 0/1 vector numeric-indexed element 1 repeatedly instead
# of selecting the censored subset.
b_censored = sample(c(TRUE, FALSE), length(event_times), replace = TRUE)
# Randomly subtract up to 5 months from the censored data set; ensure that the time values are still at least 1
event_times[b_censored] = pmax(event_times[b_censored] - runif(sum(b_censored), 1, 5), 1)
df = data.frame(x)
b_save_plots = FALSE
return(list(df=df, event_times=event_times, b_censored=b_censored, b_save_plots=b_save_plots))
}
## FUNCTION: run Cox PH lasso regression
# Fit a cross-validated Cox proportional-hazards lasso / elastic-net model.
#
# Args:
#   x: covariate matrix (or coercible to matrix), one row per observation.
#   event_times: event / censoring times.
#   b_censored: per-observation censoring flags; an "event" is !b_censored.
#   alpha: elastic-net mixing parameter (1 = lasso).
#   nfolds: number of CV folds for cv.glmnet.
#   b_make_plots / b_save_plots: optionally draw (and save as PNG) the
#     Kaplan-Meier curve and the CV deviance curve.
# Returns:
#   the fitted cv.glmnet object.
run_cox_lasso = function(x, event_times, b_censored, alpha=1, nfolds=10, b_make_plots=FALSE, b_save_plots=FALSE) {
  # Right-censored survival response: an event occurred where not censored
  surv = Surv(time = event_times, event = !b_censored, type = 'right')
  # Cross-validated penalized Cox regression
  fit = cv.glmnet(as.matrix(x), y = surv, family = 'cox', alpha = alpha, nfolds = nfolds)
  if (b_make_plots) {
    # Overall Kaplan-Meier survival curve
    plot(survfit(surv_obj_plot <- surv ~ 1), xlab = 'Months')
    if (b_save_plots) {
      dev.copy(png, "~/temp/r_km_plot.png")
      dev.off()
    }
    # Cross-validation curve of the fitted model
    plot(fit)
    if (b_save_plots) {
      dev.copy(png, '~/temp/r_lasso_cv_plot.png')
      dev.off()
    }
  }
  fit
}
## Function handle certain kinds of glmnet errors
# Placeholder handler for glmnet errors: drops into the interactive
# debugger so the condition object `e` can be inspected by hand.
# Not useful in non-interactive runs, where browser() does nothing.
handle_glmnet_error = function(e) {
browser()
}
## FUNCTION run Cox PH model with lasso regularization and save linear_predictors
# Outer cross-validation wrapper around run_cox_lasso(): for each outer
# fold, fit a cross-validated Cox lasso on the training portion and record,
# for each held-out observation, its linear predictor, the sextile of that
# predictor relative to the training-set predictions, and whether the
# selected model was the null (fully penalized) model.
# Returns list(linear_predictors, sextiles, b_null_model), each of length(y).
run_cox_lasso_outer_cv = function(x, y, b_censored, alpha=1, nfolds.outer=10, nfolds.inner=10) {
# Generate cross-validation partitions
#cvp = cvFolds(length(y), K=nfolds.outer)
# Generate cross-validation partitions stratified by censoring
cvp = cv_with_strata(length(y), nfolds=nfolds.outer, strata=b_censored, balance_by='strata')
# Loop over folds
# Preallocate per-observation outputs; NaN marks "not yet filled"
linear_predictors = numeric(length(y))*NaN
sextiles = numeric(length(y))*NaN
b_null_model = logical(length(y))
for (kk in 1:nfolds.outer) {
t_start = proc.time()[3]
# Observation indices for this fold's train/test split
idx_train = cvp$subsets[cvp$which != kk]
idx_test = cvp$subsets[cvp$which == kk]
x_train = x[idx_train,]
y_train = y[idx_train]
b_censored_train = b_censored[idx_train]
x_test = x[idx_test,]
y_test = y[idx_test]
b_censored_test = b_censored[idx_test]
# Train model
glmnet_res = run_cox_lasso(x=x_train, event_times=as.numeric(y_train), b_censored=as.logical(b_censored_train),
nfolds=nfolds.inner, alpha=alpha, b_make_plots=FALSE)
# Get linear predictors for "test" set
linear_predictors[idx_test] = predict(glmnet_res, matrix(x_test, nrow=length(idx_test)))
# Determine which sextile (sixth) of the training set's predicted values these test set
# predicted values fall into
linear_predictors_train_set = predict(glmnet_res, matrix(x_train, nrow=length(idx_train)))
sextile_edges = quantile(linear_predictors_train_set, seq(0, 1, length.out=7))
# Open the outer bins so out-of-range test predictions still fall in bin 1 or 6
sextile_edges[c(1, length(sextile_edges))] = c(-Inf, Inf)
# Roundoff error sometimes makes sextile_edges that should be the same slightly different;
# make them the same
sextile_edges[c(FALSE, diff(sextile_edges) < 0)] = sextile_edges[c(diff(sextile_edges) < 0, FALSE)]
sextiles[idx_test] = findInterval(linear_predictors[idx_test], vec=sextile_edges)
# Gather some statistics
# lambda.1se == max(lambda) means CV selected the fully-penalized (null) model
b_null_model[idx_test] = glmnet_res$lambda.1se == max(glmnet_res$lambda)
print(sprintf('Processed fold %d of %d in %0.3f sec', kk, nfolds.outer, proc.time()[3] - t_start))
}
return(list(linear_predictors=linear_predictors, sextiles=sextiles, b_null_model=b_null_model))
}
## FUNCTION: make cross-validation folds with stratification
# strata is boolean vector of the same length as vals specifying classes
# over which to stratify
# Build nfolds-fold cross-validation partitions stratified by a binary
# grouping vector, so each fold draws from both strata.
#
# Args:
#   n: total number of observations (indices 1..n are partitioned).
#   nfolds: desired number of folds; capped per stratum by the stratum
#     size, which degenerates to leave-one-out for tiny strata.
#   strata: length-n vector coercible to logical; defines the two groups.
#   balance_by: 'strata' keeps stratification exact (fold sizes may vary);
#     'num' balances fold sizes at some cost to stratification.
# Returns:
#   a cvFolds-style list with components n, subsets (observation indices)
#   and which (fold assignment per row of subsets).
cv_with_strata = function(n, nfolds, strata, balance_by='strata') {
# Validate the option up front so we fail before doing any CV work
if (!balance_by %in% c('strata', 'num'))
stop(paste('Invalid entry for balance_by:', balance_by))
# seq_len() is safe for n == 0, unlike 1:n (which yields c(1, 0))
vals = seq_len(n)
# Separately get cross-validation folds for positive and negative group
# Groups may have fewer values than nfolds, in which case, use leave-one-out
# Cross-validation for that group
vals_pos = vals[as.logical(strata)]
cvp_pos = cvFolds(length(vals_pos), K=min(nfolds, length(vals_pos)))
cvp_pos$subsets = as.matrix(vals_pos[cvp_pos$subsets])
vals_neg = vals[!as.logical(strata)]
cvp_neg = cvFolds(length(vals_neg), K=min(nfolds, length(vals_neg)))
cvp_neg$subsets = as.matrix(vals_neg[cvp_neg$subsets])
# Concatenate the two stratum-wise partitions into one cvFolds-like object
cvp = cvp_pos
cvp$n = cvp_pos$n + cvp_neg$n
cvp$subsets = rbind(cvp_pos$subsets, cvp_neg$subsets)
if (balance_by == 'strata')
cvp$which = c(cvp_pos$which, cvp_neg$which) # Total number per fold less balanced
else
cvp$which = c(cvp_pos$which, nfolds - cvp_neg$which + 1) # Stratification less balanced
return(cvp)
}
## FUNCTION: fixes a bug in glmnet function with the same name
# Local copy of glmnet's private error decoder jerr.coxnet with a bug fix;
# assignInNamespace() below hot-patches it into the installed package.
# Maps a non-zero Fortran return code `n` to list(n, fatal, msg).
# NOTE: message strings (including the "Inititialization" spelling) are kept
# byte-identical to upstream glmnet so downstream matching is unaffected.
jerr.coxnet.dgolden = function (n, maxit, pmax)
{
if (n > 0) {
# Positive codes are fatal errors
outlist = jerr.elnet(n)
if (outlist$msg != "Unknown error")
return(outlist)
if (n == 8888)
msg = "All observations censored - cannot proceed"
else if (n == 9999)
msg = "No positive observation weights"
else if (match(n, c(20000, 30000), FALSE))
msg = "Inititialization numerical error"
else msg = "Unknown error"
list(n = n, fatal = TRUE, msg = msg)
} else if (n < 0) {
# Negative codes: lambda-path numerical issues are reported as non-fatal;
# other negative codes are delegated to glmnet's jerr.elnet()
if (n <= -30000) {
msg = paste("Numerical error at ", -n - 30000, "th lambda value; solutions for larger values of lambda returned",
sep = "")
list(n = n, fatal = FALSE, msg = msg)
}
else jerr.elnet(n, maxit, pmax)
}
}
# Replace glmnet's internal jerr.coxnet with the fixed version above
assignInNamespace('jerr.coxnet', jerr.coxnet.dgolden, 'glmnet')
| /image_features/cox_lasso_r/cox_lasso_fcn.R | permissive | dgolden1/qil-parp | R | false | false | 6,734 | r | # Functions related to running Cox proportional hazards lasso/elastic net regression
# By Daniel Golden (dgolden1 at stanford dot edu) June 2013
# $Id: cox_lasso_fcn.R 347 2013-07-17 00:05:49Z dgolden $
## Setup
library(glmnet)
library(survival)
library(cvTools)
## FUNCTION: Fake some input data
# Generate a fake right-censored survival dataset for exercising the Cox
# lasso pipeline.
#
# Args:
#   num_patients: number of rows (patients) to simulate.
#   num_features: total number of covariates.
#   num_real_features: how many leading covariates actually drive the event time.
# Returns:
#   list with:
#     df           - data.frame of covariates (num_patients x num_features)
#     event_times  - numeric event/censoring times
#     b_censored   - logical vector; TRUE where the observation is censored
#     b_save_plots - convenience flag, always FALSE
make_fake_data = function(num_patients=50, num_features=5, num_real_features=3){
set.seed(0) # Make reproducible random values
warning('Random number generator seeded at 0 in make_fake_data()')
feature_names = paste("feature", seq_len(num_features))
patient_names = paste("patient", seq_len(num_patients))
x = matrix(runif(num_patients*num_features, min=0, max=20/num_real_features), num_patients, num_features, dimnames = list(patient_names, feature_names))
# Event time is the sum of the "real" covariates plus uniform noise
# (drop = FALSE keeps apply() working when num_real_features == 1)
event_times = apply(x[, seq_len(num_real_features), drop = FALSE], 1, sum) + runif(num_patients, min=0, max=5)
# Randomly censor some values. BUG FIX: this must be a logical mask; the
# original numeric 0/1 vector numeric-indexed element 1 repeatedly instead
# of selecting the censored subset.
b_censored = sample(c(TRUE, FALSE), length(event_times), replace = TRUE)
# Randomly subtract up to 5 months from the censored data set; ensure that the time values are still at least 1
event_times[b_censored] = pmax(event_times[b_censored] - runif(sum(b_censored), 1, 5), 1)
df = data.frame(x)
b_save_plots = FALSE
return(list(df=df, event_times=event_times, b_censored=b_censored, b_save_plots=b_save_plots))
}
## FUNCTION: run Cox PH lasso regression
run_cox_lasso = function(x, event_times, b_censored, alpha=1, nfolds=10, b_make_plots=FALSE, b_save_plots=FALSE) {
## Plot Kaplan-Meier survival curve
surv_obj = Surv(time=event_times, event=!b_censored, type='right')
## Run Lasso model
glmnet_res = cv.glmnet(as.matrix(x), y=surv_obj, family= 'cox', alpha=alpha, nfolds=nfolds)
# glmnet_res = tryCatch(cv.glmnet(as.matrix(x), y=surv_obj, family= 'cox', alpha=alpha, nfolds=nfolds), error = function(e) e)
# if (class(glmnet_res)[1] == 'simpleError') {
# throw(glmnet_res)
# }
if (b_make_plots) {
plot(survfit(surv_obj ~ 1), xlab='Months')
if (b_save_plots) {
dev.copy(png, "~/temp/r_km_plot.png")
dev.off()
}
plot(glmnet_res)
if (b_save_plots) {
dev.copy(png, '~/temp/r_lasso_cv_plot.png')
dev.off()
}
}
return(glmnet_res)
}
## Function handle certain kinds of glmnet errors
handle_glmnet_error = function(e) {
browser()
}
## FUNCTION run Cox PH model with lasso regularization and save linear_predictors
run_cox_lasso_outer_cv = function(x, y, b_censored, alpha=1, nfolds.outer=10, nfolds.inner=10) {
# Generate cross-validation partitions
#cvp = cvFolds(length(y), K=nfolds.outer)
# Generate cross-validation partitions stratified by censoring
cvp = cv_with_strata(length(y), nfolds=nfolds.outer, strata=b_censored, balance_by='strata')
# Loop over folds
linear_predictors = numeric(length(y))*NaN
sextiles = numeric(length(y))*NaN
b_null_model = logical(length(y))
for (kk in 1:nfolds.outer) {
t_start = proc.time()[3]
idx_train = cvp$subsets[cvp$which != kk]
idx_test = cvp$subsets[cvp$which == kk]
x_train = x[idx_train,]
y_train = y[idx_train]
b_censored_train = b_censored[idx_train]
x_test = x[idx_test,]
y_test = y[idx_test]
b_censored_test = b_censored[idx_test]
# Train model
glmnet_res = run_cox_lasso(x=x_train, event_times=as.numeric(y_train), b_censored=as.logical(b_censored_train),
nfolds=nfolds.inner, alpha=alpha, b_make_plots=FALSE)
# Get linear predictors for "test" set
linear_predictors[idx_test] = predict(glmnet_res, matrix(x_test, nrow=length(idx_test)))
# Determine which sextile (sixth) of the training set's predicted values these test set
# predicted values fall into
linear_predictors_train_set = predict(glmnet_res, matrix(x_train, nrow=length(idx_train)))
sextile_edges = quantile(linear_predictors_train_set, seq(0, 1, length.out=7))
sextile_edges[c(1, length(sextile_edges))] = c(-Inf, Inf)
# Roundoff error sometimes makes sextile_edges that should be the same slightly different;
# make them the same
sextile_edges[c(FALSE, diff(sextile_edges) < 0)] = sextile_edges[c(diff(sextile_edges) < 0, FALSE)]
sextiles[idx_test] = findInterval(linear_predictors[idx_test], vec=sextile_edges)
# Gather some statistics
b_null_model[idx_test] = glmnet_res$lambda.1se == max(glmnet_res$lambda)
print(sprintf('Processed fold %d of %d in %0.3f sec', kk, nfolds.outer, proc.time()[3] - t_start))
}
return(list(linear_predictors=linear_predictors, sextiles=sextiles, b_null_model=b_null_model))
}
## FUNCTION: make cross-validation folds with stratification
# strata is boolean vector of the same length as vals specifying classes
# over which to stratify
cv_with_strata = function(n, nfolds, strata, balance_by='strata') {
vals = 1:n
# Separately get cross-validation folds for positive and negative group
# Groups may have fewer values than nfolds, in which case, use leave-one-out
# Cross-validation for that group
vals_pos = vals[as.logical(strata)]
cvp_pos = cvFolds(length(vals_pos), K=min(nfolds, length(vals_pos)))
cvp_pos$subsets = as.matrix(vals_pos[cvp_pos$subsets])
vals_neg = vals[!as.logical(strata)]
cvp_neg = cvFolds(length(vals_neg), K=min(nfolds, length(vals_neg)))
cvp_neg$subsets = as.matrix(vals_neg[cvp_neg$subsets])
# Concatenate together
cvp = cvp_pos
cvp$n = cvp_pos$n + cvp_neg$n
cvp$subsets = rbind(cvp_pos$subsets, cvp_neg$subsets)
if (balance_by == 'strata')
cvp$which = c(cvp_pos$which, cvp_neg$which) # Total number per fold less balanced
else if (balance_by == 'num')
cvp$which = c(cvp_pos$which, nfolds - cvp_neg$which + 1) # Stratification less balanced
else
stop(paste('Invalid entry for balance_by:', balance_by))
return(cvp)
}
## FUNCTION: fixes a bug in glmnet function with the same name
jerr.coxnet.dgolden = function (n, maxit, pmax)
{
if (n > 0) {
outlist = jerr.elnet(n)
if (outlist$msg != "Unknown error")
return(outlist)
if (n == 8888)
msg = "All observations censored - cannot proceed"
else if (n == 9999)
msg = "No positive observation weights"
else if (match(n, c(20000, 30000), FALSE))
msg = "Inititialization numerical error"
else msg = "Unknown error"
list(n = n, fatal = TRUE, msg = msg)
} else if (n < 0) {
if (n <= -30000) {
msg = paste("Numerical error at ", -n - 30000, "th lambda value; solutions for larger values of lambda returned",
sep = "")
list(n = n, fatal = FALSE, msg = msg)
}
else jerr.elnet(n, maxit, pmax)
}
}
assignInNamespace('jerr.coxnet', jerr.coxnet.dgolden, 'glmnet')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plugins.R
\name{dyRibbon}
\alias{dyRibbon}
\title{dyRibbon plugin adds a horizontal band of colors that runs through the chart. It
can be useful to visualize categorical variables
(http://en.wikipedia.org/wiki/Categorical_variable) that change over time (along
the x-axis).}
\usage{
dyRibbon(dygraph, data = NULL, palette = NULL, parser = NULL,
top = 1, bottom = 0)
}
\arguments{
\item{dygraph}{Dygraph to add plugin to}
\item{data}{Vector of numeric values in the range from 0 to 1}
\item{palette}{Vector with colors palette}
\item{parser}{JavaScript function (function (data, dygraph)) returning the array of
numeric values. The parser is used only if no \code{data} was provided.}
\item{top}{Vertical position of the top edge of ribbon relative to chart height.}
\item{bottom}{Vertical position of the bottom edge of ribbon relative to chart height.}
}
\description{
dyRibbon plugin adds a horizontal band of colors that runs through the chart. It
can be useful to visualize categorical variables
(http://en.wikipedia.org/wiki/Categorical_variable) that change over time (along
the x-axis).
}
\examples{
\dontrun{
library(quantmod)
getSymbols("SPY", from = "2016-12-01", auto.assign=TRUE)
difference <- SPY[, "SPY.Open"] - SPY[, "SPY.Close"]
decreasing <- which(difference < 0)
increasing <- which(difference > 0)
dyData <- SPY[, "SPY.Close"]
ribbonData <- rep(0, nrow(dyData))
ribbonData[decreasing] <- 0.5
ribbonData[increasing] <- 1
dygraph(dyData) \%>\%
dyRibbon(data = ribbonData, top = 0.1, bottom = 0.02)
}
}
| /man/dyRibbon.Rd | no_license | tr8dr/dygraphs | R | false | true | 1,596 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plugins.R
\name{dyRibbon}
\alias{dyRibbon}
\title{dyRibbon plugin adds a horizontal band of colors that runs through the chart. It
can be useful to visualize categorical variables
(http://en.wikipedia.org/wiki/Categorical_variable) that change over time (along
the x-axis).}
\usage{
dyRibbon(dygraph, data = NULL, palette = NULL, parser = NULL,
top = 1, bottom = 0)
}
\arguments{
\item{dygraph}{Dygraph to add plugin to}
\item{data}{Vector of numeric values in the range from 0 to 1}
\item{palette}{Vector with colors palette}
\item{parser}{JavaScript function (function (data, dygraph)) returning the array of
numeric values. The parser is used only if no \code{data} was provided.}
\item{top}{Vertical position of the top edge of ribbon relative to chart height.}
\item{bottom}{Vertical position of the bottom edge of ribbon relative to chart height.}
}
\description{
dyRibbon plugin adds a horizontal band of colors that runs through the chart. It
can be useful to visualize categorical variables
(http://en.wikipedia.org/wiki/Categorical_variable) that change over time (along
the x-axis).
}
\examples{
\dontrun{
library(quantmod)
getSymbols("SPY", from = "2016-12-01", auto.assign=TRUE)
difference <- SPY[, "SPY.Open"] - SPY[, "SPY.Close"]
decreasing <- which(difference < 0)
increasing <- which(difference > 0)
dyData <- SPY[, "SPY.Close"]
ribbonData <- rep(0, nrow(dyData))
ribbonData[decreasing] <- 0.5
ribbonData[increasing] <- 1
dygraph(dyData) \%>\%
dyRibbon(data = ribbonData, top = 0.1, bottom = 0.02)
}
}
|
# Reshape a site-by-species importance-value table into long format and
# rank species within each site by descending importance value.
# NOTE(review): melt() and arrange() are not base R -- presumably reshape2
# and plyr/dplyr loaded elsewhere in the package; confirm against NAMESPACE.
jz_data<-function (x) {
siteiv<-x
# melt with every column listed as an id variable -- verify this produces
# the intended variable/value columns for the expected input shape
lv <- melt(siteiv, id.vars = c(colnames(siteiv)))
names(lv)[1:3] <- c("site", "sp", "iv")
lo <- na.omit(lv)
# sort by site, then by importance value in descending order
lo <- arrange(lo, site, -iv)
# within-site rank: 1..n in the sorted order
d <- tapply(lo$sp, lo$site, function(x) rep(1:length(x)))
# assumes `site` values are positive integers covering 1..max -- TODO confirm
lo$xh = as.vector(unlist(d[1:max(lo$site)]))
lo
} | /R/jz_data.R | no_license | ZhuLeZi/leplant | R | false | false | 296 | r | jz_data<-function (x) {
siteiv<-x
lv <- melt(siteiv, id.vars = c(colnames(siteiv)))
names(lv)[1:3] <- c("site", "sp", "iv")
lo <- na.omit(lv)
lo <- arrange(lo, site, -iv)
d <- tapply(lo$sp, lo$site, function(x) rep(1:length(x)))
lo$xh = as.vector(unlist(d[1:max(lo$site)]))
lo
} |
#' Get record details from Chemical Translation Service (CTS)
#'
#' Get record details from CTS, see \url{http://cts.fiehnlab.ucdavis.edu}
#' @import RCurl jsonlite
#' @param inchikey character; InChIkey.
#' @param verbose logical; should a verbose output be printed on the console?
#' @param ... currently not used.
#' @return a list of 7. inchikey, inchicode, molweight, exactmass, formula, synonyms and externalIds
#' @author Eduard Szoecs, \email{eduardszoecs@@gmail.com}
#' @export
#' @examples
#' \donttest{
#' # might fail if API is not available
#' out <- cts_compinfo("XEFQLINVKFYRCS-UHFFFAOYSA-N")
#' # = Triclosan
#' str(out)
#' out[1:5]
#'
#' ### multiple inputs
#' comp <- c('Triclosan', 'Aspirin')
#' inchkeys <- sapply(comp, function(x) cir_query(x, 'stdinchikey', first = TRUE))
#' # ne to strip '#InChIKey='
#' inchkeys <- gsub('InChIKey=', '', inchkeys)
#' ll <- lapply(inchkeys, function(x) cts_compinfo(x)[1:5])
#' do.call(rbind, ll)
#' }
cts_compinfo <- function(inchikey, verbose = TRUE, ...){
  # The CTS compound endpoint accepts a single InChIKey per request
  if (length(inchikey) > 1) {
    stop('Cannot handle multiple input strings.')
  }
  qurl <- paste0("http://cts.fiehnlab.ucdavis.edu/service/compound", '/', inchikey)
  if (verbose)
    message(qurl)
  # brief pause to be polite to the web service
  Sys.sleep(0.1)
  response <- try(getURL(qurl), silent = TRUE)
  if (inherits(response, "try-error")) {
    warning('Problem with web service encountered... Returning NA.')
    return(NA)
  }
  res <- fromJSON(response)
  # the service answers bad keys with a short "invalid ..." string
  if (length(res) == 1 && grepl('invalid', res)) {
    message("invalid InChIKey. Returning NA.")
    return(NA)
  }
  res
}
#' Convert Ids using Chemical Translation Service (CTS)
#'
#' Convert Ids using Chemical Translation Service (CTS), see \url{http://cts.fiehnlab.ucdavis.edu/conversion/index}
#' @import RCurl jsonlite
#' @importFrom utils URLencode
#' @param query character; query ID.
#' @param from character; type of query ID, e.g. \code{'Chemical Name'} , \code{'InChIKey'},
#' \code{'PubChem CID'}, \code{'ChemSpider'}, \code{'CAS'}.
#' @param to character; type to convert to.
#' @param first logical; return only first result be returned?
#' @param verbose logical; should a verbose output be printed on the console?
#' @param ... currently not used.
#' @return a character vector.
#' @author Eduard Szoecs, \email{eduardszoecs@@gmail.com}
#' @details See see \url{http://cts.fiehnlab.ucdavis.edu/conversion/index}
#' for possible values of from and to.
#' @export
#' @examples
#' \donttest{
#' # might fail if API is not available
#' cts_convert('XEFQLINVKFYRCS-UHFFFAOYSA-N', 'inchikey', 'Chemical Name')
#'
#' ### multiple inputs
#' comp <- c('XEFQLINVKFYRCS-UHFFFAOYSA-N', 'BSYNRYMUTXBXSQ-UHFFFAOYSA-N')
#' sapply(comp, function(x) cts_convert(x, 'inchikey', 'Chemical Name', first = TRUE))
#' }
cts_convert <- function(query, from, to, first = FALSE, verbose = TRUE, ...){
  # One identifier and one conversion direction per call
  if (length(query) > 1 | length(from) > 1 | length(to) > 1) {
    stop('Cannot handle multiple input strings.')
  }
  if (is.na(query)) {
    warning('Identifier is NA... Returning NA.')
    return(NA)
  }
  # Build and encode the REST query URL: /convert/<from>/<to>/<query>
  qurl <- URLencode(paste0("http://cts.fiehnlab.ucdavis.edu/service/convert",
                           '/', from, '/', to, '/', query))
  if (verbose)
    message(qurl)
  # brief pause to be polite to the web service
  Sys.sleep(0.1)
  response <- try(getURL(qurl), silent = TRUE)
  if (inherits(response, "try-error")) {
    warning('Problem with web service encountered... Returning NA.')
    return(NA)
  }
  parsed <- fromJSON(response)
  if ('error' %in% names(parsed)) {
    warning('Error in query : \n', parsed['error'], "\n Returning NA.")
    return(NA)
  }
  res <- parsed$result[[1]]
  if (length(res) == 0) {
    message("Not found. Returning NA.")
    return(NA)
  }
  if (first)
    res <- res[1]
  res
}
| /R/cts.R | permissive | cdr6934/webchem | R | false | false | 3,746 | r | #' Get record details from Chemical Translation Service (CTS)
#'
#' Get record details from CTS, see \url{http://cts.fiehnlab.ucdavis.edu}
#' @import RCurl jsonlite
#' @param inchikey character; InChIkey.
#' @param verbose logical; should a verbose output be printed on the console?
#' @param ... currently not used.
#' @return a list of 7. inchikey, inchicode, molweight, exactmass, formula, synonyms and externalIds
#' @author Eduard Szoecs, \email{eduardszoecs@@gmail.com}
#' @export
#' @examples
#' \donttest{
#' # might fail if API is not available
#' out <- cts_compinfo("XEFQLINVKFYRCS-UHFFFAOYSA-N")
#' # = Triclosan
#' str(out)
#' out[1:5]
#'
#' ### multiple inputs
#' comp <- c('Triclosan', 'Aspirin')
#' inchkeys <- sapply(comp, function(x) cir_query(x, 'stdinchikey', first = TRUE))
#' # ne to strip '#InChIKey='
#' inchkeys <- gsub('InChIKey=', '', inchkeys)
#' ll <- lapply(inchkeys, function(x) cts_compinfo(x)[1:5])
#' do.call(rbind, ll)
#' }
cts_compinfo <- function(inchikey, verbose = TRUE, ...){
if (length(inchikey) > 1) {
stop('Cannot handle multiple input strings.')
}
baseurl <- "http://cts.fiehnlab.ucdavis.edu/service/compound"
qurl <- paste0(baseurl, '/', inchikey)
if (verbose)
message(qurl)
Sys.sleep(0.1)
h <- try(getURL(qurl), silent = TRUE)
if (!inherits(h, "try-error")) {
out <- fromJSON(h)
} else{
warning('Problem with web service encountered... Returning NA.')
return(NA)
}
if (length(out) == 1 && grepl('invalid', out)) {
message("invalid InChIKey. Returning NA.")
return(NA)
}
return(out)
}
#' Convert Ids using Chemical Translation Service (CTS)
#'
#' Convert Ids using Chemical Translation Service (CTS), see \url{http://cts.fiehnlab.ucdavis.edu/conversion/index}
#' @import RCurl jsonlite
#' @importFrom utils URLencode
#' @param query character; query ID.
#' @param from character; type of query ID, e.g. \code{'Chemical Name'} , \code{'InChIKey'},
#' \code{'PubChem CID'}, \code{'ChemSpider'}, \code{'CAS'}.
#' @param to character; type to convert to.
#' @param first logical; return only first result be returned?
#' @param verbose logical; should a verbose output be printed on the console?
#' @param ... currently not used.
#' @return a character vector.
#' @author Eduard Szoecs, \email{eduardszoecs@@gmail.com}
#' @details See see \url{http://cts.fiehnlab.ucdavis.edu/conversion/index}
#' for possible values of from and to.
#' @export
#' @examples
#' \donttest{
#' # might fail if API is not available
#' cts_convert('XEFQLINVKFYRCS-UHFFFAOYSA-N', 'inchikey', 'Chemical Name')
#'
#' ### multiple inputs
#' comp <- c('XEFQLINVKFYRCS-UHFFFAOYSA-N', 'BSYNRYMUTXBXSQ-UHFFFAOYSA-N')
#' sapply(comp, function(x) cts_convert(x, 'inchikey', 'Chemical Name', first = TRUE))
#' }
cts_convert <- function(query, from, to, first = FALSE, verbose = TRUE, ...){
if (length(query) > 1 | length(from) > 1 | length(to) > 1) {
stop('Cannot handle multiple input strings.')
}
if (is.na(query)) {
warning('Identifier is NA... Returning NA.')
return(NA)
}
baseurl <- "http://cts.fiehnlab.ucdavis.edu/service/convert"
qurl <- paste0(baseurl, '/', from, '/', to, '/', query)
qurl <- URLencode(qurl)
if (verbose)
message(qurl)
Sys.sleep(0.1)
h <- try(getURL(qurl), silent = TRUE)
if (!inherits(h, "try-error")) {
out <- fromJSON(h)
} else {
warning('Problem with web service encountered... Returning NA.')
return(NA)
}
if ('error' %in% names(out)) {
warning('Error in query : \n', out['error'], "\n Returning NA.")
return(NA)
} else {
out <- out$result[[1]]
}
if (length(out) == 0) {
message("Not found. Returning NA.")
return(NA)
}
if (first)
out <- out[1]
return(out)
}
|
testlist <- list(kern = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), val = c(9.34347294018751e-275, 5.91668024075023e-257, -2.68778430925514e+261, 4.94093247000208e+265, -1.95355702882874e+32, 5.69207924362265e-218, 1.19510767126542e-290, 2.11099796455812e-162, 1.09045377292973e+121, 4.50029412043126e+92, 7.23583252657598e-98, -4.04104651388882e+58, -1.32111998744104e+107, 6.10971082153004e-261, 8.8259168364373e-138, 1.05319062865711e-256, 9.20004710972059e+187, 3.11873113493387e-89, 7.41633495404986e-180, -6.31621849134166e-125, 0.00753173927589659))
result <- do.call(lowpassFilter:::convolve,testlist)
str(result) | /lowpassFilter/inst/testfiles/convolve/AFL_convolve/convolve_valgrind_files/1616007282-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 797 | r | testlist <- list(kern = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), val = c(9.34347294018751e-275, 5.91668024075023e-257, -2.68778430925514e+261, 4.94093247000208e+265, -1.95355702882874e+32, 5.69207924362265e-218, 1.19510767126542e-290, 2.11099796455812e-162, 1.09045377292973e+121, 4.50029412043126e+92, 7.23583252657598e-98, -4.04104651388882e+58, -1.32111998744104e+107, 6.10971082153004e-261, 8.8259168364373e-138, 1.05319062865711e-256, 9.20004710972059e+187, 3.11873113493387e-89, 7.41633495404986e-180, -6.31621849134166e-125, 0.00753173927589659))
result <- do.call(lowpassFilter:::convolve,testlist)
str(result) |
library(readxl)
library(dplyr)
# setwd("C:/Users/pavi/Desktop/UNO/IntroDataScience/Project/Data/testdat")
# data frame with columns that arent there in 2008-2014 dataset
# Columns that exist only in the 2015+ PERM disclosure files; earlier years
# get them appended as NA so that every year shares one schema and the
# per-year frames can later be combined with rbind().
add_cols = data.frame('JOB_INFO_WORK_POSTAL_CODE'=NA, 'EMPLOYER_NUM_EMPLOYEES'=NA, 'EMPLOYER_YR_ESTAB'=NA, 'EMPLOYER_PHONE'=NA, 'AGENT_FIRM_NAME'=NA, 'AGENT_CITY'=NA, 'AGENT_STATE'=NA, 'PW_SOC_TITLE'=NA, 'JOB_INFO_JOB_TITLE'=NA, 'JOB_INFO_EDUCATION'=NA, 'JOB_INFO_MAJOR'=NA, 'JOB_INFO_FOREIGN_ED'=NA, 'RI_1ST_AD_NEWSPAPER_NAME'=NA, 'RI_2ND_AD_NEWSPAPER_NAME'=NA, 'FW_INFO_YR_REL_EDU_COMPLETED'=NA, 'FOREIGN_WORKER_INFO_INST'=NA, 'JOB_INFO_EXPERIENCE_NUM_MONTHS'=NA)
# Standardise one fiscal year's raw PERM extract to the common column set.
#
# ds: data frame for a single year (column names already normalised upstream).
# y : numeric fiscal year of the extract.
#
# Returns a data frame with the 24 core columns followed by the 17 columns
# present only from 2015 on (real values for y >= 2015, NA otherwise),
# always in the same order.  The previous version repeated the full column
# list verbatim in both branches; it is deduplicated here.
dataset <- function (ds, y) {
  core_cols <- c("YEAR", "CASE_NUMBER", "DECISION_DATE", "CASE_STATUS",
                 "EMPLOYER_NAME", "EMPLOYER_ADDRESS_1", "EMPLOYER_ADDRESS_2",
                 "EMPLOYER_CITY", "EMPLOYER_STATE", "EMPLOYER_POSTAL_CODE",
                 "NAICS_US_CODE", "NAICS_US_TITLE", "PW_SOC_CODE",
                 "PW_JOB_TITLE_9089", "PW_LEVEL_9089", "PW_AMOUNT_9089",
                 "PW_UNIT_OF_PAY_9089", "WAGE_OFFER_FROM_9089",
                 "WAGE_OFFER_TO_9089", "WAGE_OFFER_UNIT_OF_PAY_9089",
                 "JOB_INFO_WORK_CITY", "JOB_INFO_WORK_STATE",
                 "COUNTRY_OF_CITIZENSHIP", "CLASS_OF_ADMISSION")
  if (y >= 2015) {
    # 2015+ files already carry the extra columns: keep core + extras.
    ds[, c(core_cols, names(add_cols))]
  } else {
    # Older files lack the extra columns: append them as NA (recycled to
    # the row count by data.frame(), as before).
    data.frame(ds[, core_cols], add_cols)
  }
}
# ---- FY2008 ---------------------------------------------------------------
# Each year is read TWICE: once with every column forced to text (so the
# header clean-up/renames behave uniformly), and once with read_excel's
# default type guessing so DECISION_DATE and the wage columns keep native
# date/numeric types.  The typed copies of those four columns then replace
# the text copies via cbind().
x =read_excel("PERM_FY2008.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',25))
# Normalise headers: spaces -> underscores, drop the "2007_" prefix.
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
# Fix the misspelled source column name ("CITZENSHIP").
x=dplyr::rename(x,COUNTRY_OF_CITIZENSHIP = COUNTRY_OF_CITZENSHIP)
# Drop the text versions of the columns that come from the typed read below.
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY2008.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
# EMPLOYER_ADDRESS_2 is added here as NA (not taken from the FY2008 file);
# tag every row with its fiscal year.
file=cbind(x,y, YEAR='2008',EMPLOYER_ADDRESS_2=NA)
file2008=dataset(file,2008)
# ---- FY2009 (same pattern; no NA column added for EMPLOYER_ADDRESS_2) -----
x =read_excel("PERM_FY2009.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',25))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY2009.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2009')
file2009=dataset(file,2009)
# ---- FY2010 (27 text columns; source names CASE_NO / COUNTRY_OF_CITZENSHIP
# are corrected; note the subset happens BEFORE the header clean-up here) ---
x =read_excel("PERM_FY2010.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
# Typed re-read supplies the date/numeric versions of the four columns.
y =read_excel("PERM_FY2010.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2010')
file2010=dataset(file,2010)
# ---- FY2011 (same pattern as FY2010) --------------------------------------
x =read_excel("PERM_FY2011.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
y =read_excel("PERM_FY2011.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2011')
file2011=dataset(file,2011)
# ---- FY2012 (Q4 file, 26 text columns; EMPLOYER_ADDRESS_2 added as NA) ----
x =read_excel("PERM_FY2012_Q4.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',26))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
y =read_excel("PERM_FY2012_Q4.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2012',EMPLOYER_ADDRESS_2=NA)
file2012=dataset(file,2012)
# ---- FY2013 (headers are also upper-cased; wage columns are named
# "WAGE_OFFERED_*" in the source and renamed to "WAGE_OFFER_*") -------------
x =read_excel("PERM_FY2013.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
names(x) <- gsub(" ", "_", names(x))
names(x) <- toupper(gsub("2007_","", names(x)))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY2013.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
names(y) <- toupper(gsub("2007_","", names(y)))
y=dplyr::rename(y,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089)
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2013')
# The offered-wage unit is proxied by the prevailing-wage unit here --
# presumably because the FY2013 file has no offered-wage unit column of its
# own; verify against the raw file.
file$WAGE_OFFER_UNIT_OF_PAY_9089=file$PW_UNIT_OF_PAY_9089
file2013=dataset(file,2013)
# ---- FY2014 (Q4 file; WAGE_OFFERED_UNIT_OF_PAY_9089 exists and is renamed)
x =read_excel("PERM_FY14_Q4.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
names(x) <- gsub(" ", "_", names(x))
names(x) <- toupper(gsub("2007_","", names(x)))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089,WAGE_OFFER_UNIT_OF_PAY_9089=WAGE_OFFERED_UNIT_OF_PAY_9089)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY14_Q4.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
names(y) <- toupper(gsub("2007_","", names(y)))
y=dplyr::rename(y,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089)
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2014')
file2014=dataset(file,2014)
# ---- FY2015 (new disclosure format, 125 columns; headers upper-cased) -----
x =read_excel("PERM_Disclosure_Data_FY15_Q4.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',125))
names(x) <- toupper(names(x))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_Disclosure_Data_FY15_Q4.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2015')
file2015=dataset(file,2015)
# ---- FY2016 -- NOTE(review): unlike FY15 the names are NOT upper-cased
# here (the FY16 file presumably already uses upper-case headers -- verify),
# and WAGE_OFFER_TO_9089 is listed twice in the drop list (harmless in -c()).
x =read_excel("PERM_Disclosure_Data_FY16.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',125))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_Disclosure_Data_FY16.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2016')
file2016=dataset(file,2016)
# Stack all years (dataset() guarantees a common column set/order) and
# persist the combined raw extract.
a = rbind(file2008,file2009,file2010,file2011,file2012,file2013,file2014,file2015,file2016)
saveRDS(a,"PermData.rds")
#cleaning all objects in my environment
#rm(list=ls(all=TRUE))
# Reload the combined raw extract saved above, so the manipulation steps
# below can be re-run without re-reading the Excel files.
mysample = readRDS(file="PermData.rds")
# mysample <- mysample[sample(1:nrow(mysample), 500,replace=FALSE),]   # optional: work on a 500-row sample
# Parse DECISION_DATE as m/d/Y -- NOTE(review): values that do not match
# this format become NA; confirm all years serialise the date this way.
mysample=transform(mysample,DECISION_DATE=as.Date(mysample$DECISION_DATE, format = '%m/%d/%Y'))
# write.csv(mysample, file = "sampleperm.csv")
# Normalise text: factors -> character, then everything upper-case, so the
# recodes and joins below can match case-insensitively.
permds <- mysample %>% mutate_if(is.factor,as.character)%>% mutate_if(is.character,toupper)
# -- Standardise unit-of-pay codes ------------------------------------------
# The source files mix verbose labels ("HOUR", "WEEK", ...) with the
# abbreviated codes used by later extracts ("HR", "WK", ...).  Map verbose
# labels onto the abbreviations via a lookup table instead of ten
# copy-pasted subassignments.  Values that are already abbreviated, NA, or
# anything unexpected pass through unchanged -- exactly as before (%in%
# treats NA as a non-match, matching the old NA-skipping subassignment).
unit_of_pay_map <- c(HOUR = "HR", WEEK = "WK", YEAR = "YR",
                     MONTH = "MTH", BI = "BIWK")
standardize_units <- function(u) {
  hit <- u %in% names(unit_of_pay_map)
  u[hit] <- unit_of_pay_map[u[hit]]
  u
}
permds$PW_UNIT_OF_PAY_9089 <- standardize_units(permds$PW_UNIT_OF_PAY_9089)
permds$WAGE_OFFER_UNIT_OF_PAY_9089 <- standardize_units(permds$WAGE_OFFER_UNIT_OF_PAY_9089)
# prevailing wage normalized and renamed
# Build an annualisation multiplier from the unit-of-pay string:
# YR -> 1, HR -> 40h x 52wk = 2080, BIWK -> 26 pay periods, WK -> 52,
# MTH -> 12, NA unit -> 0 (zeroes the wage).  The vector starts as the unit
# strings and is overwritten in place, so any OTHER unit label survives to
# as.numeric() and becomes NA (coercion warning) -- this exact ordering is
# load-bearing; do not reorder these statements.
normalized_prevailing_wage <- permds$PW_UNIT_OF_PAY_9089
normalized_prevailing_wage[which(normalized_prevailing_wage == 'YR')] <- 1
normalized_prevailing_wage[which(normalized_prevailing_wage == 'HR')] <- 40 * 52
normalized_prevailing_wage[which(normalized_prevailing_wage == 'BIWK')] <- 52 / 2
normalized_prevailing_wage[which(normalized_prevailing_wage == 'WK')] <- 52
normalized_prevailing_wage[which(normalized_prevailing_wage == 'MTH')] <- 12
normalized_prevailing_wage[which(is.na(normalized_prevailing_wage))] <- 0
normalized_prevailing_wage <- as.numeric(normalized_prevailing_wage)
# Convert the prevailing wage to an annual amount in place.
permds$PW_AMOUNT_9089 <- normalized_prevailing_wage * permds$PW_AMOUNT_9089
# wage from and maximum normalized and renamed
# Same multiplier construction for the offered-wage unit column.
normalized_wage <- permds$WAGE_OFFER_UNIT_OF_PAY_9089
normalized_wage[which(normalized_wage == 'YR')] <- 1
normalized_wage[which(normalized_wage == 'HR')] <- 40 * 52
normalized_wage[which(normalized_wage == 'BIWK')] <- 52 / 2
normalized_wage[which(normalized_wage == 'WK')] <- 52
normalized_wage[which(normalized_wage == 'MTH')] <- 12
normalized_wage[which(is.na(normalized_wage))] <- 0
normalized_wage <- as.numeric(normalized_wage)
# The "max" multiplier is just a copy, applied to the upper offer bound.
normalized_wage_max <- as.numeric(normalized_wage)
# Missing offer bounds are treated as 0 before annualising.
permds$WAGE_OFFER_FROM_9089[which(is.na(permds$WAGE_OFFER_FROM_9089))] <- 0
permds$WAGE_OFFER_TO_9089[which(is.na(permds$WAGE_OFFER_TO_9089))] <- 0
permds$normalized_wage <- normalized_wage * permds$WAGE_OFFER_FROM_9089
permds$normalized_wage_max <- normalized_wage_max * permds$WAGE_OFFER_TO_9089
# Derive clean 5-digit-style zip codes: strip embedded spaces, then drop
# any ZIP+4 suffix ("12345-6789" -> "12345") from the worksite postal code.
permds$worksite_zip_code <- gsub(" ", "", permds$JOB_INFO_WORK_POSTAL_CODE)
permds$worksite_zip_code <- gsub("-[[:digit:]]*", "", permds$worksite_zip_code)
# Same clean-up for the employer postal code.
permds$employer_zip_code <- gsub(" ", "", permds$EMPLOYER_POSTAL_CODE)
permds$employer_zip_code <- gsub("-[[:digit:]]*", "", permds$employer_zip_code)
# Lookup table mapping 2-letter US state/territory codes to full names
# (upper-case, to match the upper-cased data).  Used by the joins below to
# replace abbreviations in the state columns with full names.
st.codes<-data.frame(
  statecode=as.character(c("AL", "AK", "AS", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FM", "FL", "GA", "GU", "HI",
                           "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MH", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE",
                           "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "MP", "OH", "OK", "OR", "PW", "PA", "PR", "RI", "SC", "SD",
                           "TN", "TX", "UT", "VT", "VI", "VA", "WA", "WV", "WI", "WY"
  )),
  statename=as.character(c("ALABAMA", "ALASKA", "AMERICAN SAMOA", "ARIZONA", "ARKANSAS", "CALIFORNIA", "COLORADO", "CONNECTICUT",
                           "DELAWARE", "DISTRICT OF COLUMBIA", "FEDERATED STATES OF MICRONESIA", "FLORIDA", "GEORGIA", "GUAM", "HAWAII", "IDAHO",
                           "ILLINOIS", "INDIANA", "IOWA", "KANSAS", "KENTUCKY", "LOUISIANA", "MAINE", "MARSHALL ISLANDS", "MARYLAND", "MASSACHUSETTS",
                           "MICHIGAN", "MINNESOTA", "MISSISSIPPI", "MISSOURI", "MONTANA", "NEBRASKA", "NEVADA", "NEW HAMPSHIRE", "NEW JERSEY", "NEW MEXICO",
                           "NEW YORK", "NORTH CAROLINA", "NORTH DAKOTA", "NORTHERN MARIANA ISLANDS", "OHIO", "OKLAHOMA", "OREGON", "PALAU", "PENNSYLVANIA",
                           "PUERTO RICO", "RHODE ISLAND", "SOUTH CAROLINA", "SOUTH DAKOTA", "TENNESSEE", "TEXAS", "UTAH", "VERMONT", "VIRGIN ISLANDS",
                           "VIRGINIA", "WASHINGTON", "WEST VIRGINIA", "WISCONSIN", "WYOMING"
  )), stringsAsFactors = FALSE
)
# Expand EMPLOYER_STATE abbreviations to full names: the join attaches
# `statename` where the column held a 2-letter code; rows that did not
# match (already full names, or anything unrecognised) get NA and fall back
# to their original value; the temporary `statename` column is then dropped.
permds=left_join(permds, st.codes, by = c("EMPLOYER_STATE"="statecode"))
permds$statename[which(is.na(permds$statename))]<-permds$EMPLOYER_STATE[which(is.na(permds$statename))]
permds$EMPLOYER_STATE<-permds$statename
permds=subset(permds, select = -c(statename))
# Same expansion for the worksite state column.
permds=left_join(permds, st.codes, by = c("JOB_INFO_WORK_STATE"="statecode"))
permds$statename[which(is.na(permds$statename))]<-permds$JOB_INFO_WORK_STATE[which(is.na(permds$statename))]
permds$JOB_INFO_WORK_STATE<-permds$statename
permds=subset(permds, select = -c(statename))
# Sequential row identifier for the cleaned data set.
permds$ID <- 1:nrow(permds)
# Employer-level aggregate for mapping: application count and mean
# annualised offered wage per employer/location, most active first.
empmapdat = permds %>% group_by(EMPLOYER_NAME,EMPLOYER_ADDRESS_1,EMPLOYER_CITY,
                                EMPLOYER_STATE,employer_zip_code) %>%
  summarise(count = n(), mean_salary = mean(normalized_wage, na.rm = TRUE)) %>%
  arrange(desc(count),desc(mean_salary))
empmapdat$ID <- 1:nrow(empmapdat)
# Persist both the cleaned row-level data (overwriting the raw extract
# saved earlier under the same name) and the employer aggregate.
saveRDS(permds,"PermData.rds")
saveRDS(empmapdat,"PermEmpMapsdat.rds")
#cleaning all objects in my environment
#rm(list=ls(all=TRUE))
| /perm_fileread_clean.R | permissive | bdetweiler/stat-8416-final-project | R | false | false | 14,110 | r | library(readxl)
library(dplyr)
# setwd("C:/Users/pavi/Desktop/UNO/IntroDataScience/Project/Data/testdat")
# data frame with columns that arent there in 2008-2014 dataset
add_cols = data.frame('JOB_INFO_WORK_POSTAL_CODE'=NA, 'EMPLOYER_NUM_EMPLOYEES'=NA, 'EMPLOYER_YR_ESTAB'=NA, 'EMPLOYER_PHONE'=NA, 'AGENT_FIRM_NAME'=NA, 'AGENT_CITY'=NA, 'AGENT_STATE'=NA, 'PW_SOC_TITLE'=NA, 'JOB_INFO_JOB_TITLE'=NA, 'JOB_INFO_EDUCATION'=NA, 'JOB_INFO_MAJOR'=NA, 'JOB_INFO_FOREIGN_ED'=NA, 'RI_1ST_AD_NEWSPAPER_NAME'=NA, 'RI_2ND_AD_NEWSPAPER_NAME'=NA, 'FW_INFO_YR_REL_EDU_COMPLETED'=NA, 'FOREIGN_WORKER_INFO_INST'=NA, 'JOB_INFO_EXPERIENCE_NUM_MONTHS'=NA)
dataset <- function (ds, y) {
if (y >= 2015) {
ds = subset(ds,select=c(YEAR, CASE_NUMBER, DECISION_DATE, CASE_STATUS, EMPLOYER_NAME, EMPLOYER_ADDRESS_1, EMPLOYER_ADDRESS_2,
EMPLOYER_CITY, EMPLOYER_STATE, EMPLOYER_POSTAL_CODE, NAICS_US_CODE, NAICS_US_TITLE,
PW_SOC_CODE, PW_JOB_TITLE_9089, PW_LEVEL_9089, PW_AMOUNT_9089, PW_UNIT_OF_PAY_9089,
WAGE_OFFER_FROM_9089, WAGE_OFFER_TO_9089, WAGE_OFFER_UNIT_OF_PAY_9089,
JOB_INFO_WORK_CITY, JOB_INFO_WORK_STATE, COUNTRY_OF_CITIZENSHIP, CLASS_OF_ADMISSION
, JOB_INFO_WORK_POSTAL_CODE ,EMPLOYER_NUM_EMPLOYEES, EMPLOYER_YR_ESTAB, EMPLOYER_PHONE, AGENT_FIRM_NAME, AGENT_CITY,
AGENT_STATE, PW_SOC_TITLE, JOB_INFO_JOB_TITLE, JOB_INFO_EDUCATION, JOB_INFO_MAJOR, JOB_INFO_FOREIGN_ED,
RI_1ST_AD_NEWSPAPER_NAME, RI_2ND_AD_NEWSPAPER_NAME, FW_INFO_YR_REL_EDU_COMPLETED,
FOREIGN_WORKER_INFO_INST, JOB_INFO_EXPERIENCE_NUM_MONTHS
))
return(ds)
}
else {
ds = subset(ds,select=c(YEAR, CASE_NUMBER, DECISION_DATE, CASE_STATUS, EMPLOYER_NAME, EMPLOYER_ADDRESS_1, EMPLOYER_ADDRESS_2,
EMPLOYER_CITY, EMPLOYER_STATE, EMPLOYER_POSTAL_CODE, NAICS_US_CODE, NAICS_US_TITLE,
PW_SOC_CODE, PW_JOB_TITLE_9089, PW_LEVEL_9089, PW_AMOUNT_9089, PW_UNIT_OF_PAY_9089,
WAGE_OFFER_FROM_9089, WAGE_OFFER_TO_9089, WAGE_OFFER_UNIT_OF_PAY_9089,
JOB_INFO_WORK_CITY, JOB_INFO_WORK_STATE, COUNTRY_OF_CITIZENSHIP, CLASS_OF_ADMISSION
))
return(data.frame(ds,add_cols))
}
}
x =read_excel("PERM_FY2008.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',25))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,COUNTRY_OF_CITIZENSHIP = COUNTRY_OF_CITZENSHIP)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY2008.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2008',EMPLOYER_ADDRESS_2=NA)
file2008=dataset(file,2008)
x =read_excel("PERM_FY2009.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',25))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY2009.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2009')
file2009=dataset(file,2009)
x =read_excel("PERM_FY2010.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
y =read_excel("PERM_FY2010.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2010')
file2010=dataset(file,2010)
x =read_excel("PERM_FY2011.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
y =read_excel("PERM_FY2011.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2011')
file2011=dataset(file,2011)
x =read_excel("PERM_FY2012_Q4.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',26))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
names(x) <- gsub(" ", "_", names(x))
names(x) <- gsub("2007_","", names(x))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,COUNTRY_OF_CITIZENSHIP=COUNTRY_OF_CITZENSHIP)
y =read_excel("PERM_FY2012_Q4.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2012',EMPLOYER_ADDRESS_2=NA)
file2012=dataset(file,2012)
x =read_excel("PERM_FY2013.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
names(x) <- gsub(" ", "_", names(x))
names(x) <- toupper(gsub("2007_","", names(x)))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY2013.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
names(y) <- toupper(gsub("2007_","", names(y)))
y=dplyr::rename(y,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089)
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2013')
file$WAGE_OFFER_UNIT_OF_PAY_9089=file$PW_UNIT_OF_PAY_9089
file2013=dataset(file,2013)
x =read_excel("PERM_FY14_Q4.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',27))
names(x) <- gsub(" ", "_", names(x))
names(x) <- toupper(gsub("2007_","", names(x)))
x=dplyr::rename(x,CASE_NUMBER=CASE_NO,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089,WAGE_OFFER_UNIT_OF_PAY_9089=WAGE_OFFERED_UNIT_OF_PAY_9089)
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_FY14_Q4.xlsx",col_names = TRUE,sheet = 1,na = "")
names(y) <- gsub(" ", "_", names(y))
names(y) <- toupper(gsub("2007_","", names(y)))
y=dplyr::rename(y,WAGE_OFFER_FROM_9089=WAGE_OFFERED_FROM_9089,WAGE_OFFER_TO_9089=WAGE_OFFERED_TO_9089)
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2014')
file2014=dataset(file,2014)
x =read_excel("PERM_Disclosure_Data_FY15_Q4.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',125))
names(x) <- toupper(names(x))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_Disclosure_Data_FY15_Q4.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2015')
file2015=dataset(file,2015)
x =read_excel("PERM_Disclosure_Data_FY16.xlsx",col_names = TRUE,sheet = 1,na = "", col_types = rep('text',125))
x=subset(x,select=-c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089,WAGE_OFFER_TO_9089))
y =read_excel("PERM_Disclosure_Data_FY16.xlsx",col_names = TRUE,sheet = 1,na = "")
y=subset(y,select=c(DECISION_DATE,PW_AMOUNT_9089,WAGE_OFFER_FROM_9089,WAGE_OFFER_TO_9089))
file=cbind(x,y, YEAR='2016')
file2016=dataset(file,2016)
a = rbind(file2008,file2009,file2010,file2011,file2012,file2013,file2014,file2015,file2016)
saveRDS(a,"PermData.rds")
#cleaning all objects in my environment
#rm(list=ls(all=TRUE))
# Now read a clean raw file for data manipulation
mysample = readRDS(file="PermData.rds")
# mysample <- mysample[sample(1:nrow(mysample), 500,replace=FALSE),]
mysample=transform(mysample,DECISION_DATE=as.Date(mysample$DECISION_DATE, format = '%m/%d/%Y'))
# write.csv(mysample, file = "sampleperm.csv")
# changing all character variables to upper case
permds <- mysample %>% mutate_if(is.factor,as.character)%>% mutate_if(is.character,toupper)
# converting time values to standard name format
permds$PW_UNIT_OF_PAY_9089[permds$PW_UNIT_OF_PAY_9089 == 'HOUR'] <- 'HR'
permds$PW_UNIT_OF_PAY_9089[permds$PW_UNIT_OF_PAY_9089 == 'WEEK'] <- 'WK'
permds$PW_UNIT_OF_PAY_9089[permds$PW_UNIT_OF_PAY_9089 == 'YEAR'] <- 'YR'
permds$PW_UNIT_OF_PAY_9089[permds$PW_UNIT_OF_PAY_9089 == 'MONTH'] <- 'MTH'
permds$PW_UNIT_OF_PAY_9089[permds$PW_UNIT_OF_PAY_9089 == 'BI'] <- 'BIWK'
permds$WAGE_OFFER_UNIT_OF_PAY_9089[permds$WAGE_OFFER_UNIT_OF_PAY_9089 == 'HOUR'] <- 'HR'
permds$WAGE_OFFER_UNIT_OF_PAY_9089[permds$WAGE_OFFER_UNIT_OF_PAY_9089 == 'WEEK'] <- 'WK'
permds$WAGE_OFFER_UNIT_OF_PAY_9089[permds$WAGE_OFFER_UNIT_OF_PAY_9089 == 'YEAR'] <- 'YR'
permds$WAGE_OFFER_UNIT_OF_PAY_9089[permds$WAGE_OFFER_UNIT_OF_PAY_9089 == 'MONTH'] <- 'MTH'
permds$WAGE_OFFER_UNIT_OF_PAY_9089[permds$WAGE_OFFER_UNIT_OF_PAY_9089 == 'BI'] <- 'BIWK'
# prevailing wage normalized and renamed
normalized_prevailing_wage <- permds$PW_UNIT_OF_PAY_9089
normalized_prevailing_wage[which(normalized_prevailing_wage == 'YR')] <- 1
normalized_prevailing_wage[which(normalized_prevailing_wage == 'HR')] <- 40 * 52
normalized_prevailing_wage[which(normalized_prevailing_wage == 'BIWK')] <- 52 / 2
normalized_prevailing_wage[which(normalized_prevailing_wage == 'WK')] <- 52
normalized_prevailing_wage[which(normalized_prevailing_wage == 'MTH')] <- 12
normalized_prevailing_wage[which(is.na(normalized_prevailing_wage))] <- 0
normalized_prevailing_wage <- as.numeric(normalized_prevailing_wage)
permds$PW_AMOUNT_9089 <- normalized_prevailing_wage * permds$PW_AMOUNT_9089
# wage from and maximum normalized and renamed
normalized_wage <- permds$WAGE_OFFER_UNIT_OF_PAY_9089
normalized_wage[which(normalized_wage == 'YR')] <- 1
normalized_wage[which(normalized_wage == 'HR')] <- 40 * 52
normalized_wage[which(normalized_wage == 'BIWK')] <- 52 / 2
normalized_wage[which(normalized_wage == 'WK')] <- 52
normalized_wage[which(normalized_wage == 'MTH')] <- 12
normalized_wage[which(is.na(normalized_wage))] <- 0
normalized_wage <- as.numeric(normalized_wage)
normalized_wage_max <- as.numeric(normalized_wage)
permds$WAGE_OFFER_FROM_9089[which(is.na(permds$WAGE_OFFER_FROM_9089))] <- 0
permds$WAGE_OFFER_TO_9089[which(is.na(permds$WAGE_OFFER_TO_9089))] <- 0
permds$normalized_wage <- normalized_wage * permds$WAGE_OFFER_FROM_9089
permds$normalized_wage_max <- normalized_wage_max * permds$WAGE_OFFER_TO_9089
# renaming job postal code to worksite zip code
permds$worksite_zip_code <- gsub(" ", "", permds$JOB_INFO_WORK_POSTAL_CODE)
permds$worksite_zip_code <- gsub("-[[:digit:]]*", "", permds$worksite_zip_code)
# renaming employer postal code to employer zip code
permds$employer_zip_code <- gsub(" ", "", permds$EMPLOYER_POSTAL_CODE)
permds$employer_zip_code <- gsub("-[[:digit:]]*", "", permds$employer_zip_code)
st.codes<-data.frame(
statecode=as.character(c("AL", "AK", "AS", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FM", "FL", "GA", "GU", "HI",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MH", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE",
"NV", "NH", "NJ", "NM", "NY", "NC", "ND", "MP", "OH", "OK", "OR", "PW", "PA", "PR", "RI", "SC", "SD",
"TN", "TX", "UT", "VT", "VI", "VA", "WA", "WV", "WI", "WY"
)),
statename=as.character(c("ALABAMA", "ALASKA", "AMERICAN SAMOA", "ARIZONA", "ARKANSAS", "CALIFORNIA", "COLORADO", "CONNECTICUT",
"DELAWARE", "DISTRICT OF COLUMBIA", "FEDERATED STATES OF MICRONESIA", "FLORIDA", "GEORGIA", "GUAM", "HAWAII", "IDAHO",
"ILLINOIS", "INDIANA", "IOWA", "KANSAS", "KENTUCKY", "LOUISIANA", "MAINE", "MARSHALL ISLANDS", "MARYLAND", "MASSACHUSETTS",
"MICHIGAN", "MINNESOTA", "MISSISSIPPI", "MISSOURI", "MONTANA", "NEBRASKA", "NEVADA", "NEW HAMPSHIRE", "NEW JERSEY", "NEW MEXICO",
"NEW YORK", "NORTH CAROLINA", "NORTH DAKOTA", "NORTHERN MARIANA ISLANDS", "OHIO", "OKLAHOMA", "OREGON", "PALAU", "PENNSYLVANIA",
"PUERTO RICO", "RHODE ISLAND", "SOUTH CAROLINA", "SOUTH DAKOTA", "TENNESSEE", "TEXAS", "UTAH", "VERMONT", "VIRGIN ISLANDS",
"VIRGINIA", "WASHINGTON", "WEST VIRGINIA", "WISCONSIN", "WYOMING"
)), stringsAsFactors = FALSE
)
# merge state codes with data frame and those items with full statenames are replaced by employerstate
permds=left_join(permds, st.codes, by = c("EMPLOYER_STATE"="statecode"))
permds$statename[which(is.na(permds$statename))]<-permds$EMPLOYER_STATE[which(is.na(permds$statename))]
permds$EMPLOYER_STATE<-permds$statename
permds=subset(permds, select = -c(statename))
permds=left_join(permds, st.codes, by = c("JOB_INFO_WORK_STATE"="statecode"))
permds$statename[which(is.na(permds$statename))]<-permds$JOB_INFO_WORK_STATE[which(is.na(permds$statename))]
permds$JOB_INFO_WORK_STATE<-permds$statename
permds=subset(permds, select = -c(statename))
permds$ID <- 1:nrow(permds)
empmapdat = permds %>% group_by(EMPLOYER_NAME,EMPLOYER_ADDRESS_1,EMPLOYER_CITY,
EMPLOYER_STATE,employer_zip_code) %>%
summarise(count = n(), mean_salary = mean(normalized_wage, na.rm = TRUE)) %>%
arrange(desc(count),desc(mean_salary))
empmapdat$ID <- 1:nrow(empmapdat)
saveRDS(permds,"PermData.rds")
saveRDS(empmapdat,"PermEmpMapsdat.rds")
#cleaning all objects in my environment
#rm(list=ls(all=TRUE))
|
#***********************************************************************************************
# TITLE: AFL match score progression (2001-2019)
#
# DESCRIPTION: Transformation of raw score progression data into usable formats for AFL matches (2008-)
#
# AUTHOR: InsightLane
#
# CREATED: Original 2016
# MODIFIED: Last updated 2021
#
# INPUTS: Time series score data from AFL Tables (https://afltables.com/afl/stats/times.csv)
#
# OUTPUTS: CSV files of enriched score progression data and score progression ScoreWorm data
#
# STEPS:
# 1. Download extract data from AFL Tables
# 2. Import match scores from AFL Tables big list
# 3. Join match scores onto score progression data
# 4. Transform and cleanse score progression events
# 5. Create new data frame to add rows for the moment before a score
# 6. Create new data frame with lengths of quarter for each game
# 7. Join data frames together
# 8. Create additional columns to describe score progression data
# 9. Duplicate and flip team names to double score progression data to have associated with both teams
# 10. Append both data frames together for final output
# 11. Output CSV files
#
# ***********************************************************************************************
library(dplyr)
# Hard reset of the workspace -- this script is written to run standalone;
# avoid sourcing it from an interactive session whose objects you care about.
rm(list = ls())
# -----------------------------------------------------------------------------------------------
# 1. Download extract data from AFL Tables
# -----------------------------------------------------------------------------------------------
# #Download latest zip file from AFL tables to working directory
#
# data <- download.file("http://afltables.com/public/times.zip", destfile="times.zip")
#
# # Unzip times.csv in working directory
#
# dataunz <- unzip("times.zip")
# Download the latest score-progression extract into the working directory
download.file("https://afltables.com/afl/stats/times.csv", destfile="times.csv")
# Load the CSV that was just downloaded.  Read it from the working
# directory (the download destination above) rather than a machine-specific
# absolute path, so the script runs on any machine.  The file has no header
# row; keep strings as character.
raw_score_progression <- read.csv("times.csv",
                                  header = FALSE,
                                  stringsAsFactors = FALSE)
# Name the ten raw columns of the score-progression extract.
colnames(raw_score_progression) <- c("GameID","Team1","Team2","Date",
                                     "Quarter","Event","Timescore","Teamscore",
                                     "Playerscore","PlayerID")
# Enrich with a parsed Date ("dd-Mon-YYYY"), the season year extracted from
# that date, and a Status flag marking these rows as original events (as
# opposed to rows synthesised later in the script).
original_score_progression <- data.frame(raw_score_progression,
                                         Dateadj = as.Date(raw_score_progression$Date, "%d-%b-%Y"),
                                         Season = as.numeric(format(as.Date(raw_score_progression$Date, "%d-%b-%Y"), "%Y")),
                                         Status = "Original")
# Timescore arrives as text; coerce to numeric (non-numeric entries -> NA).
original_score_progression$Timescore <- as.numeric(original_score_progression$Timescore)
# Harmonise the historical "Kangaroos" name to "North Melbourne" in both
# team columns and in the scoring-team column.
original_score_progression$Team1 <- ifelse((original_score_progression$Team1 == "Kangaroos"), "North Melbourne", original_score_progression$Team1)
original_score_progression$Team2 <- ifelse((original_score_progression$Team2 == "Kangaroos"), "North Melbourne", original_score_progression$Team2)
original_score_progression$Teamscore <- ifelse((original_score_progression$Teamscore == "Kangaroos"), "North Melbourne", original_score_progression$Teamscore)
# -----------------------------------------------------------------------------------------------
# 2. Import match scores from AFL Tables big list
# -----------------------------------------------------------------------------------------------
# Import scores from http://afltables.com/afl/stats/biglists/bg3.txt, using fixed widths
#rm(Team1scores, awayscores, match_scores)
# Fixed-width read straight from the AFL Tables "big list"; skip = 2 drops
# the two leading non-data lines, and widths/col.names describe the eight
# columns (team scores are "goals.behinds.points" strings, split below).
raw_match_scores <- read.fwf("http://afltables.com/afl/stats/biglists/bg3.txt",
                             skip = 2, header = FALSE,
                             widths = c(7, 17, 5, 18, 17, 18, 18, 18),
                             col.names = c("ScoresGameID","Date","Round","Team1",
                                           "Team1Score","Team2","Team2Score","Venue")
)
# Split "goals.behinds.points" score strings into three numeric columns per team
parse_score <- function(score, prefix) {
  parts <- do.call(rbind, strsplit(as.character(score), "\\."))
  out <- data.frame(as.numeric(parts[, 1]),
                    as.numeric(parts[, 2]),
                    as.numeric(parts[, 3]))
  names(out) <- paste0(prefix, c("Goals", "Behinds", "Points"))
  out
}
team1scores <- parse_score(raw_match_scores$Team1Score, "Team1")
team2scores <- parse_score(raw_match_scores$Team2Score, "Team2")
# Combine parsed date and per-team goal/behind/point columns into a new data
# frame, then strip the right-padding the fixed-width read leaves on text columns
match_scores <- data.frame(raw_match_scores,
                           Dateadj = as.Date(raw_match_scores$Date, "%d-%b-%Y"),
                           team1scores, team2scores)
for (text_col in c("Team1", "Team2", "Round", "Venue")) {
  match_scores[[text_col]] <- trimws(as.character(match_scores[[text_col]]), which = "right")
}
# Normalise team-name aliases so they join cleanly onto the progression data
fix_team_name <- function(team) {
  team <- ifelse(team == "GW Sydney", "Greater Western Sydney", team)
  ifelse(team == "Kangaroos", "North Melbourne", team)
}
match_scores$Team1 <- fix_team_name(match_scores$Team1)
match_scores$Team2 <- fix_team_name(match_scores$Team2)
# Final margin relative to each side, plus the absolute margin, for each match
match_scores$Team1FinalMargin <- match_scores$Team1Points - match_scores$Team2Points
match_scores$Team2FinalMargin <- -match_scores$Team1FinalMargin
match_scores$AbsFinalMargin <- abs(match_scores$Team1FinalMargin)
# Result (relative to team 1), winner and loser for each match; drawn games
# carry the literal string "Draw" in Winner/Loser
final_margin <- match_scores$Team1FinalMargin
match_scores$Result <- ifelse(final_margin > 0, "Team 1 win",
                       ifelse(final_margin < 0, "Team 2 win",
                       ifelse(final_margin == 0, "Draw", NA)))
match_scores$Winner <- ifelse(final_margin > 0, as.character(match_scores$Team1),
                       ifelse(final_margin < 0, as.character(match_scores$Team2),
                       ifelse(final_margin == 0, "Draw", NA)))
match_scores$Loser <- ifelse(final_margin > 0, as.character(match_scores$Team2),
                      ifelse(final_margin < 0, as.character(match_scores$Team1),
                      ifelse(final_margin == 0, "Draw", NA)))
# -----------------------------------------------------------------------------------------------
# 3. Join match scores onto score progression data
# -----------------------------------------------------------------------------------------------
# Left join on date + both team names, so every progression row picks up its
# match-level record. NOTE(review): merge() re-sorts rows by the join keys;
# event ordering is re-established in section 5 before it is relied upon.
original_score_progression <- merge(x = original_score_progression, y = match_scores,
                                    by = c("Dateadj", "Team1", "Team2"),
                                    all.x = TRUE)
# -----------------------------------------------------------------------------------------------
# 4. Transform and cleanse score progression events
# -----------------------------------------------------------------------------------------------
# Rename events as Goal/Behind/Rushed Behind/Final
# (if Event was read as numeric, the first character assignment coerces the
# whole column to character; later comparisons against 1/2/3 still match)
original_score_progression$Event[original_score_progression$Event == 0] <- "G"
original_score_progression$Event[original_score_progression$Event == 1] <- "B"
original_score_progression$Event[original_score_progression$Event == 2] <- "RB"
original_score_progression$Event[original_score_progression$Event == 3] <- "F"
# Create new data frame to add rows in for start of quarters
# Take copy of all end of quarter events (where Event = F), one per quarter
qtrstarts <- original_score_progression[(original_score_progression$Event == "F"), ]
# Remove existing row names
rownames(qtrstarts) <- NULL
# Set all times to 0 for start of quarter
qtrstarts$Timescore <- 0
# Set all event codes to "S" to mark these synthetic quarter-start rows
qtrstarts$Event <- "S"
# -----------------------------------------------------------------------------------------------
# 5. Create new data frame to add rows for the moment before a score
# -----------------------------------------------------------------------------------------------
# Take copy of all scoring events (where Event = G/B/RB)
prescore <- original_score_progression[(original_score_progression$Event == "G")
                                       | (original_score_progression$Event == "B")
                                       | (original_score_progression$Event == "RB"), ]
# Remove existing row names
rownames(prescore) <- NULL
# Set all times to 1 second before the score
prescore$Timescore <- prescore$Timescore - 1
# Set all event codes to PS for moment before the score
prescore$Event <- "PS"
# Append the synthetic quarter-start ("S") and pre-score ("PS") rows to the
# real events
original_score_progression <- rbind(original_score_progression, qtrstarts, prescore)
# Temporarily bump quarter-final ("F") rows by one second so the sort below
# places them after any score recorded in the quarter's final second (and its
# PS partner); the bump is reversed immediately after sorting.
original_score_progression$Timescore <- ifelse((original_score_progression$Event == "F"), original_score_progression$Timescore + 1, original_score_progression$Timescore)
original_score_progression <- original_score_progression[order(original_score_progression$GameID, original_score_progression$Quarter, original_score_progression$Timescore), ]
original_score_progression$Timescore <- ifelse((original_score_progression$Event == "F"), original_score_progression$Timescore - 1, original_score_progression$Timescore)
rownames(original_score_progression) <- NULL
# -----------------------------------------------------------------------------------------------
# 6. Create new data frame with lengths of quarter for each game
# -----------------------------------------------------------------------------------------------
# One row per (game, quarter): the quarter's length in seconds (time of its
# "F" event) plus the cumulative elapsed time at the end of that quarter.
# Renaming Timescore -> QtrLength inside select() replaces the separate
# names()<- assignment the previous version used.
qtrlengths <- original_score_progression %>%
  filter(Event == "F") %>%
  select(GameID, Quarter, QtrLength = Timescore) %>%
  group_by(GameID) %>%
  mutate(RunningQtrLength = cumsum(QtrLength))
# -----------------------------------------------------------------------------------------------
# 7. Join data frames together
# -----------------------------------------------------------------------------------------------
# Join quarter length onto time score data
original_score_progression <- merge(x = original_score_progression, y = qtrlengths,
                                    by = c("GameID", "Quarter"),
                                    all.x = TRUE)
# Re-sort by game/quarter/time because merge() re-orders rows by the join keys.
# NOTE(review): the "F"-row time bump from section 5 is NOT reapplied here, so
# rows tied at the quarter's final second rely on merge() having preserved the
# earlier relative order — verify F rows still sort last within each quarter.
original_score_progression <- original_score_progression[order(original_score_progression$GameID, original_score_progression$Quarter, original_score_progression$Timescore), ]
# -----------------------------------------------------------------------------------------------
# 8. Create additional columns to describe score progression data
# -----------------------------------------------------------------------------------------------
# Fraction of the whole match each quarter contributes: regulation quarters
# (1-4) count 0.25 each, extra-time periods (5-6) count 0.0625 each; the
# start-of-quarter offsets follow from those weights. match() maps any quarter
# outside 1-6 to NA, reproducing the nested-ifelse fallback this replaces
# (the old version duplicated the same arithmetic across 12 branches, with
# the four regulation-quarter branches of QtrTimePerc literally identical).
qtr_weight <- c(0.25, 0.25, 0.25, 0.25, 0.0625, 0.0625)
qtr_offset <- c(0, 0.25, 0.5, 0.75, 1, 1.0625)
qtr_index <- match(original_score_progression$Quarter, 1:6)
qtr_fraction <- original_score_progression$Timescore / original_score_progression$QtrLength
# Proportion of total match time elapsed at this event
# (quarter-start offset + weighted progress through the current quarter)
original_score_progression$TimePerc <- qtr_offset[qtr_index] + qtr_weight[qtr_index] * qtr_fraction
# Weighted progress through the current quarter only (no offset)
original_score_progression$QtrTimePerc <- qtr_weight[qtr_index] * qtr_fraction
# Total elapsed game time (seconds) at this event, accumulated across quarters
original_score_progression$GameTime <- original_score_progression$RunningQtrLength -
  original_score_progression$QtrLength + original_score_progression$Timescore
# Points value of each event: goal = 6, behind/rushed behind = 1, other rows 0
original_score_progression$Score <- ifelse(original_score_progression$Event == "G", 6,
                                    ifelse(original_score_progression$Event == "B" |
                                           original_score_progression$Event == "RB", 1, 0))
# Signed score "worms": positive when the event favours that side
scored_by_team1 <- original_score_progression$Team1 == original_score_progression$Teamscore
original_score_progression$Team1Worm <- ifelse(scored_by_team1,
                                               original_score_progression$Score,
                                               -original_score_progression$Score)
original_score_progression$Team2Worm <- -original_score_progression$Team1Worm
original_score_progression$WinWorm <- ifelse(original_score_progression$Result == "Team 1 win",
                                             original_score_progression$Team1Worm,
                                      ifelse(original_score_progression$Result == "Team 2 win",
                                             original_score_progression$Team2Worm,
                                             NA))
# Per-event indicator columns: points scored, goal flag, behind flag and
# scoring-shot flag for each side (the same boolean masks are reused rather
# than re-spelling the conditions in every ifelse as before)
is_goal <- original_score_progression$Event == "G"
is_behind <- original_score_progression$Event == "B" | original_score_progression$Event == "RB"
is_shot <- is_goal | is_behind
original_score_progression$Team1P <- ifelse(scored_by_team1, original_score_progression$Score, 0)
original_score_progression$Team1G <- ifelse(scored_by_team1 & is_goal, 1, 0)
original_score_progression$Team1B <- ifelse(scored_by_team1 & is_behind, 1, 0)
original_score_progression$Team1SS <- ifelse(scored_by_team1 & is_shot, 1, 0)
scored_by_team2 <- original_score_progression$Team2 == original_score_progression$Teamscore
original_score_progression$Team2P <- ifelse(scored_by_team2, original_score_progression$Score, 0)
original_score_progression$Team2G <- ifelse(scored_by_team2 & is_goal, 1, 0)
original_score_progression$Team2B <- ifelse(scored_by_team2 & is_behind, 1, 0)
original_score_progression$Team2SS <- ifelse(scored_by_team2 & is_shot, 1, 0)
# Create new column for cumulative team 1 score, away score and team 1 margin
# Running tallies, margins and lead-change bookkeeping, computed per game.
# NOTE(review): the lag(..., 2) calls appear to rely on section 5 having
# inserted a synthetic "PS" row one second before every score, so a lag of 2
# steps back to the previous real event — confirm against the final row layout.
original_score_progression <- original_score_progression %>%
  group_by(GameID) %>%
  # Running totals of points/goals/behinds/scoring shots for each side
  mutate(Team1CumPoints = cumsum(Team1P)) %>%
  mutate(Team1CumGoals = cumsum(Team1G)) %>%
  mutate(Team1CumBehinds = cumsum(Team1B)) %>%
  mutate(Team1CumShots = cumsum(Team1SS)) %>%
  mutate(Team2CumPoints = cumsum(Team2P)) %>%
  mutate(Team2CumGoals = cumsum(Team2G)) %>%
  mutate(Team2CumBehinds = cumsum(Team2B)) %>%
  mutate(Team2CumShots = cumsum(Team2SS)) %>%
  # Running margin from each side's perspective
  mutate(Team1Margin = cumsum(Team1Worm)) %>%
  mutate(Team2Margin = -1 * Team1Margin) %>%
  # Running margin from the eventual winner's perspective; NA for drawn games
  mutate(WinMargin = ifelse(Team1FinalMargin > 0, Team1Margin,
                     ifelse(Team1FinalMargin < 0, -1 * Team1Margin,
                            NA))) %>%
  # Which side currently leads / trails; ties get the literal "Scores level"
  mutate(InFront = ifelse(Team1Margin > 0, Team1,
                   ifelse(Team1Margin < 0, Team2,
                          "Scores level"))) %>%
  mutate(Behind = ifelse(Team1Margin > 0, Team2,
                  ifelse(Team1Margin < 0, Team1,
                         "Scores level"))) %>%
  mutate(AbsMargin = abs(Team1Margin)) %>%
  # Absolute margin two rows back (before the current score and its PS row)
  mutate(PreAbsMargin = lag(AbsMargin, 2)) %>%
  # Margin and leading/trailing status from the scoring team's perspective,
  # both after (ScorerMargin) and before (PreScorerMargin) this score
  mutate(ScorerMargin = ifelse(Teamscore == Team1, Team1Margin,
                        ifelse(Teamscore == Team2, Team2Margin,
                               NA))) %>%
  mutate(ScorerStatus = ifelse(ScorerMargin > 0, "In front",
                        ifelse(ScorerMargin < 0, "Behind",
                        ifelse(ScorerMargin == 0, "Level",
                               NA)))) %>%
  mutate(PreScorerMargin = ifelse(Teamscore == Team1, Team1Margin - Team1P,
                           ifelse(Teamscore == Team2, Team2Margin - Team2P,
                                  NA))) %>%
  mutate(PreScorerStatus = ifelse(PreScorerMargin > 0, "In front",
                           ifelse(PreScorerMargin < 0, "Behind",
                           ifelse(PreScorerMargin == 0, "Level",
                                  NA)))) %>%
  # Final match margin from the scoring team's perspective
  mutate(ScorerFinalMargin = ifelse(Teamscore == Team1, Team1FinalMargin,
                             ifelse(Teamscore == Team2, Team2FinalMargin,
                                    NA))) %>%
  # Team that last held the lead: look back 2 rows (or 4 if scores were level
  # there), then step back once more if the result is still "Scores level".
  # NOTE(review): two levels of look-back may not cover long runs of tied
  # scores — verify on games with repeated level states.
  mutate(LastLeadingTeam = ifelse(lag(InFront, 2) == "Scores level",
                                  lag(InFront, 4),
                                  lag(InFront, 2))
         ) %>%
  mutate(LastLeadingTeam = ifelse(LastLeadingTeam == "Scores level",
                                  lag(LastLeadingTeam, 2),
                                  LastLeadingTeam)
         ) %>%
  # Flag genuine lead changes: leader differs from the last leader, on a real
  # event (not synthetic PS/S/F rows) and not while scores are level
  mutate(LeadChangeFlag = ifelse((InFront != LastLeadingTeam) & Event != "PS" & Event != "S" & Event != "F" & InFront != "Scores level",
                                 1,
                                 0)
         ) %>%
  mutate(LeadChangeFlag = ifelse(is.na(LeadChangeFlag), 0, LeadChangeFlag)
         ) %>%
  ungroup() %>%
  group_by(GameID, Quarter) %>%
  # Elapsed time since the previous score, within each quarter. Quarter-end
  # ("F") rows look back 1 row; scoring rows look back 2 (past their PS row).
  mutate(GameTimeSinceLastScore = ifelse((Event == "F"),
                                         (GameTime - lag(GameTime, 1)),
                                         (GameTime - lag(GameTime, 2)))) %>%
  mutate(TimePercSinceLastScore = ifelse((Event == "F"),
                                         (TimePerc - lag(TimePerc, 1)),
                                         (TimePerc - lag(TimePerc, 2)))) %>%
  # Previous real event type, and whether the same team scored consecutively
  mutate(LastEvent = lag(Event, 2)) %>%
  mutate(LastScoreTeam = ifelse(Teamscore == lag(Teamscore, 2), "Same", "Other"))
# -----------------------------------------------------------------------------------------------
# 9. Duplicate and flip team names to double score progression data to have associated with both teams
# -----------------------------------------------------------------------------------------------
# Select every column with each Team1/Team2 pair swapped POSITIONALLY. At this
# point the columns keep their ORIGINAL names (subsetting only reorders), so
# e.g. the column in the "Team1" position still carries the name "Team2".
flipped_score_progression <- original_score_progression[ , c("GameID", "Quarter", "Dateadj", "Team2", "Team1",
                                                             "Date.x", "Event", "Timescore", "Teamscore", "Playerscore",
                                                             "PlayerID", "Season", "Status", "ScoresGameID", "Date.y",
                                                             "Round", "Team2Score", "Team1Score", "Venue", "Team2Goals",
                                                             "Team2Behinds", "Team2Points", "Team1Goals", "Team1Behinds", "Team1Points",
                                                             "Team2FinalMargin", "Team1FinalMargin", "AbsFinalMargin", "Result", "Winner", "Loser",
                                                             "QtrLength", "RunningQtrLength", "TimePerc", "QtrTimePerc", "GameTime", "Score",
                                                             "Team2Worm", "Team1Worm", "WinWorm", "Team2P", "Team2G", "Team2B", "Team2SS",
                                                             "Team1P", "Team1G", "Team1B", "Team1SS", "Team2CumPoints",
                                                             "Team2CumGoals", "Team2CumBehinds", "Team2CumShots", "Team1CumPoints", "Team1CumGoals",
                                                             "Team1CumBehinds", "Team1CumShots", "Team2Margin", "Team1Margin", "WinMargin", "InFront",
                                                             "Behind", "AbsMargin", "PreAbsMargin", "ScorerMargin", "ScorerStatus", "PreScorerMargin",
                                                             "PreScorerStatus", "ScorerFinalMargin", "LastLeadingTeam", "LeadChangeFlag", "GameTimeSinceLastScore", "TimePercSinceLastScore",
                                                             "LastEvent", "LastScoreTeam")]
# Mark these duplicated rows so they can be told apart from the originals
flipped_score_progression$Status <- "Flipped"
# Recompute Result BEFORE the rename below: $Team1FinalMargin here still holds
# the original team 1's margin, so a positive value means the side about to be
# relabelled as Team2 won.
flipped_score_progression$Result <- ifelse((flipped_score_progression$Team1FinalMargin > 0), "Team 2 win",
                                    ifelse((flipped_score_progression$Team1FinalMargin < 0), "Team 1 win",
                                    ifelse((flipped_score_progression$Team1FinalMargin == 0), "Draw",
                                           NA)))
# Positional rename completes the flip: each swapped column now carries the
# opposite side's label (the order here must mirror the selection order above).
colnames(flipped_score_progression) <- c("GameID", "Quarter", "Dateadj", "Team1", "Team2",
                                         "Date.x", "Event", "Timescore", "Teamscore", "Playerscore",
                                         "PlayerID", "Season", "Status", "ScoresGameID", "Date.y",
                                         "Round", "Team1Score", "Team2Score", "Venue", "Team1Goals",
                                         "Team1Behinds", "Team1Points", "Team2Goals", "Team2Behinds", "Team2Points",
                                         "Team1FinalMargin", "Team2FinalMargin", "AbsFinalMargin", "Result", "Winner", "Loser",
                                         "QtrLength", "RunningQtrLength", "TimePerc", "QtrTimePerc", "GameTime", "Score",
                                         "Team1Worm", "Team2Worm", "WinWorm", "Team1P", "Team1G", "Team1B", "Team1SS",
                                         "Team2P", "Team2G", "Team2B", "Team2SS", "Team1CumPoints",
                                         "Team1CumGoals", "Team1CumBehinds", "Team1CumShots", "Team2CumPoints", "Team2CumGoals",
                                         "Team2CumBehinds", "Team2CumShots", "Team1Margin", "Team2Margin", "WinMargin", "InFront",
                                         "Behind", "AbsMargin", "PreAbsMargin", "ScorerMargin", "ScorerStatus", "PreScorerMargin",
                                         "PreScorerStatus", "ScorerFinalMargin", "LastLeadingTeam", "LeadChangeFlag", "GameTimeSinceLastScore", "TimePercSinceLastScore",
                                         "LastEvent", "LastScoreTeam")
# -----------------------------------------------------------------------------------------------
# 10. Append both data frames together for final output
# -----------------------------------------------------------------------------------------------
# Stack original and flipped rows so every match appears once per team perspective
score_progression_worm <- rbind(original_score_progression, flipped_score_progression)
# Drop intermediate objects; the two *_score_progression frames are the outputs
rm(flipped_score_progression, prescore, qtrlengths, qtrstarts, team1scores, team2scores, raw_score_progression, raw_match_scores)
# -----------------------------------------------------------------------------------------------
# 11. Output CSV files
# -----------------------------------------------------------------------------------------------
# The output directory was previously duplicated in both write.csv calls;
# keep it in one place. NOTE(review): this is a hard-coded machine-specific
# path — consider making it relative to the working directory.
output_dir <- "C:/Local Code/insightlane/score-progression/output_files"
write.csv(original_score_progression, file = file.path(output_dir, "original_score_progression.csv"))
write.csv(score_progression_worm, file = file.path(output_dir, "score_progression_worm.csv"))
| /score_progression_worm.R | no_license | insightlane/score-progression | R | false | false | 26,278 | r | #***********************************************************************************************
# TITLE: AFL match score progression (2001-2019)
#
# DESCRIPTION: Transformation of raw score progression data into useable formats for AFL matches (2008-)
#
# AUTHOR: InsightLane
#
# CREATED: Original 2016
# MODIFIED: Last updated 2021
#
# INPUTS: Time series score data from AFL Tables (https://afltables.com/afl/stats/times.csv)
#
# OUTPUTS: CSV files of enriched score progression data and score progression ScoreWorm data
#
# STEPS:
# 1. Download and extract data from AFL Tables
# 2. Import match scores from AFL Tables big list
# 3. Join match scores onto score progression data
# 4. Transform and cleanse score progression events
# 5. Create new data frame to add rows for the moment before a score
# 6. Create new data frame with lengths of quarter for each game
# 7. Join data frames together
# 8. Create additional columns to describe score progression data
# 9. Duplicate and flip team names to double score progression data to have associated with both teams
# 10. Append both data frames together for final output
# 11. Output CSV files
#
# ***********************************************************************************************
# Load dplyr for the pipe (%>%) and the data-manipulation verbs used throughout
library(dplyr)
# NOTE(review): rm(list = ls()) wipes the user's entire global environment;
# this is an anti-pattern in shared scripts — prefer running in a fresh session.
rm(list = ls())
# -----------------------------------------------------------------------------------------------
# 1. Download and extract data from AFL Tables
# -----------------------------------------------------------------------------------------------
# Historical approach (AFL Tables previously published a zip archive):
#
# data <- download.file("http://afltables.com/public/times.zip", destfile="times.zip")
# dataunz <- unzip("times.zip")

# Download the latest score-progression CSV from AFL Tables into the working directory
download.file("https://afltables.com/afl/stats/times.csv", destfile = "times.csv")
# Read the file that was just downloaded. (Previously this read from a
# hard-coded absolute path, which failed unless the working directory happened
# to be that exact folder.)
raw_score_progression <- read.csv("times.csv",
                                  header = FALSE,
                                  stringsAsFactors = FALSE)
# The raw file has no header row; name the columns positionally
colnames(raw_score_progression) <- c("GameID", "Team1", "Team2", "Date",
                                     "Quarter", "Event", "Timescore", "Teamscore",
                                     "Playerscore", "PlayerID")
# Add parsed event date, season year, and a status flag distinguishing these
# rows from the "flipped" duplicates built later in the script
original_score_progression <- data.frame(raw_score_progression,
                                         Dateadj = as.Date(raw_score_progression$Date, "%d-%b-%Y"),
                                         Season = as.numeric(format(as.Date(raw_score_progression$Date, "%d-%b-%Y"), "%Y")),
                                         Status = "Original")
# Timescore can arrive as text; force numeric (seconds into the quarter)
original_score_progression$Timescore <- as.numeric(original_score_progression$Timescore)
# Normalise the "Kangaroos" alias to "North Melbourne" in every team column
# (previously the same ifelse() was written out three times)
for (team_col in c("Team1", "Team2", "Teamscore")) {
  original_score_progression[[team_col]] <- ifelse(
    original_score_progression[[team_col]] == "Kangaroos",
    "North Melbourne",
    original_score_progression[[team_col]])
}
# -----------------------------------------------------------------------------------------------
# 2. Import match scores from AFL Tables big list
# -----------------------------------------------------------------------------------------------
# Import scores from http://afltables.com/afl/stats/biglists/bg3.txt, using fixed widths
# NOTE(review): the column widths below are hard-coded to the current bg3.txt
# layout and will silently mis-parse if AFL Tables ever changes the formatting.
#rm(Team1scores, awayscores, match_scores)
raw_match_scores <- read.fwf("http://afltables.com/afl/stats/biglists/bg3.txt",
                             skip = 2, header = FALSE,
                             widths = c(7, 17, 5, 18, 17, 18, 18, 18),
                             col.names = c("ScoresGameID","Date","Round","Team1",
                                           "Team1Score","Team2","Team2Score","Venue")
)
# Split scores into goals/behinds/points for both teams
team1scores <- data.frame(do.call('rbind', strsplit(as.character(raw_match_scores$Team1Score),"\\.")))
team2scores <- data.frame(do.call('rbind', strsplit(as.character(raw_match_scores$Team2Score),"\\.")))
# Convert new goals/behinds/points columns into numeric for both teams
team1scores <- data.frame("Team1Goals" = as.numeric(as.character(team1scores$X1)),
"Team1Behinds" = as.numeric(as.character(team1scores$X2)),
"Team1Points" = as.numeric(as.character(team1scores$X3)))
team2scores <- data.frame("Team2Goals" = as.numeric(as.character(team2scores$X1)),
"Team2Behinds" = as.numeric(as.character(team2scores$X2)),
"Team2Points" = as.numeric(as.character(team2scores$X3)))
# Combine goals/behind/points into new data frame
match_scores <- data.frame(raw_match_scores, Dateadj = as.Date(raw_match_scores$Date, "%d-%b-%Y"), team1scores, team2scores)
match_scores$Team1 <- trimws(as.character(match_scores$Team1), which = "right")
match_scores$Team2 <- trimws(as.character(match_scores$Team2), which = "right")
match_scores$Round <- trimws(as.character(match_scores$Round), which = "right")
match_scores$Venue <- trimws(as.character(match_scores$Venue), which = "right")
# Fix GW Sydney to Greater Western Sydney
match_scores$Team1 <- ifelse((match_scores$Team1 == "GW Sydney"), "Greater Western Sydney", match_scores$Team1)
match_scores$Team2 <- ifelse((match_scores$Team2 == "GW Sydney"), "Greater Western Sydney", match_scores$Team2)
match_scores$Team1 <- ifelse((match_scores$Team1 == "Kangaroos"), "North Melbourne", match_scores$Team1)
match_scores$Team2 <- ifelse((match_scores$Team2 == "Kangaroos"), "North Melbourne", match_scores$Team2)
# Create new column for margin (relative to team 1) for each match
match_scores$Team1FinalMargin <- match_scores$Team1Points - match_scores$Team2Points
match_scores$Team2FinalMargin <- -1 * match_scores$Team1FinalMargin
# Create new column for margin (absolute) for each match
match_scores$AbsFinalMargin <- abs(match_scores$Team1Points - match_scores$Team2Points)
# Create new column for result (Team1 win/away win/draw) for each match
match_scores$Result <- ifelse((match_scores$Team1FinalMargin > 0), "Team 1 win",
ifelse((match_scores$Team1FinalMargin < 0), "Team 2 win",
ifelse((match_scores$Team1FinalMargin == 0), "Draw",
NA)))
# Create new column for victor (or draw) for each match
match_scores$Winner <- ifelse((match_scores$Team1FinalMargin > 0), as.character(match_scores$Team1),
ifelse((match_scores$Team1FinalMargin < 0), as.character(match_scores$Team2),
ifelse((match_scores$Team1FinalMargin == 0), "Draw",
NA)))
match_scores$Loser <- ifelse((match_scores$Team1FinalMargin > 0), as.character(match_scores$Team2),
ifelse((match_scores$Team1FinalMargin < 0), as.character(match_scores$Team1),
ifelse((match_scores$Team1FinalMargin == 0), "Draw",
NA)))
# -----------------------------------------------------------------------------------------------
# 3. Join match scores onto score progression data
# -----------------------------------------------------------------------------------------------
original_score_progression <- merge(x = original_score_progression, y = match_scores,
                                    by = c("Dateadj", "Team1", "Team2"),
                                    all.x = TRUE)
# -----------------------------------------------------------------------------------------------
# 4. Transform and cleanse score progression events
# -----------------------------------------------------------------------------------------------
# Rename events as Goal/Behind/Rushed Behind/Final
original_score_progression$Event[original_score_progression$Event == 0] <- "G"
original_score_progression$Event[original_score_progression$Event == 1] <- "B"
original_score_progression$Event[original_score_progression$Event == 2] <- "RB"
original_score_progression$Event[original_score_progression$Event == 3] <- "F"
# Create new data frame to add rows in for start of quarters
# Take copy of all end of quarter events (where Event = F) -- one per game/quarter
qtrstarts <- original_score_progression[(original_score_progression$Event == "F"), ]
# Remove existing row names
rownames(qtrstarts) <- NULL
# Set all times to 0 for start of quarter
qtrstarts$Timescore <- 0
# Set all event codes to "S" (start) for quarter start
qtrstarts$Event <- "S"
# -----------------------------------------------------------------------------------------------
# 5. Create new data frame to add rows for the moment before a score
# -----------------------------------------------------------------------------------------------
# Take copy of all scoring events (where Event = G/B/RB)
prescore <- original_score_progression[(original_score_progression$Event == "G")
                                       | (original_score_progression$Event == "B")
                                       | (original_score_progression$Event == "RB"), ]
# Remove existing row names
rownames(prescore) <- NULL
# Set all times to 1 second before the score
prescore$Timescore <- prescore$Timescore - 1
# Set all event codes to PS for moment before the score
prescore$Event <- "PS"
# Append the synthetic quarter-start ("S") and pre-score ("PS") rows to the real events
original_score_progression <- rbind(original_score_progression, qtrstarts, prescore)
# Temporarily bump quarter-end ("F") times by 1 second so that sorting places
# the "F" row AFTER any score recorded at the same second, then undo the bump.
original_score_progression$Timescore <- ifelse((original_score_progression$Event == "F"), original_score_progression$Timescore + 1, original_score_progression$Timescore)
original_score_progression <- original_score_progression[order(original_score_progression$GameID, original_score_progression$Quarter, original_score_progression$Timescore), ]
original_score_progression$Timescore <- ifelse((original_score_progression$Event == "F"), original_score_progression$Timescore - 1, original_score_progression$Timescore)
rownames(original_score_progression) <- NULL
# -----------------------------------------------------------------------------------------------
# 6. Create new data frame with lengths of quarter for each game
# -----------------------------------------------------------------------------------------------
# One row per (game, quarter): the quarter length (time of the "F" event)
# plus the cumulative length of all quarters played so far in that game.
qtrlengths <- original_score_progression %>%
  filter(Event == "F") %>%
  select(GameID, Quarter, Timescore) %>%
  group_by(GameID) %>%
  mutate(RunningQtrLength = cumsum(Timescore))
# Rename column to QtrLength
names(qtrlengths)[names(qtrlengths) == "Timescore"] <- "QtrLength"
# -----------------------------------------------------------------------------------------------
# 7. Join data frames together
# -----------------------------------------------------------------------------------------------
# Join quarter length onto time score data, then restore event ordering
original_score_progression <- merge(x = original_score_progression, y = qtrlengths,
                                    by = c("GameID", "Quarter"),
                                    all.x = TRUE)
original_score_progression <- original_score_progression[order(original_score_progression$GameID, original_score_progression$Quarter, original_score_progression$Timescore), ]
# -----------------------------------------------------------------------------------------------
# 8. Create additional columns to describe score progression data
# -----------------------------------------------------------------------------------------------
# TimePerc: percentage of the whole match elapsed at the event. Regulation
# quarters (1-4) each contribute 0.25; extra-time periods (5-6) are weighted
# at 0.0625 (= 0.25 / 4) each, on top of the 1.0 of regulation time.
original_score_progression$TimePerc <- ifelse((original_score_progression$Quarter == 1), 0.25*original_score_progression$Timescore/original_score_progression$QtrLength,
                                              ifelse((original_score_progression$Quarter == 2), 0.25 + 0.25*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                     ifelse((original_score_progression$Quarter == 3), 0.5 + 0.25*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                            ifelse((original_score_progression$Quarter == 4), 0.75 + 0.25*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                                   ifelse((original_score_progression$Quarter == 5), 1 + 0.0625*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                                          ifelse((original_score_progression$Quarter == 6), 1.0625 + 0.0625*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                                                 NA))))))
# QtrTimePerc: match-fraction of the time elapsed within the current quarter only.
# The original repeated an identical expression once per quarter (1-4 and 5-6
# were each verbatim duplicates); collapse the duplication. %in% keeps NA /
# out-of-range quarters mapping to NA exactly as the original chain did.
original_score_progression$QtrTimePerc <- ifelse(original_score_progression$Quarter %in% 1:4,
                                                 0.25*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                 ifelse(original_score_progression$Quarter %in% 5:6,
                                                        0.0625*original_score_progression$Timescore/original_score_progression$QtrLength,
                                                        NA))
# GameTime: elapsed seconds from the start of the game (running total of
# completed quarters plus time into the current quarter)
original_score_progression$GameTime <- original_score_progression$RunningQtrLength - original_score_progression$QtrLength + original_score_progression$Timescore
# Add column for points scored for each event (6/1/0)
original_score_progression$Score <- ifelse((original_score_progression$Event == "G"), 6,
                                           ifelse((original_score_progression$Event == "B")|(original_score_progression$Event == "RB"), 1,
                                                  0))
# Create new column for directional individual scores towards/away from team 1
# ("worm" = signed score contribution; positive when Team1 scored)
original_score_progression$Team1Worm <- ifelse((original_score_progression$Team1 == original_score_progression$Teamscore),
                                               original_score_progression$Score,
                                               original_score_progression$Score * -1)
original_score_progression$Team2Worm <- -1 * original_score_progression$Team1Worm
# WinWorm: worm from the perspective of the eventual winner (NA for draws)
original_score_progression$WinWorm <- ifelse((original_score_progression$Result == "Team 1 win"),
                                             original_score_progression$Team1Worm,
                                             ifelse((original_score_progression$Result == "Team 2 win"),
                                                    original_score_progression$Team2Worm,
                                                    NA))
# Create new column for individual team 1 scores: points (P), goals (G),
# behinds incl. rushed (B) and scoring shots (SS) per event
original_score_progression$Team1P <- ifelse((original_score_progression$Team1 == original_score_progression$Teamscore),
                                            original_score_progression$Score, 0)
original_score_progression$Team1G <- ifelse((original_score_progression$Team1 == original_score_progression$Teamscore)
                                            & (original_score_progression$Event == "G"),
                                            1, 0)
original_score_progression$Team1B <- ifelse((original_score_progression$Team1 == original_score_progression$Teamscore)
                                            & (original_score_progression$Event == "B" | original_score_progression$Event == "RB"),
                                            1, 0)
original_score_progression$Team1SS <- ifelse((original_score_progression$Team1 == original_score_progression$Teamscore)
                                             & (original_score_progression$Event == "G" | original_score_progression$Event == "B" | original_score_progression$Event == "RB"),
                                             1, 0)
# Create new column for individual away team scores (same breakdown for team 2)
original_score_progression$Team2P <- ifelse((original_score_progression$Team2 == original_score_progression$Teamscore),
                                            original_score_progression$Score, 0)
original_score_progression$Team2G <- ifelse((original_score_progression$Team2 == original_score_progression$Teamscore)
                                            & (original_score_progression$Event == "G"),
                                            1, 0)
original_score_progression$Team2B <- ifelse((original_score_progression$Team2 == original_score_progression$Teamscore)
                                            & (original_score_progression$Event == "B" | original_score_progression$Event == "RB"),
                                            1, 0)
original_score_progression$Team2SS <- ifelse((original_score_progression$Team2 == original_score_progression$Teamscore)
                                             & (original_score_progression$Event == "G" | original_score_progression$Event == "B" | original_score_progression$Event == "RB"),
                                             1, 0)
# Create new column for cumulative team 1 score, away score and team 1 margin,
# plus a set of per-event state columns (who is in front, lead changes, time
# since last score, ...). All cumulative sums are per game; the time-since
# columns are per game AND per quarter.
# NOTE: lag(..., 2) is used throughout to step back over the synthetic "PS"
# (pre-score) row that was inserted one second before every scoring event,
# so it refers to the previous REAL event -- presumably this is why most
# offsets are 2 rather than 1 (confirm against the section-5 row insertion).
original_score_progression <- original_score_progression %>%
  group_by(GameID) %>%
  mutate(Team1CumPoints = cumsum(Team1P)) %>%
  mutate(Team1CumGoals = cumsum(Team1G)) %>%
  mutate(Team1CumBehinds = cumsum(Team1B)) %>%
  mutate(Team1CumShots = cumsum(Team1SS)) %>%
  mutate(Team2CumPoints = cumsum(Team2P)) %>%
  mutate(Team2CumGoals = cumsum(Team2G)) %>%
  mutate(Team2CumBehinds = cumsum(Team2B)) %>%
  mutate(Team2CumShots = cumsum(Team2SS)) %>%
  mutate(Team1Margin = cumsum(Team1Worm)) %>%
  mutate(Team2Margin = -1 * Team1Margin) %>%
  # Running margin from the eventual winner's perspective (NA for draws)
  mutate(WinMargin = ifelse(Team1FinalMargin > 0, Team1Margin,
                            ifelse(Team1FinalMargin < 0, -1 * Team1Margin,
                                   NA))) %>%
  mutate(InFront = ifelse(Team1Margin > 0, Team1,
                          ifelse(Team1Margin < 0, Team2,
                                 "Scores level"))) %>%
  mutate(Behind = ifelse(Team1Margin > 0, Team2,
                         ifelse(Team1Margin < 0, Team1,
                                "Scores level"))) %>%
  mutate(AbsMargin = abs(Team1Margin)) %>%
  mutate(PreAbsMargin = lag(AbsMargin, 2)) %>%
  # Margin/status from the perspective of the team that scored this event
  mutate(ScorerMargin = ifelse(Teamscore == Team1, Team1Margin,
                               ifelse(Teamscore == Team2, Team2Margin,
                                      NA))) %>%
  mutate(ScorerStatus = ifelse(ScorerMargin > 0, "In front",
                               ifelse(ScorerMargin < 0, "Behind",
                                      ifelse(ScorerMargin == 0, "Level",
                                             NA)))) %>%
  mutate(PreScorerMargin = ifelse(Teamscore == Team1, Team1Margin - Team1P,
                                  ifelse(Teamscore == Team2, Team2Margin - Team2P,
                                         NA))) %>%
  mutate(PreScorerStatus = ifelse(PreScorerMargin > 0, "In front",
                                  ifelse(PreScorerMargin < 0, "Behind",
                                         ifelse(PreScorerMargin == 0, "Level",
                                                NA)))) %>%
  mutate(ScorerFinalMargin = ifelse(Teamscore == Team1, Team1FinalMargin,
                                    ifelse(Teamscore == Team2, Team2FinalMargin,
                                           NA))) %>%
  # Last team that held the lead: look back 2 (previous real event); if the
  # scores were level then, look back 4; a second pass clears any remaining
  # "Scores level" values by stepping back a further 2 rows.
  mutate(LastLeadingTeam = ifelse(lag(InFront, 2) == "Scores level",
                                  lag(InFront, 4),
                                  lag(InFront, 2))
  ) %>%
  mutate(LastLeadingTeam = ifelse(LastLeadingTeam == "Scores level",
                                  lag(LastLeadingTeam, 2),
                                  LastLeadingTeam)
  ) %>%
  # Flag a lead change only on real scoring rows (not PS/S/F) that leave a team in front
  mutate(LeadChangeFlag = ifelse((InFront != LastLeadingTeam) & Event != "PS" & Event != "S" & Event != "F" & InFront != "Scores level",
                                 1,
                                 0)
  ) %>%
  mutate(LeadChangeFlag = ifelse(is.na(LeadChangeFlag), 0, LeadChangeFlag)
  ) %>%
  ungroup() %>%
  group_by(GameID, Quarter) %>%
  # For "F" rows the immediately preceding row is already a real event, so
  # only step back 1; scoring rows step back 2 over their own "PS" row.
  mutate(GameTimeSinceLastScore = ifelse((Event == "F"),
                                         (GameTime - lag(GameTime, 1)),
                                         (GameTime - lag(GameTime, 2)))) %>%
  mutate(TimePercSinceLastScore = ifelse((Event == "F"),
                                         (TimePerc - lag(TimePerc, 1)),
                                         (TimePerc - lag(TimePerc, 2)))) %>%
  mutate(LastEvent = lag(Event, 2)) %>%
  mutate(LastScoreTeam = ifelse(Teamscore == lag(Teamscore, 2), "Same", "Other"))
# -----------------------------------------------------------------------------------------------
# 9. Duplicate and flip team names to double score progression data to have associated with both teams
# -----------------------------------------------------------------------------------------------
# Select the original columns in team-swapped order (Team2 columns where Team1
# columns will end up and vice versa); the actual renaming happens below.
flipped_score_progression <- original_score_progression[ , c("GameID", "Quarter", "Dateadj", "Team2", "Team1",
                                                             "Date.x", "Event", "Timescore", "Teamscore", "Playerscore",
                                                             "PlayerID", "Season", "Status", "ScoresGameID", "Date.y",
                                                             "Round", "Team2Score", "Team1Score", "Venue", "Team2Goals",
                                                             "Team2Behinds", "Team2Points", "Team1Goals", "Team1Behinds", "Team1Points",
                                                             "Team2FinalMargin", "Team1FinalMargin", "AbsFinalMargin", "Result", "Winner", "Loser",
                                                             "QtrLength", "RunningQtrLength", "TimePerc", "QtrTimePerc", "GameTime", "Score",
                                                             "Team2Worm", "Team1Worm", "WinWorm", "Team2P", "Team2G", "Team2B", "Team2SS",
                                                             "Team1P", "Team1G", "Team1B", "Team1SS", "Team2CumPoints",
                                                             "Team2CumGoals", "Team2CumBehinds", "Team2CumShots", "Team1CumPoints", "Team1CumGoals",
                                                             "Team1CumBehinds", "Team1CumShots", "Team2Margin", "Team1Margin", "WinMargin", "InFront",
                                                             "Behind", "AbsMargin", "PreAbsMargin", "ScorerMargin", "ScorerStatus", "PreScorerMargin",
                                                             "PreScorerStatus", "ScorerFinalMargin", "LastLeadingTeam", "LeadChangeFlag", "GameTimeSinceLastScore", "TimePercSinceLastScore",
                                                             "LastEvent", "LastScoreTeam")]
flipped_score_progression$Status <- "Flipped"
# NOTE: at this point the columns still carry their ORIGINAL names (selection
# keeps names), so Team1FinalMargin below is still the original Team1's margin;
# a positive value therefore correctly becomes "Team 2 win" after the flip.
flipped_score_progression$Result <- ifelse((flipped_score_progression$Team1FinalMargin > 0), "Team 2 win",
                                           ifelse((flipped_score_progression$Team1FinalMargin < 0), "Team 1 win",
                                                  ifelse((flipped_score_progression$Team1FinalMargin == 0), "Draw",
                                                         NA)))
# Now rename so that the swapped columns read as Team1/Team2 again
colnames(flipped_score_progression) <- c("GameID", "Quarter", "Dateadj", "Team1", "Team2",
                                         "Date.x", "Event", "Timescore", "Teamscore", "Playerscore",
                                         "PlayerID", "Season", "Status", "ScoresGameID", "Date.y",
                                         "Round", "Team1Score", "Team2Score", "Venue", "Team1Goals",
                                         "Team1Behinds", "Team1Points", "Team2Goals", "Team2Behinds", "Team2Points",
                                         "Team1FinalMargin", "Team2FinalMargin", "AbsFinalMargin", "Result", "Winner", "Loser",
                                         "QtrLength", "RunningQtrLength", "TimePerc", "QtrTimePerc", "GameTime", "Score",
                                         "Team1Worm", "Team2Worm", "WinWorm", "Team1P", "Team1G", "Team1B", "Team1SS",
                                         "Team2P", "Team2G", "Team2B", "Team2SS", "Team1CumPoints",
                                         "Team1CumGoals", "Team1CumBehinds", "Team1CumShots", "Team2CumPoints", "Team2CumGoals",
                                         "Team2CumBehinds", "Team2CumShots", "Team1Margin", "Team2Margin", "WinMargin", "InFront",
                                         "Behind", "AbsMargin", "PreAbsMargin", "ScorerMargin", "ScorerStatus", "PreScorerMargin",
                                         "PreScorerStatus", "ScorerFinalMargin", "LastLeadingTeam", "LeadChangeFlag", "GameTimeSinceLastScore", "TimePercSinceLastScore",
                                         "LastEvent", "LastScoreTeam")
# -----------------------------------------------------------------------------------------------
# 10. Append both data frames together for final output
# -----------------------------------------------------------------------------------------------
score_progression_worm <- rbind(original_score_progression, flipped_score_progression)
rm(flipped_score_progression, prescore, qtrlengths, qtrstarts, team1scores, team2scores, raw_score_progression, raw_match_scores)
# -----------------------------------------------------------------------------------------------
# 11. Output CSV files
# -----------------------------------------------------------------------------------------------
# NOTE(review): these output paths are machine-specific absolute paths --
# consider making them relative to the working directory.
write.csv(original_score_progression, file="C:/Local Code/insightlane/score-progression/output_files/original_score_progression.csv")
write.csv(score_progression_worm, file="C:/Local Code/insightlane/score-progression/output_files/score_progression_worm.csv")
|
# convert
# Converts per-exon rows (one row per exon; gene id in column V10, exon
# start/end coordinates in V4/V5) into per-gene summary columns resembling
# BED12 block fields: number of exons, comma-separated exon lengths and
# comma-separated exon starts.
# NOTE(review): the per-gene summary is written onto the LAST exon row of the
# previous gene (index i-1) when the gene id changes, and exon.starts[-2]
# drops the second recorded start value -- confirm these match the intended
# BED12 output before relying on this script.
# Setup dataframe columns for new variables
exon.lengths=NULL
exon.starts=NULL
meta.exons$exons.start=rep(NA)
meta.exons$No.exons=rep(NA)
meta.exons$exon.lengths=rep(NA)
gene.id=NULL
for(i in 1:nrow(meta.exons)){
  # If it's the first entry, record the gene.id
  if(i ==1){gene.id=meta.exons$V10[i]}
  if(meta.exons$V10[i]==gene.id){
    # set the first exon start site to zero for each gene
    if(length(exon.starts) < 1){ exon.starts <- append(exon.starts,'0')}
    # calculate the length of each exon
    exon.lengths <- append(exon.lengths,as.character((meta.exons$V5[i]-meta.exons$V4[i])))
    # record the start site of each exon
    exon.starts <- append(exon.starts,meta.exons$V4[i])
  } else{
    # when the gene.id changes, it's a new gene - so summarize the previous gene's information
    meta.exons$No.exons[i-1] <- as.character(length(exon.lengths))
    meta.exons$exon.lengths[i-1]<- paste(as.character(exon.lengths),collapse=", ")
    exon.starts<-exon.starts[-2]
    meta.exons$exons.start[i-1] <- paste(as.character(exon.starts),collapse=", ")
    # For the next gene set all vectors to NULL
    exon.starts=NULL; exon.lengths=NULL;
    gene.id=meta.exons$V10[i]
    # set gene.id to the new gene
    if(meta.exons$V10[i]==gene.id){
      # for the first entry - set the exon length, no of exons and start to 0.
      if(length(exon.starts) < 1){ exon.starts <- append(exon.starts,'0')}
      exon.starts <- append(exon.starts,meta.exons$V4[i])
      exon.lengths <- append(exon.lengths,as.character((meta.exons$V5[i]-meta.exons$V4[i])))
      # for the last entry - (i-1 is invalid) so we hack.
      if(i == nrow(meta.exons)){
        exon.starts = NULL
        exon.lengths = NULL
        exon.starts <- append(exon.starts,meta.exons$V4[i])
        exon.lengths <- append(exon.lengths,as.character((meta.exons$V5[i]-meta.exons$V4[i])))
        meta.exons$No.exons[i] <- as.character(length(exon.lengths))
        meta.exons$exon.lengths[i]<- paste(as.character(exon.lengths),collapse=", ")
        exon.starts<-exon.starts[-2]
        meta.exons$exons.start[i] <- paste(as.character(exon.starts),collapse=", ")
      }
    }
  }
}
# Placeholder columns filled with zeros (extra BED fields)
meta.exons$blank=rep(0)
meta.exons$blank2=rep(0)
| /gtf2bed.R | no_license | GlastonburyC/gtf_to_BED12 | R | false | false | 2,082 | r |
# convert
# Converts per-exon rows (one row per exon; gene id in column V10, exon
# start/end coordinates in V4/V5) into per-gene summary columns resembling
# BED12 block fields: number of exons, comma-separated exon lengths and
# comma-separated exon starts.
# NOTE(review): the per-gene summary is written onto the LAST exon row of the
# previous gene (index i-1) when the gene id changes, and exon.starts[-2]
# drops the second recorded start value -- confirm these match the intended
# BED12 output before relying on this script.
# Setup dataframe columns for new variables
exon.lengths=NULL
exon.starts=NULL
meta.exons$exons.start=rep(NA)
meta.exons$No.exons=rep(NA)
meta.exons$exon.lengths=rep(NA)
gene.id=NULL
for(i in 1:nrow(meta.exons)){
  # If it's the first entry, record the gene.id
  if(i ==1){gene.id=meta.exons$V10[i]}
  if(meta.exons$V10[i]==gene.id){
    # set the first exon start site to zero for each gene
    if(length(exon.starts) < 1){ exon.starts <- append(exon.starts,'0')}
    # calculate the length of each exon
    exon.lengths <- append(exon.lengths,as.character((meta.exons$V5[i]-meta.exons$V4[i])))
    # record the start site of each exon
    exon.starts <- append(exon.starts,meta.exons$V4[i])
  } else{
    # when the gene.id changes, it's a new gene - so summarize the previous gene's information
    meta.exons$No.exons[i-1] <- as.character(length(exon.lengths))
    meta.exons$exon.lengths[i-1]<- paste(as.character(exon.lengths),collapse=", ")
    exon.starts<-exon.starts[-2]
    meta.exons$exons.start[i-1] <- paste(as.character(exon.starts),collapse=", ")
    # For the next gene set all vectors to NULL
    exon.starts=NULL; exon.lengths=NULL;
    gene.id=meta.exons$V10[i]
    # set gene.id to the new gene
    if(meta.exons$V10[i]==gene.id){
      # for the first entry - set the exon length, no of exons and start to 0.
      if(length(exon.starts) < 1){ exon.starts <- append(exon.starts,'0')}
      exon.starts <- append(exon.starts,meta.exons$V4[i])
      exon.lengths <- append(exon.lengths,as.character((meta.exons$V5[i]-meta.exons$V4[i])))
      # for the last entry - (i-1 is invalid) so we hack.
      if(i == nrow(meta.exons)){
        exon.starts = NULL
        exon.lengths = NULL
        exon.starts <- append(exon.starts,meta.exons$V4[i])
        exon.lengths <- append(exon.lengths,as.character((meta.exons$V5[i]-meta.exons$V4[i])))
        meta.exons$No.exons[i] <- as.character(length(exon.lengths))
        meta.exons$exon.lengths[i]<- paste(as.character(exon.lengths),collapse=", ")
        exon.starts<-exon.starts[-2]
        meta.exons$exons.start[i] <- paste(as.character(exon.starts),collapse=", ")
      }
    }
  }
}
# Placeholder columns filled with zeros (extra BED fields)
meta.exons$blank=rep(0)
meta.exons$blank2=rep(0)
|
# Derive FIFA-style composite ratings (pace, shooting, passing, dribbling,
# defending, physicality and goalkeeping) as weighted sums of the raw
# attribute columns, truncated to integers. All inputs are raw columns, so
# the derivations are independent of each other and can live in one mutate().
MergeCompleteData <- MergeCompleteData %>%
  mutate(
    Pace        = as.integer(0.45 * Acceleration + 0.55 * Speed),
    Shooting    = as.integer(0.05 * Attacking_Position + 0.45 * Finishing + 0.20 * Shot_Power +
                               0.20 * Long_Shots + 0.05 * Volleys + 0.05 * Penalties),
    Passing     = as.integer(0.2 * Vision + 0.2 * Crossing + 0.05 * Freekick_Accuracy +
                               0.35 * Short_Pass + 0.15 * Long_Pass + 0.05 * Curve),
    Dribblingx  = as.integer(0.1 * Agility + 0.05 * Balance + 0.05 * Reactions +
                               0.3 * Ball_Control + 0.5 * Dribbling),
    Defending   = as.integer(0.2 * Interceptions + 0.1 * Heading + 0.3 * Marking +
                               0.3 * Standing_Tackle + 0.1 * Sliding_Tackle),
    Physicality = as.integer(0.05 * Jumping + 0.25 * Stamina + 0.5 * Strength + 0.2 * Aggression),
    GK_Score    = as.integer((GK_Positioning + GK_Diving + GK_Kicking +
                                GK_Handling + GK_Reflexes) / 5)
  )
| /loaddata/manipData.R | no_license | quace/BusinessIntelligence | R | false | false | 1,080 | r | #PACE
# Derive FIFA-style composite ratings (pace, shooting, passing, dribbling,
# defending, physicality and goalkeeping) as weighted sums of the raw
# attribute columns, truncated to integers. All inputs are raw columns, so
# the derivations are independent of each other and can live in one mutate().
MergeCompleteData <- MergeCompleteData %>%
  mutate(
    Pace        = as.integer(0.45 * Acceleration + 0.55 * Speed),
    Shooting    = as.integer(0.05 * Attacking_Position + 0.45 * Finishing + 0.20 * Shot_Power +
                               0.20 * Long_Shots + 0.05 * Volleys + 0.05 * Penalties),
    Passing     = as.integer(0.2 * Vision + 0.2 * Crossing + 0.05 * Freekick_Accuracy +
                               0.35 * Short_Pass + 0.15 * Long_Pass + 0.05 * Curve),
    Dribblingx  = as.integer(0.1 * Agility + 0.05 * Balance + 0.05 * Reactions +
                               0.3 * Ball_Control + 0.5 * Dribbling),
    Defending   = as.integer(0.2 * Interceptions + 0.1 * Heading + 0.3 * Marking +
                               0.3 * Standing_Tackle + 0.1 * Sliding_Tackle),
    Physicality = as.integer(0.05 * Jumping + 0.25 * Stamina + 0.5 * Strength + 0.2 * Aggression),
    GK_Score    = as.integer((GK_Positioning + GK_Diving + GK_Kicking +
                                GK_Handling + GK_Reflexes) / 5)
  )
|
library(leaflet)
library(RColorBrewer)
library(DT)

# Shiny app exploring the `malaysia` data set (expected in the calling
# environment) on an interactive leaflet map, with filter controls overlaid
# on the map and a data table of the currently filtered rows.
ui <- bootstrapPage(
  tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
  leafletOutput("map", width = "100%", height = "100%"),
  # Filter / display controls overlaid on the top-right of the map
  absolutePanel(top = 10, right = 10,
                sliderInput("range", "Magnitudes", min(malaysia$AMOUNT_TOTAL), max(malaysia$AMOUNT_TOTAL),
                            value = range(malaysia$AMOUNT_TOTAL), step = 0.1
                ),
                selectInput("location", "Location",
                            c('All',levels(malaysia$LOCATION))
                ),
                selectInput("family", "Family",
                            c('All',levels(malaysia$FAMILY))
                ),
                selectInput("species", "Species",
                            c('All',levels(malaysia$SPECIES))
                ),
                sliderInput("size", "Size of points", 1, 1000,
                            value = 2 , step = 10
                ),
                selectInput("colors", "Color Scheme",
                            rownames(subset(brewer.pal.info, category %in% c("seq", "div")))
                ),
                checkboxInput("legend", "Show legend", TRUE)
  ),
  absolutePanel(top = 10, left = 10,
                DT::dataTableOutput('x1')
  )
)
server <- function(input, output, session) {
  # Reactive expression for the data subsetted to what the user selected
  filteredData <- reactive({
    data = malaysia[malaysia$AMOUNT_TOTAL >= input$range[1] & malaysia$AMOUNT_TOTAL <= input$range[2],]
    if (input$family != "All") {
      data = data[data$FAMILY == input$family,]
    }
    if (input$location != "All") {
      data = data[data$LOCATION == input$location,]
    }
    if (input$species != "All") {
      data = data[data$SPECIES == input$species,]
    }
    data
  })
  # Data table showing the currently filtered rows
  output$x1 <- DT::renderDataTable(DT::datatable({
    data <- filteredData()
    data
  }))
  # Palette reacts to the chosen color scheme, scaled over the FULL data range
  # (not just the filtered subset), so colors stay stable while filtering
  colorpal <- reactive({
    colorNumeric(input$colors, malaysia$AMOUNT_TOTAL)
  })
  output$map <- renderLeaflet({
    # Static part of the map; dynamic layers are managed via leafletProxy below
    leaflet(malaysia) %>% addTiles() %>%
      fitBounds(~min(LONGITUDE), ~min(LATITUDE), ~max(LONGITUDE), ~max(LATITUDE))
  })
  observe({
    pal <- colorpal()
    leafletProxy("map", data = filteredData()) %>%
      clearShapes() %>%
      # BUG FIX: the original passed weight = input$scale, but no "scale"
      # control exists in the UI, so leaflet always received NULL and fell
      # back to its default stroke weight of 5; make that explicit.
      addCircles(radius = input$size, weight = 5, color = "#777777",
                 fillColor = ~pal(AMOUNT_TOTAL), fillOpacity = 0.7, popup = ~SPECIES
      )
  })
  # Add/remove the legend according to the checkbox
  observe({
    proxy <- leafletProxy("map", data = malaysia)
    proxy %>% clearControls()
    if (input$legend) {
      pal <- colorpal()
      proxy %>% addLegend(position = "bottomright",
                          pal = pal, values = ~AMOUNT_TOTAL
      )
    }
  })
}
shinyApp(ui, server)
| /shiny.R | no_license | franciscorichter/Borneo | R | false | false | 2,782 | r | library(leaflet)
library(RColorBrewer)
library(DT)

# Shiny app exploring the `malaysia` data set (expected in the calling
# environment) on an interactive leaflet map, with filter controls overlaid
# on the map and a data table of the currently filtered rows.
ui <- bootstrapPage(
  tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
  leafletOutput("map", width = "100%", height = "100%"),
  # Filter / display controls overlaid on the top-right of the map
  absolutePanel(top = 10, right = 10,
                sliderInput("range", "Magnitudes", min(malaysia$AMOUNT_TOTAL), max(malaysia$AMOUNT_TOTAL),
                            value = range(malaysia$AMOUNT_TOTAL), step = 0.1
                ),
                selectInput("location", "Location",
                            c('All',levels(malaysia$LOCATION))
                ),
                selectInput("family", "Family",
                            c('All',levels(malaysia$FAMILY))
                ),
                selectInput("species", "Species",
                            c('All',levels(malaysia$SPECIES))
                ),
                sliderInput("size", "Size of points", 1, 1000,
                            value = 2 , step = 10
                ),
                selectInput("colors", "Color Scheme",
                            rownames(subset(brewer.pal.info, category %in% c("seq", "div")))
                ),
                checkboxInput("legend", "Show legend", TRUE)
  ),
  absolutePanel(top = 10, left = 10,
                DT::dataTableOutput('x1')
  )
)
server <- function(input, output, session) {
  # Reactive expression for the data subsetted to what the user selected
  filteredData <- reactive({
    data = malaysia[malaysia$AMOUNT_TOTAL >= input$range[1] & malaysia$AMOUNT_TOTAL <= input$range[2],]
    if (input$family != "All") {
      data = data[data$FAMILY == input$family,]
    }
    if (input$location != "All") {
      data = data[data$LOCATION == input$location,]
    }
    if (input$species != "All") {
      data = data[data$SPECIES == input$species,]
    }
    data
  })
  # Data table showing the currently filtered rows
  output$x1 <- DT::renderDataTable(DT::datatable({
    data <- filteredData()
    data
  }))
  # Palette reacts to the chosen color scheme, scaled over the FULL data range
  # (not just the filtered subset), so colors stay stable while filtering
  colorpal <- reactive({
    colorNumeric(input$colors, malaysia$AMOUNT_TOTAL)
  })
  output$map <- renderLeaflet({
    # Static part of the map; dynamic layers are managed via leafletProxy below
    leaflet(malaysia) %>% addTiles() %>%
      fitBounds(~min(LONGITUDE), ~min(LATITUDE), ~max(LONGITUDE), ~max(LATITUDE))
  })
  observe({
    pal <- colorpal()
    leafletProxy("map", data = filteredData()) %>%
      clearShapes() %>%
      # BUG FIX: the original passed weight = input$scale, but no "scale"
      # control exists in the UI, so leaflet always received NULL and fell
      # back to its default stroke weight of 5; make that explicit.
      addCircles(radius = input$size, weight = 5, color = "#777777",
                 fillColor = ~pal(AMOUNT_TOTAL), fillOpacity = 0.7, popup = ~SPECIES
      )
  })
  # Add/remove the legend according to the checkbox
  observe({
    proxy <- leafletProxy("map", data = malaysia)
    proxy %>% clearControls()
    if (input$legend) {
      pal <- colorpal()
      proxy %>% addLegend(position = "bottomright",
                          pal = pal, values = ~AMOUNT_TOTAL
      )
    }
  })
}
shinyApp(ui, server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marble_plot.R
\name{marble_plot}
\alias{marble_plot}
\title{marble_plot}
\usage{
marble_plot(df, alpha_level = 0.4)
}
\arguments{
\item{df}{A data frame resulting from calling the marble_game function}
\item{alpha_level}{A number between 0 and 1 representing the opacity of plotted lines. Default value is 0.4}
}
\value{
A ggplot object plotting the results of a marble_game() call
}
\description{
marble_plot
}
| /man/marble_plot.Rd | no_license | alspur/marblr | R | false | true | 490 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marble_plot.R
\name{marble_plot}
\alias{marble_plot}
\title{marble_plot}
\usage{
marble_plot(df, alpha_level = 0.4)
}
\arguments{
\item{df}{A data frame resulting from calling the marble_game function}
\item{alpha_level}{A number between 0 and 1 representing the opacity of plotted lines. Default value is 0.4}
}
\value{
A ggplot object plotting the results of a marble_game() call
}
\description{
marble_plot
}
|
# Used in BASiCS_VarianceDecomp
# Decomposes per-gene expression variability into technical and biological
# shares from the posterior draws stored in a BASiCS_Chain object.
#
# Args:
#   Chain: a BASiCS_Chain object (validated below) whose @parameters slot
#          holds MCMC draws for mu, delta, theta, s and (optionally) phi.
#
# Returns a list of N (draws) x q.bio (genes) matrices:
#   - TechVarGlobal / BioVarGlobal: shares computed using cell-wise medians
#     across ALL cells (theta summarised as the row-median across batches).
#   - TechVarBatch / BioVarBatch: batch-specific shares, included only when
#     more than one batch is present.
HiddenVarDecomp <- function(Chain)
{
  if (!is(Chain, "BASiCS_Chain"))
    stop("'Chain' is not a BASiCS_Chain class object.")
  N <- nrow(Chain@parameters$delta)      # number of MCMC draws
  q.bio <- ncol(Chain@parameters$delta)  # number of biological genes
  UniqueBatch <- colnames(Chain@parameters$theta)
  nBatch <- length(UniqueBatch)
  CellName <- colnames(Chain@parameters$s)
  # Per-draw theta: median across batches when several batches are present
  if (nBatch > 1) { Theta <- matrixStats::rowMedians(Chain@parameters$theta) }
  else { Theta <- as.vector(Chain@parameters$theta) }
  # To store global values (uses median values across all cells);
  # phi is only present for chains run with spike-ins
  if("phi" %in% names(Chain@parameters))
  {
    PhiS <- matrixStats::rowMedians(Chain@parameters$phi * Chain@parameters$s)
  }
  else
  {
    PhiS <- matrixStats::rowMedians(Chain@parameters$s)
  }
  # Aux = 1 / (phi*s*mu) + delta*(theta + 1); the denominator building block
  # of the variance shares computed below
  Aux <- (1/(PhiS * Chain@parameters$mu)) + Chain@parameters$delta * (Theta + 1)
  TechVarGlobal <- Theta/(Aux + Theta)
  BioVarGlobal <- (Chain@parameters$delta * (Theta + 1))/(Aux + Theta)
  # To store batch specific values (in arrays)
  TechVarBatch <- array(0, dim = c(N, q.bio, nBatch))  # Technical
  BioVarBatch <- array(0, dim = c(N, q.bio, nBatch))  # Biological
  if (nBatch > 1)
  {
    for (Batch in seq_len(nBatch))
    {
      # Restrict s (and phi) to the cells belonging to this batch
      SBatch <- Chain@parameters$s[, grep(UniqueBatch[Batch], CellName)]
      if("phi" %in% names(Chain@parameters))
      {
        PhiBatch <- Chain@parameters$phi[, grep(UniqueBatch[Batch], CellName)]
        PhiSBatch <- matrixStats::rowMedians(PhiBatch * SBatch)
      }
      else
      {
        PhiSBatch <- matrixStats::rowMedians(SBatch)
      }
      # Same share formulas as above, but with the batch-specific theta column
      Aux <- (1/(PhiSBatch * Chain@parameters$mu)) +
        Chain@parameters$delta * (Chain@parameters$theta[,Batch] + 1)
      TechVarBatch[,,Batch] <-
        Chain@parameters$theta[,Batch] / (Aux + Chain@parameters$theta[,Batch])
      BioVarBatch[,,Batch] <-
        (Chain@parameters$delta * (Chain@parameters$theta[,Batch] + 1)) /
        (Aux + Chain@parameters$theta[,Batch])
    }
  }
  if (nBatch > 1)
  {
    list(TechVarGlobal = TechVarGlobal,
         BioVarGlobal = BioVarGlobal,
         TechVarBatch = TechVarBatch,
         BioVarBatch = BioVarBatch)
  }
  else { list(TechVarGlobal = TechVarGlobal, BioVarGlobal = BioVarGlobal) }
}
| /R/HiddenVarDecomp.R | no_license | Shians/BASiCS | R | false | false | 2,259 | r | # Used in BASiCS_VarianceDecomp
# Used in BASiCS_VarianceDecomp.
# Decomposes per-gene expression variability into technical and biological
# shares from the posterior draws stored in a BASiCS_Chain object.
#
# Args:
#   Chain: a BASiCS_Chain object (validated below) whose @parameters slot
#          holds MCMC draws for mu, delta, theta, s and (optionally) phi.
#
# Returns a list of N (draws) x q.bio (genes) matrices:
#   - TechVarGlobal / BioVarGlobal: shares computed using cell-wise medians
#     across ALL cells (theta summarised as the row-median across batches).
#   - TechVarBatch / BioVarBatch: batch-specific shares, included only when
#     more than one batch is present.
HiddenVarDecomp <- function(Chain)
{
  if (!is(Chain, "BASiCS_Chain"))
    stop("'Chain' is not a BASiCS_Chain class object.")
  N <- nrow(Chain@parameters$delta)      # number of MCMC draws
  q.bio <- ncol(Chain@parameters$delta)  # number of biological genes
  UniqueBatch <- colnames(Chain@parameters$theta)
  nBatch <- length(UniqueBatch)
  CellName <- colnames(Chain@parameters$s)
  # Per-draw theta: median across batches when several batches are present
  if (nBatch > 1) { Theta <- matrixStats::rowMedians(Chain@parameters$theta) }
  else { Theta <- as.vector(Chain@parameters$theta) }
  # To store global values (uses median values across all cells);
  # phi is only present for chains run with spike-ins
  if("phi" %in% names(Chain@parameters))
  {
    PhiS <- matrixStats::rowMedians(Chain@parameters$phi * Chain@parameters$s)
  }
  else
  {
    PhiS <- matrixStats::rowMedians(Chain@parameters$s)
  }
  # Aux = 1 / (phi*s*mu) + delta*(theta + 1); the denominator building block
  # of the variance shares computed below
  Aux <- (1/(PhiS * Chain@parameters$mu)) + Chain@parameters$delta * (Theta + 1)
  TechVarGlobal <- Theta/(Aux + Theta)
  BioVarGlobal <- (Chain@parameters$delta * (Theta + 1))/(Aux + Theta)
  # To store batch specific values (in arrays)
  TechVarBatch <- array(0, dim = c(N, q.bio, nBatch))  # Technical
  BioVarBatch <- array(0, dim = c(N, q.bio, nBatch))  # Biological
  if (nBatch > 1)
  {
    for (Batch in seq_len(nBatch))
    {
      # Restrict s (and phi) to the cells belonging to this batch
      SBatch <- Chain@parameters$s[, grep(UniqueBatch[Batch], CellName)]
      if("phi" %in% names(Chain@parameters))
      {
        PhiBatch <- Chain@parameters$phi[, grep(UniqueBatch[Batch], CellName)]
        PhiSBatch <- matrixStats::rowMedians(PhiBatch * SBatch)
      }
      else
      {
        PhiSBatch <- matrixStats::rowMedians(SBatch)
      }
      # Same share formulas as above, but with the batch-specific theta column
      Aux <- (1/(PhiSBatch * Chain@parameters$mu)) +
        Chain@parameters$delta * (Chain@parameters$theta[,Batch] + 1)
      TechVarBatch[,,Batch] <-
        Chain@parameters$theta[,Batch] / (Aux + Chain@parameters$theta[,Batch])
      BioVarBatch[,,Batch] <-
        (Chain@parameters$delta * (Chain@parameters$theta[,Batch] + 1)) /
        (Aux + Chain@parameters$theta[,Batch])
    }
  }
  if (nBatch > 1)
  {
    list(TechVarGlobal = TechVarGlobal,
         BioVarGlobal = BioVarGlobal,
         TechVarBatch = TechVarBatch,
         BioVarBatch = BioVarBatch)
  }
  else { list(TechVarGlobal = TechVarGlobal, BioVarGlobal = BioVarGlobal) }
}
|
# Create vector objects holding the address components.
state <- c("Rajasthan", "Bihar", "Hariyana", "Gujrat")
city <- c("raila", "Bhilwara", "raipur", "mandal")
zipcode <- c(33602, 98104, 06161, 80294)

# Bind the vectors column-wise into a character matrix.
# BUG FIX: the original referenced an undefined object `State`; the
# variable defined above is `state`.
addresses <- cbind(city, state, zipcode)
cat("# # # # The data frame ")
print(addresses)

# Extra rows to append. Column names must match `addresses`
# (city/state/zipcode) so rbind() can align the columns; the original
# used `State` here, which would not have matched.
new.address <- data.frame(
  city = c("Lowry", "Charloate"),
  state = c("mandal", "raipur"),
  zipcode = c("80230", "33949"),
  stringsAsFactors = FALSE
)
print(new.address)

# Append the new rows; mixing a matrix and a data frame dispatches to
# rbind.data.frame, which matches columns by name.
all.addresses <- rbind(addresses, new.address)
print(all.addresses)
| /Data_frames_2.r | no_license | chandraprakashh/R_python | R | false | false | 532 | r | #create vector objects.
state <- c("Rajasthan","Bihar","Hariyana","Gujrat")
city <- c("raila","Bhilwara","raipur","mandal")
zipcode <- c(33602,98104,06161,80294)
addresses <- cbind(city,State,zipcode)
cat("# # # # The data frame ")
print(addresses)
new.address <- data.frame(
city = c("Lowry", "Charloate"),
State = c("mandal", "raipur"),
zipcode = c("80230","33949"),
stringsAsFactors = FALSE
)
print(new.address)
all.addresses <- rbind(addresses,new.address)
print(all.addresses)
|
#' Plot brain parcellations
#'
#' \code{ggseg} plots and returns a ggplot object of plotted
#' aparc areas.
#' @author Athanasia Mowinckel and Didac Pineiro
#'
#' @param .data A .data.frame to use for plot aesthetics. Should include a
#' column called "area" corresponding to aparc areas.
#'
#' @param atlas Either a string with the name of atlas to use,
#' or a .data.frame containing atlas information (i.e. pre-loaded atlas).
#' @param ... other options sent to ggplot2::geom_polygon for plotting, including
#' mapping aes (cannot include x, y, and group aethetics).
#' @param hemisphere String to choose hemisphere to plot. Any of c("left","right")[default].
#' @param view String to choose view of the .data. Any of c("lateral","medial")[default].
#' @param position String choosing how to view the .data. Either "dispersed"[default] or "stacked".
#' @param adapt_scales if \code{TRUE}, then the axes will
#' be hemisphere without ticks. If \code{FALSE}, then will be latitude
#' longitude values. Also affected by \code{position} argument
#'
#' @details
#' \describe{
#'
#' \item{`dkt`}{
#' The Desikan-Killiany Cortical Atlas [default], Freesurfer cortical segmentations.}
#'
#' \item{`aseg`}{
#' Freesurfer automatic subcortical segmentation of a brain volume}
#'
#' }
#'
#' @return a ggplot object
#'
#' @import ggplot2
#' @importFrom dplyr select group_by summarise_at vars funs mutate filter full_join distinct summarise case_when
#' @importFrom tidyr unite_ unnest
#' @importFrom magrittr "%>%"
#' @importFrom stats na.omit sd
#'
#' @examples
#' library(ggplot2)
#' ggseg()
#' ggseg(mapping=aes(fill=area))
#' ggseg(colour="black", size=.7, mapping=aes(fill=area)) + theme_void()
#' ggseg(position = "stacked")
#' ggseg(adapt_scales = FALSE)
#'
#' @seealso [ggplot2][ggplot2::ggplot], [aes][ggplot2::aes],
#' [geom_polygon][ggplot2::geom_polygon], [coord_fixed][ggplot2::coord_fixed]
#'
#' @export
ggseg = function(.data = NULL,
                 atlas = "dkt",
                 position = "dispersed",
                 view = NULL,
                 hemisphere = NULL,
                 adapt_scales = TRUE,
                 ...){

  # Grab the atlas, even if it has been provided as character string
  # (a string is resolved to an object of that name via get()).
  geobrain <- if(!is.character(atlas)){
    atlas
  }else{
    get(atlas)
  }

  # Coerce plain data frames to the ggseg_atlas class so the
  # unnest()/stacking logic below can rely on its list-column layout.
  if(!is_ggseg_atlas(geobrain)){
    warning("This is not a ggseg_atlas-class. Attempting to convert with `as_ggseg_atlas()`")
    geobrain <- as_ggseg_atlas(geobrain)
  }

  # Expand the nested polygon coordinates (one row per vertex).
  geobrain <- unnest(geobrain, ggseg)

  # Normalise the free-text `position` argument to one of two layouts;
  # anything unrecognised is flagged as "unknown" and handled below.
  stack <- case_when(
    grepl("stack", position) ~ "stacked",
    grepl("disperse", position) ~ "dispersed",
    TRUE ~ "unknown"
  )

  if(stack == "stacked"){
    # Stacking requires both medial and lateral views in the atlas.
    if(any(!geobrain %>% dplyr::select(side) %>% unique %>% unlist() %in% c("medial","lateral"))){
      warning("Cannot stack atlas. Check if atlas has medial views.")
    }else{
      geobrain <- stack_brain(geobrain)
    } # If possible to stack
  }else if(stack == "unknown"){
    warning(paste0("Cannot recognise position = '", position,
                   "'. Please use either 'stacked' or 'dispersed', returning dispersed.")
    )
    stack <- "dispersed"
  } # If stacked

  # Remove .data we don't want to plot
  if(!is.null(hemisphere)) geobrain <- dplyr::filter(geobrain, hemi %in% hemisphere)
  if(!is.null(view)) geobrain <- dplyr::filter(geobrain, side %in% view)

  # If .data has been supplied, merge it (adds the user's columns so
  # they can be mapped to aesthetics such as fill).
  if(!is.null(.data)){
    geobrain <- data_merge(.data, geobrain)
  }

  # Create the plot: one polygon per region (.id), positioned by the
  # atlas coordinates; coord_fixed keeps the brain's aspect ratio.
  gg <- ggplot2::ggplot(data = geobrain, ggplot2::aes(x=.long, y=.lat, group=.id)) +
    ggplot2::geom_polygon(...) +
    ggplot2::coord_fixed()

  # Scales may be adapted, for more convenient viewing (hemisphere
  # labels instead of raw latitude/longitude values).
  if(adapt_scales){
    gg <- gg +
      scale_y_brain(geobrain, stack) +
      scale_x_brain(geobrain, stack) +
      scale_labs_brain(geobrain, stack)
  }

  gg + theme_brain()
}

## quiets concerns of R CMD check about non-standard-evaluation names
if(getRversion() >= "2.15.1"){
  utils::globalVariables(c(".data","dkt"))
}
| /R/ggseg.R | permissive | richardbeare/ggseg | R | false | false | 3,979 | r | #' Plot brain parcellations
#'
#' \code{ggseg} plots and returns a ggplot object of plotted
#' aparc areas.
#' @author Athanasia Mowinckel and Didac Pineiro
#'
#' @param .data A .data.frame to use for plot aesthetics. Should include a
#' column called "area" corresponding to aparc areas.
#'
#' @param atlas Either a string with the name of atlas to use,
#' or a .data.frame containing atlas information (i.e. pre-loaded atlas).
#' @param ... other options sent to ggplot2::geom_polygon for plotting, including
#' mapping aes (cannot include x, y, and group aethetics).
#' @param hemisphere String to choose hemisphere to plot. Any of c("left","right")[default].
#' @param view String to choose view of the .data. Any of c("lateral","medial")[default].
#' @param position String choosing how to view the .data. Either "dispersed"[default] or "stacked".
#' @param adapt_scales if \code{TRUE}, then the axes will
#' be hemisphere without ticks. If \code{FALSE}, then will be latitude
#' longitude values. Also affected by \code{position} argument
#'
#' @details
#' \describe{
#'
#' \item{`dkt`}{
#' The Desikan-Killiany Cortical Atlas [default], Freesurfer cortical segmentations.}
#'
#' \item{`aseg`}{
#' Freesurfer automatic subcortical segmentation of a brain volume}
#'
#' }
#'
#' @return a ggplot object
#'
#' @import ggplot2
#' @importFrom dplyr select group_by summarise_at vars funs mutate filter full_join distinct summarise case_when
#' @importFrom tidyr unite_ unnest
#' @importFrom magrittr "%>%"
#' @importFrom stats na.omit sd
#'
#' @examples
#' library(ggplot2)
#' ggseg()
#' ggseg(mapping=aes(fill=area))
#' ggseg(colour="black", size=.7, mapping=aes(fill=area)) + theme_void()
#' ggseg(position = "stacked")
#' ggseg(adapt_scales = FALSE)
#'
#' @seealso [ggplot2][ggplot2::ggplot], [aes][ggplot2::aes],
#' [geom_polygon][ggplot2::geom_polygon], [coord_fixed][ggplot2::coord_fixed]
#'
#' @export
ggseg = function(.data = NULL,
                 atlas = "dkt",
                 position = "dispersed",
                 view = NULL,
                 hemisphere = NULL,
                 adapt_scales = TRUE,
                 ...){

  # Grab the atlas, even if it has been provided as character string
  # (a string is resolved to an object of that name via get()).
  geobrain <- if(!is.character(atlas)){
    atlas
  }else{
    get(atlas)
  }

  # Coerce plain data frames to the ggseg_atlas class so the
  # unnest()/stacking logic below can rely on its list-column layout.
  if(!is_ggseg_atlas(geobrain)){
    warning("This is not a ggseg_atlas-class. Attempting to convert with `as_ggseg_atlas()`")
    geobrain <- as_ggseg_atlas(geobrain)
  }

  # Expand the nested polygon coordinates (one row per vertex).
  geobrain <- unnest(geobrain, ggseg)

  # Normalise the free-text `position` argument to one of two layouts;
  # anything unrecognised is flagged as "unknown" and handled below.
  stack <- case_when(
    grepl("stack", position) ~ "stacked",
    grepl("disperse", position) ~ "dispersed",
    TRUE ~ "unknown"
  )

  if(stack == "stacked"){
    # Stacking requires both medial and lateral views in the atlas.
    if(any(!geobrain %>% dplyr::select(side) %>% unique %>% unlist() %in% c("medial","lateral"))){
      warning("Cannot stack atlas. Check if atlas has medial views.")
    }else{
      geobrain <- stack_brain(geobrain)
    } # If possible to stack
  }else if(stack == "unknown"){
    warning(paste0("Cannot recognise position = '", position,
                   "'. Please use either 'stacked' or 'dispersed', returning dispersed.")
    )
    stack <- "dispersed"
  } # If stacked

  # Remove .data we don't want to plot
  if(!is.null(hemisphere)) geobrain <- dplyr::filter(geobrain, hemi %in% hemisphere)
  if(!is.null(view)) geobrain <- dplyr::filter(geobrain, side %in% view)

  # If .data has been supplied, merge it (adds the user's columns so
  # they can be mapped to aesthetics such as fill).
  if(!is.null(.data)){
    geobrain <- data_merge(.data, geobrain)
  }

  # Create the plot: one polygon per region (.id), positioned by the
  # atlas coordinates; coord_fixed keeps the brain's aspect ratio.
  gg <- ggplot2::ggplot(data = geobrain, ggplot2::aes(x=.long, y=.lat, group=.id)) +
    ggplot2::geom_polygon(...) +
    ggplot2::coord_fixed()

  # Scales may be adapted, for more convenient viewing (hemisphere
  # labels instead of raw latitude/longitude values).
  if(adapt_scales){
    gg <- gg +
      scale_y_brain(geobrain, stack) +
      scale_x_brain(geobrain, stack) +
      scale_labs_brain(geobrain, stack)
  }

  gg + theme_brain()
}

## quiets concerns of R CMD check about non-standard-evaluation names
if(getRversion() >= "2.15.1"){
  utils::globalVariables(c(".data","dkt"))
}
|
# Attach the annotation framework; assumed to supply create_handler()
# and register_annotation_handler() used below -- TODO confirm.
# NOTE(review): library() calls inside package code are discouraged;
# consider declaring annotatr in Imports and using annotatr:: instead.
library(annotatr)

# Namespace string under which this package registers its handlers.
namespace <- function() "contractr"

# Handler for "argument-type" annotations. match_datatype and
# insert_argument_type_contract are presumably this package's matcher /
# inserter functions; the "once" and TRUE arguments are handler options
# whose semantics are defined by create_handler -- verify there.
argument_type_handler <- create_handler("argument-type",
                                        match_datatype,
                                        insert_argument_type_contract,
                                        "once",
                                        TRUE)

# Handler for "return-type" annotations, mirroring the argument handler.
return_type_handler <- create_handler("return-type",
                                      match_datatype,
                                      insert_return_type_contract,
                                      "once",
                                      TRUE)

# Standard R package hook, run when the package is attached: registers
# the handlers for function formals (arguments) and function bodies
# (return values) under this package's namespace.
.onAttach <- function(libname, pkgname) {
    register_annotation_handler(namespace(),
                                "function_formals",
                                argument_type_handler)
    register_annotation_handler(namespace(),
                                "function_body",
                                return_type_handler)
}
| /R/contractr.R | no_license | aviralg/contractr | R | false | false | 933 | r | library(annotatr)
namespace <- function() "contractr"
argument_type_handler <- create_handler("argument-type",
match_datatype,
insert_argument_type_contract,
"once",
TRUE)
return_type_handler <- create_handler("return-type",
match_datatype,
insert_return_type_contract,
"once",
TRUE)
.onAttach <- function(libname, pkgname) {
register_annotation_handler(namespace(),
"function_formals",
argument_type_handler)
register_annotation_handler(namespace(),
"function_body",
return_type_handler)
}
|
source(file = "Maps/RPSEGPLANCidades.R")
source(file = "Maps/MicrorregiãoCidades.R")
ArrangeCidadesMicroRPSEGPLAN <- ggarrange(MicrorregiaoCidades, RPSEGPLANcidades, align = "h") | /Arranges/Cidades da Microrregião e RPSEGPLAN.R | no_license | supervedovatto/AnexoA | R | false | false | 180 | r | source(file = "Maps/RPSEGPLANCidades.R")
source(file = "Maps/MicrorregiãoCidades.R")
ArrangeCidadesMicroRPSEGPLAN <- ggarrange(MicrorregiaoCidades, RPSEGPLANcidades, align = "h") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.glmSparseNet.R
\docType{methods}
\name{cv.glmSparseNet.mclapply,SummarizedExperiment-method}
\alias{cv.glmSparseNet.mclapply,SummarizedExperiment-method}
\title{Calculate GLM model with network-based regularization}
\usage{
\S4method{cv.glmSparseNet.mclapply}{SummarizedExperiment}(xdata, ydata,
network, network.options = network.options.default(), ...)
}
\description{
Calculate GLM model with network-based regularization
}
| /man/cv.glmSparseNet.mclapply-SummarizedExperiment-method.Rd | no_license | averissimo/glmSparseNetPaper | R | false | true | 510 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.glmSparseNet.R
\docType{methods}
\name{cv.glmSparseNet.mclapply,SummarizedExperiment-method}
\alias{cv.glmSparseNet.mclapply,SummarizedExperiment-method}
\title{Calculate GLM model with network-based regularization}
\usage{
\S4method{cv.glmSparseNet.mclapply}{SummarizedExperiment}(xdata, ydata,
network, network.options = network.options.default(), ...)
}
\description{
Calculate GLM model with network-based regularization
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dpca.filters.R
\name{dpca.filters}
\alias{dpca.filters}
\title{Compute DPCA filter coefficients}
\usage{
dpca.filters(F, Ndpc = dim(F$operators)[1], q = 30)
}
\arguments{
\item{F}{\eqn{(d\times d)} spectral density matrix, provided as an object of class \code{freqdom}.}
\item{Ndpc}{an integer \eqn{\in\{1,\ldots, d\}}. It is the number of dynamic principal
components to be computed. By default it is set equal to \eqn{d}.}
\item{q}{a non-negative integer. DPCA filter coefficients at lags \eqn{|h|\leq} \code{q} will be computed.}
}
\value{
An object of class \code{timedom}. The list has the following components:
\itemize{
\item \code{operators} \eqn{\quad} an array. Each matrix in this array has dimension \code{Ndpc} \eqn{\times d} and is
assigned to a certain lag. For a given lag \eqn{k}, the rows of the matrix correspond to
\eqn{\phi_{\ell k}}.
\item \code{lags} \eqn{\quad} a vector with the lags of the filter coefficients.
}
}
\description{
For a given spectral density matrix dynamic principal component filter sequences are computed.
}
\details{
Dynamic principal components are linear filters \eqn{(\phi_{\ell k}\colon k\in \mathbf{Z})},
\eqn{1 \leq \ell \leq d}. They are defined as the Fourier coefficients of the dynamic eigenvector
\eqn{\varphi_\ell(\omega)} of a spectral density matrix \eqn{\mathcal{F}_\omega}:
\deqn{
\phi_{\ell k}:=\frac{1}{2\pi}\int_{-\pi}^\pi \varphi_\ell(\omega) \exp(-ik\omega) d\omega.
}
The index \eqn{\ell} refers to the \eqn{\ell}-th largest dynamic eigenvalue. Since the \eqn{\phi_{\ell k}} are
real, we have \deqn{
\phi_{\ell k}^\prime=\phi_{\ell k}^*=\frac{1}{2\pi}\int_{-\pi}^\pi \varphi_\ell^* \exp(ik\omega)d\omega.
}
For a given
spectral density (provided as on object of class \code{freqdom}) the function
\code{dpca.filters()} computes \eqn{(\phi_{\ell k})} for \eqn{|k| \leq} \code{q} and \eqn{1 \leq \ell \leq} \code{Ndpc}.
For more details we refer to Chapter 9 in Brillinger (2001), Chapter 7.8 in Shumway and
Stoffer (2006) and to Hormann et al. (2015).
}
\references{
Hormann, S., Kidzinski, L., and Hallin, M.
\emph{Dynamic functional principal components.} Journal of the Royal
Statistical Society: Series B (Statistical Methodology) 77.2 (2015): 319-348.
Brillinger, D.
\emph{Time Series} (2001), SIAM, San Francisco.
Shumway, R.H., and Stoffer, D.S.
\emph{Time Series Analysis and Its Applications} (2006), Springer, New York.
}
\seealso{
\code{\link{dpca.var}}, \code{\link{dpca.scores}}, \code{\link{dpca.KLexpansion}}
}
\keyword{DPCA}
| /man/dpca.filters.Rd | no_license | kidzik/freqdom | R | false | true | 2,602 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dpca.filters.R
\name{dpca.filters}
\alias{dpca.filters}
\title{Compute DPCA filter coefficients}
\usage{
dpca.filters(F, Ndpc = dim(F$operators)[1], q = 30)
}
\arguments{
\item{F}{\eqn{(d\times d)} spectral density matrix, provided as an object of class \code{freqdom}.}
\item{Ndpc}{an integer \eqn{\in\{1,\ldots, d\}}. It is the number of dynamic principal
components to be computed. By default it is set equal to \eqn{d}.}
\item{q}{a non-negative integer. DPCA filter coefficients at lags \eqn{|h|\leq} \code{q} will be computed.}
}
\value{
An object of class \code{timedom}. The list has the following components:
\itemize{
\item \code{operators} \eqn{\quad} an array. Each matrix in this array has dimension \code{Ndpc} \eqn{\times d} and is
assigned to a certain lag. For a given lag \eqn{k}, the rows of the matrix correspond to
\eqn{\phi_{\ell k}}.
\item \code{lags} \eqn{\quad} a vector with the lags of the filter coefficients.
}
}
\description{
For a given spectral density matrix dynamic principal component filter sequences are computed.
}
\details{
Dynamic principal components are linear filters \eqn{(\phi_{\ell k}\colon k\in \mathbf{Z})},
\eqn{1 \leq \ell \leq d}. They are defined as the Fourier coefficients of the dynamic eigenvector
\eqn{\varphi_\ell(\omega)} of a spectral density matrix \eqn{\mathcal{F}_\omega}:
\deqn{
\phi_{\ell k}:=\frac{1}{2\pi}\int_{-\pi}^\pi \varphi_\ell(\omega) \exp(-ik\omega) d\omega.
}
The index \eqn{\ell} refers to the \eqn{\ell}-th largest dynamic eigenvalue. Since the \eqn{\phi_{\ell k}} are
real, we have \deqn{
\phi_{\ell k}^\prime=\phi_{\ell k}^*=\frac{1}{2\pi}\int_{-\pi}^\pi \varphi_\ell^* \exp(ik\omega)d\omega.
}
For a given
spectral density (provided as on object of class \code{freqdom}) the function
\code{dpca.filters()} computes \eqn{(\phi_{\ell k})} for \eqn{|k| \leq} \code{q} and \eqn{1 \leq \ell \leq} \code{Ndpc}.
For more details we refer to Chapter 9 in Brillinger (2001), Chapter 7.8 in Shumway and
Stoffer (2006) and to Hormann et al. (2015).
}
\references{
Hormann, S., Kidzinski, L., and Hallin, M.
\emph{Dynamic functional principal components.} Journal of the Royal
Statistical Society: Series B (Statistical Methodology) 77.2 (2015): 319-348.
Brillinger, D.
\emph{Time Series} (2001), SIAM, San Francisco.
Shumway, R.H., and Stoffer, D.S.
\emph{Time Series Analysis and Its Applications} (2006), Springer, New York.
}
\seealso{
\code{\link{dpca.var}}, \code{\link{dpca.scores}}, \code{\link{dpca.KLexpansion}}
}
\keyword{DPCA}
|
#
# Whole origin data set download from below link:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
#
#Libraries used
library(tidyr)
library(dplyr)
library(stringr)
#Functions and constants
#For adding activity name into y_train and y_test data frames
# Translate activity label codes into activity names.
#
# Vectorised replacement for the original lookup loop, which grew the
# result with c() on every iteration (O(n^2)) and hard-wired the global
# `activity_labels`. Behaviour is unchanged: unknown labels map to ""
# exactly as toString(character(0)) did before. Assumes act_label
# values are unique (true for activity_labels.txt).
#
# @param x      Vector of activity label codes (e.g. y_train/y_test values).
# @param labels Lookup table with columns `act_label` and `act_name`;
#               defaults to the global `activity_labels`, so existing
#               calls gen_col_by_cell(x) keep working.
# @return Character vector of activity names, same length as `x`.
gen_col_by_cell <- function(x, labels = activity_labels) {
  idx <- match(x, labels$act_label)
  y <- as.character(labels$act_name[idx])
  y[is.na(idx)] <- ""  # unmatched labels -> empty string, as before
  y
}
#Constants
#Identifier for data from train or test
train_flag="1"
test_flag="0"
#Read data from "./samsung/"
##Read activity_labels.txt into Data frame:activity_labels
activity_labels<- read.table("samsung/activity_labels.txt",col.names = c("act_label","act_name"))
##Read features.txt into Data frame:features
features<- read.table("samsung/features.txt")
##Read X_train.txt into Data frame:xtrain_data,and use vector data in 2nd column of feature as columns' name
xtrain_data <- read.table("samsung/train/X_train.txt",col.names=features$V2)
##Read subject_train.txt.txt into Data frame:subject_train
subject_train<- read.table("samsung/train/subject_train.txt",col.names = c("subject"))
##Read y_train.txt into Data frame:y_train
y_train<- read.table("samsung/train/y_train.txt",col.names = c("activity_label"))
##Read body_acc_x_train.txt into Data frame:body_acc_x_train, and add new column:train_test as identifier
body_acc_x_train<- read.table("samsung/train/Inertial Signals/body_acc_x_train.txt")
body_acc_x_train$train_test <- rep(train_flag,nrow(body_acc_x_train))
##Read body_acc_y_train.txt into Data frame:body_acc_y_train, and add new column:train_test as identifier
body_acc_y_train<- read.table("samsung/train/Inertial Signals/body_acc_y_train.txt")
body_acc_y_train$train_test <- rep(train_flag,nrow(body_acc_y_train))
##Read body_acc_z_train.txt into Data frame:body_acc_z_train, and add new column:train_test as identifier
body_acc_z_train<- read.table("samsung/train/Inertial Signals/body_acc_z_train.txt")
body_acc_z_train$train_test <- rep(train_flag,nrow(body_acc_z_train))
##Read body_gyro_x_train.txt into Data frame:body_gyro_x_train, and add new column:train_test as identifier
body_gyro_x_train<- read.table("samsung/train/Inertial Signals/body_gyro_x_train.txt")
body_gyro_x_train$train_test <- rep(train_flag,nrow(body_gyro_x_train))
##Read body_gyro_y_train.txt into Data frame:body_gyro_y_train, and add new column:train_test as identifier
body_gyro_y_train<- read.table("samsung/train/Inertial Signals/body_gyro_y_train.txt")
body_gyro_y_train$train_test <- rep(train_flag,nrow(body_gyro_y_train))
##Read body_gyro_z_train.txt into Data frame:body_gyro_z_train, and add new column:train_test as identifier
body_gyro_z_train<- read.table("samsung/train/Inertial Signals/body_gyro_z_train.txt")
body_gyro_z_train$train_test <- rep(train_flag,nrow(body_gyro_z_train))
##Read total_acc_x_train.txt into Data frame:total_acc_x_train, and add new column:train_test as identifier
total_acc_x_train<- read.table("samsung/train/Inertial Signals/total_acc_x_train.txt")
total_acc_x_train$train_test <- rep(train_flag,nrow(total_acc_x_train))
##Read total_acc_y_train.txt into Data frame:total_acc_y_train, and add new column:train_test as identifier
total_acc_y_train<- read.table("samsung/train/Inertial Signals/total_acc_y_train.txt")
total_acc_y_train$train_test <- rep(train_flag,nrow(total_acc_y_train))
##Read total_acc_z_train.txt into Data frame:total_acc_z_train, and add new column:train_test as identifier
total_acc_z_train<- read.table("samsung/train/Inertial Signals/total_acc_z_train.txt")
total_acc_z_train$train_test <- rep(train_flag,nrow(total_acc_z_train))
##Read X_test.txt into Data frame:xtest_data,and use vector data in 2nd column of feature as columns' name
xtest_data <- read.table("samsung/test/X_test.txt",col.names=features$V2)
##Read subject_test.txt.txt into Data frame:subject_test
subject_test<- read.table("samsung/test/subject_test.txt",col.names = c("subject"))
##Read y_test.txt.txt into Data frame:y_test
y_test<- read.table("samsung/test/y_test.txt",col.names = c("activity_label"))
##Read body_acc_x_test.txt into Data frame:body_acc_x_test, and add new column:train_test as identifier
body_acc_x_test<- read.table("samsung/test/Inertial Signals/body_acc_x_test.txt")
body_acc_x_test$train_test <- rep(test_flag,nrow(body_acc_x_test))
##Read body_acc_y_test.txt into Data frame:body_acc_y_test, and add new column:train_test as identifier
body_acc_y_test<- read.table("samsung/test/Inertial Signals/body_acc_y_test.txt")
body_acc_y_test$train_test <- rep(test_flag,nrow(body_acc_y_test))
##Read body_acc_z_test.txt into Data frame:body_acc_z_test, and add new column:train_test as identifier
body_acc_z_test<- read.table("samsung/test/Inertial Signals/body_acc_z_test.txt")
body_acc_z_test$train_test <- rep(test_flag,nrow(body_acc_z_test))
##Read body_gyro_x_test.txt into Data frame:body_gyro_x_test, and add new column:train_test as identifier
body_gyro_x_test<- read.table("samsung/test/Inertial Signals/body_gyro_x_test.txt")
body_gyro_x_test$train_test <- rep(test_flag,nrow(body_gyro_x_test))
##Read body_gyro_y_test.txt into Data frame:body_gyro_y_test, and add new column:train_test as identifier
body_gyro_y_test<- read.table("samsung/test/Inertial Signals/body_gyro_y_test.txt")
body_gyro_y_test$train_test <- rep(test_flag,nrow(body_gyro_y_test))
##Read body_gyro_z_test.txt into Data frame:body_gyro_z_test, and add new column:train_test as identifier
body_gyro_z_test<- read.table("samsung/test/Inertial Signals/body_gyro_z_test.txt")
body_gyro_z_test$train_test <- rep(test_flag,nrow(body_gyro_z_test))
##Read total_acc_x_test.txt into Data frame:total_acc_x_test, and add new column:train_test as identifier
total_acc_x_test<- read.table("samsung/test/Inertial Signals/total_acc_x_test.txt")
total_acc_x_test$train_test <- rep(test_flag,nrow(total_acc_x_test))
##Read total_acc_y_test.txt into Data frame:total_acc_y_test, and add new column:train_test as identifier
total_acc_y_test<- read.table("samsung/test/Inertial Signals/total_acc_y_test.txt")
total_acc_y_test$train_test <- rep(test_flag,nrow(total_acc_y_test))
##Read total_acc_z_test.txt into Data frame:total_acc_z_test, and add new column:train_test as identifier
total_acc_z_test<- read.table("samsung/test/Inertial Signals/total_acc_z_test.txt")
total_acc_z_test$train_test <- rep(test_flag,nrow(total_acc_z_test))
#Combine data from train and test
##Create a new column "train_test" for y_train as identifire for train data or test data
y_train$train_test <- rep(train_flag,nrow(y_train))
##Create a new column "train_test" for y_test as identifire for train data or test data
y_test$train_test <- rep(test_flag,nrow(y_test))
##combine y_train and y_test
y_all <- rbind(y_train,y_test)
##combine subject_train and subject_test
subject_all <- rbind(subject_train,subject_test)
##combine xtrain_data and xtest_data
xall_data <- rbind(xtrain_data,xtest_data)
##combine measurements from train and test
body_acc_x_all <- rbind(body_acc_x_train,body_acc_x_test)
body_acc_y_all <- rbind(body_acc_y_train,body_acc_y_test)
body_acc_z_all <- rbind(body_acc_z_train,body_acc_z_test)
body_gyro_x_all <- rbind(body_gyro_x_train,body_gyro_x_test)
body_gyro_y_all <- rbind(body_gyro_y_train,body_gyro_y_test)
body_gyro_z_all <- rbind(body_gyro_z_train,body_gyro_z_test)
total_acc_x_all <- rbind(total_acc_x_train,total_acc_x_test)
total_acc_y_all <- rbind(total_acc_y_train,total_acc_y_test)
total_acc_z_all <- rbind(total_acc_z_train,total_acc_z_test)
#Add mean,std columns to those measurement files
body_acc_x_all[[make.names(c("tBodyAcc-mean()-X"), unique = TRUE)]]<-xall_data$tBodyAcc.mean...X
body_acc_x_all[[make.names(c("tBodyAcc-std()-X"), unique = TRUE)]]<-xall_data$tBodyAcc.std...X
body_acc_y_all[[make.names(c("tBodyAcc-mean()-Y"), unique = TRUE)]]<-xall_data$tBodyAcc.mean...Y
body_acc_y_all[[make.names(c("tBodyAcc-std()-Y"), unique = TRUE)]]<-xall_data$tBodyAcc.std...Y
body_acc_z_all[[make.names(c("tBodyAcc-mean()-Z"), unique = TRUE)]]<-xall_data$tBodyAcc.mean...Z
body_acc_z_all[[make.names(c("tBodyAcc-std()-Z"), unique = TRUE)]]<-xall_data$tBodyAcc.std...Z
body_gyro_x_all[[make.names(c("tBodyGyro-mean()-X"), unique = TRUE)]]<-xall_data$tBodyGyro.mean...X
body_gyro_x_all[[make.names(c("tBodyGyro-std()-X"), unique = TRUE)]]<-xall_data$tBodyGyro.std...X
body_gyro_y_all[[make.names(c("tBodyGyro-mean()-Y"), unique = TRUE)]]<-xall_data$tBodyGyro.mean...Y
body_gyro_y_all[[make.names(c("tBodyGyro-std()-Y"), unique = TRUE)]]<-xall_data$tBodyGyro.std...Y
body_gyro_z_all[[make.names(c("tBodyGyro-mean()-Z"), unique = TRUE)]]<-xall_data$tBodyGyro.mean...Z
body_gyro_z_all[[make.names(c("tBodyGyro-std()-Z"), unique = TRUE)]]<-xall_data$tBodyGyro.std...Z
#Create a new column "activity_name"
y_all$activity_name<-gen_col_by_cell(y_all$activity_label)
##merge y_all and subject_all into subject_acitivity_all
subject_activity_all<-bind_cols(subject_all, y_all)
##merge subject_activity_all with xall_data
subject_activity_features_all<-bind_cols(subject_activity_all, xall_data)
#Create a tidy data set with the average of each variable for each activity and each subject
tidy_data<-aggregate( subject_activity_features_all[,5:565], subject_activity_features_all[,1:2], FUN = mean )
#Add activity name to tidy_data
a_name <- gen_col_by_cell(tidy_data$activity_label)
tidy_data<-as.data.frame(append(tidy_data, list(activity_name = a_name), after = 2))
#Write tidy_data to tidy_data.csv
write.csv(tidy_data, file = "tidy_data.csv")
| /run_analysis.R | no_license | chrisyxsh/Human-Activity-Recognition | R | false | false | 9,700 | r | #
# Whole origin data set download from below link:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
#
#Libraries used
library(tidyr)
library(dplyr)
library(stringr)
#Functions and constants
#For adding activity name into y_train and y_test data frames
# Translate activity label codes into activity names.
#
# Vectorised replacement for the original lookup loop, which grew the
# result with c() on every iteration (O(n^2)) and hard-wired the global
# `activity_labels`. Behaviour is unchanged: unknown labels map to ""
# exactly as toString(character(0)) did before. Assumes act_label
# values are unique (true for activity_labels.txt).
#
# @param x      Vector of activity label codes (e.g. y_train/y_test values).
# @param labels Lookup table with columns `act_label` and `act_name`;
#               defaults to the global `activity_labels`, so existing
#               calls gen_col_by_cell(x) keep working.
# @return Character vector of activity names, same length as `x`.
gen_col_by_cell <- function(x, labels = activity_labels) {
  idx <- match(x, labels$act_label)
  y <- as.character(labels$act_name[idx])
  y[is.na(idx)] <- ""  # unmatched labels -> empty string, as before
  y
}
#Constants
#Identifier for data from train or test
train_flag="1"
test_flag="0"
#Read data from "./samsung/"
##Read activity_labels.txt into Data frame:activity_labels
activity_labels<- read.table("samsung/activity_labels.txt",col.names = c("act_label","act_name"))
##Read features.txt into Data frame:features
features<- read.table("samsung/features.txt")
##Read X_train.txt into Data frame:xtrain_data,and use vector data in 2nd column of feature as columns' name
xtrain_data <- read.table("samsung/train/X_train.txt",col.names=features$V2)
##Read subject_train.txt.txt into Data frame:subject_train
subject_train<- read.table("samsung/train/subject_train.txt",col.names = c("subject"))
##Read y_train.txt into Data frame:y_train
y_train<- read.table("samsung/train/y_train.txt",col.names = c("activity_label"))
##Read body_acc_x_train.txt into Data frame:body_acc_x_train, and add new column:train_test as identifier
body_acc_x_train<- read.table("samsung/train/Inertial Signals/body_acc_x_train.txt")
body_acc_x_train$train_test <- rep(train_flag,nrow(body_acc_x_train))
##Read body_acc_y_train.txt into Data frame:body_acc_y_train, and add new column:train_test as identifier
body_acc_y_train<- read.table("samsung/train/Inertial Signals/body_acc_y_train.txt")
body_acc_y_train$train_test <- rep(train_flag,nrow(body_acc_y_train))
##Read body_acc_z_train.txt into Data frame:body_acc_z_train, and add new column:train_test as identifier
body_acc_z_train<- read.table("samsung/train/Inertial Signals/body_acc_z_train.txt")
body_acc_z_train$train_test <- rep(train_flag,nrow(body_acc_z_train))
##Read body_gyro_x_train.txt into Data frame:body_gyro_x_train, and add new column:train_test as identifier
body_gyro_x_train<- read.table("samsung/train/Inertial Signals/body_gyro_x_train.txt")
body_gyro_x_train$train_test <- rep(train_flag,nrow(body_gyro_x_train))
##Read body_gyro_y_train.txt into Data frame:body_gyro_y_train, and add new column:train_test as identifier
body_gyro_y_train<- read.table("samsung/train/Inertial Signals/body_gyro_y_train.txt")
body_gyro_y_train$train_test <- rep(train_flag,nrow(body_gyro_y_train))
##Read body_gyro_z_train.txt into Data frame:body_gyro_z_train, and add new column:train_test as identifier
body_gyro_z_train<- read.table("samsung/train/Inertial Signals/body_gyro_z_train.txt")
body_gyro_z_train$train_test <- rep(train_flag,nrow(body_gyro_z_train))
##Read total_acc_x_train.txt into Data frame:total_acc_x_train, and add new column:train_test as identifier
total_acc_x_train<- read.table("samsung/train/Inertial Signals/total_acc_x_train.txt")
total_acc_x_train$train_test <- rep(train_flag,nrow(total_acc_x_train))
##Read total_acc_y_train.txt into Data frame:total_acc_y_train, and add new column:train_test as identifier
total_acc_y_train<- read.table("samsung/train/Inertial Signals/total_acc_y_train.txt")
total_acc_y_train$train_test <- rep(train_flag,nrow(total_acc_y_train))
##Read total_acc_z_train.txt into Data frame:total_acc_z_train, and add new column:train_test as identifier
total_acc_z_train<- read.table("samsung/train/Inertial Signals/total_acc_z_train.txt")
total_acc_z_train$train_test <- rep(train_flag,nrow(total_acc_z_train))
##Read X_test.txt into Data frame:xtest_data,and use vector data in 2nd column of feature as columns' name
xtest_data <- read.table("samsung/test/X_test.txt",col.names=features$V2)
##Read subject_test.txt.txt into Data frame:subject_test
subject_test<- read.table("samsung/test/subject_test.txt",col.names = c("subject"))
##Read y_test.txt.txt into Data frame:y_test
y_test<- read.table("samsung/test/y_test.txt",col.names = c("activity_label"))
##Read body_acc_x_test.txt into Data frame:body_acc_x_test, and add new column:train_test as identifier
body_acc_x_test<- read.table("samsung/test/Inertial Signals/body_acc_x_test.txt")
body_acc_x_test$train_test <- rep(test_flag,nrow(body_acc_x_test))
##Read body_acc_y_test.txt into Data frame:body_acc_y_test, and add new column:train_test as identifier
body_acc_y_test<- read.table("samsung/test/Inertial Signals/body_acc_y_test.txt")
body_acc_y_test$train_test <- rep(test_flag,nrow(body_acc_y_test))
##Read body_acc_z_test.txt into Data frame:body_acc_z_test, and add new column:train_test as identifier
body_acc_z_test<- read.table("samsung/test/Inertial Signals/body_acc_z_test.txt")
body_acc_z_test$train_test <- rep(test_flag,nrow(body_acc_z_test))
##Read body_gyro_x_test.txt into Data frame:body_gyro_x_test, and add new column:train_test as identifier
body_gyro_x_test<- read.table("samsung/test/Inertial Signals/body_gyro_x_test.txt")
body_gyro_x_test$train_test <- rep(test_flag,nrow(body_gyro_x_test))
##Read body_gyro_y_test.txt into Data frame:body_gyro_y_test, and add new column:train_test as identifier
body_gyro_y_test<- read.table("samsung/test/Inertial Signals/body_gyro_y_test.txt")
body_gyro_y_test$train_test <- rep(test_flag,nrow(body_gyro_y_test))
##Read body_gyro_z_test.txt into Data frame:body_gyro_z_test, and add new column:train_test as identifier
body_gyro_z_test<- read.table("samsung/test/Inertial Signals/body_gyro_z_test.txt")
body_gyro_z_test$train_test <- rep(test_flag,nrow(body_gyro_z_test))
##Read total_acc_x_test.txt into Data frame:total_acc_x_test, and add new column:train_test as identifier
total_acc_x_test<- read.table("samsung/test/Inertial Signals/total_acc_x_test.txt")
total_acc_x_test$train_test <- rep(test_flag,nrow(total_acc_x_test))
##Read total_acc_y_test.txt into Data frame:total_acc_y_test, and add new column:train_test as identifier
total_acc_y_test<- read.table("samsung/test/Inertial Signals/total_acc_y_test.txt")
total_acc_y_test$train_test <- rep(test_flag,nrow(total_acc_y_test))
##Read total_acc_z_test.txt into Data frame:total_acc_z_test, and add new column:train_test as identifier
total_acc_z_test<- read.table("samsung/test/Inertial Signals/total_acc_z_test.txt")
total_acc_z_test$train_test <- rep(test_flag,nrow(total_acc_z_test))
# Combine data from train and test
## Create a new column "train_test" for y_train as identifier for train vs test rows
y_train$train_test <- rep(train_flag,nrow(y_train))
## Create a new column "train_test" for y_test as identifier for train vs test rows
y_test$train_test <- rep(test_flag,nrow(y_test))
## Stack activity labels: train rows first, then test rows
y_all <- rbind(y_train,y_test)
## Stack subject identifiers in the same train-then-test order
subject_all <- rbind(subject_train,subject_test)
## Stack the 561-feature measurement data in the same train-then-test order
xall_data <- rbind(xtrain_data,xtest_data)
## Stack the train and test copies of each inertial-signal data frame into
## a single <signal>_all data frame (train rows first, matching the
## original rbind order).
measurement_signals <- c("body_acc_x", "body_acc_y", "body_acc_z",
                         "body_gyro_x", "body_gyro_y", "body_gyro_z",
                         "total_acc_x", "total_acc_y", "total_acc_z")
for (sig in measurement_signals) {
    assign(paste0(sig, "_all"),
           rbind(get(paste0(sig, "_train")), get(paste0(sig, "_test"))))
}
## Attach the matching mean() and std() feature columns from xall_data to
## each body_acc_* / body_gyro_* combined frame. Raw feature labels such as
## "tBodyAcc-mean()-X" become syntactic names ("tBodyAcc.mean...X") via
## make.names(), which is how those columns are named in xall_data.
for (sensor in c("Acc", "Gyro")) {
    for (axis in c("X", "Y", "Z")) {
        var_name <- paste0("body_", tolower(sensor), "_",
                           tolower(axis), "_all")
        dat <- get(var_name)
        for (stat in c("mean", "std")) {  # mean first, then std (as before)
            col_name <- make.names(sprintf("tBody%s-%s()-%s",
                                           sensor, stat, axis),
                                   unique = TRUE)
            dat[[col_name]] <- xall_data[[col_name]]
        }
        assign(var_name, dat)
    }
}
# Create a new column "activity_name" from the numeric activity label
# (gen_col_by_cell() is defined earlier in this script)
y_all$activity_name<-gen_col_by_cell(y_all$activity_label)
## merge y_all and subject_all into subject_activity_all (column-wise;
## bind_cols() comes from dplyr)
subject_activity_all<-bind_cols(subject_all, y_all)
## merge subject_activity_all with the feature data
subject_activity_features_all<-bind_cols(subject_activity_all, xall_data)
# Create a tidy data set with the average of each variable for each activity
# and each subject.
# NOTE(review): columns 5:565 are assumed to be the feature columns and
# columns 1:2 the subject/activity keys -- confirm against the bound layout.
tidy_data<-aggregate( subject_activity_features_all[,5:565], subject_activity_features_all[,1:2], FUN = mean )
# Add a human-readable activity name as the third column of tidy_data
a_name <- gen_col_by_cell(tidy_data$activity_label)
tidy_data<-as.data.frame(append(tidy_data, list(activity_name = a_name), after = 2))
# Write tidy_data to tidy_data.csv in the working directory
write.csv(tidy_data, file = "tidy_data.csv")
|
## some utility functions used internally within the ALA4R library: not
## exported
##-----------------------------------------------------------------------------
## TRUE when x is NULL or a zero-extent table (no rows or no columns).
## NOTE(review): assumes x is data-frame-like; for a plain vector nrow(x)
## is NULL and the comparison errors inside `||` -- callers only pass
## data frames.
empty <- function(x) {
    is.null(x) || nrow(x) < 1 || ncol(x) < 1
}
## TRUE when x is a single, non-NA, non-empty character string.
## (is.string() is assertthat's scalar-string test, attached elsewhere.)
is.notempty.string <- function(x) {
    is.string(x) && !is.na(x) && nchar(x) > 0
}
##-----------------------------------------------------------------------------
## internal function for converting chr data types to numeric or logical
##
## Coerces a character vector to logical or numeric when every unique value
## fits the target type; anything else (including non-character input) is
## returned unchanged.
##
## @param x vector to (possibly) convert.
## @param test_numeric set to FALSE to skip checking for numeric columns -
##   might be a little faster if not needed.
## @return x, possibly coerced to logical or numeric.
convert_dt <- function(x, test_numeric = TRUE) {
    ## set test_numeric to FALSE to skip checking for numeric columns - might
    ## be a little faster if not needed
    assert_that(is.flag(test_numeric))
    if (see_if(is.character(x))) {
        ## only the unique values need testing, which is cheaper
        ux <- unique(x)
        ## non-valid encoding of strings here will cause failure
        encoding_ok <- function(s) {
            ## will be TRUE if successful, or an error message if not
            ## (nchar() errors on invalidly-encoded strings)
            temp <- try({
                nchar(s); TRUE
            }, silent = TRUE)
            is.logical(temp) && temp
        }
        if (!encoding_ok(ux)) {
            x <- enc2utf8(x) ## force to utf8
            ux <- unique(x)
        }
        if (all(nchar(ux) < 1)) {
            ## all empty strings - leave as is
        } else if (all(ux %in% c("true", "false", "TRUE", "FALSE", "", "NA"))) {
            x <- as.logical(x)
        } else if (test_numeric) {
            ## numeric only if every value is empty, "NA", or parses cleanly
            if (all(nchar(ux) < 1 | ux == "NA" |
                    !is.na(suppressWarnings(as.numeric(ux))))) {
                x <- as.numeric(x)
            }
        }
    }
    x
}
##-----------------------------------------------------------------------------
## Normalise whitespace in a string: trim the ends and collapse internal
## runs to a single space. Only used in search_names and
## search_partial_name; stripping other characters causes problems with
## hyphenated names and seems likely not to behave well with
## internationalisation anyway.
clean_string <- function(x) {
    trimmed <- str_trim(x)       ## drop leading and trailing whitespace
    gsub("\\s+", " ", trimmed)   ## collapse repeated whitespace to one space
}
##-----------------------------------------------------------------------------
## Convert strings to camelCase by splitting on delim and capitalising the
## first letter of each subsequent chunk (every chunk when upper = TRUE).
## Modified from a help-forum example; not exported, internal ALA4R use only.
tocamel <- function(x, delim = "[^[:alnum:]]", upper = FALSE, sep = "") {
    assert_that(is.character(x))
    assert_that(is.string(delim))
    pieces <- strsplit(x, delim)
    camel_one <- function(chunks) {
        if (any(is.na(chunks))) {
            return(chunks)  # leave NA entries untouched
        }
        heads <- substring(chunks, 1, 1)
        if (isTRUE(upper)) {
            heads <- toupper(heads)          # UpperCamelCase
        } else {
            heads[-1] <- toupper(heads[-1])  # lowerCamelCase
        }
        paste(heads, substring(chunks, 2), sep = "", collapse = sep)
    }
    vapply(pieces, camel_one, FUN.VALUE = "", USE.NAMES = FALSE)
}
##-----------------------------------------------------------------------------
## Column names that we will remove from the results because we don't
## think they will be useful in the ALA4R context.
##
## @param type result flavour; partial-matched against general/layers/
##   occurrence/occurrence_stored/occurrence_indexed/assertions.
## @return character vector of column names to drop (c("") when none).
unwanted_columns <- function(type) {
    type <- match.arg(tolower(type), c("general", "layers", "occurrence",
                                       "occurrence_stored",
                                       "occurrence_indexed", "assertions"))
    ## both occurrence_* flavours share the plain occurrence drop-list
    if (type %in% c("occurrence_stored", "occurrence_indexed")) {
        type <- "occurrence"
    }
    drop_lists <- list(
        ## rawRank appears to be a duplicate of rank or rankString
        general = c("rawRank", "rawRankString", "rankId", "rankID",
                    "left", "right", "idxType", "highlight",
                    "linkIdentifier", "isExcluded"),
        ## datalang appears to be all "eng" "Eng" "enu" "" or NA
        ## (2x"enu" records appear to be in English and from DEH/DEWHA)
        ## grid is redundant: all env layers are grid==TRUE, all
        ## contextual layers are grid==NA; ditto for shape
        ## mddatest is an internal metadata testing date of some sort?
        ## enabled appears to be all TRUE; spid is redundant with id
        ## no idea what sid, sname, or sdesc are, but don't look
        ## particularly useful in our context
        layers = c("pid", "path", "path_orig", "path_1km", "enabled",
                   "uid", "licence_level", "lookuptablepath", "mdhrlv",
                   "mddatest", "datalang", "grid", "shape", "enabled",
                   "indb", "spid", "sid", "sdesc", "sname",
                   "defaultlayer", "namesearch", "intersect",
                   "layerbranch", "analysis", "addtomap"),
        ## lft and rgt look the same as left and right in general fields
        occurrence = c("lft", "rgt", "rankId"))
    if (type %in% names(drop_lists)) {
        drop_lists[[type]]
    } else {
        ## "assertions" has no specific drop-list
        c("")
    }
}
##-----------------------------------------------------------------------------
## Normalise a vector of column/variable names to ALA4R conventions:
## camelCase with a lowercase first letter, plus per-type substitutions so
## the same quantity gets the same name regardless of which web service
## returned it.
##
## @param varnames character vector of raw variable names.
## @param type one of general/layers/occurrence/occurrence_stored/
##   occurrence_indexed/assertions/other.
## @param verbose emit warnings about ambiguous renames?
## @return character vector of cleaned names, same length as varnames.
rename_variables <- function(varnames, type, verbose = ala_config()$verbose) {
    if (length(varnames) < 1) {
        ## catch in case names from empty data frame were passed
        return(varnames)
    }
    assert_that(is.character(varnames))
    assert_that(is.string(type))
    ## use "other" to make no variable name substitutions, just enforce
    ## case/separator conventions
    type <- match.arg(tolower(type), c("general", "layers", "occurrence",
                                       "occurrence_stored",
                                       "occurrence_indexed", "assertions",
                                       "other"))
    ## change all to camelCase
    varnames <- tocamel(make.names(varnames))
    ## try to convert some all-lowercase names to camel, e.g.
    ## environmentalvaluemax minlatitude minlongitude
    for (kw in c("longitude", "latitude", "value", "units")) {
        varnames <- str_replace_all(varnames, kw,
                                    paste(toupper(substring(kw, 1, 1)),
                                          substring(kw, 2), sep = ""))
    }
    ## some that only seem to appear at the ends of variable names, so be
    ## conservative with these replacements
    for (kw in c("min", "max", "path")) {
        varnames <- str_replace_all(varnames, paste(kw, "$", sep = ""),
                                    paste(toupper(substr(kw, 1, 1)),
                                          substring(kw, 2), sep = ""))
    }
    ## enforce first letter lowercase
    varnames <- paste(tolower(substr(varnames, 1, 1)),
                      substring(varnames, 2), sep = "")
    ## some global re-naming by data type
    if (type == "general") {
        ## general names, from e.g. name searching
        varnames[varnames == "occCount"] <- "occurrenceCount"
        varnames[varnames == "classs"] <- "class"
        if (!any(varnames == "commonName")) {
            ## taxinfo_download provides "vernacularName", others "commonName"
            varnames[varnames == "vernacularName"] <- "commonName"
            ## search_guids provides "commonNameSingle", others "commonName"
            varnames[varnames == "commonNameSingle"] <- "commonName"
        }
        varnames <- str_replace_all(varnames, "conservationStatusInAustralia",
                                    "conservationStatusAUS")
        varnames <- str_replace_all(varnames, "conservationStatusIn",
                                    "conservationStatus")
        ## taxinfo_download returns the former, but should be the latter for
        ## consistency elsewhere
        varnames <- str_replace_all(varnames,
                                    "scientificNameForAcceptedConcept",
                                    "acceptedConceptName")
        if (any(varnames == "rank") & any(varnames == "rankString")) {
            if (verbose) {
                warning("data contains both \"rank\" and \"rankString\" columns,
not renaming \"rankString\"")
            }
        } else {
            ## returned as "rank" by some services and "rankString" by others
            varnames[varnames == "rankString"] <- "rank"
        }
        ## ditto for taxonRank
        if (any(varnames == "rank") & any(varnames == "taxonRank")) {
            if (verbose) {
                warning("data contains both \"rank\" and \"taxonRank\" columns,
not renaming \"taxonRank\"")
            }
        } else {
            ## returned as "Taxon.Rank" (camelcased to "taxonRank") by
            ## taxinfo_download
            varnames[varnames == "taxonRank"] <- "rank"
        }
    } else if (type == "layers") {
        varnames[varnames == "desc"] <- "description"
    } else if (type %in% c("occurrence", "occurrence_stored",
                           "occurrence_indexed")) {
        ## old columns: Scientific Name, Matched Scientific Name
        ## new columns: Scientific Name - original, Scientific Name
        varnames[varnames == "recordID"] <- "id"
        varnames[varnames == "xVersion"] <- "version"
        varnames <- str_replace_all(varnames, regex("axonconceptguid",
                                                    ignore_case = TRUE),
                                    "axonConceptLsid")
        varnames <- str_replace_all(varnames, "vernacularName", "commonName")
        varnames <- str_replace_all(varnames, "taxonRank", "rank")
        ## rawSomething to somethingOriginal
        ## first-letter lowercase will be lost here but gets fixed below
        varnames <- str_replace_all(varnames, "^raw(.*)$", "\\1Original")
        ## dump "matched", "processed", and "parsed"
        varnames <- str_replace_all(varnames,
                                    regex("(matched|processed|parsed)",
                                          ignore_case = TRUE), "")
    } else if (type == "assertions") {
        a <- ala_fields("assertions", as_is = TRUE)
        ## want all assertion field names to match those in a$name
        ## but some may be camelCased versions of the description
        ## use "other" here to avoid this renaming code block, just apply
        ## camelCasing etc (note: recursive call)
        a$description <- rename_variables(a$description, type = "other")
        varnames <- vapply(varnames, function(z) {
            ifelse(z %in% a$name, z, ifelse(sum(z == a$description) == 1,
                                            a$name[a$description == z], z))
        }, FUN.VALUE = "", USE.NAMES = FALSE)
    }
    ## do this again, it may have been lost in the processing: enforce first
    ## letter lowercase
    varnames <- paste(tolower(substr(varnames, 1, 1)), substring(varnames, 2),
                      sep = "")
    if (type %in% c("layers", "occurrence", "occurrence_stored",
                    "occurrence_indexed")) {
        ## but some acronyms in layer names should remain all-uppercase
        ## currently this list is:
        ## c("iBRA", "iMCRA", "aCTTAMS", "gER", "nZ", "nSW", "lGA", "nRM",
        ## "rAMSAR", "nDVI", "nPP", "aSRI", "gEOMACS")
        ## but since these all occur at the start of variable names, we can
        ## catch them with a regular expression and not need to hardcode a list
        idx <- str_detect(varnames, "^[a-z][A-Z]")
        temp <- varnames[idx]
        varnames[idx] <- paste(toupper(substr(temp, 1, 1)), substring(temp, 2),
                               sep = "")
        ## "seaWIFS" to "SeaWIFS"
        varnames <- str_replace_all(varnames, "seaWIFS", "SeaWIFS")
    }
    if (type == "assertions") { ###hardcoded assertion variable name changes
        ## these assertions come back from the ALA service with the wrong names
        if ("coordinatesAreOutOfRangeForSpecies" %in% varnames) {
            varnames[varnames == "coordinatesAreOutOfRangeForSpecies"] <-
                "coordinatesOutOfRange"
        }
        if ("collectionDateMissing" %in% varnames) {
            varnames[varnames == "collectionDateMissing"] <-
                "missingCollectionDate"
        }
        if ("coordinateUncertaintyNotSpecified" %in% varnames) {
            varnames[varnames == "coordinateUncertaintyNotSpecified"] <-
                "uncertaintyNotSpecified"
        }
    }
    ## return the varnames
    varnames
}
## Construct a url path from parts, collapsing repeated separators and
## dropping any leading separator. "http://" and "https://" prefixes are
## shielded first so the double slash of an embedded URL survives (GUID
## strings have used full URLs since July 2016). Previously only "http://"
## was shielded, so "https://..." was corrupted to "https:/...".
##
## @param ... character vectors of path components.
## @param sep separator to join with (default "/").
## @return single string path.
clean_path <- function(..., sep = "/") {
    ## collapse individual arguments
    path1 <- vapply(list(...), function(z) paste(z, sep = sep, collapse = sep),
                    FUN.VALUE = "", USE.NAMES = FALSE)
    path <- paste(path1, sep = sep, collapse = sep) ## paste parts together
    ## shield scheme prefixes so their "//" is not collapsed
    path <- gsub("http://", "http:@@", path, fixed = TRUE)
    path <- gsub("https://", "https:@@", path, fixed = TRUE)
    path <- gsub(paste0("[", sep, "]+"), sep, path) ## remove multiple slashes
    path <- gsub("https:@@", "https://", path, fixed = TRUE)
    path <- gsub("http:@@", "http://", path, fixed = TRUE)
    sub(paste0("^", sep), "", path) ## remove leading slash
}
## Convenience function for building urls.
## The path can be passed in one of several ways:
##   as a single string:         build_url_from_parts(base_url, "path/to/thing")
##   as a character vector/list: build_url_from_parts(base_url,
##                                                    c("path", "to", "thing"))
##   or a combination of both.
##
## @param base_url base url string.
## @param path optional path component(s), joined via clean_path().
## @param query optional named list of query parameters.
## @return complete url string (via httr::build_url).
build_url_from_parts <- function(base_url, path = NULL, query = list()) {
    this_url <- parse_url(base_url)  # httr::parse_url
    this_url$path <- clean_path(this_url$path, path)
    if (length(query) > 0) {
        this_url$query <- query
    }
    build_url(this_url)  # httr::build_url
}
## Wrapper around read.csv() that suppresses the spurious "incomplete final
## line" warning while still reporting any other warnings raised during the
## read (deferred until the read has completed).
##
## @param ... passed through to read.csv().
## @return the data.frame returned by read.csv().
read_csv_quietly <- function(...) {
    read_warnings <- NULL
    w_handler <- function(w) {
        if (!grepl("incomplete final line", conditionMessage(w),
                   ignore.case = TRUE)) {
            ## keep non-matching warnings so they can be re-thrown below
            read_warnings <<- c(read_warnings, list(w))
        }
        ## muffle every warning here: the "incomplete final line" one is
        ## dropped entirely, the rest are re-raised after the read.
        ## (Previously this muffle sat inside the if(), so the one warning
        ## this wrapper exists to silence was the only one NOT muffled.)
        invokeRestart("muffleWarning")
    }
    out <- withCallingHandlers(read.csv(...), warning = w_handler)
    ## now throw any warnings that got collected, because they weren't about a
    ## final missing line break
    for (w in read_warnings) warning(w)
    out
}
## Replace non-breaking spaces (U+00A0) with ordinary spaces.
replace_nonbreaking_spaces <- function(s) {
    gsub("\ua0", " ", s, fixed = TRUE)
}
| /R/utilities_internal.R | no_license | AtlasOfLivingAustralia/ALA4R | R | false | false | 14,135 | r | ## some utility functions used internally within the ALA4R library: not
## exported
##-----------------------------------------------------------------------------
## TRUE when x is NULL or a zero-extent table (no rows or no columns).
## NOTE(review): assumes x is data-frame-like; for a plain vector nrow(x)
## is NULL and the comparison errors inside `||` -- callers only pass
## data frames.
empty <- function(x) {
    is.null(x) || nrow(x) < 1 || ncol(x) < 1
}
## TRUE when x is a single, non-NA, non-empty character string.
## (is.string() is assertthat's scalar-string test, attached elsewhere.)
is.notempty.string <- function(x) {
    is.string(x) && !is.na(x) && nchar(x) > 0
}
##-----------------------------------------------------------------------------
## internal function for converting chr data types to numeric or logical
##
## Coerces a character vector to logical or numeric when every unique value
## fits the target type; anything else (including non-character input) is
## returned unchanged.
##
## @param x vector to (possibly) convert.
## @param test_numeric set to FALSE to skip checking for numeric columns -
##   might be a little faster if not needed.
## @return x, possibly coerced to logical or numeric.
convert_dt <- function(x, test_numeric = TRUE) {
    ## set test_numeric to FALSE to skip checking for numeric columns - might
    ## be a little faster if not needed
    assert_that(is.flag(test_numeric))
    if (see_if(is.character(x))) {
        ## only the unique values need testing, which is cheaper
        ux <- unique(x)
        ## non-valid encoding of strings here will cause failure
        encoding_ok <- function(s) {
            ## will be TRUE if successful, or an error message if not
            ## (nchar() errors on invalidly-encoded strings)
            temp <- try({
                nchar(s); TRUE
            }, silent = TRUE)
            is.logical(temp) && temp
        }
        if (!encoding_ok(ux)) {
            x <- enc2utf8(x) ## force to utf8
            ux <- unique(x)
        }
        if (all(nchar(ux) < 1)) {
            ## all empty strings - leave as is
        } else if (all(ux %in% c("true", "false", "TRUE", "FALSE", "", "NA"))) {
            x <- as.logical(x)
        } else if (test_numeric) {
            ## numeric only if every value is empty, "NA", or parses cleanly
            if (all(nchar(ux) < 1 | ux == "NA" |
                    !is.na(suppressWarnings(as.numeric(ux))))) {
                x <- as.numeric(x)
            }
        }
    }
    x
}
##-----------------------------------------------------------------------------
## Normalise whitespace in a string: trim the ends and collapse internal
## runs to a single space. Only used in search_names and
## search_partial_name; stripping other characters causes problems with
## hyphenated names and seems likely not to behave well with
## internationalisation anyway.
clean_string <- function(x) {
    trimmed <- str_trim(x)       ## drop leading and trailing whitespace
    gsub("\\s+", " ", trimmed)   ## collapse repeated whitespace to one space
}
##-----------------------------------------------------------------------------
## Convert strings to camelCase by splitting on delim and capitalising the
## first letter of each subsequent chunk (every chunk when upper = TRUE).
## Modified from a help-forum example; not exported, internal ALA4R use only.
tocamel <- function(x, delim = "[^[:alnum:]]", upper = FALSE, sep = "") {
    assert_that(is.character(x))
    assert_that(is.string(delim))
    pieces <- strsplit(x, delim)
    camel_one <- function(chunks) {
        if (any(is.na(chunks))) {
            return(chunks)  # leave NA entries untouched
        }
        heads <- substring(chunks, 1, 1)
        if (isTRUE(upper)) {
            heads <- toupper(heads)          # UpperCamelCase
        } else {
            heads[-1] <- toupper(heads[-1])  # lowerCamelCase
        }
        paste(heads, substring(chunks, 2), sep = "", collapse = sep)
    }
    vapply(pieces, camel_one, FUN.VALUE = "", USE.NAMES = FALSE)
}
##-----------------------------------------------------------------------------
## Column names that we will remove from the results because we don't
## think they will be useful in the ALA4R context.
##
## @param type result flavour; partial-matched against general/layers/
##   occurrence/occurrence_stored/occurrence_indexed/assertions.
## @return character vector of column names to drop (c("") when none).
unwanted_columns <- function(type) {
    type <- match.arg(tolower(type), c("general", "layers", "occurrence",
                                       "occurrence_stored",
                                       "occurrence_indexed", "assertions"))
    ## both occurrence_* flavours share the plain occurrence drop-list
    if (type %in% c("occurrence_stored", "occurrence_indexed")) {
        type <- "occurrence"
    }
    drop_lists <- list(
        ## rawRank appears to be a duplicate of rank or rankString
        general = c("rawRank", "rawRankString", "rankId", "rankID",
                    "left", "right", "idxType", "highlight",
                    "linkIdentifier", "isExcluded"),
        ## datalang appears to be all "eng" "Eng" "enu" "" or NA
        ## (2x"enu" records appear to be in English and from DEH/DEWHA)
        ## grid is redundant: all env layers are grid==TRUE, all
        ## contextual layers are grid==NA; ditto for shape
        ## mddatest is an internal metadata testing date of some sort?
        ## enabled appears to be all TRUE; spid is redundant with id
        ## no idea what sid, sname, or sdesc are, but don't look
        ## particularly useful in our context
        layers = c("pid", "path", "path_orig", "path_1km", "enabled",
                   "uid", "licence_level", "lookuptablepath", "mdhrlv",
                   "mddatest", "datalang", "grid", "shape", "enabled",
                   "indb", "spid", "sid", "sdesc", "sname",
                   "defaultlayer", "namesearch", "intersect",
                   "layerbranch", "analysis", "addtomap"),
        ## lft and rgt look the same as left and right in general fields
        occurrence = c("lft", "rgt", "rankId"))
    if (type %in% names(drop_lists)) {
        drop_lists[[type]]
    } else {
        ## "assertions" has no specific drop-list
        c("")
    }
}
##-----------------------------------------------------------------------------
## Normalise a vector of column/variable names to ALA4R conventions:
## camelCase with a lowercase first letter, plus per-type substitutions so
## the same quantity gets the same name regardless of which web service
## returned it.
##
## @param varnames character vector of raw variable names.
## @param type one of general/layers/occurrence/occurrence_stored/
##   occurrence_indexed/assertions/other.
## @param verbose emit warnings about ambiguous renames?
## @return character vector of cleaned names, same length as varnames.
rename_variables <- function(varnames, type, verbose = ala_config()$verbose) {
    if (length(varnames) < 1) {
        ## catch in case names from empty data frame were passed
        return(varnames)
    }
    assert_that(is.character(varnames))
    assert_that(is.string(type))
    ## use "other" to make no variable name substitutions, just enforce
    ## case/separator conventions
    type <- match.arg(tolower(type), c("general", "layers", "occurrence",
                                       "occurrence_stored",
                                       "occurrence_indexed", "assertions",
                                       "other"))
    ## change all to camelCase
    varnames <- tocamel(make.names(varnames))
    ## try to convert some all-lowercase names to camel, e.g.
    ## environmentalvaluemax minlatitude minlongitude
    for (kw in c("longitude", "latitude", "value", "units")) {
        varnames <- str_replace_all(varnames, kw,
                                    paste(toupper(substring(kw, 1, 1)),
                                          substring(kw, 2), sep = ""))
    }
    ## some that only seem to appear at the ends of variable names, so be
    ## conservative with these replacements
    for (kw in c("min", "max", "path")) {
        varnames <- str_replace_all(varnames, paste(kw, "$", sep = ""),
                                    paste(toupper(substr(kw, 1, 1)),
                                          substring(kw, 2), sep = ""))
    }
    ## enforce first letter lowercase
    varnames <- paste(tolower(substr(varnames, 1, 1)),
                      substring(varnames, 2), sep = "")
    ## some global re-naming by data type
    if (type == "general") {
        ## general names, from e.g. name searching
        varnames[varnames == "occCount"] <- "occurrenceCount"
        varnames[varnames == "classs"] <- "class"
        if (!any(varnames == "commonName")) {
            ## taxinfo_download provides "vernacularName", others "commonName"
            varnames[varnames == "vernacularName"] <- "commonName"
            ## search_guids provides "commonNameSingle", others "commonName"
            varnames[varnames == "commonNameSingle"] <- "commonName"
        }
        varnames <- str_replace_all(varnames, "conservationStatusInAustralia",
                                    "conservationStatusAUS")
        varnames <- str_replace_all(varnames, "conservationStatusIn",
                                    "conservationStatus")
        ## taxinfo_download returns the former, but should be the latter for
        ## consistency elsewhere
        varnames <- str_replace_all(varnames,
                                    "scientificNameForAcceptedConcept",
                                    "acceptedConceptName")
        if (any(varnames == "rank") & any(varnames == "rankString")) {
            if (verbose) {
                warning("data contains both \"rank\" and \"rankString\" columns,
not renaming \"rankString\"")
            }
        } else {
            ## returned as "rank" by some services and "rankString" by others
            varnames[varnames == "rankString"] <- "rank"
        }
        ## ditto for taxonRank
        if (any(varnames == "rank") & any(varnames == "taxonRank")) {
            if (verbose) {
                warning("data contains both \"rank\" and \"taxonRank\" columns,
not renaming \"taxonRank\"")
            }
        } else {
            ## returned as "Taxon.Rank" (camelcased to "taxonRank") by
            ## taxinfo_download
            varnames[varnames == "taxonRank"] <- "rank"
        }
    } else if (type == "layers") {
        varnames[varnames == "desc"] <- "description"
    } else if (type %in% c("occurrence", "occurrence_stored",
                           "occurrence_indexed")) {
        ## old columns: Scientific Name, Matched Scientific Name
        ## new columns: Scientific Name - original, Scientific Name
        varnames[varnames == "recordID"] <- "id"
        varnames[varnames == "xVersion"] <- "version"
        varnames <- str_replace_all(varnames, regex("axonconceptguid",
                                                    ignore_case = TRUE),
                                    "axonConceptLsid")
        varnames <- str_replace_all(varnames, "vernacularName", "commonName")
        varnames <- str_replace_all(varnames, "taxonRank", "rank")
        ## rawSomething to somethingOriginal
        ## first-letter lowercase will be lost here but gets fixed below
        varnames <- str_replace_all(varnames, "^raw(.*)$", "\\1Original")
        ## dump "matched", "processed", and "parsed"
        varnames <- str_replace_all(varnames,
                                    regex("(matched|processed|parsed)",
                                          ignore_case = TRUE), "")
    } else if (type == "assertions") {
        a <- ala_fields("assertions", as_is = TRUE)
        ## want all assertion field names to match those in a$name
        ## but some may be camelCased versions of the description
        ## use "other" here to avoid this renaming code block, just apply
        ## camelCasing etc (note: recursive call)
        a$description <- rename_variables(a$description, type = "other")
        varnames <- vapply(varnames, function(z) {
            ifelse(z %in% a$name, z, ifelse(sum(z == a$description) == 1,
                                            a$name[a$description == z], z))
        }, FUN.VALUE = "", USE.NAMES = FALSE)
    }
    ## do this again, it may have been lost in the processing: enforce first
    ## letter lowercase
    varnames <- paste(tolower(substr(varnames, 1, 1)), substring(varnames, 2),
                      sep = "")
    if (type %in% c("layers", "occurrence", "occurrence_stored",
                    "occurrence_indexed")) {
        ## but some acronyms in layer names should remain all-uppercase
        ## currently this list is:
        ## c("iBRA", "iMCRA", "aCTTAMS", "gER", "nZ", "nSW", "lGA", "nRM",
        ## "rAMSAR", "nDVI", "nPP", "aSRI", "gEOMACS")
        ## but since these all occur at the start of variable names, we can
        ## catch them with a regular expression and not need to hardcode a list
        idx <- str_detect(varnames, "^[a-z][A-Z]")
        temp <- varnames[idx]
        varnames[idx] <- paste(toupper(substr(temp, 1, 1)), substring(temp, 2),
                               sep = "")
        ## "seaWIFS" to "SeaWIFS"
        varnames <- str_replace_all(varnames, "seaWIFS", "SeaWIFS")
    }
    if (type == "assertions") { ###hardcoded assertion variable name changes
        ## these assertions come back from the ALA service with the wrong names
        if ("coordinatesAreOutOfRangeForSpecies" %in% varnames) {
            varnames[varnames == "coordinatesAreOutOfRangeForSpecies"] <-
                "coordinatesOutOfRange"
        }
        if ("collectionDateMissing" %in% varnames) {
            varnames[varnames == "collectionDateMissing"] <-
                "missingCollectionDate"
        }
        if ("coordinateUncertaintyNotSpecified" %in% varnames) {
            varnames[varnames == "coordinateUncertaintyNotSpecified"] <-
                "uncertaintyNotSpecified"
        }
    }
    ## return the varnames
    varnames
}
## Construct a url path from parts, collapsing repeated separators and
## dropping any leading separator. "http://" and "https://" prefixes are
## shielded first so the double slash of an embedded URL survives (GUID
## strings have used full URLs since July 2016). Previously only "http://"
## was shielded, so "https://..." was corrupted to "https:/...".
##
## @param ... character vectors of path components.
## @param sep separator to join with (default "/").
## @return single string path.
clean_path <- function(..., sep = "/") {
    ## collapse individual arguments
    path1 <- vapply(list(...), function(z) paste(z, sep = sep, collapse = sep),
                    FUN.VALUE = "", USE.NAMES = FALSE)
    path <- paste(path1, sep = sep, collapse = sep) ## paste parts together
    ## shield scheme prefixes so their "//" is not collapsed
    path <- gsub("http://", "http:@@", path, fixed = TRUE)
    path <- gsub("https://", "https:@@", path, fixed = TRUE)
    path <- gsub(paste0("[", sep, "]+"), sep, path) ## remove multiple slashes
    path <- gsub("https:@@", "https://", path, fixed = TRUE)
    path <- gsub("http:@@", "http://", path, fixed = TRUE)
    sub(paste0("^", sep), "", path) ## remove leading slash
}
## Convenience function for building urls.
## The path can be passed in one of several ways:
##   as a single string:         build_url_from_parts(base_url, "path/to/thing")
##   as a character vector/list: build_url_from_parts(base_url,
##                                                    c("path", "to", "thing"))
##   or a combination of both.
##
## @param base_url base url string.
## @param path optional path component(s), joined via clean_path().
## @param query optional named list of query parameters.
## @return complete url string (via httr::build_url).
build_url_from_parts <- function(base_url, path = NULL, query = list()) {
    this_url <- parse_url(base_url)  # httr::parse_url
    this_url$path <- clean_path(this_url$path, path)
    if (length(query) > 0) {
        this_url$query <- query
    }
    build_url(this_url)  # httr::build_url
}
## Wrapper around read.csv() that suppresses the spurious "incomplete final
## line" warning while still reporting any other warnings raised during the
## read (deferred until the read has completed).
##
## @param ... passed through to read.csv().
## @return the data.frame returned by read.csv().
read_csv_quietly <- function(...) {
    read_warnings <- NULL
    w_handler <- function(w) {
        if (!grepl("incomplete final line", conditionMessage(w),
                   ignore.case = TRUE)) {
            ## keep non-matching warnings so they can be re-thrown below
            read_warnings <<- c(read_warnings, list(w))
        }
        ## muffle every warning here: the "incomplete final line" one is
        ## dropped entirely, the rest are re-raised after the read.
        ## (Previously this muffle sat inside the if(), so the one warning
        ## this wrapper exists to silence was the only one NOT muffled.)
        invokeRestart("muffleWarning")
    }
    out <- withCallingHandlers(read.csv(...), warning = w_handler)
    ## now throw any warnings that got collected, because they weren't about a
    ## final missing line break
    for (w in read_warnings) warning(w)
    out
}
## Replace non-breaking spaces (U+00A0) with ordinary spaces.
replace_nonbreaking_spaces <- function(s) {
    gsub("\ua0", " ", s, fixed = TRUE)
}
|
# Collapse the 4-point Trump favorability scale into three buckets and
# classify year-over-year change on the `vote` data frame loaded earlier.

# Map raw favorability codes to a simplified label:
# 1/2 ("very"/"somewhat" favorable) -> "fav", 3/4 -> "unfav",
# 8 (don't know) -> "dk"; any other code (e.g. refused) stays NA.
simplify_fav <- function(code) {
  out <- rep(NA_character_, length(code))
  out[code %in% c(1, 2)] <- "fav"
  out[code %in% c(3, 4)] <- "unfav"
  out[code == 8] <- "dk"
  out
}

# Classify the transition between two simplified favorability vectors.
# NA in either year leaves the result NA (same as the original masks).
classify_change <- function(from, to) {
  out <- rep(NA_character_, length(from))
  out[from == "fav" & to %in% c("unfav", "dk")] <- "decrease"
  out[from == "dk" & to == "unfav"] <- "decrease"
  out[from == "unfav" & to %in% c("fav", "dk")] <- "increase"
  out[from == "dk" & to == "fav"] <- "increase"
  out[from == "fav" & to == "fav"] <- "remain fav"
  out[from == "unfav" & to == "unfav"] <- "remain unfav"
  out[from == "dk" & to == "dk"] <- "remain dk"
  out
}

vote$fav_trump_2016_simple <- simplify_fav(vote$fav_trump_2016)
vote$fav_trump_2017_simple <- simplify_fav(vote$fav_trump_2017)
vote$fav_trump_2018_simple <- simplify_fav(vote$fav_trump_2018)
#str(vote)
# change in favorability from 2016 to 2017
vote$change_16_17 <- classify_change(vote$fav_trump_2016_simple,
                                     vote$fav_trump_2017_simple)
table(vote$change_16_17)
# and from 2017 to 2018
vote$change_17_18 <- classify_change(vote$fav_trump_2017_simple,
                                     vote$fav_trump_2018_simple)
table(vote$change_17_18)
| /favorability.R | no_license | wmerrow/trump-voters | R | false | false | 2,430 | r |
# Collapse the 4-point Trump favorability scale into three buckets and
# classify year-over-year change on the `vote` data frame loaded earlier.

# Map raw favorability codes to a simplified label:
# 1/2 ("very"/"somewhat" favorable) -> "fav", 3/4 -> "unfav",
# 8 (don't know) -> "dk"; any other code (e.g. refused) stays NA.
simplify_fav <- function(code) {
  out <- rep(NA_character_, length(code))
  out[code %in% c(1, 2)] <- "fav"
  out[code %in% c(3, 4)] <- "unfav"
  out[code == 8] <- "dk"
  out
}

# Classify the transition between two simplified favorability vectors.
# NA in either year leaves the result NA (same as the original masks).
classify_change <- function(from, to) {
  out <- rep(NA_character_, length(from))
  out[from == "fav" & to %in% c("unfav", "dk")] <- "decrease"
  out[from == "dk" & to == "unfav"] <- "decrease"
  out[from == "unfav" & to %in% c("fav", "dk")] <- "increase"
  out[from == "dk" & to == "fav"] <- "increase"
  out[from == "fav" & to == "fav"] <- "remain fav"
  out[from == "unfav" & to == "unfav"] <- "remain unfav"
  out[from == "dk" & to == "dk"] <- "remain dk"
  out
}

vote$fav_trump_2016_simple <- simplify_fav(vote$fav_trump_2016)
vote$fav_trump_2017_simple <- simplify_fav(vote$fav_trump_2017)
vote$fav_trump_2018_simple <- simplify_fav(vote$fav_trump_2018)
#str(vote)
# change in favorability from 2016 to 2017
vote$change_16_17 <- classify_change(vote$fav_trump_2016_simple,
                                     vote$fav_trump_2017_simple)
table(vote$change_16_17)
# and from 2017 to 2018
vote$change_17_18 <- classify_change(vote$fav_trump_2017_simple,
                                     vote$fav_trump_2018_simple)
table(vote$change_17_18)
|
# Working directory where the .rds files are located -- adjust as needed.
cur_dir <- "C:/dev/R/Coursera/Proj2"
setwd(cur_dir)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
NEI.Baltimore <- subset(NEI, fips == "24510") # Baltimore City, Maryland (fips == "24510")
SCC <- readRDS("Source_Classification_Code.rds")
# Total emissions per year and source type for Baltimore City
df.tot.emiss <- aggregate(NEI.Baltimore[, "Emissions"],
                          list(Year = NEI.Baltimore$year,
                               Type = NEI.Baltimore$type),
                          FUN = "sum")
library(ggplot2)
library(scales)
gg <- ggplot(df.tot.emiss, aes(x = Year, y = x)) +
  geom_line() +
  scale_y_continuous(name = "Total Emissions", labels = comma)
gg <- gg + ggtitle("Baltimore City, Maryland")
gg <- gg + facet_wrap(~Type)
# Render straight to the PNG device. The original called dev.copy() before
# the plot had ever been printed (print(gg) was commented out), so an empty
# device was being saved.
png("plot3.png", width = 480, height = 480)
print(gg)
dev.off()
| /plot3.R | no_license | rofeld/ExData_Prjoj2 | R | false | false | 783 | r | cur_dir = "C:/dev/R/Coursera/Proj2" #working directory where .rds files are located
setwd(cur_dir)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
NEI.Baltimore <- subset(NEI, fips == "24510") # Baltimore City, Maryland (fips == "24510")
SCC <- readRDS("Source_Classification_Code.rds")
# Total emissions per year and source type for Baltimore City
df.tot.emiss <- aggregate(NEI.Baltimore[, "Emissions"],
                          list(Year = NEI.Baltimore$year,
                               Type = NEI.Baltimore$type),
                          FUN = "sum")
library(ggplot2)
library(scales)
gg <- ggplot(df.tot.emiss, aes(x = Year, y = x)) +
  geom_line() +
  scale_y_continuous(name = "Total Emissions", labels = comma)
gg <- gg + ggtitle("Baltimore City, Maryland")
gg <- gg + facet_wrap(~Type)
# Render straight to the PNG device. The original called dev.copy() before
# the plot had ever been printed (print(gg) was commented out), so an empty
# device was being saved.
png("plot3.png", width = 480, height = 480)
print(gg)
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getTrack.R
\name{getTrack}
\alias{getTrack}
\title{Get a Track}
\usage{
getTrack(track_id, token)
}
\arguments{
\item{track_id}{The Spotify ID for the track.}
\item{token}{An OAuth token created with \code{spotifyOAuth}.}
}
\description{
Get Spotify catalog information for a single track identified by its unique Spotify ID.
}
| /man/getTrack.Rd | no_license | cran/Rspotify | R | false | true | 407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getTrack.R
\name{getTrack}
\alias{getTrack}
\title{Get a Track}
\usage{
getTrack(track_id, token)
}
\arguments{
\item{track_id}{The Spotify ID for the track.}
\item{token}{An OAuth token created with \code{spotifyOAuth}.}
}
\description{
Get Spotify catalog information for a single track identified by its unique Spotify ID.
}
|
## Create a matrix wrapper that caches its inverse.
##
## Returns a list of four accessor functions that close over the matrix `x`
## and a cache slot for its inverse:
##   set(y)        -- replace the stored matrix and clear the cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
        cached_inverse <- NULL
        list(
                set = function(y) {
                        x <<- y
                        cached_inverse <<- NULL
                },
                get = function() x,
                setinverse = function(solve) cached_inverse <<- solve,
                getinverse = function() cached_inverse
        )
}
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix().  If the inverse is already cached it is returned
## directly (with a "getting cached data" message), skipping the solve();
## otherwise it is computed with solve(), stored via x$setinverse(), and
## returned.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        inv <- x$getinverse()
        if (is.null(inv)) {
                inv <- solve(x$get(), ...)
                x$setinverse(inv)
        } else {
                message("getting cached data")
        }
        inv
}
| /cachematrix.R | no_license | RezaKatebi/ProgrammingAssignment2 | R | false | false | 1,260 | r | ## Put comments here that give an overall description of what your
## functions do
## The function makeCacheMatrix takes a matrix and returns
## a special matrix and also it stores the inverse of the matrix
## in cache
makeCacheMatrix <- function(x = matrix()) {
mat_in <- NULL
set <- function(y){
x <<- y
mat_in <<- NULL
}
get <- function() x
setinverse <- function(solve) mat_in <<- solve
getinverse <- function() mat_in
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## caheSolve takes the output matrix of the makeCacheMatrix
## and returns the inverse matrix. However, first it checks
## to see if the inversed matrix is already saved in cache and
## if so it returns the saved one instead of computing
## it again which saves computing resourses and time
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
mat_in <- x$getinverse()
if(!is.null(mat_in)){
message("getting cached data")
return(mat_in)
}
data <- x$get()
mat_in <- solve(data,...)
x$setinverse(mat_in)
mat_in
}
|
# flipBox: build a Bootstrap "flip card" UI element for Shiny.
#
# Returns a shiny::tagList containing (1) a singleton <head> block holding
# per-card CSS plus the jQuery click handlers that toggle the rotation
# classes, and (2) the card markup: a front face (background header image,
# main image, title, the front content passed in `...`, a "More" button)
# and a back face (a "Back" button, back_title, back_content).
#
# Arguments (interface unchanged):
#   ...              front-face body content
#   back_content     back-face body content
#   id               unique card id; used to namespace CSS classes and
#                    button ids so several cards can coexist on one page
#   front_title, back_title        titles for the two faces
#   front_btn_text, back_btn_text  labels for the flip buttons
#   header_img       URL used as the front-face background image
#   main_img         URL of the front-face main image
#   width            Bootstrap column width (class "col sm-<width>")
flipBox <-
function (..., back_content, id, front_title = NULL, back_title = NULL,
    front_btn_text = "More", back_btn_text = "Back to main",
    header_img = NULL, main_img = NULL, width = 6)
{
    # Button/class ids for the two faces.  NOTE(review): id_back is
    # id + 10000, which assumes a numeric id scheme where ids stay less
    # than 10000 apart -- collisions are possible; confirm caller's ids.
    id_front <- id
    id_back <- id_front + 10000
    # NOTE(review): this guard runs after `id` has already been evaluated
    # above, so a missing id errors before reaching this message.
    if (is.null(id))
        stop("card id cannot be null and must be unique")
    # Card markup: front face then back face, both inside the
    # .rotate-container wrapper targeted by the CSS emitted below.
    flipBoxTag <- shiny::tags$div(class = paste0("col sm-",
        width), shiny::tags$div(class = "rotate-container",
        shiny::tags$div(class = paste0("card card-front-", id,
            " text-center"), style = "background-color: #ffffff;",
            shiny::tags$div(class = paste0("card-background-",
                id)), shiny::tags$div(class = "card-block", style="margin-top:-15%",
                shiny::tags$img(src = main_img),
                shiny::tags$h3(class = "card-title", front_title), shiny::tags$p(...), shiny::tags$button(id = paste0("btn-",
                  id_front), class = "btn btn-primary btn-rotate",
                  shiny::tags$i(class = "fa fa-long-arrow-right"),
                  front_btn_text))), shiny::tags$div(class = paste0("card card-back-",
            id, " text-center"), style = "background-color: #ffffff;",
            shiny::br(), shiny::tags$div(class = "card-header",
                shiny::tags$p(shiny::tags$button(id = paste0("btn-",
                  id_back), class = "btn btn-primary btn-rotate",
                  shiny::tags$i(class = "fa fa-long-arrow-left"),
                  back_btn_text), shiny::h4(back_title))), shiny::hr(),
            shiny::tags$div(class = "card-block", shiny::tags$p(back_content)))))
    # Emit the CSS/JS head singleton plus the markup.  The long strings
    # below are generated CSS and jQuery and must stay byte-identical;
    # they interpolate `id`, `id_front`, `id_back` and `header_img`.
    shiny::tagList(shiny::singleton(shiny::tags$head(shiny::tags$style(shiny::HTML(paste0("/* Card styles for rotation */\n .rotate-container {\n position: relative;\n }\n .rotate-container .card-front-",
    id, ", .rotate-container .card-back-", id, " {\n width: 100%;\n height: 100%;\n -webkit-transform: perspective(600px) rotateY(0deg);\n transform: perspective(600px) rotateY(0deg);\n -webkit-backface-visibility: hidden;\n backface-visibility: hidden;\n transition: all 0.5s linear 0s;\n }\n .rotate-container .card-back-",
    id, " {\n -webkit-transform: perspective(1600px) rotateY(180deg);\n transform: perspective(1600px) rotateY(180deg);\n position: absolute;\n top: 0;\n left: 0;\n right: 0;\n }\n .rotate-container .rotate-card-front-",
    id, " {\n -webkit-transform: perspective(1600px) rotateY(-180deg);\n transform: perspective(1600px) rotateY(-180deg);\n }\n .rotate-container .rotate-card-back-",
    id, " {\n -webkit-transform: perspective(1600px) rotateY(0deg);\n transform: perspective(1600px) rotateY(0deg);\n }\n \n /* Modified card styles */\n .card {\n box-shadow: 0 8px 6px -6px rgba(0, 0, 0, 0.5);\n }\n .card .card-header p {\n margin: 0;\n }\n \n .card .card-background-",
    id, " {\n background: url('", header_img,
    "');\n height: 8em;\n background-position: center center;\n background-size: cover;\n }\n .card .avatar {\n max-width: 6em;\n max-height: 6em;\n margin-top: -4em;\n margin-bottom: 1em;\n border: 4px solid white;\n border-radius: 50%;\n background: radial-gradient(#e3e3e3, #329A7C, #109381);\n }\n .card .btn {\n margin-bottom: 1em;\n cursor: pointer;\n }\n .card .social-links li {\n margin: 0.5em;\n }\n .card .social-links a {\n font-size: 1.5em;\n }\n "))),
    shiny::tags$script(shiny::HTML(paste0("$(function() {\n // For card rotation\n $('#btn-",
    id_front, "').click(function(){\n $('.card-front-",
    id, "').addClass(' rotate-card-front-", id, "');\n $('.card-back-",
    id, "').addClass(' rotate-card-back-", id, "');\n });\n $('#btn-",
    id_back, "').click(function(){\n $('.card-front-",
    id, "').removeClass(' rotate-card-front-", id, "');\n $('.card-back-",
    id, "').removeClass(' rotate-card-back-", id, "');\n });\n });\n "))))),
    flipBoxTag)
}
function (..., back_content, id, front_title = NULL, back_title = NULL,
front_btn_text = "More", back_btn_text = "Back to main",
header_img = NULL, main_img = NULL, width = 6)
{
id_front <- id
id_back <- id_front + 10000
if (is.null(id))
stop("card id cannot be null and must be unique")
flipBoxTag <- shiny::tags$div(class = paste0("col sm-",
width), shiny::tags$div(class = "rotate-container",
shiny::tags$div(class = paste0("card card-front-", id,
" text-center"), style = "background-color: #ffffff;",
shiny::tags$div(class = paste0("card-background-",
id)), shiny::tags$div(class = "card-block", style="margin-top:-15%",
shiny::tags$img(src = main_img),
shiny::tags$h3(class = "card-title", front_title), shiny::tags$p(...), shiny::tags$button(id = paste0("btn-",
id_front), class = "btn btn-primary btn-rotate",
shiny::tags$i(class = "fa fa-long-arrow-right"),
front_btn_text))), shiny::tags$div(class = paste0("card card-back-",
id, " text-center"), style = "background-color: #ffffff;",
shiny::br(), shiny::tags$div(class = "card-header",
shiny::tags$p(shiny::tags$button(id = paste0("btn-",
id_back), class = "btn btn-primary btn-rotate",
shiny::tags$i(class = "fa fa-long-arrow-left"),
back_btn_text), shiny::h4(back_title))), shiny::hr(),
shiny::tags$div(class = "card-block", shiny::tags$p(back_content)))))
shiny::tagList(shiny::singleton(shiny::tags$head(shiny::tags$style(shiny::HTML(paste0("/* Card styles for rotation */\n .rotate-container {\n position: relative;\n }\n .rotate-container .card-front-",
id, ", .rotate-container .card-back-", id, " {\n width: 100%;\n height: 100%;\n -webkit-transform: perspective(600px) rotateY(0deg);\n transform: perspective(600px) rotateY(0deg);\n -webkit-backface-visibility: hidden;\n backface-visibility: hidden;\n transition: all 0.5s linear 0s;\n }\n .rotate-container .card-back-",
id, " {\n -webkit-transform: perspective(1600px) rotateY(180deg);\n transform: perspective(1600px) rotateY(180deg);\n position: absolute;\n top: 0;\n left: 0;\n right: 0;\n }\n .rotate-container .rotate-card-front-",
id, " {\n -webkit-transform: perspective(1600px) rotateY(-180deg);\n transform: perspective(1600px) rotateY(-180deg);\n }\n .rotate-container .rotate-card-back-",
id, " {\n -webkit-transform: perspective(1600px) rotateY(0deg);\n transform: perspective(1600px) rotateY(0deg);\n }\n \n /* Modified card styles */\n .card {\n box-shadow: 0 8px 6px -6px rgba(0, 0, 0, 0.5);\n }\n .card .card-header p {\n margin: 0;\n }\n \n .card .card-background-",
id, " {\n background: url('", header_img,
"');\n height: 8em;\n background-position: center center;\n background-size: cover;\n }\n .card .avatar {\n max-width: 6em;\n max-height: 6em;\n margin-top: -4em;\n margin-bottom: 1em;\n border: 4px solid white;\n border-radius: 50%;\n background: radial-gradient(#e3e3e3, #329A7C, #109381);\n }\n .card .btn {\n margin-bottom: 1em;\n cursor: pointer;\n }\n .card .social-links li {\n margin: 0.5em;\n }\n .card .social-links a {\n font-size: 1.5em;\n }\n "))),
shiny::tags$script(shiny::HTML(paste0("$(function() {\n // For card rotation\n $('#btn-",
id_front, "').click(function(){\n $('.card-front-",
id, "').addClass(' rotate-card-front-", id, "');\n $('.card-back-",
id, "').addClass(' rotate-card-back-", id, "');\n });\n $('#btn-",
id_back, "').click(function(){\n $('.card-front-",
id, "').removeClass(' rotate-card-front-", id, "');\n $('.card-back-",
id, "').removeClass(' rotate-card-back-", id, "');\n });\n });\n "))))),
flipBoxTag)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SR.R
\name{SR}
\alias{SR}
\title{Calculate SR from mvr}
\usage{
SR(mvr)
}
\arguments{
\item{mvr}{pls model from pls package}
\item{ncomp}{ncomp}
}
\description{
Mostly edited and made to work by Y. Uwadaira of \code{plsropt} package.
Validated with the values from MATLAB's PLS toolbox v8.1.1.
}
| /man/SR.Rd | no_license | chengvt/cheng | R | false | true | 376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SR.R
\name{SR}
\alias{SR}
\title{Calculate SR from mvr}
\usage{
SR(mvr)
}
\arguments{
\item{mvr}{pls model from pls package}
\item{ncomp}{ncomp}
}
\description{
Mostly edited and made to work by Y. Uwadaira of \code{plsropt} package.
Validated with the values from MATLAB's PLS toolbox v8.1.1.
}
|
## Target function whose root we seek: f(x) = exp(x) - pi*x.
Func <- function(x) exp(x) - x * pi

## Secant-method root finder started from x0 and x1.
## Plots Func over [x0, x1], marks every iterate in blue, prints the
## progress of each iteration, and invisibly returns the final
## approximation of the root.
##
## Fix: the relative error is now abs((x2 - x1) / x2).  The original
## abs(x2 - x1) / x2 becomes negative whenever x2 < 0, which made the
## `error > tol` test fail and terminated the loop before convergence.
## The duplicated pre-loop/in-loop update code is also merged into a
## single repeat loop, and the tolerance is exposed as a parameter
## (default 1e-8, matching the original hard-coded value).
fSecante <- function(x0, x1, tol = 1.e-8) {
  x <- seq(x0, x1, 0.1)
  plot(x, Func(x), type = "l", col = "red")
  abline(h = 0)
  i <- 0
  repeat {
    # Secant update: intersect the chord through (x0,f(x0)),(x1,f(x1)).
    x2 <- x1 - ((x1 - x0) * Func(x1)) / (Func(x1) - Func(x0))
    error <- abs((x2 - x1) / x2)  # relative error, always non-negative
    points(rbind(c(x2, Func(x2))), pch = 15, cex = 0.4, col = "blue")
    cat("Iteracion=", i, "\tFunc(x)=", Func(x2), "\tX=", x2,
        "\tError=", error, "\n")
    if (error <= tol) break
    x0 <- x1
    x1 <- x2
    i <- i + 1
  }
  invisible(x2)  # new: return the root approximation (invisibly)
}
fSecante(0.2, 1)
fSecante <- function(x0,x1) {
x = seq(x0,x1,0.1)
plot(x,Func(x),type="l",col="red")
abline(h=0)
x2 = x1-(((x1-x0)*(Func(x1)))/(Func(x1)-Func(x0)))
error = abs(x2-x1)/x2
i = 0
aux = Func(x2)
points(rbind(c(x2,aux)),pch=15,cex=0.4,col="blue")
cat("Iteracion=",i,"\tFunc(x)=",Func(x2),"\tX=",x2,"\tError=",error,"\n")
while (error > 1.e-8) {
x0 = x1
x1 = x2
x2 = x1-(((x1-x0)*(Func(x1)))/(Func(x1)-Func(x0)))
i = i+1
error = abs(x2-x1)/x2
aux = Func(x2)
points(rbind(c(x2,aux)),pch=15,cex=0.4,col="blue")
cat("Iteracion=",i,"\tFunc(x)=",Func(x2),"\tX=",x2,"\tError=",error,"\n")
}
}
fSecante(0.2,1) |
# Exploratory Data Analysis course -- Plot 3.
# Reads the household power consumption data, keeps the two days
# 2007-02-01 and 2007-02-02, and plots the three sub-metering series over
# time, saving the figure to plot3.png.
datafile <- "E:/CourseraDataScience/Working Directory/household_power_consumption.txt"
# NOTE(review): the raw file marks missing values with "?"; without
# na.strings = "?" the as.numeric() coercions below turn those rows into
# NA (with a warning) -- confirm this is acceptable.
data <- read.table(datafile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days; dates are d/m/Y strings in the raw file.
Subs <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
str(Subs)  # prints the subset's structure to the console
globalActivePower <- as.numeric(Subs$Global_active_power)  # computed but unused below
# Combine Date and Time columns into timestamps for the x axis.
Date_Time <- strptime(paste(Subs$Date, Subs$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
subMetering1 <- as.numeric(Subs$Sub_metering_1)
subMetering2 <- as.numeric(Subs$Sub_metering_2)
subMetering3 <- as.numeric(Subs$Sub_metering_3)
# First draw the figure on the default (screen) device ...
plot(Date_Time, subMetering1, type="l", ylab="Energy Submetering", xlab="")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
lines(Date_Time, subMetering2, type="l", col="red")
lines(Date_Time, subMetering3, type="l", col="blue")
# ... then redraw the identical figure into a 480x480 PNG device.
png("plot3.png", width=480, height=480)
plot(Date_Time, subMetering1, type="l", ylab="Energy Submetering", xlab="")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
lines(Date_Time, subMetering2, type="l", col="red")
lines(Date_Time, subMetering3, type="l", col="blue")
dev.off()  # close the PNG device, flushing plot3.png to disk
data <- read.table(datafile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
Subs <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
str(Subs)
globalActivePower <- as.numeric(Subs$Global_active_power)
Date_Time <- strptime(paste(Subs$Date, Subs$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
subMetering1 <- as.numeric(Subs$Sub_metering_1)
subMetering2 <- as.numeric(Subs$Sub_metering_2)
subMetering3 <- as.numeric(Subs$Sub_metering_3)
plot(Date_Time, subMetering1, type="l", ylab="Energy Submetering", xlab="")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
lines(Date_Time, subMetering2, type="l", col="red")
lines(Date_Time, subMetering3, type="l", col="blue")
png("plot3.png", width=480, height=480)
plot(Date_Time, subMetering1, type="l", ylab="Energy Submetering", xlab="")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
lines(Date_Time, subMetering2, type="l", col="red")
lines(Date_Time, subMetering3, type="l", col="blue")
dev.off() |
# cirock: thin wrapper around the compiled ROBETH Fortran routine "cirockz".
#
# Arguments:
#   nvar  number of variables (required; messagena() reports it if missing)
#   em    tuning constant, defaulting to the package-level value .dFvGet()$em
#   cr    tuning constant, defaulting to .dFvGet()$cr
#   iopt  option flag passed straight through to the Fortran code (default 1)
#
# Returns list(vk = ...), the value filled in by the Fortran routine.
#
# NOTE(review): the argument order and the to.single()/to.integer()
# coercions must match the compiled cirockz signature exactly; do not
# reorder or retype these arguments.
"cirock" <-
function(nvar,em=.dFvGet()$em,cr=.dFvGet()$cr,iopt=1) {
if (missing(nvar)) messagena("nvar")
vk <- single(1)  # output slot, overwritten by the Fortran call
f.res <- .Fortran("cirockz",
em=to.single(em),
cr=to.single(cr),
nvar=to.integer(nvar),
iopt=to.integer(iopt),
vk=to.single(vk))
list(vk=f.res$vk)
}
| /R/cirock.R | no_license | cran/robeth | R | false | false | 282 | r | "cirock" <-
function(nvar,em=.dFvGet()$em,cr=.dFvGet()$cr,iopt=1) {
if (missing(nvar)) messagena("nvar")
vk <- single(1)
f.res <- .Fortran("cirockz",
em=to.single(em),
cr=to.single(cr),
nvar=to.integer(nvar),
iopt=to.integer(iopt),
vk=to.single(vk))
list(vk=f.res$vk)
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{make.design.matrix}
\alias{make.design.matrix}
\title{Construct standard LLM design matrix.}
\usage{
make.design.matrix(k = 3, order.max = k - 1, rasch = FALSE)
}
\arguments{
\item{k}{The number of lists}
\item{order.max}{The maximum number of lists to include in interaction terms}
\item{rasch}{Logical: if TRUE, include a column for the square of the number
of captures}
}
\value{
A design matrix as a data frame
}
\description{
Makes a design matrix for a (local) log-linear model
}
\author{
Zach Kurtz
}
| /man/make.design.matrix.Rd | permissive | zkurtz/lllcrc | R | false | false | 570 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{make.design.matrix}
\alias{make.design.matrix}
\title{Construct standard LLM design matrix.}
\usage{
make.design.matrix(k = 3, order.max = k - 1, rasch = FALSE)
}
\arguments{
\item{k}{The number of lists}
\item{order.max}{The maximum number of lists to include in interaction terms}
\item{rasch}{Logical: if TRUE, include a column for the square of the number
of captures}
}
\value{
A design matrix as a data frame
}
\description{
Makes a design matrix for a (local) log-linear model
}
\author{
Zach Kurtz
}
|
# Build the caret tuning grid for a C5.0 model from the request parameters.
# `dfParams` and `index` are supplied by the surrounding server code; this
# snippet only extracts the tuning values and expands them into `grid`.
#model <- dfParams[row, "tune"]$model$value
#winnow <- dfParams[row, "tune"]$winnow$value
#trials.min <- dfParams[row, "tune"]$trials$min
#trials.max <- dfParams[row, "tune"]$trials$max
#trials.step <- dfParams[row, "tune"]$trials$step
# NOTE(review): the hard-coded positions [1], [2] and [3] below assume a
# fixed element order inside each parameter entry (model value at 1,
# trials min/max/step at 2, winnow value at 3) -- confirm against the
# actual dfParams layout; selecting by name would be safer.
model <- dfParams$tune[[index]]$model$value[1]
trials.min <- as.numeric(dfParams$tune[[index]]$trials$min[2])
trials.max <- as.numeric(dfParams$tune[[index]]$trials$max[2])
trials.step <- as.numeric(dfParams$tune[[index]]$trials$step[2])
winnow <- dfParams$tune[[index]]$winnow$value[3]
# All combinations of model/winnow with the requested trials sequence,
# in the column layout caret expects for method = "C5.0".
grid <- expand.grid(
  model = model,
  winnow = winnow,
  trials = seq(from = trials.min, to = trials.max, by = trials.step)
)
| /scripts/grid/C5.0.R | permissive | LabRatGroup/LabRat-R-Server | R | false | false | 658 | r | #model <- dfParams[row, "tune"]$model$value
#winnow <- dfParams[row, "tune"]$winnow$value
#trials.min <- dfParams[row, "tune"]$trials$min
#trials.max <- dfParams[row, "tune"]$trials$max
#trials.step <- dfParams[row, "tune"]$trials$step
model <- dfParams$tune[[index]]$model$value[1]
trials.min <- as.numeric(dfParams$tune[[index]]$trials$min[2])
trials.max <- as.numeric(dfParams$tune[[index]]$trials$max[2])
trials.step <- as.numeric(dfParams$tune[[index]]$trials$step[2])
winnow <- dfParams$tune[[index]]$winnow$value[3]
grid <- expand.grid(
model = model,
winnow = winnow,
trials = seq(from = trials.min, to = trials.max, by = trials.step)
)
|
\name{refpkg}
\alias{refpkg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summary data and plots for reference packages
}
\description{
Summary data and plots for reference packages
}
\usage{
refpkg(refpkg_path,type="summary",rank_tree="species",
rank_pie=c("phylum","class","order","family","genus"),
scale_pie=TRUE,alpha_order=TRUE,cex.text=0.7,
cex.legend=1,asb=TRUE,rotate_label=TRUE,
out_krona="for_krona.txt",text2krona=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{refpkg_path}{
The path of the reference package directory
}
\item{type}{
The type of summary to perform with "summary", "taxonomy", "info", "tree", "pie" or "krona" available
}
\item{rank_tree}{
The desired rank for tree coloring
}
\item{rank_pie}{
The ranks to be plotted for the taxonomy pie chart
}
\item{scale_pie}{
Whether or not to take into account the number of sequences available within the reference package for the pie chart
}
\item{alpha_order}{
Whether or not the color should follow taxa alphabetic order when type is set to "tree"
}
\item{cex.text}{
The tip labels cex parameter when type is set to "tree" and the text cex parameter when type is set to "pie"
}
\item{cex.legend}{
The size of the legend when type set to "tree"
}
\item{asb}{
Add a scale bar on the tree
}
\item{rotate_label}{
Rotates the pie slice labels
}
\item{out_krona}{
The name of the output file when type is set to "krona".
}
\item{text2krona}{
The full path to the krona "ImportText.pl" script when KronaTools is installed and you wish to directly produce the html krona file.
}
}
\value{
A summary print on screen when type set to "summary".
A data frame when type set to "taxonomy" or "info".
A file written to the disk when type is set to "krona".
A plot otherwise.
}
\references{
https://github.com/marbl/Krona/wiki/KronaTools
http://fhcrc.github.io/taxtastic/
}
\author{
pierre lefeuvre
}
\examples{
refpkg_path <- paste(find.package("BoSSA"),"/extdata/example.refpkg",sep="")
### summary
refpkg(refpkg_path)
### taxonomy
taxonomy <- refpkg(refpkg_path,type="taxonomy")
head(taxonomy)
### info
refpkg(refpkg_path,type="info")
### tree
refpkg(refpkg_path,type="tree",rank_tree="order",cex.text=0.5)
### pie
refpkg(refpkg_path,type="pie",rank_pie=c("class","order","family"),cex.text=0.6)
### krona
# it will produce a flat text file
# this file can be used as input for the "ImportText.pl" krona script
# see https://github.com/marbl/Krona/wiki/KronaTools for more details on krona
\dontrun{
refpkg(refpkg_path,type="krona",out_krona="for_krona.txt")
}
}
| /man/refpkg.Rd | no_license | cran/BoSSA | R | false | false | 2,613 | rd | \name{refpkg}
\alias{refpkg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summary data and plots for reference packages
}
\description{
Summary data and plots for reference packages
}
\usage{
refpkg(refpkg_path,type="summary",rank_tree="species",
rank_pie=c("phylum","class","order","family","genus"),
scale_pie=TRUE,alpha_order=TRUE,cex.text=0.7,
cex.legend=1,asb=TRUE,rotate_label=TRUE,
out_krona="for_krona.txt",text2krona=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{refpkg_path}{
The path of the reference package directory
}
\item{type}{
The type of summary to perform with "summary", "taxonomy", "info", "tree", "pie" or "krona" available
}
\item{rank_tree}{
The desired rank for tree coloring
}
\item{rank_pie}{
The ranks to be plot for the taxonomy pie chart
}
\item{scale_pie}{
Wether or not to take into account the number of sequences available within the reference package for the pie chart
}
\item{alpha_order}{
Wether or not the color should follow taxa alpahabetic order when type set to "tree"
}
\item{cex.text}{
The tip labels cex parameter when type is set to "tree" and the text cex parameter when type is set to "pie"
}
\item{cex.legend}{
The size of the legend when type set to "tree"
}
\item{asb}{
Add a scale bar on the tree
}
\item{rotate_label}{
Rotates the pie slice labels
}
\item{out_krona}{
The name of the output file when type is set to "krona".
}
\item{text2krona}{
The full path to the krona "ImportText.pl" script when KronaTools is installed and you wish to directly produce the html krona file.
}
}
\value{
A summary print on screen when type set to "summary".
A data frame when type set to "taxonomy" or "info".
A file written to the disk when type is set to "krona".
A plot otherwise.
}
\references{
https://github.com/marbl/Krona/wiki/KronaTools
http://fhcrc.github.io/taxtastic/
}
\author{
pierre lefeuvre
}
\examples{
refpkg_path <- paste(find.package("BoSSA"),"/extdata/example.refpkg",sep="")
### summary
refpkg(refpkg_path)
### taxonomy
taxonomy <- refpkg(refpkg_path,type="taxonomy")
head(taxonomy)
### info
refpkg(refpkg_path,type="info")
### tree
refpkg(refpkg_path,type="tree",rank_tree="order",cex.text=0.5)
### pie
refpkg(refpkg_path,type="pie",rank_pie=c("class","order","family"),cex.text=0.6)
### krona
# it will produce a flat text file
# this file can be use as input for the the "ImportText.pl" krona script
# see https://github.com/marbl/Krona/wiki/KronaTools for more details on krona
\dontrun{
refpkg(refpkg_path,type="krona",out_krona="for_krona.txt")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.