content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
###############################################################
## Post-processing analysis for Microtus californicus ENMs
## Author: Tainá Rocha
## Date: May 2020
## Last Update: 16 April 2020
###############################################################
### Library packages
library(raster)
library(rgdal)
########## Present
# NOTE(review): setwd() assumes "Vole_raw_mean_Past/" exists under the
# current working directory -- confirm before running.
setwd("Vole_raw_mean_Past/")
# One sub-directory per model/scenario; each is processed independently.
sub <- list.dirs(full.names=TRUE, recursive=FALSE)
for(j in seq_along(sub)) { # seq_along() is safe when sub is empty (1:length would yield 1:0)
  print(sub[j])
  # pattern is a regex: '\\.tif$' matches only files ending in ".tif";
  # the old '.tif' matched any character followed by "tif" anywhere.
  h <- list.files(path=sub[j], recursive=TRUE, full.names=TRUE, pattern='\\.tif$')
  print(h)
  stack_present <- stack(h)
  print(stack_present)
  # Binary presence/absence map: suitability >= 0.20 threshold.
  binary_0.2 <- stack_present >= 0.20
  # Use the documented names() accessor for layer names; names(x@data)
  # returns the slot's element names, not the raster layer names.
  bin_names <- paste0(names(binary_0.2), "_bin.tif")
  writeRaster(binary_0.2, filename=bin_names, bylayer=TRUE, overwrite=TRUE)
  # Continuous suitability masked by the binary map (zero below threshold).
  continu <- stack_present * binary_0.2
  # Renamed from 'c' to avoid shadowing base::c().
  cont_names <- paste0(names(binary_0.2), "_cont.tif")
  writeRaster(continu, filename=cont_names, bylayer=TRUE, overwrite=TRUE)
}
########## End
# Create a folder for the new post-processed results
############ From here down does not work
#continu <- stack_present * binary_0.2
#c <- paste0(names(binary_0.2@data@names),"_cont.tif")
#c <- paste0(names(binary_0.2@data),"_cont.tif")
#layer_names_cont <- names(binary_0.2)
#layer_names_cont
#names(continu) <- layer_names_cont
#writeRaster(continu, filename=c, bylayer=TRUE, overwrite=TRUE)
|
/Rscripts/LIG_Post-processing_analysis.R
|
no_license
|
Tai-Rocha/Vole
|
R
| false
| false
| 1,375
|
r
|
###############################################################
## Post-processing analysis for Microtus californicus ENMs
## Author: Tainá Rocha
## Date: May 2020
## Last Update: 16 April 2020
###############################################################
### Library packages
library(raster)
library(rgdal)
########## Present
# NOTE(review): setwd() assumes "Vole_raw_mean_Past/" exists under the
# current working directory -- confirm before running.
setwd("Vole_raw_mean_Past/")
# One sub-directory per model/scenario; each is processed independently.
sub <- list.dirs(full.names=TRUE, recursive=FALSE)
for(j in seq_along(sub)) { # seq_along() is safe when sub is empty (1:length would yield 1:0)
  print(sub[j])
  # pattern is a regex: '\\.tif$' matches only files ending in ".tif";
  # the old '.tif' matched any character followed by "tif" anywhere.
  h <- list.files(path=sub[j], recursive=TRUE, full.names=TRUE, pattern='\\.tif$')
  print(h)
  stack_present <- stack(h)
  print(stack_present)
  # Binary presence/absence map: suitability >= 0.20 threshold.
  binary_0.2 <- stack_present >= 0.20
  # Use the documented names() accessor for layer names; names(x@data)
  # returns the slot's element names, not the raster layer names.
  bin_names <- paste0(names(binary_0.2), "_bin.tif")
  writeRaster(binary_0.2, filename=bin_names, bylayer=TRUE, overwrite=TRUE)
  # Continuous suitability masked by the binary map (zero below threshold).
  continu <- stack_present * binary_0.2
  # Renamed from 'c' to avoid shadowing base::c().
  cont_names <- paste0(names(binary_0.2), "_cont.tif")
  writeRaster(continu, filename=cont_names, bylayer=TRUE, overwrite=TRUE)
}
########## End
# Create a folder for the new post-processed results
############ From here down does not work
#continu <- stack_present * binary_0.2
#c <- paste0(names(binary_0.2@data@names),"_cont.tif")
#c <- paste0(names(binary_0.2@data),"_cont.tif")
#layer_names_cont <- names(binary_0.2)
#layer_names_cont
#names(continu) <- layer_names_cont
#writeRaster(continu, filename=c, bylayer=TRUE, overwrite=TRUE)
|
#library for PROSAIL and also Spectral Angle Mapper
library(hsdar)
#library that has some extra random generation tools
library(MCMCglmm)
#Latin hypercube comes from here
library(FME)
#for more options on generating random samples (duplicate load; harmless)
library(MCMCglmm)
#the single target classifier
library(randomForest)
#investigate
library(randomForestSRC)
#GIS/RS
library(maptools)
library(rgeos)
library(rgdal)
library(raster)
library(sp)
# PROSAIL parameter glossary -------------------------------------------
#N Structure parameter
#Cab chlorophyll content
#Car Carotenoid content
#Cbrown Brown pigment content
#Cw Equivalent water thickness
#Cm Dry matter content
#psoil Dry/Wet soil factor
#LAI Leaf area index
#TypeLidf Type of leaf angle distribution. See details section
#lidfa Leaf angle distribution. See details section
#lidfb Leaf angle distribution. See details section
#hspot Hotspot parameter
#tts Solar zenith angle
#tto Observer zenith angle
#psi Relative azimuth angle
#parameterList An optional object of class 'data.frame'. Function will iterate over rows of parameterList setting missing entries to default values. See examples section.
#rsoil background (soil) reflectance. N
#these parameters come from here
#https://www.sciencedirect.com/science/article/pii/S0303243415000100
#also of interest: https://www.mdpi.com/2072-4292/11/13/1597/htm
#Car limits taken from: https://www.mdpi.com/2072-4292/8/2/119
# Trait sampling ranges: one row per trait, columns are (min, max).
param.maxmin <- matrix(c(1.5, 1.9, #leaf layers or leaf structure
                         15, 55, #Cab
                         0, 15, #Car
                         0.005, 0.02, #Cw #originally [0.01 to 0.02] but i suspect saturation
                         0.005, 0.01, #Cm
                         0.1, 8, #LAI
                         0.05, 0.1), #hspot
                       nrow=7, ncol = 2, byrow = TRUE) # TRUE, not T (T is reassignable)
#we can now generate thousands of samples: 1000 per trait
prosail.runs <- nrow(param.maxmin)*1000
LHS <- Latinhyper(param.maxmin,prosail.runs)
#now lets train a mRF on all these parameters at once and see if we are able to do good predictions
#from snap s2
#sun_zenith mean: 25.8734
#sun_azimuth mean: 141.9867
#view_zenith mean: 5.4748
#view_azimuth mean: 281.0692
sun_zenith <- 25.8734
obs_zenith <- 5.4748
rel_azimut <- 281.0692 - 5.4748
#first we must generate a new sampling of the trait space
param.list <- data.frame(C1=LHS[,1],C2=LHS[,2],
                         C3=LHS[,3],C4=LHS[,4],
                         C5=LHS[,5],C6=LHS[,6],
                         C7=LHS[,7],
                         TypeLidf="Erectophile",
                         tts=sun_zenith, #fixed zenith, azimuth and rel azimuth
                         tto=obs_zenith,
                         psi=rel_azimut,
                         psoil=1, #psoil - fixed as the average seen in the paper [0.5 to 1.5]
                         lidfa=65) #fixed also the average leaf angle
#this might require change if you change something before
#now the names must be changed
names.traits <- c("N","Cab",
                  "Car","Cw",
                  "Cm","LAI","hspot")
c(names.traits,names(param.list)[8:13])
colnames(param.list)<-c(names.traits,names(param.list)[8:13])
# Simulate reflectance spectra for every sampled trait combination.
mRF.spclib <- PROSAIL(parameterList = param.list)
# Resample the 1 nm PROSAIL output onto the Sentinel-2 band set.
mRF.s2.spclib <- spectralResampling(mRF.spclib,
                                    "Sentinel2",response_function = TRUE)
plot(mRF.spclib)
plot(mRF.s2.spclib)
#lets pick only the bands that are on our dataset
#b 2,3,4,5,6,7,8a,11,12
mRF.s2.spc.sel <- mRF.s2.spclib[,c(2,3,4,5,6,7,9,12,13)]
head(mRF.s2.spc.sel)
#brings it all together
full.df <- cbind(param.list,as.data.frame(mRF.s2.spc.sel))
names(full.df)
#next step is optional
names(full.df) <- c(c(names.traits,names(param.list)[8:13]),
                    "B02","B03","B04",
                    "B05","B06","B07",
                    "B8A","B11","B12")
names(full.df)
#before going into the model we should remove everything that is constant... since we DONT want to predict that
#just one type of leaf
full.df <- full.df[,-c(8:13)]
names(full.df)
# Train the multivariate RF: predict all 7 traits jointly from the 9 bands.
mRF_all <- rfsrc(Multivar(N,Cab,Car,
                          Cw,Cm,LAI,hspot)~.,data=full.df,block.size = 50)
#now, we will get the samples from the point shapefile we created elsewhere - this case only grasslands
# maptools::readShapePoints() is defunct; raster::shapefile() reads the same file.
neon.test.shp <- shapefile("D:/NEON_Data/CLBJ/NLCD_subsection/NLCD_grass_AOI_small_pts_5kSample.shp")
#now lets extract the values from the sentinel image
s2.AOI.stack <- stack("D:/NEON_Data/CLBJ/S2A_MSIL2A_20190420/SL2A_20190420_T14SPB_AOI.tif")/10000 #to convert to reflectance
#the names of the layers come in different order, for simplicity lets make it all the same
names(s2.AOI.stack)<-c("B02","B03","B04",
                       "B05","B06","B07",
                       "B8A","B11","B12")
s2.AOI.refl.shp <- raster::extract(s2.AOI.stack,
                                   neon.test.shp,
                                   sp=TRUE)
#now lets load all the data from the traits of NEON and have that also in our shape
#loading the raster
# pattern is a regex: '\\.tif$' matches only names ending in ".tif"
# (the old '.tif' also matched e.g. "Xtif" anywhere in the name).
# NOTE(review): re-check the [-1] drop below after this change.
path2neon <- list.files("D:/NEON_Data/CLBJ/Processed/",
                        pattern="\\.tif$",
                        full.names=TRUE)
name2neon <- list.files("D:/NEON_Data/CLBJ/Processed/",
                        pattern="\\.tif$")
#now we remove the one that doesn't matter.. BEWARE - if you change anything .tif in the folder, this might change
path2neon <- path2neon[-1]
name2neon <- name2neon[-1]
path2neon
name2neon
#they also need to be cropped to the same extents or we cant stack them
# maptools::readShapePoly() is defunct; raster::shapefile() reads the same file.
AOI_small.shp <- shapefile("D:/NEON_Data/CLBJ/qgis_shape_bound/qgis_shape_bound_small_Area_diss.shp")
for (i in seq_along(path2neon)){ # seq_along() is safe when the vector is empty
  out.file <- paste("D:/NEON_Data/CLBJ/Processed_cropped/Small_",name2neon[i],sep="")
  print(out.file)
  crop(x=raster(path2neon[i]), y=AOI_small.shp,
       filename=out.file,
       options=c("COMPRESS=LZW"),
       overwrite=TRUE)
}
#loading the raster
path2neon.small <- list.files("D:/NEON_Data/CLBJ/Processed_cropped/",
                              pattern="\\.tif$",
                              full.names=TRUE)
name2neon.small <- list.files("D:/NEON_Data/CLBJ/Processed_cropped/",
                              pattern="\\.tif$")
#now we can stack and extract everything in one go
valid.shp <- raster::extract(stack(path2neon.small),
                             s2.AOI.refl.shp,
                             sp=TRUE)
head(valid.shp)
#now we want proper names... these names are so freaking bad
names(valid.shp) <- c(names(valid.shp)[1:12],
                      c("biomass","chm","fpar",
                        "lai","msi","ndii",
                        "ndwi","ndmi","wbi"))
head(valid.shp)
#now we have everything to make some predictions; use the trained mRF
valid.df <- as.data.frame(valid.shp)
head(valid.df)
#only the bands to avoid confusion
valid.df.bandsOnly <- valid.df[,c(4:12)]
head(valid.df.bandsOnly)
#now lets do some predictions
mRF.test.pred <- predict(mRF_all,newdata=valid.df.bandsOnly)
out.mRF.test.pred <- get.mv.predicted(mRF.test.pred)
#lets wrap it up in a dataframe
test.df <- as.data.frame(out.mRF.test.pred)
names(test.df)
valid.df.neonOnly <- valid.df[,c(13:21)]
names(valid.df.neonOnly)
test.df <- cbind(test.df,valid.df.neonOnly)
#using the function of https://www.mdpi.com/2072-4292/11/13/1597/htm - AGB as a function of LAI*cm
par(mfrow=c(1,1))
plot(test.df$lai,test.df$LAI,xlab="NEON data",ylab="Prediction (mRF): LAI")
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df$msi,test.df$Cw,xlab="NEON data: MSI",ylab="Prediction (mRF): Cw")
plot(test.df$ndii,test.df$Cw,xlab="NEON data: ndii",ylab="Prediction (mRF): Cw")
plot(test.df$ndwi,test.df$Cw,xlab="NEON data: ndwi",ylab="Prediction (mRF): Cw")
plot(test.df$ndmi,test.df$Cw,xlab="NEON data: ndmi",ylab="Prediction (mRF): Cw")
plot(test.df$wbi,test.df$Cw,xlab="NEON data: wbi",ylab="Prediction (mRF): Cw")
#using the function of https://www.mdpi.com/2072-4292/11/13/1597/htm - AGB as a function of LAI*cm
par(mfrow=c(1,1))
plot(test.df$biomass,(test.df$Cm*test.df$LAI),xlab="NEON data: biomass",ylab="Prediction (mRF): Cm*LAI")
#testing to fpar
plot(test.df$fpar,(test.df$LAI),xlab="NEON data: fpar",ylab="Prediction (mRF): LAI")
#check this
names(test.df)
paste("mRF_",names(test.df)[1:7],sep="")
paste("NEON_",names(test.df)[8:16],sep="")
names(test.df) <- c(paste("mRF_",names(test.df)[1:7],sep=""),
                    paste("NEON_",names(test.df)[8:16],sep=""))
names(test.df)
library(corrplot)
M<-cor(test.df)
corrplot(M, type="upper")
##recreate the reflectances to see if we can calculate the indices
new.param.list <- as.data.frame(out.mRF.test.pred)
head(new.param.list)
new.param.list$TypeLidf <- "Erectophile"
new.param.list$tts <- sun_zenith #fixed zenith, azimuth and rel azimuth
new.param.list$tto <- obs_zenith
new.param.list$psi <- rel_azimut
new.param.list$psoil <- 1 #psoil - fixed as the average seen in the paper [0.5 to 1.5]
new.param.list$lidfa <- 65
head(new.param.list)
pred.prosail.scplib <- PROSAIL(parameterList = new.param.list)
par(mfrow=c(1,1))
plot(pred.prosail.scplib)
#now we create a dataframe to receive everything
pred.spectra <- as.data.frame(spectra(pred.prosail.scplib))
names(pred.spectra) <- paste("band_",pred.prosail.scplib@wavelength,sep="")
names(pred.spectra)
#now we calculate the indices
pred.indices <- data.frame(linenr=seq_len(nrow(pred.spectra))) # seq_len, not seq(1:n)
#sources: https://www.harrisgeospatial.com/docs/CanopyWaterContent.html
pred.indices$MSI <- pred.spectra$band_1599/pred.spectra$band_819
pred.indices$NDII <- (pred.spectra$band_819-pred.spectra$band_1649)/(pred.spectra$band_819+pred.spectra$band_1649)
pred.indices$NDWI <- (pred.spectra$band_857-pred.spectra$band_1241)/(pred.spectra$band_857+pred.spectra$band_1241)
pred.indices$NDMI <- (pred.spectra$band_860-(pred.spectra$band_1640-pred.spectra$band_2130))/(pred.spectra$band_860+(pred.spectra$band_1640-pred.spectra$band_2130))
pred.indices$WBI <- pred.spectra$band_970/pred.spectra$band_900
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df$NEON_msi ,pred.indices$MSI,xlab="NEON data: MSI",ylab="Predicted MSI")
plot(test.df$NEON_ndii,pred.indices$NDII,xlab="NEON data: ndii",ylab="Predicted NDII")
plot(test.df$NEON_ndwi,pred.indices$NDWI,xlab="NEON data: ndwi",ylab="Predicted NDWI")
plot(test.df$NEON_ndmi,pred.indices$NDMI,xlab="NEON data: ndmi",ylab="Predicted NDMI")
plot(test.df$NEON_wbi ,pred.indices$WBI,xlab="NEON data: wbi",ylab="Predicted WBI")
#can it be an effect of outliers?
par(mfrow=c(1,1))
ncol(test.df)
par(mfrow=c(3,3))
for (i in 8:ncol(test.df)){
  boxplot(test.df[,i])
}
test.df.redux <- test.df
# Iteratively drop rows flagged as boxplot outliers in any NEON column.
for (i in 8:ncol(test.df)){
  outlier.redux <- boxplot(test.df.redux[,i], plot=FALSE)$out
  print(length(outlier.redux))
  if (length(outlier.redux)!=0){
    test.df.redux<-test.df.redux[-which(test.df.redux[,i] %in% outlier.redux),]
  }
  boxplot(test.df.redux[,i],main=names(test.df.redux)[i])
}
#so now, i removed all the outliers from the validation data. How many points do i have left?
nrow(test.df.redux)
#still over 4k, lets repeat the previous
par(mfrow=c(1,1))
plot(test.df.redux$NEON_lai,test.df.redux$mRF_LAI,xlab="NEON data (no outliers)",ylab="Prediction (mRF): LAI")
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df.redux$NEON_msi ,test.df.redux$mRF_Cw,xlab="NEON data: MSI",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_ndii,test.df.redux$mRF_Cw,xlab="NEON data: ndii",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_ndwi,test.df.redux$mRF_Cw,xlab="NEON data: ndwi",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_ndmi,test.df.redux$mRF_Cw,xlab="NEON data: ndmi",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_wbi ,test.df.redux$mRF_Cw,xlab="NEON data: wbi",ylab="Prediction (mRF): Cw")
##recreate the reflectances to see if we can calculate the indices - redux dataset
names(test.df.redux)
new.param.list.redux <- test.df.redux[,1:7]
head(new.param.list.redux)
names(new.param.list.redux) <- names(as.data.frame(out.mRF.test.pred))
head(new.param.list.redux)
new.param.list.redux$TypeLidf <- "Erectophile"
new.param.list.redux$tts <- sun_zenith #fixed zenith, azimuth and rel azimuth
new.param.list.redux$tto <- obs_zenith
new.param.list.redux$psi <- rel_azimut
new.param.list.redux$psoil <- 1 #psoil - fixed as the average seen in the paper [0.5 to 1.5]
new.param.list.redux$lidfa <- 65
head(new.param.list.redux)
pred.prosail.scplib.redux <- PROSAIL(parameterList = new.param.list.redux)
par(mfrow=c(1,1))
plot(pred.prosail.scplib.redux)
#now we create a dataframe to receive everything
pred.spectra.redux <- as.data.frame(spectra(pred.prosail.scplib.redux))
names(pred.spectra.redux) <- paste("band_",pred.prosail.scplib.redux@wavelength,sep="")
names(pred.spectra.redux)
#now we calculate the indices
pred.indices.redux <- data.frame(linenr=seq_len(nrow(pred.spectra.redux)))
#sources: https://www.harrisgeospatial.com/docs/CanopyWaterContent.html
pred.indices.redux$MSI <- pred.spectra.redux$band_1599/pred.spectra.redux$band_819
pred.indices.redux$NDII <- (pred.spectra.redux$band_819-pred.spectra.redux$band_1649)/(pred.spectra.redux$band_819+pred.spectra.redux$band_1649)
pred.indices.redux$NDWI <- (pred.spectra.redux$band_857-pred.spectra.redux$band_1241)/(pred.spectra.redux$band_857+pred.spectra.redux$band_1241)
pred.indices.redux$NDMI <- (pred.spectra.redux$band_860-(pred.spectra.redux$band_1640-pred.spectra.redux$band_2130))/(pred.spectra.redux$band_860+(pred.spectra.redux$band_1640-pred.spectra.redux$band_2130))
pred.indices.redux$WBI <- pred.spectra.redux$band_970/pred.spectra.redux$band_900
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df.redux$NEON_msi ,pred.indices.redux$MSI,xlab="NEON data: MSI",ylab="Predicted MSI")
plot(test.df.redux$NEON_ndii,pred.indices.redux$NDII,xlab="NEON data: ndii",ylab="Predicted NDII")
plot(test.df.redux$NEON_ndwi,pred.indices.redux$NDWI,xlab="NEON data: ndwi",ylab="Predicted NDWI")
plot(test.df.redux$NEON_ndmi,pred.indices.redux$NDMI,xlab="NEON data: ndmi",ylab="Predicted NDMI")
plot(test.df.redux$NEON_wbi ,pred.indices.redux$WBI,xlab="NEON data: wbi",ylab="Predicted WBI")
#test r squared
summary(lm(pred.indices.redux$MSI~test.df.redux$NEON_msi))$r.squared
names(test.df.redux)
names(pred.indices.redux)
redux.indices.table <- cbind(test.df.redux[,12:16],pred.indices.redux[,c(2:6)])
head(redux.indices.table)
library(corrplot)
par(mfrow=c(1,1))
M.indices.redux <-cor(redux.indices.table)
corrplot(M.indices.redux, type="upper")
#lets bring the unreduced
nrow(test.df)
nrow(pred.indices)
indices.table <- cbind(test.df[,c(12:16)],pred.indices[,c(2:6)])
names(indices.table)
M.indices <-cor(indices.table)
corrplot(M.indices, type="upper")
par(mfrow=c(1,2))
corrplot(M.indices, type="upper",title="With outliers",mar=c(0,0,1,0),addCoef.col = "black",diag=FALSE)
# 'main' is not a corrplot argument; use 'title' as on the previous call
corrplot(M.indices.redux, type="upper",title="Without outliers",mar=c(0,0,1,0),addCoef.col = "black",diag=FALSE)
|
/MiscScripts/mRF_NeonData.r
|
no_license
|
nunocesarsa/Vegetation-trait-modelling-
|
R
| false
| false
| 14,929
|
r
|
#library for PROSAIL and also Spectral Angle Mapper
library(hsdar)
#library that has some extra random generation tools
library(MCMCglmm)
#Latin hypercube comes from here
library(FME)
#for more options on generating random samples (duplicate load; harmless)
library(MCMCglmm)
#the single target classifier
library(randomForest)
#investigate
library(randomForestSRC)
#GIS/RS
library(maptools)
library(rgeos)
library(rgdal)
library(raster)
library(sp)
# PROSAIL parameter glossary -------------------------------------------
#N Structure parameter
#Cab chlorophyll content
#Car Carotenoid content
#Cbrown Brown pigment content
#Cw Equivalent water thickness
#Cm Dry matter content
#psoil Dry/Wet soil factor
#LAI Leaf area index
#TypeLidf Type of leaf angle distribution. See details section
#lidfa Leaf angle distribution. See details section
#lidfb Leaf angle distribution. See details section
#hspot Hotspot parameter
#tts Solar zenith angle
#tto Observer zenith angle
#psi Relative azimuth angle
#parameterList An optional object of class 'data.frame'. Function will iterate over rows of parameterList setting missing entries to default values. See examples section.
#rsoil background (soil) reflectance. N
#these parameters come from here
#https://www.sciencedirect.com/science/article/pii/S0303243415000100
#also of interest: https://www.mdpi.com/2072-4292/11/13/1597/htm
#Car limits taken from: https://www.mdpi.com/2072-4292/8/2/119
# Trait sampling ranges: one row per trait, columns are (min, max).
param.maxmin <- matrix(c(1.5, 1.9, #leaf layers or leaf structure
                         15, 55, #Cab
                         0, 15, #Car
                         0.005, 0.02, #Cw #originally [0.01 to 0.02] but i suspect saturation
                         0.005, 0.01, #Cm
                         0.1, 8, #LAI
                         0.05, 0.1), #hspot
                       nrow=7, ncol = 2, byrow = TRUE) # TRUE, not T (T is reassignable)
#we can now generate thousands of samples: 1000 per trait
prosail.runs <- nrow(param.maxmin)*1000
LHS <- Latinhyper(param.maxmin,prosail.runs)
#now lets train a mRF on all these parameters at once and see if we are able to do good predictions
#from snap s2
#sun_zenith mean: 25.8734
#sun_azimuth mean: 141.9867
#view_zenith mean: 5.4748
#view_azimuth mean: 281.0692
sun_zenith <- 25.8734
obs_zenith <- 5.4748
rel_azimut <- 281.0692 - 5.4748
#first we must generate a new sampling of the trait space
param.list <- data.frame(C1=LHS[,1],C2=LHS[,2],
                         C3=LHS[,3],C4=LHS[,4],
                         C5=LHS[,5],C6=LHS[,6],
                         C7=LHS[,7],
                         TypeLidf="Erectophile",
                         tts=sun_zenith, #fixed zenith, azimuth and rel azimuth
                         tto=obs_zenith,
                         psi=rel_azimut,
                         psoil=1, #psoil - fixed as the average seen in the paper [0.5 to 1.5]
                         lidfa=65) #fixed also the average leaf angle
#this might require change if you change something before
#now the names must be changed
names.traits <- c("N","Cab",
                  "Car","Cw",
                  "Cm","LAI","hspot")
c(names.traits,names(param.list)[8:13])
colnames(param.list)<-c(names.traits,names(param.list)[8:13])
# Simulate reflectance spectra for every sampled trait combination.
mRF.spclib <- PROSAIL(parameterList = param.list)
# Resample the 1 nm PROSAIL output onto the Sentinel-2 band set.
mRF.s2.spclib <- spectralResampling(mRF.spclib,
                                    "Sentinel2",response_function = TRUE)
plot(mRF.spclib)
plot(mRF.s2.spclib)
#lets pick only the bands that are on our dataset
#b 2,3,4,5,6,7,8a,11,12
mRF.s2.spc.sel <- mRF.s2.spclib[,c(2,3,4,5,6,7,9,12,13)]
head(mRF.s2.spc.sel)
#brings it all together
full.df <- cbind(param.list,as.data.frame(mRF.s2.spc.sel))
names(full.df)
#next step is optional
names(full.df) <- c(c(names.traits,names(param.list)[8:13]),
                    "B02","B03","B04",
                    "B05","B06","B07",
                    "B8A","B11","B12")
names(full.df)
#before going into the model we should remove everything that is constant... since we DONT want to predict that
#just one type of leaf
full.df <- full.df[,-c(8:13)]
names(full.df)
# Train the multivariate RF: predict all 7 traits jointly from the 9 bands.
mRF_all <- rfsrc(Multivar(N,Cab,Car,
                          Cw,Cm,LAI,hspot)~.,data=full.df,block.size = 50)
#now, we will get the samples from the point shapefile we created elsewhere - this case only grasslands
# maptools::readShapePoints() is defunct; raster::shapefile() reads the same file.
neon.test.shp <- shapefile("D:/NEON_Data/CLBJ/NLCD_subsection/NLCD_grass_AOI_small_pts_5kSample.shp")
#now lets extract the values from the sentinel image
s2.AOI.stack <- stack("D:/NEON_Data/CLBJ/S2A_MSIL2A_20190420/SL2A_20190420_T14SPB_AOI.tif")/10000 #to convert to reflectance
#the names of the layers come in different order, for simplicity lets make it all the same
names(s2.AOI.stack)<-c("B02","B03","B04",
                       "B05","B06","B07",
                       "B8A","B11","B12")
s2.AOI.refl.shp <- raster::extract(s2.AOI.stack,
                                   neon.test.shp,
                                   sp=TRUE)
#now lets load all the data from the traits of NEON and have that also in our shape
#loading the raster
# pattern is a regex: '\\.tif$' matches only names ending in ".tif"
# (the old '.tif' also matched e.g. "Xtif" anywhere in the name).
# NOTE(review): re-check the [-1] drop below after this change.
path2neon <- list.files("D:/NEON_Data/CLBJ/Processed/",
                        pattern="\\.tif$",
                        full.names=TRUE)
name2neon <- list.files("D:/NEON_Data/CLBJ/Processed/",
                        pattern="\\.tif$")
#now we remove the one that doesn't matter.. BEWARE - if you change anything .tif in the folder, this might change
path2neon <- path2neon[-1]
name2neon <- name2neon[-1]
path2neon
name2neon
#they also need to be cropped to the same extents or we cant stack them
# maptools::readShapePoly() is defunct; raster::shapefile() reads the same file.
AOI_small.shp <- shapefile("D:/NEON_Data/CLBJ/qgis_shape_bound/qgis_shape_bound_small_Area_diss.shp")
for (i in seq_along(path2neon)){ # seq_along() is safe when the vector is empty
  out.file <- paste("D:/NEON_Data/CLBJ/Processed_cropped/Small_",name2neon[i],sep="")
  print(out.file)
  crop(x=raster(path2neon[i]), y=AOI_small.shp,
       filename=out.file,
       options=c("COMPRESS=LZW"),
       overwrite=TRUE)
}
#loading the raster
path2neon.small <- list.files("D:/NEON_Data/CLBJ/Processed_cropped/",
                              pattern="\\.tif$",
                              full.names=TRUE)
name2neon.small <- list.files("D:/NEON_Data/CLBJ/Processed_cropped/",
                              pattern="\\.tif$")
#now we can stack and extract everything in one go
valid.shp <- raster::extract(stack(path2neon.small),
                             s2.AOI.refl.shp,
                             sp=TRUE)
head(valid.shp)
#now we want proper names... these names are so freaking bad
names(valid.shp) <- c(names(valid.shp)[1:12],
                      c("biomass","chm","fpar",
                        "lai","msi","ndii",
                        "ndwi","ndmi","wbi"))
head(valid.shp)
#now we have everything to make some predictions; use the trained mRF
valid.df <- as.data.frame(valid.shp)
head(valid.df)
#only the bands to avoid confusion
valid.df.bandsOnly <- valid.df[,c(4:12)]
head(valid.df.bandsOnly)
#now lets do some predictions
mRF.test.pred <- predict(mRF_all,newdata=valid.df.bandsOnly)
out.mRF.test.pred <- get.mv.predicted(mRF.test.pred)
#lets wrap it up in a dataframe
test.df <- as.data.frame(out.mRF.test.pred)
names(test.df)
valid.df.neonOnly <- valid.df[,c(13:21)]
names(valid.df.neonOnly)
test.df <- cbind(test.df,valid.df.neonOnly)
#using the function of https://www.mdpi.com/2072-4292/11/13/1597/htm - AGB as a function of LAI*cm
par(mfrow=c(1,1))
plot(test.df$lai,test.df$LAI,xlab="NEON data",ylab="Prediction (mRF): LAI")
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df$msi,test.df$Cw,xlab="NEON data: MSI",ylab="Prediction (mRF): Cw")
plot(test.df$ndii,test.df$Cw,xlab="NEON data: ndii",ylab="Prediction (mRF): Cw")
plot(test.df$ndwi,test.df$Cw,xlab="NEON data: ndwi",ylab="Prediction (mRF): Cw")
plot(test.df$ndmi,test.df$Cw,xlab="NEON data: ndmi",ylab="Prediction (mRF): Cw")
plot(test.df$wbi,test.df$Cw,xlab="NEON data: wbi",ylab="Prediction (mRF): Cw")
#using the function of https://www.mdpi.com/2072-4292/11/13/1597/htm - AGB as a function of LAI*cm
par(mfrow=c(1,1))
plot(test.df$biomass,(test.df$Cm*test.df$LAI),xlab="NEON data: biomass",ylab="Prediction (mRF): Cm*LAI")
#testing to fpar
plot(test.df$fpar,(test.df$LAI),xlab="NEON data: fpar",ylab="Prediction (mRF): LAI")
#check this
names(test.df)
paste("mRF_",names(test.df)[1:7],sep="")
paste("NEON_",names(test.df)[8:16],sep="")
names(test.df) <- c(paste("mRF_",names(test.df)[1:7],sep=""),
                    paste("NEON_",names(test.df)[8:16],sep=""))
names(test.df)
library(corrplot)
M<-cor(test.df)
corrplot(M, type="upper")
##recreate the reflectances to see if we can calculate the indices
new.param.list <- as.data.frame(out.mRF.test.pred)
head(new.param.list)
new.param.list$TypeLidf <- "Erectophile"
new.param.list$tts <- sun_zenith #fixed zenith, azimuth and rel azimuth
new.param.list$tto <- obs_zenith
new.param.list$psi <- rel_azimut
new.param.list$psoil <- 1 #psoil - fixed as the average seen in the paper [0.5 to 1.5]
new.param.list$lidfa <- 65
head(new.param.list)
pred.prosail.scplib <- PROSAIL(parameterList = new.param.list)
par(mfrow=c(1,1))
plot(pred.prosail.scplib)
#now we create a dataframe to receive everything
pred.spectra <- as.data.frame(spectra(pred.prosail.scplib))
names(pred.spectra) <- paste("band_",pred.prosail.scplib@wavelength,sep="")
names(pred.spectra)
#now we calculate the indices
pred.indices <- data.frame(linenr=seq_len(nrow(pred.spectra))) # seq_len, not seq(1:n)
#sources: https://www.harrisgeospatial.com/docs/CanopyWaterContent.html
pred.indices$MSI <- pred.spectra$band_1599/pred.spectra$band_819
pred.indices$NDII <- (pred.spectra$band_819-pred.spectra$band_1649)/(pred.spectra$band_819+pred.spectra$band_1649)
pred.indices$NDWI <- (pred.spectra$band_857-pred.spectra$band_1241)/(pred.spectra$band_857+pred.spectra$band_1241)
pred.indices$NDMI <- (pred.spectra$band_860-(pred.spectra$band_1640-pred.spectra$band_2130))/(pred.spectra$band_860+(pred.spectra$band_1640-pred.spectra$band_2130))
pred.indices$WBI <- pred.spectra$band_970/pred.spectra$band_900
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df$NEON_msi ,pred.indices$MSI,xlab="NEON data: MSI",ylab="Predicted MSI")
plot(test.df$NEON_ndii,pred.indices$NDII,xlab="NEON data: ndii",ylab="Predicted NDII")
plot(test.df$NEON_ndwi,pred.indices$NDWI,xlab="NEON data: ndwi",ylab="Predicted NDWI")
plot(test.df$NEON_ndmi,pred.indices$NDMI,xlab="NEON data: ndmi",ylab="Predicted NDMI")
plot(test.df$NEON_wbi ,pred.indices$WBI,xlab="NEON data: wbi",ylab="Predicted WBI")
#can it be an effect of outliers?
par(mfrow=c(1,1))
ncol(test.df)
par(mfrow=c(3,3))
for (i in 8:ncol(test.df)){
  boxplot(test.df[,i])
}
test.df.redux <- test.df
# Iteratively drop rows flagged as boxplot outliers in any NEON column.
for (i in 8:ncol(test.df)){
  outlier.redux <- boxplot(test.df.redux[,i], plot=FALSE)$out
  print(length(outlier.redux))
  if (length(outlier.redux)!=0){
    test.df.redux<-test.df.redux[-which(test.df.redux[,i] %in% outlier.redux),]
  }
  boxplot(test.df.redux[,i],main=names(test.df.redux)[i])
}
#so now, i removed all the outliers from the validation data. How many points do i have left?
nrow(test.df.redux)
#still over 4k, lets repeat the previous
par(mfrow=c(1,1))
plot(test.df.redux$NEON_lai,test.df.redux$mRF_LAI,xlab="NEON data (no outliers)",ylab="Prediction (mRF): LAI")
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df.redux$NEON_msi ,test.df.redux$mRF_Cw,xlab="NEON data: MSI",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_ndii,test.df.redux$mRF_Cw,xlab="NEON data: ndii",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_ndwi,test.df.redux$mRF_Cw,xlab="NEON data: ndwi",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_ndmi,test.df.redux$mRF_Cw,xlab="NEON data: ndmi",ylab="Prediction (mRF): Cw")
plot(test.df.redux$NEON_wbi ,test.df.redux$mRF_Cw,xlab="NEON data: wbi",ylab="Prediction (mRF): Cw")
##recreate the reflectances to see if we can calculate the indices - redux dataset
names(test.df.redux)
new.param.list.redux <- test.df.redux[,1:7]
head(new.param.list.redux)
names(new.param.list.redux) <- names(as.data.frame(out.mRF.test.pred))
head(new.param.list.redux)
new.param.list.redux$TypeLidf <- "Erectophile"
new.param.list.redux$tts <- sun_zenith #fixed zenith, azimuth and rel azimuth
new.param.list.redux$tto <- obs_zenith
new.param.list.redux$psi <- rel_azimut
new.param.list.redux$psoil <- 1 #psoil - fixed as the average seen in the paper [0.5 to 1.5]
new.param.list.redux$lidfa <- 65
head(new.param.list.redux)
pred.prosail.scplib.redux <- PROSAIL(parameterList = new.param.list.redux)
par(mfrow=c(1,1))
plot(pred.prosail.scplib.redux)
#now we create a dataframe to receive everything
pred.spectra.redux <- as.data.frame(spectra(pred.prosail.scplib.redux))
names(pred.spectra.redux) <- paste("band_",pred.prosail.scplib.redux@wavelength,sep="")
names(pred.spectra.redux)
#now we calculate the indices
pred.indices.redux <- data.frame(linenr=seq_len(nrow(pred.spectra.redux)))
#sources: https://www.harrisgeospatial.com/docs/CanopyWaterContent.html
pred.indices.redux$MSI <- pred.spectra.redux$band_1599/pred.spectra.redux$band_819
pred.indices.redux$NDII <- (pred.spectra.redux$band_819-pred.spectra.redux$band_1649)/(pred.spectra.redux$band_819+pred.spectra.redux$band_1649)
pred.indices.redux$NDWI <- (pred.spectra.redux$band_857-pred.spectra.redux$band_1241)/(pred.spectra.redux$band_857+pred.spectra.redux$band_1241)
pred.indices.redux$NDMI <- (pred.spectra.redux$band_860-(pred.spectra.redux$band_1640-pred.spectra.redux$band_2130))/(pred.spectra.redux$band_860+(pred.spectra.redux$band_1640-pred.spectra.redux$band_2130))
pred.indices.redux$WBI <- pred.spectra.redux$band_970/pred.spectra.redux$band_900
#canopy water content indices
par(mfrow=c(3,2))
plot(test.df.redux$NEON_msi ,pred.indices.redux$MSI,xlab="NEON data: MSI",ylab="Predicted MSI")
plot(test.df.redux$NEON_ndii,pred.indices.redux$NDII,xlab="NEON data: ndii",ylab="Predicted NDII")
plot(test.df.redux$NEON_ndwi,pred.indices.redux$NDWI,xlab="NEON data: ndwi",ylab="Predicted NDWI")
plot(test.df.redux$NEON_ndmi,pred.indices.redux$NDMI,xlab="NEON data: ndmi",ylab="Predicted NDMI")
plot(test.df.redux$NEON_wbi ,pred.indices.redux$WBI,xlab="NEON data: wbi",ylab="Predicted WBI")
#test r squared
summary(lm(pred.indices.redux$MSI~test.df.redux$NEON_msi))$r.squared
names(test.df.redux)
names(pred.indices.redux)
redux.indices.table <- cbind(test.df.redux[,12:16],pred.indices.redux[,c(2:6)])
head(redux.indices.table)
library(corrplot)
par(mfrow=c(1,1))
M.indices.redux <-cor(redux.indices.table)
corrplot(M.indices.redux, type="upper")
#lets bring the unreduced
nrow(test.df)
nrow(pred.indices)
indices.table <- cbind(test.df[,c(12:16)],pred.indices[,c(2:6)])
names(indices.table)
M.indices <-cor(indices.table)
corrplot(M.indices, type="upper")
par(mfrow=c(1,2))
corrplot(M.indices, type="upper",title="With outliers",mar=c(0,0,1,0),addCoef.col = "black",diag=FALSE)
# 'main' is not a corrplot argument; use 'title' as on the previous call
corrplot(M.indices.redux, type="upper",title="Without outliers",mar=c(0,0,1,0),addCoef.col = "black",diag=FALSE)
|
library(shiny)
library(ggplot2)

# Grid of x values shared by both plots. Hoisted to the top level so it is
# computed once instead of on every reactive invalidation of each
# renderPlot() (the grid does not depend on any input).
x_grid <- seq(from = -20, to = 20, length.out = 1000)

# UI: a slider for the degrees of freedom of Student's t distribution, with
# stacked density (PDF) and cumulative probability (CDF) panels.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(sliderInput("df",
                             "Degrees of freedom",
                             min = 1,
                             max = 20,
                             value = 2,
                             step = 1)
    ),
    mainPanel(
      plotOutput("distplot", height = "200"),
      plotOutput("CDFplot", height = "200")
    )
  )
)

server <- function(input, output) {
  # Density of the t distribution for the selected df; the fixed y-limits
  # keep the axis stable as the slider moves.
  output$distplot <- renderPlot({
    data <- data.frame(X = x_grid, Density = dt(x_grid, df = input$df))
    ggplot(data = data, aes(x = X, y = Density)) +
      geom_line(color = "firebrick") +
      theme_bw() +
      labs(x = "X", y = "Density") +
      ylim(c(0, 0.42))
  })
  # Cumulative distribution function for the selected df.
  output$CDFplot <- renderPlot({
    data <- data.frame(X = x_grid, Prob = pt(x_grid, df = input$df))
    ggplot(data = data, aes(x = X, y = Prob)) +
      geom_line(color = "firebrick") +
      theme_bw() +
      labs(x = "X", y = "Cumulative Probability")
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
/shinyApps/Dist_T/app.R
|
no_license
|
IMSMWU/MRDA2018
|
R
| false
| false
| 1,206
|
r
|
library(shiny)
library(ggplot2)

# Shiny app illustrating how the Student t distribution changes with its
# degrees of freedom: one slider drives a PDF panel and a CDF panel.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      sliderInput(
        "df", "Degrees of freedom",
        min = 1, max = 20, value = 2, step = 1
      )
    ),
    mainPanel(
      plotOutput("distplot", height = "200"),
      plotOutput("CDFplot", height = "200")
    )
  )
)

server <- function(input, output) {
  # Probability density function for the chosen degrees of freedom.
  output$distplot <- renderPlot({
    grid <- seq(-20, 20, length.out = 1000)
    pdf_df <- data.frame(X = grid, Density = dt(grid, df = input$df))
    ggplot(pdf_df, aes(x = X, y = Density)) +
      geom_line(color = "firebrick") +
      theme_bw() +
      labs(x = "X", y = "Density") +
      ylim(c(0, 0.42))
  })
  # Cumulative distribution function for the chosen degrees of freedom.
  output$CDFplot <- renderPlot({
    grid <- seq(-20, 20, length.out = 1000)
    cdf_df <- data.frame(X = grid, Prob = pt(grid, df = input$df))
    ggplot(cdf_df, aes(x = X, y = Prob)) +
      geom_line(color = "firebrick") +
      theme_bw() +
      labs(x = "X", y = "Cumulative Probability")
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
require(mosaic)
shinyServer(function(input, output) {
  # Server half of a Central Limit Theorem demo: draw a population from the
  # distribution chosen in the UI, build an approximate sampling distribution
  # of the sample mean by resampling, and report empirical vs. theoretical
  # standard error.
  #
  # Expression that generates a histogram. The expression is
  # wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should re-execute automatically
  # when inputs change
  # 2) Its output type is a plot

  # Population of 10,000 draws from the selected distribution (input$dist);
  # regenerated whenever the selection changes.
  ppln <- reactive({
    switch(input$dist,
      dist1 = rweibull(10000,shape=1.5,scale=8), # Weibull population
      dist2 = c(rexp(5000,0.27),rnorm(5000,13,2)), # bimodal population
      dist3 = runif(10000, 9,26.5)) # uniform population
  })
  # Fixed x-axis limits per population, so the population plot and the
  # sampling-distribution plot share a comparable scale.
  pLims <- reactive({
    switch(input$dist,
      dist1 = c(0,25), # Weibull population
      dist2 = c(0,18), # bimodal population
      dist3 = c(8.5,27)) # uniform population
  })
  # Histogram of the raw population.
  output$distPlot <- renderPlot({
    histogram(~ppln(), col = 'red', border = 'white',
              xlab="x values", main="Population",
              xlim=pLims(), n=25)
  })
  # 2000 resampled means of size input$num: the approximate sampling
  # distribution of the sample mean (mosaic's `do(n) *` idiom).
  manyXBars <- reactive({
    do(2000) * mean(~ resample(ppln(), input$num))
  })
  # Histogram of the resampled means with a fitted normal-curve overlay.
  output$nullDistPlot <- renderPlot({
    histogram(~result, data=manyXBars(), col = 'skyblue',
              xlim=pLims(), n=50,
              border = 'white', fit="normal", xlab="x-bar values",
              main="Approx. sampling distribution for sample means")
  })
  # Normal quantile-quantile plot of the resampled means.
  output$qqPlot <- renderPlot({
    qqmath(~result, data=manyXBars(), xlab="Normal quantile",
           pch=19, cex=0.4,
           ylab=expression(paste(bar(x), " values")),
           main="Quantile-quantile plot of sampling dist.")
  })
  # Empirical standard error: SD of the 2000 resampled means.
  output$text1 <- renderText({
    approxStdErr <- round(sd(~result,data=manyXBars()), digits=4)
    paste("The standard deviation of the approx. sampling dist. is ",
          approxStdErr)
  })
  # Theoretical standard error: population SD divided by sqrt(sample size).
  output$text2 <- renderText({
    actualStdErr <- round(sd(ppln())/sqrt(input$num), digits=4)
    paste("The true standard error is ", actualStdErr)
  })
})
|
/testing/cltMeans/server.R
|
no_license
|
mathsco/shinyApps
|
R
| false
| false
| 1,922
|
r
|
require(mosaic)
shinyServer(function(input, output) {
  # Server half of a Central Limit Theorem demo: draw a population from the
  # distribution chosen in the UI, build an approximate sampling distribution
  # of the sample mean by resampling, and report empirical vs. theoretical
  # standard error.
  #
  # Expression that generates a histogram. The expression is
  # wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should re-execute automatically
  # when inputs change
  # 2) Its output type is a plot

  # Population of 10,000 draws from the selected distribution (input$dist);
  # regenerated whenever the selection changes.
  ppln <- reactive({
    switch(input$dist,
      dist1 = rweibull(10000,shape=1.5,scale=8), # Weibull population
      dist2 = c(rexp(5000,0.27),rnorm(5000,13,2)), # bimodal population
      dist3 = runif(10000, 9,26.5)) # uniform population
  })
  # Fixed x-axis limits per population, so the population plot and the
  # sampling-distribution plot share a comparable scale.
  pLims <- reactive({
    switch(input$dist,
      dist1 = c(0,25), # Weibull population
      dist2 = c(0,18), # bimodal population
      dist3 = c(8.5,27)) # uniform population
  })
  # Histogram of the raw population.
  output$distPlot <- renderPlot({
    histogram(~ppln(), col = 'red', border = 'white',
              xlab="x values", main="Population",
              xlim=pLims(), n=25)
  })
  # 2000 resampled means of size input$num: the approximate sampling
  # distribution of the sample mean (mosaic's `do(n) *` idiom).
  manyXBars <- reactive({
    do(2000) * mean(~ resample(ppln(), input$num))
  })
  # Histogram of the resampled means with a fitted normal-curve overlay.
  output$nullDistPlot <- renderPlot({
    histogram(~result, data=manyXBars(), col = 'skyblue',
              xlim=pLims(), n=50,
              border = 'white', fit="normal", xlab="x-bar values",
              main="Approx. sampling distribution for sample means")
  })
  # Normal quantile-quantile plot of the resampled means.
  output$qqPlot <- renderPlot({
    qqmath(~result, data=manyXBars(), xlab="Normal quantile",
           pch=19, cex=0.4,
           ylab=expression(paste(bar(x), " values")),
           main="Quantile-quantile plot of sampling dist.")
  })
  # Empirical standard error: SD of the 2000 resampled means.
  output$text1 <- renderText({
    approxStdErr <- round(sd(~result,data=manyXBars()), digits=4)
    paste("The standard deviation of the approx. sampling dist. is ",
          approxStdErr)
  })
  # Theoretical standard error: population SD divided by sqrt(sample size).
  output$text2 <- renderText({
    actualStdErr <- round(sd(ppln())/sqrt(input$num), digits=4)
    paste("The true standard error is ", actualStdErr)
  })
})
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# String function helpers
#' Get `stringr` pattern options
#'
#' This function assigns definitions for the `stringr` pattern modifier
#' functions (`fixed()`, `regex()`, etc.) inside itself, and uses them to
#' evaluate the quoted expression `pattern`, returning a list that is used
#' to control pattern matching behavior in internal `arrow` functions.
#'
#' @param pattern Unevaluated expression containing a call to a `stringr`
#' pattern modifier function
#'
#' @return List containing elements `pattern`, `fixed`, and `ignore_case`
#' @keywords internal
get_stringr_pattern_options <- function(pattern) {
  # The local definitions below intentionally shadow stringr's modifiers of
  # the same names, so the eval(pattern) at the bottom dispatches here
  # instead of to stringr. Do not rename them.
  fixed <- function(pattern, ignore_case = FALSE, ...) {
    check_dots(...)
    list(pattern = pattern, fixed = TRUE, ignore_case = ignore_case)
  }
  regex <- function(pattern, ignore_case = FALSE, ...) {
    check_dots(...)
    list(pattern = pattern, fixed = FALSE, ignore_case = ignore_case)
  }
  # coll() and boundary() have no Arrow equivalent; fail loudly.
  coll <- function(...) {
    arrow_not_supported("Pattern modifier `coll()`")
  }
  boundary <- function(...) {
    arrow_not_supported("Pattern modifier `boundary()`")
  }
  # Warn about (and drop) any modifier arguments Arrow cannot honor.
  check_dots <- function(...) {
    dots <- list(...)
    if (length(dots)) {
      warning(
        "Ignoring pattern modifier ",
        ngettext(length(dots), "argument ", "arguments "),
        "not supported in Arrow: ",
        oxford_paste(names(dots)),
        call. = FALSE
      )
    }
  }
  # A bare string pattern means the stringr defaults: regex, case-sensitive.
  ensure_opts <- function(opts) {
    if (is.character(opts)) {
      opts <- list(pattern = opts, fixed = FALSE, ignore_case = FALSE)
    }
    opts
  }
  ensure_opts(eval(pattern))
}
#' Does this string contain regex metacharacters?
#'
#' @param string String to be tested
#' @keywords internal
#' @return Logical: does `string` contain regex metacharacters?
contains_regex <- function(string) {
  # Metacharacters checked: . \ | ( ) [ { ^ $ * + ?
  # (vectorized: returns one logical per element of `string`)
  grepl("[.\\|()[{^$*+?]", string)
}
# Format `pattern` for case-insensitive and/or literal matching with RE2.
# Arrow has no native case-insensitive literal matcher, so literal patterns
# are instead quoted for the regular expression engine (RE2).
# https://github.com/google/re2/wiki/Syntax
format_string_pattern <- function(pattern, ignore.case, fixed) {
  # Case-sensitive patterns pass through unchanged.
  if (!ignore.case) {
    return(pattern)
  }
  if (fixed) {
    # "\Q...\E" makes RE2 treat the enclosed span as literal text. Lowercase
    # any embedded literal "\E" so it cannot terminate the quoted span early.
    quoted <- gsub("\\E", "\\e", pattern, fixed = TRUE)
    pattern <- paste0("\\Q", quoted, "\\E")
  }
  # "(?i)" switches on case-insensitive matching for the whole pattern.
  paste0("(?i)", pattern)
}
# Format `replacement` for literal replacement with the RE2 engine.
# Arrow has no native case-insensitive literal replacement, so that case is
# routed through the regular expression engine (RE2).
# https://github.com/google/re2/wiki/Syntax
format_string_replacement <- function(replacement, ignore.case, fixed) {
  # Only case-insensitive literal replacement goes through the regex engine;
  # everything else uses the replacement text verbatim.
  if (!(ignore.case && fixed)) {
    return(replacement)
  }
  # Double each single backslash so the regex engine treats it literally.
  gsub("\\", "\\\\", replacement, fixed = TRUE)
}
# Arrow's string case-conversion kernels do not yet take a locale option
# (tracked as ARROW-14126), so stringr's 'locale' argument is only accepted
# at its default value ("en"). The stringr functions that take 'locale' as
# their second argument are:
# str_to_lower
# str_to_upper
# str_to_title
stop_if_locale_provided <- function(locale) {
  # The default locale needs no action; anything else cannot be honored.
  if (identical(locale, "en")) {
    return(invisible(NULL))
  }
  stop("Providing a value for 'locale' other than the default ('en') is not supported in Arrow. ",
    "To change locale, use 'Sys.setlocale()'",
    call. = FALSE
  )
}
# Split up into several register functions by category to satisfy the linter
register_bindings_string <- function() {
  register_bindings_string_join()   # paste(), paste0(), str_c()
  register_bindings_string_regex()  # grepl(), str_detect(), sub(), str_split(), ...
  register_bindings_string_other()  # nchar(), case conversion, trim, pad, substr, ...
}
register_bindings_string_join <- function() {
  # Factory for the paste()/paste0()/str_c() bindings: returns a function
  # that joins its string arguments element-wise with Arrow's
  # `binary_join_element_wise` kernel, configured with the given null
  # handling behavior.
  arrow_string_join_function <- function(null_handling, null_replacement = NULL) {
    # the `binary_join_element_wise` Arrow C++ compute kernel takes the separator
    # as the last argument, so pass `sep` as the last dots arg to this function
    function(...) {
      args <- lapply(list(...), function(arg) {
        # handle scalar literal args, and cast all args to string for
        # consistency with base::paste(), base::paste0(), and stringr::str_c()
        if (!inherits(arg, "Expression")) {
          assert_that(
            length(arg) == 1,
            msg = "Literal vectors of length != 1 not supported in string concatenation"
          )
          Expression$scalar(as.character(arg))
        } else {
          call_binding("as.character", arg)
        }
      })
      Expression$create(
        "binary_join_element_wise",
        args = args,
        options = list(
          null_handling = null_handling,
          null_replacement = null_replacement
        )
      )
    }
  }
  # base::paste(): nulls are replaced with the string "NA" (REPLACE + "NA").
  register_binding("paste", function(..., sep = " ", collapse = NULL, recycle0 = FALSE) {
    assert_that(
      is.null(collapse),
      msg = "paste() with the collapse argument is not yet supported in Arrow"
    )
    if (!inherits(sep, "Expression")) {
      assert_that(!is.na(sep), msg = "Invalid separator")
    }
    arrow_string_join_function(NullHandlingBehavior$REPLACE, "NA")(..., sep)
  })
  # base::paste0(): same as paste() but with an empty separator.
  register_binding("paste0", function(..., collapse = NULL, recycle0 = FALSE) {
    assert_that(
      is.null(collapse),
      msg = "paste0() with the collapse argument is not yet supported in Arrow"
    )
    arrow_string_join_function(NullHandlingBehavior$REPLACE, "NA")(..., "")
  })
  # stringr::str_c(): uses EMIT_NULL so nulls propagate into the result.
  register_binding("str_c", function(..., sep = "", collapse = NULL) {
    assert_that(
      is.null(collapse),
      msg = "str_c() with the collapse argument is not yet supported in Arrow"
    )
    arrow_string_join_function(NullHandlingBehavior$EMIT_NULL)(..., sep)
  })
}
register_bindings_string_regex <- function() {
  # Shared constructor for substring/regex match expressions.
  # (Returns the Expression directly; previously this assigned to a local
  # `out` and relied on the invisible value of the assignment.)
  create_string_match_expr <- function(arrow_fun, string, pattern, ignore_case) {
    Expression$create(
      arrow_fun,
      string,
      options = list(pattern = pattern, ignore_case = ignore_case)
    )
  }
  # base::grepl(): `fixed = TRUE` selects plain substring matching. base's
  # grepl() returns FALSE (not NA) for missing strings, so nulls in the
  # match result are mapped to FALSE.
  register_binding("grepl", function(pattern, x, ignore.case = FALSE, fixed = FALSE) {
    arrow_fun <- ifelse(fixed, "match_substring", "match_substring_regex")
    out <- create_string_match_expr(
      arrow_fun,
      string = x,
      pattern = pattern,
      ignore_case = ignore.case
    )
    call_binding("if_else", call_binding("is.na", out), FALSE, out)
  })
  # stringr::str_detect(): pattern may be wrapped in fixed()/regex();
  # `negate = TRUE` inverts the match.
  register_binding("str_detect", function(string, pattern, negate = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    arrow_fun <- ifelse(opts$fixed, "match_substring", "match_substring_regex")
    out <- create_string_match_expr(arrow_fun,
      string = string,
      pattern = opts$pattern,
      ignore_case = opts$ignore_case
    )
    if (negate) {
      out <- !out
    }
    out
  })
  # stringr-style str_like(): delegates to Arrow's match_like kernel.
  register_binding("str_like", function(string, pattern, ignore_case = TRUE) {
    Expression$create(
      "match_like",
      string,
      options = list(pattern = pattern, ignore_case = ignore_case)
    )
  })
  # stringr::str_count(): number of matches of `pattern` in each string;
  # only a single length-1 pattern is supported.
  register_binding("str_count", function(string, pattern) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    if (!is.string(pattern)) {
      arrow_not_supported("`pattern` must be a length 1 character vector; other values")
    }
    arrow_fun <- ifelse(opts$fixed, "count_substring", "count_substring_regex")
    Expression$create(
      arrow_fun,
      string,
      options = list(pattern = opts$pattern, ignore_case = opts$ignore_case)
    )
  })
  # base::startsWith() / base::endsWith(): literal prefix/suffix tests.
  register_binding("startsWith", function(x, prefix) {
    Expression$create(
      "starts_with",
      x,
      options = list(pattern = prefix)
    )
  })
  register_binding("endsWith", function(x, suffix) {
    Expression$create(
      "ends_with",
      x,
      options = list(pattern = suffix)
    )
  })
  # stringr::str_starts()/str_ends(): fixed() patterns use the literal
  # prefix/suffix kernels; regex patterns are anchored with ^/$ instead.
  register_binding("str_starts", function(string, pattern, negate = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    if (opts$fixed) {
      out <- call_binding("startsWith", x = string, prefix = opts$pattern)
    } else {
      out <- create_string_match_expr(
        arrow_fun = "match_substring_regex",
        string = string,
        pattern = paste0("^", opts$pattern),
        ignore_case = opts$ignore_case
      )
    }
    if (negate) {
      out <- !out
    }
    out
  })
  register_binding("str_ends", function(string, pattern, negate = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    if (opts$fixed) {
      out <- call_binding("endsWith", x = string, suffix = opts$pattern)
    } else {
      out <- create_string_match_expr(
        arrow_fun = "match_substring_regex",
        string = string,
        pattern = paste0(opts$pattern, "$"),
        ignore_case = opts$ignore_case
      )
    }
    if (negate) {
      out <- !out
    }
    out
  })
  # Encapsulate some common logic for sub/gsub/str_replace/str_replace_all:
  # max_replacements = 1 replaces only the first match, -1 replaces all.
  arrow_r_string_replace_function <- function(max_replacements) {
    function(pattern, replacement, x, ignore.case = FALSE, fixed = FALSE) {
      Expression$create(
        # Case-insensitive "fixed" replacement still needs the regex kernel;
        # the format_string_* helpers quote the pattern/replacement for it.
        ifelse(fixed && !ignore.case, "replace_substring", "replace_substring_regex"),
        x,
        options = list(
          pattern = format_string_pattern(pattern, ignore.case, fixed),
          replacement = format_string_replacement(replacement, ignore.case, fixed),
          max_replacements = max_replacements
        )
      )
    }
  }
  # stringr flavor: unwrap fixed()/regex() modifiers, then delegate to the
  # base-R replace helper above.
  arrow_stringr_string_replace_function <- function(max_replacements) {
    force(max_replacements)
    function(string, pattern, replacement) {
      opts <- get_stringr_pattern_options(enexpr(pattern))
      arrow_r_string_replace_function(max_replacements)(
        pattern = opts$pattern,
        replacement = replacement,
        x = string,
        ignore.case = opts$ignore_case,
        fixed = opts$fixed
      )
    }
  }
  register_binding("sub", arrow_r_string_replace_function(1L))
  register_binding("gsub", arrow_r_string_replace_function(-1L))
  register_binding("str_replace", arrow_stringr_string_replace_function(1L))
  register_binding("str_replace_all", arrow_stringr_string_replace_function(-1L))
  # base::strsplit(): `split` must be a single string; `useBytes` is ignored.
  register_binding("strsplit", function(x, split, fixed = FALSE, perl = FALSE,
                                        useBytes = FALSE) {
    assert_that(is.string(split))
    arrow_fun <- ifelse(fixed, "split_pattern", "split_pattern_regex")
    # warn when the user specifies both fixed = TRUE and perl = TRUE, for
    # consistency with the behavior of base::strsplit()
    if (fixed && perl) {
      warning("Argument 'perl = TRUE' will be ignored", call. = FALSE)
    }
    # since split is not a regex, proceed without any warnings or errors regardless
    # of the value of perl, for consistency with the behavior of base::strsplit()
    Expression$create(
      arrow_fun,
      x,
      options = list(pattern = split, reverse = FALSE, max_splits = -1L)
    )
  })
  # stringr::str_split(): `n` limits the number of pieces returned; Inf means
  # unlimited (encoded as max_splits = 0 below via n <- 0L, then n - 1L = -1L).
  register_binding("str_split", function(string, pattern, n = Inf, simplify = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    arrow_fun <- ifelse(opts$fixed, "split_pattern", "split_pattern_regex")
    if (opts$ignore_case) {
      arrow_not_supported("Case-insensitive string splitting")
    }
    if (n == 0) {
      arrow_not_supported("Splitting strings into zero parts")
    }
    if (identical(n, Inf)) {
      n <- 0L
    }
    if (simplify) {
      warning("Argument 'simplify = TRUE' will be ignored", call. = FALSE)
    }
    # The max_splits option in the Arrow C++ library controls the maximum number
    # of places at which the string is split, whereas the argument n to
    # str_split() controls the maximum number of pieces to return. So we must
    # subtract 1 from n to get max_splits.
    Expression$create(
      arrow_fun,
      string,
      options = list(
        pattern = opts$pattern,
        reverse = FALSE,
        max_splits = n - 1L
      )
    )
  })
}
register_bindings_string_other <- function() {
  # base::nchar(): Arrow supports type = "chars" (default) and "bytes".
  register_binding("nchar", function(x, type = "chars", allowNA = FALSE, keepNA = NA) {
    if (allowNA) {
      arrow_not_supported("allowNA = TRUE")
    }
    # keepNA = NA resolves to TRUE except for type = "width" (base semantics).
    if (is.na(keepNA)) {
      keepNA <- !identical(type, "width")
    }
    if (!keepNA) {
      # TODO: I think there is a fill_null kernel we could use, set null to 2
      arrow_not_supported("keepNA = TRUE")
    }
    if (identical(type, "bytes")) {
      Expression$create("binary_length", x)
    } else {
      Expression$create("utf8_length", x)
    }
  })
  # Case conversion: only the default locale is accepted
  # (see stop_if_locale_provided()).
  register_binding("str_to_lower", function(string, locale = "en") {
    stop_if_locale_provided(locale)
    Expression$create("utf8_lower", string)
  })
  register_binding("str_to_upper", function(string, locale = "en") {
    stop_if_locale_provided(locale)
    Expression$create("utf8_upper", string)
  })
  register_binding("str_to_title", function(string, locale = "en") {
    stop_if_locale_provided(locale)
    Expression$create("utf8_title", string)
  })
  # stringr::str_trim(): whitespace trimming on the requested side(s).
  register_binding("str_trim", function(string, side = c("both", "left", "right")) {
    side <- match.arg(side)
    trim_fun <- switch(side,
      left = "utf8_ltrim_whitespace",
      right = "utf8_rtrim_whitespace",
      both = "utf8_trim_whitespace"
    )
    Expression$create(trim_fun, string)
  })
  # base::substr(): scalar start/stop only; out-of-range values are clamped
  # below to reproduce base R's behavior.
  register_binding("substr", function(x, start, stop) {
    assert_that(
      length(start) == 1,
      msg = "`start` must be length 1 - other lengths are not supported in Arrow"
    )
    assert_that(
      length(stop) == 1,
      msg = "`stop` must be length 1 - other lengths are not supported in Arrow"
    )
    # substr treats values as if they're on a continous number line, so values
    # 0 are effectively blank characters - set `start` to 1 here so Arrow mimics
    # this behavior
    if (start <= 0) {
      start <- 1
    }
    # if `stop` is lower than `start`, this is invalid, so set `stop` to
    # 0 so that an empty string will be returned (consistent with base::substr())
    if (stop < start) {
      stop <- 0
    }
    Expression$create(
      "utf8_slice_codeunits",
      x,
      # we don't need to subtract 1 from `stop` as C++ counts exclusively
      # which effectively cancels out the difference in indexing between R & C++
      options = list(start = start - 1L, stop = stop)
    )
  })
  # base::substring(): alias for substr() with base's argument names.
  register_binding("substring", function(text, first, last) {
    call_binding("substr", x = text, start = first, stop = last)
  })
  # stringr::str_sub(): like substr() but supports negative indices counted
  # from the end of the string.
  register_binding("str_sub", function(string, start = 1L, end = -1L) {
    assert_that(
      length(start) == 1,
      msg = "`start` must be length 1 - other lengths are not supported in Arrow"
    )
    assert_that(
      length(end) == 1,
      msg = "`end` must be length 1 - other lengths are not supported in Arrow"
    )
    # In stringr::str_sub, an `end` value of -1 means the end of the string, so
    # set it to the maximum integer to match this behavior
    if (end == -1) {
      end <- .Machine$integer.max
    }
    # An end value lower than a start value returns an empty string in
    # stringr::str_sub so set end to 0 here to match this behavior
    if (end < start) {
      end <- 0
    }
    # subtract 1 from `start` because C++ is 0-based and R is 1-based
    # str_sub treats a `start` value of 0 or 1 as the same thing so don't subtract 1 when `start` == 0
    # when `start` < 0, both str_sub and utf8_slice_codeunits count backwards from the end
    if (start > 0) {
      start <- start - 1L
    }
    Expression$create(
      "utf8_slice_codeunits",
      string,
      options = list(start = start, stop = end)
    )
  })
  # stringr::str_pad(): pad each string to `width` on the requested side
  # using the single padding character `pad`.
  register_binding("str_pad", function(string, width, side = c("left", "right", "both"), pad = " ") {
    assert_that(is_integerish(width))
    side <- match.arg(side)
    assert_that(is.string(pad))
    if (side == "left") {
      pad_func <- "utf8_lpad"
    } else if (side == "right") {
      pad_func <- "utf8_rpad"
    } else if (side == "both") {
      pad_func <- "utf8_center"
    }
    Expression$create(
      pad_func,
      string,
      options = list(width = width, padding = pad)
    )
  })
}
|
/r/R/dplyr-funcs-string.R
|
permissive
|
tallamjr/arrow
|
R
| false
| false
| 17,222
|
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# String function helpers
#' Get `stringr` pattern options
#'
#' This function assigns definitions for the `stringr` pattern modifier
#' functions (`fixed()`, `regex()`, etc.) inside itself, and uses them to
#' evaluate the quoted expression `pattern`, returning a list that is used
#' to control pattern matching behavior in internal `arrow` functions.
#'
#' @param pattern Unevaluated expression containing a call to a `stringr`
#' pattern modifier function
#'
#' @return List containing elements `pattern`, `fixed`, and `ignore_case`
#' @keywords internal
get_stringr_pattern_options <- function(pattern) {
  # The local definitions below intentionally shadow stringr's modifiers of
  # the same names, so the eval(pattern) at the bottom dispatches here
  # instead of to stringr. Do not rename them.
  fixed <- function(pattern, ignore_case = FALSE, ...) {
    check_dots(...)
    list(pattern = pattern, fixed = TRUE, ignore_case = ignore_case)
  }
  regex <- function(pattern, ignore_case = FALSE, ...) {
    check_dots(...)
    list(pattern = pattern, fixed = FALSE, ignore_case = ignore_case)
  }
  # coll() and boundary() have no Arrow equivalent; fail loudly.
  coll <- function(...) {
    arrow_not_supported("Pattern modifier `coll()`")
  }
  boundary <- function(...) {
    arrow_not_supported("Pattern modifier `boundary()`")
  }
  # Warn about (and drop) any modifier arguments Arrow cannot honor.
  check_dots <- function(...) {
    dots <- list(...)
    if (length(dots)) {
      warning(
        "Ignoring pattern modifier ",
        ngettext(length(dots), "argument ", "arguments "),
        "not supported in Arrow: ",
        oxford_paste(names(dots)),
        call. = FALSE
      )
    }
  }
  # A bare string pattern means the stringr defaults: regex, case-sensitive.
  ensure_opts <- function(opts) {
    if (is.character(opts)) {
      opts <- list(pattern = opts, fixed = FALSE, ignore_case = FALSE)
    }
    opts
  }
  ensure_opts(eval(pattern))
}
#' Does this string contain regex metacharacters?
#'
#' @param string String to be tested
#' @keywords internal
#' @return Logical: does `string` contain regex metacharacters?
contains_regex <- function(string) {
  # Metacharacters checked: . \ | ( ) [ { ^ $ * + ?
  # (vectorized: returns one logical per element of `string`)
  grepl("[.\\|()[{^$*+?]", string)
}
# format `pattern` as needed for case insensitivity and literal matching by RE2
format_string_pattern <- function(pattern, ignore.case, fixed) {
  # Arrow lacks native support for case-insensitive literal string matching and
  # replacement, so we use the regular expression engine (RE2) to do this.
  # https://github.com/google/re2/wiki/Syntax
  if (ignore.case) {
    if (fixed) {
      # Everything between "\Q" and "\E" is treated as literal text.
      # If the search text contains any literal "\E" strings, make them
      # lowercase so they won't signal the end of the literal text:
      pattern <- gsub("\\E", "\\e", pattern, fixed = TRUE)
      pattern <- paste0("\\Q", pattern, "\\E")
    }
    # Prepend "(?i)" for case-insensitive matching
    pattern <- paste0("(?i)", pattern)
  }
  # Case-sensitive patterns pass through unchanged.
  pattern
}
# format `replacement` as needed for literal replacement by RE2
format_string_replacement <- function(replacement, ignore.case, fixed) {
  # Arrow lacks native support for case-insensitive literal string
  # replacement, so we use the regular expression engine (RE2) to do this.
  # https://github.com/google/re2/wiki/Syntax
  if (ignore.case && fixed) {
    # Escape single backslashes in the regex replacement text so they are
    # interpreted as literal backslashes:
    replacement <- gsub("\\", "\\\\", replacement, fixed = TRUE)
  }
  # In all other cases the replacement text is used verbatim.
  replacement
}
# Currently, Arrow does not support a locale option for string case conversion
# functions, in contrast to stringr's API, so the 'locale' argument is only valid
# for stringr's default value ("en"). The following are string functions that
# take a 'locale' option as their second argument:
# str_to_lower
# str_to_upper
# str_to_title
#
# Arrow locale will be supported with ARROW-14126
stop_if_locale_provided <- function(locale) {
  # The default locale needs no action; anything else cannot be honored.
  if (!identical(locale, "en")) {
    stop("Providing a value for 'locale' other than the default ('en') is not supported in Arrow. ",
      "To change locale, use 'Sys.setlocale()'",
      call. = FALSE
    )
  }
}
# Split up into several register functions by category to satisfy the linter
register_bindings_string <- function() {
  register_bindings_string_join()   # paste(), paste0(), str_c()
  register_bindings_string_regex()  # grepl(), str_detect(), sub(), str_split(), ...
  register_bindings_string_other()  # nchar(), case conversion, trim, pad, substr, ...
}
register_bindings_string_join <- function() {
  # Factory for the paste()/paste0()/str_c() bindings: returns a function
  # that joins its string arguments element-wise with Arrow's
  # `binary_join_element_wise` kernel, configured with the given null
  # handling behavior.
  arrow_string_join_function <- function(null_handling, null_replacement = NULL) {
    # the `binary_join_element_wise` Arrow C++ compute kernel takes the separator
    # as the last argument, so pass `sep` as the last dots arg to this function
    function(...) {
      args <- lapply(list(...), function(arg) {
        # handle scalar literal args, and cast all args to string for
        # consistency with base::paste(), base::paste0(), and stringr::str_c()
        if (!inherits(arg, "Expression")) {
          assert_that(
            length(arg) == 1,
            msg = "Literal vectors of length != 1 not supported in string concatenation"
          )
          Expression$scalar(as.character(arg))
        } else {
          call_binding("as.character", arg)
        }
      })
      Expression$create(
        "binary_join_element_wise",
        args = args,
        options = list(
          null_handling = null_handling,
          null_replacement = null_replacement
        )
      )
    }
  }
  # base::paste(): nulls are replaced with the string "NA" (REPLACE + "NA").
  register_binding("paste", function(..., sep = " ", collapse = NULL, recycle0 = FALSE) {
    assert_that(
      is.null(collapse),
      msg = "paste() with the collapse argument is not yet supported in Arrow"
    )
    if (!inherits(sep, "Expression")) {
      assert_that(!is.na(sep), msg = "Invalid separator")
    }
    arrow_string_join_function(NullHandlingBehavior$REPLACE, "NA")(..., sep)
  })
  # base::paste0(): same as paste() but with an empty separator.
  register_binding("paste0", function(..., collapse = NULL, recycle0 = FALSE) {
    assert_that(
      is.null(collapse),
      msg = "paste0() with the collapse argument is not yet supported in Arrow"
    )
    arrow_string_join_function(NullHandlingBehavior$REPLACE, "NA")(..., "")
  })
  # stringr::str_c(): uses EMIT_NULL so nulls propagate into the result.
  register_binding("str_c", function(..., sep = "", collapse = NULL) {
    assert_that(
      is.null(collapse),
      msg = "str_c() with the collapse argument is not yet supported in Arrow"
    )
    arrow_string_join_function(NullHandlingBehavior$EMIT_NULL)(..., sep)
  })
}
register_bindings_string_regex <- function() {
  # Shared constructor for substring/regex match expressions.
  # NOTE(review): the `out <-` assignment is redundant -- the function could
  # return Expression$create(...) directly; behavior is unchanged either way.
  create_string_match_expr <- function(arrow_fun, string, pattern, ignore_case) {
    out <- Expression$create(
      arrow_fun,
      string,
      options = list(pattern = pattern, ignore_case = ignore_case)
    )
  }
  # base::grepl(): `fixed = TRUE` selects plain substring matching. base's
  # grepl() returns FALSE (not NA) for missing strings, so nulls in the
  # match result are mapped to FALSE.
  register_binding("grepl", function(pattern, x, ignore.case = FALSE, fixed = FALSE) {
    arrow_fun <- ifelse(fixed, "match_substring", "match_substring_regex")
    out <- create_string_match_expr(
      arrow_fun,
      string = x,
      pattern = pattern,
      ignore_case = ignore.case
    )
    call_binding("if_else", call_binding("is.na", out), FALSE, out)
  })
  # stringr::str_detect(): pattern may be wrapped in fixed()/regex();
  # `negate = TRUE` inverts the match.
  register_binding("str_detect", function(string, pattern, negate = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    arrow_fun <- ifelse(opts$fixed, "match_substring", "match_substring_regex")
    out <- create_string_match_expr(arrow_fun,
      string = string,
      pattern = opts$pattern,
      ignore_case = opts$ignore_case
    )
    if (negate) {
      out <- !out
    }
    out
  })
  # stringr-style str_like(): delegates to Arrow's match_like kernel.
  register_binding("str_like", function(string, pattern, ignore_case = TRUE) {
    Expression$create(
      "match_like",
      string,
      options = list(pattern = pattern, ignore_case = ignore_case)
    )
  })
  # stringr::str_count(): number of matches of `pattern` in each string;
  # only a single length-1 pattern is supported.
  register_binding("str_count", function(string, pattern) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    if (!is.string(pattern)) {
      arrow_not_supported("`pattern` must be a length 1 character vector; other values")
    }
    arrow_fun <- ifelse(opts$fixed, "count_substring", "count_substring_regex")
    Expression$create(
      arrow_fun,
      string,
      options = list(pattern = opts$pattern, ignore_case = opts$ignore_case)
    )
  })
  # base::startsWith() / base::endsWith(): literal prefix/suffix tests.
  register_binding("startsWith", function(x, prefix) {
    Expression$create(
      "starts_with",
      x,
      options = list(pattern = prefix)
    )
  })
  register_binding("endsWith", function(x, suffix) {
    Expression$create(
      "ends_with",
      x,
      options = list(pattern = suffix)
    )
  })
  # stringr::str_starts()/str_ends(): fixed() patterns use the literal
  # prefix/suffix kernels; regex patterns are anchored with ^/$ instead.
  register_binding("str_starts", function(string, pattern, negate = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    if (opts$fixed) {
      out <- call_binding("startsWith", x = string, prefix = opts$pattern)
    } else {
      out <- create_string_match_expr(
        arrow_fun = "match_substring_regex",
        string = string,
        pattern = paste0("^", opts$pattern),
        ignore_case = opts$ignore_case
      )
    }
    if (negate) {
      out <- !out
    }
    out
  })
  register_binding("str_ends", function(string, pattern, negate = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    if (opts$fixed) {
      out <- call_binding("endsWith", x = string, suffix = opts$pattern)
    } else {
      out <- create_string_match_expr(
        arrow_fun = "match_substring_regex",
        string = string,
        pattern = paste0(opts$pattern, "$"),
        ignore_case = opts$ignore_case
      )
    }
    if (negate) {
      out <- !out
    }
    out
  })
  # Encapsulate some common logic for sub/gsub/str_replace/str_replace_all:
  # max_replacements = 1 replaces only the first match, -1 replaces all.
  arrow_r_string_replace_function <- function(max_replacements) {
    function(pattern, replacement, x, ignore.case = FALSE, fixed = FALSE) {
      Expression$create(
        # Case-insensitive "fixed" replacement still needs the regex kernel;
        # the format_string_* helpers quote the pattern/replacement for it.
        ifelse(fixed && !ignore.case, "replace_substring", "replace_substring_regex"),
        x,
        options = list(
          pattern = format_string_pattern(pattern, ignore.case, fixed),
          replacement = format_string_replacement(replacement, ignore.case, fixed),
          max_replacements = max_replacements
        )
      )
    }
  }
  # stringr flavor: unwrap fixed()/regex() modifiers, then delegate to the
  # base-R replace helper above.
  arrow_stringr_string_replace_function <- function(max_replacements) {
    force(max_replacements)
    function(string, pattern, replacement) {
      opts <- get_stringr_pattern_options(enexpr(pattern))
      arrow_r_string_replace_function(max_replacements)(
        pattern = opts$pattern,
        replacement = replacement,
        x = string,
        ignore.case = opts$ignore_case,
        fixed = opts$fixed
      )
    }
  }
  register_binding("sub", arrow_r_string_replace_function(1L))
  register_binding("gsub", arrow_r_string_replace_function(-1L))
  register_binding("str_replace", arrow_stringr_string_replace_function(1L))
  register_binding("str_replace_all", arrow_stringr_string_replace_function(-1L))
  # base::strsplit(): `split` must be a single string; `useBytes` is ignored.
  register_binding("strsplit", function(x, split, fixed = FALSE, perl = FALSE,
                                        useBytes = FALSE) {
    assert_that(is.string(split))
    arrow_fun <- ifelse(fixed, "split_pattern", "split_pattern_regex")
    # warn when the user specifies both fixed = TRUE and perl = TRUE, for
    # consistency with the behavior of base::strsplit()
    if (fixed && perl) {
      warning("Argument 'perl = TRUE' will be ignored", call. = FALSE)
    }
    # since split is not a regex, proceed without any warnings or errors regardless
    # of the value of perl, for consistency with the behavior of base::strsplit()
    Expression$create(
      arrow_fun,
      x,
      options = list(pattern = split, reverse = FALSE, max_splits = -1L)
    )
  })
  # stringr::str_split(): `n` limits the number of pieces returned; Inf means
  # unlimited (encoded as n <- 0L, so max_splits becomes -1L below).
  register_binding("str_split", function(string, pattern, n = Inf, simplify = FALSE) {
    opts <- get_stringr_pattern_options(enexpr(pattern))
    arrow_fun <- ifelse(opts$fixed, "split_pattern", "split_pattern_regex")
    if (opts$ignore_case) {
      arrow_not_supported("Case-insensitive string splitting")
    }
    if (n == 0) {
      arrow_not_supported("Splitting strings into zero parts")
    }
    if (identical(n, Inf)) {
      n <- 0L
    }
    if (simplify) {
      warning("Argument 'simplify = TRUE' will be ignored", call. = FALSE)
    }
    # The max_splits option in the Arrow C++ library controls the maximum number
    # of places at which the string is split, whereas the argument n to
    # str_split() controls the maximum number of pieces to return. So we must
    # subtract 1 from n to get max_splits.
    Expression$create(
      arrow_fun,
      string,
      options = list(
        pattern = opts$pattern,
        reverse = FALSE,
        max_splits = n - 1L
      )
    )
  })
}
register_bindings_string_other <- function() {
  # Registers bindings for the non-regex string operations: length, case
  # conversion, whitespace trimming, slicing and padding.
  register_binding("nchar", function(x, type = "chars", allowNA = FALSE, keepNA = NA) {
    if (allowNA) {
      arrow_not_supported("allowNA = TRUE")
    }
    if (is.na(keepNA)) {
      # base::nchar(): keepNA = NA means TRUE except for type = "width"
      keepNA <- !identical(type, "width")
    }
    if (!keepNA) {
      # Arrow kernels propagate nulls, i.e. behave like keepNA = TRUE, so
      # keepNA = FALSE cannot be honoured.  (The message previously said
      # "keepNA = TRUE", naming the wrong unsupported value.)
      # TODO: I think there is a fill_null kernel we could use, set null to 2
      arrow_not_supported("keepNA = FALSE")
    }
    if (identical(type, "bytes")) {
      Expression$create("binary_length", x)
    } else {
      Expression$create("utf8_length", x)
    }
  })
  register_binding("str_to_lower", function(string, locale = "en") {
    stop_if_locale_provided(locale)
    Expression$create("utf8_lower", string)
  })
  register_binding("str_to_upper", function(string, locale = "en") {
    stop_if_locale_provided(locale)
    Expression$create("utf8_upper", string)
  })
  register_binding("str_to_title", function(string, locale = "en") {
    stop_if_locale_provided(locale)
    Expression$create("utf8_title", string)
  })
  register_binding("str_trim", function(string, side = c("both", "left", "right")) {
    side <- match.arg(side)
    trim_fun <- switch(side,
      left = "utf8_ltrim_whitespace",
      right = "utf8_rtrim_whitespace",
      both = "utf8_trim_whitespace"
    )
    Expression$create(trim_fun, string)
  })
  register_binding("substr", function(x, start, stop) {
    assert_that(
      length(start) == 1,
      msg = "`start` must be length 1 - other lengths are not supported in Arrow"
    )
    assert_that(
      length(stop) == 1,
      msg = "`stop` must be length 1 - other lengths are not supported in Arrow"
    )
    # substr treats positions as if on a continuous number line, so values
    # <= 0 are effectively blank characters - clamp `start` to 1 here so
    # Arrow mimics this behavior
    if (start <= 0) {
      start <- 1
    }
    # if `stop` is lower than `start`, this is invalid, so set `stop` to
    # 0 so that an empty string will be returned (consistent with base::substr())
    if (stop < start) {
      stop <- 0
    }
    Expression$create(
      "utf8_slice_codeunits",
      x,
      # we don't need to subtract 1 from `stop` as C++ counts exclusively
      # which effectively cancels out the difference in indexing between R & C++
      options = list(start = start - 1L, stop = stop)
    )
  })
  register_binding("substring", function(text, first, last) {
    # base::substring() is substr() with different argument names
    call_binding("substr", x = text, start = first, stop = last)
  })
  register_binding("str_sub", function(string, start = 1L, end = -1L) {
    assert_that(
      length(start) == 1,
      msg = "`start` must be length 1 - other lengths are not supported in Arrow"
    )
    assert_that(
      length(end) == 1,
      msg = "`end` must be length 1 - other lengths are not supported in Arrow"
    )
    # In stringr::str_sub, an `end` value of -1 means the end of the string, so
    # set it to the maximum integer to match this behavior
    if (end == -1) {
      end <- .Machine$integer.max
    }
    # An end value lower than a start value returns an empty string in
    # stringr::str_sub so set end to 0 here to match this behavior
    if (end < start) {
      end <- 0
    }
    # subtract 1 from `start` because C++ is 0-based and R is 1-based
    # str_sub treats a `start` value of 0 or 1 as the same thing so don't subtract 1 when `start` == 0
    # when `start` < 0, both str_sub and utf8_slice_codeunits count backwards from the end
    if (start > 0) {
      start <- start - 1L
    }
    Expression$create(
      "utf8_slice_codeunits",
      string,
      options = list(start = start, stop = end)
    )
  })
  register_binding("str_pad", function(string, width, side = c("left", "right", "both"), pad = " ") {
    assert_that(is_integerish(width))
    side <- match.arg(side)
    assert_that(is.string(pad))
    # map the side to the matching Arrow padding kernel (switch() for
    # consistency with the str_trim binding above)
    pad_func <- switch(side,
      left = "utf8_lpad",
      right = "utf8_rpad",
      both = "utf8_center"
    )
    Expression$create(
      pad_func,
      string,
      options = list(width = width, padding = pad)
    )
  })
}
|
# Interactive stock-price explorer: loads "complete.csv" and draws a plotly
# line chart of daily closing price, with a dropdown that filters the trace
# to a single stock name.
# library() (not require()) so a missing package fails loudly.
library(plotly)
library(data.table)
library(ggplot2)
# read the first 50k rows and drop incomplete records
completeData <- na.omit(fread("complete.csv", nrows = 50000))
# parse the integer-coded YYYYMMDD date column into Date objects
completeData$date <- as.Date(as.character(completeData$date), format = "%Y%m%d")
# all stock names, used to populate the dropdown
uniquename <- sort(unique(completeData$name))
# range of closing prices (kept for a future range slider; currently unused)
rangeSlider <- c(min(completeData$close),
                 max(completeData$close))
# Build one "restyle" button per stock name; each button rewrites the value
# of the filter transform so only that name's rows are shown.  lapply avoids
# the O(n^2) cost of growing a list with c() inside a loop.
buttonList1 <- lapply(uniquename, function(stock) {
  list(
    method = "restyle",
    args = list("transforms[0].value", stock),
    label = stock
  )
})
columns <- colnames(completeData)  # currently unused
# scatter/line trace of close vs date, filtered to the first name by default
p <-
  plot_ly(data = completeData, transforms = list(
    list(
      type = 'filter',
      target = ~ name,
      operation = "=",
      value = uniquename[1]
    )
  )) %>%
  add_trace(
    data = completeData,
    text = "time",
    type = 'scatter',
    mode = 'lines',
    x = completeData$date,
    y = completeData$close,
    visible = TRUE
  ) %>%
  layout(data = completeData,
         updatemenus = list(
           # Dropdown 1: stock-name selector
           list(x = 0.25,
                y = 1.15,
                buttons = buttonList1)
         ))
p
|
/Analysis.R
|
no_license
|
lucy2329/Stock-Market-Analysis
|
R
| false
| false
| 1,259
|
r
|
require("plotly")
require("data.table")
require("ggplot2")
completeData = na.omit(fread("complete.csv", nrows = 50000))
#Get all the unique subId's
completeData$date <- as.Date(as.character(completeData$date), format = "%Y%m%d")
uniquename <- sort(unique(completeData$name))
rangeSlider = c(min(completeData$close),
max(completeData$close))
#Generate buttons for dropdown 1 - SubId
buttonList1 = list()
for (i in uniquename)
{
element = list(
method = "restyle",
args = list("transforms[0].value" , i),
label = i
)
buttonList1 = c(buttonList1, list(element))
}
columns <- colnames(completeData)
# 1st, 2nd and 5th columns --ActivityId, subId, timestamp
p <-
plot_ly(data = completeData, transforms = list(
list(
type = 'filter',
target = ~ name,
operation = "=",
value = uniquename[1]
)
)) %>%
add_trace(
data = completeData,
text = "time" ,
type = 'scatter',
mode = 'lines',
x = completeData$date,
y = completeData$close,
visible = T
) %>%
layout(data = completeData,
updatemenus = list(
# Dropdown 1
list(x = 0.25,
y = 1.15,
buttons = buttonList1)
))
p
|
# Locate the household power consumption data, falling back to extracting or
# downloading the zip archive into a "data" subfolder.  The resolved path is
# kept in `filedest` and used for the reads below no matter which branch ran
# (the original hard-coded "data/household_power_consumption.txt" in the
# read.table() calls, which broke when the txt lived in the working dir).
if (file.exists("data/household_power_consumption.txt")) {
  filedest <- "data/household_power_consumption.txt"
} else if (file.exists("household_power_consumption.txt")) {
  filedest <- "household_power_consumption.txt"
} else if (file.exists("exdata_data_household_power_consumption.zip")) {
  library(utils)
  unzip("exdata_data_household_power_consumption.zip", exdir = "data")
  filedest <- "data/household_power_consumption.txt"
} else {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                "exdata_data_household_power_consumption.zip", method = "curl")
  library(utils)
  unzip("exdata_data_household_power_consumption.zip", exdir = "data")
  filedest <- "data/household_power_consumption.txt"
}
# The data set is very large, so only the rows for 2007-02-01 and 2007-02-02
# are read (rows 66638..69517, i.e. 2880 observations).  The header row is
# read separately so the column names can be re-attached after the skip.
heads <- read.table(filedest, sep = ";", nrows = 1)
dat <- read.table(filedest, sep = ";", skip = 66637, nrows = 2880)
heads <- sapply(heads, as.character)
colnames(dat) <- heads
# Plot 1: histogram of Global Active Power
png(filename = "plot1.png")
hist(dat$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
wittyalias/ExData_Plotting1
|
R
| false
| false
| 1,692
|
r
|
# Checks to see if the data is in any reasonable location, if it can't find it or the base zip file, then it downloads and extracts to a data subfolder.
if(file.exists("data/household_power_consumption.txt")){
filedest = "data/household_power_consumption.txt"
} else if(file.exists("household_power_consumption.txt")){
filedest = "household_power_consumption.txt"
} else if(file.exists("exdata_data_household_power_consumption.zip")){
require(utils)
unzip("exdata_data_household_power_consumption.zip", exdir = "data")
filedest = "data/household_power_consumption.txt"
} else {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "exdata_data_household_power_consumption.zip", method = "curl")
require(utils)
unzip("exdata_data_household_power_consumption.zip", exdir = "data")
filedest = "data/household_power_consumption.txt"
}
# This data set is very large, so I only want to extract the relevant data when reading the text file into R storage. Will only be using data from 2007-02-01 and 2007-02-02. These start at row 66638 and end at row 69517 (for a total of 2880 rows)
# the header will be read in seperately
# The column names, read seperately.
heads <- read.table("data/household_power_consumption.txt",sep =";",nrows = 1)
dat <- read.table("data/household_power_consumption.txt", sep = ";", skip = 66637, nrows = 2880)
heads <- sapply(heads, as.character)
colnames(dat)<-heads
#Create the plot
png(filename = "plot1.png")
hist(dat$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
#DEMO TANK MODEL FOR APP
# #test data
# fnam="tankhist.csv"
# dat=read.csv(fnam,header=T)
# A=Sys.time()
# out=tankPerformance(data=dat,
# roofArea=50,
# nPeople=1,
# tankVol=3000,
# write.file=TRUE,
# fnam="tank_demo_sam.csv")
# B=Sys.time()
# print(B-A)
#
# systemArgs<-list(roofArea=50,
# nPeople=1,
# tankVol=3000,
# firstFlush=1,
# write.file=T,
# fnam="inception.csv",
# metric="reliability")
#' Wrapper function for a rain water tank system model
#'
#' \code{tankWrapper} wraps the example rainwater tank system model shipped
#' with foreSIGHT. It is used in examples in function help files and vignettes,
#' and may serve as a template for wrapping other system models (in \code{R}
#' or other languages) around scenarios generated using foreSIGHT.
#' @param data data.frame; observed daily precipitation and temperature used to
#' run the rain water tank system model, with columns named \emph{year},
#' \emph{month}, \emph{day}, \emph{P}, and \emph{Temp} (in that order).
#' See \code{data(tankDat)} for an example of the expected format.
#' @param systemArgs a list; input arguments to the rain water tank system
#' model. Valid fields:
#' \itemize{
#' \item {\code{roofArea}} {: numeric; the roof area in sq.m}
#' \item {\code{nPeople}} {: integer; number of people using water}
#' \item {\code{tankVol}} {: numeric; volume of the tank in L}
#' \item {\code{firstFlush}} {: numeric; first flush depth over roof in mm}
#' \item {\code{write.file}} {: logical; whether output is written to file}
#' \item {\code{fnam}} {: string; name of the output file}
#' }
#' @param metrics string vector; names of the performance metrics to report.
#' Valid names may be viewed using \code{viewTankMetrics()}.
#' @return A list containing the calculated values of the performance metrics
#' specified in \code{metrics} after running the system model.
#' @seealso \code{runSystemModel}, \code{viewTankMetrics}
#' @examples
#' # view available performance metrics
#' viewTankMetrics()
#' # load example climate data to run the system model
#' data(tankDat)
#' systemArgs <- list(roofArea = 205, nPeople = 1, tankVol = 2400,
#'                    firstFlush = 2.0, write.file = FALSE)
#' tankWrapper(tank_obs, systemArgs,
#'             metrics = c("average daily deficit (L)", "reliability (fraction)"))
#' @export
tankWrapper <- function(data,
                        systemArgs,
                        metrics) {
  # Assemble the tankPerformance() call from the argument list, then keep
  # only the metrics the caller asked for.
  perfArgs <- list(data = data,
                   roofArea = systemArgs$roofArea,
                   nPeople = systemArgs$nPeople,
                   tankVol = systemArgs$tankVol,
                   firstFlush = systemArgs$firstFlush,
                   write.file = systemArgs$write.file,
                   fnam = systemArgs$fnam)
  allMetrics <- do.call(tankPerformance, perfArgs)
  allMetrics[metrics]
}
#' Prints the names of the performance metrics of the rain water tank system model
#'
#' \code{viewTankMetrics} prints the names of the performance metrics available
#' in the example rain water tank system model.
#' @details This is a helper function that does not take any input arguments.
#' The user may specify one or more of the printed metric names as the
#' \code{metrics} argument of \code{tankWrapper} to select the performance
#' metrics returned by the tank system model.
#' @seealso \code{tankWrapper}
#' @examples
#' viewTankMetrics()
#' @export
viewTankMetrics <- function() {
  print(tankMetrics)
}
#---------------------------------------------------------
# Daily water-balance simulation of a domestic rainwater tank.
#
# Returns a list of daily series (all volumes in L): rainTS (input rain, mm),
# roofFlow (raw roof runoff), inflow (runoff after first-flush diversion),
# tankInflow (water that entered the tank), tankSpill (overflow),
# tankVolSim (end-of-day storage), demand, and supply.
tank_model <- function(roofArea = 50,   # roof area in m2
                       nPeople = 1,     # number of people
                       tankVol = 3000,  # tank volume in L
                       firstFlush = 1,  # first flush diverter size in mm
                       rainTS = NULL,   # daily rain time series (mm)
                       tempTS = NULL,   # daily temperature time series (deg C)
                       date = NULL      # data.frame c("year","month","day")
) {
  nday <- length(rainTS)  # number of days to simulate
  # Preallocate daily state vectors.  numeric(nday) (instead of the original
  # rep(NA, nday)) also makes nday == 0 safe.
  tankVolSim <- numeric(nday)
  tankInflow <- numeric(nday)
  tankSpill <- numeric(nday)
  supply <- numeric(nday)
  initTankVol <- tankVol / 2.0  # start the tank half full
  # DEMAND -------------------------------------------------------------------
  indoorDemand <- 10 * nPeople  # constant indoor demand (toilet flushing), L/day
  outdoorDemandSum <- 739       # outdoor demand scale, L
  # monthly proportions from the Goyder household water use report
  seasPattern <- c(0.229, 0.188, 0.142, 0.064, 0.030, 0, 0.001, 0.007,
                   0.014, 0.049, 0.107, 0.171)
  # vectorized month lookup (replaces the original 12-iteration loop)
  outdoorDemand <- seasPattern[date$month] * outdoorDemandSum
  # temperature multipliers: more watering on hot days, less on cold days
  lowTempThresh <- 12
  upTempThresh <- 28
  indUp <- which(tempTS >= upTempThresh)
  outdoorDemand[indUp] <- outdoorDemand[indUp] * 1.25
  indLow <- which(tempTS <= lowTempThresh)
  outdoorDemand[indLow] <- outdoorDemand[indLow] * 0.6
  # combined daily demand
  demand <- rep(indoorDemand, nday) + outdoorDemand
  # ROOF RUNOFF AND FIRST FLUSH ----------------------------------------------
  divertAmount <- firstFlush * roofArea  # diverter capacity, L (mm x m2)
  inflow <- rainTS * roofArea            # 100% conversion of rain to runoff, L
  roofFlow <- inflow                     # keep the undiverted roof flow
  # consecutive-wet-day counter (0 on dry days); the day before the
  # simulation is assumed dry
  stormDur <- numeric(nday)
  prevDur <- 0
  for (i in seq_len(nday)) {
    prevDur <- if (inflow[i] > 0) prevDur + 1 else 0
    stormDur[i] <- prevDur
  }
  # Remove the first flush from each storm: the diverter empties (and so can
  # refill) at the start of every storm, absorbs runoff until full, and
  # drains between storms.
  divertRemainder <- 0
  for (i in seq_len(nday)) {
    if (stormDur[i] == 1) {
      divertRemainder <- divertAmount  # new storm: full diverter capacity available
    } else if (stormDur[i] == 0) {
      divertRemainder <- 0             # dry day: diverter drains
    }
    if (stormDur[i] >= 1) {
      diverted <- min(inflow[i], divertRemainder)
      divertRemainder <- divertRemainder - diverted
      inflow[i] <- inflow[i] - diverted
    }
  }
  # WATER BALANCE -------------------------------------------------------------
  # Single unified loop; the original duplicated the day-1 logic and crashed
  # for nday < 2 because of its 2:nday sequence.
  prevVol <- initTankVol
  for (i in seq_len(nday)) {
    balance <- prevVol + inflow[i] - demand[i]
    if (balance < 0) {
      # tank has run dry: supply whatever was available
      tankVolSim[i] <- 0
      tankSpill[i] <- 0
      supply[i] <- prevVol + inflow[i]
      tankInflow[i] <- inflow[i]
    } else if (balance > tankVol) {
      # tank overtops: the excess spills
      tankVolSim[i] <- tankVol
      tankSpill[i] <- balance - tankVol
      supply[i] <- demand[i]
      tankInflow[i] <- tankVol - (prevVol - demand[i])  # took what could fit
    } else {
      # tank is partially filled
      tankVolSim[i] <- balance
      tankSpill[i] <- 0
      supply[i] <- demand[i]
      tankInflow[i] <- inflow[i]
    }
    prevVol <- tankVolSim[i]
  }
  list(rainTS = rainTS, roofFlow = roofFlow, inflow = inflow,
       tankInflow = tankInflow, tankSpill = tankSpill,
       tankVolSim = tankVolSim, demand = demand, supply = supply)
}
# Metric names, defined at top level so that viewTankMetrics() can display
# them; also used as the names of the list returned by tankPerformance().
tankMetrics <- c("volumetric reliability (fraction)",
                 "reliability (fraction)",
                 "system efficiency (%)",
                 "storage efficiency (%)",
                 "average tank storage (L)",
                 "average daily deficit (L)")
# Runs tank_model() over the supplied climate data and summarises the run as
# the performance metrics named in `tankMetrics`.
#
# Args:
#   data:       data.frame with columns year, month, day, P (rain, mm), Temp
#   roofArea, nPeople, tankVol, firstFlush: passed through to tank_model()
#   write.file: if TRUE, dump the full daily simulation to `fnam` as CSV
#   fnam:       output CSV file name
# Returns a named list of metrics (names match `tankMetrics`).
tankPerformance<-function(data=NULL,
                          roofArea=50,
                          nPeople=1,
                          tankVol=3000,
                          firstFlush=1,
                          write.file=TRUE,
                          fnam="tankperformance.csv"
){
  out=tank_model(roofArea=roofArea,
                 nPeople=nPeople,
                 tankVol=tankVol,
                 firstFlush=firstFlush,
                 rainTS=data$P,
                 tempTS=data$Temp,
                 date=data[,c("year","month","day")])
  # optionally dump the daily simulation series to CSV
  if(write.file==TRUE){
    utils::write.csv(out, file=fnam, quote=F, row.names=F) #write tank info to file
  }
  #----- tank model evaluation, loosely based on University of Warwick metrics
  # reliability: fraction of days on which the full demand was supplied
  reliability = length(which(out$demand==out$supply))/length(out$supply)
  # totals over the whole simulation
  # NOTE(review): an earlier comment said "first year removed", but no
  # warm-up period is actually dropped here -- confirm intent.
  Qin_tot=sum(out$tankInflow)
  Quse_tot=sum(out$supply)
  Qspill_tot=sum(out$tankSpill)
  Qroof_tot=sum(out$roofFlow)
  # system efficiency: share of total roof runoff that was actually supplied
  # (Quse/Qroof * 100; an earlier comment gave a different formula)
  sysEff=Quse_tot/Qroof_tot*100
  # storage (spill) ratio: percentage of tank inflow that was spilled
  storEff=(Qspill_tot/Qin_tot)*100
  # volumetric reliability: fraction of total demand volume supplied
  volRel=sum(out$supply)/sum(out$demand)
  # average water stored in the tank on a daily basis (mean tank level)
  avTankStor=sum(out$tankVolSim)/length(out$tankVolSim)
  # average daily deficit: mean of max(demand - supply, 0)
  temp=(out$demand-out$supply);temp[which(temp<0)]=0
  avDeficit=sum(temp)/length(temp)
  outList <- list(volRel, reliability, sysEff, storEff, avTankStor, avDeficit)
  names(outList) <- tankMetrics
  return(outList)
  # return(list(tankMetrics[1]=volRel,tankMetrics[2]=reliability,tankMetrics[3]=sysEff,
  #             tankMetrics[4]=storEff,tankMetrics[5]=avTankStor,tankMetrics[6]=avDeficit))
}
|
/R/demoTankModel.R
|
no_license
|
cran/foreSIGHT
|
R
| false
| false
| 11,511
|
r
|
#DEMO TANK MODEL FOR APP
# #test data
# fnam="tankhist.csv"
# dat=read.csv(fnam,header=T)
# A=Sys.time()
# out=tankPerformance(data=dat,
# roofArea=50,
# nPeople=1,
# tankVol=3000,
# write.file=TRUE,
# fnam="tank_demo_sam.csv")
# B=Sys.time()
# print(B-A)
#
# systemArgs<-list(roofArea=50,
# nPeople=1,
# tankVol=3000,
# firstFlush=1,
# write.file=T,
# fnam="inception.csv",
# metric="reliability")
#' Wrapper function for a rain water tank system model
#'
#' \code{tankWrapper} is a wrapper function for a rainwater tank system model in foreSIGHT. This function is used in examples in function help files and vignettes.
#' This function may also be used as an example to create wrapper functions for other system models with scenarios generated using foreSIGHT in \code{R} or other programming languages.
#' @param data data.frame; contains observed daily precipitation and temperature to be used to run the rain water tank system model in a data.frame with columns named \emph{year} \emph{month} \emph{day} \emph{P} \emph{Temp}.
#' Note that the first three columns of the data.frame contain the year, month, and day of observation. The columns have to be named as specified.
#' Please refer data provided with the package that may be loaded using \code{data(tankDat)} for an example of the expected format of \code{data}.
#' @param systemArgs a list; contains the input arguments to the rain water tank system model. The valid fields in the list are:
#' \itemize{
#' \item {\code{roofArea}} {: numeric; the roof area in sq.m}
#' \item {\code{nPeople}} {: integer; number of people using water}
#' \item {\code{tankVol}} {: numeric; volume of the tank in L}
#' \item {\code{firstFlush}} {: numeric; first flush depth over roof in mm}
#' \item {\code{write.file}} {: logical; indicates whether output is to be written to file}
#' \item {\code{fnam}} {: string; name of the output file}
#' }
#' @param metrics string vector; the metrics of performance of the system model to be reported. The valid strings may be viewed using the function \code{viewTankMetrics()}
#' @return The function returns a list containing the calculated values of the performance metrics specified in \code{metrics} after running the system model.
#' @seealso \code{runSystemModel}, \code{viewTankMetrics}
#' @examples
#' # view available performance metrics
#' viewTankMetrics()
#' # load example climate data to run the system model
#' data(tankDat)
#' systemArgs <- list(roofArea = 205, nPeople = 1, tankVol = 2400,
#' firstFlush = 2.0, write.file = FALSE)
#' tankWrapper(tank_obs, systemArgs,
#' metrics = c("average daily deficit (L)", "reliability (fraction)"))
#' @export
tankWrapper<-function(data,
systemArgs,
metrics
) {
performance<-tankPerformance(data=data,
roofArea=systemArgs$roofArea,
nPeople=systemArgs$nPeople,
tankVol=systemArgs$tankVol,
firstFlush=systemArgs$firstFlush,
write.file=systemArgs$write.file,
fnam=systemArgs$fnam)
performanceSubset <- performance[metrics]
return(performanceSubset)
}
#' Prints the names of the performance metrics of the rain water tank system model
#'
#' \code{viewTankMetrics} prints the names of the performance metrics available in the example rain water tank system model. T
#' @details This is a helper function that does not take any input arguments. The user may specify one or more of the metric names
#' as the \code{metric} argument of \code{tankWrapper} to select the performance metrics from the tank system model.
#' to select the performance metrics.
#' @seealso \code{tankWrapper}
#' @examples
#' viewTankMetrics()
#' @export
viewTankMetrics <- function() {
print(tankMetrics)
}
#---------------------------------------------------------
tank_model<-function(roofArea=50, #Roof area in m2
nPeople=1, #No of people(?)
tankVol=3000, #tank volume in L
firstFlush=1, #first flush diverter size in mm
rainTS=NULL, #daily rain time series
tempTS=NULL, #daily temperature time series
date=NULL #date - data.frame c("year","month","day")
){
nday=length(rainTS) #how many days to simulate
#Set up vectors
tankVolSim=rep(NA,nday)
tankInflow=rep(NA,nday)
tankSpill=rep(NA,nday)
supply=rep(NA,nday)
#Get initial tank volume
initTankVol=tankVol/2.0 #start tank half full
#DEMAND...
indoorDemand=10*nPeople #constant indoor demand (toilet flushing)
outdoorDemandSum=739
seasPattern=c(0.229,0.188,0.142,0.064,0.030,0,0.001,0.007,0.014,0.049,0.107,0.171) #proportions from Goyder household water use report
outdoorDemand=rep(NA,nday)
for(i in 1:12){
ind=which(date$month == i)
outdoorDemand[ind]=seasPattern[i]*outdoorDemandSum
}
#upper temp threshold - lower temp threshold
lowTempThresh=12; upTempThresh=28
indUp=which(tempTS>=upTempThresh) # made up temperature multipliers (Masters report foudn above 28 degs more watering)
outdoorDemand[indUp]=outdoorDemand[indUp]*1.25
indLow=which(tempTS<=lowTempThresh)
outdoorDemand[indLow]=outdoorDemand[indLow]*0.6
#combined demand
demand=rep(indoorDemand,nday)+outdoorDemand
#FIRST FLUSH -removed at the start of each storm
divertAmount=firstFlush*roofArea #1mm x roof area (Ls)
#HOW MUCH FLOW?
inflow=rainTS*roofArea #mm x m2 (Ls) (100% conversion to runoff)
roofFlow=inflow #save as roof flow
#WET DRY DAY PATTERN
stormDur=rep(NA,nday)
if(inflow[1]>0){stormDur[1]=1}else{stormDur[1]=0} #assume day prior to start is dry
for(i in 2:nday){
if(inflow[i]>0){ # wet day today
stormDur[i]=stormDur[i-1]+1 #wet -wet or dry-wet
}else{ # dry day today
stormDur[i]=0 # dd or wd pattern
}
}
#REMOVE FIRST FLUSH FROM EACH STORM
for(i in 1:nday){
if(stormDur[i]==1){ #first wet day in storm (divert Amount re-set to max as emptied in between storms)
temp=inflow[i]-divertAmount
if(temp<0){
divertRemainder=abs(temp)
inflow[i]=0 #no left over flow
}else{
divertRemainder=0
inflow[i]=temp #new flow (minus divert)
}
}else{
if(stormDur[i]>1){ #not first day of storm
temp=inflow[i]-divertRemainder
if(temp<0){
divertRemainder=abs(temp)
inflow[i]=0 #no left over flow
}else{
divertRemainder=0
inflow[i]=temp #new flow (minus divert)
}
}else{
divertRemainder=0
}
}
}
#CALCULATE STARTING POINT
if((initTankVol+inflow[1]-demand[1])<0){ # Tank has run dry
tankVolSim[1]=0 # MINIMUM IS ZERO
tankSpill[1]=0
supply[1]=initTankVol+inflow[1] # Gave what you could
tankInflow[1]=inflow[1]
}else{
if((initTankVol+inflow[1]-demand[1])>tankVol){ # Tank is overtopped
tankVolSim[1]=tankVol
tankSpill[1]=(initTankVol+inflow[1]-demand[1])-tankVol
tankInflow[1]=tankVol-(initTankVol-demand[1])
supply[1]=demand[1]
}else{ # Tank is partially filled
tankVolSim[1]=initTankVol+inflow[1]-demand[1]
tankSpill[1]=0
supply[1]=demand[1]
tankInflow[1]=inflow[1]
}
}
# loop over days
for(i in 2:nday){
if((tankVolSim[i-1]+inflow[i]-demand[i])<0){ # Tank has run dry
tankVolSim[i]=0
tankSpill[i]=0
supply[i]=tankVolSim[i-1]+inflow[i] # Gave what you could
tankInflow[i]=inflow[i] # all inflow travels through tank
}else{
if((tankVolSim[i-1]+inflow[i]-demand[i])>tankVol){ # Tank is overtopped
tankVolSim[i]=tankVol
tankSpill[i]=(tankVolSim[i-1]+inflow[i]-demand[i])-tankVol
supply[i]=demand[i]
tankInflow[i]=tankVol-(tankVolSim[i-1]-demand[i]) # took what could fit (once demand also taken)
}else{ # Tank is partially filled
tankVolSim[i]=tankVolSim[i-1]+inflow[i]-demand[i]
tankSpill[i]=0
supply[i]=demand[i]
tankInflow[i]=inflow[i] # all inflow travels through tank
}
}
}
return(list(rainTS=rainTS,roofFlow=roofFlow,inflow=inflow,tankInflow=tankInflow,tankSpill=tankSpill,tankVolSim=tankVolSim,demand=demand,supply=supply))
}
# placed outside functions so that it can viewed using a helper function
tankMetrics <- c("volumetric reliability (fraction)",
"reliability (fraction)",
"system efficiency (%)",
"storage efficiency (%)",
"average tank storage (L)",
"average daily deficit (L)")
tankPerformance<-function(data=NULL,
roofArea=50,
nPeople=1,
tankVol=3000,
firstFlush=1,
write.file=TRUE,
fnam="tankperformance.csv"
){
out=tank_model(roofArea=roofArea,
nPeople=nPeople,
tankVol=tankVol,
firstFlush=firstFlush,
rainTS=data$P,
tempTS=data$Temp,
date=data[,c("year","month","day")])
# to test need to write to csv
if(write.file==TRUE){
utils::write.csv(out, file=fnam, quote=F, row.names=F) #write tank info to file
}
#-----tank model eval - sort of based on university of warwick metrics--------
#reliability - fraction of days (total demand) met nSatisfied/nday
reliability = length(which(out$demand==out$supply))/length(out$supply)
#Totals (first year removed)
Qin_tot=sum(out$tankInflow)
Quse_tot=sum(out$supply)
Qspill_tot=sum(out$tankSpill)
Qroof_tot=sum(out$roofFlow)
#system efficiency = (1- Quse/Qin)*100
sysEff=Quse_tot/Qroof_tot*100
#storage efficiency spilled v captured
storEff=(Qspill_tot/Qin_tot)*100
#volumetric reliability
volRel=sum(out$supply)/sum(out$demand)
#Av. water stored by tank on a daily basis (av. tank level)
avTankStor=sum(out$tankVolSim)/length(out$tankVolSim)
#avDeficit (max(demand-supply,0)/ndays)
temp=(out$demand-out$supply);temp[which(temp<0)]=0
avDeficit=sum(temp)/length(temp)
outList <- list(volRel, reliability, sysEff, storEff, avTankStor, avDeficit)
names(outList) <- tankMetrics
return(outList)
# return(list(tankMetrics[1]=volRel,tankMetrics[2]=reliability,tankMetrics[3]=sysEff,
# tankMetrics[4]=storEff,tankMetrics[5]=avTankStor,tankMetrics[6]=avDeficit))
}
|
# Alpha-diversity analysis of 16S rRNA data: builds a phyloseq object from
# QIIME2 artifacts, subsets to the genotypes/timepoint of interest, and plots
# richness (Shannon) diversity to PNG files.
library(phyloseq)
library(ggplot2)
library(reshape)
library(qiime2R)
# NOTE(review): setwd() makes the script non-portable; prefer running from
# the project root or using relative paths.
setwd('/Users/sophiecurio/Dropbox/APC_project/16s/Sophie/')
# assemble feature table, tree, taxonomy and sample metadata into one object
phy<-qza_to_phyloseq("table.qza", "rooted-tree.qza", "taxonomy.qza","metadata_phyloseq2.tsv")
# groups to compare and the timepoint (day) to analyse
Genotypes = c("APC_WT3", "WT_ctrl")
Timepoints = c("18")
# label used in output file names
name <- paste(Genotypes, Timepoints, collapse = '_')
# only keep samples with > 2500 feature IDs
phy = prune_samples(sample_sums(phy) > 2500, phy)
# select subset
phy = subset_samples(phy, Genotype %in% Genotypes)
phy = subset_samples(phy, Timepoint %in% Timepoints)
# plot all alpha diversity parameters by timepoint/genotype
png(file = paste("R/", paste('richness1.png'), sep = ''), units="in", width=18, height=10, res=300)
plot_richness(phy, x = 'Timepoint', color = 'Genotype')
dev.off()
# plot only specific parameteres with boxplot with p value
rich = estimate_richness(phy)
# NOTE(review): wilcox.test(x, y) expects two numeric samples; passing the
# Genotype grouping vector as `y` looks wrong -- confirm whether
# wilcox.test(Shannon ~ Genotype, ...) was intended (p_value is only used in
# the commented-out ggtitle below).
p_value <- wilcox.test(rich$Shannon, sample_data(phy)$Genotype)$p.value
rich$Shannon
# NOTE(review): argument is `measure=` here but `measures=` below; the first
# relies on partial matching -- confirm and spell out.
shannon <- estimate_richness(phy, measure=c("Shannon"))
shannon
png(file = paste("R/Alpha_plots/", paste(name, '_alpha_observed.png'), sep = ''), units="in", width=5, height=5, res=300)
p <- plot_richness(phy, x="Genotype", measures=c("Shannon"))
plot(p)
# + geom_text(aes(label=Mouse_ID)) + ggtitle(paste(name, '\np =', p_value))
dev.off()
|
/Microbiota/alpha.r
|
no_license
|
sophiecurio/phd
|
R
| false
| false
| 1,302
|
r
|
library(phyloseq)
library(ggplot2)
library(reshape)
library(qiime2R)
setwd('/Users/sophiecurio/Dropbox/APC_project/16s/Sophie/')
phy<-qza_to_phyloseq("table.qza", "rooted-tree.qza", "taxonomy.qza","metadata_phyloseq2.tsv")
Genotypes = c("APC_WT3", "WT_ctrl")
Timepoints = c("18")
name <- paste(Genotypes, Timepoints, collapse = '_')
# only keep samples with > 2500 feature IDs
phy = prune_samples(sample_sums(phy) > 2500, phy)
# select subset
phy = subset_samples(phy, Genotype %in% Genotypes)
phy = subset_samples(phy, Timepoint %in% Timepoints)
# plot all alpha diversity parameters by timepoint/genotype
png(file = paste("R/", paste('richness1.png'), sep = ''), units="in", width=18, height=10, res=300)
plot_richness(phy, x = 'Timepoint', color = 'Genotype')
dev.off()
# plot only specific parameteres with boxplot with p value
rich = estimate_richness(phy)
p_value <- wilcox.test(rich$Shannon, sample_data(phy)$Genotype)$p.value
rich$Shannon
shannon <- estimate_richness(phy, measure=c("Shannon"))
shannon
png(file = paste("R/Alpha_plots/", paste(name, '_alpha_observed.png'), sep = ''), units="in", width=5, height=5, res=300)
p <- plot_richness(phy, x="Genotype", measures=c("Shannon"))
plot(p)
# + geom_text(aes(label=Mouse_ID)) + ggtitle(paste(name, '\np =', p_value))
dev.off()
|
############# R Functions for parsing NCCA ASCII header information ##############
# 3-6-2014 Raymond Nelson
# 8-1-2014
#
# this script contains the following 3 functions
#
# chartHeader()
# to make a .csv file of the header data for each chart
#
# stimText()
# to make a .csv file of the stimulus text for each chart
#
# eventTable()
# to make a .csv file of the table of stimulus events for each chart
# will replace missing event values
#
##############################################
#
#
chartHeader <- function(x = "headerNames") {
  # For every *.header.txt file named in the character vector whose NAME is
  # passed in `x` (looked up at search-path position 1, i.e. the global
  # environment), parse the leading "Label: value" header section and write
  # it out as a two-row .csv (row 1 = labels, row 2 = values) next to the
  # source file.  Returns nothing useful; called for its file side effects.
  headerNames <- get(x, pos = 1)
  for (fileName in headerNames) {
    headerFile <- readLines(fileName,
                            n = -1,
                            ok = TRUE,
                            warn = FALSE,
                            encoding = "UTF-8")
    # the header section runs up to (but not including) the blank line
    # before the "Event Label Statement" stimulus-text section
    headerFileHeaders <- headerFile[1:(pmatch("Event Label Statement",
                                              headerFile) - 2)]
    headerFileHeaders <- strsplit(headerFileHeaders, ": ")
    # first field of each "Label: value" pair is the label, second the value;
    # vapply replaces the original O(n^2) grow-by-c() accumulation loop
    headerFileHeaderLabels <- vapply(headerFileHeaders, `[`, character(1), 1)
    headerFileHeadersElementList <- vapply(headerFileHeaders, `[`, character(1), 2)
    # swap the ".txt" extension for ".csv" and write alongside the source
    fileNameCSV <- paste0(strtrim(fileName, nchar(fileName) - 4), ".csv")
    write.table(rbind(as.list(headerFileHeaderLabels),
                      as.list(headerFileHeadersElementList)),
                file = fileNameCSV,
                append = FALSE,
                quote = TRUE,
                sep = ",", eol = "\n",
                na = "NA", dec = ".",
                row.names = FALSE,
                col.names = FALSE,
                qmethod = "double",
                fileEncoding = "UTF-8")
  }
}
#
##########################################
#
#
stimText <- function(x = "headerNames") {
  # Write a .csv table of the stimulus question text for each NCCA ASCII
  # header file. Wrapped continuation lines -- identified by six leading
  # blanks -- are re-joined to the preceding stimulus before parsing.
  #
  # x: the *name* (a string) of a variable in the global environment that
  #    holds the paths of the *.header.txt files to process.
  # Side effect: for every "foo.header.txt" a "foo_stimuli.csv" is written
  #    with columns: event number, event label, stimulus text.
  # Returns: NULL, invisibly (called for its side effects).
  headerNames <- get(x, pos = 1) # resolve the file-name vector from the global env
  for (i in seq_along(headerNames)) {
    fileName <- headerNames[i]
    headerFile <- readLines(fileName,
                            n = -1,
                            ok = TRUE,
                            warn = FALSE,
                            encoding = "UTF-8")
    # the stimulus block runs from the "Event Label Statement" marker to
    # two lines above the "Event Label Begin End Answer" marker
    headerFileStimuli <-
      headerFile[pmatch("Event Label Statement", headerFile):
                   (pmatch("Event Label Begin End Answer",
                           headerFile) - 2)]
    # re-join wrapped lines: a line starting with six blanks continues the
    # previous statement, and only its text portion (column 16 onward) is
    # appended; handles multiple consecutive wrapped lines
    stimulusLines <- NULL
    for (j in seq_along(headerFileStimuli)) {
      thisLine <- headerFileStimuli[j]
      if (strtrim(thisLine, 6) == "      ") {
        stimulusLines[length(stimulusLines)] <-
          paste(stimulusLines[length(stimulusLines)],
                substr(thisLine, 16, nchar(thisLine)),
                sep = "")
      } else {
        stimulusLines <- c(stimulusLines, thisLine)
      }
    }
    headerFileStimuli <- stimulusLines
    # fixed-width fields: 1-6 event number, 7-14 label, 16+ statement text
    # (trimws()/substr() are the base equivalents of the original stringr
    # str_trim()/str_sub() calls)
    eventNumb <- trimws(strtrim(headerFileStimuli, 6))
    eventLabels <- trimws(substr(headerFileStimuli, 7, 14))
    stimStatements <- trimws(substr(headerFileStimuli, 16,
                                    nchar(headerFileStimuli)))
    # swap the 11-character ".header.txt" suffix for "_stimuli.csv"
    fileNameCSV <- paste(strtrim(fileName, nchar(fileName) - 11),
                         "_stimuli.csv", sep = "")
    # all three vectors have equal length, so plain cbind() matches the
    # original's explicit re-indexing
    write.table(cbind(eventNumb, eventLabels, stimStatements),
                file = fileNameCSV,
                append = FALSE,
                quote = TRUE,
                sep = ",",
                eol = "\n",
                na = "NA",
                dec = ".",
                row.names = FALSE,
                col.names = FALSE,
                qmethod = "double",
                fileEncoding = "UTF-8")
  }
  invisible(NULL)
}
#
###########################################
#
#
eventTable <- function(x = "headerNames") {
  # Write a .csv table of the stimulus events for each NCCA ASCII header
  # file, dropping rows that have no event label and filling a missing
  # end time from the begin time and a missing answer from the end time.
  #
  # x: the *name* (a string) of a variable in the global environment that
  #    holds the paths of the *.header.txt files to process.
  # Side effect: for every "foo.header.txt" a "foo_events.csv" is written
  #    with columns: event number, label, onset, offset, answer.
  # Returns: NULL, invisibly (called for its side effects).
  headerNames <- get(x, pos = 1) # resolve the file-name vector from the global env
  for (i in seq_along(headerNames)) {
    fileName <- headerNames[i]
    headerFile <- readLines(fileName,
                            n = -1,
                            ok = TRUE,
                            warn = FALSE,
                            encoding = "UTF-8")
    # the event table runs from the "Event Label Begin End Answer" marker
    # to the end of the file
    headerFileEvents <- headerFile[pmatch("Event Label Begin End Answer",
                                          headerFile):length(headerFile)]
    # fixed-width fields: 1-6 number, 7-14 label, 15-25 begin, 26-36 end,
    # 37-47 answer (trimws()/substr() are the base equivalents of the
    # original stringr str_trim()/str_sub() calls)
    eventNumb   <- trimws(strtrim(headerFileEvents, 6))
    eventLabels <- trimws(substr(headerFileEvents, 7, 14))
    eventOnset  <- trimws(substr(headerFileEvents, 15, 25))
    eventOffset <- trimws(substr(headerFileEvents, 26, 36))
    eventAnswer <- trimws(substr(headerFileEvents, 37, 47))
    # vectorized fill-in replaces the original row-by-row rbind() loop:
    # a missing offset falls back to the onset, and a missing answer falls
    # back to the (possibly just filled) offset -- same order as before
    eventOffset <- ifelse(nchar(eventOffset) == 0, eventOnset, eventOffset)
    eventAnswer <- ifelse(nchar(eventAnswer) == 0, eventOffset, eventAnswer)
    tempCSV <- data.frame(eventNumb, eventLabels, eventOnset, eventOffset,
                          eventAnswer, stringsAsFactors = FALSE)
    # keep only rows that actually name an event
    tempCSV2 <- tempCSV[tempCSV$eventLabels != "", , drop = FALSE]
    # swap the 11-character ".header.txt" suffix for "_events.csv"
    fileNameCSV <- paste(strtrim(fileName, nchar(fileName) - 11), "_events.csv", sep = "")
    write.table(tempCSV2, file = fileNameCSV,
                append = FALSE,
                quote = TRUE,
                sep = ",",
                eol = "\n",
                na = "NA",
                dec = ".",
                row.names = FALSE,
                col.names = FALSE,
                qmethod = "double",
                fileEncoding = "UTF-8")
  }
  invisible(NULL)
}
#
|
/backup/headerParse.r
|
no_license
|
raymondnelson/NCCA_ASCII_Parse
|
R
| false
| false
| 8,531
|
r
|
############# R Functions for parsing NCCA ASCII header information ##############
# 3-6-2014 Raymond Nelson
# 8-1-2014
#
# this script contains the following 3 functions
#
# chartHeader()
# to make a .csv file of the header data for each chart
#
# stimText()
# to make a .csv file of the stimulus text for each chart
#
# eventTable()
# to make a .csv file of the table of stimulus events for each chart
# will replace milling event values
#
##############################################
#
#
chartHeader <- function(x = "headerNames") {
  # Write a .csv file of the header key/value pairs for each NCCA ASCII
  # header file.
  #
  # x: the *name* (a string) of a variable in the global environment that
  #    holds the paths of the *.header.txt files to process.
  # Side effect: for every input file "foo.txt" a "foo.csv" is written
  #    containing two rows -- the header labels and their values.
  # Returns: NULL, invisibly (called for its side effects).
  headerNames <- get(x, pos = 1) # resolve the file-name vector from the global env
  for (i in seq_along(headerNames)) {  # seq_along() is safe for empty input
    fileName <- headerNames[i]
    headerFile <- readLines(fileName,
                            n = -1,
                            ok = TRUE,
                            warn = FALSE,
                            encoding = "UTF-8")
    # the header section ends two lines above the "Event Label Statement" marker
    headerFileHeaders <- headerFile[1:(pmatch("Event Label Statement",
                                              headerFile) - 2)]
    # each header line has the form "Label: value"
    headerFileHeaders <- strsplit(headerFileHeaders, ": ")
    # extract labels and values with typed vapply() calls instead of
    # growing vectors inside a loop; a line without ": " yields an NA
    # value, matching the previous behavior
    headerFileHeaderLabels <- vapply(headerFileHeaders,
                                     function(parts) parts[1], character(1))
    headerFileHeaderValues <- vapply(headerFileHeaders,
                                     function(parts) parts[2], character(1))
    # swap the 4-character ".txt" extension for ".csv"
    fileNameCSV <- paste(strtrim(fileName, nchar(fileName) - 4), ".csv",
                         sep = "")
    # both rows are character data, so a plain character matrix produces
    # the same quoted two-row output as the original rbind() of lists
    write.table(rbind(headerFileHeaderLabels,
                      headerFileHeaderValues),
                file = fileNameCSV,
                append = FALSE,
                quote = TRUE,
                sep = ",", eol = "\n",
                na = "NA", dec = ".",
                row.names = FALSE,
                col.names = FALSE,
                qmethod = "double",
                fileEncoding = "UTF-8")
  }
  invisible(NULL)
}
#
##########################################
#
#
stimText <- function(x = "headerNames") {
  # Write a .csv table of the stimulus question text for each NCCA ASCII
  # header file. Wrapped continuation lines -- identified by six leading
  # blanks -- are re-joined to the preceding stimulus before parsing.
  #
  # x: the *name* (a string) of a variable in the global environment that
  #    holds the paths of the *.header.txt files to process.
  # Side effect: for every "foo.header.txt" a "foo_stimuli.csv" is written
  #    with columns: event number, event label, stimulus text.
  # Returns: NULL, invisibly (called for its side effects).
  headerNames <- get(x, pos = 1) # resolve the file-name vector from the global env
  for (i in seq_along(headerNames)) {
    fileName <- headerNames[i]
    headerFile <- readLines(fileName,
                            n = -1,
                            ok = TRUE,
                            warn = FALSE,
                            encoding = "UTF-8")
    # the stimulus block runs from the "Event Label Statement" marker to
    # two lines above the "Event Label Begin End Answer" marker
    headerFileStimuli <-
      headerFile[pmatch("Event Label Statement", headerFile):
                   (pmatch("Event Label Begin End Answer",
                           headerFile) - 2)]
    # re-join wrapped lines: a line starting with six blanks continues the
    # previous statement, and only its text portion (column 16 onward) is
    # appended; handles multiple consecutive wrapped lines
    stimulusLines <- NULL
    for (j in seq_along(headerFileStimuli)) {
      thisLine <- headerFileStimuli[j]
      if (strtrim(thisLine, 6) == "      ") {
        stimulusLines[length(stimulusLines)] <-
          paste(stimulusLines[length(stimulusLines)],
                substr(thisLine, 16, nchar(thisLine)),
                sep = "")
      } else {
        stimulusLines <- c(stimulusLines, thisLine)
      }
    }
    headerFileStimuli <- stimulusLines
    # fixed-width fields: 1-6 event number, 7-14 label, 16+ statement text
    # (trimws()/substr() are the base equivalents of the original stringr
    # str_trim()/str_sub() calls)
    eventNumb <- trimws(strtrim(headerFileStimuli, 6))
    eventLabels <- trimws(substr(headerFileStimuli, 7, 14))
    stimStatements <- trimws(substr(headerFileStimuli, 16,
                                    nchar(headerFileStimuli)))
    # swap the 11-character ".header.txt" suffix for "_stimuli.csv"
    fileNameCSV <- paste(strtrim(fileName, nchar(fileName) - 11),
                         "_stimuli.csv", sep = "")
    # all three vectors have equal length, so plain cbind() matches the
    # original's explicit re-indexing
    write.table(cbind(eventNumb, eventLabels, stimStatements),
                file = fileNameCSV,
                append = FALSE,
                quote = TRUE,
                sep = ",",
                eol = "\n",
                na = "NA",
                dec = ".",
                row.names = FALSE,
                col.names = FALSE,
                qmethod = "double",
                fileEncoding = "UTF-8")
  }
  invisible(NULL)
}
#
###########################################
#
#
eventTable <- function(x = "headerNames") {
  # Write a .csv table of the stimulus events for each NCCA ASCII header
  # file, dropping rows that have no event label and filling a missing
  # end time from the begin time and a missing answer from the end time.
  #
  # x: the *name* (a string) of a variable in the global environment that
  #    holds the paths of the *.header.txt files to process.
  # Side effect: for every "foo.header.txt" a "foo_events.csv" is written
  #    with columns: event number, label, onset, offset, answer.
  # Returns: NULL, invisibly (called for its side effects).
  headerNames <- get(x, pos = 1) # resolve the file-name vector from the global env
  for (i in seq_along(headerNames)) {
    fileName <- headerNames[i]
    headerFile <- readLines(fileName,
                            n = -1,
                            ok = TRUE,
                            warn = FALSE,
                            encoding = "UTF-8")
    # the event table runs from the "Event Label Begin End Answer" marker
    # to the end of the file
    headerFileEvents <- headerFile[pmatch("Event Label Begin End Answer",
                                          headerFile):length(headerFile)]
    # fixed-width fields: 1-6 number, 7-14 label, 15-25 begin, 26-36 end,
    # 37-47 answer (trimws()/substr() are the base equivalents of the
    # original stringr str_trim()/str_sub() calls)
    eventNumb   <- trimws(strtrim(headerFileEvents, 6))
    eventLabels <- trimws(substr(headerFileEvents, 7, 14))
    eventOnset  <- trimws(substr(headerFileEvents, 15, 25))
    eventOffset <- trimws(substr(headerFileEvents, 26, 36))
    eventAnswer <- trimws(substr(headerFileEvents, 37, 47))
    # vectorized fill-in replaces the original row-by-row rbind() loop:
    # a missing offset falls back to the onset, and a missing answer falls
    # back to the (possibly just filled) offset -- same order as before
    eventOffset <- ifelse(nchar(eventOffset) == 0, eventOnset, eventOffset)
    eventAnswer <- ifelse(nchar(eventAnswer) == 0, eventOffset, eventAnswer)
    tempCSV <- data.frame(eventNumb, eventLabels, eventOnset, eventOffset,
                          eventAnswer, stringsAsFactors = FALSE)
    # keep only rows that actually name an event
    tempCSV2 <- tempCSV[tempCSV$eventLabels != "", , drop = FALSE]
    # swap the 11-character ".header.txt" suffix for "_events.csv"
    fileNameCSV <- paste(strtrim(fileName, nchar(fileName) - 11), "_events.csv", sep = "")
    write.table(tempCSV2, file = fileNameCSV,
                append = FALSE,
                quote = TRUE,
                sep = ",",
                eol = "\n",
                na = "NA",
                dec = ".",
                row.names = FALSE,
                col.names = FALSE,
                qmethod = "double",
                fileEncoding = "UTF-8")
  }
  invisible(NULL)
}
#
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/312.Expec_Leng_ADJ_All_Graph.R
\name{PlotexplALT}
\alias{PlotexplALT}
\title{Plots the Expected length using adjusted Logit Wald method}
\usage{
PlotexplALT(n, alp, h, a, b)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{h}{- Adding factor}
\item{a}{- Beta parameters for hypo "p"}
\item{b}{- Beta parameters for hypo "p"}
}
\description{
Plots the Expected length using adjusted Logit Wald method
}
\details{
The plots of the Expected length of the adjusted Logit Wald method
}
\examples{
\dontrun{
n= 10; alp=0.05; h=2;a=1;b=1;
PlotexplALT(n,alp,h,a,b)
}
}
\seealso{
Other Expected length of adjusted methods:
\code{\link{PlotexplAAS}()},
\code{\link{PlotexplAAll}()},
\code{\link{PlotexplALR}()},
\code{\link{PlotexplASC}()},
\code{\link{PlotexplATW}()},
\code{\link{PlotexplAWD}()},
\code{\link{PlotlengthAAS}()},
\code{\link{PlotlengthAAll}()},
\code{\link{PlotlengthALR}()},
\code{\link{PlotlengthALT}()},
\code{\link{PlotlengthASC}()},
\code{\link{PlotlengthATW}()},
\code{\link{PlotlengthAWD}()},
\code{\link{lengthAAS}()},
\code{\link{lengthAAll}()},
\code{\link{lengthALR}()},
\code{\link{lengthALT}()},
\code{\link{lengthASC}()},
\code{\link{lengthATW}()},
\code{\link{lengthAWD}()}
}
\concept{Expected length of adjusted methods}
|
/man/PlotexplALT.Rd
|
no_license
|
RajeswaranV/proportion
|
R
| false
| true
| 1,376
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/312.Expec_Leng_ADJ_All_Graph.R
\name{PlotexplALT}
\alias{PlotexplALT}
\title{Plots the Expected length using adjusted Logit Wald method}
\usage{
PlotexplALT(n, alp, h, a, b)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{h}{- Adding factor}
\item{a}{- Beta parameters for hypo "p"}
\item{b}{- Beta parameters for hypo "p"}
}
\description{
Plots the Expected length using adjusted Logit Wald method
}
\details{
The plots of the Expected length of the adjusted Logit Wald method
}
\examples{
\dontrun{
n= 10; alp=0.05; h=2;a=1;b=1;
PlotexplALT(n,alp,h,a,b)
}
}
\seealso{
Other Expected length of adjusted methods:
\code{\link{PlotexplAAS}()},
\code{\link{PlotexplAAll}()},
\code{\link{PlotexplALR}()},
\code{\link{PlotexplASC}()},
\code{\link{PlotexplATW}()},
\code{\link{PlotexplAWD}()},
\code{\link{PlotlengthAAS}()},
\code{\link{PlotlengthAAll}()},
\code{\link{PlotlengthALR}()},
\code{\link{PlotlengthALT}()},
\code{\link{PlotlengthASC}()},
\code{\link{PlotlengthATW}()},
\code{\link{PlotlengthAWD}()},
\code{\link{lengthAAS}()},
\code{\link{lengthAAll}()},
\code{\link{lengthALR}()},
\code{\link{lengthALT}()},
\code{\link{lengthASC}()},
\code{\link{lengthATW}()},
\code{\link{lengthAWD}()}
}
\concept{Expected length of adjusted methods}
|
## Grid-search driver for the Forex forecasting experiments: for every
## combination of price series (open/close/high/low), block size, sampling
## interval (minutes) and history range, it re-runs "forecast.R" `ntests`
## times, plots forecast vs. target for each run, and records the mean
## RMSE/MAE per configuration in `resultsResume`.
## NOTE(review): "forecast.R" is expected to define `forecastvalues` and
## `targets` using the globals set below -- confirm against that script.
rm(list = ls())
# resolve the directory of this script so the relative paths below work
# when the file is source()d (getSrcDirectory() returns "" otherwise)
dirname = getSrcDirectory(function(x) {x})
setwd(dirname)
library(forecast)
library(deepnet)
library(RSNNS)
source("databaseconnection.R")
# remove any plot images left over from a previous run
dirimages = paste(getwd(),"/Results/*",sep="")
unlink(dirimages)
# column indices of the four price series in the quote data
open = 1
close = 2
high = 3
low = 4
# variation bands (Portuguese: null/normal/medium/high);
# NOTE(review): defined here but not referenced in this script --
# presumably consumed by one of the sourced files; confirm
variacaonula = c(0,0.001)
variacaomormal = c(0.001,0.003)
variacaomedia = c(0.003,0.1)
variacaoalta = c(0.1,+Inf)
# the experiment grid
typelist = c(open, close, high, low)
minuteslist = c(1)
blocklist = c(20)
# NOTE(review): omega, mi and pteste look like training/split parameters
# read by "forecast.R" -- confirm there
omega = 1
mi = 0.2
pteste = 0.05
rangelist = c(1000)
ntests = 10
# summary table: one row per grid point; four grid columns plus the two
# mean error measures
nrows = length(typelist)*length(minuteslist)*length(blocklist)*length(rangelist)
ncols = 1+1+1+1+2
resultsResume = matrix(0,nrow=nrows,ncol=ncols)
colnames(resultsResume) = c("Type","Range","Minutes","Block","RMSE",
                            "MAE")
iterator = 1
for(type in typelist){
  # switch() on a number selects by position, so 1:4 map to the series
  # names in the order defined above
  typestring = switch(type,
                      open = "Open",
                      close = "Close",
                      high = "High",
                      low = "Low")
  for(block in blocklist){
    # two hidden layers, each half the input block size
    hiddenlayers = c(block/2,block/2)
    for(minutes in minuteslist){
      for(range in rangelist){
        errorsMeans = rep(0,2)
        title = paste(typestring," Data, Range ",range,", Minutes ",minutes,", Block ",block,sep="")
        details = list(col=c(2,1),main=title)
        for(test in 1:ntests){
          # forecast.R reads the globals above and produces
          # `forecastvalues` and `targets`
          source("forecast.R")
          # elements 2:3 of the one-row accuracy() matrix are taken as
          # RMSE and MAE (see the resultsResume column names)
          errors = accuracy(forecastvalues,targets)
          if(test==1){
            errorsMeans = as.vector(errors[2:3])
          }else{
            errorsMeans = rbind(errorsMeans,as.vector(errors[2:3]))
          }
          # save the forecast-vs-target plot for this repetition
          auxfilename=paste(getwd(),"/Results/",title,", ",toString(test),".png",sep="")
          png(filename=auxfilename)
          ts.plot(forecastvalues,targets,gpars=details)
          dev.off()
        }
        cat(title,"\n")
        # mean RMSE/MAE over the ntests repetitions
        errorsMeans = colMeans(errorsMeans)
        print(errorsMeans)
        resultsResume[iterator,1] = type
        resultsResume[iterator,2] = range
        resultsResume[iterator,3] = minutes
        resultsResume[iterator,4] = block
        resultsResume[iterator,5:6] = errorsMeans
        iterator=iterator+1
      }
    }
  }
}
|
/CodeR/testForecast.R
|
no_license
|
sandyporto/Forex
|
R
| false
| false
| 2,151
|
r
|
## Grid-search driver for the Forex forecasting experiments: for every
## combination of price series (open/close/high/low), block size, sampling
## interval (minutes) and history range, it re-runs "forecast.R" `ntests`
## times, plots forecast vs. target for each run, and records the mean
## RMSE/MAE per configuration in `resultsResume`.
## NOTE(review): "forecast.R" is expected to define `forecastvalues` and
## `targets` using the globals set below -- confirm against that script.
rm(list = ls())
# resolve the directory of this script so the relative paths below work
# when the file is source()d (getSrcDirectory() returns "" otherwise)
dirname = getSrcDirectory(function(x) {x})
setwd(dirname)
library(forecast)
library(deepnet)
library(RSNNS)
source("databaseconnection.R")
# remove any plot images left over from a previous run
dirimages = paste(getwd(),"/Results/*",sep="")
unlink(dirimages)
# column indices of the four price series in the quote data
open = 1
close = 2
high = 3
low = 4
# variation bands (Portuguese: null/normal/medium/high);
# NOTE(review): defined here but not referenced in this script --
# presumably consumed by one of the sourced files; confirm
variacaonula = c(0,0.001)
variacaomormal = c(0.001,0.003)
variacaomedia = c(0.003,0.1)
variacaoalta = c(0.1,+Inf)
# the experiment grid
typelist = c(open, close, high, low)
minuteslist = c(1)
blocklist = c(20)
# NOTE(review): omega, mi and pteste look like training/split parameters
# read by "forecast.R" -- confirm there
omega = 1
mi = 0.2
pteste = 0.05
rangelist = c(1000)
ntests = 10
# summary table: one row per grid point; four grid columns plus the two
# mean error measures
nrows = length(typelist)*length(minuteslist)*length(blocklist)*length(rangelist)
ncols = 1+1+1+1+2
resultsResume = matrix(0,nrow=nrows,ncol=ncols)
colnames(resultsResume) = c("Type","Range","Minutes","Block","RMSE",
                            "MAE")
iterator = 1
for(type in typelist){
  # switch() on a number selects by position, so 1:4 map to the series
  # names in the order defined above
  typestring = switch(type,
                      open = "Open",
                      close = "Close",
                      high = "High",
                      low = "Low")
  for(block in blocklist){
    # two hidden layers, each half the input block size
    hiddenlayers = c(block/2,block/2)
    for(minutes in minuteslist){
      for(range in rangelist){
        errorsMeans = rep(0,2)
        title = paste(typestring," Data, Range ",range,", Minutes ",minutes,", Block ",block,sep="")
        details = list(col=c(2,1),main=title)
        for(test in 1:ntests){
          # forecast.R reads the globals above and produces
          # `forecastvalues` and `targets`
          source("forecast.R")
          # elements 2:3 of the one-row accuracy() matrix are taken as
          # RMSE and MAE (see the resultsResume column names)
          errors = accuracy(forecastvalues,targets)
          if(test==1){
            errorsMeans = as.vector(errors[2:3])
          }else{
            errorsMeans = rbind(errorsMeans,as.vector(errors[2:3]))
          }
          # save the forecast-vs-target plot for this repetition
          auxfilename=paste(getwd(),"/Results/",title,", ",toString(test),".png",sep="")
          png(filename=auxfilename)
          ts.plot(forecastvalues,targets,gpars=details)
          dev.off()
        }
        cat(title,"\n")
        # mean RMSE/MAE over the ntests repetitions
        errorsMeans = colMeans(errorsMeans)
        print(errorsMeans)
        resultsResume[iterator,1] = type
        resultsResume[iterator,2] = range
        resultsResume[iterator,3] = minutes
        resultsResume[iterator,4] = block
        resultsResume[iterator,5:6] = errorsMeans
        iterator=iterator+1
      }
    }
  }
}
|
## Simulation driver: 10,000 simulated trials for scenario 16 with 15%
## dropout, analyzed with the "sing" (single) analysis strategy and the
## "wn" confidence-interval method, followed by a summary of the results.
## NOTE(review): sim_cont(), mpars(), miss_gen_an() and wn_ci() come from
## the nibinom package and h0.sing.sum() from funs/h0.sing.sum.R --
## confirm their contracts there before relying on the comments below.
library(dplyr)
# pre-computed sample-size bounds per CI method and scenario
ss.bounds <- readRDS("ss.bounds.rds")
# significance level passed to the CI analysis
alpha <- 0.025
method <- 'wn'
scenario <- 16
# NOTE(review): `param` appears unused in this script
param <- 1
anal_type <- "sing"
# sample-size row for the "wn" method in the selected scenario
ss <- ss.bounds%>%
  dplyr::filter(method == "wn", scenario.id == scenario)
# dropout rate imposed on the simulated data
do_val <- 0.15
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(mice, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           # reproducible per-simulation seed, unique across scenarios
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           # CI computed on the complete (pre-missingness) data
                           ci.full <- dt0%>%wn_ci(ss$M2,'y')
                           #define missingness parameters and do rates
                           m.param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           ci.miss <- m.param%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wn_ci,
                                                                 sing_anal = T,
                                                                 mice_anal = F,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha
                             ))%>%
                             dplyr::select(missing, results)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H1',
                                           do = do_val,
                                           sim.id = x)
                           # return both CIs, labeled, for downstream summarization
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize power from the simulated data
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)%>%
  dplyr::select(-mean.bias)
|
/sim_pgms/wn/do15/2xcontH1_sc16_do15_sing.R
|
no_license
|
yuliasidi/nibinom_apply
|
R
| false
| false
| 2,221
|
r
|
## Simulation driver: 10,000 simulated trials for scenario 16 with 15%
## dropout, analyzed with the "sing" (single) analysis strategy and the
## "wn" confidence-interval method, followed by a summary of the results.
## NOTE(review): sim_cont(), mpars(), miss_gen_an() and wn_ci() come from
## the nibinom package and h0.sing.sum() from funs/h0.sing.sum.R --
## confirm their contracts there before relying on the comments below.
library(dplyr)
# pre-computed sample-size bounds per CI method and scenario
ss.bounds <- readRDS("ss.bounds.rds")
# significance level passed to the CI analysis
alpha <- 0.025
method <- 'wn'
scenario <- 16
# NOTE(review): `param` appears unused in this script
param <- 1
anal_type <- "sing"
# sample-size row for the "wn" method in the selected scenario
ss <- ss.bounds%>%
  dplyr::filter(method == "wn", scenario.id == scenario)
# dropout rate imposed on the simulated data
do_val <- 0.15
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(mice, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           # reproducible per-simulation seed, unique across scenarios
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           # CI computed on the complete (pre-missingness) data
                           ci.full <- dt0%>%wn_ci(ss$M2,'y')
                           #define missingness parameters and do rates
                           m.param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           ci.miss <- m.param%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wn_ci,
                                                                 sing_anal = T,
                                                                 mice_anal = F,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha
                             ))%>%
                             dplyr::select(missing, results)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H1',
                                           do = do_val,
                                           sim.id = x)
                           # return both CIs, labeled, for downstream summarization
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize power from the simulated data
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)%>%
  dplyr::select(-mean.bias)
|
# Auto-extracted example script: load the CoxRidge package, attach its
# bundled GBSG (German Breast Cancer Study Group) dataset, and print the
# dataset's structure.
library(CoxRidge)
### Name: GBSG
### Title: German Breast Cancer Study Group.
### Aliases: GBSG
### Keywords: datasets
### ** Examples
# attach the dataset into the workspace and inspect its columns
data(GBSG)
str(GBSG)
|
/data/genthat_extracted_code/CoxRidge/examples/GBSG.rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 164
|
r
|
# Auto-extracted example script: load the CoxRidge package, attach its
# bundled GBSG (German Breast Cancer Study Group) dataset, and print the
# dataset's structure.
library(CoxRidge)
### Name: GBSG
### Title: German Breast Cancer Study Group.
### Aliases: GBSG
### Keywords: datasets
### ** Examples
# attach the dataset into the workspace and inspect its columns
data(GBSG)
str(GBSG)
|
#' Set file path to directory storing downloaded census data
#'
#' Interactively stores the path to the census data directory, either for
#' the current R session only (as an environment variable) or permanently
#' by writing a \code{PATH_TO_CENSUS} entry to the user's \code{.Renviron}.
#'
#' @param path path to directory holding all downloaded census data, such as
#' "E:/my_census_data" and "~/my_census_data/".
#'
#' @export
# This function is modified from census_api_key() in package tidycensus, MIT licence
set_path_to_census <- function (path){
  # windows does not recognize a directory ending with "/", so strip any
  # trailing slash; sub() is a no-op when there is none and, unlike
  # extracting the last character, it is safe on an empty string
  path <- sub("/$", "", str_trim(path))
  # get user permission
  message(paste(
    "Set path to the directory storing downloaded census data.",
    "You can choose to set a temporary path to the census data and",
    "use it for current R session only.",
    "Or you can choose to set a permanent path for all future R sessions",
    "by adding a vairable 'PATH_TO_CENSUS' to your .Renviron file.\n"
  ))
  cat("Your choice:")
  choice <- switch(
    menu(c("temporary path for this R session",
           "permanent path for this and all future R sessions")),
    "temporary",
    "permanent"
  )
  if (choice == "permanent") {
    # save initial working directory for later recovery
    initial_wd <- getwd()
    # set working directory to home directory
    setwd(Sys.getenv("HOME"))
    if (!file.exists(".Renviron")) {
      file.create(".Renviron")
    } else {
      # overwrite any stale backup so the message below is always accurate
      file.copy(".Renviron", ".Renviron_backup", overwrite = TRUE)
      message(paste(
        "Your original .Renviron has been backed up and stored as",
        ".Renviron_backup in your R HOME directory."
      ))
      # drop any existing PATH_TO_CENSUS entry; readLines/writeLines keep
      # whole lines intact (read.table would split entries containing
      # spaces and error on an empty .Renviron)
      oldenv <- readLines(".Renviron", warn = FALSE)
      newenv <- oldenv[!grepl("PATH_TO_CENSUS", oldenv)]
      writeLines(newenv, ".Renviron")
    }
    path_variable <- paste0("PATH_TO_CENSUS=", "'", path, "'")
    write(path_variable, ".Renviron", sep = "\n", append = TRUE)
    readRenviron("~/.Renviron")
    message(paste(
      "Your path to census data has been stored in your .Renviron and can",
      "be accessed by Sys.getenv(\"PATH_TO_CENSUS\")"
    ))
    # recover to initial working directory
    setwd(initial_wd)
  } else if (choice == "temporary"){
    Sys.setenv(PATH_TO_CENSUS = path)
  }
}
|
/R/set_path_to_census.R
|
no_license
|
ruralinnovation/totalcensus
|
R
| false
| false
| 2,523
|
r
|
#' Set file path to directory storing downloaded census data
#'
#' Interactively stores the path to the census data directory, either for
#' the current R session only (as an environment variable) or permanently
#' by writing a \code{PATH_TO_CENSUS} entry to the user's \code{.Renviron}.
#'
#' @param path path to directory holding all downloaded census data, such as
#' "E:/my_census_data" and "~/my_census_data/".
#'
#' @export
# This function is modified from census_api_key() in package tidycensus, MIT licence
set_path_to_census <- function (path){
  # windows does not recognize a directory ending with "/", so strip any
  # trailing slash; sub() is a no-op when there is none and, unlike
  # extracting the last character, it is safe on an empty string
  path <- sub("/$", "", str_trim(path))
  # get user permission
  message(paste(
    "Set path to the directory storing downloaded census data.",
    "You can choose to set a temporary path to the census data and",
    "use it for current R session only.",
    "Or you can choose to set a permanent path for all future R sessions",
    "by adding a vairable 'PATH_TO_CENSUS' to your .Renviron file.\n"
  ))
  cat("Your choice:")
  choice <- switch(
    menu(c("temporary path for this R session",
           "permanent path for this and all future R sessions")),
    "temporary",
    "permanent"
  )
  if (choice == "permanent") {
    # save initial working directory for later recovery
    initial_wd <- getwd()
    # set working directory to home directory
    setwd(Sys.getenv("HOME"))
    if (!file.exists(".Renviron")) {
      file.create(".Renviron")
    } else {
      # overwrite any stale backup so the message below is always accurate
      file.copy(".Renviron", ".Renviron_backup", overwrite = TRUE)
      message(paste(
        "Your original .Renviron has been backed up and stored as",
        ".Renviron_backup in your R HOME directory."
      ))
      # drop any existing PATH_TO_CENSUS entry; readLines/writeLines keep
      # whole lines intact (read.table would split entries containing
      # spaces and error on an empty .Renviron)
      oldenv <- readLines(".Renviron", warn = FALSE)
      newenv <- oldenv[!grepl("PATH_TO_CENSUS", oldenv)]
      writeLines(newenv, ".Renviron")
    }
    path_variable <- paste0("PATH_TO_CENSUS=", "'", path, "'")
    write(path_variable, ".Renviron", sep = "\n", append = TRUE)
    readRenviron("~/.Renviron")
    message(paste(
      "Your path to census data has been stored in your .Renviron and can",
      "be accessed by Sys.getenv(\"PATH_TO_CENSUS\")"
    ))
    # recover to initial working directory
    setwd(initial_wd)
  } else if (choice == "temporary"){
    Sys.setenv(PATH_TO_CENSUS = path)
  }
}
|
library(readxl)
library(dplyr)
library(reshape2)
##Reading each participant's csv file and combind their data
####################################################################
##Reading data
df_1 <- read.csv('~/Downloads/350s2/Participants/1/Report - 1.csv')
df_1 <- as.data.frame(t(df_1))
df_2 <- read.csv('~/Downloads/350s2/Participants/2/Report - 2.csv')
df_2 <- as.data.frame(t(df_2))
df_3 <- read.csv('~/Downloads/350s2/Participants/3/Report - 3.csv')
df_3 <- as.data.frame(t(df_3))
# df <- rbind(df,df_3[2,])
df_4 <- read.csv('~/Downloads/350s2/Participants/4/Report - 4.csv')
df_4 <- as.data.frame(t(df_4))
df_5 <- read.csv('~/Downloads/350s2/Participants/5/Report - 5.csv')
df_5 <- as.data.frame(t(df_5))
df_6 <- read.csv('~/Downloads/350s2/Participants/6/Report - 6.csv')
df_6 <- as.data.frame(t(df_6))
# df_7 <- read.csv('~/Downloads/350s2/Participants/7/Report - 7.csv')
# df_7 <- as.data.frame(t(df_7))
#
# df_8 <- read.csv('~/Downloads/350s2/Participants/8/Report - 8.csv')
# df_8 <- as.data.frame(t(df_8))
# df_11 <- read.csv('~/Downloads/350s2/Participants/11/Report - 11.csv')
# df_11<- as.data.frame(t(df_11))
df_13 <- read.csv('~/Downloads/350s2/Participants/13/Report - 13.csv')
df_13 <- as.data.frame(t(df_13))
df_14 <- read.csv('~/Downloads/350s2/Participants/14/Report - 14.csv')
df_14 <- as.data.frame(t(df_14))
df_15 <- read.csv('~/Downloads/350s2/Participants/15/Report - 15.csv')
df_15 <- as.data.frame(t(df_15))
df_16 <- read.csv('~/Downloads/350s2/Participants/16/Report - 16.csv')
df_16 <- as.data.frame(t(df_16))
df_17 <- read.csv('~/Downloads/350s2/Participants/17/Report - 17.csv')
df_17 <- as.data.frame(t(df_17))
df_18 <- read.csv('~/Downloads/350s2/Participants/18/Report - 18.csv')
df_18 <- as.data.frame(t(df_18))
df_19 <- read.csv('~/Downloads/350s2/Participants/19/Report - 19.csv')
df_19 <- as.data.frame(t(df_19))
df_20 <- read.csv('~/Downloads/350s2/Participants/20/Report - 20.csv')
df_20 <- as.data.frame(t(df_20))
df_21 <- read.csv('~/Downloads/350s2/Participants/21/Report - 21.csv')
df_21 <- as.data.frame(t(df_21))
df_22 <- read.csv('~/Downloads/350s2/Participants/22/Report - 22.csv')
df_22 <- as.data.frame(t(df_22))
df_23 <- read.csv('~/Downloads/350s2/Participants/23/Report - 23.csv')
df_23 <- as.data.frame(t(df_23))
df_25 <- read.csv('~/Downloads/350s2/Participants/25/Report - 25.csv')
df_25 <- as.data.frame(t(df_25))
df_26 <- read.csv('~/Downloads/350s2/Participants/26/Report - 26.csv')
df_26 <- as.data.frame(t(df_26))
df_27 <- read.csv('~/Downloads/350s2/Participants/27/Report - 27.csv')
df_27 <- as.data.frame(t(df_27))
df_28 <- read.csv('~/Downloads/350s2/Participants/28/Report - 28.csv')
df_28 <- as.data.frame(t(df_28))
df_29 <- read.csv('~/Downloads/350s2/Participants/29/Report - 29.csv')
df_29 <- as.data.frame(t(df_29))
# Read the remaining participants' reports (ids 30-83) and store the transpose
# of each as a global data frame df_<id>. Participants 35-56 have corrected
# exports under a "Fixed reports" subfolder; all other reports sit directly in
# the participant's directory.
for (pid in 30:83) {
  report_dir <- sprintf('~/Downloads/350s2/Participants/%d', pid)
  if (pid >= 35 && pid <= 56) {
    report_dir <- file.path(report_dir, 'Fixed reports')
  }
  raw_report <- read.csv(file.path(report_dir, sprintf('Report - %d.csv', pid)))
  assign(paste0("df_", pid), as.data.frame(t(raw_report)))
}
####################################################################
##Selecting the attributes we need
# Restrict every participant data frame to its first 16 transposed columns
# (V1-V16). Participants 7-12 and 24 are absent, hence the gaps in the id
# sequence. get()/assign() replace the original eval(parse(text = ...))
# string-building, which is fragile and hard to debug.
for (i in c(1:6, 13:23, 25:83)) {
  df_name <- paste0("df_", i)
  assign(df_name, get(df_name) %>% select(all_of(paste0("V", 1:16))))
}
####################################################################
##Combining data
# Stack all participants into one table: df_1 contributes both of its rows,
# every other participant contributes only row 2 (row 1 presumably repeats the
# header labels of the transposed report -- confirm against the raw CSVs).
# get() replaces the original eval(parse(text = ...)) string-building.
df <- rbind(df_1, df_2[2, ])
for (i in c(3:6, 13:23, 25:83)) {
  df <- rbind(df, get(paste0("df_", i))[2, ])
}
####################################################################
##Exporting data
# Write the combined participant table to an Excel workbook in the current
# working directory (requires the third-party 'xlsx' package).
library(xlsx)
write.xlsx(df, "experiment.xlsx")
|
/R_Data_analysis/experiment.R
|
no_license
|
kkateli/Data_Analysis_R
|
R
| false
| false
| 9,947
|
r
|
library(readxl)
library(dplyr)
library(reshape2)
## Read each participant's CSV report and combine their data.
####################################################################
## Reading data
# Each report is read and its transpose stored as a global data frame df_<id>.
# Participants 7, 8, 11, 12, and 24 are skipped (no usable report).
# Reports for participants 35-56 live under a "Fixed reports" subfolder; all
# others sit directly in the participant's directory.
for (pid in c(1:6, 13:23, 25:83)) {
  report_dir <- sprintf('~/Downloads/350s2/Participants/%d', pid)
  if (pid >= 35 && pid <= 56) {
    report_dir <- file.path(report_dir, 'Fixed reports')
  }
  raw_report <- read.csv(file.path(report_dir, sprintf('Report - %d.csv', pid)))
  assign(paste0("df_", pid), as.data.frame(t(raw_report)))
}
####################################################################
##Selecting the attributes we need
# Restrict every participant data frame to its first 16 transposed columns
# (V1-V16). Participants 7-12 and 24 are absent, hence the gaps in the id
# sequence. get()/assign() replace the original eval(parse(text = ...))
# string-building, which is fragile and hard to debug.
for (i in c(1:6, 13:23, 25:83)) {
  df_name <- paste0("df_", i)
  assign(df_name, get(df_name) %>% select(all_of(paste0("V", 1:16))))
}
####################################################################
##Combining data
# Stack all participants into one table: df_1 contributes both of its rows,
# every other participant contributes only row 2 (row 1 presumably repeats the
# header labels of the transposed report -- confirm against the raw CSVs).
# get() replaces the original eval(parse(text = ...)) string-building.
df <- rbind(df_1, df_2[2, ])
for (i in c(3:6, 13:23, 25:83)) {
  df <- rbind(df, get(paste0("df_", i))[2, ])
}
####################################################################
##Exporting data
# Write the combined participant table to an Excel workbook in the current
# working directory (requires the third-party 'xlsx' package).
library(xlsx)
write.xlsx(df, "experiment.xlsx")
|
#' Creates a new CellDateSet object.
#'
#' @param cellData expression data matrix for an experiment
#' @param phenoData data frame containing attributes of individual cells
#' @param featureData data frame containing attributes of features (e.g. genes)
#' @param lowerDetectionLimit the minimum expression level that consistitutes true expression
#' @param expressionFamily the VGAM family function to be used for expression response variables
#' @return a new CellDataSet object
#' @import VGAM
#' @importFrom Biobase annotatedDataFrameFrom assayDataNew
#' @export
#' @examples
#' \dontrun{
#' sample_sheet_small <- read.delim("../data/sample_sheet_small.txt", row.names=1)
#' sample_sheet_small$Time <- as.factor(sample_sheet_small$Time)
#' gene_annotations_small <- read.delim("../data/gene_annotations_small.txt", row.names=1)
#' fpkm_matrix_small <- read.delim("../data/fpkm_matrix_small.txt")
#' pd <- new("AnnotatedDataFrame", data = sample_sheet_small)
#' fd <- new("AnnotatedDataFrame", data = gene_annotations_small)
#' HSMM <- new("CellDataSet", exprs = as.matrix(fpkm_matrix_small), phenoData = pd, featureData = fd)
#' }
newCellDataSet <- function( cellData,
                            phenoData = NULL,
                            featureData = NULL,
                            lowerDetectionLimit = 0.1,
                            expressionFamily=VGAM::negbinomial.size())
{
  # class(matrix) is c("matrix", "array") on R >= 4.0, so the original test
  # `class(cellData) != "matrix"` produced a length-2 condition that errors
  # under `&&` on modern R; is.matrix() is the robust check.
  if (!is.matrix(cellData) && isSparseMatrix(cellData) == FALSE){
    stop("Error: argument cellData must be a matrix (either sparse from the Matrix package or dense)")
  }
  # Every cell starts with an unknown size factor; estimation fills it later.
  sizeFactors <- rep( NA_real_, ncol(cellData) )
  # Default pheno/feature tables are empty AnnotatedDataFrames keyed by the
  # matrix's column / row names.
  if( is.null( phenoData ) )
    phenoData <- annotatedDataFrameFrom( cellData, byrow=FALSE )
  if( is.null( featureData ) )
    featureData <- annotatedDataFrameFrom(cellData, byrow=TRUE)
  # Warn once about the missing column (the original repeated this identical
  # check three times), after featureData has been finalized.
  if(!('gene_short_name' %in% colnames(featureData))) {
    warning("Warning: featureData must contain a column verbatim named 'gene_short_name' for certain functions")
  }
  phenoData$`Size_Factor` <- sizeFactors
  cds <- new( "CellDataSet",
              assayData = assayDataNew( "environment", exprs=cellData ),
              phenoData=phenoData,
              featureData=featureData,
              lowerDetectionLimit=lowerDetectionLimit,
              expressionFamily=expressionFamily,
              dispFitInfo = new.env( hash=TRUE ))
  validObject( cds )
  cds
}
# Apply FUN to each row (MARGIN = 1) or column (MARGIN = 2) of a sparse
# matrix, returning a list of results. When convert_to_dense is TRUE each
# slice is densified (as.matrix) before FUN sees it. Row-wise application is
# implemented by transposing once and iterating over columns, which is cheap
# for column-compressed sparse formats. The original duplicated the identical
# lapply in both MARGIN branches; the transpose is now hoisted instead.
sparseApply <- function(Sp_X, MARGIN, FUN, convert_to_dense, ...){
  if (MARGIN == 1){
    Sp_X <- Matrix::t(Sp_X)
  }
  if (convert_to_dense){
    res <- lapply(colnames(Sp_X), function(i, FUN, ...) {
      FUN(as.matrix(Sp_X[,i]), ...)
    }, FUN, ...)
  }else{
    res <- lapply(colnames(Sp_X), function(i, FUN, ...) {
      FUN(Sp_X[,i], ...)
    }, FUN, ...)
  }
  return(res)
}
#' @importFrom parallel splitIndices
# Partition the rows of x into ncl roughly equal chunks (one per worker),
# keeping each chunk as a matrix via drop = FALSE.
splitRows <- function (x, ncl) {
  chunk_idx <- splitIndices(nrow(x), ncl)
  lapply(chunk_idx, function(rows) x[rows, , drop = FALSE])
}
#' @importFrom parallel splitIndices
# Partition the columns of x into ncl roughly equal chunks (one per worker),
# keeping each chunk as a matrix via drop = FALSE.
splitCols <- function (x, ncl) {
  chunk_idx <- splitIndices(ncol(x), ncl)
  lapply(chunk_idx, function(cols) x[, cols, drop = FALSE])
}
#' @importFrom BiocGenerics clusterApply
# Parallel row-wise apply over a (sparse) matrix: split x into one row-chunk
# per cluster worker, run sparseApply(MARGIN = 1) on each chunk, then
# concatenate the per-chunk result lists with c(). quote = TRUE stops do.call
# from re-evaluating the list elements as expressions.
sparseParRApply <- function (cl, x, FUN, convert_to_dense, ...)
{
  par_res <- do.call(c, clusterApply(cl = cl, x = splitRows(x, length(cl)),
                                     fun = sparseApply, MARGIN = 1L, FUN = FUN, convert_to_dense=convert_to_dense, ...), quote = TRUE)
  # One result per row, named after the row.
  names(par_res) <- row.names(x)
  par_res
}
#' @importFrom BiocGenerics clusterApply
# Parallel column-wise apply over a (sparse) matrix: split x into one
# column-chunk per cluster worker, run sparseApply(MARGIN = 2) on each chunk,
# then concatenate the per-chunk result lists with c(). quote = TRUE stops
# do.call from re-evaluating the list elements as expressions.
sparseParCApply <- function (cl = NULL, x, FUN, convert_to_dense, ...)
{
  par_res <- do.call(c, clusterApply(cl = cl, x = splitCols(x, length(cl)),
                                     fun = sparseApply, MARGIN = 2L, FUN = FUN, convert_to_dense=convert_to_dense, ...), quote = TRUE)
  # One result per column, named after the column.
  names(par_res) <- colnames(x)
  par_res
}
#' Multicore apply-like function for CellDataSet
#'
#' mcesApply computes the row-wise or column-wise results of FUN, just like esApply.
#' Variables in pData from X are available in FUN.
#'
#' @param X a CellDataSet object
#' @param MARGIN The margin to apply to, either 1 for rows (samples) or 2 for columns (features)
#' @param FUN Any function
#' @param required_packages A list of packages FUN will need. Failing to provide packages needed by FUN will generate errors in worker threads.
#' @param convert_to_dense Whether to force conversion a sparse matrix to a dense one before calling FUN
#' @param ... Additional parameters for FUN
#' @param cores The number of cores to use for evaluation
#'
#' @return The result of with(pData(X) apply(exprs(X)), MARGIN, FUN, ...))
#' @importFrom parallel makeCluster stopCluster
#' @importFrom BiocGenerics clusterCall parRapply parCapply
#' @importFrom Biobase pData exprs multiassign
#' @export
mcesApply <- function(X, MARGIN, FUN, required_packages, cores=1, convert_to_dense=TRUE, ...) {
  # Give FUN access to the phenoData columns by splicing them into a fresh
  # environment chained onto FUN's own enclosure.
  parent <- environment(FUN)
  if (is.null(parent))
    parent <- emptyenv()
  e1 <- new.env(parent=parent)
  multiassign(names(pData(X)), pData(X), envir=e1)
  environment(FUN) <- e1
  # Note: use outfile argument to makeCluster for debugging.
  # The original branched on Sys.info()[['sysname']] but both branches called
  # makeCluster(cores) identically, and any other platform left `cl`
  # undefined; one unconditional call covers every platform.
  cl <- makeCluster(cores)
  # BUG FIX: the original registered on.exit(cleanup) -- the function object,
  # never invoked -- so the cluster was leaked. on.exit(cleanup()) actually
  # runs the cleanup when this function exits (normally or on error).
  cleanup <- function(){
    stopCluster(cl)
  }
  on.exit(cleanup())
  # Load FUN's dependencies on every worker before dispatching work.
  if (!is.null(required_packages)){
    clusterCall(cl, function(pkgs) {
      for (req in pkgs) {
        library(req, character.only=TRUE)
      }
    }, required_packages)
  }
  if (MARGIN == 1){
    suppressWarnings(res <- sparseParRApply(cl, exprs(X), FUN, convert_to_dense, ...))
  }else{
    suppressWarnings(res <- sparseParCApply(cl, exprs(X), FUN, convert_to_dense, ...))
  }
  res
}
#' @importFrom Biobase multiassign
# Serial apply over exprs(X), row-wise (MARGIN = 1) or column-wise
# (MARGIN = 2), picking the sparse-aware path when the expression matrix is
# sparse. Like Biobase::esApply, the phenoData columns of X are injected into
# FUN's enclosing environment so FUN can reference them by name.
smartEsApply <- function(X, MARGIN, FUN, convert_to_dense, ...) {
  # Chain a new environment holding the pData columns onto FUN's enclosure.
  parent <- environment(FUN)
  if (is.null(parent))
    parent <- emptyenv()
  e1 <- new.env(parent=parent)
  multiassign(names(pData(X)), pData(X), envir=e1)
  environment(FUN) <- e1
  if (isSparseMatrix(exprs(X))){
    res <- sparseApply(exprs(X), MARGIN, FUN, convert_to_dense, ...)
  }else{
    res <- apply(exprs(X), MARGIN, FUN, ...)
  }
  # Name the results after the dimension that was iterated over.
  if (MARGIN == 1)
  {
    names(res) <- row.names(X)
  }else{
    names(res) <- colnames(X)
  }
  res
}
####
#' Filter genes with extremely high or low negentropy
#'
#' @description Examines all the genes in the CellDataSet passed in and removes
#' all the genes that contain extremely high or low negentropies. You can specify
#' which genes to filter out based on the boundaries you can set for expression levels
#' and the boundaries you set for which centile to include. the function "dispersionTable"
#' is a better form of this function.
#'
#' @param cds a CellDataSet object upon which to perform this operation
#' @param lower_negentropy_bound the centile below which to exclude to genes
#' @param upper_negentropy_bound the centile above which to exclude to genes
#' @param expression_lower_thresh the expression level below which to exclude genes used to determine negentropy
#' @param expression_upper_thresh the expression level above which to exclude genes used to determine negentropy
#' @return a vector of gene names
#' @importFrom stats quantile
#' @export
#' @examples
#' \dontrun{
#' reasonableNegentropy <- selectNegentropyGenes(HSMM, "07%", "95%", 1, 100)
#' }
selectNegentropyGenes <- function(cds, lower_negentropy_bound="0%",
                                  upper_negentropy_bound="99%",
                                  expression_lower_thresh=0.1,
                                  expression_upper_thresh=Inf){
  # Deprecated: dispersionTable() is the supported replacement.
  .Deprecated("dispersionTable")
  # NULLed to silence R CMD check notes about subset()'s non-standard
  # evaluation below.
  log_expression <- NULL
  FM <- exprs(cds)
  # For count-based families, convert the thresholds and the matrix itself to
  # per-cell relative expression before computing negentropy.
  if (cds@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size"))
  {
    expression_lower_thresh <- expression_lower_thresh / colSums(FM)
    expression_upper_thresh <- expression_upper_thresh / colSums(FM)
    FM <- Matrix::t(Matrix::t(FM)/colSums(FM))
  }
  # Per-gene negentropy of the (scaled) log2 expression values.
  # NOTE(review): the second line overwrites `expression` with log2(x),
  # discarding the threshold filter computed on the previous line -- this
  # looks like a bug (log2(expression) was probably intended), and note that
  # expression_upper_thresh is never applied at all. Preserved as-is because
  # the function is deprecated.
  negentropy_exp <- apply(FM,1,function(x) {
    expression <- x[x > expression_lower_thresh]
    expression <- log2(x);
    expression <- expression[is.finite(expression)]
    if (length(expression)){
      expression <- scale(expression)
      mean(-exp(-(expression^2)/2))^2
    }else{
      0
    }
  }
  )
  # Per-gene mean log2 expression (same overwrite pattern as above).
  means <- apply(FM,1,function(x) {
    expression <- x[x > expression_lower_thresh]
    expression <- log2(x);
    expression[is.finite(expression) == FALSE] <- NA;
    if (length(expression)){
      mean(expression, na.rm=T)
    }else{
      NA
    }
  }
  )
  ordering_df <- data.frame(log_expression = means, negentropy = negentropy_exp)
  # More NSE guards for the subset() calls below.
  negentropy <- NULL
  log_express <- NULL
  negentropy_residual <- NULL
  # Drop genes whose mean or negentropy could not be computed.
  ordering_df <- subset(ordering_df,
                        is.na(log_expression) == FALSE &
                          is.nan(log_expression) == FALSE &
                          is.na(negentropy) == FALSE &
                          is.nan(negentropy) == FALSE)
  # Fit a smooth negentropy-vs-expression trend and take residuals from it.
  negentropy_fit <- vglm(negentropy~sm.ns(log_expression, df=4),data=ordering_df, family=VGAM::gaussianff())
  ordering_df$negentropy_response <- predict(negentropy_fit, newdata=ordering_df, type="response")
  ordering_df$negentropy_residual <- ordering_df$negentropy - ordering_df$negentropy_response
  # The bounds are centile labels (e.g. "99%") used to index the named vector
  # returned by quantile(probs = seq(0, 1, 0.01)).
  lower_negentropy_thresh <- quantile(ordering_df$negentropy_residual, probs=seq(0,1,0.01), na.rm=T)[lower_negentropy_bound]
  upper_negentropy_thresh <- quantile(ordering_df$negentropy_residual, probs=seq(0,1,0.01), na.rm=T)[upper_negentropy_bound]
  # Keep genes whose residual negentropy falls between the two centiles.
  ordering_genes <- row.names(subset(ordering_df,
                                     negentropy_residual >= lower_negentropy_thresh &
                                       negentropy_residual <= upper_negentropy_thresh))
  ordering_genes
}
#' Retrieve a table of values specifying the mean-variance relationship
#'
#' Calling estimateDispersions computes a smooth function describing how variance
#' in each gene's expression across cells varies according to the mean. This
#' function only works for CellDataSet objects containing count-based expression
#' data, either transcripts or reads.
#'
#' @param cds The CellDataSet from which to extract a dispersion table.
#' @return A data frame containing the empirical mean expression,
#' empirical dispersion, and the value estimated by the dispersion model.
#'
#' @export
dispersionTable <- function(cds){
  # estimateDispersions() stores its fit under the "blind" key of the
  # dispFitInfo environment; without it there is nothing to report.
  fit_info <- cds@dispFitInfo[["blind"]]
  if (is.null(fit_info)){
    warning("Warning: estimateDispersions only works, and is only needed, when you're using a CellDataSet with a negbinomial or negbinomial.size expression family")
    stop("Error: no dispersion model found. Please call estimateDispersions() before calling this function")
  }
  # One row per gene: empirical mean, the fitted dispersion at that mean, and
  # the empirical dispersion. (Repeated deep slot access in the original is
  # replaced by the fit_info local.)
  disp_df <- data.frame(gene_id=fit_info$disp_table$gene_id,
                        mean_expression=fit_info$disp_table$mu,
                        dispersion_fit=fit_info$disp_func(fit_info$disp_table$mu),
                        dispersion_empirical=fit_info$disp_table$disp)
  return(disp_df)
}
#####
#' Detects genes above minimum threshold.
#'
#' @description Sets the global expression detection threshold to be used with this CellDataSet.
#' Counts how many cells each feature in a CellDataSet object that are detectably expressed
#' above a minimum threshold. Also counts the number of genes above this threshold are
#' detectable in each cell.
#'
#' @param cds the CellDataSet upon which to perform this operation
#' @param min_expr the expression threshold
#' @return an updated CellDataSet object
#' @importFrom Biobase fData fData<- exprs pData pData<-
#' @export
#' @examples
#' \dontrun{
#' HSMM <- detectGenes(HSMM, min_expr=0.1)
#' }
detectGenes <- function(cds, min_expr=NULL){
  # Fall back to the CellDataSet's global detection limit when no threshold is
  # given.
  if (is.null(min_expr))
  {
    min_expr <- cds@lowerDetectionLimit
  }
  # (A slow apply()-based legacy implementation that built the same two counts
  # row by row was removed here; the Matrix::rowSums/colSums forms below are
  # equivalent and work for both sparse and dense matrices.)
  # Per gene: number of cells in which expression strictly exceeds min_expr.
  fData(cds)$num_cells_expressed <- Matrix::rowSums(exprs(cds) > min_expr)
  # Per cell: number of genes detected above min_expr.
  pData(cds)$num_genes_expressed <- Matrix::colSums(exprs(cds) > min_expr)
  cds
}
# Convert a slam simple triplet matrix to a Matrix-package sparseMatrix,
# carrying over dimnames when present.
#' @import slam
#' @import Matrix
asSparseMatrix = function (simpleTripletMatrix) {
  out <- sparseMatrix(i = simpleTripletMatrix[["i"]],
                      j = simpleTripletMatrix[["j"]],
                      x = simpleTripletMatrix[["v"]],
                      dims = c(simpleTripletMatrix[["nrow"]],
                               simpleTripletMatrix[["ncol"]]))
  dn <- simpleTripletMatrix[["dimnames"]]
  if (!is.null(dn))
    dimnames(out) = dn
  return(out)
}
# Convert a Matrix-package sparseMatrix to a slam simple triplet matrix.
#' @import slam
asSlamMatrix = function (sp_mat) {
  triplets <- Matrix::summary(sp_mat)
  simple_triplet_matrix(triplets[,"i"], triplets[,"j"], triplets[,"x"],
                        ncol=ncol(sp_mat), nrow=nrow(sp_mat),
                        dimnames=dimnames(sp_mat))
}
# Test whether x is a sparse matrix from the Matrix package.
# (The original header comment wrongly described this as a slam conversion.)
#' @import Matrix
isSparseMatrix <- function(x){
  # class() can return more than one string (e.g. c("matrix", "array") for a
  # dense matrix on R >= 4.0), so the original `class(x) %in% ...` produced a
  # length-2 logical that is invalid as an if() condition; any() collapses it
  # to a single TRUE/FALSE.
  any(class(x) %in% c("dgCMatrix", "dgTMatrix"))
}
# Estimate size factors for each column, given a sparseMatrix from the Matrix
# package
#' @import slam
#' @importFrom stats median
estimateSizeFactorsForSparseMatrix <- function(counts,
                                               locfunc = median,
                                               round_exprs=TRUE,
                                               method="mean-geometric-mean-total"){
  # Work on a slam simple-triplet copy so the row/col apply helpers below can
  # stream over the sparse structure without densifying it.
  CM <- counts
  if (round_exprs)
    CM <- round(CM)
  CM <- asSlamMatrix(CM)
  if (method == "weighted-median"){
    # Per-gene log of the representative value (locfunc, median by default).
    log_medians <- rowapply_simple_triplet_matrix(CM, function(cell_expr) {
      log(locfunc(cell_expr))
    })
    # Per-gene weight: fraction of cells in which the gene is detected.
    weights <- rowapply_simple_triplet_matrix(CM, function(cell_expr) {
      num_pos <- sum(cell_expr > 0)
      num_pos / length(cell_expr)
    })
    # Size factor = exp of weighted mean deviation from the gene medians.
    sfs <- colapply_simple_triplet_matrix(CM, function(cnts) {
      norm_cnts <- weights * (log(cnts) - log_medians)
      norm_cnts <- norm_cnts[is.nan(norm_cnts) == FALSE]
      norm_cnts <- norm_cnts[is.finite(norm_cnts)]
      exp( mean(norm_cnts) )
    })
  }else if (method == "median-geometric-mean"){
    # BUG FIX: the original computed mean(log(CM)) -- over the whole matrix --
    # for every row, instead of the per-row geometric mean mean(log(x))
    # (matching rowMeans(log(CM)) in the dense implementation).
    log_geo_means <- rowapply_simple_triplet_matrix(CM, function(x) { mean(log(x)) })
    sfs <- colapply_simple_triplet_matrix(CM, function(cnts) {
      norm_cnts <- log(cnts) - log_geo_means
      norm_cnts <- norm_cnts[is.nan(norm_cnts) == FALSE]
      norm_cnts <- norm_cnts[is.finite(norm_cnts)]
      exp( locfunc( norm_cnts ))
    })
  }else if(method == "median"){
    stop("Error: method 'median' not yet supported for sparse matrices")
  }else if(method == 'mode'){
    stop("Error: method 'mode' not yet supported for sparse matrices")
  }else if(method == 'geometric-mean-total') {
    cell_total <- col_sums(CM)
    sfs <- log(cell_total) / mean(log(cell_total))
  }else if(method == 'mean-geometric-mean-total') {
    # Default: each cell's total divided by the geometric mean of all totals.
    cell_total <- col_sums(CM)
    sfs <- cell_total / exp(mean(log(cell_total)))
  }
  # Cells whose factor is undefined (e.g. all-zero columns) default to 1.
  sfs[is.na(sfs)] <- 1
  sfs
}
#' @importFrom stats median
# Estimate per-cell (per-column) size factors for a dense expression matrix.
# Supported methods mirror the sparse implementation; the default divides each
# cell's total counts by the geometric mean of all cell totals.
estimateSizeFactorsForDenseMatrix <- function(counts, locfunc = median, round_exprs=TRUE, method="mean-geometric-mean-total"){
  expr_mat <- counts
  if (round_exprs) {
    expr_mat <- round(expr_mat)
  }
  if (method == "weighted-median") {
    # Per-gene log of the representative value (locfunc, median by default).
    gene_log_medians <- apply(expr_mat, 1, function(gene_expr) {
      log(locfunc(gene_expr))
    })
    # Per-gene weight: fraction of cells in which the gene is detected.
    gene_weights <- apply(expr_mat, 1, function(gene_expr) {
      sum(gene_expr > 0) / length(gene_expr)
    })
    size_factors <- apply(expr_mat, 2, function(cell_counts) {
      centered <- gene_weights * (log(cell_counts) - gene_log_medians)
      centered <- centered[!is.nan(centered)]
      centered <- centered[is.finite(centered)]
      exp(mean(centered))
    })
  } else if (method == "median-geometric-mean") {
    gene_log_geo_means <- rowMeans(log(expr_mat))
    size_factors <- apply(expr_mat, 2, function(cell_counts) {
      centered <- log(cell_counts) - gene_log_geo_means
      centered <- centered[!is.nan(centered)]
      centered <- centered[is.finite(centered)]
      exp(locfunc(centered))
    })
  } else if (method == "median") {
    gene_medians <- apply(expr_mat, 1, median)
    size_factors <- apply(Matrix::t(Matrix::t(expr_mat) - gene_medians), 2, median)
  } else if (method == "mode") {
    size_factors <- estimate_t(expr_mat)
  } else if (method == "geometric-mean-total") {
    cell_totals <- apply(expr_mat, 2, sum)
    size_factors <- log(cell_totals) / mean(log(cell_totals))
  } else if (method == "mean-geometric-mean-total") {
    cell_totals <- apply(expr_mat, 2, sum)
    size_factors <- cell_totals / exp(mean(log(cell_totals)))
  }
  # Cells whose factor is undefined default to 1.
  size_factors[is.na(size_factors)] <- 1
  size_factors
}
#' Function to calculate the size factor for the single-cell RNA-seq data
#'
#' @importFrom stats median
#' @param counts The matrix for the gene expression data, either read counts or FPKM values or transcript counts
#' @param locfunc The location function used to find the representative value
#' @param round_exprs A logic flag to determine whether or not the expression value should be rounded
#' @param method A character to specify the size factor calculation approaches. It can be either "mean-geometric-mean-total" (default),
#' "weighted-median", "median-geometric-mean", "median", "mode", "geometric-mean-total".
#' @return A numeric vector with one size factor per cell (column of \code{counts}).
#'
estimateSizeFactorsForMatrix <- function(counts, locfunc = median, round_exprs=TRUE, method="mean-geometric-mean-total")
{
  # Dispatch to the sparse- or dense-matrix implementation; both share the
  # same interface and return one size factor per cell.
  if (isSparseMatrix(counts)){
    estimateSizeFactorsForSparseMatrix(counts, locfunc = locfunc, round_exprs=round_exprs, method=method)
  }else{
    estimateSizeFactorsForDenseMatrix(counts, locfunc = locfunc, round_exprs=round_exprs, method=method)
  }
}
################
# Some convenience functions for loading the HSMM data
#' Return the names of classic muscle genes
#'
#' @description Returns a list of classic muscle genes. Used to
#' add convenience for loading HSMM data.
#'
#' @return A character vector of classic muscle and cell-cycle gene symbols.
#' @export
get_classic_muscle_markers <- function(){
  c("MEF2C", "MEF2D", "MYF5", "ANPEP", "PDGFRA",
    "MYOG", "TPM1", "TPM2", "MYH2", "MYH3", "NCAM1", "TNNT1", "TNNT2", "TNNC1",
    "CDK1", "CDK2", "CCNB1", "CCNB2", "CCND1", "CCNA1", "ID1")
}
#' Build a CellDataSet from the HSMMSingleCell package
#'
#' @description Creates a cellDataSet using the data from the
#' HSMMSingleCell package.
#'
#' @return A CellDataSet built from the HSMM single-cell data, with size
#' factors estimated.
#' @import HSMMSingleCell
#' @importFrom utils data
#' @export
load_HSMM <- function(){
  # Dummy bindings so R CMD check does not flag the variables populated by
  # the data() calls below as undefined globals.
  HSMM_sample_sheet <- NA
  HSMM_gene_annotation <- NA
  HSMM_expr_matrix <- NA
  gene_short_name <- NA
  data(HSMM_expr_matrix, envir = environment())
  data(HSMM_gene_annotation, envir = environment())
  data(HSMM_sample_sheet, envir = environment())
  pd <- new("AnnotatedDataFrame", data = HSMM_sample_sheet)
  fd <- new("AnnotatedDataFrame", data = HSMM_gene_annotation)
  HSMM <- newCellDataSet(as.matrix(HSMM_expr_matrix), phenoData = pd, featureData = fd)
  # The original called estimateSizeFactors() twice in a row; the second
  # call recomputed identical factors and has been removed.
  HSMM <- estimateSizeFactors(HSMM)
  HSMM
}
#' Return a CellDataSet of classic muscle genes.
#' @importFrom Biobase fData
#' @return A CellDataSet object
#' @export
load_HSMM_markers <- function(){
  # Dummy binding for the subset() non-standard evaluation below.
  gene_short_name <- NA
  HSMM <- load_HSMM()
  markers <- get_classic_muscle_markers()
  keep_rows <- row.names(subset(fData(HSMM), gene_short_name %in% markers))
  HSMM[keep_rows, ]
}
#' Build a CellDataSet from the data stored in inst/extdata directory.
#'
#' Loads the bundled lung expression matrix and its phenotype/feature
#' metadata, then runs the standard monocle pipeline: size factors,
#' dispersions, gene detection, DDRTree dimensionality reduction and cell
#' ordering.
#'
#' @return A CellDataSet containing the ordered lung data.
#' @importFrom Biobase pData pData<- exprs fData
#' @export
load_lung <- function(){
  # Dummy bindings so R CMD check does not flag the variables created by
  # load() / subset() below as undefined globals.
  lung_phenotype_data <- NA
  lung_feature_data <- NA
  num_cells_expressed <- NA
  baseLoc <- system.file(package="monocle")
  #baseLoc <- './inst'
  extPath <- file.path(baseLoc, "extdata")
  # These .RData files define lung_phenotype_data, lung_exprs_data and
  # lung_feature_data in this function's environment.
  load(file.path(extPath, "lung_phenotype_data.RData"))
  load(file.path(extPath, "lung_exprs_data.RData"))
  load(file.path(extPath, "lung_feature_data.RData"))
  # Align expression columns with the phenotype rows (same cell order).
  lung_exprs_data <- lung_exprs_data[,row.names(lung_phenotype_data)]
  pd <- new("AnnotatedDataFrame", data = lung_phenotype_data)
  fd <- new("AnnotatedDataFrame", data = lung_feature_data)
  # Now, make a new CellDataSet using the RNA counts
  lung <- newCellDataSet(lung_exprs_data,
                         phenoData = pd,
                         featureData = fd,
                         lowerDetectionLimit=1,
                         expressionFamily=negbinomial.size())
  lung <- estimateSizeFactors(lung)
  # Overwrite the freshly estimated size factors with the values cached in
  # the phenotype table, so results match the stored analysis exactly.
  pData(lung)$Size_Factor <- lung_phenotype_data$Size_Factor
  lung <- estimateDispersions(lung)
  pData(lung)$Total_mRNAs <- colSums(exprs(lung))
  lung <- detectGenes(lung, min_expr = 1)
  # Order cells using every gene detectably expressed in at least 5 cells.
  expressed_genes <- row.names(subset(fData(lung), num_cells_expressed >= 5))
  ordering_genes <- expressed_genes
  lung <- setOrderingFilter(lung, ordering_genes)
  # DDRTree based ordering:
  lung <- reduceDimension(lung, norm_method="log", method = 'DDRTree', pseudo_expr = 1) #
  lung <- orderCells(lung)
  # Re-root the trajectory at the state containing cell SRR1033936_0 so
  # pseudotime starts at that reference cell's state.
  E14_state = as.numeric(pData(lung)['SRR1033936_0', 'State'])
  if(E14_state != 1)
    lung <- orderCells(lung, root_state=E14_state)
  lung
}
|
/R/utils.R
|
no_license
|
wangprince2017/monocle-release
|
R
| false
| false
| 22,817
|
r
|
#' Creates a new CellDataSet object.
#'
#' @param cellData expression data matrix for an experiment
#' @param phenoData data frame containing attributes of individual cells
#' @param featureData data frame containing attributes of features (e.g. genes)
#' @param lowerDetectionLimit the minimum expression level that constitutes true expression
#' @param expressionFamily the VGAM family function to be used for expression response variables
#' @return a new CellDataSet object
#' @import VGAM
#' @importFrom Biobase annotatedDataFrameFrom assayDataNew
#' @export
#' @examples
#' \dontrun{
#' sample_sheet_small <- read.delim("../data/sample_sheet_small.txt", row.names=1)
#' sample_sheet_small$Time <- as.factor(sample_sheet_small$Time)
#' gene_annotations_small <- read.delim("../data/gene_annotations_small.txt", row.names=1)
#' fpkm_matrix_small <- read.delim("../data/fpkm_matrix_small.txt")
#' pd <- new("AnnotatedDataFrame", data = sample_sheet_small)
#' fd <- new("AnnotatedDataFrame", data = gene_annotations_small)
#' HSMM <- new("CellDataSet", exprs = as.matrix(fpkm_matrix_small), phenoData = pd, featureData = fd)
#' }
newCellDataSet <- function( cellData,
                            phenoData = NULL,
                            featureData = NULL,
                            lowerDetectionLimit = 0.1,
                            expressionFamily=VGAM::negbinomial.size())
{
  # Reject anything that is neither a dense matrix nor a supported sparse
  # matrix; is.matrix() replaces the fragile `class(x) != "matrix"` test.
  if (!is.matrix(cellData) && isSparseMatrix(cellData) == FALSE){
    stop("Error: argument cellData must be a matrix (either sparse from the Matrix package or dense)")
  }

  # Size factors start as NA and are filled in by estimateSizeFactors().
  sizeFactors <- rep( NA_real_, ncol(cellData) )

  if( is.null( phenoData ) )
    phenoData <- annotatedDataFrameFrom( cellData, byrow=FALSE )
  if( is.null( featureData ) )
    featureData <- annotatedDataFrameFrom(cellData, byrow=TRUE)

  # The original code repeated this warning three times (including before
  # the default featureData was filled in); warn once, after defaults.
  if(!('gene_short_name' %in% colnames(featureData))) {
    warning("Warning: featureData must contain a column verbatim named 'gene_short_name' for certain functions")
  }

  phenoData$`Size_Factor` <- sizeFactors

  cds <- new( "CellDataSet",
              assayData = assayDataNew( "environment", exprs=cellData ),
              phenoData=phenoData,
              featureData=featureData,
              lowerDetectionLimit=lowerDetectionLimit,
              expressionFamily=expressionFamily,
              dispFitInfo = new.env( hash=TRUE ))

  validObject( cds )
  cds
}
# Apply FUN over the rows (MARGIN == 1) or columns (otherwise) of a
# (sparse) matrix, returning a list of per-slice results. When
# convert_to_dense is TRUE each slice is densified with as.matrix() before
# FUN sees it. The original duplicated the whole loop four times; only the
# transpose and the densification actually vary, so they are factored out.
# NOTE(review): iteration is driven by colnames(), so a matrix without
# dimnames yields an empty result -- same as the original behavior.
sparseApply <- function(Sp_X, MARGIN, FUN, convert_to_dense, ...){
  # Row-wise application is column-wise application on the transpose.
  if (MARGIN == 1){
    Sp_X <- Matrix::t(Sp_X)
  }
  if (convert_to_dense){
    res <- lapply(colnames(Sp_X), function(i, FUN, ...) {
      FUN(as.matrix(Sp_X[, i]), ...)
    }, FUN, ...)
  }else{
    res <- lapply(colnames(Sp_X), function(i, FUN, ...) {
      FUN(Sp_X[, i], ...)
    }, FUN, ...)
  }
  return(res)
}
#' @importFrom parallel splitIndices
# Partition the rows of x into ncl contiguous chunks (one per worker).
splitRows <- function (x, ncl) {
  row_groups <- splitIndices(nrow(x), ncl)
  lapply(row_groups, function(rows) x[rows, , drop = FALSE])
}
#' @importFrom parallel splitIndices
# Partition the columns of x into ncl contiguous chunks (one per worker).
splitCols <- function (x, ncl) {
  col_groups <- splitIndices(ncol(x), ncl)
  lapply(col_groups, function(cols) x[, cols, drop = FALSE])
}
#' Parallel row-wise apply over a (sparse) matrix.
#'
#' Splits the rows of \code{x} into one chunk per worker of cluster
#' \code{cl}, runs \code{sparseApply(..., MARGIN = 1)} on each chunk, and
#' concatenates the per-row results into a single list named by row.
#' @importFrom BiocGenerics clusterApply
sparseParRApply <- function (cl, x, FUN, convert_to_dense, ...)
{
  # quote = TRUE keeps the worker results from being re-evaluated when the
  # per-chunk lists are spliced together with do.call(c, ...).
  par_res <- do.call(c, clusterApply(cl = cl, x = splitRows(x, length(cl)),
                                     fun = sparseApply, MARGIN = 1L, FUN = FUN, convert_to_dense=convert_to_dense, ...), quote = TRUE)
  names(par_res) <- row.names(x)
  par_res
}
#' Parallel column-wise apply over a (sparse) matrix.
#'
#' Splits the columns of \code{x} into one chunk per worker of cluster
#' \code{cl}, runs \code{sparseApply(..., MARGIN = 2)} on each chunk, and
#' concatenates the per-column results into a single list named by column.
#' @importFrom BiocGenerics clusterApply
sparseParCApply <- function (cl = NULL, x, FUN, convert_to_dense, ...)
{
  # quote = TRUE keeps the worker results from being re-evaluated when the
  # per-chunk lists are spliced together with do.call(c, ...).
  par_res <- do.call(c, clusterApply(cl = cl, x = splitCols(x, length(cl)),
                                     fun = sparseApply, MARGIN = 2L, FUN = FUN, convert_to_dense=convert_to_dense, ...), quote = TRUE)
  names(par_res) <- colnames(x)
  par_res
}
#' Multicore apply-like function for CellDataSet
#'
#' mcesApply computes the row-wise or column-wise results of FUN, just like esApply.
#' Variables in pData from X are available in FUN.
#'
#' @param X a CellDataSet object
#' @param MARGIN The margin to apply to, either 1 for rows (samples) or 2 for columns (features)
#' @param FUN Any function
#' @param required_packages A list of packages FUN will need. Failing to provide packages needed by FUN will generate errors in worker threads.
#' @param convert_to_dense Whether to force conversion a sparse matrix to a dense one before calling FUN
#' @param ... Additional parameters for FUN
#' @param cores The number of cores to use for evaluation
#'
#' @return The result of with(pData(X) apply(exprs(X)), MARGIN, FUN, ...))
#' @importFrom parallel makeCluster stopCluster
#' @importFrom BiocGenerics clusterCall parRapply parCapply
#' @importFrom Biobase pData exprs multiassign
#' @export
mcesApply <- function(X, MARGIN, FUN, required_packages, cores=1, convert_to_dense=TRUE, ...) {
  # Make the pData columns visible to FUN by injecting them into a new
  # environment that wraps FUN's original enclosure.
  parent <- environment(FUN)
  if (is.null(parent))
    parent <- emptyenv()
  e1 <- new.env(parent=parent)
  multiassign(names(pData(X)), pData(X), envir=e1)
  environment(FUN) <- e1

  # Note: use outfile argument to makeCluster for debugging
  # The original branched on Sys.info()[['sysname']] but created the cluster
  # identically for Windows, Linux and Darwin -- and left `cl` undefined on
  # any other platform. A single unconditional call is equivalent and safe.
  cl <- makeCluster(cores)

  # Always tear the cluster down, even if FUN errors on a worker.
  cleanup <- function(){
    stopCluster(cl)
  }
  on.exit(cleanup)

  # Load FUN's package dependencies on every worker before dispatching.
  if (is.null(required_packages) == FALSE){
    clusterCall(cl, function(pkgs) {
      for (req in pkgs) {
        library(req, character.only=TRUE)
      }
    }, required_packages)
  }

  if (MARGIN == 1){
    suppressWarnings(res <- sparseParRApply(cl, exprs(X), FUN, convert_to_dense, ...))
  }else{
    suppressWarnings(res <- sparseParCApply(cl, exprs(X), FUN, convert_to_dense, ...))
  }

  res
}
#' Apply FUN over rows or columns of a CellDataSet's expression matrix,
#' using sparse-aware iteration when the matrix is sparse.
#'
#' pData columns of X are copied into FUN's enclosing environment so FUN
#' can reference phenotype variables by name (mirrors Biobase::esApply).
#' @importFrom Biobase multiassign
smartEsApply <- function(X, MARGIN, FUN, convert_to_dense, ...) {
  parent <- environment(FUN)
  if (is.null(parent))
    parent <- emptyenv()
  e1 <- new.env(parent=parent)
  multiassign(names(pData(X)), pData(X), envir=e1)
  environment(FUN) <- e1
  # Sparse matrices take the list-based sparseApply path; dense ones use
  # base::apply (which would densify a sparse matrix if given one).
  if (isSparseMatrix(exprs(X))){
    res <- sparseApply(exprs(X), MARGIN, FUN, convert_to_dense, ...)
  }else{
    res <- apply(exprs(X), MARGIN, FUN, ...)
  }
  # Name results by gene (row) for MARGIN 1, by cell (column) otherwise.
  if (MARGIN == 1)
  {
    names(res) <- row.names(X)
  }else{
    names(res) <- colnames(X)
  }
  res
}
####
#' Filter genes with extremely high or low negentropy
#'
#' @description Examines all the genes in the CellDataSet passed in and removes
#' all the genes that contain extremely high or low negentropies. You can specify
#' which genes to filter out based on the boundaries you can set for expression levels
#' and the boundaries you set for which centile to include. the function "dispersionTable"
#' is a better form of this function.
#'
#' @param cds a CellDataSet object upon which to perform this operation
#' @param lower_negentropy_bound the centile below which to exclude to genes
#' @param upper_negentropy_bound the centile above which to exclude to genes
#' @param expression_lower_thresh the expression level below which to exclude genes used to determine negentropy
#' @param expression_upper_thresh the expression level above which to exclude genes used to determine negentropy
#' @return a vector of gene names
#' @importFrom stats quantile
#' @export
#' @examples
#' \dontrun{
#' reasonableNegentropy <- selectNegentropyGenes(HSMM, "07%", "95%", 1, 100)
#' }
selectNegentropyGenes <- function(cds, lower_negentropy_bound="0%",
                                  upper_negentropy_bound="99%",
                                  expression_lower_thresh=0.1,
                                  expression_upper_thresh=Inf){
  .Deprecated("dispersionTable")
  # Dummy binding for subset() non-standard evaluation below.
  log_expression <- NULL
  FM <- exprs(cds)
  # For count-based families, convert the absolute thresholds into per-cell
  # relative thresholds and normalize each column to proportions.
  if (cds@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size"))
  {
    expression_lower_thresh <- expression_lower_thresh / colSums(FM)
    expression_upper_thresh <- expression_upper_thresh / colSums(FM)
    FM <- Matrix::t(Matrix::t(FM)/colSums(FM))
  }
  # Per-gene negentropy of log2 expression.
  # NOTE(review): the thresholded vector computed on the first line of this
  # closure is immediately overwritten by log2(x) of the *unfiltered*
  # values, so expression_lower_thresh has no effect here. This looks like
  # a bug, but the function is deprecated -- confirm before changing.
  negentropy_exp <- apply(FM,1,function(x) {
    expression <- x[x > expression_lower_thresh]
    expression <- log2(x);
    expression <- expression[is.finite(expression)]
    if (length(expression)){
      expression <- scale(expression)
      mean(-exp(-(expression^2)/2))^2
    }else{
      0
    }
  }
  )
  # Per-gene mean of log2 expression (same overwrite pattern as above).
  means <- apply(FM,1,function(x) {
    expression <- x[x > expression_lower_thresh]
    expression <- log2(x);
    expression[is.finite(expression) == FALSE] <- NA;
    if (length(expression)){
      mean(expression, na.rm=T)
    }else{
      NA
    }
  }
  )
  ordering_df <- data.frame(log_expression = means, negentropy = negentropy_exp)
  # Dummy bindings for subset() non-standard evaluation below.
  negentropy <- NULL
  log_express <- NULL
  negentropy_residual <- NULL
  # Drop genes whose mean or negentropy could not be computed.
  ordering_df <- subset(ordering_df,
                        is.na(log_expression) == FALSE &
                          is.nan(log_expression) == FALSE &
                          is.na(negentropy) == FALSE &
                          is.nan(negentropy) == FALSE)
  # Fit a natural-spline trend of negentropy vs. mean expression and keep
  # only genes whose residual lies between the requested centiles.
  negentropy_fit <- vglm(negentropy~sm.ns(log_expression, df=4),data=ordering_df, family=VGAM::gaussianff())
  ordering_df$negentropy_response <- predict(negentropy_fit, newdata=ordering_df, type="response")
  ordering_df$negentropy_residual <- ordering_df$negentropy - ordering_df$negentropy_response
  lower_negentropy_thresh <- quantile(ordering_df$negentropy_residual, probs=seq(0,1,0.01), na.rm=T)[lower_negentropy_bound]
  upper_negentropy_thresh <- quantile(ordering_df$negentropy_residual, probs=seq(0,1,0.01), na.rm=T)[upper_negentropy_bound]
  ordering_genes <- row.names(subset(ordering_df,
                                     negentropy_residual >= lower_negentropy_thresh &
                                       negentropy_residual <= upper_negentropy_thresh))
  ordering_genes
}
#' Retrieve a table of values specifying the mean-variance relationship
#'
#' Calling estimateDispersions computes a smooth function describing how variance
#' in each gene's expression across cells varies according to the mean. This
#' function only works for CellDataSet objects containing count-based expression
#' data, either transcripts or reads.
#'
#' @param cds The CellDataSet from which to extract a dispersion table.
#' @return A data frame containing the empirical mean expression,
#' empirical dispersion, and the value estimated by the dispersion model.
#'
#' @export
dispersionTable <- function(cds){
  fit_info <- cds@dispFitInfo[["blind"]]
  if (is.null(fit_info)){
    warning("Warning: estimateDispersions only works, and is only needed, when you're using a CellDataSet with a negbinomial or negbinomial.size expression family")
    stop("Error: no dispersion model found. Please call estimateDispersions() before calling this function")
  }
  # One row per gene: empirical mean, fitted dispersion at that mean, and
  # the empirical dispersion itself.
  mean_expr <- fit_info$disp_table$mu
  data.frame(gene_id = fit_info$disp_table$gene_id,
             mean_expression = mean_expr,
             dispersion_fit = fit_info$disp_func(mean_expr),
             dispersion_empirical = fit_info$disp_table$disp)
}
#####
#' Detects genes above minimum threshold.
#'
#' @description Sets the global expression detection threshold to be used with this CellDataSet.
#' Counts how many cells each feature in a CellDataSet object that are detectably expressed
#' above a minimum threshold. Also counts the number of genes above this threshold are
#' detectable in each cell.
#'
#' @param cds the CellDataSet upon which to perform this operation
#' @param min_expr the expression threshold
#' @return an updated CellDataSet object
#' @importFrom Biobase fData fData<- exprs pData pData<-
#' @export
#' @examples
#' \dontrun{
#' HSMM <- detectGenes(HSMM, min_expr=0.1)
#' }
detectGenes <- function(cds, min_expr=NULL){
  # Fall back to the object's global detection limit when no threshold is
  # supplied.
  if (is.null(min_expr))
  {
    min_expr <- cds@lowerDetectionLimit
  }
  # Vectorized row/column counts of entries exceeding the threshold; a
  # large block of commented-out apply()-based legacy code was removed.
  # NOTE(review): the comparison is strictly greater-than, so a value equal
  # to min_expr does not count as expressed -- confirm this matches the
  # documented "above a minimum threshold" contract.
  fData(cds)$num_cells_expressed <- Matrix::rowSums(exprs(cds) > min_expr)
  pData(cds)$num_genes_expressed <- Matrix::colSums(exprs(cds) > min_expr)
  cds
}
# Convert a slam simple_triplet_matrix to a Matrix-package sparseMatrix.
#' @import slam
#' @import Matrix
asSparseMatrix = function (simpleTripletMatrix) {
  # Both representations are (i, j, value) triplets, so the conversion is a
  # direct field-by-field handoff.
  out <- sparseMatrix(i = simpleTripletMatrix[["i"]],
                      j = simpleTripletMatrix[["j"]],
                      x = simpleTripletMatrix[["v"]],
                      dims = c(simpleTripletMatrix[["nrow"]],
                               simpleTripletMatrix[["ncol"]]))
  dn <- simpleTripletMatrix[["dimnames"]]
  if (!is.null(dn))
    dimnames(out) <- dn
  out
}
# Convert a sparseMatrix from Matrix package to a slam matrix
#' @import slam
asSlamMatrix = function (sp_mat) {
  # Matrix::summary() yields the (i, j, x) triplet representation, which
  # maps directly onto slam's simple_triplet_matrix constructor.
  sp <- Matrix::summary(sp_mat)
  simple_triplet_matrix(sp[,"i"], sp[,"j"], sp[,"x"], ncol=ncol(sp_mat), nrow=nrow(sp_mat), dimnames=dimnames(sp_mat))
}
# Test whether x is one of the Matrix-package sparse classes monocle
# supports (column- or triplet-oriented double sparse matrices).
#' @import Matrix
isSparseMatrix <- function(x){
  # inherits() always yields a single TRUE/FALSE, unlike the original
  # `class(x) %in% ...`, which returns a vector for multi-class objects and
  # then misbehaves inside if() conditions.
  inherits(x, "dgCMatrix") || inherits(x, "dgTMatrix")
}
# Estimate size factors for each column, given a sparseMatrix from the Matrix
# package
#' @import slam
#' @importFrom stats median
estimateSizeFactorsForSparseMatrix <- function(counts,
                                               locfunc = median,
                                               round_exprs=TRUE,
                                               method="mean-geometric-mean-total"){
  # Work on a slam simple_triplet_matrix so the row/column apply helpers
  # never densify the data.
  CM <- counts
  if (round_exprs)
    CM <- round(CM)
  CM <- asSlamMatrix(CM)

  if (method == "weighted-median"){
    # Per-gene log medians, weighted by the fraction of cells expressing
    # each gene.
    log_medians <- rowapply_simple_triplet_matrix(CM, function(cell_expr) {
      log(locfunc(cell_expr))
    })

    weights <- rowapply_simple_triplet_matrix(CM, function(cell_expr) {
      num_pos <- sum(cell_expr > 0)
      num_pos / length(cell_expr)
    })

    sfs <- colapply_simple_triplet_matrix(CM, function(cnts) {
      norm_cnts <- weights * (log(cnts) - log_medians)
      norm_cnts <- norm_cnts[is.nan(norm_cnts) == FALSE]
      norm_cnts <- norm_cnts[is.finite(norm_cnts)]
      exp( mean(norm_cnts) )
    })
  }else if (method == "median-geometric-mean"){
    # Per-gene geometric mean of counts, in log space. BUG FIX: the
    # original computed mean(log(CM)) inside the row apply -- the grand
    # mean over the whole matrix repeated for every gene -- instead of the
    # per-row mean of log counts (compare rowMeans(log(CM)) in the dense
    # implementation).
    log_geo_means <- rowapply_simple_triplet_matrix(CM, function(x) { mean(log(x)) })

    sfs <- colapply_simple_triplet_matrix(CM, function(cnts) {
      norm_cnts <- log(cnts) - log_geo_means
      norm_cnts <- norm_cnts[is.nan(norm_cnts) == FALSE]
      norm_cnts <- norm_cnts[is.finite(norm_cnts)]
      exp( locfunc( norm_cnts ))
    })
  }else if(method == "median"){
    stop("Error: method 'median' not yet supported for sparse matrices")
  }else if(method == 'mode'){
    stop("Error: method 'mode' not yet supported for sparse matrices")
  }else if(method == 'geometric-mean-total') {
    cell_total <- col_sums(CM)
    sfs <- log(cell_total) / mean(log(cell_total))
  }else if(method == 'mean-geometric-mean-total') {
    cell_total <- col_sums(CM)
    sfs <- cell_total / exp(mean(log(cell_total)))
  }

  # Cells with an undefined size factor default to 1 (no scaling).
  sfs[is.na(sfs)] <- 1
  sfs
}
#' @importFrom stats median
estimateSizeFactorsForDenseMatrix <- function(counts, locfunc = median, round_exprs=TRUE, method="mean-geometric-mean-total"){
  # Compute one size factor per cell (column) of a dense expression matrix.
  expr_mat <- counts
  if (round_exprs)
    expr_mat <- round(expr_mat)
  if (method == "weighted-median"){
    # Per-gene log median, weighted by the fraction of cells expressing each gene.
    gene_log_medians <- apply(expr_mat, 1, function(gene_expr) {
      log(locfunc(gene_expr))
    })
    gene_weights <- apply(expr_mat, 1, function(gene_expr) {
      sum(gene_expr > 0) / length(gene_expr)
    })
    sfs <- apply(expr_mat, 2, function(cell_counts) {
      ratios <- gene_weights * (log(cell_counts) - gene_log_medians)
      ratios <- ratios[!is.nan(ratios)]
      ratios <- ratios[is.finite(ratios)]
      exp(mean(ratios))
    })
  } else if (method == "median-geometric-mean"){
    # DESeq-style: location of the per-cell ratios to per-gene geometric means.
    gene_log_geo_means <- rowMeans(log(expr_mat))
    sfs <- apply(expr_mat, 2, function(cell_counts) {
      ratios <- log(cell_counts) - gene_log_geo_means
      ratios <- ratios[!is.nan(ratios)]
      ratios <- ratios[is.finite(ratios)]
      exp(locfunc(ratios))
    })
  } else if (method == "median"){
    gene_medians <- apply(expr_mat, 1, median)
    sfs <- apply(Matrix::t(Matrix::t(expr_mat) - gene_medians), 2, median)
  } else if (method == "mode"){
    sfs <- estimate_t(expr_mat)
  } else if (method == "geometric-mean-total"){
    cell_totals <- apply(expr_mat, 2, sum)
    sfs <- log(cell_totals) / mean(log(cell_totals))
  } else if (method == "mean-geometric-mean-total"){
    cell_totals <- apply(expr_mat, 2, sum)
    sfs <- cell_totals / exp(mean(log(cell_totals)))
  }
  # Cells with an undefined size factor default to 1 (no scaling).
  sfs[is.na(sfs)] <- 1
  sfs
}
#' Function to calculate the size factor for the single-cell RNA-seq data
#'
#' @importFrom stats median
#' @param counts The matrix for the gene expression data, either read counts or FPKM values or transcript counts
#' @param locfunc The location function used to find the representative value
#' @param round_exprs A logic flag to determine whether or not the expression value should be rounded
#' @param method A character to specify the size factor calculation approaches. It can be either "mean-geometric-mean-total" (default),
#' "weighted-median", "median-geometric-mean", "median", "mode", "geometric-mean-total".
#' @return A numeric vector with one size factor per cell (column of \code{counts}).
#'
estimateSizeFactorsForMatrix <- function(counts, locfunc = median, round_exprs=TRUE, method="mean-geometric-mean-total")
{
  # Dispatch to the sparse- or dense-matrix implementation; both share the
  # same interface and return one size factor per cell.
  if (isSparseMatrix(counts)){
    estimateSizeFactorsForSparseMatrix(counts, locfunc = locfunc, round_exprs=round_exprs, method=method)
  }else{
    estimateSizeFactorsForDenseMatrix(counts, locfunc = locfunc, round_exprs=round_exprs, method=method)
  }
}
################
# Some convenience functions for loading the HSMM data
#' Return the names of classic muscle genes
#'
#' @description Returns a list of classic muscle genes. Used to
#' add convenience for loading HSMM data.
#'
#' @return A character vector of classic muscle and cell-cycle gene symbols.
#' @export
get_classic_muscle_markers <- function(){
  c("MEF2C", "MEF2D", "MYF5", "ANPEP", "PDGFRA",
    "MYOG", "TPM1", "TPM2", "MYH2", "MYH3", "NCAM1", "TNNT1", "TNNT2", "TNNC1",
    "CDK1", "CDK2", "CCNB1", "CCNB2", "CCND1", "CCNA1", "ID1")
}
#' Build a CellDataSet from the HSMMSingleCell package
#'
#' @description Creates a cellDataSet using the data from the
#' HSMMSingleCell package.
#'
#' @return A CellDataSet built from the HSMM single-cell data, with size
#' factors estimated.
#' @import HSMMSingleCell
#' @importFrom utils data
#' @export
load_HSMM <- function(){
  # Dummy bindings so R CMD check does not flag the variables populated by
  # the data() calls below as undefined globals.
  HSMM_sample_sheet <- NA
  HSMM_gene_annotation <- NA
  HSMM_expr_matrix <- NA
  gene_short_name <- NA
  data(HSMM_expr_matrix, envir = environment())
  data(HSMM_gene_annotation, envir = environment())
  data(HSMM_sample_sheet, envir = environment())
  pd <- new("AnnotatedDataFrame", data = HSMM_sample_sheet)
  fd <- new("AnnotatedDataFrame", data = HSMM_gene_annotation)
  HSMM <- newCellDataSet(as.matrix(HSMM_expr_matrix), phenoData = pd, featureData = fd)
  # The original called estimateSizeFactors() twice in a row; the second
  # call recomputed identical factors and has been removed.
  HSMM <- estimateSizeFactors(HSMM)
  HSMM
}
#' Return a CellDataSet of classic muscle genes.
#' @importFrom Biobase fData
#' @return A CellDataSet object
#' @export
load_HSMM_markers <- function(){
  # Dummy binding for the subset() non-standard evaluation below.
  gene_short_name <- NA
  HSMM <- load_HSMM()
  markers <- get_classic_muscle_markers()
  keep_rows <- row.names(subset(fData(HSMM), gene_short_name %in% markers))
  HSMM[keep_rows, ]
}
#' Build a CellDataSet from the data stored in inst/extdata directory.
#'
#' Loads the bundled lung expression matrix and its phenotype/feature
#' metadata, then runs the standard monocle pipeline: size factors,
#' dispersions, gene detection, DDRTree dimensionality reduction and cell
#' ordering.
#'
#' @return A CellDataSet containing the ordered lung data.
#' @importFrom Biobase pData pData<- exprs fData
#' @export
load_lung <- function(){
  # Dummy bindings so R CMD check does not flag the variables created by
  # load() / subset() below as undefined globals.
  lung_phenotype_data <- NA
  lung_feature_data <- NA
  num_cells_expressed <- NA
  baseLoc <- system.file(package="monocle")
  #baseLoc <- './inst'
  extPath <- file.path(baseLoc, "extdata")
  # These .RData files define lung_phenotype_data, lung_exprs_data and
  # lung_feature_data in this function's environment.
  load(file.path(extPath, "lung_phenotype_data.RData"))
  load(file.path(extPath, "lung_exprs_data.RData"))
  load(file.path(extPath, "lung_feature_data.RData"))
  # Align expression columns with the phenotype rows (same cell order).
  lung_exprs_data <- lung_exprs_data[,row.names(lung_phenotype_data)]
  pd <- new("AnnotatedDataFrame", data = lung_phenotype_data)
  fd <- new("AnnotatedDataFrame", data = lung_feature_data)
  # Now, make a new CellDataSet using the RNA counts
  lung <- newCellDataSet(lung_exprs_data,
                         phenoData = pd,
                         featureData = fd,
                         lowerDetectionLimit=1,
                         expressionFamily=negbinomial.size())
  lung <- estimateSizeFactors(lung)
  # Overwrite the freshly estimated size factors with the values cached in
  # the phenotype table, so results match the stored analysis exactly.
  pData(lung)$Size_Factor <- lung_phenotype_data$Size_Factor
  lung <- estimateDispersions(lung)
  pData(lung)$Total_mRNAs <- colSums(exprs(lung))
  lung <- detectGenes(lung, min_expr = 1)
  # Order cells using every gene detectably expressed in at least 5 cells.
  expressed_genes <- row.names(subset(fData(lung), num_cells_expressed >= 5))
  ordering_genes <- expressed_genes
  lung <- setOrderingFilter(lung, ordering_genes)
  # DDRTree based ordering:
  lung <- reduceDimension(lung, norm_method="log", method = 'DDRTree', pseudo_expr = 1) #
  lung <- orderCells(lung)
  # Re-root the trajectory at the state containing cell SRR1033936_0 so
  # pseudotime starts at that reference cell's state.
  E14_state = as.numeric(pData(lung)['SRR1033936_0', 'State'])
  if(E14_state != 1)
    lung <- orderCells(lung, root_state=E14_state)
  lung
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qc_md_general_checks.R
\name{qc_env_dics}
\alias{qc_env_dics}
\title{Dictionary creation for environmental_md variables}
\usage{
qc_env_dics(variable, parent_logger = "test")
}
\arguments{
\item{variable}{Variable name in which the dictionary is needed as character
vector (e.g. \code{'env_vpd'}).}
}
\value{
A character vector containing the valid values for the provided
variable
}
\description{
\code{qc_env_dics} function creates a dictionary for the selected variable
containing the accepted values for that variable
}
\details{
In order to check whether factor variables have valid values or have been
badly formatted/introduced in the data template, a list of accepted values
for each variable is needed first. This function creates that list for use
in the checks.
}
\section{Accepted variables}{
The factor variables in environmental_md are \code{env_time_zone},
\code{env_ta}, \code{env_rh}, \code{env_vpd}, \code{env_sw_in},
\code{env_ppfd_in}, \code{env_netrad}, \code{env_ws}, \code{env_precip},
\code{env_plant_watpot} and \code{env_leafarea_seasonal}.
}
\seealso{
Other Dictionaries: \code{\link{qc_plant_dics}},
\code{\link{qc_site_dics}},
\code{\link{qc_species_dics}},
\code{\link{qc_stand_dics}}
}
|
/man/qc_env_dics.Rd
|
no_license
|
sapfluxnet/sapfluxnetQC1
|
R
| false
| true
| 1,309
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qc_md_general_checks.R
\name{qc_env_dics}
\alias{qc_env_dics}
\title{Dictionary creation for environmental_md variables}
\usage{
qc_env_dics(variable, parent_logger = "test")
}
\arguments{
\item{variable}{Variable name in which the dictionary is needed as character
vector (e.g. \code{'env_vpd'}).}
}
\value{
A character vector containing the valid values for the provided
variable
}
\description{
\code{qc_env_dics} function creates a dictionary for the selected variable
containing the accepted values for that variable
}
\details{
In order to check whether factor variables have valid values or have been
badly formatted/introduced in the data template, a list of accepted values
for each variable is needed first. This function creates that list for use
in the checks.
}
\section{Accepted variables}{
The factor variables in environmental_md are \code{env_time_zone},
\code{env_ta}, \code{env_rh}, \code{env_vpd}, \code{env_sw_in},
\code{env_ppfd_in}, \code{env_netrad}, \code{env_ws}, \code{env_precip},
\code{env_plant_watpot} and \code{env_leafarea_seasonal}.
}
\seealso{
Other Dictionaries: \code{\link{qc_plant_dics}},
\code{\link{qc_site_dics}},
\code{\link{qc_species_dics}},
\code{\link{qc_stand_dics}}
}
|
###batting log wrangled in "substring.r"
###data was slightly edited from original. redo in R.
# Bayesian regression of run differential (rdiff) on home ("_h") and away
# ("_a") batting, pitching and fielding box-score stats, fitted with
# rethinking::map2stan (Stan HMC under the hood).
library(rethinking)
library(tidyverse)
###
set.seed(1234)
# Let Stan use all available cores and compile with native optimizations.
options(mc.cores = parallel::detectCores())
Sys.setenv(LOCAL_CPPFLAGS = '-march=native')
###
m2 <- map2stan(
  alist(
    # Likelihood: run differential is normal around a linear predictor
    # built from both teams' box-score statistics.
    rdiff ~ dnorm( mu, sigma),
    mu <- int +
      a + h +
      hits_h * Hh + double_h * B2Bh + triple_h * B3Bh + HR_h * BHRh + balls_h * BBBh +
      hits_a * Ha + double_a * B2Ba + triple_a * B3Ba + HR_a * BHRa + balls_a * BBBa +
      hits_allowed_h * HHAh + pballs_h * PBBh + pstrikeouts_h * PSOh + strikes_h * Strh +
      hits_allowed_a * HHAa + pballs_a * PBBa + pstrikeouts_a * PSOa + strikes_a * Stra +
      fh1 * PO_h + fh2 * A_h + fh3 * E_h + fh4 * DP_h + fa1 * PO_a + fa2 * A_a + fa3 * E_a + fa4 * DP_a,
    ### weakly informative, independent normal(0, 3) priors for the
    ### intercept and the batting/pitching slopes (these are fixed priors,
    ### not adaptive/hierarchical, despite the original label)
    int ~ dnorm(0,3),
    h ~ dnorm(0, 3),
    a ~ dnorm(0, 3),
    ###
    hits_h ~ dnorm(0, 3),
    hits_a ~ dnorm(0, 3),
    ###
    double_h ~ dnorm(0, 3),
    double_a ~ dnorm(0, 3),
    ###
    triple_h ~ dnorm(0, 3),
    triple_a ~ dnorm(0, 3),
    ###
    HR_h ~ dnorm(0, 3),
    HR_a ~ dnorm(0, 3),
    ###
    balls_h ~ dnorm(0, 3),
    balls_a ~ dnorm(0, 3),
    ###
    hits_allowed_h ~ dnorm(0, 3),
    hits_allowed_a ~ dnorm(0, 3),
    ###
    pballs_h ~ dnorm(0, 3),
    pballs_a ~ dnorm(0, 3),
    ###
    pstrikeouts_h ~ dnorm(0, 3),
    pstrikeouts_a ~ dnorm(0, 3),
    ###
    strikes_h ~ dnorm(0, 3),
    strikes_a ~ dnorm(0, 3),
    ### tighter standard-normal priors for the fielding slopes; half-Cauchy
    ### scale prior for the residual standard deviation
    c(fh1, fh2, fh3, fh4, fa1, fa2, fa3, fa4) ~ dnorm(0,1),
    sigma ~ dcauchy(0,2)),
  data=mydata2,
  iter=1000, warmup=100, chains=1, cores=4)
# NOTE(review): chains=1 with cores=4 runs a single chain, leaving the
# extra cores idle; confirm whether chains=4 was intended.
#option
#,control = list(adapt_delta = 0.99, max_treedepth = 15)
|
/Bayesian Baseball 2018/scripts/Modeling/Comparison Models/nonmlmv2.R
|
no_license
|
blakeshurtz/Bayesian-Baseball
|
R
| false
| false
| 1,724
|
r
|
###batting log wrangled in "substring.r"
###data was slightly edited from original. redo in R.
# Bayesian regression of run differential (rdiff) on home ("_h") and away
# ("_a") batting, pitching and fielding box-score stats, fitted with
# rethinking::map2stan (Stan HMC under the hood).
library(rethinking)
library(tidyverse)
###
set.seed(1234)
# Let Stan use all available cores and compile with native optimizations.
options(mc.cores = parallel::detectCores())
Sys.setenv(LOCAL_CPPFLAGS = '-march=native')
###
m2 <- map2stan(
  alist(
    # Likelihood: run differential is normal around a linear predictor
    # built from both teams' box-score statistics.
    rdiff ~ dnorm( mu, sigma),
    mu <- int +
      a + h +
      hits_h * Hh + double_h * B2Bh + triple_h * B3Bh + HR_h * BHRh + balls_h * BBBh +
      hits_a * Ha + double_a * B2Ba + triple_a * B3Ba + HR_a * BHRa + balls_a * BBBa +
      hits_allowed_h * HHAh + pballs_h * PBBh + pstrikeouts_h * PSOh + strikes_h * Strh +
      hits_allowed_a * HHAa + pballs_a * PBBa + pstrikeouts_a * PSOa + strikes_a * Stra +
      fh1 * PO_h + fh2 * A_h + fh3 * E_h + fh4 * DP_h + fa1 * PO_a + fa2 * A_a + fa3 * E_a + fa4 * DP_a,
    ### weakly informative, independent normal(0, 3) priors for the
    ### intercept and the batting/pitching slopes (these are fixed priors,
    ### not adaptive/hierarchical, despite the original label)
    int ~ dnorm(0,3),
    h ~ dnorm(0, 3),
    a ~ dnorm(0, 3),
    ###
    hits_h ~ dnorm(0, 3),
    hits_a ~ dnorm(0, 3),
    ###
    double_h ~ dnorm(0, 3),
    double_a ~ dnorm(0, 3),
    ###
    triple_h ~ dnorm(0, 3),
    triple_a ~ dnorm(0, 3),
    ###
    HR_h ~ dnorm(0, 3),
    HR_a ~ dnorm(0, 3),
    ###
    balls_h ~ dnorm(0, 3),
    balls_a ~ dnorm(0, 3),
    ###
    hits_allowed_h ~ dnorm(0, 3),
    hits_allowed_a ~ dnorm(0, 3),
    ###
    pballs_h ~ dnorm(0, 3),
    pballs_a ~ dnorm(0, 3),
    ###
    pstrikeouts_h ~ dnorm(0, 3),
    pstrikeouts_a ~ dnorm(0, 3),
    ###
    strikes_h ~ dnorm(0, 3),
    strikes_a ~ dnorm(0, 3),
    ### tighter standard-normal priors for the fielding slopes; half-Cauchy
    ### scale prior for the residual standard deviation
    c(fh1, fh2, fh3, fh4, fa1, fa2, fa3, fa4) ~ dnorm(0,1),
    sigma ~ dcauchy(0,2)),
  data=mydata2,
  iter=1000, warmup=100, chains=1, cores=4)
# NOTE(review): chains=1 with cores=4 runs a single chain, leaving the
# extra cores idle; confirm whether chains=4 was intended.
#option
#,control = list(adapt_delta = 0.99, max_treedepth = 15)
|
#'Extract the metadata from Zeiss LSM file using loci bioformats as text vector
#'
#'Specifically, this relies on the showinf tool of loci Details
#'@param f Path to lsm file
#'@param cachefile Whether to save a copy of metadata to disk (TRUE)
#'@param ReturnMetaData Whether to return metadata rather than success (default:
#' \code{TRUE})
#'@param Force whether to re-parse metadata even if a cached version exists
#'@param UseLock whether to use a lock file while parsing for simple parallelism
#'@return character vector of metadata OR TRUE/FALSE for success
#'@export
#'@importFrom nat.utils RunCmdForNewerInput makelock
#'@seealso \code{\link{parse_key_lsm_metadata}}
#' @examples \dontrun{
#' lsmdir=file.path(fcconfig$regroot,"lsms")
#' for(f in dir(lsmdir,patt="lsm$",full=T)) lsm_metadata(f,UseLock=TRUE)
#'}
# Run the loci-tools `showinf` dumper on a Zeiss LSM file and return
# (and optionally cache) the resulting text metadata.
#
# cachefile semantics:
#   TRUE      -> cache next to the lsm as <name>.txt (default)
#   FALSE     -> no cache file; capture output in memory
#   function  -> called with f to produce the cache path
#   character -> explicit cache path
# Returns the metadata lines (invisibly) when ReturnMetaData=TRUE,
# otherwise TRUE on success; FALSE on any failure.
lsm_metadata<-function(f,cachefile=TRUE,
ReturnMetaData=TRUE,Force=FALSE,UseLock=FALSE){
if(!file.exists(f)) {
warning("File: ",f," is missing")
return(FALSE)
}
if(tools::file_ext(f)=="bz2"){
# we need to uncompress first
# NOTE(review): bunzip2() is not defined or imported here -- presumably
# R.utils::bunzip2; confirm the package dependency.
f=bunzip2(f)
}
# Normalise cachefile into a concrete path (or TRUE for capture-to-memory)
# and mirror it in `stdout`, which is what showinf output is routed to.
if(is.logical(cachefile)) {
if(cachefile) {
cachefile=sub("\\.lsm$",".txt",f)
stdout=cachefile
} else stdout=TRUE
} else if(is.function(cachefile)){
stdout <- cachefile <- cachefile(f)
} else stdout=cachefile
# Reuse an up-to-date cache unless Force was requested.
# RunCmdForNewerInput comes from nat.utils (see @importFrom above).
if(is.character(cachefile) && file.exists(cachefile)){
if(!Force && !RunCmdForNewerInput(NULL,f,cachefile)){
if(ReturnMetaData) return(readLines(cachefile))
else return(TRUE)
}
}
# Simple file-based lock so parallel workers don't dump the same file;
# returning FALSE means another process already holds the lock.
if(is.character(stdout) && UseLock){
lockfile=paste(stdout,sep=".","lock")
if(!makelock(lockfile)) return (FALSE)
on.exit(unlink(lockfile))
}
# cached file is older, or missing, or we used force
rval=showinf(f, outfile=ifelse(is.character(cachefile), cachefile, FALSE))
# A positive numeric return is showinf's error status; remove any partial
# cache file so a later run re-parses from scratch.
if(is.numeric(rval) && rval>0) {
warning("showinf error for file: ",f)
if(is.character(stdout)) unlink(stdout)
return(FALSE)
}
# Success: hand back the metadata (from the cache file, or as captured).
if(ReturnMetaData) {
if (is.character(stdout)) invisible(readLines(stdout))
else invisible(rval)
}
else return(TRUE)
}
# Load every object stored in an .rda file and return them as an
# (unnamed) list, in the order that load() reports their names.
LoadObjsFromRda <- function(f) {
  store <- new.env(parent = emptyenv())
  obj_names <- load(f, envir = store)
  lapply(obj_names, function(nm) get(nm, envir = store))
}
#' Make summary dataframe for lsm files and their key metadata
#'
#' @details Will calculate md5 sums for all input files and then use this to
#' update a dataframe.
#'
#' If an extrafields function is supplied, it should take one parameter, the
#' list output of \code{\link{parse_key_lsm_metadata}}, which will include as
#' an attribute the full contents of the metadata file dumped by the loci
#' showinf tool. The function should take care to return values for all fields
#' that it \emph{could} return each time it is called even if these are NA and
#' to ensure that values have a consistent data type per field.
#' @param lsmdir Path to directory containing lsm files
#' @param oldlsmdf Dataframe or path to rda file containing one
#' @param extrafields Function to parse extra fields from metadata for each
#' image
#' @param Verbose Logical: Show messages about progress/updating of cached data
#' @return dataframe
#' @export
#' @seealso \code{\link{lsm_metadata}}, \code{\link{parse_key_lsm_metadata}}
make_lsm_df<-function(lsmdir,oldlsmdf=NULL,extrafields=NULL,Verbose=TRUE){
# One row per <name>.txt metadata dump in lsmdir; the matching lsm file is
# assumed to be <name>.lsm alongside it.
lsmdf=data.frame(txtfile=dir(lsmdir,pattern = "txt$"),stringsAsFactors=FALSE)
lsmdf$lsmfile=sub("txt$","lsm",lsmdf$txtfile)
lsmdf$gene_name=sub(".txt","",lsmdf$txtfile,fixed=TRUE)
rownames(lsmdf)=lsmdf$gene_name
lsmdf$txtmtime=file.info(file.path(lsmdir,lsmdf$txtfile))$mtime
lsmfi=file.info(file.path(lsmdir,lsmdf$lsmfile))
lsmdf$lsmsize=lsmfi$size
lsmdf$lsmmtime=lsmfi$mtime
if(Verbose) message("Computing md5 sums for ",nrow(lsmdf),' metadata files.\n')
# md5 of the text metadata is the cache key used to match rows in oldlsmdf.
lsmdf$txtmd5=tools::md5sum(file.path(lsmdir,lsmdf$txtfile))
if(nrow(lsmdf)==0) return(lsmdf)
fieldsToCompute=c('lsmqhash',"Ch1","Ch2","ChannelName0","swapchannels","DimensionX", "DimensionY",
"DimensionZ", "VoxelSizeX", "VoxelSizeY","VoxelSizeZ")
lsmdf[,fieldsToCompute]=NA
# can accept a path to cached dataframe, if so read it in
if(is.character(oldlsmdf)){
if(!file.exists(oldlsmdf)){
warning("Unable to read cached dataframe from: ", oldlsmdf)
oldlsmdf=NULL
} else {
if(Verbose) message("Loading cached dataframe")
oldlsmdf=LoadObjsFromRda(oldlsmdf)[[1]]
}
}
if(!is.null(oldlsmdf)){
# we're going to use a cached version of the lsmdf table
# first drop anything that's not unique by md5
oldlsmdf=oldlsmdf[!duplicated(oldlsmdf$txtmd5) & !is.na(oldlsmdf$txtmd5), , drop=FALSE]
# then use the md5 for rownames
rownames(oldlsmdf)=oldlsmdf$txtmd5
# figure out for which rows we have cached data
gene_names_withmd5match=lsmdf[lsmdf$txtmd5%in%oldlsmdf$txtmd5,"gene_name"]
# and the one we will need to parse from scratch
gene_names_toparse=setdiff(lsmdf$gene_name,gene_names_withmd5match)
# now add matching data
fieldstocopy=intersect(fieldsToCompute,colnames(oldlsmdf))
# using md5s to look up data from cached dataframe
lsmdf[gene_names_withmd5match,fieldstocopy]=
oldlsmdf[lsmdf[gene_names_withmd5match,'txtmd5'],fieldstocopy]
} else {
# parse all rows from scratch
gene_names_toparse=lsmdf$gene_name
}
if(Verbose) message("Parsing ",length(gene_names_toparse)," metadata files from scratch")
for (g in gene_names_toparse){
p=try(parse_key_lsm_metadata(file.path(lsmdir,lsmdf[g,"txtfile"]),
ReturnRawMetaData=TRUE))
if(inherits(p,"try-error")) {
warning("Error parsing metadata for gene_name: ",g)
next
}
# NOTE(review): parse_key_lsm_metadata's p[[1]] contains 7 values
# (DimensionX/Y/Z, DimensionChannels and the 3 VoxelSizes, in file line
# order) but only 6 columns are assigned here -- verify length/ordering.
lsmdf[g,c("DimensionX", "DimensionY", "DimensionZ", "VoxelSizeX", "VoxelSizeY",
"VoxelSizeZ")]=p[[1]]
lsmdf[g,"Ch1"]=p[[2]][1]
lsmdf[g,"Ch2"]=p[[2]][2]
lsmdf[g,"ChannelName0"]=p[[2]][3]
# quickhash.lsm is defined elsewhere in the package.
lsmdf[g,"lsmqhash"]=quickhash.lsm(file.path(lsmdir,lsmdf[g,"lsmfile"]))
# will need to swap channels if brain is not channel 1
# TODO - figure out a way to provide this - pass in a user function?
# lsmdf[g,"swapchannels"] = FCLSMBrainChannel(p)!=1
if(!is.null(extrafields)){
# extrafields receives the raw metadata text and returns a named vector;
# any brand-new field names become NA columns for all other rows.
xi=extrafields(attr(p,'rawmd'))
missing_cols=setdiff(names(xi),colnames(lsmdf))
lsmdf[,missing_cols]=NA
lsmdf[g,names(xi)]=xi
}
}
return(lsmdf)
}
#' Parse key Zeiss LSM metadata into an R list
#'
#' @param f Path to file containing lsm metadata
#' @param text Text version of lsm metadata (optional, otherwise read from file)
#' @param ReturnRawMetaData Whether to return the raw metadata from the file.
#'
#' @return a list containing parsed metadata
#' @export
#' @importFrom tools file_ext
#' @seealso \code{\link{lsm_metadata}}
#' Parse key Zeiss LSM metadata into an R list
#'
#' @param f Path to file containing lsm metadata
#' @param text Text version of lsm metadata (optional, otherwise read from file)
#' @param ReturnRawMetaData Whether to return the raw metadata from the file.
#'
#' @return a list containing parsed metadata: dim (Dimension/VoxelSize
#'   values), chan (channel names), lens, timestamp, bits; the source file
#'   path and (optionally) the raw text are attached as attributes.
#' @export
#' @importFrom tools file_ext
#' @seealso \code{\link{lsm_metadata}}
parse_key_lsm_metadata <- function(f, text = NULL, ReturnRawMetaData = FALSE) {
  ext <- file_ext(f)
  if (ext == "lsm") {
    # Caller handed us the raw .lsm image; dump its metadata to text first.
    text <- lsm_metadata(f = f)
  }
  ll <- if (!is.null(text)) text else readLines(f)
  # Channel names: slots 1-2 from "DataChannel Name #N" lines, slot 3 from
  # the "ChannelName0" line.  Missing entries stay NA.
  chans <- rep(NA_character_, 3L)
  ch1 <- sub(".*Name: (.*)", "\\1",
             grep("DataChannel Name #1", ll, fixed = TRUE, value = TRUE))
  chans[1] <- ifelse(length(ch1) > 0, ch1, NA)
  ch2 <- sub(".*Name: (.*)", "\\1",
             grep("DataChannel Name #2", ll, fixed = TRUE, value = TRUE))
  chans[2] <- ifelse(length(ch2) > 0, ch2, NA)
  chnm0 <- grep("ChannelName0", ll, fixed = TRUE, value = TRUE)
  if (length(chnm0) > 0) {
    # BUG FIX: the original stripped the prefix from chans[3] (still NA at
    # this point) instead of from the matched line, so ChannelName0 was
    # always NA.  Strip it from the first matching line instead.
    chans[3] <- sub("ChannelName0: ", "", chnm0[1])
  }
  names(chans) <- c("Chan1Name", "Chan2Name", "ChannelName0")
  # TODO: add Pixel type = uint16
  selected_lines <- grep("Dimension([XYZ]|Channels)", ll, value = TRUE)
  selected_lines <- c(selected_lines, grep("VoxelSize[XYZ]", ll, value = TRUE))
  # Turn "Key: value" lines into a named vector; numeric when every value
  # parses as a number, otherwise left as character.
  parse_values <- function(lines) {
    valueslist <- strsplit(lines, ": ")
    values_str <- sapply(valueslist, "[[", 2)
    values <- suppressWarnings(as.numeric(values_str))
    if (any(is.na(values)))
      values <- values_str
    names(values) <- sapply(valueslist, "[[", 1)
    values
  }
  dimvalues <- parse_values(selected_lines)
  # Expect exactly DimensionX/Y/Z, DimensionChannels and VoxelSizeX/Y/Z.
  if (length(dimvalues) != 7) stop("Error retrieving Dimension metadata for file:", f)
  lens_lines <- grep("(Recording Objective|Zoom X)", ll, value = TRUE)
  lensvalues <- parse_values(lens_lines)
  timestamp <- parse_values(grep("Sample 0Time", ll, value = TRUE))
  # LSM timestamps are fractional days since the OLE epoch (1899-12-30).
  timestamp <- ISOdatetime(1899, 12, 30, 0, 0, 0) + timestamp[[1]] * 60 * 60 * 24
  bits <- parse_values(grep("Bits Per Sample", ll, value = TRUE))
  structure(list(dim = dimvalues, chan = chans, lens = lensvalues,
                 timestamp = timestamp, bits = bits),
            file = f,
            rawmd = if (ReturnRawMetaData) ll else NULL)
}
|
/R/lsm_metadata.R
|
no_license
|
jefferislab/jimpipeline
|
R
| false
| false
| 8,576
|
r
|
#'Extract the metadata from Zeiss LSM file using loci bioformats as text vector
#'
#'Specifically, this relies on the showinf tool of loci Details
#'@param f Path to lsm file
#'@param cachefile Whether to save a copy of metadata to disk (TRUE)
#'@param ReturnMetaData Whether to return metadata rather than success (default:
#' \code{TRUE})
#'@param Force whether to re-parse metadata even if a cached version exists
#'@param UseLock whether to use a lock file while parsing for simple parallelism
#'@return character vector of metadata OR TRUE/FALSE for success
#'@export
#'@importFrom nat.utils RunCmdForNewerInput makelock
#'@seealso \code{\link{parse_key_lsm_metadata}}
#' @examples \dontrun{
#' lsmdir=file.path(fcconfig$regroot,"lsms")
#' for(f in dir(lsmdir,patt="lsm$",full=T)) lsm_metadata(f,UseLock=TRUE)
#'}
# Run the loci-tools `showinf` dumper on a Zeiss LSM file and return
# (and optionally cache) the resulting text metadata.
#
# cachefile semantics:
#   TRUE      -> cache next to the lsm as <name>.txt (default)
#   FALSE     -> no cache file; capture output in memory
#   function  -> called with f to produce the cache path
#   character -> explicit cache path
# Returns the metadata lines (invisibly) when ReturnMetaData=TRUE,
# otherwise TRUE on success; FALSE on any failure.
lsm_metadata<-function(f,cachefile=TRUE,
ReturnMetaData=TRUE,Force=FALSE,UseLock=FALSE){
if(!file.exists(f)) {
warning("File: ",f," is missing")
return(FALSE)
}
if(tools::file_ext(f)=="bz2"){
# we need to uncompress first
# NOTE(review): bunzip2() is not defined or imported here -- presumably
# R.utils::bunzip2; confirm the package dependency.
f=bunzip2(f)
}
# Normalise cachefile into a concrete path (or TRUE for capture-to-memory)
# and mirror it in `stdout`, which is what showinf output is routed to.
if(is.logical(cachefile)) {
if(cachefile) {
cachefile=sub("\\.lsm$",".txt",f)
stdout=cachefile
} else stdout=TRUE
} else if(is.function(cachefile)){
stdout <- cachefile <- cachefile(f)
} else stdout=cachefile
# Reuse an up-to-date cache unless Force was requested.
# RunCmdForNewerInput comes from nat.utils (see @importFrom above).
if(is.character(cachefile) && file.exists(cachefile)){
if(!Force && !RunCmdForNewerInput(NULL,f,cachefile)){
if(ReturnMetaData) return(readLines(cachefile))
else return(TRUE)
}
}
# Simple file-based lock so parallel workers don't dump the same file;
# returning FALSE means another process already holds the lock.
if(is.character(stdout) && UseLock){
lockfile=paste(stdout,sep=".","lock")
if(!makelock(lockfile)) return (FALSE)
on.exit(unlink(lockfile))
}
# cached file is older, or missing, or we used force
rval=showinf(f, outfile=ifelse(is.character(cachefile), cachefile, FALSE))
# A positive numeric return is showinf's error status; remove any partial
# cache file so a later run re-parses from scratch.
if(is.numeric(rval) && rval>0) {
warning("showinf error for file: ",f)
if(is.character(stdout)) unlink(stdout)
return(FALSE)
}
# Success: hand back the metadata (from the cache file, or as captured).
if(ReturnMetaData) {
if (is.character(stdout)) invisible(readLines(stdout))
else invisible(rval)
}
else return(TRUE)
}
# Load every object stored in an .rda file and return them as an
# (unnamed) list, in the order that load() reports their names.
LoadObjsFromRda <- function(f) {
  store <- new.env(parent = emptyenv())
  obj_names <- load(f, envir = store)
  lapply(obj_names, function(nm) get(nm, envir = store))
}
#' Make summary dataframe for lsm files and their key metadata
#'
#' @details Will calculate md5 sums for all input files and then use this to
#' update a dataframe.
#'
#' If an extrafields function is supplied, it should take one parameter, the
#' list output of \code{\link{parse_key_lsm_metadata}}, which will include as
#' an attribute the full contents of the metadata file dumped by the loci
#' showinf tool. The function should take care to return values for all fields
#' that it \emph{could} return each time it is called even if these are NA and
#' to ensure that values have a consistent data type per field.
#' @param lsmdir Path to directory containing lsm files
#' @param oldlsmdf Dataframe or path to rda file containing one
#' @param extrafields Function to parse extra fields from metadata for each
#' image
#' @param Verbose Logical: Show messages about progress/updating of cached data
#' @return dataframe
#' @export
#' @seealso \code{\link{lsm_metadata}}, \code{\link{parse_key_lsm_metadata}}
make_lsm_df<-function(lsmdir,oldlsmdf=NULL,extrafields=NULL,Verbose=TRUE){
# One row per <name>.txt metadata dump in lsmdir; the matching lsm file is
# assumed to be <name>.lsm alongside it.
lsmdf=data.frame(txtfile=dir(lsmdir,pattern = "txt$"),stringsAsFactors=FALSE)
lsmdf$lsmfile=sub("txt$","lsm",lsmdf$txtfile)
lsmdf$gene_name=sub(".txt","",lsmdf$txtfile,fixed=TRUE)
rownames(lsmdf)=lsmdf$gene_name
lsmdf$txtmtime=file.info(file.path(lsmdir,lsmdf$txtfile))$mtime
lsmfi=file.info(file.path(lsmdir,lsmdf$lsmfile))
lsmdf$lsmsize=lsmfi$size
lsmdf$lsmmtime=lsmfi$mtime
if(Verbose) message("Computing md5 sums for ",nrow(lsmdf),' metadata files.\n')
# md5 of the text metadata is the cache key used to match rows in oldlsmdf.
lsmdf$txtmd5=tools::md5sum(file.path(lsmdir,lsmdf$txtfile))
if(nrow(lsmdf)==0) return(lsmdf)
fieldsToCompute=c('lsmqhash',"Ch1","Ch2","ChannelName0","swapchannels","DimensionX", "DimensionY",
"DimensionZ", "VoxelSizeX", "VoxelSizeY","VoxelSizeZ")
lsmdf[,fieldsToCompute]=NA
# can accept a path to cached dataframe, if so read it in
if(is.character(oldlsmdf)){
if(!file.exists(oldlsmdf)){
warning("Unable to read cached dataframe from: ", oldlsmdf)
oldlsmdf=NULL
} else {
if(Verbose) message("Loading cached dataframe")
oldlsmdf=LoadObjsFromRda(oldlsmdf)[[1]]
}
}
if(!is.null(oldlsmdf)){
# we're going to use a cached version of the lsmdf table
# first drop anything that's not unique by md5
oldlsmdf=oldlsmdf[!duplicated(oldlsmdf$txtmd5) & !is.na(oldlsmdf$txtmd5), , drop=FALSE]
# then use the md5 for rownames
rownames(oldlsmdf)=oldlsmdf$txtmd5
# figure out for which rows we have cached data
gene_names_withmd5match=lsmdf[lsmdf$txtmd5%in%oldlsmdf$txtmd5,"gene_name"]
# and the one we will need to parse from scratch
gene_names_toparse=setdiff(lsmdf$gene_name,gene_names_withmd5match)
# now add matching data
fieldstocopy=intersect(fieldsToCompute,colnames(oldlsmdf))
# using md5s to look up data from cached dataframe
lsmdf[gene_names_withmd5match,fieldstocopy]=
oldlsmdf[lsmdf[gene_names_withmd5match,'txtmd5'],fieldstocopy]
} else {
# parse all rows from scratch
gene_names_toparse=lsmdf$gene_name
}
if(Verbose) message("Parsing ",length(gene_names_toparse)," metadata files from scratch")
for (g in gene_names_toparse){
p=try(parse_key_lsm_metadata(file.path(lsmdir,lsmdf[g,"txtfile"]),
ReturnRawMetaData=TRUE))
if(inherits(p,"try-error")) {
warning("Error parsing metadata for gene_name: ",g)
next
}
# NOTE(review): parse_key_lsm_metadata's p[[1]] contains 7 values
# (DimensionX/Y/Z, DimensionChannels and the 3 VoxelSizes, in file line
# order) but only 6 columns are assigned here -- verify length/ordering.
lsmdf[g,c("DimensionX", "DimensionY", "DimensionZ", "VoxelSizeX", "VoxelSizeY",
"VoxelSizeZ")]=p[[1]]
lsmdf[g,"Ch1"]=p[[2]][1]
lsmdf[g,"Ch2"]=p[[2]][2]
lsmdf[g,"ChannelName0"]=p[[2]][3]
# quickhash.lsm is defined elsewhere in the package.
lsmdf[g,"lsmqhash"]=quickhash.lsm(file.path(lsmdir,lsmdf[g,"lsmfile"]))
# will need to swap channels if brain is not channel 1
# TODO - figure out a way to provide this - pass in a user function?
# lsmdf[g,"swapchannels"] = FCLSMBrainChannel(p)!=1
if(!is.null(extrafields)){
# extrafields receives the raw metadata text and returns a named vector;
# any brand-new field names become NA columns for all other rows.
xi=extrafields(attr(p,'rawmd'))
missing_cols=setdiff(names(xi),colnames(lsmdf))
lsmdf[,missing_cols]=NA
lsmdf[g,names(xi)]=xi
}
}
return(lsmdf)
}
#' Parse key Zeiss LSM metadata into an R list
#'
#' @param f Path to file containing lsm metadata
#' @param text Text version of lsm metadata (optional, otherwise read from file)
#' @param ReturnRawMetaData Whether to return the raw metadata from the file.
#'
#' @return a list containing parsed metadata
#' @export
#' @importFrom tools file_ext
#' @seealso \code{\link{lsm_metadata}}
#' Parse key Zeiss LSM metadata into an R list
#'
#' @param f Path to file containing lsm metadata
#' @param text Text version of lsm metadata (optional, otherwise read from file)
#' @param ReturnRawMetaData Whether to return the raw metadata from the file.
#'
#' @return a list containing parsed metadata: dim (Dimension/VoxelSize
#'   values), chan (channel names), lens, timestamp, bits; the source file
#'   path and (optionally) the raw text are attached as attributes.
#' @export
#' @importFrom tools file_ext
#' @seealso \code{\link{lsm_metadata}}
parse_key_lsm_metadata <- function(f, text = NULL, ReturnRawMetaData = FALSE) {
  ext <- file_ext(f)
  if (ext == "lsm") {
    # Caller handed us the raw .lsm image; dump its metadata to text first.
    text <- lsm_metadata(f = f)
  }
  ll <- if (!is.null(text)) text else readLines(f)
  # Channel names: slots 1-2 from "DataChannel Name #N" lines, slot 3 from
  # the "ChannelName0" line.  Missing entries stay NA.
  chans <- rep(NA_character_, 3L)
  ch1 <- sub(".*Name: (.*)", "\\1",
             grep("DataChannel Name #1", ll, fixed = TRUE, value = TRUE))
  chans[1] <- ifelse(length(ch1) > 0, ch1, NA)
  ch2 <- sub(".*Name: (.*)", "\\1",
             grep("DataChannel Name #2", ll, fixed = TRUE, value = TRUE))
  chans[2] <- ifelse(length(ch2) > 0, ch2, NA)
  chnm0 <- grep("ChannelName0", ll, fixed = TRUE, value = TRUE)
  if (length(chnm0) > 0) {
    # BUG FIX: the original stripped the prefix from chans[3] (still NA at
    # this point) instead of from the matched line, so ChannelName0 was
    # always NA.  Strip it from the first matching line instead.
    chans[3] <- sub("ChannelName0: ", "", chnm0[1])
  }
  names(chans) <- c("Chan1Name", "Chan2Name", "ChannelName0")
  # TODO: add Pixel type = uint16
  selected_lines <- grep("Dimension([XYZ]|Channels)", ll, value = TRUE)
  selected_lines <- c(selected_lines, grep("VoxelSize[XYZ]", ll, value = TRUE))
  # Turn "Key: value" lines into a named vector; numeric when every value
  # parses as a number, otherwise left as character.
  parse_values <- function(lines) {
    valueslist <- strsplit(lines, ": ")
    values_str <- sapply(valueslist, "[[", 2)
    values <- suppressWarnings(as.numeric(values_str))
    if (any(is.na(values)))
      values <- values_str
    names(values) <- sapply(valueslist, "[[", 1)
    values
  }
  dimvalues <- parse_values(selected_lines)
  # Expect exactly DimensionX/Y/Z, DimensionChannels and VoxelSizeX/Y/Z.
  if (length(dimvalues) != 7) stop("Error retrieving Dimension metadata for file:", f)
  lens_lines <- grep("(Recording Objective|Zoom X)", ll, value = TRUE)
  lensvalues <- parse_values(lens_lines)
  timestamp <- parse_values(grep("Sample 0Time", ll, value = TRUE))
  # LSM timestamps are fractional days since the OLE epoch (1899-12-30).
  timestamp <- ISOdatetime(1899, 12, 30, 0, 0, 0) + timestamp[[1]] * 60 * 60 * 24
  bits <- parse_values(grep("Bits Per Sample", ll, value = TRUE))
  structure(list(dim = dimvalues, chan = chans, lens = lensvalues,
                 timestamp = timestamp, bits = bits),
            file = f,
            rawmd = if (ReturnRawMetaData) ll else NULL)
}
|
# Fit an H2O Ensemble (Super Learner / stacked ensemble):
#   1. V-fold cross-validate each base learner to build the N x L
#      level-one matrix Z of out-of-fold predictions (via .make_Z).
#   2. Regress y on Z with `metalearner` to learn the model combination.
#
# Args:
#   x, y             Predictor column names and response column name.
#   training_frame   H2OFrame (or frame id) of training data.
#   model_id         Accepted but not used in this function.
#   validation_frame Defaults to training_frame when NULL; not otherwise
#                    consumed below.  # NOTE(review): confirm intent.
#   family           "AUTO" (detect from response type), "binomial" or
#                    "gaussian"; multinomial is not implemented.
#   learner          Character vector of base-learner wrapper names.
#   metalearner      Name of the metalearner wrapper ("h2o.*" or "SL.*").
#   cvControl        list(V = folds, shuffle = ...); see .cv_control().
#   seed             Numeric seed for fold assignment and fitting.
#   parallel         "seq", "multicore" or a snow cluster (only "seq" works).
#   keep_levelone_data  Keep the N x L frame Z in the returned object?
#
# Returns: list of class "h2o.ensemble" (base fits, metafit, folds,
# runtime info and optionally the level-one data).
h2o.ensemble <- function(x, y, training_frame,
model_id = "", validation_frame = NULL,
family = c("AUTO", "binomial", "gaussian"),
learner = c("h2o.glm.wrapper", "h2o.randomForest.wrapper", "h2o.gbm.wrapper", "h2o.deeplearning.wrapper"),
metalearner = "h2o.glm.wrapper",
cvControl = list(V = 5, shuffle = TRUE), #maybe change this to cv_control
seed = 1,
parallel = "seq", #only seq implemented
keep_levelone_data = TRUE)
{
starttime <- Sys.time()
runtime <- list()
# Training_frame may be a key or an H2O H2OFrame object
if ((!inherits(training_frame, "H2OFrame") && !inherits(training_frame, "H2OH2OFrame")))
tryCatch(training_frame <- h2o.getFrame(training_frame),
error = function(err) {
stop("argument \"training_frame\" must be a valid H2O H2OFrame or id")
})
if (!is.null(validation_frame)) {
if (is.character(validation_frame))
tryCatch(validation_frame <- h2o.getFrame(validation_frame),
error = function(err) {
stop("argument \"validation_frame\" must be a valid H2O H2OFrame or id")
})
}
N <- dim(training_frame)[1L] #Number of observations in training set
if (is.null(validation_frame)) {
validation_frame <- training_frame
}
# Determine prediction task family type automatically
# TO DO: Add auto-detection for other distributions like gamma - right now auto-detect as "gaussian"
if (length(family) > 0) {
family <- match.arg(family)
}
if (family == "AUTO") {
if (is.factor(training_frame[,y])) {
numcats <- length(h2o.levels(training_frame[,y]))
if (numcats == 2) {
family <- "binomial"
} else {
stop("Multinomial case not yet implemented for h2o.ensemble. Check here for progress: https://0xdata.atlassian.net/browse/PUBDEV-2355")
}
} else {
family <- "gaussian"
}
}
# Check that if specified, family matches data type for response
# binomial must be factor/enum and gaussian must be numeric
if (family == c("gaussian")) {
if (!is.numeric(training_frame[,y])) {
stop("When `family` is gaussian, the repsonse column must be numeric.")
}
# TO DO: Update this ylim calc when h2o.range method gets implemented for H2OH2OFrame cols
ylim <- c(min(training_frame[,y]), max(training_frame[,y])) #Used to enforce bounds
} else {
if (!is.factor(training_frame[,y])) {
stop("When `family` is binomial, the repsonse column must be a factor.")
} else {
numcats <- length(h2o.levels(training_frame[,y]))
if (numcats > 2) {
stop("Multinomial case not yet implemented for h2o.ensemble. Check here for progress: https://0xdata.atlassian.net/browse/PUBDEV-2355")
}
}
ylim <- NULL
}
# Update control args by filling in missing list elements
cvControl <- do.call(".cv_control", cvControl)
V <- cvControl$V #Number of CV folds
L <- length(learner) #Number of distinct learners
idxs <- expand.grid(1:V,1:L)
names(idxs) <- c("v","l")
# Validate learner and metalearner arguments
if (length(metalearner)>1 | !is.character(metalearner) | !exists(metalearner)) {
stop("The 'metalearner' argument must be a string, specifying the name of a base learner wrapper function.")
}
if (sum(!sapply(learner, exists))>0) {
stop("'learner' function name(s) not found.")
}
if (!exists(metalearner)) {
stop("'metalearner' function name not found.")
}
# The 'family' must be a string, not an R function input like gaussian()
# No support for multiclass at the moment, just binary classification or regression
if (!(family %in% c("binomial", "gaussian"))) {
stop("'family' not supported")
}
if (inherits(parallel, "character")) {
if (!(parallel %in% c("seq","multicore"))) {
stop("'parallel' must be either 'seq' or 'multicore' or a snow cluster object")
}
} else if (!inherits(parallel, "cluster")) {
stop("'parallel' must be either 'seq' or 'multicore' or a snow cluster object")
}
# Begin ensemble code
if (is.numeric(seed)) set.seed(seed) #If seed is specified, set seed prior to next step
folds <- sample(rep(seq(V), ceiling(N/V)))[1:N] # Cross-validation folds (stratified folds not yet supported)
training_frame$fold_id <- as.h2o(folds) # Add a fold_id column for each observation so we can subset by row later
# What type of metalearning function do we have?
# The h2o version is memory-optimized (the N x L level-one matrix, Z, never leaves H2O memory);
# SuperLearner metalearners provide additional metalearning algos, but has a much bigger memory footprint
# NOTE(review): metalearner_type stays undefined if metalearner matches
# neither prefix; the exists() check above makes that unlikely but not
# impossible.
if (grepl("^SL.", metalearner)) {
metalearner_type <- "SuperLearner"
} else if (grepl("^h2o.", metalearner)){
metalearner_type <- "h2o"
}
# Create the Z matrix of cross-validated predictions
mm <- .make_Z(x = x, y = y, training_frame = training_frame,
family = family,
learner = learner,
parallel = parallel,
seed = seed,
V = V,
L = L,
idxs = idxs,
metalearner_type = metalearner_type)
# TO DO: Could pass on the metalearner arg instead of metalearner_type and get this info internally
basefits <- mm$basefits
Z <- mm$Z #pure Z (dimension N x L)
# Metalearning: Regress y onto Z to learn optimal combination of base models
# TO DO: Replace grepl for metalearner_type
# TO DO: Pass on additional args to match.fun(metalearner) for h2o type
print("Metalearning")
if (is.numeric(seed)) set.seed(seed) #If seed given, set seed prior to next step
if (grepl("^SL.", metalearner)) {
# this is very hacky and should be used only for testing
if (is.character(family)) {
familyFun <- get(family, mode = "function", envir = parent.frame())
#print(familyFun$family) #does not work for SL.glmnet
}
# SuperLearner-style metalearners want plain R data structures, so the
# level-one frame and the response are pulled into local memory here.
Zdf <- as.data.frame(Z)
Y <- as.data.frame(training_frame[,c(y)])[,1]
# TO DO: for parity, need to add y col to Z like we do below
runtime$metalearning <- system.time(metafit <- match.fun(metalearner)(Y = Y,
X = Zdf,
newX = Zdf,
family = familyFun,
id = seq(N),
obsWeights = rep(1,N)), gcFirst = FALSE)
} else {
Z$y <- training_frame[,c(y)] # do we want to add y to the Z frame?
runtime$metalearning <- system.time(metafit <- match.fun(metalearner)(x = learner,
y = "y",
training_frame = Z,
validation_frame = NULL,
family = family), gcFirst = FALSE)
}
# Since baselearning is now performed along with CV, see if we can get this info, or deprecate this
runtime$baselearning <- NULL
runtime$total <- Sys.time() - starttime
# Keep level-one data?
if (!keep_levelone_data) {
Z <- NULL
}
# Ensemble model
out <- list(x = x,
y = y,
family = family,
learner = learner,
metalearner = metalearner,
cvControl = cvControl,
folds = folds,
ylim = ylim,
seed = seed,
parallel = parallel,
basefits = basefits,
metafit = metafit,
levelone = Z, #levelone = cbind(Z, y)
runtime = runtime,
h2o_version = packageVersion(pkg = "h2o"),
h2oEnsemble_version = packageVersion(pkg = "h2oEnsemble"))
class(out) <- "h2o.ensemble"
return(out)
}
# Generate the CV predicted values for all learners
# Build the level-one matrix Z of V-fold cross-validated predictions.
# Each of the L learners is fit via .fitWrapper (which cross-validates
# using the "fold_id" column added by h2o.ensemble); the per-fold holdout
# prediction frames are then stitched into one N-row column per learner.
# Returns list(Z = <N x L H2OFrame>, basefits = <named list of fits>).
# NOTE(review): parallel, idxs and metalearner_type are accepted but not
# used in this body, and runtime$cv is computed but never returned.
.make_Z <- function(x, y, training_frame, family, learner, parallel, seed, V, L, idxs, metalearner_type = c("h2o", "SuperLearner")) {
# Do V-fold cross-validation of each learner (in a loop/apply over 1:L)...
fitlist <- sapply(X = 1:L, FUN = .fitWrapper, y = y, xcols = x, training_frame = training_frame,
validation_frame = NULL, family = family, learner = learner,
seed = seed, fold_column = "fold_id",
simplify = FALSE)
runtime <- list()
runtime$cv <- lapply(fitlist, function(ll) ll$fittime)
names(runtime$cv) <- learner
basefits <- lapply(fitlist, function(ll) ll$fit) #Base fits (trained on full data) to be saved
names(basefits) <- learner
# In the case of binary classification, a 3-col HDF is returned, colnames == c("predict", "p0", "p1")
# In the case of regression, 1-col HDF is already returned, colname == "predict"
.compress_cvpred_into_1col <- function(l, family) {
# return the frame_id of the resulting 1-col Hdf of cvpreds for learner l
if (family %in% c("bernoulli", "binomial")) {
predlist <- sapply(1:V, function(v) h2o.getFrame(basefits[[l]]@model$cross_validation_predictions[[v]]$name)[,3], simplify = FALSE)
} else {
predlist <- sapply(1:V, function(v) h2o.getFrame(basefits[[l]]@model$cross_validation_predictions[[v]]$name)$predict, simplify = FALSE)
}
cvpred_sparse <- h2o.cbind(predlist) #N x V Hdf with rows that are all zeros, except corresponding to the v^th fold if that rows is associated with v
cvpred_col <- apply(cvpred_sparse, 1, sum)
return(cvpred_col)
}
cvpred_framelist <- sapply(1:L, function(l) .compress_cvpred_into_1col(l, family))
Z <- h2o.cbind(cvpred_framelist)
names(Z) <- learner
return(list(Z = Z, basefits = basefits))
}
# Train a model using learner l
# Train a single base model using learner l.
#
# Args:
#   l                Index into `learner` of the wrapper function to call.
#   y, x             Response column name and predictor column names.
#   training_frame   H2OFrame of training data.
#   validation_frame Unused here; wrappers are always called with NULL.
#   family           "binomial" or "gaussian".
#   learner          Character vector of learner wrapper function names.
#   seed             Numeric seed (set before fitting); non-numeric skips.
#   fold_column      Name of the fold-id column, or NULL for no CV.
# Returns: the fitted model object produced by the learner wrapper.
.fitFun <- function(l, y, x, training_frame, validation_frame, family, learner, seed, fold_column) {
  # BUG FIX: `cv` was only assigned when fold_column was non-NULL, so a
  # NULL fold_column made `keep_cross_validation_folds = cv` fail with
  # "object 'cv' not found".  Cross-validate iff a fold column is given.
  cv <- !is.null(fold_column)
  if (is.numeric(seed)) set.seed(seed)  # If seed given, set seed prior to next step
  if (("x" %in% names(formals(learner[l]))) && (as.character(formals(learner[l])$x)[1] != "")) {
    # Special case where we pass a subset of the colnames, x, in a custom
    # learner function wrapper (the wrapper hard-codes its own x default).
    fit <- match.fun(learner[l])(y = y, training_frame = training_frame, validation_frame = NULL,
                                 family = family, fold_column = fold_column,
                                 keep_cross_validation_folds = cv)
  } else {
    # Use all predictors in training set for training
    fit <- match.fun(learner[l])(y = y, x = x, training_frame = training_frame, validation_frame = NULL,
                                 family = family, fold_column = fold_column,
                                 keep_cross_validation_folds = cv)
  }
  fit
}
# Wrapper function for .fitFun to record system.time
# Wrapper around .fitFun that announces which base learner is being
# trained and records how long the fit took (wall/CPU via system.time).
# Returns list(fit = <model>, fittime = <proc_time>).
.fitWrapper <- function(l, y, xcols, training_frame, validation_frame, family, learner, seed, fold_column) {
  msg <- sprintf("Cross-validating and training base learner %s: %s", l, learner[l])
  print(msg)
  elapsed <- system.time(
    model <- .fitFun(l, y, xcols, training_frame, validation_frame,
                     family, learner, seed, fold_column),
    gcFirst = FALSE
  )
  list(fit = model, fittime = elapsed)
}
# Parameters controlling the cross-validation process.
# Only V is currently consumed by h2o.ensemble; stratifyCV is accepted
# but stratified folds are not yet implemented upstream (a modified
# SuperLearner::CVFolds could enable stratification by outcome later).
.cv_control <- function(V = 5L, stratifyCV = TRUE, shuffle = TRUE) {
  V <- as.integer(V)  # number of cross-validation folds
  for (flag_name in c("stratifyCV", "shuffle")) {
    if (!is.logical(get(flag_name))) {
      stop(sprintf("'%s' must be logical", flag_name))
    }
  }
  list(V = V, stratifyCV = stratifyCV, shuffle = shuffle)
}
#' Predict method for "h2o.ensemble" objects.
#'
#' Generates base-learner predictions on `newdata`, assembles them into a
#' level-one frame (one column per base fit), then feeds that frame to the
#' metalearner.
#' @param object An object of class "h2o.ensemble".
#' @param newdata H2OFrame of new observations.
#' @param ... Currently ignored.
#' @return list(pred = metalearner predictions, basepred = level-one frame).
predict.h2o.ensemble <- function(object, newdata, ...) {
  # Column 3 of an H2O binomial prediction frame is p1; column 1 of a
  # regression prediction frame is the predicted value.
  if (object$family == "binomial") {
    basepred <- h2o.cbind(sapply(object$basefits, function(ll) h2o.predict(object = ll, newdata = newdata)[,3]))
  } else {
    basepred <- h2o.cbind(sapply(object$basefits, function(ll) h2o.predict(object = ll, newdata = newdata)[,1]))
  }
  names(basepred) <- names(object$basefits)
  # BUG FIX: class() can return a vector of classes, and `if (grepl(...))`
  # on a length > 1 condition is an error in R >= 4.2 (a warning before).
  # Collapse to a single logical with any().
  if (any(grepl("H2O", class(object$metafit)))) {
    # H2O ensemble metalearner from wrappers.R: keep everything in H2O.
    pred <- h2o.predict(object = object$metafit, newdata = basepred)
  } else {
    # SuperLearner wrapper function metalearner expects a plain data.frame.
    basepreddf <- as.data.frame(basepred)
    pred <- predict(object = object$metafit$fit, newdata = basepreddf)
  }
  list(pred = pred, basepred = basepred)
}
# Pretty-printer for h2o.ensemble fits: shows the model family, the base
# learner library and the metalearner.  Output format is fixed and
# intentionally identical to previous releases.
print.h2o.ensemble <- function(x, ...) {
  cat("\nH2O Ensemble fit")
  cat("\n----------------")
  fields <- list(family = x$family, learner = x$learner, metalearner = x$metalearner)
  for (nm in names(fields)) {
    cat("\n", nm, ": ", paste(fields[[nm]], collapse = " "), sep = "")
  }
  cat("\n\n")
}
# plot.h2o.ensemble <- function(x, ...) {
# cat("\nPlotting for an H2O Ensemble fit is not implemented at this time.")
# }
|
/h2o-r/ensemble/h2oEnsemble-package/R/ensemble.R
|
permissive
|
josh-whitney/h2o-3
|
R
| false
| false
| 13,564
|
r
|
# Fit an H2O Ensemble (Super Learner / stacked ensemble):
#   1. V-fold cross-validate each base learner to build the N x L
#      level-one matrix Z of out-of-fold predictions (via .make_Z).
#   2. Regress y on Z with `metalearner` to learn the model combination.
#
# Args:
#   x, y             Predictor column names and response column name.
#   training_frame   H2OFrame (or frame id) of training data.
#   model_id         Accepted but not used in this function.
#   validation_frame Defaults to training_frame when NULL; not otherwise
#                    consumed below.  # NOTE(review): confirm intent.
#   family           "AUTO" (detect from response type), "binomial" or
#                    "gaussian"; multinomial is not implemented.
#   learner          Character vector of base-learner wrapper names.
#   metalearner      Name of the metalearner wrapper ("h2o.*" or "SL.*").
#   cvControl        list(V = folds, shuffle = ...); see .cv_control().
#   seed             Numeric seed for fold assignment and fitting.
#   parallel         "seq", "multicore" or a snow cluster (only "seq" works).
#   keep_levelone_data  Keep the N x L frame Z in the returned object?
#
# Returns: list of class "h2o.ensemble" (base fits, metafit, folds,
# runtime info and optionally the level-one data).
h2o.ensemble <- function(x, y, training_frame,
model_id = "", validation_frame = NULL,
family = c("AUTO", "binomial", "gaussian"),
learner = c("h2o.glm.wrapper", "h2o.randomForest.wrapper", "h2o.gbm.wrapper", "h2o.deeplearning.wrapper"),
metalearner = "h2o.glm.wrapper",
cvControl = list(V = 5, shuffle = TRUE), #maybe change this to cv_control
seed = 1,
parallel = "seq", #only seq implemented
keep_levelone_data = TRUE)
{
starttime <- Sys.time()
runtime <- list()
# Training_frame may be a key or an H2O H2OFrame object
if ((!inherits(training_frame, "H2OFrame") && !inherits(training_frame, "H2OH2OFrame")))
tryCatch(training_frame <- h2o.getFrame(training_frame),
error = function(err) {
stop("argument \"training_frame\" must be a valid H2O H2OFrame or id")
})
if (!is.null(validation_frame)) {
if (is.character(validation_frame))
tryCatch(validation_frame <- h2o.getFrame(validation_frame),
error = function(err) {
stop("argument \"validation_frame\" must be a valid H2O H2OFrame or id")
})
}
N <- dim(training_frame)[1L] #Number of observations in training set
if (is.null(validation_frame)) {
validation_frame <- training_frame
}
# Determine prediction task family type automatically
# TO DO: Add auto-detection for other distributions like gamma - right now auto-detect as "gaussian"
if (length(family) > 0) {
family <- match.arg(family)
}
if (family == "AUTO") {
if (is.factor(training_frame[,y])) {
numcats <- length(h2o.levels(training_frame[,y]))
if (numcats == 2) {
family <- "binomial"
} else {
stop("Multinomial case not yet implemented for h2o.ensemble. Check here for progress: https://0xdata.atlassian.net/browse/PUBDEV-2355")
}
} else {
family <- "gaussian"
}
}
# Check that if specified, family matches data type for response
# binomial must be factor/enum and gaussian must be numeric
if (family == c("gaussian")) {
if (!is.numeric(training_frame[,y])) {
stop("When `family` is gaussian, the repsonse column must be numeric.")
}
# TO DO: Update this ylim calc when h2o.range method gets implemented for H2OH2OFrame cols
ylim <- c(min(training_frame[,y]), max(training_frame[,y])) #Used to enforce bounds
} else {
if (!is.factor(training_frame[,y])) {
stop("When `family` is binomial, the repsonse column must be a factor.")
} else {
numcats <- length(h2o.levels(training_frame[,y]))
if (numcats > 2) {
stop("Multinomial case not yet implemented for h2o.ensemble. Check here for progress: https://0xdata.atlassian.net/browse/PUBDEV-2355")
}
}
ylim <- NULL
}
# Update control args by filling in missing list elements
cvControl <- do.call(".cv_control", cvControl)
V <- cvControl$V #Number of CV folds
L <- length(learner) #Number of distinct learners
idxs <- expand.grid(1:V,1:L)
names(idxs) <- c("v","l")
# Validate learner and metalearner arguments
if (length(metalearner)>1 | !is.character(metalearner) | !exists(metalearner)) {
stop("The 'metalearner' argument must be a string, specifying the name of a base learner wrapper function.")
}
if (sum(!sapply(learner, exists))>0) {
stop("'learner' function name(s) not found.")
}
if (!exists(metalearner)) {
stop("'metalearner' function name not found.")
}
# The 'family' must be a string, not an R function input like gaussian()
# No support for multiclass at the moment, just binary classification or regression
if (!(family %in% c("binomial", "gaussian"))) {
stop("'family' not supported")
}
if (inherits(parallel, "character")) {
if (!(parallel %in% c("seq","multicore"))) {
stop("'parallel' must be either 'seq' or 'multicore' or a snow cluster object")
}
} else if (!inherits(parallel, "cluster")) {
stop("'parallel' must be either 'seq' or 'multicore' or a snow cluster object")
}
# Begin ensemble code
if (is.numeric(seed)) set.seed(seed) #If seed is specified, set seed prior to next step
folds <- sample(rep(seq(V), ceiling(N/V)))[1:N] # Cross-validation folds (stratified folds not yet supported)
training_frame$fold_id <- as.h2o(folds) # Add a fold_id column for each observation so we can subset by row later
# What type of metalearning function do we have?
# The h2o version is memory-optimized (the N x L level-one matrix, Z, never leaves H2O memory);
# SuperLearner metalearners provide additional metalearning algos, but has a much bigger memory footprint
# NOTE(review): metalearner_type stays undefined if metalearner matches
# neither prefix; the exists() check above makes that unlikely but not
# impossible.
if (grepl("^SL.", metalearner)) {
metalearner_type <- "SuperLearner"
} else if (grepl("^h2o.", metalearner)){
metalearner_type <- "h2o"
}
# Create the Z matrix of cross-validated predictions
mm <- .make_Z(x = x, y = y, training_frame = training_frame,
family = family,
learner = learner,
parallel = parallel,
seed = seed,
V = V,
L = L,
idxs = idxs,
metalearner_type = metalearner_type)
# TO DO: Could pass on the metalearner arg instead of metalearner_type and get this info internally
basefits <- mm$basefits
Z <- mm$Z #pure Z (dimension N x L)
# Metalearning: Regress y onto Z to learn optimal combination of base models
# TO DO: Replace grepl for metalearner_type
# TO DO: Pass on additional args to match.fun(metalearner) for h2o type
print("Metalearning")
if (is.numeric(seed)) set.seed(seed) #If seed given, set seed prior to next step
if (grepl("^SL.", metalearner)) {
# this is very hacky and should be used only for testing
if (is.character(family)) {
familyFun <- get(family, mode = "function", envir = parent.frame())
#print(familyFun$family) #does not work for SL.glmnet
}
# SuperLearner-style metalearners want plain R data structures, so the
# level-one frame and the response are pulled into local memory here.
Zdf <- as.data.frame(Z)
Y <- as.data.frame(training_frame[,c(y)])[,1]
# TO DO: for parity, need to add y col to Z like we do below
runtime$metalearning <- system.time(metafit <- match.fun(metalearner)(Y = Y,
X = Zdf,
newX = Zdf,
family = familyFun,
id = seq(N),
obsWeights = rep(1,N)), gcFirst = FALSE)
} else {
Z$y <- training_frame[,c(y)] # do we want to add y to the Z frame?
runtime$metalearning <- system.time(metafit <- match.fun(metalearner)(x = learner,
y = "y",
training_frame = Z,
validation_frame = NULL,
family = family), gcFirst = FALSE)
}
# Since baselearning is now performed along with CV, see if we can get this info, or deprecate this
runtime$baselearning <- NULL
runtime$total <- Sys.time() - starttime
# Keep level-one data?
if (!keep_levelone_data) {
Z <- NULL
}
# Ensemble model
out <- list(x = x,
y = y,
family = family,
learner = learner,
metalearner = metalearner,
cvControl = cvControl,
folds = folds,
ylim = ylim,
seed = seed,
parallel = parallel,
basefits = basefits,
metafit = metafit,
levelone = Z, #levelone = cbind(Z, y)
runtime = runtime,
h2o_version = packageVersion(pkg = "h2o"),
h2oEnsemble_version = packageVersion(pkg = "h2oEnsemble"))
class(out) <- "h2o.ensemble"
return(out)
}
# Generate the CV predicted values for all learners
#
# Builds the N x L "level-one" matrix Z of cross-validated predictions by
# fitting each of the L base learners with V-fold CV (using the "fold_id"
# column previously added to `training_frame`) and collapsing the per-fold
# holdout predictions into a single column per learner.
#
# Args:
#   x, y:             predictor column names and response column name
#   training_frame:   H2OFrame holding the training data (must contain "fold_id")
#   family:           "binomial"/"bernoulli" or "gaussian"
#   learner:          character vector of L base-learner wrapper function names
#   parallel, idxs:   accepted for interface compatibility; not used in this body
#   seed, V, L:       RNG seed, number of CV folds, number of learners
#   metalearner_type: "h2o" or "SuperLearner" (not used in this body)
#
# Returns: list(Z = N x L H2OFrame of cross-validated predictions,
#          basefits = list of the L models trained on the full data).
.make_Z <- function(x, y, training_frame, family, learner, parallel, seed, V, L, idxs, metalearner_type = c("h2o", "SuperLearner")) {
  # Do V-fold cross-validation of each learner (in a loop/apply over 1:L)...
  fitlist <- sapply(X = 1:L, FUN = .fitWrapper, y = y, xcols = x, training_frame = training_frame,
                    validation_frame = NULL, family = family, learner = learner,
                    seed = seed, fold_column = "fold_id",
                    simplify = FALSE)
  runtime <- list()
  runtime$cv <- lapply(fitlist, function(ll) ll$fittime)  # per-learner CV timing
  names(runtime$cv) <- learner
  # NOTE(review): `runtime` is computed here but not returned, so the CV
  # timing information is dropped — confirm whether it should be returned.
  basefits <- lapply(fitlist, function(ll) ll$fit)  # Base fits (trained on full data) to be saved
  names(basefits) <- learner
  # In the case of binary classification, a 3-col HDF is returned, colnames == c("predict", "p0", "p1")
  # In the case of regression, 1-col HDF is already returned, colname == "predict"
  .compress_cvpred_into_1col <- function(l, family) {
    # return the frame_id of the resulting 1-col Hdf of cvpreds for learner l
    if (family %in% c("bernoulli", "binomial")) {
      # column 3 holds p1, the predicted probability of the positive class
      predlist <- sapply(1:V, function(v) h2o.getFrame(basefits[[l]]@model$cross_validation_predictions[[v]]$name)[,3], simplify = FALSE)
    } else {
      predlist <- sapply(1:V, function(v) h2o.getFrame(basefits[[l]]@model$cross_validation_predictions[[v]]$name)$predict, simplify = FALSE)
    }
    cvpred_sparse <- h2o.cbind(predlist)  # N x V Hdf with rows that are all zeros, except corresponding to the v^th fold if that rows is associated with v
    # Summing across the V sparse columns leaves each row's single holdout prediction
    cvpred_col <- apply(cvpred_sparse, 1, sum)
    return(cvpred_col)
  }
  cvpred_framelist <- sapply(1:L, function(l) .compress_cvpred_into_1col(l, family))
  Z <- h2o.cbind(cvpred_framelist)
  names(Z) <- learner
  return(list(Z = Z, basefits = basefits))
}
# Train a model using learner l
#
# Fits the learner named by `learner[l]` on `training_frame`, enabling H2O's
# internal cross-validation (and retention of per-fold holdout predictions)
# when a `fold_column` is supplied.
#
# Args:
#   l:                index into the `learner` character vector
#   y, x:             response column name and predictor column names
#   training_frame:   training data (H2OFrame)
#   validation_frame: accepted for interface compatibility; NULL is forwarded
#   family:           model family, forwarded to the learner wrapper
#   learner:          character vector of learner wrapper function names
#   seed:             if numeric, set.seed() is called before fitting
#   fold_column:      name of the fold-id column, or NULL for no CV
#
# Returns: the fitted model object produced by the learner wrapper.
.fitFun <- function(l, y, x, training_frame, validation_frame, family, learner, seed, fold_column) {
  # BUG FIX: `cv` was previously assigned only when fold_column was non-NULL
  # (`if (!is.null(fold_column)) cv = TRUE`), so a NULL fold_column caused
  # an "object 'cv' not found" error at the calls below.
  cv <- !is.null(fold_column)
  if (is.numeric(seed)) set.seed(seed)  # If seed given, set seed prior to next step
  if (("x" %in% names(formals(learner[l]))) && (as.character(formals(learner[l])$x)[1] != "")) {
    # Special case where we pass a subset of the colnames, x, in a custom learner function wrapper
    fit <- match.fun(learner[l])(y = y, training_frame = training_frame, validation_frame = NULL, family = family, fold_column = fold_column, keep_cross_validation_folds = cv)
  } else {
    # Use all predictors in training set for training
    fit <- match.fun(learner[l])(y = y, x = x, training_frame = training_frame, validation_frame = NULL, family = family, fold_column = fold_column, keep_cross_validation_folds = cv)
  }
  return(fit)
}
# Wrapper around .fitFun that records the wall-clock fitting time.
#
# Prints a progress line, fits base learner `l` via .fitFun, and returns
# both the fitted model and its system.time() measurement.
#
# Returns: list(fit = fitted model, fittime = "proc_time" timing object).
.fitWrapper <- function(l, y, xcols, training_frame, validation_frame, family, learner, seed, fold_column) {
  print(sprintf("Cross-validating and training base learner %s: %s", l, learner[l]))
  # Time the fit; gcFirst = FALSE avoids a forced garbage collection beforehand
  elapsed <- system.time(
    model <- .fitFun(l, y, xcols, training_frame, validation_frame, family,
                     learner, seed, fold_column),
    gcFirst = FALSE
  )
  list(fit = model, fittime = elapsed)
}
# Parameters that control the CV process.
#
# Only V is currently used by h2o.ensemble; `stratifyCV` and `shuffle` are
# carried along for forward compatibility (per the original notes,
# stratification by outcome is not yet enabled in h2o.ensemble; a modified
# SuperLearner::CVFolds function or similar could enable it in the future).
#
# Args:
#   V:          number of cross-validation folds (coerced to integer)
#   stratifyCV: logical flag; an error is raised otherwise
#   shuffle:    logical flag; an error is raised otherwise
#
# Returns: list(V, stratifyCV, shuffle)
.cv_control <- function(V = 5L, stratifyCV = TRUE, shuffle = TRUE){
  n_folds <- as.integer(V)  # Number of cross-validation folds
  if (!is.logical(stratifyCV)) {
    stop("'stratifyCV' must be logical")
  }
  if (!is.logical(shuffle)) {
    stop("'shuffle' must be logical")
  }
  list(V = n_folds, stratifyCV = stratifyCV, shuffle = shuffle)
}
# Predict method for h2o.ensemble fits.
#
# First builds the level-one feature frame by collecting each base learner's
# predictions on `newdata` (for binomial families column 3 = p1, the
# probability of the positive class; otherwise column 1 = "predict"), then
# feeds that frame to the metalearner.
#
# Args:
#   object:  a fitted "h2o.ensemble" object
#   newdata: H2OFrame of new observations
#   ...:     accepted for S3 compatibility; ignored
#
# Returns: list(pred = metalearner predictions,
#          basepred = N x L frame of base-learner predictions).
predict.h2o.ensemble <- function(object, newdata, ...) {
  if (object$family == "binomial") {
    basepred <- h2o.cbind(sapply(object$basefits, function(ll) h2o.predict(object = ll, newdata = newdata)[,3]))
  } else {
    basepred <- h2o.cbind(sapply(object$basefits, function(ll) h2o.predict(object = ll, newdata = newdata)[,1]))
  }
  names(basepred) <- names(object$basefits)
  # BUG FIX: class() can return a vector of class names, and since R 4.2 an
  # `if` condition of length > 1 is an error; reduce the grepl() result with
  # any() so multi-class metafit objects are handled safely.
  if (any(grepl("H2O", class(object$metafit)))) {
    # H2O ensemble metalearner from wrappers.R
    pred <- h2o.predict(object = object$metafit, newdata = basepred)
  } else {
    # SuperLearner wrapper function metalearner: predict on an R data.frame
    basepreddf <- as.data.frame(basepred)
    pred <- predict(object = object$metafit$fit, newdata = basepreddf)
  }
  out <- list(pred = pred, basepred = basepred)
  return(out)
}
# Print method for h2o.ensemble fits: shows the model family, the base
# learner library, and the metalearner of a fitted ensemble.
print.h2o.ensemble <- function(x, ...) {
  cat("\nH2O Ensemble fit")
  cat("\n----------------")
  # Emit each field as "\n<name>: <value>" (vectors print space-separated)
  for (field in c("family", "learner", "metalearner")) {
    cat("\n", field, ": ", sep = "")
    cat(x[[field]])
  }
  cat("\n\n")
}
# plot.h2o.ensemble <- function(x, ...) {
# cat("\nPlotting for an H2O Ensemble fit is not implemented at this time.")
# }
|
# Elbow-method demo for choosing k in k-means clustering.
# For k = 1..4 the total within-cluster sum of squares is recorded and
# plotted against k; the bend ("elbow") in the curve suggests a good k.
View(iris)
# Use only the four numeric measurement columns (drop Species)
iris1 <- iris[, c(1, 2, 3, 4)]
# 4 clusters, at most 10 iterations, nstart = 1 (a single random start)
kmeans_iris <- kmeans(iris1, 4, iter.max = 10, nstart = 1)
kmeans_iris
# Total within-cluster sum of squares for k = 1..4 (values copied from the
# printed kmeans output obtained for each k)
k1 <- 681.3706
k2 <- 28.55208 + 123.79588
k3 <- 39.82097 + 15.15100 + 23.87947
k4 <- 15.151000 + 9.749286 + 17.014222 + 15.351111
a <- c(1, 2, 3, 4)
b <- c(k1, k2, k3, k4)
df <- data.frame(a, b)
plot(df, type = "b", col = "blue")  # elbow plot for the iris data
# Repeat the procedure for the CTG data set
Ctg <- read.csv("C:/Users/AKHIL/Desktop/New folder/CTG.csv")
View(Ctg)
Ctg1 <- Ctg[, c(1, 2, 3)]
View(Ctg1)
kmeans_Ctg <- kmeans(Ctg1, 3, iter.max = 10, nstart = 1)
kmeans_Ctg
k5 <- 205794.4
k6 <- 41444.66 + 29959.39
# BUG FIX: k7 and k8 were written with "-" (subtraction) instead of "<-"
# (assignment), so neither variable was created and c(k5, k6, k7, k8) failed.
k7 <- 12212.789 + 8834.274 + 13606.472
k8 <- 2460.920 + 5177.792 + 5874.535 + 10156.479
c <- c(1, 2, 3, 4)
d <- c(k5, k6, k7, k8)
df_1 <- data.frame(c, d)
# BUG FIX: `type = b` referenced the numeric vector b defined above; the
# plot type must be the string "b" (points joined by lines).
plot(df_1, type = "b", col = "green")
|
/elbow.R
|
no_license
|
nikhilraj0025/K-mEAns-Elbow-Method-in-R
|
R
| false
| false
| 809
|
r
|
# Elbow-method demo for choosing k in k-means clustering.
# For k = 1..4 the total within-cluster sum of squares is recorded and
# plotted against k; the bend ("elbow") in the curve suggests a good k.
View(iris)
# Use only the four numeric measurement columns (drop Species)
iris1 <- iris[, c(1, 2, 3, 4)]
# 4 clusters, at most 10 iterations, nstart = 1 (a single random start)
kmeans_iris <- kmeans(iris1, 4, iter.max = 10, nstart = 1)
kmeans_iris
# Total within-cluster sum of squares for k = 1..4 (values copied from the
# printed kmeans output obtained for each k)
k1 <- 681.3706
k2 <- 28.55208 + 123.79588
k3 <- 39.82097 + 15.15100 + 23.87947
k4 <- 15.151000 + 9.749286 + 17.014222 + 15.351111
a <- c(1, 2, 3, 4)
b <- c(k1, k2, k3, k4)
df <- data.frame(a, b)
plot(df, type = "b", col = "blue")  # elbow plot for the iris data
# Repeat the procedure for the CTG data set
Ctg <- read.csv("C:/Users/AKHIL/Desktop/New folder/CTG.csv")
View(Ctg)
Ctg1 <- Ctg[, c(1, 2, 3)]
View(Ctg1)
kmeans_Ctg <- kmeans(Ctg1, 3, iter.max = 10, nstart = 1)
kmeans_Ctg
k5 <- 205794.4
k6 <- 41444.66 + 29959.39
# BUG FIX: k7 and k8 were written with "-" (subtraction) instead of "<-"
# (assignment), so neither variable was created and c(k5, k6, k7, k8) failed.
k7 <- 12212.789 + 8834.274 + 13606.472
k8 <- 2460.920 + 5177.792 + 5874.535 + 10156.479
c <- c(1, 2, 3, 4)
d <- c(k5, k6, k7, k8)
df_1 <- data.frame(c, d)
# BUG FIX: `type = b` referenced the numeric vector b defined above; the
# plot type must be the string "b" (points joined by lines).
plot(df_1, type = "b", col = "green")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh-actions.R
\name{gha_add_secret}
\alias{gha_add_secret}
\title{Add a GitHub Actions secret to a repository}
\usage{
gha_add_secret(
secret,
name,
repo_slug = NULL,
remote = "origin",
visibility = "all",
selected_repositories = NULL
)
}
\arguments{
\item{secret}{\verb{[character]}\cr
The value which should be encrypted (e.g. a Personal Access Token).}
\item{name}{\verb{[character]}\cr
The name of the secret as which it will appear in the "Secrets" overview of
the repository.}
\item{repo_slug}{\verb{[character]}\cr
Repository slug of the repository to which the secret should be added.
Must follow the form \code{owner/repo}.}
\item{remote}{\verb{[character]}\cr
If \code{repo_slug = NULL}, the \code{repo_slug} is determined by the respective git
remote.}
\item{visibility}{\verb{[character]}\cr
The level of visibility for the secret. One of \code{"all"}, \code{"private"}, or
\code{"selected"}.
See https://developer.github.com/v3/actions/secrets/#create-or-update-an-organization-secret
for more information.}
\item{selected_repositories}{\verb{[character]}\cr
Vector of repository ids for which the secret is accessible.
Only applies if \code{visibility = "selected"} was set.}
}
\description{
Encrypts the supplied value using \code{libsodium} and adds it as a
secret to the given GitHub repository. Secrets can be be used in GitHub
Action runs as environment variables.
A common use case is to encrypt Personal Access Tokens (PAT) or API keys.
This is the same as adding a secret manually in GitHub via
\code{"Settings" -> "Secrets" -> "New repository secret"}
}
\examples{
\dontrun{
gha_add_secret("supersecret", name = "MY_SECRET", repo_slug = "ropensci/tic")
}
}
|
/man/gha_add_secret.Rd
|
no_license
|
ropensci/tic
|
R
| false
| true
| 1,827
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gh-actions.R
\name{gha_add_secret}
\alias{gha_add_secret}
\title{Add a GitHub Actions secret to a repository}
\usage{
gha_add_secret(
secret,
name,
repo_slug = NULL,
remote = "origin",
visibility = "all",
selected_repositories = NULL
)
}
\arguments{
\item{secret}{\verb{[character]}\cr
The value which should be encrypted (e.g. a Personal Access Token).}
\item{name}{\verb{[character]}\cr
The name of the secret as which it will appear in the "Secrets" overview of
the repository.}
\item{repo_slug}{\verb{[character]}\cr
Repository slug of the repository to which the secret should be added.
Must follow the form \code{owner/repo}.}
\item{remote}{\verb{[character]}\cr
If \code{repo_slug = NULL}, the \code{repo_slug} is determined by the respective git
remote.}
\item{visibility}{\verb{[character]}\cr
The level of visibility for the secret. One of \code{"all"}, \code{"private"}, or
\code{"selected"}.
See https://developer.github.com/v3/actions/secrets/#create-or-update-an-organization-secret
for more information.}
\item{selected_repositories}{\verb{[character]}\cr
Vector of repository ids for which the secret is accessible.
Only applies if \code{visibility = "selected"} was set.}
}
\description{
Encrypts the supplied value using \code{libsodium} and adds it as a
secret to the given GitHub repository. Secrets can be used in GitHub
Action runs as environment variables.
A common use case is to encrypt Personal Access Tokens (PAT) or API keys.
This is the same as adding a secret manually in GitHub via
\code{"Settings" -> "Secrets" -> "New repository secret"}
}
\examples{
\dontrun{
gha_add_secret("supersecret", name = "MY_SECRET", repo_slug = "ropensci/tic")
}
}
|
# Extracted example script for csv::as.csv (taken from the package's Rd
# examples). Requires the third-party "csv" package.
library(csv)
### Name: as.csv
### Title: Read or Write CSV Using Selected Conventions
### Aliases: as.csv
### ** Examples
# First rows of the Theoph data set serve as example data
data <- head(Theoph)
# Write the data to a temporary CSV file ...
filepath <- file.path(tempdir(),'theoph.csv')
as.csv(data,filepath)
# ... then call as.csv with only the path to read it back
# (per the title above, as.csv both reads and writes)
as.csv(filepath)
|
/data/genthat_extracted_code/csv/examples/as.csv.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 235
|
r
|
# Extracted example script for csv::as.csv (taken from the package's Rd
# examples). Requires the third-party "csv" package.
library(csv)
### Name: as.csv
### Title: Read or Write CSV Using Selected Conventions
### Aliases: as.csv
### ** Examples
# First rows of the Theoph data set serve as example data
data <- head(Theoph)
# Write the data to a temporary CSV file ...
filepath <- file.path(tempdir(),'theoph.csv')
as.csv(data,filepath)
# ... then call as.csv with only the path to read it back
# (per the title above, as.csv both reads and writes)
as.csv(filepath)
|
# Internal H2O runit benchmark: import a large CSV (7M rows) from HDFS and
# time both the parse and a GBM model build. Requires a Hadoop cluster and
# the h2o-runit test harness; not runnable standalone.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
rtest <- function() {
  # Locate the data set on the cluster's HDFS name node
  hdfs_name_node = hadoop.namenode()
  hdfs_data_file = "/datasets/bigdata/7MRows_4400KCols.csv"
  #----------------------------------------------------------------------
  # Parameters for the test.
  #----------------------------------------------------------------------
  url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_data_file)
  # Time the HDFS import/parse step
  parse_time <- system.time(data.hex <- h2o.importFile(url))
  print("Time it took to parse")
  print(parse_time)
  # Start modeling
  # GBM
  response="C1" #1:1000 imbalance
  predictors=c(4:ncol(data.hex))  # use all columns from the 4th onward
  # Start modeling
  # Gradient Boosted Trees (timed)
  gbm_time <- system.time(mdl.gbm <- h2o.gbm(x=predictors, y=response, training_frame=data.hex, distribution = "AUTO"))
  mdl.gbm
  print("Time it took to build GBM")
  print(gbm_time)
}
doTest("Test",rtest)
|
/h2o-r/tests/testdir_algos/gbm/runit_INTERNAL_GBM_7MRows_4.4KCols_xlarge.R
|
permissive
|
tamseo/h2o-3
|
R
| false
| false
| 898
|
r
|
# Internal H2O runit benchmark: import a large CSV (7M rows) from HDFS and
# time both the parse and a GBM model build. Requires a Hadoop cluster and
# the h2o-runit test harness; not runnable standalone.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
rtest <- function() {
  # Locate the data set on the cluster's HDFS name node
  hdfs_name_node = hadoop.namenode()
  hdfs_data_file = "/datasets/bigdata/7MRows_4400KCols.csv"
  #----------------------------------------------------------------------
  # Parameters for the test.
  #----------------------------------------------------------------------
  url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_data_file)
  # Time the HDFS import/parse step
  parse_time <- system.time(data.hex <- h2o.importFile(url))
  print("Time it took to parse")
  print(parse_time)
  # Start modeling
  # GBM
  response="C1" #1:1000 imbalance
  predictors=c(4:ncol(data.hex))  # use all columns from the 4th onward
  # Start modeling
  # Gradient Boosted Trees (timed)
  gbm_time <- system.time(mdl.gbm <- h2o.gbm(x=predictors, y=response, training_frame=data.hex, distribution = "AUTO"))
  mdl.gbm
  print("Time it took to build GBM")
  print(gbm_time)
}
doTest("Test",rtest)
|
#' Returns predictor vector for design matrix
#' @description Returns predictor vector for design matrix from 44 astronomical angular velocities.
#' @param xi Transit index (interpreted in degrees; converted to radians internally).
#' @param ma Max number of predictors.
#' @param ivar Dummy (unused).
#' @param tdiff Length of input time series.
#' @return The predictor vector. Values between -1, 1.
Funcs <- function (xi, ma = 89, ivar = 1, tdiff) {
  deg2rad <- 0.017453292519943
  xi <- deg2rad * xi
  # The 44 astronomical angular velocities (degrees per unit time)
  omegas <- c(
     0.054809904,   0.115308512,   0.904885870,   1.020194382,
     1.809771741,   2.040388764,  11.597841752,  11.713150263,
    13.468112100,  13.522922004,  13.583420612,  13.638230516,
    13.693040419,  15.563310768,  23.426300526,  24.215877885,
    25.181262364,  25.236072267,  25.290882171,  25.351380779,
    27.045844008,  27.161152519,  27.221651128,  27.276461031,
    27.331270935,  36.949222530,  37.738799889,  38.704184367,
    38.758994271,  38.813804174,  38.874302783,  38.989611294,
    40.799383035,  49.451950152,  50.472144534,  52.281916275,
    52.512533298,  54.552922062,  62.185294797,  63.995066538,
    66.035455302,  75.708216801,  77.748605565, 100.944289068)
  # afunc[1] is the intercept; (cos, sin) pairs for each velocity follow
  afunc <- vector()
  afunc[1] <- 1
  rayleigh.criterion <- 0
  cos_idx <- seq(2, ma, 2)
  afunc[cos_idx]     <- cos(omegas[cos_idx / 2] * xi)
  afunc[cos_idx + 1] <- sin(omegas[cos_idx / 2] * xi)
  # Zero out pairs whose frequency separation cannot be resolved on a record
  # of length tdiff (Rayleigh criterion: separation * tdiff must exceed 360
  # degrees). NOTE: the loop-carried `hh` and the next/break pattern are kept
  # exactly as in the original to preserve behavior.
  hh <- 0
  for (h in 0:44) {
    if (h < hh) next
    for (hh in ((h + 1):44)) {
      if (hh > 44) break
      if (h == 0) {
        rayleigh.criterion <- omegas[hh] * tdiff
      } else {
        rayleigh.criterion <- (omegas[hh] - omegas[h]) * tdiff
      }
      if (rayleigh.criterion > 360) {
        break
      } else {
        afunc[2 * hh] <- 0
        afunc[2 * hh + 1] <- 0
      }
    }
  }
  afunc
}
|
/TideCurves/R/Funcs.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,442
|
r
|
#' Returns predictor vector for design matrix
#' @description Returns predictor vector for design matrix from 44 astronomical angular velocities.
#' @param xi Transit index (interpreted in degrees; converted to radians internally).
#' @param ma Max number of predictors.
#' @param ivar Dummy (unused).
#' @param tdiff Length of input time series.
#' @return The predictor vector. Values between -1, 1.
Funcs <- function (xi, ma = 89, ivar = 1, tdiff) {
  deg2rad <- 0.017453292519943
  xi <- deg2rad * xi
  # The 44 astronomical angular velocities (degrees per unit time)
  omegas <- c(
     0.054809904,   0.115308512,   0.904885870,   1.020194382,
     1.809771741,   2.040388764,  11.597841752,  11.713150263,
    13.468112100,  13.522922004,  13.583420612,  13.638230516,
    13.693040419,  15.563310768,  23.426300526,  24.215877885,
    25.181262364,  25.236072267,  25.290882171,  25.351380779,
    27.045844008,  27.161152519,  27.221651128,  27.276461031,
    27.331270935,  36.949222530,  37.738799889,  38.704184367,
    38.758994271,  38.813804174,  38.874302783,  38.989611294,
    40.799383035,  49.451950152,  50.472144534,  52.281916275,
    52.512533298,  54.552922062,  62.185294797,  63.995066538,
    66.035455302,  75.708216801,  77.748605565, 100.944289068)
  # afunc[1] is the intercept; (cos, sin) pairs for each velocity follow
  afunc <- vector()
  afunc[1] <- 1
  rayleigh.criterion <- 0
  cos_idx <- seq(2, ma, 2)
  afunc[cos_idx]     <- cos(omegas[cos_idx / 2] * xi)
  afunc[cos_idx + 1] <- sin(omegas[cos_idx / 2] * xi)
  # Zero out pairs whose frequency separation cannot be resolved on a record
  # of length tdiff (Rayleigh criterion: separation * tdiff must exceed 360
  # degrees). NOTE: the loop-carried `hh` and the next/break pattern are kept
  # exactly as in the original to preserve behavior.
  hh <- 0
  for (h in 0:44) {
    if (h < hh) next
    for (hh in ((h + 1):44)) {
      if (hh > 44) break
      if (h == 0) {
        rayleigh.criterion <- omegas[hh] * tdiff
      } else {
        rayleigh.criterion <- (omegas[hh] - omegas[h]) * tdiff
      }
      if (rayleigh.criterion > 360) {
        break
      } else {
        afunc[2 * hh] <- 0
        afunc[2 * hh + 1] <- 0
      }
    }
  }
  afunc
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keyword_function.R
\name{keyword_function}
\alias{keyword_function}
\title{A Self-made Median Function}
\usage{
keyword_function(xlsxfile, keyword, outname)
}
\arguments{
\item{x}{A numeric vector.}
}
\description{
This function allows you to calculate the median from a numeric vector.
}
\examples{
median_function(seq(1:10))
}
\keyword{median}
|
/keyword/man/keyword_function.Rd
|
no_license
|
chihchieh/R-package
|
R
| false
| true
| 424
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keyword_function.R
\name{keyword_function}
\alias{keyword_function}
\title{A Self-made Median Function}
\usage{
keyword_function(xlsxfile, keyword, outname)
}
\arguments{
\item{x}{A numeric vector.}
}
\description{
This function allows you to calculate the median from a numeric vector.
}
\examples{
median_function(seq(1:10))
}
\keyword{median}
|
############################################################################
############ BASICS
############
############ This script matches with the examples presented in
############ "07-ApplicationIV-TreeBUGS.pdf"
############
############################################################################
## NOTE(review): workshop demo script for hierarchical multinomial
## processing tree (MPT) modeling with TreeBUGS. It requires JAGS plus the
## packages below, and expects the data/model files ("2htm.csv", "2htm.eqn",
## "2htm_constraints.txt", "covariates.csv", ...) in the working directory.
###################################### SETUP
### install TreeBUGS from CRAN
# install.packages("TreeBUGS")
### install TreeBUGS from GitHub (newest developer version)
# install.packages(c("devtools", "coda", "runjags", "hypergeo", "testthat",
#                    "rjags", "Rcpp", "RcppArmadillo", "logspline"))
# devtools::install_github("denis-arnold/TreeBUGS", build_vignettes = TRUE)
### load TreeBUGS
library("TreeBUGS")
# adjust working directory!
# setwd("MPT-Workshop/08 Application IV - TreeBUGS")
#
# RStudio: "Session"-->"Set Working Directory"-->"To Source File Location"
###################################### DATA STRUCTURE
# Response frequencies: one row per participant, one column per category
frequencies <- read.csv("2htm.csv")
head(frequencies, 5)
# plot example data:
plotFreq("2htm.csv", eqn = "2htm.eqn")
plotFreq(frequencies, boxplot = FALSE, eqn = "2htm.eqn")
###################################### FIT MODEL
# fitting with model files/csv files from disk:
fit_csv <- traitMPT(eqnfile = "2htm.eqn",
                    data = "2htm.csv",
                    restrictions = "2htm_constraints.txt")
fit_csv
summary(fit_csv)
# fitting in R
# Model defined inline in EQN format: each row is
# "<tree> <category> <probability expression>"
htm <- "
target hit do
target hit (1-do)*g
target miss (1-do)* (1-g)
lure cr dn
lure fa (1-dn)*g
lure cr (1-dn)*(1-g)
"
fit_R <- traitMPT(eqnfile = htm,
                  data = frequencies,
                  restrictions = list("dn=do"))
fit_R
summary(fit_R)
# beta-MPT (with hard-coded equality constraint):
htm_d <- "
target hit d
target hit (1-d)*g
target miss (1-d)* (1-g)
lure cr d
lure fa (1-d)*g
lure cr (1-d)*(1-g)
"
fit_beta <- betaMPT(eqnfile = htm_d, data = "2htm.csv")
fit_beta
summary(fit_beta)
# free parameter "g" for response bias
fit <- traitMPT(eqnfile = "2htm.eqn",
                data = "2htm.csv",
                restrictions = list("dn=do"))
###################################### CHECK CONVERGENCE
plot(fit, parameter = "mean", type = "default")
plot(fit, parameter = "sigma", type = "default")
# auto-correlation function (ideally close to zero):
plot(fit, parameter = "mean", type = "acf")
plot(fit, parameter = "rho", type = "acf")
# Gelman's Rhat statistic should be close to 1 (e.g., smaller than 1.05):
plot(fit, parameter = "mean", type = "gelman")
###################################### EXTEND SAMPLING
fit <- traitMPT(
  eqnfile = htm, data = frequencies,
  restrictions = list("dn=do"),
  n.adapt = 5000, # longer adaption of JAGS increases efficiency of sampling
  n.burnin = 5000,# longer burnin avoids issues due to bad starting values
  n.iter = 30000, # drawing more MCMC samples leads to higher precision
  n.thin = 10,    # ommitting every 10th sample reduces memory load
  n.chains = 4)
fit2 <- extendMPT(fit,            # fitted MPT model
                  n.adapt = 2000, # JAGS need to restart and adapt again
                  n.burnin = 0,   # burnin not needed if previous samples are OK
                  n.iter = 10000)
summary(fit)
summary(fit2)
plot(fit2)
###################################### PLOT ESTIMATES
# posterior distribution of specific parameters:
plot(fit, parameter = "mean", type = "density")
# group-level and individual MPT estimates:
plotParam(fit)
plotParam(fit, addLines = TRUE, select = c("dn", "g"))
# compare prior and posterior:
plotPriorPost(fit)
# distribution of individual MPT parameters:
plotDistribution(fit)
###################################### MODEL FIT
# graphical check of mean/covariance of frequencies:
colMeans(frequencies)
plotFit(fit)
cov(frequencies)
plotFit(fit, stat = "cov")
# posterior predictive p-values:
PPP(fit, M = 1000, nCPU = 4)
############################################################################
############ ADVANCED
############################################################################
###################################### WITHIN-SUBJECT COMPARISONS
# (1.) data file
freq_within <- read.csv("2htm_within.csv")
head(freq_within, 3)
# (2.) create EQN file for within-subject manipulations
withinSubjectEQN(htm_d,
                 labels = c("high","low"), # factor labels
                 constant=c("g"),          # parameters constrained across conditions
                 save = "2htm_within.eqn")
# (3.) fit to all conditions:
fit_within <- traitMPT("2htm_within.eqn", "2htm_within.csv")
plot(fit_within)
# (4.) compute difference in d:
diff_d <- transformedParameters(fit_within,
                                transformedParameters = list("diff_d = d_high - d_low"),
                                level = "group")
summary(diff_d)
plot(diff_d)
###################################### BETWEEN-SUBJECT COMPARISONS
# 1. fit MPT for each condition separately
fit1 <- traitMPT(htm_d, "2htm.csv")
fit2 <- traitMPT(htm_d, "2htm_group2.csv")
# 2. compute difference in parameters
diff_between <- betweenSubjectMPT(fit1, fit2,          # fitted MPT models
                                  par1 = "d",          # parameter to test
                                  stat = c("x-y","x>y"), # transformed parameters
                                  plot = TRUE)
diff_between
plot(diff_between$mcmc)
###################################### COVARIATES: CORRELATION
# include correlation with covariates:
fit_cor <- traitMPT(htm_d, data = "2htm.csv",
                    covData = "covariates.csv") # data with covariate(s)
# warning: posterior only quantifies uncertainty with respect to the MPT parameter estimates!
plot(fit_cor, "cor")
summary(fit_cor)
round(fit_cor$summary$group$cor, 2)
# We also need to consider the number of participants (sample size)!
correlationPosterior(fit_cor)
###################################### COVARIATES: REGRESSION
# probit regression for continuous covariate to predict MPT parameter:
fit_regression <- traitMPT(htm_d, data = "2htm.csv",
                           covData = "covariates.csv",
                           predStructure = list("d ; continuous"))
plot(fit_regression, "slope")
summary(fit_regression)
round(fit_regression$summary$group$slope, 2)
# Bayes Factor for Covariate
# * H0: Slope parameter beta=0
# * H1: Slope parameter beta ~ Cauchy(0, r) (with scale parameter r)
BayesFactorSlope(fit_regression,
                 parameter = "slope_d_continuous",
                 direction = ">",
                 plot = TRUE)
###################################### BETWEEN-SUBJECT similar to ANOVA
# Between-Subject Comparisons: Alternative method
# => Identical covariance matrix in each condition (as in ANOVA: "pooled variance")
fit_between <- traitMPT(
  htm_d, "2htm.csv",
  covData = "covariates.csv",
  predStructure = list("d ; discrete"), # discrete predictor
  predType = c("c","f"))                # "c" =continuous; "f"=fixed-effects
plot(fit_between, "factor_")
summary(fit_between)
# get estimates for the group-specific MPT parameters
gmeans <- getGroupMeans(fit_between)
round(gmeans, 2)
############################################################################
############ SIMULATION & ROBUSTNESS
############################################################################
###################################### CHANGING PRIORS
# what does the prior mean?
plotPrior(prior = list(mu = c(dn = "dnorm(0,1)",  # default prior
                              g = "dnorm(0,5)"),  # prior focused around 50%
                       xi="dunif(0,2)",  # more stable prior for scale parameter
                       V= diag(2), df = 3))  # default Wishart prior
fit_prior <- traitMPT("2htm.eqn", "2htm.csv", restrictions=list("dn=do"),
                      mu = c(dn = "dnorm(0,1)", # default prior
                             g = "dnorm(0,5)"), # prior focused around 50% guessing
                      xi = "dunif(0,2)",  # less disperson of MPT parameters
                      V = diag(2),  # default
                      df = 2 + 1)   # default (number of free parameters + 1)
summary(fit_prior)
summary(fit)  # comparison to default prior
###################################### PRIOR PREDICTIVE
pp <- priorPredictive(prior = list(mu = "dnorm(0,1)", xi="dunif(0,10)",
                                   V=diag(2), df=2+1),
                      eqnfile = htm, restrictions = list("dn=do"),
                      numItems = c(target = 50, lure = 50),
                      N = 50, M = 500)  # number of participants/samples
# compute and plot predicted values for the average hit/FA rates (in ROC space)
mean_freq <- sapply(pp, colMeans)
plot(mean_freq["fa",]/50, mean_freq["hit",]/50,
     asp = 1, xlim =0:1, ylim=0:1)
polygon(c(0,1,1), c(0,0,1), col = "gray")  # region below the diagonal
abline(0, 1)
###################################### POSTERIOR PREDICTIVE
# sample new data using the posterior samples
postpred <- posteriorPredictive(fit, M = 100, nCPU = 4)
mf <- sapply(postpred, colMeans)
plot(mf["fa",]/50, mf["hit",]/50, asp = 1, xlim =c(.2,.6), ylim=c(.4,.9),
     las=1,col = adjustcolor(1, alpha=.3),
     main = "Posterior predicted (mean frequencies)",
     xlab= "False Alarm Rate", ylab = "Hit Rate")
abline(0, 1)
polygon(c(0,1,1), c(0,0,1), col = "gray")
# observed mean frequencies for comparison (red point)
points(matrix(colMeans(frequencies)[c("fa", "hit")], 1)/50,
       col = 2, pch = 16, cex = .5)
###################################### SIMULATION
set.seed(123)
# standard MPT for one person (no hierarchical structure)
sim <- genMPT(theta = c(d = .7, g = .5),
              numItems = c(target = 50, lure = 50),
              eqnfile = htm_d)
sim
# hierarchical MPT
gendat <- genTraitMPT(N = 10,
                      numItems = c(target = 50, lure = 50),
                      eqnfile = htm_d,
                      mean = c(d = .6, g = .5),
                      sigma = c(d = 1, g = .5),
                      rho = diag(2)) ## get real data
gendat$data
# prior predictive:
# 1. generate parameters from prior
# 2. generate data from parameters
priorPredictive(prior = list(mu = "dnorm(0,1)",
                             xi="dunif(0,10)",
                             V=diag(2), df=2+1),
                htm_d,
                numItems = c(100, 100), level = "data",
                N = 1, M = 100, nCPU = 4)
########################## Appendix
# Testing for Heterogeneity (Smith & Batchelder, 2008)
# A) Test person heterogeneity assuming items homogeneity ($\chi^2$)
test <- testHetChi(freq = "2htm.csv",
                   tree = c(cr="lure", fa="lure",
                            hit="target",miss="target"))
data.frame(test)
# B) Test person heterogeneity under item heterogeneity (permutation bootstrap)
# => requires data in long format (variables: person / item / response)
# testHetPerm(data, tree, source = "person")
|
/2019-05 SMiP workshop (Mannheim)/07 Application III - TreeBUGS/07-ApplicationIII-TreeBUGS.R
|
no_license
|
danheck/MPT-workshop
|
R
| false
| false
| 10,974
|
r
|
############################################################################
############ BASICS
############
############ This script matches with the examples presented in
############ "07-ApplicationIV-TreeBUGS.pdf"
############
############################################################################
###################################### SETUP
### install TreeBUGS from CRAN
# install.packages("TreeBUGS")
### install TreeBUGS from GitHub (newest developer version)
# install.packages(c("devtools", "coda", "runjags", "hypergeo", "testthat",
# "rjags", "Rcpp", "RcppArmadillo", "logspline"))
# devtools::install_github("denis-arnold/TreeBUGS", build_vignettes = TRUE)
### load TreeBUGS
library("TreeBUGS")
# adjust working directory!
# setwd("MPT-Workshop/08 Application IV - TreeBUGS")
#
# RStudio: "Session"-->"Set Working Directory"-->"To Source File Location"
###################################### DATA STRUCTURE
frequencies <- read.csv("2htm.csv")
head(frequencies, 5)
# plot example data:
plotFreq("2htm.csv", eqn = "2htm.eqn")
plotFreq(frequencies, boxplot = FALSE, eqn = "2htm.eqn")
###################################### FIT MODEL
# fitting with model files/csv files from disk:
fit_csv <- traitMPT(eqnfile = "2htm.eqn",
data = "2htm.csv",
restrictions = "2htm_constraints.txt")
fit_csv
# ===========================================================================
# Hierarchical MPT modelling of a 2-high-threshold (2HTM) recognition model
# with the TreeBUGS package (traitMPT = latent-trait MPT, betaMPT = beta-MPT).
# NOTE(review): this section relies on objects created earlier in the file
# (fit_csv, frequencies) and on external files ("2htm.eqn", "2htm.csv",
# "covariates.csv", ...) — it is meant to be run top to bottom in one session.
# ===========================================================================
summary(fit_csv)
# fitting in R
# 2HTM model equations written inline (columns: tree, category, probability
# term); do/dn = detection of targets/lures, g = probability of guessing "old".
htm <- "
target hit do
target hit (1-do)*g
target miss (1-do)* (1-g)
lure cr dn
lure fa (1-dn)*g
lure cr (1-dn)*(1-g)
"
fit_R <- traitMPT(eqnfile = htm,
                data = frequencies,
                restrictions = list("dn=do"))
fit_R
summary(fit_R)
# beta-MPT (with hard-coded equality constraint):
htm_d <- "
target hit d
target hit (1-d)*g
target miss (1-d)* (1-g)
lure cr d
lure fa (1-d)*g
lure cr (1-d)*(1-g)
"
fit_beta <- betaMPT(eqnfile = htm_d, data = "2htm.csv")
fit_beta
summary(fit_beta)
# free parameter "g" for response bias
fit <- traitMPT(eqnfile = "2htm.eqn",
                data = "2htm.csv",
                restrictions = list("dn=do"))
###################################### CHECK CONVERGENCE
# Trace/density plots of group-level means and SDs:
plot(fit, parameter = "mean", type = "default")
plot(fit, parameter = "sigma", type = "default")
# auto-correlation function (ideally close to zero):
plot(fit, parameter = "mean", type = "acf")
plot(fit, parameter = "rho", type = "acf")
# Gelman's Rhat statistic should be close to 1 (e.g., smaller than 1.05):
plot(fit, parameter = "mean", type = "gelman")
###################################### EXTEND SAMPLING
fit <- traitMPT(
  eqnfile = htm, data = frequencies,
  restrictions = list("dn=do"),
  n.adapt = 5000, # longer adaptation of JAGS increases efficiency of sampling
  n.burnin = 5000,# longer burnin avoids issues due to bad starting values
  n.iter = 30000, # drawing more MCMC samples leads to higher precision
  n.thin = 10, # keeping only every 10th sample reduces memory load
  n.chains = 4)
fit2 <- extendMPT(fit, # fitted MPT model
                  n.adapt = 2000, # JAGS needs to restart and adapt again
                  n.burnin = 0, # burnin not needed if previous samples are OK
                  n.iter = 10000)
summary(fit)
summary(fit2)
plot(fit2)
###################################### PLOT ESTIMATES
# posterior distribution of specific parameters:
plot(fit, parameter = "mean", type = "density")
# group-level and individual MPT estimates:
plotParam(fit)
plotParam(fit, addLines = TRUE, select = c("dn", "g"))
# compare prior and posterior:
plotPriorPost(fit)
# distribution of individual MPT parameters:
plotDistribution(fit)
###################################### MODEL FIT
# graphical check of mean/covariance of frequencies:
colMeans(frequencies)
plotFit(fit)
cov(frequencies)
plotFit(fit, stat = "cov")
# posterior predictive p-values:
PPP(fit, M = 1000, nCPU = 4)
############################################################################
############ ADVANCED
############################################################################
###################################### WITHIN-SUBJECT COMPARISONS
# (1.) data file
freq_within <- read.csv("2htm_within.csv")
head(freq_within, 3)
# (2.) create EQN file for within-subject manipulations
withinSubjectEQN(htm_d,
                 labels = c("high","low"), # factor labels
                 constant=c("g"), # parameters constrained across conditions
                 save = "2htm_within.eqn")
# (3.) fit to all conditions:
fit_within <- traitMPT("2htm_within.eqn", "2htm_within.csv")
plot(fit_within)
# (4.) compute difference in d:
diff_d <- transformedParameters(fit_within,
                                transformedParameters = list("diff_d = d_high - d_low"),
                                level = "group")
summary(diff_d)
plot(diff_d)
###################################### BETWEEN-SUBJECT COMPARISONS
# 1. fit MPT for each condition separately
fit1 <- traitMPT(htm_d, "2htm.csv")
fit2 <- traitMPT(htm_d, "2htm_group2.csv")
# 2. compute difference in parameters
diff_between <- betweenSubjectMPT(fit1, fit2, # fitted MPT models
                                  par1 = "d", # parameter to test
                                  stat = c("x-y","x>y"), # transformed parameters
                                  plot = TRUE)
diff_between
plot(diff_between$mcmc)
###################################### COVARIATES: CORRELATION
# include correlation with covariates:
fit_cor <- traitMPT(htm_d, data = "2htm.csv",
                    covData = "covariates.csv") # data with covariate(s)
# warning: posterior only quantifies uncertainty with respect to the MPT parameter estimates!
plot(fit_cor, "cor")
summary(fit_cor)
round(fit_cor$summary$group$cor, 2)
# We also need to consider the number of participants (sample size)!
correlationPosterior(fit_cor)
###################################### COVARIATES: REGRESSION
# probit regression for continuous covariate to predict MPT parameter:
fit_regression <- traitMPT(htm_d, data = "2htm.csv",
                           covData = "covariates.csv",
                           predStructure = list("d ; continuous"))
plot(fit_regression, "slope")
summary(fit_regression)
round(fit_regression$summary$group$slope, 2)
# Bayes Factor for Covariate
# * H0: Slope parameter beta=0
# * H1: Slope parameter beta ~ Cauchy(0, r) (with scale parameter r)
BayesFactorSlope(fit_regression,
                 parameter = "slope_d_continuous",
                 direction = ">",
                 plot = TRUE)
###################################### BETWEEN-SUBJECT similar to ANOVA
# Between-Subject Comparisons: Alternative method
# => Identical covariance matrix in each condition (as in ANOVA: "pooled variance")
fit_between <- traitMPT(
  htm_d, "2htm.csv",
  covData = "covariates.csv",
  predStructure = list("d ; discrete"), # discrete predictor
  predType = c("c","f")) # "c" =continuous; "f"=fixed-effects
plot(fit_between, "factor_")
summary(fit_between)
# get estimates for the group-specific MPT parameters
gmeans <- getGroupMeans(fit_between)
round(gmeans, 2)
############################################################################
############ SIMULATION & ROBUSTNESS
############################################################################
###################################### CHANGING PRIORS
# what does the prior mean?
plotPrior(prior = list(mu = c(dn = "dnorm(0,1)", # default prior
                              g = "dnorm(0,5)"), # prior focused around 50%
                       xi="dunif(0,2)", # more stable prior for scale parameter
                       V= diag(2), df = 3)) # default Wishart prior
fit_prior <- traitMPT("2htm.eqn", "2htm.csv", restrictions=list("dn=do"),
                      mu = c(dn = "dnorm(0,1)", # default prior
                             g = "dnorm(0,5)"), # prior focused around 50% guessing
                      xi = "dunif(0,2)", # less dispersion of MPT parameters
                      V = diag(2), # default
                      df = 2 + 1) # default
summary(fit_prior)
summary(fit) # comparison to default prior
###################################### PRIOR PREDICTIVE
pp <- priorPredictive(prior = list(mu = "dnorm(0,1)", xi="dunif(0,10)",
                                   V=diag(2), df=2+1),
                      eqnfile = htm, restrictions = list("dn=do"),
                      numItems = c(target = 50, lure = 50),
                      N = 50, M = 500) # number of participants/samples
# compute and plot predicted values for the average hit/FA rates (in ROC space)
mean_freq <- sapply(pp, colMeans)
plot(mean_freq["fa",]/50, mean_freq["hit",]/50,
     asp = 1, xlim =0:1, ylim=0:1)
polygon(c(0,1,1), c(0,0,1), col = "gray")
abline(0, 1)
###################################### POSTERIOR PREDICTIVE
# sample new data using the posterior samples
postpred <- posteriorPredictive(fit, M = 100, nCPU = 4)
mf <- sapply(postpred, colMeans)
plot(mf["fa",]/50, mf["hit",]/50, asp = 1, xlim =c(.2,.6), ylim=c(.4,.9),
     las=1,col = adjustcolor(1, alpha=.3),
     main = "Posterior predicted (mean frequencies)",
     xlab= "False Alarm Rate", ylab = "Hit Rate")
abline(0, 1)
polygon(c(0,1,1), c(0,0,1), col = "gray")
points(matrix(colMeans(frequencies)[c("fa", "hit")], 1)/50,
       col = 2, pch = 16, cex = .5)
###################################### SIMULATION
set.seed(123)
# standard MPT for one person (no hierarchical structure)
sim <- genMPT(theta = c(d = .7, g = .5),
              numItems = c(target = 50, lure = 50),
              eqnfile = htm_d)
sim
# hierarchical MPT
gendat <- genTraitMPT(N = 10,
                      numItems = c(target = 50, lure = 50),
                      eqnfile = htm_d,
                      mean = c(d = .6, g = .5),
                      sigma = c(d = 1, g = .5),
                      rho = diag(2)) ## get real data
gendat$data
# prior predictive:
# 1. generate parameters from prior
# 2. generate data from parameters
priorPredictive(prior = list(mu = "dnorm(0,1)",
                             xi="dunif(0,10)",
                             V=diag(2), df=2+1),
                htm_d,
                numItems = c(100, 100), level = "data",
                N = 1, M = 100, nCPU = 4)
########################## Appendix
# Testing for Heterogeneity (Smith & Batchelder, 2008)
# A) Test person heterogeneity assuming items homogeneity ($\chi^2$)
test <- testHetChi(freq = "2htm.csv",
                   tree = c(cr="lure", fa="lure",
                            hit="target",miss="target"))
data.frame(test)
# B) Test person heterogeneity under item heterogeneity (permutation bootstrap)
# => requires data in long format (variables: person / item / response)
# testHetPerm(data, tree, source = "person")
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 63460
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 63460
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-008.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 23575
c no.of clauses 63460
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 63460
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-008.qdimacs 23575 63460 E1 [] 0 415 21740 63460 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-008/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-008.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 787
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 63460
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 63460
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-008.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 23575
c no.of clauses 63460
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 63460
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-008.qdimacs 23575 63460 E1 [] 0 415 21740 63460 NONE
|
# Page No. 5
# Number of distinguishable orderings of 4 white, 3 red and 2 blue flags:
# the multinomial coefficient 9! / (4! * 3! * 2!), computed here as a
# product of binomial coefficients (choose positions class by class).
no_of_white_flags <- 4
no_of_red_flags <- 3
no_of_blue_flags <- 2
total_flags <- no_of_white_flags + no_of_red_flags + no_of_blue_flags
ans <- choose(total_flags, no_of_white_flags) *
  choose(total_flags - no_of_white_flags, no_of_red_flags) *
  choose(no_of_blue_flags, no_of_blue_flags)
print(ans)
|
/A_First_Course_In_Probability_by_Sheldon_Ross/CH1/EX3.f/Ex1_3f.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 253
|
r
|
# Page No. 5
# Number of distinguishable orderings of 4 white, 3 red and 2 blue flags:
# multinomial coefficient 9! / (4! * 3! * 2!) = 1260.
no_of_white_flags = 4
no_of_red_flags = 3
no_of_blue_flags = 2
ans = factorial(no_of_white_flags + no_of_red_flags + no_of_blue_flags) / (factorial(no_of_white_flags) * factorial(no_of_red_flags) * factorial(no_of_blue_flags))
print(ans)
|
# Load all required packages up front. In the original, library(rgl) was only
# attached *after* plot3d() had already been called, so a fresh session failed
# on the first run. (The duplicate library(caret) call is also removed.)
library(rgl)
library(caret)
library(e1071)
library(MASS)

# Wine data set: column 1 is the class label, columns 2-14 are chemical
# measurements. The file ships without a header row.
wine <- read.csv("../Datasets/Wine/wine.data", header=FALSE)
colnames(wine) <- c("class","Alcohol","Malic Acid","Ash","Alcalinity of Ash","Magnesium","Total Phenols","Flavanoids","Nonflavanoid Phenols","Proanthocyanins","Color Intensity","Hue","0D280/OD315 of Diluted Wines","Proline")
# log transform
log.wine <- log(wine[, 2:14])
# PCA on the correlation matrix; cor = TRUE already standardizes each variable.
# NOTE(review): the original also passed center=TRUE / scale.=TRUE, which are
# prcomp() arguments — princomp() silently ignores them via '...', so they
# are dropped here (behavior unchanged).
wine.pca <- princomp(log.wine, cor = TRUE, scores = TRUE)
summary(wine.pca)
plot(wine.pca, type = "l")
# 3-D scatter of the first three principal components, coloured by class.
plot3d(wine.pca$scores[,1:3], col=wine[,1])
# Euclidean dissimilarity representation: returns the
# nrow(sampMatrix) x nrow(prototypesMatrix) matrix of pairwise Euclidean
# distances between the rows of sampMatrix (rows of the result) and the rows
# of prototypesMatrix (columns of the result).
#
# Implementation: stack both matrices, compute the full distance matrix with
# stats::dist(), and slice out the cross block. drop = FALSE keeps the result
# a matrix even when either input has a single row (the original silently
# degraded to a plain vector in that case). The unused `elements` variable
# from the original has been removed.
computeEuclideanDissimilarities <- function (sampMatrix, prototypesMatrix)
{
  distances <- as.matrix(dist(rbind(sampMatrix, prototypesMatrix), method = "euclidean"))
  n <- nrow(sampMatrix)
  dissimMatrix <- distances[seq_len(n), (n + 1):(n + nrow(prototypesMatrix)), drop = FALSE]
  return (dissimMatrix)
}
# Prototype-based dissimilarity classification on the global `wine` data:
# 1. draw a 60% training partition (stratified by class via caret),
# 2. fit an SVM and take the first `numPrototypes` support vectors as
#    prototypes,
# 3. represent train and test observations by their Euclidean dissimilarities
#    to the prototypes,
# 4. classify the held-out 40% with QDA in that dissimilarity space.
#
# numPrototypes: how many support vectors to keep as prototypes.
# kernel: SVM kernel used to select the support vectors. Defaults to
#         "linear" (the original behavior); pass "radial" to reproduce the
#         Gaussian-kernel variant, removing the need for a near-duplicate
#         function.
#
# Prints the confusion table and accuracy; returns the test-set accuracy.
# NOTE(review): depends on the global `wine` data frame and on the
# caret/e1071/MASS packages being attached; each call draws a fresh split.
runAnalysis <- function(numPrototypes, kernel = "linear")
{
  print(c('Iterating with ', numPrototypes, ' prototypes'))
  inTrain <- createDataPartition(wine$class, p = 0.6, list = FALSE)
  trainingSet <- wine[inTrain, ]
  testSet <- wine[-inTrain, ]
  svmfit <- svm(class ~ ., data = trainingSet, kernel = kernel, cost = 10, scale = FALSE)
  # Support vectors of the fitted SVM serve as candidate prototypes;
  # keep only the first numPrototypes of them.
  prototypes <- trainingSet[svmfit$index, ]
  prototyp <- prototypes[1:numPrototypes, ]
  # Training set in the dissimilarity space (column 1 is the class label).
  trainSetDissimilarities <- computeEuclideanDissimilarities(trainingSet[, -1], prototyp[, -1])
  dissSpace <- as.data.frame(cbind(trainingSet$class, trainSetDissimilarities))
  colnames(dissSpace)[1] <- "class"
  qda.fit <- qda(class ~ ., data = dissSpace)
  # Project the test set into the same space and classify with QDA.
  testSetDissimilarities <- computeEuclideanDissimilarities(testSet[, -1], prototyp[, -1])
  testSetDissSpace <- as.data.frame(cbind(testSet$class, testSetDissimilarities))
  colnames(testSetDissSpace) <- colnames(dissSpace)
  qda.testpred <- predict(qda.fit, testSetDissSpace)
  print(table(qda.testpred$class, testSet$class))
  cf <- confusionMatrix(qda.testpred$class, testSet$class)
  acc <- cf$overall['Accuracy']
  print(acc)
  return(acc)
}
# Same pipeline as runAnalysis() but with a Gaussian ("radial") SVM kernel
# for selecting the support-vector prototypes: 60/40 split of the global
# `wine` data, first `numPrototypes` support vectors as prototypes,
# Euclidean-dissimilarity representation, QDA classification of the test set.
# Prints the confusion table and accuracy; returns the test-set accuracy.
# NOTE(review): depends on the global `wine` data frame and on the
# caret/e1071/MASS packages being attached; near-duplicate of runAnalysis().
runAnalysisGaussianKernel <- function(numPrototypes)
{
print(c('Iterating with ',numPrototypes,' prototypes'))
inTrain <- createDataPartition(wine$class, p=0.6, list=FALSE)
trainingSet <- wine[inTrain,]
testSet <- wine[-inTrain,]
# "radial" kernel is the only difference from runAnalysis().
svmfit=svm(class~., data=trainingSet, kernel="radial", cost=10,scale=FALSE)
# Support vectors of the fitted SVM serve as candidate prototypes.
prototypes<-trainingSet[svmfit$index,]
#prototypes<-prototypes[1:numPrototypes,]
prototyp<-prototypes[1:numPrototypes,]
# Training set in the dissimilarity space (column 1 is the class label).
trainSetDissimilarities <- computeEuclideanDissimilarities (trainingSet[,-1],prototyp[,-1])
dissSpace<-as.data.frame(cbind(trainingSet$class,trainSetDissimilarities))
colnames(dissSpace)[1]<-"class"
qda.fit <-qda(class~.,data=dissSpace)
# Project the test set into the same space and classify with QDA.
testSetDissimilarities <- computeEuclideanDissimilarities (testSet[,-1],prototyp[,-1])
testSetDissSpace <- as.data.frame(cbind(testSet$class,testSetDissimilarities))
colnames(testSetDissSpace)<-colnames(dissSpace)
qda.testpred <- predict(qda.fit, testSetDissSpace)
print(table(qda.testpred$class,testSet$class))
cf<-confusionMatrix(qda.testpred$class,testSet$class)
acc <- cf$overall['Accuracy']
print(acc)
return(acc)
}
# --- Stratified 60/40 train/test split (per class) --------------------------
# QDA needs enough observations of every class, so we sample 60% *within each
# class* rather than from the pooled data (a pooled createDataPartition on a
# numeric class column does not guarantee that).
# NOTE(review): the original built each test set as
#   wine[-which(wine$class==k),][inTrainK,]
# i.e. it indexed the *other* classes' rows with the training indices. Fixed
# below to take the class-k rows NOT selected for training. The stray empty
# argument in sample(x,, size=...) is also removed.
smp_size <- floor(0.60 * length(which(wine$class==1)))
set.seed(123)
inTrain1 <- sample(length(which(wine$class==1)), size = smp_size)
trainingSet1 <- wine[which(wine$class==1),][inTrain1,]
testSet1 <- wine[which(wine$class==1),][-inTrain1,]
smp_size <- floor(0.60 * length(which(wine$class==2)))
set.seed(123)
inTrain2 <- sample(length(which(wine$class==2)), size = smp_size)
trainingSet2 <- wine[which(wine$class==2),][inTrain2,]
testSet2 <- wine[which(wine$class==2),][-inTrain2,]
smp_size <- floor(0.60 * length(which(wine$class==3)))
set.seed(123)
inTrain3 <- sample(length(which(wine$class==3)), size = smp_size)
trainingSet3 <- wine[which(wine$class==3),][inTrain3,]
testSet3 <- wine[which(wine$class==3),][-inTrain3,]
trainingSet <- rbind(trainingSet1,trainingSet2,trainingSet3)
testSet <- rbind(testSet1,testSet2,testSet3)
# Sanity check: per-class counts in the training partition.
length(which(trainingSet$class==1))
length(which(trainingSet$class==2))
length(which(trainingSet$class==3))
svmfit <- svm(class~., data=trainingSet, kernel="linear", cost=10, scale=FALSE)
prototypes <- trainingSet[svmfit$index,]
# Classification error as a function of the number of prototypes, for the
# linear- and Gaussian-kernel prototype selections.
protoRange <- 3:25
accuraciesLinear <- lapply(protoRange, runAnalysis)
# NOTE(review): the original plotted the undefined object `accuracies`;
# replaced with `accuraciesLinear` computed just above.
plot(protoRange, 1-as.numeric(accuraciesLinear), type="l", main='Classification error', ylab='Classification error')
accuraciesGaussian <- lapply(protoRange, runAnalysisGaussianKernel)
# Best prototype count (index into protoRange) and its accuracy, per kernel.
which.max(as.numeric(accuraciesLinear))
max(as.numeric(accuraciesLinear))
which.max(as.numeric(accuraciesGaussian))
max(as.numeric(accuraciesGaussian))
|
/Proposal1/WineRScript.R
|
no_license
|
Seleucia/CI_2014_RG_HC
|
R
| false
| false
| 5,609
|
r
|
wine <- read.csv("../Datasets/Wine/wine.data", header=FALSE)
colnames(wine) <- c("class","Alcohol","Malic Acid","Ash","Alcalinity of Ash","Magnesium","Total Phenols","Flavanoids","Nonflavanoid Phenols","Proanthocyanins","Color Intensity","Hue","0D280/OD315 of Diluted Wines","Proline")
# log transform
log.wine <- log(wine[, 2:14])
# apply PCA - scale. = TRUE is highly
# advisable, but default is FALSE.
wine.pca <- princomp(log.wine,
center = TRUE,
scale. = TRUE,cor=TRUE,scores=TRUE)
summary(wine.pca)
plot(wine.pca, type = "l")
plot3d(wine.pca$scores[,1:3], col=wine[,1])
library(rgl)
library(caret)
library(e1071)
library(caret)
library(MASS)
computeEuclideanDissimilarities <- function (sampMatrix,prototypesMatrix)
{
distances <- as.matrix(dist(rbind(sampMatrix,prototypesMatrix),method="euclidean"))
elements <- nrow(sampMatrix)*nrow(prototypesMatrix)
dissimMatrix<-distances[1:nrow(sampMatrix),(nrow(sampMatrix)+1):(nrow(sampMatrix)+nrow(prototypesMatrix))]
return (dissimMatrix)
}
runAnalysis <- function(numPrototypes)
{
print(c('Iterating with ',numPrototypes,' prototypes'))
inTrain <- createDataPartition(wine$class, p=0.6, list=FALSE)
trainingSet <- wine[inTrain,]
testSet <- wine[-inTrain,]
svmfit=svm(class~., data=trainingSet, kernel="linear", cost=10,scale=FALSE)
prototypes<-trainingSet[svmfit$index,]
#prototypes<-prototypes[1:numPrototypes,]
prototyp<-prototypes[1:numPrototypes,]
trainSetDissimilarities <- computeEuclideanDissimilarities (trainingSet[,-1],prototyp[,-1])
dissSpace<-as.data.frame(cbind(trainingSet$class,trainSetDissimilarities))
colnames(dissSpace)[1]<-"class"
qda.fit <-qda(class~.,data=dissSpace)
testSetDissimilarities <- computeEuclideanDissimilarities (testSet[,-1],prototyp[,-1])
testSetDissSpace <- as.data.frame(cbind(testSet$class,testSetDissimilarities))
colnames(testSetDissSpace)<-colnames(dissSpace)
qda.testpred <- predict(qda.fit, testSetDissSpace)
print(table(qda.testpred$class,testSet$class))
cf<-confusionMatrix(qda.testpred$class,testSet$class)
acc <- cf$overall['Accuracy']
print(acc)
return(acc)
}
runAnalysisGaussianKernel <- function(numPrototypes)
{
print(c('Iterating with ',numPrototypes,' prototypes'))
inTrain <- createDataPartition(wine$class, p=0.6, list=FALSE)
trainingSet <- wine[inTrain,]
testSet <- wine[-inTrain,]
svmfit=svm(class~., data=trainingSet, kernel="radial", cost=10,scale=FALSE)
prototypes<-trainingSet[svmfit$index,]
#prototypes<-prototypes[1:numPrototypes,]
prototyp<-prototypes[1:numPrototypes,]
trainSetDissimilarities <- computeEuclideanDissimilarities (trainingSet[,-1],prototyp[,-1])
dissSpace<-as.data.frame(cbind(trainingSet$class,trainSetDissimilarities))
colnames(dissSpace)[1]<-"class"
qda.fit <-qda(class~.,data=dissSpace)
testSetDissimilarities <- computeEuclideanDissimilarities (testSet[,-1],prototyp[,-1])
testSetDissSpace <- as.data.frame(cbind(testSet$class,testSetDissimilarities))
colnames(testSetDissSpace)<-colnames(dissSpace)
qda.testpred <- predict(qda.fit, testSetDissSpace)
print(table(qda.testpred$class,testSet$class))
cf<-confusionMatrix(qda.testpred$class,testSet$class)
acc <- cf$overall['Accuracy']
print(acc)
return(acc)
}
# inTrain <- createDataPartition(wine$class, p=0.6, list=FALSE)
# trainingSet <- wine[inTrain,]
# testSet <- wine[-inTrain,]
# We need to improve this to provide alpha = 0.6 per class, otherways qda does not work correctly
smp_size <- floor(0.60 * length(which(wine$class==1)))
set.seed(123)
inTrain1 <- sample(length(which(wine$class==1)),, size = smp_size)
trainingSet1 <- wine[which(wine$class==1),][inTrain1,]
testSet1 <- wine[-which(wine$class==1),][inTrain1,]
smp_size <- floor(0.60 * length(which(wine$class==2)))
set.seed(123)
inTrain2 <- sample(length(which(wine$class==2)),, size = smp_size)
trainingSet2 <- wine[which(wine$class==2),][inTrain2,]
testSet2 <- wine[-which(wine$class==2),][inTrain2,]
smp_size <- floor(0.60 * length(which(wine$class==3)))
set.seed(123)
inTrain3 <- sample(length(which(wine$class==3)),, size = smp_size)
trainingSet3 <- wine[which(wine$class==3),][inTrain3,]
testSet3 <- wine[-which(wine$class==3),][inTrain3,]
trainingSet <- rbind(trainingSet1,trainingSet2,trainingSet3)
testSet <- rbind(testSet1,testSet2,testSet3)
length(which(trainingSet$class==1))
length(which(trainingSet$class==2))
length(which(trainingSet$class==3))
svmfit=svm(class~., data=trainingSet, kernel="linear", cost=10,scale=FALSE)
prototypes<-trainingSet[svmfit$index,]
protoRange <- 3:25
accuraciesLinear <- lapply(protoRange,runAnalysis)
plot(protoRange,1-as.numeric(accuracies),type="l",main='Classification error',ylab='Classification error')
accuraciesGaussian <- lapply(protoRange,runAnalysisGaussianKernel)
which.max(as.numeric(accuraciesLinear))
max(as.numeric(accuraciesLinear))
which.max(as.numeric(accuraciesGaussian))
max(as.numeric(accuraciesGaussian))
|
# options(encoding="UTF-8")
# Shiny application entry point. The sourced helper scripts are expected to
# define `uiTab` (the UI object) and `server` (the server function), which
# are assembled into the app here. Paths are relative to the app directory.
source("./resources/init.R")
source("./resources/uiTab.R")
source("./resources/server.R")
shinyApp(ui=uiTab,server=server)
|
/app.R
|
permissive
|
nplatonov/accenter
|
R
| false
| false
| 151
|
r
|
# options(encoding="UTF-8")
source("./resources/init.R")
source("./resources/uiTab.R")
source("./resources/server.R")
shinyApp(ui=uiTab,server=server)
|
data1 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
data2 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v2_m1.SNPs.txt",header=TRUE,sep="\t");
data3 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v3_m1.SNPs.txt",header=TRUE,sep="\t");
data0 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
DGRP <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_const.txt",header=FALSE,sep="\t");
data <- merge(data0,DGRP,by.x=c("chr","pos"),by.y=c("V1","V3"));
nrow(subset(data1,ref_allele/(ref_allele+alt_allele)==0.5))/nrow(data1);
nrow(subset(data2,ref_allele/(ref_allele+alt_allele)==0.5))/nrow(data2);
nrow(subset(data3,ref_allele/(ref_allele+alt_allele)==0.5))/nrow(data3);
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)==0.5))/nrow(temp);
### pie charts ###
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v1_pie.pdf");
pie(c(nrow(subset(data1,ref_allele/(ref_allele+alt_allele)!=0.5)),nrow(subset(data1,ref_allele/(ref_allele+alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v2_pie.pdf");
pie(c(nrow(subset(data2,ref_allele/(ref_allele+alt_allele)!=0.5)),nrow(subset(data2,ref_allele/(ref_allele+alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v3_pie.pdf");
pie(c(nrow(subset(data3,ref_allele/(ref_allele+alt_allele)!=0.5)),nrow(subset(data3,ref_allele/(ref_allele+alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v0_pie.pdf");
pie(c(nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)!=0.5)),nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
data1 <- subset(data1,ref_allele>0&alt_allele>0);
data2 <- subset(data2,ref_allele>0&alt_allele>0);
data3 <- subset(data3,ref_allele>0&alt_allele>0);
summary(log2(data1$ref_allele/data1$alt_allele));
summary(log2(data2$ref_allele/data2$alt_allele));
summary(log2(data3$ref_allele/data3$alt_allele));
summary(data0$neighbor);
summary(data1$neighbor);
summary(data2$neighbor);
summary(data3$neighbor);
temp1 <- merge(data1,data2,by.x=c("chr","pos"),by.y=c("chr","pos"));
data <- merge(temp1,data3,by.x=c("chr","pos"),by.y=c("chr","pos"));
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
ref_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
alt_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
mappability_0mm <- merge(ref_mappability,alt_mappability,by.x=c("chr","position"),by.y=c("chr","position"));
data <- merge(data,mappability_0mm,by.x=c("chr","pos"),by.y=c("chr","position"));
#data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_alt_line_40.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v3_m1.SNPs.txt",header=TRUE,sep="\t");
###### figure out how many SNPs are within 50b of each SNP ######
### start iterative run here
complete_data <- NULL;
chromosomes <- c("chr2L","chr2R","chr3L","chr3R","chrX");
for(j in 1:length(chromosomes))
{
arm <- chromosomes[j];
chrom <- subset(data,chr == arm);
chrom_sorted <- chrom[order(chrom$pos),];
chrom <- chrom_sorted;
steps_right = 0;
steps_left = 0;
SNPs_left <- mat.or.vec(nrow(chrom),1);
SNPs_right <- mat.or.vec(nrow(chrom),1);
for(i in 1:nrow(chrom))
{
if(i == 1)
{
SNPs_left[i] <- 0;
k <- i + 1;
right <- chrom$pos[k] - chrom$pos[i];
while(right < 50)
{
steps_right <- steps_right + 1;
if(k == nrow(chrom)){break;}
k <- k + 1;
right <- chrom$pos[k] - chrom$pos[i];
}
SNPs_right[i] = steps_right;
}
if(i == nrow(chrom))
{
SNPs_right[i] <- 0;
j <- i - 1;
left <- chrom$pos[i] - chrom$pos[j];
while(left < 50)
{
steps_left <- steps_left + 1;
if(j == 1){break;}
j <- j - 1;
left <- chrom$pos[i] - chrom$pos[j];
}
SNPs_left[i] = steps_left;
}
j <- i - 1;
k <- i + 1;
steps_left <- 0;
steps_right <- 0;
if(j != 0 & k != (nrow(chrom)+1))
{
left <- chrom$pos[i] - chrom$pos[j];
right <- chrom$pos[k] - chrom$pos[i];
while(left < 50)
{
steps_left <- steps_left + 1;
j <- j - 1;
if(j == 0){break;}
left <- chrom$pos[i] - chrom$pos[j];
}
SNPs_left[i] = steps_left;
while(right < 50)
{
steps_right <- steps_right + 1;
k <- k + 1;
if(k == (nrow(chrom)+1)){break;}
right <- chrom$pos[k] - chrom$pos[i];
}
SNPs_right[i] = steps_right;
}
}
neighbor <- SNPs_left+SNPs_right;
chrom <- cbind(chrom,neighbor);
complete_data <- rbind(complete_data,chrom);
}
neighbor_plus <- complete_data$neighbor+1;
complete_data <- cbind(complete_data,neighbor_plus);
summary(complete_data$neighbor_plus);
complete_data0 <- complete_data;
# run the previous iteratively to get complete dataset
#write.table(complete_data,file="/Users/kraigrs/Wittkopp/Simulations/tiled/constExons_single_bp50_error0_tiled.dm3_ref.bowtie_mm1.neighbor_SNPs.txt",quote=FALSE,sep="\t",row.names=FALSE);
#write.table(ref_bad_SNPs,file="/Users/kraigrs/Wittkopp/Simulations/tiled/ref_bad_SNPs.txt",quote=FALSE,sep="\t",row.names=FALSE,col.names=FALSE);
#write.table(alt_bad_SNPs,file="/Users/kraigrs/Wittkopp/Simulations/tiled/alt_bad_SNPs.txt",quote=FALSE,sep="\t",row.names=FALSE,col.names=FALSE);
# 1 mismatch
boxplot(log2(ref_allele.x/alt_allele.x) ~ neighbor,data = subset(complete_data,ref_allele.x > 0 & alt_allele.x > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt) among SNPs",border=rgb(0,0,0,0.5),xlim=c(0,13),ylim=c(-6,6));
par(new=TRUE);
# 2 mismatches
boxplot(log2(ref_allele.y/alt_allele.y) ~ neighbor,data = subset(complete_data, ref_allele.y > 0 & alt_allele.y > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt)",border=rgb(1,0,0,0.5),xlim=c(0,13),ylim=c(-6,6));
par(new=TRUE);
# 3 mismatches
boxplot(log2(ref_allele/alt_allele) ~ neighbor,data = subset(complete_data, ref_allele > 0 & alt_allele > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt)",border=rgb(0,0,1,0.5),xlim=c(0,13),ylim=c(-6,6));
boxplot(log2(ref_allele/alt_allele) ~ neighbor,data = subset(complete_data, ref_allele > 0 & alt_allele > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt)",main="DGRP line_40 single reference (3mm)",xlim=c(0,13),ylim=c(-7,7),pars = list(outpch=19,cex=0.2));
par(mfrow=c(2,2));
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = complete_data, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.2));
abline(h=0.5,lty=2,col="red");
boxplot(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) ~ neighbor_plus,data = complete_data, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.2));
abline(h=0.5,lty=2,col="red");
#### bubble plot ####
data <- subset(complete_data,is.finite(log2(ref_allele/alt_allele)) & !is.na(log2(ref_allele/alt_allele)));
radii <- sqrt(data$total_overlap/pi);
symbols(
data$neighbor+1,
log2(data$ref_allele/data$alt_allele),
circles=radii);
#### neighbor plus ####
neighbor_plus <- rep(0,nrow(complete_data));
for(i in 1:nrow(complete_data))
{
if(complete_data$neighbor[i] < 6){neighbor_plus[i] <- complete_data$neighbor[i];}
else{neighbor_plus[i] <- "6";}
}
temp <- cbind(complete_data,neighbor_plus);
boxplot(log2(ref_allele.x/alt_allele.x) ~ neighbor_plus, data = subset(temp,ref_allele.x > 0 & alt_allele.x > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt) among SNPs",main="(# neighboring SNPs capped at 6)",border=rgb(0,0,0,0.5),ylim=c(-3,7));
abline(h = 0,col="red",lty="dashed");
#############################################
# plot medians for the different mismatches #
#############################################
summary(complete_data$neighbor); # range of neighboring SNPs [0,22]
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
both <- merge(complete_data,multiple,by.x=c("chr","pos"),by.y=c("chr","pos"));
neighbor_plus <- rep(0,nrow(both));
for(i in 1:nrow(both))
{
if(both$neighbor[i] < 6){neighbor_plus[i] <- both$neighbor[i];}
else{neighbor_plus[i] <- "6";}
}
both <- cbind(both,neighbor_plus);
#medians <- mat.or.vec(7,5);
# Summarize allele-specific read abundance (ASRA = ref/(ref+alt)) per number of
# neighboring SNPs, for three merged datasets: suffix .x, suffix .y, and
# unsuffixed — presumably the 1-, 2- and 3-mismatch alignments merged earlier
# in this file (TODO confirm against the merge calls that build complete_data).
#colnames(medians) <- c("neighbor","v1","v2","v3","multiple");
medians <- mat.or.vec(13,4);   # 13 rows = neighbor counts 0..12; initialized to 0
colnames(medians) <- c("neighbor","v1","v2","v3");
#vars <- mat.or.vec(7,5);
#colnames(vars) <- c("neighbor","v1","v2","v3","multiple");
vars <- mat.or.vec(13,4);
colnames(vars) <- c("neighbor","v1","v2","v3");
both <- complete_data;   # complete_data is built earlier in this file (not visible here)
for(i in 1:13)
{
# j is the number of neighboring SNPs (0-based); row i of medians/vars holds its stats
j <- i-1;
temp <- subset(both,neighbor == j);
# The commented-out variants below summarized log2(ref/alt) instead of the
# reference-allele fraction; kept for provenance.
#medians[i,1] <- j;
#medians[i,2] <- median(log2(temp$ref_allele.x/temp$alt_allele.x));
#medians[i,3] <- median(log2(temp$ref_allele.y/temp$alt_allele.y));
#medians[i,4] <- median(log2(temp$ref_allele/temp$alt_allele));
#medians[i,5] <- median(log2(temp$dm3_ref_ref_allele/temp$dm3_alt_alt_allele));
medians[i,1] <- j;
medians[i,2] <- median(temp$ref_allele.x/(temp$ref_allele.x+temp$alt_allele.x));
medians[i,3] <- median(temp$ref_allele.y/(temp$ref_allele.y+temp$alt_allele.y));
medians[i,4] <- median(temp$ref_allele/(temp$ref_allele+temp$alt_allele));
#medians[i,5] <- median(temp$dm3_ref_ref_allele/(temp$dm3_ref_ref_allele+temp$dm3_alt_alt_allele));
#vars[i,1] <- j;
#vars[i,2] <- var(log2(temp$ref_allele.x/temp$alt_allele.x));
#vars[i,3] <- var(log2(temp$ref_allele.y/temp$alt_allele.y));
#vars[i,4] <- var(log2(temp$ref_allele/temp$alt_allele));
#vars[i,5] <- var(log2(temp$dm3_ref_ref_allele/temp$dm3_alt_alt_allele));
vars[i,1] <- j;
vars[i,2] <- var(temp$ref_allele.x/(temp$ref_allele.x+temp$alt_allele.x));
vars[i,3] <- var(temp$ref_allele.y/(temp$ref_allele.y+temp$alt_allele.y));
vars[i,4] <- var(temp$ref_allele/(temp$ref_allele+temp$alt_allele));
#vars[i,5] <- var(log2(temp$dm3_ref_ref_allele/temp$dm3_alt_alt_allele));
}
# Overlay the three median curves on one panel (pch distinguishes datasets;
# only the last plot() call draws axes/labels).
plot(medians[,1],medians[,2],type="o",pch=1,col="black",xlim=c(0,12),ylim=c(0,1),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(medians[,1],medians[,3],type="o",pch=0,col="black",xlim=c(0,12),ylim=c(0,1),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(medians[,1],medians[,4],type="o",pch=2,col="black",xlim=c(0,12),ylim=c(0,1),xlab="# neighboring SNPs",ylab="median ref/(ref+alt)",main="Compare medians");
#legend("bottomright",legend=c("1 mm","2 mm","3 mm"),fill=c("black","red","green"),bty="n");
legend("bottomright",legend=c("1 mm","2 mm","3 mm"),pch=c(1,0,2),bty="n");
# Same overlay for variances. NOTE(review): xlim=c(0,6) truncates the curves at
# 6 neighbors even though vars has rows for 0..12, and the y-label says
# log2(ASE) while the values are variances of the ref fraction — confirm intent.
plot(vars[,1],vars[,2],type="o",col="black",xlim=c(0,6),ylim=c(0,3),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(vars[,1],vars[,3],type="o",col="red",xlim=c(0,6),ylim=c(0,3),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(vars[,1],vars[,4],type="o",col="green",xlim=c(0,6),ylim=c(0,3),xlab="# neighboring SNPs",ylab="variance log2(ASE)");
legend("topleft",legend=c("1 mismatch","2 mismatches","3 mismatches"),fill=c("black","red","green"));
# compare distributions of log2(ASE) for differing numbers of mismatches
# Restrict each dataset to SNPs with reads on BOTH alleles so the log2 ratio is finite.
total1 <- subset(complete_data,ref_allele.x > 0 & alt_allele.x > 0);
total2 <- subset(complete_data,ref_allele.y > 0 & alt_allele.y > 0);
total3 <- subset(complete_data,ref_allele > 0 & alt_allele > 0);
# hist() is used only for its binned counts here (it also draws, as a side effect).
obj1 <- hist(log2(total1$ref_allele.x/total1$alt_allele.x),breaks=seq(-4.75,6.5,0.25));
obj2 <- hist(log2(total2$ref_allele.y/total2$alt_allele.y),breaks=seq(-4.75,6.5,0.25));
obj3 <- hist(log2(total3$ref_allele/total3$alt_allele),breaks=seq(-4.75,6.5,0.25));
# Normalize counts to proportions within each dataset before plotting side by side.
mat <- cbind(obj1$counts/nrow(total1),obj2$counts/nrow(total2),obj3$counts/nrow(total3));
barplot(t(mat),beside=TRUE,names.arg=seq(-4.50,6.5,0.25),xlab="log2(ref/alt)",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","red","green"));
legend("topright",legend=c("1 mismatch","2 mismatches","3 mismatches"),fill=c("black","red","green"));
# compare distributions of fraction of reference allele for differing numbers of mismatches
# NOTE(review): these histograms use all of complete_data but are normalized by
# nrow(total1..3) (the both-alleles subsets) below — confirm that is intended.
obj1 <- hist(complete_data$ref_allele.x/(complete_data$ref_allele.x+complete_data$alt_allele.x),breaks=20);
obj2 <- hist(complete_data$ref_allele.y/(complete_data$ref_allele.y+complete_data$alt_allele.y),breaks=20);
obj3 <- hist(complete_data$ref_allele/(complete_data$ref_allele+complete_data$alt_allele),breaks=20);
mat <- cbind(obj1$counts/nrow(total1),obj2$counts/nrow(total2),obj3$counts/nrow(total3));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="Fraction of reference allele",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","red","green"));
legend("topright",legend=c("1 mismatch","2 mismatches","3 mismatches"),fill=c("black","red","green"));
# Re-plot the upper half of the ref-fraction range (rows 14+ = fractions > 0.65),
# one panel per mismatch count.
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts,
obj3$breaks[2:length(obj3$breaks)],obj3$counts);
temp <- temp[14:nrow(temp),];
par(mfrow=c(1,3));
barplot(temp[,2]/nrow(total1),names.arg=temp[,1],xlab="",ylab="Proportion",main="\n\n1 mismatch",col="gray",ylim=c(0,1),cex.axis=1.5,cex.names=1.5,cex.lab=1.5,cex.main=1.5);
barplot(temp[,4]/nrow(total2),names.arg=temp[,3],xlab="Fraction of reference allele",ylab="",main="SNP-based ASE using single reference\n\n2 mismatches",col="gray",ylim=c(0,1),cex.axis=1.5,cex.names=1.5,cex.lab=1.5,cex.main=1.5);
barplot(temp[,6]/nrow(total3),names.arg=temp[,5],xlab="",ylab="",main="\n\n3 mismatches",col="gray",ylim=c(0,1),cex.axis=1.5,cex.names=1.5,cex.lab=1.5,cex.main=1.5);
# Pie charts: share of SNPs with apparent imbalance (ratio != 0 on log2 scale,
# i.e. ref fraction != 0.5) vs without, per mismatch count.
pie(c(nrow(subset(total1,log2(ref_allele.x/alt_allele.x) != 0)),nrow(subset(total1,log2(ref_allele.x/alt_allele.x) == 0))),col=c("gray","white"),labels="");
pie(c(nrow(subset(total2,log2(ref_allele.y/alt_allele.y) != 0)),nrow(subset(total2,log2(ref_allele.y/alt_allele.y) == 0))),col=c("gray","white"),labels="");
pie(c(nrow(subset(total3,log2(ref_allele/alt_allele) != 0)),nrow(subset(total3,log2(ref_allele/alt_allele) == 0))),col=c("gray","white"),labels="");
pie(c(nrow(subset(total1,ref_allele.x/(ref_allele.x+alt_allele.x) != 0.5)),nrow(subset(total1,ref_allele.x/(ref_allele.x+alt_allele.x) == 0.5))),col=c("gray","white"),labels=c("AI","no AI"));
pie(c(nrow(subset(total2,ref_allele.y/(ref_allele.y+alt_allele.y) != 0.5)),nrow(subset(total2,ref_allele.y/(ref_allele.y+alt_allele.y) == 0.5))),col=c("gray","white"),labels=c("AI","no AI"));
pie(c(nrow(subset(total3,ref_allele/(ref_allele+alt_allele) != 0.5)),nrow(subset(total3,ref_allele/(ref_allele+alt_allele) == 0.5))),col=c("gray","white"),labels=c("AI","no AI"));
# new plots
# Proportion of sites called allelically imbalanced (AI) under each alignment
# strategy, bars labeled by mismatch count (0 = dual-genome / "multiple" columns).
nrow(subset(both,ref_allele.x>0&alt_allele.x>0&ref_allele.y>0&alt_allele.y>0&ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0));
posASE <- subset(both,ref_allele.x>0&alt_allele.x>0&ref_allele.y>0&alt_allele.y>0&ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0);
props <- c(
nrow( subset(posASE,log2(ref_allele.x/alt_allele.x) != 0) )/nrow(posASE),
nrow( subset(posASE,log2(ref_allele.y/alt_allele.y) != 0) )/nrow(posASE),
nrow( subset(posASE,log2(ref_allele/alt_allele) != 0) )/nrow(posASE),
nrow( subset(posASE,log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0) )/nrow(posASE)
);
barplot(props,names.arg=c(1,2,3,0),ylim=c(0,1),xlab="Number of mismatches",ylab="Proportion of AI",main="Comparison of single and multiple genomes");
nrow(subset(both,ref_allele.x>0&alt_allele.x>0&ref_allele.y>0&alt_allele.y>0&ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0));
# Second version: ref-fraction != 0.5 as the AI criterion, over ALL sites.
# NOTE(review): posASE is reassigned to the unfiltered `both` here, so sites
# with zero reads on one allele are in the denominator — confirm this is intended.
posASE <- both
props <- c(
nrow( subset(both,ref_allele.x/(ref_allele.x+alt_allele.x) != 0.5) )/nrow(posASE),
nrow( subset(both,ref_allele.y/(ref_allele.y+alt_allele.y) != 0.5) )/nrow(posASE),
nrow( subset(both,ref_allele/(ref_allele+alt_allele) != 0.5) )/nrow(posASE),
nrow( subset(both,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5) )/nrow(posASE)
);
barplot(props,names.arg=c(1,2,3,0),ylim=c(0,1),xlab="Number of mismatches",ylab="Proportion of AI",main="Comparison of single and multiple genomes");
###################
# compare methods #
###################
##### compare single vs. multiple ASE measurements in exons
# Per-exon read counts from the same simulated reads aligned two ways:
# `single` = single reference genome (bowtie -v 1), `multiple` = both parental
# genomes (bowtie -v 0). Columns dm3/line_40 are per-allele counts.
single <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.exons.txt",header=TRUE,sep="\t");
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
single_multiple <- merge(single, multiple,by.x="gene_exon",by.y="gene_exon");
# Exons with reads on both alleles under both methods (.x = single, .y = multiple).
posASE <- subset(single_multiple,dm3.x>0&line_40.x>0&dm3.y>0&line_40.y>0);
obj1 <- hist(log2(posASE$dm3.x/posASE$line_40.x),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE$dm3.y/posASE$line_40.y),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE),obj2$counts/nrow(posASE));
# NOTE(review): names.arg seq(-4.75,8,0.25) has the right length (52) but is
# shifted relative to the hist breaks seq(-6,7,0.25) — bar labels may be offset.
barplot(t(mat),beside=TRUE,names.arg=seq(-4.75,8,0.25),xlab="log2(reference allele/alternative allele)",ylab="Proportion",main="Exon-based measurements of ASE",col=c("black","gray"));
legend("topleft",legend=c("single genome, AI = 0.482","multiple genomes, AI = 0.011"),fill=c("black","gray"),bty="n");
# Zoom to rows 18..47 of the binned counts (bins near zero) and re-plot.
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE),temp[,4]/nrow(posASE));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="log2(dm3/line_40)",ylab="Proportion",main="Exon-based measurements of ASE",col=c("black","gray"),ylim=c(0,1));
legend("topright",legend=c("single genome, AI = 0.482","multiple genomes, AI = 0.011"),fill=c("black","gray"),bty="n");
# AI
# Same comparison restricted to exons showing imbalance (log2 ratio != 0) per method.
posASE_single <- subset(single_multiple,dm3.x>0&line_40.x>0&log2(dm3.x/line_40.x)!=0);
posASE_multiple <- subset(single_multiple,dm3.y>0&line_40.y>0&log2(dm3.y/line_40.y)!=0);
obj1 <- hist(log2(posASE_single$dm3.x/posASE_single$line_40.x),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE_multiple$dm3.y/posASE_multiple$line_40.y),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE_single),obj2$counts/nrow(posASE_multiple));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE_single),temp[,4]/nrow(posASE_multiple));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="",ylab="",main="",col=c("black","gray"),ylim=c(0,1));
legend("topleft",legend=c("single (n = 31,334)","multiple (n = 674)"),fill=c("black","gray"));
legend("topright",legend="Exons with detectable ASE\nshowing imbalance");
pie(c(nrow(posASE_single),nrow(posASE_multiple)),labels=c("Single","Multiple"),col=c("black","gray"));
##### compare single vs. multiple ASE measurements in SNPs
# Same single-vs-multiple comparison as the exon block above, at SNP resolution.
# ref_allele/alt_allele = single-genome counts; dm3_ref_ref_allele /
# dm3_alt_alt_allele = dual-genome counts for the same site.
single <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
single_multiple <- merge(single,multiple,by.x=c("chr","pos"),by.y=c("chr","pos"));
posASE <- subset(single_multiple,ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0);
obj1 <- hist(log2(posASE$ref_allele/posASE$alt_allele),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE$dm3_ref_ref_allele/posASE$dm3_alt_alt_allele),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE),obj2$counts/nrow(posASE));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];   # zoom to the bins around log2 ratio = 0
mat <- cbind(temp[,2]/nrow(posASE),temp[,4]/nrow(posASE));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="log2(dm3/line_40)",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","gray"),ylim=c(0,1));
legend("topright",legend=c("single genome, AI = 0.507","multiple genomes, AI = 0.003"),fill=c("black","gray"),bty="n");
legend("topright",legend="78,860 SNPs with detectable ASE");
# AI
# Restrict to SNPs showing imbalance under each method and re-plot.
posASE_single <- subset(single_multiple,ref_allele>0&alt_allele>0&log2(ref_allele/alt_allele)!=0);
posASE_multiple <- subset(single_multiple,dm3_ref_ref_allele>0&dm3_alt_alt_allele>0&log2(dm3_ref_ref_allele/dm3_alt_alt_allele)!=0);
obj1 <- hist(log2(posASE_single$ref_allele/posASE_single$alt_allele),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE_multiple$dm3_ref_ref_allele/posASE_multiple$dm3_alt_alt_allele),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE_single),obj2$counts/nrow(posASE_multiple));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE_single),temp[,4]/nrow(posASE_multiple));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="",ylab="",main="",col=c("black","gray"),ylim=c(0,1));
# NOTE(review): legend text still says "Exons" and reuses the exon-section
# counts (31,334 / 674) in this SNP-level plot — likely a copy-paste leftover.
legend("topleft",legend=c("single (n = 31,334)","multiple (n = 674)"),fill=c("black","gray"));
legend("topright",legend="Exons with detectable ASE\nshowing imbalance");
pie(c(nrow(posASE_single),nrow(posASE_multiple)),labels=c("Single","Multiple"),col=c("black","gray"));
###### redone using fraction of reference allele
##### compare single vs. multiple ASE measurements in SNPs
# Repeat of the SNP comparison above, but using the reference-allele fraction
# ref/(ref+alt) instead of log2 ratios, and without requiring reads on both alleles.
single <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
single_multiple <- merge(single,multiple,by.x=c("chr","pos"),by.y=c("chr","pos"));
posASE <- single_multiple;
obj1 <- hist(posASE$ref_allele/(posASE$ref_allele+posASE$alt_allele),breaks=20);
obj2 <- hist(posASE$dm3_ref_ref_allele/(posASE$dm3_ref_ref_allele+posASE$dm3_alt_alt_allele),breaks=20);
mat <- cbind(obj1$counts/nrow(posASE),obj2$counts/nrow(posASE));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
#temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE),temp[,4]/nrow(posASE));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="Fraction of reference allele",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","gray"),ylim=c(0,1));
legend("topright",legend=c("single genome, AI = 0.507","multiple genomes, AI = 0.003"),fill=c("black","gray"),bty="n");
legend("topright",legend="78,860 SNPs with detectable ASE");
# AI
# SNPs with apparent imbalance (fraction != 0.5) under each method.
posASE_single <- subset(single_multiple,ref_allele/(ref_allele+alt_allele)!=0.5);
posASE_multiple <- subset(single_multiple,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)!=0.5);
obj1 <- hist(posASE_single$ref_allele/(posASE_single$ref_allele+posASE_single$alt_allele),breaks=20);
obj2 <- hist(posASE_multiple$dm3_ref_ref_allele/(posASE_multiple$dm3_ref_ref_allele+posASE_multiple$dm3_alt_alt_allele),breaks=20);
mat <- cbind(obj1$counts/nrow(posASE_single),obj2$counts/nrow(posASE_multiple));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
#temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE_single),temp[,4]/nrow(posASE_multiple));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="",ylab="",main="",col=c("black","gray"),ylim=c(0,1));
# NOTE(review): legend text and n's are carried over from the exon section — confirm.
legend("topleft",legend=c("single (n = 31,334)","multiple (n = 674)"),fill=c("black","gray"));
legend("topright",legend="Exons with detectable ASE\nshowing imbalance");
pie(c(nrow(posASE_single),nrow(posASE_multiple)),labels=c("Single","Multiple"),col=c("black","gray"));
#########################
###############
# mappability #
###############
# Per-exon mappability for reference (dm3_ref) and alternate (dm3_alt_line_40)
# genomes; `sum`/`length` columns give the per-exon mappability average.
dm3_ref_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
dm3_alt_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
exon_mappability = merge(dm3_ref_exon_mappability,dm3_alt_exon_mappability,by.x="locus",by.y="locus");
dm3_ref_avg <- exon_mappability$sum.x/exon_mappability$length.x;
dm3_alt_avg <- exon_mappability$sum.y/exon_mappability$length.y;
mappability <- cbind(exon_mappability,dm3_ref_avg,dm3_alt_avg);
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
# V7 in this headerless file is the SNP count per exon; V8 is the exon id
# (inferred from the merge keys below — confirm against the file's format).
SNPsInExons <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_in_const.txt",header=FALSE,sep="\t");
temp1 <- merge(multiple,mappability,by.x="gene_exon",by.y="locus");
temp2 <- merge(temp1,SNPsInExons,by.x="gene_exon",by.y="V8");
# "weird": exons that show imbalance despite containing zero SNPs (V7==0).
weird <- subset(temp2,dm3>0&line_40>0&V7==0&log2(dm3/line_40)!=0)[,c(1:4,11,12,19)];
# AI = SNP-containing exons with imbalance; ASE = SNP-containing exons without.
AI <- subset(temp2,dm3>0&line_40>0&V7>0&log2(dm3/line_40)!=0);
AI_prop <- nrow(subset(AI,dm3_ref_avg != 1 | dm3_alt_avg != 1))/nrow(AI);   # fraction with imperfect mappability
call <- rep("AI",nrow(AI));
AI <- cbind(AI,call);
ASE <- subset(temp2,dm3>0&line_40>0&V7>0&log2(dm3/line_40)==0);
ASE_prop <- nrow(subset(ASE,dm3_ref_avg != 1 | dm3_alt_avg != 1))/nrow(ASE);
call <- rep("ASE",nrow(ASE));
ASE <- cbind(ASE,call);
data <- rbind(AI,ASE);
boxplot(log2(dm3_ref_avg/dm3_alt_avg) ~ call, data=data,ylab="log2(mappability ratio)",varwidth=TRUE);
props <- c(AI_prop,ASE_prop);
barplot(props,names.arg=c("AI\nn = 300","not AI\nn = 23,902"),ylim=c(0,1),ylab="Proportion filtered out in each category",main="Filtering imperfect mappability retains many exons");
barplot(c(ASE_prop,AI_prop),xlim=c(0,1),horiz=TRUE,names.arg=c("No AI\nn = 23,902","AI\nn = 300"),width=0.3,xlab="Proportion removed due to imperfect mappability",main="Exons");
############
# 1 mismatch
# Join per-SNP simulated counts (complete_data1, built earlier in this file)
# with single-end 50 bp mappability at 1 allowed mismatch, export filtered
# subsets, and contrast ASRA (ref/(ref+alt)) between perfectly and imperfectly
# mappable sites.
mappability_1mm <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m1.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data1,mappability_1mm,by.x=c("chr","pos"),by.y=c("chr","position"));
write.table(data,file="/Users/kraigrs/Desktop/exons/sim_SNPs_1mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
temp <- subset(data,neighbor<1);   # "unbiased" sites: no neighboring SNP
write.table(temp,file="/Users/kraigrs/Desktop/exons/sim_SNPs_1mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(temp,sum/length == 1);   # perfectly mappable sites
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_1mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(temp,sum/length == 1))*100;
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
nonsig <- subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5);   # balanced sites
sig <- subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5);      # imbalanced sites
nrow(subset(sig,sum/length < 1))/nrow(sig)*100;
nrow(subset(nonsig,sum/length < 1))/nrow(nonsig)*100;
# Mappability distributions for balanced vs imbalanced sites.
nonsig_hist <- hist( (nonsig$sum/nonsig$length) ,breaks=seq(0,1,0.05));
sig_hist <- hist( (sig$sum/sig$length) ,breaks=seq(0,1,0.05));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="",ylab="",main="",col=c("white","grey"));
data <- merge(complete_data1,mappability_1mm,by.x=c("chr","pos"),by.y=c("chr","position"));
# ASRA distributions for perfect vs imperfect mappability.
perfect_hist <- hist(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_1mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_1mm_pie_by_mapp.pdf");
# BUG FIX: pie() takes a single vector of slice values. The original call
# pie(nrow(perfect),nrow(imperfect),labels="") passed nrow(imperfect)
# positionally to `labels` and then also supplied labels="", which errors with
# "formal argument 'labels' matched by multiple actual arguments".
# Same vector idiom as the pie() calls earlier in this file.
pie(c(nrow(perfect),nrow(imperfect)),labels="");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_1mm_boxplot_perfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");   # expected ASRA under no bias
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_1mm_boxplot_imperfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
d1_perfect <- density(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),from=0,to=1);
d1_imperfect <- density(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),from=0,to=1);
plot(d1_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d1_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
# 2 mismatch
# Same analysis as the 1-mismatch block, using complete_data2 and the
# 2-mismatch mappability track; "unbiased" here means fewer than 2 neighbors.
mappability_2mm <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m2.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data2,mappability_2mm,by.x=c("chr","pos"),by.y=c("chr","position"));
write.table(data,file="/Users/kraigrs/Desktop/exons/sim_SNPs_2mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
temp <- subset(data,neighbor<2);
write.table(temp,file="/Users/kraigrs/Desktop/exons/sim_SNPs_2mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(temp,sum/length == 1);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_2mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(temp,sum/length == 1))*100;
nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5))*100; #
nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5))*100; #
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
nonsig <- subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5);
sig <- subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5);
nrow(subset(sig,sum/length < 1))/nrow(sig)*100;
nrow(subset(nonsig,sum/length < 1))/nrow(nonsig)*100;
nonsig_hist <- hist( (nonsig$sum/nonsig$length) ,breaks=seq(0,1,0.05));
sig_hist <- hist( (sig$sum/sig$length) ,breaks=seq(0,1,0.05));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="",ylab="",main="",col=c("white","grey"));
data <- merge(complete_data2,mappability_2mm,by.x=c("chr","pos"),by.y=c("chr","position"));
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
perfect_hist <- hist(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_2mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_2mm_boxplot_perfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_2mm_boxplot_imperfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_2mm_pie_by_mapp.pdf");
# BUG FIX: pie() needs its slice values in one vector; the original
# pie(nrow(perfect),nrow(imperfect),labels="") errors because nrow(imperfect)
# and labels="" both match the `labels` argument.
pie(c(nrow(perfect),nrow(imperfect)),labels="");
dev.off();
# NOTE(review): unlike the 1-mismatch block, these densities omit from=0,to=1,
# so the KDE support extends past [0,1] — confirm whether that is intended.
d2_perfect <- density(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele));
d2_imperfect <- density(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele));
plot(d2_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d2_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
# 3 mismatch
# Same analysis as the 1- and 2-mismatch blocks, using complete_data3 and the
# 3-mismatch mappability track; "unbiased" means fewer than 3 neighbors.
mappability_3mm <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m3.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data3,mappability_3mm,by.x=c("chr","pos"),by.y=c("chr","position"));
write.table(data,file="/Users/kraigrs/Desktop/exons/sim_SNPs_3mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
temp <- subset(data,neighbor<3);
write.table(temp,file="/Users/kraigrs/Desktop/exons/sim_SNPs_3mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(temp,sum/length == 1);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_3mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(temp,sum/length == 1))*100;
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
nonsig <- subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5);
sig <- subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5);
nrow(subset(sig,sum/length < 1))/nrow(sig)*100;
nrow(subset(nonsig,sum/length < 1))/nrow(nonsig)*100;
nonsig_hist <- hist( (nonsig$sum/nonsig$length) ,breaks=seq(0,1,0.05));
sig_hist <- hist( (sig$sum/sig$length) ,breaks=seq(0,1,0.05));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="",ylab="",main="",col=c("white","grey"));
data <- merge(complete_data3,mappability_3mm,by.x=c("chr","pos"),by.y=c("chr","position"));
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
perfect_hist <- hist(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_3mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_3mm_pie_by_mapp.pdf");
# BUG FIX: pie() needs its slice values in one vector; the original
# pie(nrow(perfect),nrow(imperfect),labels="") errors because nrow(imperfect)
# and labels="" both match the `labels` argument.
pie(c(nrow(perfect),nrow(imperfect)),labels="");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_3mm_boxplot_perfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_3mm_boxplot_imperfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
d3_perfect <- density(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele));
d3_imperfect <- density(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele));
plot(d3_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d3_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
# 0 mismatches
ref_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
alt_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
mappability_0mm <- merge(ref_mappability,alt_mappability,by.x=c("chr","position"),by.y=c("chr","position"));
data <- merge(complete_data0,mappability_0mm,by.x=c("chr","pos"),by.y=c("chr","position"));
temp <- data;
write.table(complete_data0,file="/Users/kraigrs/Desktop/exons/sim_SNPs_0mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
write.table(complete_data0,file="/Users/kraigrs/Desktop/exons/sim_SNPs_0mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(complete_data0,((sum.x/length.x)+(sum.y/length.y)) == 2);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_0mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5 & ((sum.x/length.x)+(sum.y/length.y)) == 2))/nrow(subset(temp,((sum.x/length.x)+(sum.y/length.y)) == 2))*100;
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5 & ((sum.x/length.x)+(sum.y/length.y)) < 2))/nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
# ---- Mappability vs. allele-specific read assignment (multi-genome, 0 mm) ----
# Expects `temp`: SNP read counts (dm3_ref_ref_allele / dm3_alt_alt_allele)
# merged with per-allele mappability (sum.x/length.x for one genome,
# sum.y/length.y for the other) -- built earlier in this script; TODO confirm.
# A SNP with (sum.x/length.x)+(sum.y/length.y) == 2 is perfectly mappable on
# both alleles; < 2 means at least one allele is imperfectly mappable.
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5 & ((sum.x/length.x)+(sum.y/length.y)) < 2))/nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
perfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) == 2);
imperfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) < 2);
# Split SNPs by whether the observed reference-allele fraction is exactly 0.5.
nonsig <- subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5);
sig <- subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5);
#nrow(subset(sig,sum.x/length.x + sum.y/length.y != 2))/nrow(sig)*100;
#nrow(subset(nonsig,sum.x/length.x + sum.y/length.y != 2))/nrow(nonsig)*100;
# Compare combined-mappability distributions of balanced vs. imbalanced SNPs
# (counts normalized to proportions within each class).
nonsig_hist <- hist( (nonsig$sum.x/nonsig$length.x)+(nonsig$sum.y/nonsig$length.y) ,breaks=seq(0,2,0.1));
sig_hist <- hist( (sig$sum.x/sig$length.x)+(sig$sum.y/sig$length.y) ,breaks=seq(0,2,0.1));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.1,2,0.1),xlab="",ylab="",main="",col=c("white","grey"));
# Scatter: log2 per-allele mappability ratio vs. reference-allele fraction.
# NOTE(review): uses `data`, not `temp` -- confirm the intended data frame.
plot(log2((data$sum.x/data$length.x)/(data$sum.y/data$length.y)),data$dm3_ref_ref_allele/(data$dm3_ref_ref_allele+data$dm3_alt_alt_allele),xlab="",ylab="",pch=19,col=rgb(0,0,0,0.3),cex=0.7);
perfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) == 2);
imperfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) != 2);
# Reference-allele-fraction histograms, separately for the two mappability
# classes, written to PDF as a side-by-side barplot of proportions.
perfect_hist <- hist(perfect$dm3_ref_ref_allele/(perfect$dm3_ref_ref_allele+perfect$dm3_alt_alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$dm3_ref_ref_allele/(imperfect$dm3_ref_ref_allele+imperfect$dm3_alt_alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_0mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
# Pie chart: counts of perfectly vs. imperfectly mappable SNPs.
# BUG FIX: pie() takes a single vector of slice sizes as its first argument;
# the original passed nrow(imperfect) as a second positional argument, which
# collided with the named labels="" argument (error: formal argument 'labels'
# matched by multiple actual arguments).  Both counts are now wrapped in c(),
# matching the pie(c(...)) calls used elsewhere in this script.
pdf(file="/Users/kraigrs/Desktop/plots/sim_0mm_pie_by_mapp.pdf");
pie(c(nrow(perfect),nrow(imperfect)),labels="");
dev.off();
# ---- Reference-allele fraction vs. neighboring-SNP count, per class ----
# `neighbor_plus` is the neighboring-SNP count + 1 (computed later in this
# script for other data frames) -- TODO confirm `perfect`/`imperfect` carry
# this column when these plots are run.
pdf(file="/Users/kraigrs/Desktop/plots/sim_0mm_boxplot_perfect.pdf");
boxplot(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_0mm_boxplot_imperfect.pdf");
boxplot(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
# Overlay kernel densities of the reference-allele fraction per class
# (par(new=TRUE) draws the second curve on the same device).
d0_perfect <- density(perfect$dm3_ref_ref_allele/(perfect$dm3_ref_ref_allele+perfect$dm3_alt_alt_allele));
d0_imperfect <- density(imperfect$dm3_ref_ref_allele/(imperfect$dm3_ref_ref_allele+imperfect$dm3_alt_alt_allele));
plot(d0_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d0_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
#########################
# ---- Concordance of mappability bias and apparent ASE ----
# Scatter of per-SNP mappability fraction vs. reference-allele fraction.
plot((data$sum.x/data$length.x)/(data$sum.x/data$length.x+data$sum.y/data$length.y),data$dm3_ref_ref_allele/(data$dm3_ref_ref_allele+data$dm3_alt_alt_allele),xlab="mappability",ylab="proportion of reference allele",pch=19,col=rgb(0,0,0,0.5),cex=0.5)
# Restrict to SNPs with both imbalanced reads and imbalanced mappability;
# NOTE(review): the second assignment loosens the first filter -- confirm
# which subset was intended downstream.
data <- subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5 & (sum.x/length.x)/(sum.x/length.x+sum.y/length.y) != 0.5);
data <- subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5);
# How many SNPs have mappability bias and read bias in the same direction?
nrow(subset(data, sign((sum.x/length.x)/(sum.x/length.x+sum.y/length.y)-0.5) == sign(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)-0.5) ));
# ---- Multi-genome (v0) SNP counts joined with per-SNP mappability ----
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
data <- merge(multiple,SNP_mappability,by.x=c("chr","pos"),by.y=c("chr","position"));
nrow(subset(data,dm3_ref_ref_allele > 0 & dm3_alt_alt_allele > 0 & log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0));
nrow(subset(data,dm3_ref_ref_allele > 0 & dm3_alt_alt_allele > 0 & log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0 & log2((sum.x/length.x)/(sum.y/length.y)) != 0));
nrow(subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5));
nrow(subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5 & log2((sum.x/length.x)/(sum.y/length.y)) != 0));
# Among SNPs with both biases present, how often do signs agree?
poor_map <- subset(data,dm3_ref_ref_allele > 0 & dm3_alt_alt_allele > 0 & log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0 & log2((sum.x/length.x)/(sum.y/length.y)) != 0);
nrow(subset(poor_map,sign(log2(dm3_ref_ref_allele/dm3_alt_alt_allele)) == sign(log2((sum.x/length.x)/(sum.y/length.y)))));
# ---- Proportion of AI / non-AI SNPs removed by a perfect-mappability filter --
AI <- subset(data,ref_allele.x > 0 & alt_allele.x > 0 & log2(ref_allele.x/alt_allele.x) != 0);
AI_prop <- nrow(subset(AI,sum/length != 1))/nrow(AI);
ASE <- subset(data,ref_allele.x > 0 & alt_allele.x > 0 & log2(ref_allele.x/alt_allele.x) == 0);
ASE_prop <- nrow(subset(ASE,sum/length != 1))/nrow(ASE);
# SNPs with no neighbors and AI
temp1 <- subset(AI,neighbor==0);
AI_prop <- nrow(subset(temp1,sum/length != 1))/nrow(temp1);
temp2 <- subset(ASE,neighbor==0);
ASE_prop <- nrow(subset(temp2,sum/length != 1))/nrow(temp2);
props <- c(AI_prop,ASE_prop);
barplot(props,names.arg=c("AI\nn = 179","not AI\nn = 38,819"),ylim=c(0,1),ylab="Proportion filtered out in each category",main="Filtering imperfect mappability retains many SNPs");
barplot(c(ASE_prop,AI_prop),xlim=c(0,1),horiz=TRUE,names.arg=c("No AI\nn = 179","AI\nn = 38,819"),width=0.3,xlab="Proportion removed due to imperfect mappability",main="SNPs");
##################################
# plot mappability across genome #
##################################
library(lattice);
# Per-exon mappability for both allele-specific genomes, averaged per exon.
dm3_ref_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
dm3_alt_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
exon_mappability = merge(dm3_ref_exon_mappability,dm3_alt_exon_mappability,by.x="locus",by.y="locus");
dm3_ref_avg <- exon_mappability$sum.x/exon_mappability$length.x;
dm3_alt_avg <- exon_mappability$sum.y/exon_mappability$length.y;
mappability <- cbind(exon_mappability,dm3_ref_avg,dm3_alt_avg);
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
SNPsInExons <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_in_const.txt",header=FALSE,sep="\t");
temp1 <- merge(multiple,mappability,by.x="gene_exon",by.y="locus");
temp2 <- merge(temp1,SNPsInExons,by.x="gene_exon",by.y="V8");
temp3 <- subset(temp2,dm3 > 0 & line_40 > 0);
# Lattice panels per chromosome arm (V1): mappability ratio along the arm,
# then mappability ratio vs. measured exon-level ASE.
xyplot(dm3_ref_avg/dm3_alt_avg ~ (V2+V3)/2 | V1, data = temp3,xlab="Midpoint of exon",ylab="dm3/line_40 mappability",main="Mappability across both allele-specific genomes",pch=19,cex=0.4,col=rgb(0,0,0,0.3),layout=c(2,3));
xyplot(dm3_ref_avg/dm3_alt_avg ~ log2(dm3/line_40) | V1, data = temp3,xlab="log2(dm3/line_40) ASE",ylab="dm3/line_40 mappability",main="Concordance of mappability direction and bias in ASE",pch=19,cex=0.4,col=rgb(0,0,0,0.3));
# how many exons show opposite signs of mappability and bias? 0.2%, so 99.8% show same sign, awesome!
# also, of the 660 that do show differential mappability, 237 favor dm3_ref and 423 favor dm3_alt
sum( sign(log2(temp3$dm3_ref_avg/temp3$dm3_alt_avg)) != sign(log2(temp3$dm3/temp3$line_40)) );
# correlation? r^2 = 0.6
# NOTE(review): uses dm3_ref/dm3_alt columns here instead of dm3/line_40 as
# above -- confirm these columns exist in temp3.
cor(log2(temp3$dm3_ref_avg/temp3$dm3_alt_avg),log2(temp3$dm3_ref/temp3$dm3_alt))^2;
###############
# ---- Exon-level AI proportions for bowtie -v 0..3 (single genome) ----
SNPsInExons <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_in_const.txt",header=FALSE,sep="\t");
v0 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
v1 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.exons.txt",header=TRUE,sep="\t");
v2 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v2_m1.exons.txt",header=TRUE,sep="\t");
v3 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v3_m1.exons.txt",header=TRUE,sep="\t");
v0 <- merge(v0,SNPsInExons,by.x="gene_exon",by.y="V8");
v1 <- merge(v1,SNPsInExons,by.x="gene_exon",by.y="V8");
v2 <- merge(v2,SNPsInExons,by.x="gene_exon",by.y="V8");
v3 <- merge(v3,SNPsInExons,by.x="gene_exon",by.y="V8");
# Fraction of SNP-containing (V7>0), detectable exons showing imbalance,
# and counts of detectable exons, per mismatch setting.
nrow(subset(v0,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v0);
nrow(subset(v1,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v1);
nrow(subset(v2,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v2);
nrow(subset(v3,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v3);
nrow(subset(v0,dm3>0&line_40>0&V7>0));
nrow(subset(v1,dm3>0&line_40>0&V7>0));
nrow(subset(v2,dm3>0&line_40>0&V7>0));
nrow(subset(v3,dm3>0&line_40>0&V7>0));
# ---- Mappability filtering for the single-genome (dm3_ref, 1 mm) approach ----
# BUG FIX: the AI subset() call below originally ended with an unbalanced
# extra ")" which is a syntax error; the stray parenthesis has been removed.
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
SNP_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m1.mappability.txt",header=TRUE,sep="\t");
# NOTE(review): merges `complete_data` (neighbor-annotated SNP counts built
# elsewhere in this script), not the `data` just read above -- confirm which
# input was intended.
data <- merge(complete_data,SNP_mappability,by.x=c("chr","pos"),by.y=c("chr","position"));
# AI = SNPs whose reference-allele fraction deviates from 0.5.
AI <- subset(data, ref_allele/(ref_allele+alt_allele) != 0.5);
# Proportion of AI SNPs removed by requiring perfect mappability (sum/length == 1).
AI_prop <- nrow(subset(AI,sum/length != 1))/nrow(AI);
# NOTE(review): ASE uses merge-suffixed columns (ref_allele.x) while AI uses
# unsuffixed ones -- verify against the merged column names.
ASE <- subset(data,ref_allele.x > 0 & alt_allele.x > 0 & log2(ref_allele.x/alt_allele.x) == 0);
ASE_prop <- nrow(subset(ASE,sum/length != 1))/nrow(ASE);
# SNPs with no neighbors and AI
temp1 <- subset(AI,neighbor==0);
AI_prop <- nrow(subset(temp1,sum/length != 1))/nrow(temp1);
temp2 <- subset(ASE,neighbor==0);
ASE_prop <- nrow(subset(temp2,sum/length != 1))/nrow(temp2);
##############
# alternative allele used as genome
# ---- Same analysis but mapping against the alternate (line_40) genome, so
# the plotted fraction is alt_allele/(alt_allele+ref_allele). ----
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_alt_line_40.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
# NOTE(review): this file apparently carries a `neighbors` column (plural),
# unlike the `neighbor` column computed elsewhere -- confirm.
neighbors_plus <- data$neighbors+1;
data <- cbind(data,neighbors_plus);
boxplot(alt_allele/(alt_allele+ref_allele) ~ neighbors_plus,data = data, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.2));
abline(h=0.5,lty=2,col="red");
# Pie: NOTE(review) -- second slice is nrow(data) (all SNPs), not the
# balanced-SNP count, so slices are not a partition; confirm intent.
pie(c(nrow(subset(data,alt_allele/(ref_allele+alt_allele)!=0.5)),nrow(data)),col=c("gray","white"),labels="");
|
/simulations/R/SNP_ASE_line_40.R
|
no_license
|
kraigrs/thesis_work
|
R
| false
| false
| 51,657
|
r
|
# ---- Load per-SNP counts for bowtie -v 1/2/3 (single genome) and -v 0
# (multiple genomes), compare balanced-SNP proportions, and draw pies. ----
data1 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
data2 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v2_m1.SNPs.txt",header=TRUE,sep="\t");
data3 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v3_m1.SNPs.txt",header=TRUE,sep="\t");
data0 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
DGRP <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_const.txt",header=FALSE,sep="\t");
data <- merge(data0,DGRP,by.x=c("chr","pos"),by.y=c("V1","V3"));
# Fraction of SNPs with exactly balanced reads per mismatch setting
# (`temp` for v0 is built elsewhere -- TODO confirm it exists here).
nrow(subset(data1,ref_allele/(ref_allele+alt_allele)==0.5))/nrow(data1);
nrow(subset(data2,ref_allele/(ref_allele+alt_allele)==0.5))/nrow(data2);
nrow(subset(data3,ref_allele/(ref_allele+alt_allele)==0.5))/nrow(data3);
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)==0.5))/nrow(temp);
### pie charts ###
# Gray slice = imbalanced SNPs, white slice = balanced SNPs, one PDF per -v.
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v1_pie.pdf");
pie(c(nrow(subset(data1,ref_allele/(ref_allele+alt_allele)!=0.5)),nrow(subset(data1,ref_allele/(ref_allele+alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v2_pie.pdf");
pie(c(nrow(subset(data2,ref_allele/(ref_allele+alt_allele)!=0.5)),nrow(subset(data2,ref_allele/(ref_allele+alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v3_pie.pdf");
pie(c(nrow(subset(data3,ref_allele/(ref_allele+alt_allele)!=0.5)),nrow(subset(data3,ref_allele/(ref_allele+alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_v0_pie.pdf");
pie(c(nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)!=0.5)),nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)==0.5))),col=c("gray","white"),labels="");
dev.off();
# Restrict to SNPs with reads on both alleles, then summarize log2 ratios
# and neighboring-SNP counts per mismatch setting.
data1 <- subset(data1,ref_allele>0&alt_allele>0);
data2 <- subset(data2,ref_allele>0&alt_allele>0);
data3 <- subset(data3,ref_allele>0&alt_allele>0);
summary(log2(data1$ref_allele/data1$alt_allele));
summary(log2(data2$ref_allele/data2$alt_allele));
summary(log2(data3$ref_allele/data3$alt_allele));
summary(data0$neighbor);
summary(data1$neighbor);
summary(data2$neighbor);
summary(data3$neighbor);
# Combine v1/v2/v3 into one data frame keyed by (chr, pos);
# merge suffixes become .x (v1) and .y (v2), v3 keeps bare names.
temp1 <- merge(data1,data2,by.x=c("chr","pos"),by.y=c("chr","pos"));
data <- merge(temp1,data3,by.x=c("chr","pos"),by.y=c("chr","pos"));
# Reload v0 counts joined with 0-mm per-SNP mappability on both genomes.
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
ref_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
alt_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
mappability_0mm <- merge(ref_mappability,alt_mappability,by.x=c("chr","position"),by.y=c("chr","position"));
data <- merge(data,mappability_0mm,by.x=c("chr","pos"),by.y=c("chr","position"));
#data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_alt_line_40.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v3_m1.SNPs.txt",header=TRUE,sep="\t");
###### figure out how many SNPs are within 50b of each SNP ######
### start iterative run here
# For every SNP in `data`, count other SNPs within 50 bp on the same
# chromosome arm (left + right), appending the result as `neighbor`.
# Positions are sorted per arm; SNPs_left/SNPs_right hold per-side counts.
# NOTE(review): the inner code reassigns `j`, which is also the outer
# chromosome-loop index.  In R, for() reassigns the index from the sequence
# at each iteration, so this is harmless, but it is fragile -- consider
# renaming if this loop is ever refactored.
# NOTE(review): the edge cases (first/last SNP of an arm) increment/step in
# slightly different order than the interior case -- confirm the boundary
# counts are as intended.
complete_data <- NULL;
chromosomes <- c("chr2L","chr2R","chr3L","chr3R","chrX");
for(j in 1:length(chromosomes))
{
arm <- chromosomes[j];
chrom <- subset(data,chr == arm);
chrom_sorted <- chrom[order(chrom$pos),];
chrom <- chrom_sorted;
steps_right = 0;
steps_left = 0;
SNPs_left <- mat.or.vec(nrow(chrom),1);
SNPs_right <- mat.or.vec(nrow(chrom),1);
for(i in 1:nrow(chrom))
{
# First SNP on the arm: nothing to the left; scan right while within 50 bp.
if(i == 1)
{
SNPs_left[i] <- 0;
k <- i + 1;
right <- chrom$pos[k] - chrom$pos[i];
while(right < 50)
{
steps_right <- steps_right + 1;
if(k == nrow(chrom)){break;}
k <- k + 1;
right <- chrom$pos[k] - chrom$pos[i];
}
SNPs_right[i] = steps_right;
}
# Last SNP on the arm: nothing to the right; scan left while within 50 bp.
if(i == nrow(chrom))
{
SNPs_right[i] <- 0;
j <- i - 1;
left <- chrom$pos[i] - chrom$pos[j];
while(left < 50)
{
steps_left <- steps_left + 1;
if(j == 1){break;}
j <- j - 1;
left <- chrom$pos[i] - chrom$pos[j];
}
SNPs_left[i] = steps_left;
}
# Interior SNP: scan outward in both directions, resetting the counters.
j <- i - 1;
k <- i + 1;
steps_left <- 0;
steps_right <- 0;
if(j != 0 & k != (nrow(chrom)+1))
{
left <- chrom$pos[i] - chrom$pos[j];
right <- chrom$pos[k] - chrom$pos[i];
while(left < 50)
{
steps_left <- steps_left + 1;
j <- j - 1;
if(j == 0){break;}
left <- chrom$pos[i] - chrom$pos[j];
}
SNPs_left[i] = steps_left;
while(right < 50)
{
steps_right <- steps_right + 1;
k <- k + 1;
if(k == (nrow(chrom)+1)){break;}
right <- chrom$pos[k] - chrom$pos[i];
}
SNPs_right[i] = steps_right;
}
}
# Total neighbors = left + right; accumulate arms into complete_data.
neighbor <- SNPs_left+SNPs_right;
chrom <- cbind(chrom,neighbor);
complete_data <- rbind(complete_data,chrom);
}
# neighbor_plus shifts counts by 1 (so boxplot categories start at 1).
neighbor_plus <- complete_data$neighbor+1;
complete_data <- cbind(complete_data,neighbor_plus);
summary(complete_data$neighbor_plus);
complete_data0 <- complete_data;
# run the previous iteratively to get complete dataset
#write.table(complete_data,file="/Users/kraigrs/Wittkopp/Simulations/tiled/constExons_single_bp50_error0_tiled.dm3_ref.bowtie_mm1.neighbor_SNPs.txt",quote=FALSE,sep="\t",row.names=FALSE);
#write.table(ref_bad_SNPs,file="/Users/kraigrs/Wittkopp/Simulations/tiled/ref_bad_SNPs.txt",quote=FALSE,sep="\t",row.names=FALSE,col.names=FALSE);
#write.table(alt_bad_SNPs,file="/Users/kraigrs/Wittkopp/Simulations/tiled/alt_bad_SNPs.txt",quote=FALSE,sep="\t",row.names=FALSE,col.names=FALSE);
# ---- Overlaid boxplots of log2(ref/alt) by neighbor count for 1/2/3 mm ----
# (columns .x = 1 mm, .y = 2 mm, bare = 3 mm after the earlier merges;
# TODO confirm complete_data carries these suffixed columns here).
# 1 mismatch
boxplot(log2(ref_allele.x/alt_allele.x) ~ neighbor,data = subset(complete_data,ref_allele.x > 0 & alt_allele.x > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt) among SNPs",border=rgb(0,0,0,0.5),xlim=c(0,13),ylim=c(-6,6));
par(new=TRUE);
# 2 mismatches
boxplot(log2(ref_allele.y/alt_allele.y) ~ neighbor,data = subset(complete_data, ref_allele.y > 0 & alt_allele.y > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt)",border=rgb(1,0,0,0.5),xlim=c(0,13),ylim=c(-6,6));
par(new=TRUE);
# 3 mismatches
boxplot(log2(ref_allele/alt_allele) ~ neighbor,data = subset(complete_data, ref_allele > 0 & alt_allele > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt)",border=rgb(0,0,1,0.5),xlim=c(0,13),ylim=c(-6,6));
boxplot(log2(ref_allele/alt_allele) ~ neighbor,data = subset(complete_data, ref_allele > 0 & alt_allele > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt)",main="DGRP line_40 single reference (3mm)",xlim=c(0,13),ylim=c(-7,7),pars = list(outpch=19,cex=0.2));
par(mfrow=c(2,2));
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = complete_data, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.2));
abline(h=0.5,lty=2,col="red");
boxplot(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) ~ neighbor_plus,data = complete_data, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.2));
abline(h=0.5,lty=2,col="red");
#### bubble plot ####
# Circle area scales with total_overlap (radius = sqrt(area/pi)).
data <- subset(complete_data,is.finite(log2(ref_allele/alt_allele)) & !is.na(log2(ref_allele/alt_allele)));
radii <- sqrt(data$total_overlap/pi);
symbols(
data$neighbor+1,
log2(data$ref_allele/data$alt_allele),
circles=radii);
#### neighbor plus ####
# Cap neighbor counts at 6 ("6" becomes a catch-all category; note it is a
# character, so the resulting column is coerced to character/factor).
neighbor_plus <- rep(0,nrow(complete_data));
for(i in 1:nrow(complete_data))
{
if(complete_data$neighbor[i] < 6){neighbor_plus[i] <- complete_data$neighbor[i];}
else{neighbor_plus[i] <- "6";}
}
temp <- cbind(complete_data,neighbor_plus);
boxplot(log2(ref_allele.x/alt_allele.x) ~ neighbor_plus, data = subset(temp,ref_allele.x > 0 & alt_allele.x > 0),varwidth = TRUE,xlab="Number of neighboring SNPs",ylab="Distribution of log2(ref/alt) among SNPs",main="(# neighboring SNPs capped at 6)",border=rgb(0,0,0,0.5),ylim=c(-3,7));
abline(h = 0,col="red",lty="dashed");
#############################################
# plot medians for the different mismatches #
#############################################
# Per neighbor-count bin (0..12), compute the median and variance of the
# reference-allele fraction for 1/2/3 mm (.x/.y/bare columns), then plot
# medians and variances against neighbor count.
summary(complete_data$neighbor); # range of neighboring SNPs [0,22]
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
both <- merge(complete_data,multiple,by.x=c("chr","pos"),by.y=c("chr","pos"));
# Cap neighbor counts at 6 for an alternative binning (character "6").
neighbor_plus <- rep(0,nrow(both));
for(i in 1:nrow(both))
{
if(both$neighbor[i] < 6){neighbor_plus[i] <- both$neighbor[i];}
else{neighbor_plus[i] <- "6";}
}
both <- cbind(both,neighbor_plus);
#medians <- mat.or.vec(7,5);
#colnames(medians) <- c("neighbor","v1","v2","v3","multiple");
medians <- mat.or.vec(13,4);
colnames(medians) <- c("neighbor","v1","v2","v3");
#vars <- mat.or.vec(7,5);
#colnames(vars) <- c("neighbor","v1","v2","v3","multiple");
vars <- mat.or.vec(13,4);
colnames(vars) <- c("neighbor","v1","v2","v3");
# NOTE(review): this discards the merged/capped `both` built just above and
# re-points it at complete_data -- confirm which dataset the medians loop
# should use.
both <- complete_data;
for(i in 1:13)
{
j <- i-1;
temp <- subset(both,neighbor == j);
#medians[i,1] <- j;
#medians[i,2] <- median(log2(temp$ref_allele.x/temp$alt_allele.x));
#medians[i,3] <- median(log2(temp$ref_allele.y/temp$alt_allele.y));
#medians[i,4] <- median(log2(temp$ref_allele/temp$alt_allele));
#medians[i,5] <- median(log2(temp$dm3_ref_ref_allele/temp$dm3_alt_alt_allele));
medians[i,1] <- j;
medians[i,2] <- median(temp$ref_allele.x/(temp$ref_allele.x+temp$alt_allele.x));
medians[i,3] <- median(temp$ref_allele.y/(temp$ref_allele.y+temp$alt_allele.y));
medians[i,4] <- median(temp$ref_allele/(temp$ref_allele+temp$alt_allele));
#medians[i,5] <- median(temp$dm3_ref_ref_allele/(temp$dm3_ref_ref_allele+temp$dm3_alt_alt_allele));
#vars[i,1] <- j;
#vars[i,2] <- var(log2(temp$ref_allele.x/temp$alt_allele.x));
#vars[i,3] <- var(log2(temp$ref_allele.y/temp$alt_allele.y));
#vars[i,4] <- var(log2(temp$ref_allele/temp$alt_allele));
#vars[i,5] <- var(log2(temp$dm3_ref_ref_allele/temp$dm3_alt_alt_allele));
vars[i,1] <- j;
vars[i,2] <- var(temp$ref_allele.x/(temp$ref_allele.x+temp$alt_allele.x));
vars[i,3] <- var(temp$ref_allele.y/(temp$ref_allele.y+temp$alt_allele.y));
vars[i,4] <- var(temp$ref_allele/(temp$ref_allele+temp$alt_allele));
#vars[i,5] <- var(log2(temp$dm3_ref_ref_allele/temp$dm3_alt_alt_allele));
}
# Overlay the three median curves on one set of axes (par(new=TRUE)).
plot(medians[,1],medians[,2],type="o",pch=1,col="black",xlim=c(0,12),ylim=c(0,1),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(medians[,1],medians[,3],type="o",pch=0,col="black",xlim=c(0,12),ylim=c(0,1),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(medians[,1],medians[,4],type="o",pch=2,col="black",xlim=c(0,12),ylim=c(0,1),xlab="# neighboring SNPs",ylab="median ref/(ref+alt)",main="Compare medians");
#legend("bottomright",legend=c("1 mm","2 mm","3 mm"),fill=c("black","red","green"),bty="n");
legend("bottomright",legend=c("1 mm","2 mm","3 mm"),pch=c(1,0,2),bty="n");
# Same overlay for variances (note xlim only reaches 6 here).
plot(vars[,1],vars[,2],type="o",col="black",xlim=c(0,6),ylim=c(0,3),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(vars[,1],vars[,3],type="o",col="red",xlim=c(0,6),ylim=c(0,3),xaxt="n",yaxt="n",xlab="",ylab="");
par(new=TRUE);
plot(vars[,1],vars[,4],type="o",col="green",xlim=c(0,6),ylim=c(0,3),xlab="# neighboring SNPs",ylab="variance log2(ASE)");
legend("topleft",legend=c("1 mismatch","2 mismatches","3 mismatches"),fill=c("black","red","green"));
# compare distributions of log2(ASE) for differing numbers of mismatches
# Restrict each mismatch setting to SNPs with reads on both alleles, bin the
# log2 ratios identically, and plot class-normalized proportions side by side.
total1 <- subset(complete_data,ref_allele.x > 0 & alt_allele.x > 0);
total2 <- subset(complete_data,ref_allele.y > 0 & alt_allele.y > 0);
total3 <- subset(complete_data,ref_allele > 0 & alt_allele > 0);
obj1 <- hist(log2(total1$ref_allele.x/total1$alt_allele.x),breaks=seq(-4.75,6.5,0.25));
obj2 <- hist(log2(total2$ref_allele.y/total2$alt_allele.y),breaks=seq(-4.75,6.5,0.25));
obj3 <- hist(log2(total3$ref_allele/total3$alt_allele),breaks=seq(-4.75,6.5,0.25));
mat <- cbind(obj1$counts/nrow(total1),obj2$counts/nrow(total2),obj3$counts/nrow(total3));
barplot(t(mat),beside=TRUE,names.arg=seq(-4.50,6.5,0.25),xlab="log2(ref/alt)",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","red","green"));
legend("topright",legend=c("1 mismatch","2 mismatches","3 mismatches"),fill=c("black","red","green"));
# compare distributions of fraction of reference allele for differing numbers of mismatches
obj1 <- hist(complete_data$ref_allele.x/(complete_data$ref_allele.x+complete_data$alt_allele.x),breaks=20);
obj2 <- hist(complete_data$ref_allele.y/(complete_data$ref_allele.y+complete_data$alt_allele.y),breaks=20);
obj3 <- hist(complete_data$ref_allele/(complete_data$ref_allele+complete_data$alt_allele),breaks=20);
mat <- cbind(obj1$counts/nrow(total1),obj2$counts/nrow(total2),obj3$counts/nrow(total3));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="Fraction of reference allele",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","red","green"));
legend("topright",legend=c("1 mismatch","2 mismatches","3 mismatches"),fill=c("black","red","green"));
# Keep only the upper part of the histograms (rows 14+) and draw one panel
# per mismatch setting on a shared y scale.
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts,
obj3$breaks[2:length(obj3$breaks)],obj3$counts);
temp <- temp[14:nrow(temp),];
par(mfrow=c(1,3));
barplot(temp[,2]/nrow(total1),names.arg=temp[,1],xlab="",ylab="Proportion",main="\n\n1 mismatch",col="gray",ylim=c(0,1),cex.axis=1.5,cex.names=1.5,cex.lab=1.5,cex.main=1.5);
barplot(temp[,4]/nrow(total2),names.arg=temp[,3],xlab="Fraction of reference allele",ylab="",main="SNP-based ASE using single reference\n\n2 mismatches",col="gray",ylim=c(0,1),cex.axis=1.5,cex.names=1.5,cex.lab=1.5,cex.main=1.5);
barplot(temp[,6]/nrow(total3),names.arg=temp[,5],xlab="",ylab="",main="\n\n3 mismatches",col="gray",ylim=c(0,1),cex.axis=1.5,cex.names=1.5,cex.lab=1.5,cex.main=1.5);
# Pies: imbalanced (gray) vs. balanced (white) SNPs per mismatch setting,
# first by log2 ratio, then by reference-allele fraction.
pie(c(nrow(subset(total1,log2(ref_allele.x/alt_allele.x) != 0)),nrow(subset(total1,log2(ref_allele.x/alt_allele.x) == 0))),col=c("gray","white"),labels="");
pie(c(nrow(subset(total2,log2(ref_allele.y/alt_allele.y) != 0)),nrow(subset(total2,log2(ref_allele.y/alt_allele.y) == 0))),col=c("gray","white"),labels="");
pie(c(nrow(subset(total3,log2(ref_allele/alt_allele) != 0)),nrow(subset(total3,log2(ref_allele/alt_allele) == 0))),col=c("gray","white"),labels="");
pie(c(nrow(subset(total1,ref_allele.x/(ref_allele.x+alt_allele.x) != 0.5)),nrow(subset(total1,ref_allele.x/(ref_allele.x+alt_allele.x) == 0.5))),col=c("gray","white"),labels=c("AI","no AI"));
pie(c(nrow(subset(total2,ref_allele.y/(ref_allele.y+alt_allele.y) != 0.5)),nrow(subset(total2,ref_allele.y/(ref_allele.y+alt_allele.y) == 0.5))),col=c("gray","white"),labels=c("AI","no AI"));
pie(c(nrow(subset(total3,ref_allele/(ref_allele+alt_allele) != 0.5)),nrow(subset(total3,ref_allele/(ref_allele+alt_allele) == 0.5))),col=c("gray","white"),labels=c("AI","no AI"));
# new plots
# ---- Proportion of SNPs showing AI for 1/2/3 mm (single genome) vs. 0 mm
# (multiple genomes), restricted to SNPs detectable under all settings. ----
nrow(subset(both,ref_allele.x>0&alt_allele.x>0&ref_allele.y>0&alt_allele.y>0&ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0));
posASE <- subset(both,ref_allele.x>0&alt_allele.x>0&ref_allele.y>0&alt_allele.y>0&ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0);
props <- c(
nrow( subset(posASE,log2(ref_allele.x/alt_allele.x) != 0) )/nrow(posASE),
nrow( subset(posASE,log2(ref_allele.y/alt_allele.y) != 0) )/nrow(posASE),
nrow( subset(posASE,log2(ref_allele/alt_allele) != 0) )/nrow(posASE),
nrow( subset(posASE,log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0) )/nrow(posASE)
);
barplot(props,names.arg=c(1,2,3,0),ylim=c(0,1),xlab="Number of mismatches",ylab="Proportion of AI",main="Comparison of single and multiple genomes");
# Same comparison without the detectability filter (denominator = all of
# `both`), using the reference-allele-fraction definition of AI.
nrow(subset(both,ref_allele.x>0&alt_allele.x>0&ref_allele.y>0&alt_allele.y>0&ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0));
posASE <- both
props <- c(
nrow( subset(both,ref_allele.x/(ref_allele.x+alt_allele.x) != 0.5) )/nrow(posASE),
nrow( subset(both,ref_allele.y/(ref_allele.y+alt_allele.y) != 0.5) )/nrow(posASE),
nrow( subset(both,ref_allele/(ref_allele+alt_allele) != 0.5) )/nrow(posASE),
nrow( subset(both,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5) )/nrow(posASE)
);
barplot(props,names.arg=c(1,2,3,0),ylim=c(0,1),xlab="Number of mismatches",ylab="Proportion of AI",main="Comparison of single and multiple genomes");
###################
# compare methods #
###################
##### compare single vs. multiple ASE measurements in exons
# Merge exon-level counts from the single-genome (v1) and multiple-genome
# (v0) pipelines; merge suffixes: .x = single, .y = multiple.
single <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.exons.txt",header=TRUE,sep="\t");
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
single_multiple <- merge(single, multiple,by.x="gene_exon",by.y="gene_exon");
posASE <- subset(single_multiple,dm3.x>0&line_40.x>0&dm3.y>0&line_40.y>0);
obj1 <- hist(log2(posASE$dm3.x/posASE$line_40.x),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE$dm3.y/posASE$line_40.y),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE),obj2$counts/nrow(posASE));
barplot(t(mat),beside=TRUE,names.arg=seq(-4.75,8,0.25),xlab="log2(reference allele/alternative allele)",ylab="Proportion",main="Exon-based measurements of ASE",col=c("black","gray"));
legend("topleft",legend=c("single genome, AI = 0.482","multiple genomes, AI = 0.011"),fill=c("black","gray"),bty="n");
# Keep only the central histogram rows (18:47) for the zoomed barplot.
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE),temp[,4]/nrow(posASE));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="log2(dm3/line_40)",ylab="Proportion",main="Exon-based measurements of ASE",col=c("black","gray"),ylim=c(0,1));
legend("topright",legend=c("single genome, AI = 0.482","multiple genomes, AI = 0.011"),fill=c("black","gray"),bty="n");
# AI
# Same, restricted to exons with detectable and imbalanced expression.
posASE_single <- subset(single_multiple,dm3.x>0&line_40.x>0&log2(dm3.x/line_40.x)!=0);
posASE_multiple <- subset(single_multiple,dm3.y>0&line_40.y>0&log2(dm3.y/line_40.y)!=0);
obj1 <- hist(log2(posASE_single$dm3.x/posASE_single$line_40.x),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE_multiple$dm3.y/posASE_multiple$line_40.y),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE_single),obj2$counts/nrow(posASE_multiple));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE_single),temp[,4]/nrow(posASE_multiple));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="",ylab="",main="",col=c("black","gray"),ylim=c(0,1));
legend("topleft",legend=c("single (n = 31,334)","multiple (n = 674)"),fill=c("black","gray"));
legend("topright",legend="Exons with detectable ASE\nshowing imbalance");
pie(c(nrow(posASE_single),nrow(posASE_multiple)),labels=c("Single","Multiple"),col=c("black","gray"));
##### compare single vs. multiple ASE measurements in SNPs
# Same comparison at the SNP level, keyed by (chr, pos).
single <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
single_multiple <- merge(single,multiple,by.x=c("chr","pos"),by.y=c("chr","pos"));
posASE <- subset(single_multiple,ref_allele>0&alt_allele>0&dm3_ref_ref_allele>0&dm3_alt_alt_allele>0);
obj1 <- hist(log2(posASE$ref_allele/posASE$alt_allele),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE$dm3_ref_ref_allele/posASE$dm3_alt_alt_allele),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE),obj2$counts/nrow(posASE));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE),temp[,4]/nrow(posASE));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="log2(dm3/line_40)",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","gray"),ylim=c(0,1));
legend("topright",legend=c("single genome, AI = 0.507","multiple genomes, AI = 0.003"),fill=c("black","gray"),bty="n");
legend("topright",legend="78,860 SNPs with detectable ASE");
# AI
posASE_single <- subset(single_multiple,ref_allele>0&alt_allele>0&log2(ref_allele/alt_allele)!=0);
posASE_multiple <- subset(single_multiple,dm3_ref_ref_allele>0&dm3_alt_alt_allele>0&log2(dm3_ref_ref_allele/dm3_alt_alt_allele)!=0);
obj1 <- hist(log2(posASE_single$ref_allele/posASE_single$alt_allele),breaks=seq(-6,7,0.25));
obj2 <- hist(log2(posASE_multiple$dm3_ref_ref_allele/posASE_multiple$dm3_alt_alt_allele),breaks=seq(-6,7,0.25));
mat <- cbind(obj1$counts/nrow(posASE_single),obj2$counts/nrow(posASE_multiple));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE_single),temp[,4]/nrow(posASE_multiple));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="",ylab="",main="",col=c("black","gray"),ylim=c(0,1));
# NOTE(review): legend labels mention "Exons" but this section is SNP-based;
# the n values also match the exon section above -- confirm.
legend("topleft",legend=c("single (n = 31,334)","multiple (n = 674)"),fill=c("black","gray"));
legend("topright",legend="Exons with detectable ASE\nshowing imbalance");
pie(c(nrow(posASE_single),nrow(posASE_multiple)),labels=c("Single","Multiple"),col=c("black","gray"));
###### redone using fraction of reference allele
##### compare single vs. multiple ASE measurements in SNPs
# Load per-SNP counts from the single-genome (v1, 1 mismatch) and
# multiple-genome (v0, exact-match) Bowtie runs, then join on SNP position.
single <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
single_multiple <- merge(single,multiple,by.x=c("chr","pos"),by.y=c("chr","pos"));
posASE <- single_multiple;
# Histograms of the reference-allele fraction under each approach.
obj1 <- hist(posASE$ref_allele/(posASE$ref_allele+posASE$alt_allele),breaks=20);
obj2 <- hist(posASE$dm3_ref_ref_allele/(posASE$dm3_ref_ref_allele+posASE$dm3_alt_alt_allele),breaks=20);
mat <- cbind(obj1$counts/nrow(posASE),obj2$counts/nrow(posASE));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
#temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE),temp[,4]/nrow(posASE));
# black = single genome (biased toward reference), gray = multiple genomes.
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="Fraction of reference allele",ylab="Proportion",main="SNP-based measurements of ASE",col=c("black","gray"),ylim=c(0,1));
legend("topright",legend=c("single genome, AI = 0.507","multiple genomes, AI = 0.003"),fill=c("black","gray"),bty="n");
legend("topright",legend="78,860 SNPs with detectable ASE");
# AI
# Restrict to SNPs whose reference-allele fraction deviates from 0.5 (i.e.
# apparent allelic imbalance) under each approach, and re-plot.
posASE_single <- subset(single_multiple,ref_allele/(ref_allele+alt_allele)!=0.5);
posASE_multiple <- subset(single_multiple,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)!=0.5);
obj1 <- hist(posASE_single$ref_allele/(posASE_single$ref_allele+posASE_single$alt_allele),breaks=20);
obj2 <- hist(posASE_multiple$dm3_ref_ref_allele/(posASE_multiple$dm3_ref_ref_allele+posASE_multiple$dm3_alt_alt_allele),breaks=20);
mat <- cbind(obj1$counts/nrow(posASE_single),obj2$counts/nrow(posASE_multiple));
temp <- cbind(obj1$breaks[2:length(obj1$breaks)],obj1$counts,
obj2$breaks[2:length(obj2$breaks)],obj2$counts);
#temp <- temp[14:nrow(temp),];
#temp <- temp[18:47,];
mat <- cbind(temp[,2]/nrow(posASE_single),temp[,4]/nrow(posASE_multiple));
barplot(t(mat),beside=TRUE,names.arg=temp[,1],xlab="",ylab="",main="",col=c("black","gray"),ylim=c(0,1));
# NOTE(review): legend counts below mention exons, but this section operates
# on SNPs — labels possibly copied from the exon-level block above; confirm.
legend("topleft",legend=c("single (n = 31,334)","multiple (n = 674)"),fill=c("black","gray"));
legend("topright",legend="Exons with detectable ASE\nshowing imbalance");
pie(c(nrow(posASE_single),nrow(posASE_multiple)),labels=c("Single","Multiple"),col=c("black","gray"));
#########################
###############
# mappability #
###############
# Exon-level mappability for both allele-specific genomes (50-bp reads,
# 0 mismatches). Each file reports, per locus, the summed mappable positions
# (`sum`) and exon length (`length`); their ratio is the average mappability.
dm3_ref_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
dm3_alt_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
exon_mappability = merge(dm3_ref_exon_mappability,dm3_alt_exon_mappability,by.x="locus",by.y="locus");
# .x columns come from the reference genome, .y from the alternate genome.
dm3_ref_avg <- exon_mappability$sum.x/exon_mappability$length.x;
dm3_alt_avg <- exon_mappability$sum.y/exon_mappability$length.y;
mappability <- cbind(exon_mappability,dm3_ref_avg,dm3_alt_avg);
# Per-exon allele counts from the multiple-genome run, plus SNP counts per
# exon (column V7 of the DGRP SNPs-in-exon table; V8 is the exon id).
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
SNPsInExons <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_in_const.txt",header=FALSE,sep="\t");
temp1 <- merge(multiple,mappability,by.x="gene_exon",by.y="locus");
temp2 <- merge(temp1,SNPsInExons,by.x="gene_exon",by.y="V8");
# "weird": exons showing imbalance despite containing zero SNPs (V7==0).
weird <- subset(temp2,dm3>0&line_40>0&V7==0&log2(dm3/line_40)!=0)[,c(1:4,11,12,19)];
# AI: exons with SNPs and unequal allele counts; ASE (misnomer kept from the
# original): exons with SNPs but exactly equal allele counts (no imbalance).
AI <- subset(temp2,dm3>0&line_40>0&V7>0&log2(dm3/line_40)!=0);
AI_prop <- nrow(subset(AI,dm3_ref_avg != 1 | dm3_alt_avg != 1))/nrow(AI);
call <- rep("AI",nrow(AI));
AI <- cbind(AI,call);
ASE <- subset(temp2,dm3>0&line_40>0&V7>0&log2(dm3/line_40)==0);
ASE_prop <- nrow(subset(ASE,dm3_ref_avg != 1 | dm3_alt_avg != 1))/nrow(ASE);
call <- rep("ASE",nrow(ASE));
ASE <- cbind(ASE,call);
data <- rbind(AI,ASE);
# Mappability ratio by category, then proportions removed by a perfect-
# mappability filter in each category.
boxplot(log2(dm3_ref_avg/dm3_alt_avg) ~ call, data=data,ylab="log2(mappability ratio)",varwidth=TRUE);
props <- c(AI_prop,ASE_prop);
barplot(props,names.arg=c("AI\nn = 300","not AI\nn = 23,902"),ylim=c(0,1),ylab="Proportion filtered out in each category",main="Filtering imperfect mappability retains many exons");
barplot(c(ASE_prop,AI_prop),xlim=c(0,1),horiz=TRUE,names.arg=c("No AI\nn = 23,902","AI\nn = 300"),width=0.3,xlab="Proportion removed due to imperfect mappability",main="Exons");
############
# 1 mismatch
# Per-SNP mappability with 50-bp reads allowing 1 mismatch, joined onto
# complete_data1 (defined earlier, outside this chunk — confirm run order).
mappability_1mm <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m1.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data1,mappability_1mm,by.x=c("chr","pos"),by.y=c("chr","position"));
write.table(data,file="/Users/kraigrs/Desktop/exons/sim_SNPs_1mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
# "unbiased": SNPs with no neighboring SNP within read distance (neighbor<1).
temp <- subset(data,neighbor<1);
write.table(temp,file="/Users/kraigrs/Desktop/exons/sim_SNPs_1mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
# "perfect": SNPs whose average mappability (sum/length) equals 1.
perfect <- subset(temp,sum/length == 1);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_1mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(temp,sum/length == 1))*100;
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
# nonsig/sig: equal vs unequal allele-specific read abundance.
nonsig <- subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5);
sig <- subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5);
nrow(subset(sig,sum/length < 1))/nrow(sig)*100;
nrow(subset(nonsig,sum/length < 1))/nrow(nonsig)*100;
# Distribution of mappability in the two abundance classes.
nonsig_hist <- hist( (nonsig$sum/nonsig$length) ,breaks=seq(0,1,0.05));
sig_hist <- hist( (sig$sum/sig$length) ,breaks=seq(0,1,0.05));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="",ylab="",main="",col=c("white","grey"));
data <- merge(complete_data1,mappability_1mm,by.x=c("chr","pos"),by.y=c("chr","position"));
# Reference-allele fraction split by perfect vs imperfect mappability.
perfect_hist <- hist(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_1mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
# Pie chart: perfectly vs imperfectly mappable SNPs (1-mismatch analysis).
pdf(file="/Users/kraigrs/Desktop/plots/sim_1mm_pie_by_mapp.pdf");
# BUGFIX: pie()'s second argument is `labels`, so the original call
# pie(nrow(perfect),nrow(imperfect),labels="") matched `labels` twice and
# errored. Slice sizes must be a single vector, as in the other pie() calls
# in this script (e.g. pie(c(nrow(...),nrow(...)),labels=...)).
pie(c(nrow(perfect), nrow(imperfect)), labels = "");
dev.off();
# Boxplots of reference-allele fraction by number of neighboring SNPs
# (neighbor_plus = neighbor count + 1), split by mappability class.
pdf(file="/Users/kraigrs/Desktop/plots/sim_1mm_boxplot_perfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");  # expectation under no bias
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_1mm_boxplot_imperfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
# Kernel-density overlay of the two mappability classes (blue = perfect).
d1_perfect <- density(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),from=0,to=1);
d1_imperfect <- density(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),from=0,to=1);
plot(d1_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d1_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
# 2 mismatch
# Same pipeline as the 1-mismatch section, using 2-mismatch mappability and
# complete_data2 (defined earlier, outside this chunk).
mappability_2mm <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m2.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data2,mappability_2mm,by.x=c("chr","pos"),by.y=c("chr","position"));
write.table(data,file="/Users/kraigrs/Desktop/exons/sim_SNPs_2mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
temp <- subset(data,neighbor<2);
write.table(temp,file="/Users/kraigrs/Desktop/exons/sim_SNPs_2mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(temp,sum/length == 1);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_2mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(temp,sum/length == 1))*100;
nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5))*100; #
nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(data,ref_allele/(ref_allele+alt_allele) == 0.5))*100; #
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
nonsig <- subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5);
sig <- subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5);
nrow(subset(sig,sum/length < 1))/nrow(sig)*100;
nrow(subset(nonsig,sum/length < 1))/nrow(nonsig)*100;
nonsig_hist <- hist( (nonsig$sum/nonsig$length) ,breaks=seq(0,1,0.05));
sig_hist <- hist( (sig$sum/sig$length) ,breaks=seq(0,1,0.05));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="",ylab="",main="",col=c("white","grey"));
data <- merge(complete_data2,mappability_2mm,by.x=c("chr","pos"),by.y=c("chr","position"));
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
perfect_hist <- hist(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_2mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_2mm_boxplot_perfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_2mm_boxplot_imperfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
# Pie chart: perfectly vs imperfectly mappable SNPs (2-mismatch analysis).
pdf(file="/Users/kraigrs/Desktop/plots/sim_2mm_pie_by_mapp.pdf");
# BUGFIX: slice sizes must be one vector; the original passed the two counts
# as separate arguments, so nrow(imperfect) was matched to `labels` and the
# explicit labels="" raised a "matched by multiple actual arguments" error.
pie(c(nrow(perfect), nrow(imperfect)), labels = "");
dev.off();
# Density overlay of reference-allele fraction by mappability class
# (blue = perfect, red = imperfect), 2-mismatch analysis.
d2_perfect <- density(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele));
d2_imperfect <- density(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele));
plot(d2_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d2_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
# 3 mismatch
# Same pipeline as the 1- and 2-mismatch sections, using 3-mismatch
# mappability and complete_data3 (defined earlier, outside this chunk).
mappability_3mm <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m3.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data3,mappability_3mm,by.x=c("chr","pos"),by.y=c("chr","position"));
write.table(data,file="/Users/kraigrs/Desktop/exons/sim_SNPs_3mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
temp <- subset(data,neighbor<3);
write.table(temp,file="/Users/kraigrs/Desktop/exons/sim_SNPs_3mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(temp,sum/length == 1);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_3mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length == 1))/nrow(subset(temp,sum/length == 1))*100;
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5 & sum/length != 1))/nrow(subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
nonsig <- subset(temp,ref_allele/(ref_allele+alt_allele) == 0.5);
sig <- subset(temp,ref_allele/(ref_allele+alt_allele) != 0.5);
nrow(subset(sig,sum/length < 1))/nrow(sig)*100;
nrow(subset(nonsig,sum/length < 1))/nrow(nonsig)*100;
nonsig_hist <- hist( (nonsig$sum/nonsig$length) ,breaks=seq(0,1,0.05));
sig_hist <- hist( (sig$sum/sig$length) ,breaks=seq(0,1,0.05));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),xlab="",ylab="",main="",col=c("white","grey"));
data <- merge(complete_data3,mappability_3mm,by.x=c("chr","pos"),by.y=c("chr","position"));
perfect <- subset(temp,sum/length == 1);
imperfect <- subset(temp,sum/length != 1);
perfect_hist <- hist(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_3mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
# Pie chart: perfectly vs imperfectly mappable SNPs (3-mismatch analysis).
pdf(file="/Users/kraigrs/Desktop/plots/sim_3mm_pie_by_mapp.pdf");
# BUGFIX: slice sizes must be one vector; the original passed the two counts
# as separate arguments, so nrow(imperfect) was matched to `labels` and the
# explicit labels="" raised a "matched by multiple actual arguments" error.
pie(c(nrow(perfect), nrow(imperfect)), labels = "");
dev.off();
# Boxplots of reference-allele fraction by neighbor count, split by
# mappability class (3-mismatch analysis); red dashed line = no-bias 0.5.
pdf(file="/Users/kraigrs/Desktop/plots/sim_3mm_boxplot_perfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_3mm_boxplot_imperfect.pdf");
boxplot(ref_allele/(ref_allele+alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
# Density overlay (blue = perfect mappability, red = imperfect).
d3_perfect <- density(perfect$ref_allele/(perfect$ref_allele+perfect$alt_allele));
d3_imperfect <- density(imperfect$ref_allele/(imperfect$ref_allele+imperfect$alt_allele));
plot(d3_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d3_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
# 0 mismatches
# Exact-match mapping uses BOTH allele-specific genomes, so per-SNP
# mappability is combined: .x = reference genome, .y = alternate genome.
# "Perfect" means both genomes are fully mappable (sum/length ratios sum to 2).
ref_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
alt_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
mappability_0mm <- merge(ref_mappability,alt_mappability,by.x=c("chr","position"),by.y=c("chr","position"));
data <- merge(complete_data0,mappability_0mm,by.x=c("chr","pos"),by.y=c("chr","position"));
temp <- data;
# NOTE(review): these two write.table calls export complete_data0 (without
# the mappability columns), unlike the 1/2/3-mismatch sections which export
# the merged `data`/`temp` — confirm this asymmetry is intentional.
write.table(complete_data0,file="/Users/kraigrs/Desktop/exons/sim_SNPs_0mm.txt",quote=F,sep="\t",row.names=F,col.names=F);
write.table(complete_data0,file="/Users/kraigrs/Desktop/exons/sim_SNPs_0mm_unbiased.txt",quote=F,sep="\t",row.names=F,col.names=F);
perfect <- subset(complete_data0,((sum.x/length.x)+(sum.y/length.y)) == 2);
write.table(perfect,file="/Users/kraigrs/Desktop/exons/sim_SNPs_0mm_unbiased_perfect.txt",quote=F,sep="\t",row.names=F,col.names=F);
# proportion of differentiating sites with perfect mappability and equal allelic abundance
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5 & ((sum.x/length.x)+(sum.y/length.y)) == 2))/nrow(subset(temp,((sum.x/length.x)+(sum.y/length.y)) == 2))*100;
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5 & ((sum.x/length.x)+(sum.y/length.y)) < 2))/nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5))*100; # % unequal ASRA and imperfect mappability
nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5 & ((sum.x/length.x)+(sum.y/length.y)) < 2))/nrow(subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5))*100; # % equal ASRA and imperfect mappability
perfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) == 2);
imperfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) < 2);
nonsig <- subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) == 0.5);
sig <- subset(temp,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5);
#nrow(subset(sig,sum.x/length.x + sum.y/length.y != 2))/nrow(sig)*100;
#nrow(subset(nonsig,sum.x/length.x + sum.y/length.y != 2))/nrow(nonsig)*100;
# Combined mappability distributions (range 0-2) by abundance class.
nonsig_hist <- hist( (nonsig$sum.x/nonsig$length.x)+(nonsig$sum.y/nonsig$length.y) ,breaks=seq(0,2,0.1));
sig_hist <- hist( (sig$sum.x/sig$length.x)+(sig$sum.y/sig$length.y) ,breaks=seq(0,2,0.1));
mat <- cbind(nonsig_hist$counts/nrow(nonsig),sig_hist$counts/nrow(sig));
barplot(t(mat),beside=TRUE,names.arg=seq(0.1,2,0.1),xlab="",ylab="",main="",col=c("white","grey"));
# Mappability ratio vs reference-allele fraction, per SNP.
plot(log2((data$sum.x/data$length.x)/(data$sum.y/data$length.y)),data$dm3_ref_ref_allele/(data$dm3_ref_ref_allele+data$dm3_alt_alt_allele),xlab="",ylab="",pch=19,col=rgb(0,0,0,0.3),cex=0.7);
perfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) == 2);
imperfect <- subset(temp,((sum.x/length.x)+(sum.y/length.y)) != 2);
perfect_hist <- hist(perfect$dm3_ref_ref_allele/(perfect$dm3_ref_ref_allele+perfect$dm3_alt_alt_allele),breaks=seq(0,1,0.05));
imperfect_hist <- hist(imperfect$dm3_ref_ref_allele/(imperfect$dm3_ref_ref_allele+imperfect$dm3_alt_alt_allele),breaks=seq(0,1,0.05));
mat <- cbind(perfect_hist$counts/nrow(perfect),imperfect_hist$counts/nrow(imperfect));
pdf(file="/Users/kraigrs/Wittkopp/LabResearch/Simulating_ASE/revisions/new_plots/sim_50b_0mm_barplot_by_mapp.pdf");
barplot(t(mat),beside=TRUE,names.arg=seq(0.05,1,0.05),ylim=c(0,1),xlab="",ylab="",main="",col=c("white","grey"));
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,1],ylim=c(0,1),col="black",type="b");
#par(new=TRUE);
#points(seq(0.05,1,0.05),mat[,2],ylim=c(0,1),col="grey",type="b");
dev.off();
# Pie chart: perfectly vs imperfectly mappable SNPs (0-mismatch analysis).
pdf(file="/Users/kraigrs/Desktop/plots/sim_0mm_pie_by_mapp.pdf");
# BUGFIX: slice sizes must be one vector; the original passed the two counts
# as separate arguments, so nrow(imperfect) was matched to `labels` and the
# explicit labels="" raised a "matched by multiple actual arguments" error.
pie(c(nrow(perfect), nrow(imperfect)), labels = "");
dev.off();
# Boxplots of reference-allele fraction by neighbor count, split by
# mappability class (0-mismatch analysis); red dashed line = no-bias 0.5.
pdf(file="/Users/kraigrs/Desktop/plots/sim_0mm_boxplot_perfect.pdf");
boxplot(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) ~ neighbor_plus,data = perfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
pdf(file="/Users/kraigrs/Desktop/plots/sim_0mm_boxplot_imperfect.pdf");
boxplot(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) ~ neighbor_plus,data = imperfect, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.4,col=rgb(0,0,0,0.4)));
abline(h = 0.5,col="red",lty="dashed");
dev.off();
# Density overlay (blue = perfect mappability, red = imperfect).
d0_perfect <- density(perfect$dm3_ref_ref_allele/(perfect$dm3_ref_ref_allele+perfect$dm3_alt_alt_allele));
d0_imperfect <- density(imperfect$dm3_ref_ref_allele/(imperfect$dm3_ref_ref_allele+imperfect$dm3_alt_alt_allele));
plot(d0_perfect,xlim=c(0,1),ylim=c(0,10),main="",col="blue",xlab="");
par(new=TRUE);
plot(d0_imperfect,xlim=c(0,1),ylim=c(0,10),main="",col="red",xlab="");
#########################
# Concordance between mappability bias direction and ASE direction,
# plus the cost of a perfect-mappability filter on AI vs non-AI SNPs.
plot((data$sum.x/data$length.x)/(data$sum.x/data$length.x+data$sum.y/data$length.y),data$dm3_ref_ref_allele/(data$dm3_ref_ref_allele+data$dm3_alt_alt_allele),xlab="mappability",ylab="proportion of reference allele",pch=19,col=rgb(0,0,0,0.5),cex=0.5)
# The second subset below overwrites the first (less restrictive filter).
data <- subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5 & (sum.x/length.x)/(sum.x/length.x+sum.y/length.y) != 0.5);
data <- subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5);
# How often does the direction of mappability bias match the direction of ASE?
nrow(subset(data, sign((sum.x/length.x)/(sum.x/length.x+sum.y/length.y)-0.5) == sign(dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele)-0.5) ));
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.SNPs.txt",header=TRUE,sep="\t");
# NOTE(review): SNP_mappability is not defined until further down (L9129);
# this merge depends on a previous run or a definition outside this chunk.
data <- merge(multiple,SNP_mappability,by.x=c("chr","pos"),by.y=c("chr","position"));
nrow(subset(data,dm3_ref_ref_allele > 0 & dm3_alt_alt_allele > 0 & log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0));
nrow(subset(data,dm3_ref_ref_allele > 0 & dm3_alt_alt_allele > 0 & log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0 & log2((sum.x/length.x)/(sum.y/length.y)) != 0));
nrow(subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5));
nrow(subset(data,dm3_ref_ref_allele/(dm3_ref_ref_allele+dm3_alt_alt_allele) != 0.5 & log2((sum.x/length.x)/(sum.y/length.y)) != 0));
poor_map <- subset(data,dm3_ref_ref_allele > 0 & dm3_alt_alt_allele > 0 & log2(dm3_ref_ref_allele/dm3_alt_alt_allele) != 0 & log2((sum.x/length.x)/(sum.y/length.y)) != 0);
# Among poorly-mappable imbalanced SNPs: how often bias and ASE agree in sign.
nrow(subset(poor_map,sign(log2(dm3_ref_ref_allele/dm3_alt_alt_allele)) == sign(log2((sum.x/length.x)/(sum.y/length.y)))));
AI <- subset(data,ref_allele.x > 0 & alt_allele.x > 0 & log2(ref_allele.x/alt_allele.x) != 0);
AI_prop <- nrow(subset(AI,sum/length != 1))/nrow(AI);
ASE <- subset(data,ref_allele.x > 0 & alt_allele.x > 0 & log2(ref_allele.x/alt_allele.x) == 0);
ASE_prop <- nrow(subset(ASE,sum/length != 1))/nrow(ASE);
# SNPs with no neighbors and AI
temp1 <- subset(AI,neighbor==0);
AI_prop <- nrow(subset(temp1,sum/length != 1))/nrow(temp1);
temp2 <- subset(ASE,neighbor==0);
ASE_prop <- nrow(subset(temp2,sum/length != 1))/nrow(temp2);
props <- c(AI_prop,ASE_prop);
barplot(props,names.arg=c("AI\nn = 179","not AI\nn = 38,819"),ylim=c(0,1),ylab="Proportion filtered out in each category",main="Filtering imperfect mappability retains many SNPs");
barplot(c(ASE_prop,AI_prop),xlim=c(0,1),horiz=TRUE,names.arg=c("No AI\nn = 179","AI\nn = 38,819"),width=0.3,xlab="Proportion removed due to imperfect mappability",main="SNPs");
##################################
# plot mappability across genome #
##################################
# Rebuilds the exon-level mappability table and uses lattice to plot the
# ref/alt mappability ratio along each chromosome and against measured ASE.
library(lattice);
dm3_ref_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_ref.l50_m0.mappability.txt",header=TRUE,sep="\t");
dm3_alt_exon_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons.dm3_alt_line_40.l50_m0.mappability.txt",header=TRUE,sep="\t");
exon_mappability = merge(dm3_ref_exon_mappability,dm3_alt_exon_mappability,by.x="locus",by.y="locus");
dm3_ref_avg <- exon_mappability$sum.x/exon_mappability$length.x;
dm3_alt_avg <- exon_mappability$sum.y/exon_mappability$length.y;
mappability <- cbind(exon_mappability,dm3_ref_avg,dm3_alt_avg);
multiple <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
SNPsInExons <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_in_const.txt",header=FALSE,sep="\t");
temp1 <- merge(multiple,mappability,by.x="gene_exon",by.y="locus");
temp2 <- merge(temp1,SNPsInExons,by.x="gene_exon",by.y="V8");
temp3 <- subset(temp2,dm3 > 0 & line_40 > 0);
# One panel per chromosome (V1); x = exon midpoint from coords V2/V3.
xyplot(dm3_ref_avg/dm3_alt_avg ~ (V2+V3)/2 | V1, data = temp3,xlab="Midpoint of exon",ylab="dm3/line_40 mappability",main="Mappability across both allele-specific genomes",pch=19,cex=0.4,col=rgb(0,0,0,0.3),layout=c(2,3));
xyplot(dm3_ref_avg/dm3_alt_avg ~ log2(dm3/line_40) | V1, data = temp3,xlab="log2(dm3/line_40) ASE",ylab="dm3/line_40 mappability",main="Concordance of mappability direction and bias in ASE",pch=19,cex=0.4,col=rgb(0,0,0,0.3));
# how many exons show opposite signs of mappability and bias? 0.2%, so 99.8% show same sign, awesome!
# also, of the 660 that do show differential mappability, 237 favor dm3_ref and 423 favor dm3_alt
sum( sign(log2(temp3$dm3_ref_avg/temp3$dm3_alt_avg)) != sign(log2(temp3$dm3/temp3$line_40)) );
# correlation? r^2 = 0.6
# NOTE(review): uses columns dm3_ref/dm3_alt here vs dm3/line_40 above —
# confirm both column pairs exist in temp3.
cor(log2(temp3$dm3_ref_avg/temp3$dm3_alt_avg),log2(temp3$dm3_ref/temp3$dm3_alt))^2;
###############
# Compare Bowtie mismatch settings (v0-v3) for the exon-level analysis:
# what fraction of SNP-containing exons with reads from both alleles show
# apparent imbalance under each setting.
SNPsInExons <- read.table("/Users/kraigrs/Wittkopp/DGRP/DGRP_line_40_SNPs_in_const.txt",header=FALSE,sep="\t");
v0 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.bowtie_v0_m1.exons.txt",header=TRUE,sep="\t");
v1 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.exons.txt",header=TRUE,sep="\t");
v2 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v2_m1.exons.txt",header=TRUE,sep="\t");
v3 <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v3_m1.exons.txt",header=TRUE,sep="\t");
# Attach per-exon SNP counts (V7) keyed by exon id (V8).
v0 <- merge(v0,SNPsInExons,by.x="gene_exon",by.y="V8");
v1 <- merge(v1,SNPsInExons,by.x="gene_exon",by.y="V8");
v2 <- merge(v2,SNPsInExons,by.x="gene_exon",by.y="V8");
v3 <- merge(v3,SNPsInExons,by.x="gene_exon",by.y="V8");
# Fraction of exons showing imbalance under each mismatch setting.
nrow(subset(v0,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v0);
nrow(subset(v1,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v1);
nrow(subset(v2,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v2);
nrow(subset(v3,dm3>0&line_40>0&log2(dm3/line_40)!=0&V7>0))/nrow(v3);
# Denominators: exons with SNPs and reads from both alleles.
nrow(subset(v0,dm3>0&line_40>0&V7>0));
nrow(subset(v1,dm3>0&line_40>0&V7>0));
nrow(subset(v2,dm3>0&line_40>0&V7>0));
nrow(subset(v3,dm3>0&line_40>0&V7>0));
# mappability for single-genome approach
# Join per-SNP counts (complete_data, defined outside this chunk) with
# 1-mismatch mappability, then measure what a perfect-mappability filter
# removes from AI (imbalanced) vs non-AI SNPs.
# NOTE(review): `data` read here is immediately overwritten by the merge of
# complete_data below — the read.table result is unused; confirm intent.
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_ref.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
SNP_mappability <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/SNPs_line_40.dm3_ref.l50_m1.mappability.txt",header=TRUE,sep="\t");
data <- merge(complete_data,SNP_mappability,by.x=c("chr","pos"),by.y=c("chr","position"));
# BUGFIX: the original line had an unbalanced extra ")" after the subset
# call, which is a parse error and stopped the script here.
# NOTE(review): this subset uses `ref_allele` while the ASE subset below uses
# `ref_allele.x` — confirm which suffix the merged columns actually carry.
AI <- subset(data, ref_allele/(ref_allele+alt_allele) != 0.5);
AI_prop <- nrow(subset(AI,sum/length != 1))/nrow(AI);
ASE <- subset(data,ref_allele.x > 0 & alt_allele.x > 0 & log2(ref_allele.x/alt_allele.x) == 0);
ASE_prop <- nrow(subset(ASE,sum/length != 1))/nrow(ASE);
# SNPs with no neighbors and AI
temp1 <- subset(AI,neighbor==0);
AI_prop <- nrow(subset(temp1,sum/length != 1))/nrow(temp1);
temp2 <- subset(ASE,neighbor==0);
ASE_prop <- nrow(subset(temp2,sum/length != 1))/nrow(temp2);
##############
# alternative allele used as genome
# Same single-genome analysis but mapping to the ALTERNATE allele genome;
# the bias should now favor the alternate allele instead of the reference.
data <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/line_40/constExons_single_bp50_error0_tiled_line_40.dm3_alt_line_40.bowtie_v1_m1.SNPs.txt",header=TRUE,sep="\t");
# NOTE(review): column is called `neighbors` here but `neighbor` everywhere
# else in this script — confirm against this input file's header.
neighbors_plus <- data$neighbors+1;
data <- cbind(data,neighbors_plus);
boxplot(alt_allele/(alt_allele+ref_allele) ~ neighbors_plus,data = data, varwidth = TRUE,xlab="",ylab="",main="",xlim=c(1,14),ylim=c(0,1),pars = list(outpch=19,cex=0.2));
abline(h=0.5,lty=2,col="red");  # expectation under no bias
# Fraction of SNPs showing imbalance vs total (gray slice = imbalanced).
pie(c(nrow(subset(data,alt_allele/(ref_allele+alt_allele)!=0.5)),nrow(data)),col=c("gray","white"),labels="");
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poppr.R
\docType{data}
\name{monpop}
\alias{monpop}
\title{Peach brown rot pathogen \emph{Monilinia fructicola}}
\format{a \code{\linkS4class{genclone}} object with 3 hierarchical levels
coded into one population factor. These are named "Tree", "Year", and
"Symptom"}
\usage{
data(monpop)
}
\description{
This is microsatellite data for a population of the haploid
plant pathogen \emph{Monilinia fructicola} that causes disease within peach
tree canopies (Everhart & Scherm, 2015). Entire populations within trees
were sampled across 3 years (2009, 2010, and 2011) in a total of four
trees, where one tree was sampled in all three years, for a total of 6
within-tree populations. Within each year, samples in the spring were taken
from affected blossoms (termed "BB" for blossom blight) and in late summer
from affected fruits (termed "FR" for fruit rot). There are a total of 694
isolates with 65 to 173 isolates within each canopy population that were
characterized using a set of 13 microsatellite markers.
}
\examples{
data(monpop)
splitStrata(monpop) <- ~Tree/Year/Symptom
setPop(monpop) <- ~Symptom/Year
monpop
}
\references{
SE Everhart, H Scherm, (2015) Fine-scale genetic structure of
\emph{Monilinia fructicola} during brown rot epidemics within individual
peach tree canopies. Phytopathology 105:542-549 doi:
\href{https://doi.org/10.1094/PHYTO-03-14-0088-R}{10.1094/PHYTO-03-14-0088-R}
}
|
/man/monpop.Rd
|
no_license
|
haono/poppr
|
R
| false
| true
| 1,512
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poppr.R
\docType{data}
\name{monpop}
\alias{monpop}
\title{Peach brown rot pathogen \emph{Monilinia fructicola}}
\format{a \code{\linkS4class{genclone}} object with 3 hierarchical levels
coded into one population factor. These are named "Tree", "Year", and
"Symptom"}
\usage{
data(monpop)
}
\description{
This is microsatellite data for a population of the haploid
plant pathogen \emph{Monilinia fructicola} that causes disease within peach
tree canopies (Everhart & Scherm, 2015). Entire populations within trees
were sampled across 3 years (2009, 2010, and 2011) in a total of four
trees, where one tree was sampled in all three years, for a total of 6
within-tree populations. Within each year, samples in the spring were taken
from affected blossoms (termed "BB" for blossom blight) and in late summer
from affected fruits (termed "FR" for fruit rot). There are a total of 694
isolates with 65 to 173 isolates within each canopy population that were
characterized using a set of 13 microsatellite markers.
}
\examples{
data(monpop)
splitStrata(monpop) <- ~Tree/Year/Symptom
setPop(monpop) <- ~Symptom/Year
monpop
}
\references{
SE Everhart, H Scherm, (2015) Fine-scale genetic structure of
\emph{Monilinia fructicola} during brown rot epidemics within individual
peach tree canopies. Phytopathology 105:542-549 doi:
\href{https://doi.org/10.1094/PHYTO-03-14-0088-R}{10.1094/PHYTO-03-14-0088-R}
}
|
#' A corpus of 10 texts of the Vatlongos (vtk) language
#'
#' The corpus is produced with the read.emeld() function. It is a list of 4 slots representing four
#' units: "texts" "sentences" "words" "morphems". Each slot contains a data frame, and each row
#' in the data.frame describes one occurrence of the corresponding unit.
#'
#' \itemize{
#' \item texts : a data frame of 95 units and 5 columns ("text_id", "title.en", "title.abbreviation.en", "source.en", "comment.en")
#' \item sentences : a data frame of 3967 units and 6 columns ("text_id", "sentence_id", "segnum.en", "gls.en", "lit.en", "note.en")
#' \item words : a data frame of 52983 units and 6 columns ("text_id" "sentence_id" "word_id" "txt.tvk" "gls.en" "pos.en")
#' \item morphems : a data frame of 56354 units and 10 columns ("text_id" "sentence_id" "word_id" "morphem_id" "type" "txt.tvk" "cf.tvk" "gls.en" "msa.en" "hn.en" )
#' }
#'
#' See the vignette vatlongos for Case study based on this corpus.
#'
#' @format A list with 4 slots
#' @references Eleanor Ridge <Eleanor_Ridge@soas.ac.uk>
"vatlongos"
# #' A corpus of 8 texts of the Kakabe language
# #'
# #' The corpus is produced with the read.toolbox() function. It is a list of 4 slots representing four
# #' units: "texts" "sentences" "words" "morphems". Each slot contains a data frame, and each row
# #' in the data.frame describe one occurrences of the corresponding unit.
# #'
# #' \itemize{
# #' \item texts : a data frame of 8 units and 2 columns
# #' \item sentenes : a data frame of 552 units and 5 columns
# #' \item words : a data frame of 7381 units and 7 columns
# #' \item mophems numeric : a data frame of 8659 units and 9 columns
# #' }
# #'
# #'
# #' @format A list with 4 slots
# #' @references Data by Alexandra Vydrina <alexandra.vydrina@gmail.com>
# "kakabe"
|
/R/dataset.R
|
permissive
|
sylvainloiseau/interlineaR
|
R
| false
| false
| 1,835
|
r
|
#' A corpus of 10 texts of the Vatlongos (vtk) language
#'
#' The corpus is produced with the read.emeld() function. It is a list of 4 slots representing four
#' units: "texts" "sentences" "words" "morphems". Each slot contains a data frame, and each row
#' in the data.frame describes one occurrence of the corresponding unit.
#'
#' \itemize{
#' \item texts : a data frame of 95 units and 5 columns ("text_id", "title.en", "title.abbreviation.en", "source.en", "comment.en")
#' \item sentences : a data frame of 3967 units and 6 columns ("text_id", "sentence_id", "segnum.en", "gls.en", "lit.en", "note.en")
#' \item words : a data frame of 52983 units and 6 columns ("text_id" "sentence_id" "word_id" "txt.tvk" "gls.en" "pos.en")
#' \item morphems : a data frame of 56354 units and 10 columns ("text_id" "sentence_id" "word_id" "morphem_id" "type" "txt.tvk" "cf.tvk" "gls.en" "msa.en" "hn.en" )
#' }
#'
#' See the vignette vatlongos for Case study based on this corpus.
#'
#' @format A list with 4 slots
#' @references Eleanor Ridge <Eleanor_Ridge@soas.ac.uk>
"vatlongos"
# #' A corpus of 8 texts of the Kakabe language
# #'
# #' The corpus is produced with the read.toolbox() function. It is a list of 4 slots representing four
# #' units: "texts" "sentences" "words" "morphems". Each slot contains a data frame, and each row
# #' in the data.frame describe one occurrences of the corresponding unit.
# #'
# #' \itemize{
# #' \item texts : a data frame of 8 units and 2 columns
# #' \item sentenes : a data frame of 552 units and 5 columns
# #' \item words : a data frame of 7381 units and 7 columns
# #' \item mophems numeric : a data frame of 8659 units and 9 columns
# #' }
# #'
# #'
# #' @format A list with 4 slots
# #' @references Data by Alexandra Vydrina <alexandra.vydrina@gmail.com>
# "kakabe"
|
#' Check your sanity and the length or rows for equality
#' @description After creating train and test datasets in ML, check to make
#' sure that the corresponding weights, or response vectors are the same
#' number of observations as the original dataset. Inputs may be data
#' frames (compared by row count) or character/numeric vectors (compared
#' by length), in any combination.
#' @param check1 an object of class data.frame, character, or numeric
#' @param check2 an object of class data.frame, character, or numeric
#' @param ... optional arguments passed through (TBD)
#'
#' @return a string indicating the success of the comparison or
#' failure of equality of observations from both input objects
#' (printed as a side effect); NULL invisibly for unsupported input types
#' @export
#'
#' @examples sanity_check(iris, iris)
#' sanity_check(iris, cars)
sanity_check <- function(check1, check2, ...){
  to_check <- c(...)
  # Number of observations: rows for a data frame, length otherwise.
  n_obs <- function(x) {
    if (is.data.frame(x)) nrow(x) else length(x)
  }
  # Only the classes documented above participate; anything else is
  # silently ignored, matching the original behaviour for e.g. lists.
  supported <- function(x) {
    is.data.frame(x) || is.character(x) || is.numeric(x)
  }
  if (supported(check1) && supported(check2)) {
    # Collapsing all class combinations into one comparison removes the
    # duplicated branches and also fixes the original omission of the
    # mixed character/numeric pair, which produced no output at all.
    if (n_obs(check1) == n_obs(check2)) {
      print("Good: Same number of rows.")
    } else {
      print("Bad: Not the same number of rows.")
    }
  }
}
|
/R/sanity_check.R
|
no_license
|
jasdumas/dumas
|
R
| false
| false
| 2,504
|
r
|
#' Check your sanity and the length or rows for equality
#' @description After creating train and test datasets in ML, check to make
#' sure that the corresponding weights, or response vectors are the same
#' number of observations as the original dataset. Inputs may be data
#' frames (compared by row count) or character/numeric vectors (compared
#' by length), in any combination.
#' @param check1 an object of class data.frame, character, or numeric
#' @param check2 an object of class data.frame, character, or numeric
#' @param ... optional arguments passed through (TBD)
#'
#' @return a string indicating the success of the comparison or
#' failure of equality of observations from both input objects
#' (printed as a side effect); NULL invisibly for unsupported input types
#' @export
#'
#' @examples sanity_check(iris, iris)
#' sanity_check(iris, cars)
sanity_check <- function(check1, check2, ...){
  to_check <- c(...)
  # Number of observations: rows for a data frame, length otherwise.
  n_obs <- function(x) {
    if (is.data.frame(x)) nrow(x) else length(x)
  }
  # Only the classes documented above participate; anything else is
  # silently ignored, matching the original behaviour for e.g. lists.
  supported <- function(x) {
    is.data.frame(x) || is.character(x) || is.numeric(x)
  }
  if (supported(check1) && supported(check2)) {
    # Collapsing all class combinations into one comparison removes the
    # duplicated branches and also fixes the original omission of the
    # mixed character/numeric pair, which produced no output at all.
    if (n_obs(check1) == n_obs(check2)) {
      print("Good: Same number of rows.")
    } else {
      print("Bad: Not the same number of rows.")
    }
  }
}
|
# Build the `twb2_sites` data set from the surface roughness shapefile
# and save it into the package's data/ directory.
load_all()
library(dplyr)
# Read the shapefile attribute table and keep only the columns we need,
# renaming Label_1 to area.
locs <- rgdal::readOGR("./data_raw/surface_roughness_locations",
                       "surface_roughness_locations")
locs <- locs@data
locs <- select(locs, site.id, area=Label_1, x, y)
# Normalize area labels to the hyphenated naming convention.
relabel <- c("T3SW" = "T3-SW", "T3SE" = "T3-SE",
             "T3NE" = "T3-NE", "T24 ADD" = "T24-ADD")
for (old_label in names(relabel)) {
  locs$area[locs$area == old_label] <- relabel[[old_label]]
}
twb2_sites <- locs
save(twb2_sites, file="./data/twb2_sites.RData")
|
/scripts/load_locations.R
|
no_license
|
jwbannister/twb2
|
R
| false
| false
| 440
|
r
|
# Build the `twb2_sites` data set from the surface roughness shapefile
# and save it into the package's data/ directory.
load_all()
library(dplyr)
# Read the shapefile attribute table and keep only the columns we need,
# renaming Label_1 to area.
locs <- rgdal::readOGR("./data_raw/surface_roughness_locations",
                       "surface_roughness_locations")
locs <- locs@data
locs <- select(locs, site.id, area=Label_1, x, y)
# Normalize area labels to the hyphenated naming convention.
relabel <- c("T3SW" = "T3-SW", "T3SE" = "T3-SE",
             "T3NE" = "T3-NE", "T24 ADD" = "T24-ADD")
for (old_label in names(relabel)) {
  locs$area[locs$area == old_label] <- relabel[[old_label]]
}
twb2_sites <- locs
save(twb2_sites, file="./data/twb2_sites.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracer.R
\name{Reset traces}
\alias{Reset traces}
\alias{reset_traces}
\title{Clears the captured traces}
\usage{
reset_traces(tracer)
}
|
/man/Reset-traces.Rd
|
no_license
|
PRL-PRG/genthat
|
R
| false
| true
| 215
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracer.R
\name{Reset traces}
\alias{Reset traces}
\alias{reset_traces}
\title{Clears the captured traces}
\usage{
reset_traces(tracer)
}
|
## Shiny server for an interactive demo on iris Petal.Width vs Petal.Length:
## optional species-cluster view, linear regression overlay, and prediction
## of petal length / species for a user-chosen point.
##
## NOTE(review): the `<<-` assignments below write into the global
## environment -- presumably intentional (e.g. for console inspection);
## confirm nothing outside this file depends on these globals.
library(UsingR)
library(ggplot2)
data(iris)
shinyServer(
  function(input, output) {
    ## Render Plot main function
    output$mainPlot <- renderPlot({
      ## Setting up the variables from the UI inputs
      choice <<- input$id1   # checkbox group: '1' = clustering, '2' = regression (see uses below)
      ptx <<- input$id2      # user-selected petal width (x of the new point)
      pty <<- input$id3      # user-selected petal length (y of the new point)
      x <<- iris$Petal.Width
      y <<- iris$Petal.Length
      Species <<- iris$Species
      ## Per-species centroids; the formula resolves x and y from this
      ## environment (they are not columns of iris).
      centroids <<- aggregate(cbind(x,y)~Species,iris,FUN = mean)
      ## Base Plot
      irisPlot <- ggplot(iris,aes(x,y)) + geom_point() + ggtitle("Plot of Petal Width Against Petal Length (iris data)") + xlab("Petal Width") + ylab("Petal Length")
      ## If checkbox selected, show species clustering (colored points
      ## plus large black centroid markers)
      if (any(choice=='1')) {
        irisPlot <- irisPlot + geom_point(aes(color = factor(Species)))
        irisPlot <- irisPlot + geom_point(aes(x=centroids$x,y=centroids$y),size=5)
      }
      ## If checkbox selected, show the linear regression line
      if (any(choice=='2')) {
        irisPlot <- irisPlot + geom_smooth(method="lm",se=FALSE, fullrange=TRUE)
      }
      ## Predict petal length from width via lm if the Predict checkbox is
      ## selected; otherwise use the length the user picked.
      if (any(input$id4=="1")) {
        fit <- lm(y ~ x, iris)
        ptynew <<- unname(ptx*fit$coefficients[2] + fit$coefficients[1])
      } else {
        ptynew <<- pty
      }
      ## Predict the species of the new point:
      ## the centroid with the smallest Manhattan (L1) distance wins.
      centroids$diff <- abs(centroids$x-ptx) + abs(centroids$y-ptynew)
      pts <<- centroids[which.min(centroids$diff),]$Species
      ## Add the new point to graph (filled triangle, shape 17)
      # if (ptynew>0)
      irisPlot <- irisPlot + geom_point(mapping=aes(x=ptx,y=ptynew),size=5,shape=17)
      ## For UI to display the results (Width, Length and Species)
      output$Width <- renderText({paste("Selected Width = ", round(ptx,2))})
      if (any(input$id4=="1")) {
        output$Length <- renderText({paste("(Predicted) Length = ", round(ptynew,2))})
      } else {
        output$Length <- renderText({paste("(Selected) Length = ", round(ptynew,2))})
      }
      output$Species <- renderText({paste("(Predicted) Species = ", pts)})
      ## Final Plot to be rendered
      irisPlot
    }, height=500)
  })
|
/server.R
|
no_license
|
kevinlimhk/App-SimpleDemoPredictor
|
R
| false
| false
| 2,208
|
r
|
## Shiny server for an interactive demo on iris Petal.Width vs Petal.Length:
## optional species-cluster view, linear regression overlay, and prediction
## of petal length / species for a user-chosen point.
##
## NOTE(review): the `<<-` assignments below write into the global
## environment -- presumably intentional (e.g. for console inspection);
## confirm nothing outside this file depends on these globals.
library(UsingR)
library(ggplot2)
data(iris)
shinyServer(
  function(input, output) {
    ## Render Plot main function
    output$mainPlot <- renderPlot({
      ## Setting up the variables from the UI inputs
      choice <<- input$id1   # checkbox group: '1' = clustering, '2' = regression (see uses below)
      ptx <<- input$id2      # user-selected petal width (x of the new point)
      pty <<- input$id3      # user-selected petal length (y of the new point)
      x <<- iris$Petal.Width
      y <<- iris$Petal.Length
      Species <<- iris$Species
      ## Per-species centroids; the formula resolves x and y from this
      ## environment (they are not columns of iris).
      centroids <<- aggregate(cbind(x,y)~Species,iris,FUN = mean)
      ## Base Plot
      irisPlot <- ggplot(iris,aes(x,y)) + geom_point() + ggtitle("Plot of Petal Width Against Petal Length (iris data)") + xlab("Petal Width") + ylab("Petal Length")
      ## If checkbox selected, show species clustering (colored points
      ## plus large black centroid markers)
      if (any(choice=='1')) {
        irisPlot <- irisPlot + geom_point(aes(color = factor(Species)))
        irisPlot <- irisPlot + geom_point(aes(x=centroids$x,y=centroids$y),size=5)
      }
      ## If checkbox selected, show the linear regression line
      if (any(choice=='2')) {
        irisPlot <- irisPlot + geom_smooth(method="lm",se=FALSE, fullrange=TRUE)
      }
      ## Predict petal length from width via lm if the Predict checkbox is
      ## selected; otherwise use the length the user picked.
      if (any(input$id4=="1")) {
        fit <- lm(y ~ x, iris)
        ptynew <<- unname(ptx*fit$coefficients[2] + fit$coefficients[1])
      } else {
        ptynew <<- pty
      }
      ## Predict the species of the new point:
      ## the centroid with the smallest Manhattan (L1) distance wins.
      centroids$diff <- abs(centroids$x-ptx) + abs(centroids$y-ptynew)
      pts <<- centroids[which.min(centroids$diff),]$Species
      ## Add the new point to graph (filled triangle, shape 17)
      # if (ptynew>0)
      irisPlot <- irisPlot + geom_point(mapping=aes(x=ptx,y=ptynew),size=5,shape=17)
      ## For UI to display the results (Width, Length and Species)
      output$Width <- renderText({paste("Selected Width = ", round(ptx,2))})
      if (any(input$id4=="1")) {
        output$Length <- renderText({paste("(Predicted) Length = ", round(ptynew,2))})
      } else {
        output$Length <- renderText({paste("(Selected) Length = ", round(ptynew,2))})
      }
      output$Species <- renderText({paste("(Predicted) Species = ", pts)})
      ## Final Plot to be rendered
      irisPlot
    }, height=500)
  })
|
# Profile analysis of the TLC (treatment of lead-exposed children) trial:
# blood lead levels measured at weeks 0, 1, 4 and 6 in two groups.
library(foreign)  # loaded by the original script (read.table itself is base R)
dir <- 'C:\\Users\\alin\\Documents\\SelfStudy\\MyLearning\\Applied_Logitudinal_Analysis\\data\\'
fpath <- paste(dir, 'tlc-data.txt', sep = '')
# Wide format: one row per child, one column per measurement occasion.
lead_wide <- read.table(file = fpath)
names(lead_wide) <- c('id', 'trt', 'y0', 'y1', 'y4', 'y6')
# Reshape to long format: one row per (child, occasion), time coded 1..4.
lead_long <- reshape(lead_wide, idvar = "id",
                     varying = c("y0", "y1", "y4", "y6"),
                     v.names = "y", timevar = "time", time = 1:4,
                     direction = "long")
# Map the occasion index 1..4 onto the actual week of measurement.
week_of_time <- c(0, 1, 4, 6)
lead_long$week <- week_of_time[lead_long$time]
interaction.plot(lead_long$week, lead_long$trt, lead_long$y, type="b", pch=c(19,21), ylim=c(10, 30),
                 xlab="Time (in weeks)", ylab="Blood Lead Levels",
                 main="Plot of Mean Response Profiles in the Placebo & Succimer Groups",
                 col=c(2,4))
# Treat week as categorical for the profile (saturated means) model.
lead_long$week.f <- factor(lead_long$week, c(0,1,4,6))
library(nlme)
# GLS with an unstructured covariance: arbitrary correlation between
# occasions (corSymm) and a separate variance per week (varIdent).
model <- gls(model = y ~ trt*week.f, data = lead_long,
             corr = corSymm(form = ~ time | id),
             weights = varIdent(form = ~ 1 | week.f))
summary(model)
anova(model)
|
/Applied_Logitudinal_Analysis/R/r5_4.r
|
no_license
|
anhualin/MyLearning
|
R
| false
| false
| 994
|
r
|
# Profile analysis of the TLC (treatment of lead-exposed children) trial:
# blood lead levels measured at weeks 0, 1, 4 and 6 in two groups.
library(foreign)  # loaded by the original script (read.table itself is base R)
dir <- 'C:\\Users\\alin\\Documents\\SelfStudy\\MyLearning\\Applied_Logitudinal_Analysis\\data\\'
fpath <- paste(dir, 'tlc-data.txt', sep = '')
# Wide format: one row per child, one column per measurement occasion.
lead_wide <- read.table(file = fpath)
names(lead_wide) <- c('id', 'trt', 'y0', 'y1', 'y4', 'y6')
# Reshape to long format: one row per (child, occasion), time coded 1..4.
lead_long <- reshape(lead_wide, idvar = "id",
                     varying = c("y0", "y1", "y4", "y6"),
                     v.names = "y", timevar = "time", time = 1:4,
                     direction = "long")
# Map the occasion index 1..4 onto the actual week of measurement.
week_of_time <- c(0, 1, 4, 6)
lead_long$week <- week_of_time[lead_long$time]
interaction.plot(lead_long$week, lead_long$trt, lead_long$y, type="b", pch=c(19,21), ylim=c(10, 30),
                 xlab="Time (in weeks)", ylab="Blood Lead Levels",
                 main="Plot of Mean Response Profiles in the Placebo & Succimer Groups",
                 col=c(2,4))
# Treat week as categorical for the profile (saturated means) model.
lead_long$week.f <- factor(lead_long$week, c(0,1,4,6))
library(nlme)
# GLS with an unstructured covariance: arbitrary correlation between
# occasions (corSymm) and a separate variance per week (varIdent).
model <- gls(model = y ~ trt*week.f, data = lead_long,
             corr = corSymm(form = ~ time | id),
             weights = varIdent(form = ~ 1 | week.f))
summary(model)
anova(model)
|
# Transient-signal examples: a modulated cosine burst with two different
# decay rates buried in white noise, then the two noise-free decay
# envelopes plotted one above the other.

# Fast-decaying burst (decay constant 20) plus N(0, 1) noise.
s <- c(rep(0, 100), 10 * exp(-(1:100) / 20) * cos(2 * pi * 1:100 / 4))
x <- ts(s + rnorm(200, 0, 1))
plot(x, col = "blue")

# Slow-decaying burst (decay constant 200) plus N(0, 1) noise.
s <- c(rep(0, 100), 10 * exp(-(1:100) / 200) * cos(2 * pi * 1:100 / 4))
x <- ts(s + rnorm(200, 0, 1))
plot(x, col = "red")

# Compare the two pure exponential envelopes.
s <- c(exp(-(1:100) / 20))
x <- ts(s)
s1 <- c(exp(-(1:100) / 200))  # fixed: closing parenthesis was missing
y <- ts(s1)
par(mfrow = c(2, 1))
plot.ts(x, col = "blue")
plot.ts(y, col = "red")
|
/hw3/p2.R
|
no_license
|
yaoshiyu/5825
|
R
| false
| false
| 326
|
r
|
# Transient-signal examples: a modulated cosine burst with two different
# decay rates buried in white noise, then the two noise-free decay
# envelopes plotted one above the other.

# Fast-decaying burst (decay constant 20) plus N(0, 1) noise.
s <- c(rep(0, 100), 10 * exp(-(1:100) / 20) * cos(2 * pi * 1:100 / 4))
x <- ts(s + rnorm(200, 0, 1))
plot(x, col = "blue")

# Slow-decaying burst (decay constant 200) plus N(0, 1) noise.
s <- c(rep(0, 100), 10 * exp(-(1:100) / 200) * cos(2 * pi * 1:100 / 4))
x <- ts(s + rnorm(200, 0, 1))
plot(x, col = "red")

# Compare the two pure exponential envelopes.
s <- c(exp(-(1:100) / 20))
x <- ts(s)
s1 <- c(exp(-(1:100) / 200))  # fixed: closing parenthesis was missing
y <- ts(s1)
par(mfrow = c(2, 1))
plot.ts(x, col = "blue")
plot.ts(y, col = "red")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpoint_operations.R
\name{pinpoint_update_sms_channel}
\alias{pinpoint_update_sms_channel}
\title{Enables the SMS channel for an application or updates the status and
settings of the SMS channel for an application}
\usage{
pinpoint_update_sms_channel(ApplicationId, SMSChannelRequest)
}
\arguments{
\item{ApplicationId}{[required] The unique identifier for the application. This identifier is displayed
as the \strong{Project ID} on the Amazon Pinpoint console.}
\item{SMSChannelRequest}{[required]}
}
\value{
A list with the following syntax:\preformatted{list(
SMSChannelResponse = list(
ApplicationId = "string",
CreationDate = "string",
Enabled = TRUE|FALSE,
HasCredential = TRUE|FALSE,
Id = "string",
IsArchived = TRUE|FALSE,
LastModifiedBy = "string",
LastModifiedDate = "string",
Platform = "string",
PromotionalMessagesPerSecond = 123,
SenderId = "string",
ShortCode = "string",
TransactionalMessagesPerSecond = 123,
Version = 123
)
)
}
}
\description{
Enables the SMS channel for an application or updates the status and
settings of the SMS channel for an application.
}
\section{Request syntax}{
\preformatted{svc$update_sms_channel(
ApplicationId = "string",
SMSChannelRequest = list(
Enabled = TRUE|FALSE,
SenderId = "string",
ShortCode = "string"
)
)
}
}
\keyword{internal}
|
/cran/paws.customer.engagement/man/pinpoint_update_sms_channel.Rd
|
permissive
|
TWarczak/paws
|
R
| false
| true
| 1,447
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpoint_operations.R
\name{pinpoint_update_sms_channel}
\alias{pinpoint_update_sms_channel}
\title{Enables the SMS channel for an application or updates the status and
settings of the SMS channel for an application}
\usage{
pinpoint_update_sms_channel(ApplicationId, SMSChannelRequest)
}
\arguments{
\item{ApplicationId}{[required] The unique identifier for the application. This identifier is displayed
as the \strong{Project ID} on the Amazon Pinpoint console.}
\item{SMSChannelRequest}{[required]}
}
\value{
A list with the following syntax:\preformatted{list(
SMSChannelResponse = list(
ApplicationId = "string",
CreationDate = "string",
Enabled = TRUE|FALSE,
HasCredential = TRUE|FALSE,
Id = "string",
IsArchived = TRUE|FALSE,
LastModifiedBy = "string",
LastModifiedDate = "string",
Platform = "string",
PromotionalMessagesPerSecond = 123,
SenderId = "string",
ShortCode = "string",
TransactionalMessagesPerSecond = 123,
Version = 123
)
)
}
}
\description{
Enables the SMS channel for an application or updates the status and
settings of the SMS channel for an application.
}
\section{Request syntax}{
\preformatted{svc$update_sms_channel(
ApplicationId = "string",
SMSChannelRequest = list(
Enabled = TRUE|FALSE,
SenderId = "string",
ShortCode = "string"
)
)
}
}
\keyword{internal}
|
#############################################
# Function CDpca #
#############################################
CDpca <- function(data, class = NULL, P, Q, SDPinitial = FALSE, tol=10^(-5), maxit, r, cdpcaplot= TRUE) {
# CDpca performs a clustering and disjoint principal components analysis
# (CDPCA) on the given numeric data matrix, via an alternating least
# squares (ALS) algorithm, and returns a list of results.
#
# Args:
# data: data frame (numeric), I objects by J variables.
# class: numeric vector with the real classification of the objects,
# or NULL (default) when the classes are unknown (no confusion
# matrix or "Real classification" plot is produced).
# P: integer, number of clusters of objects.
# Q: integer, number of clusters of variables (disjoint components).
# SDPinitial: logical; TRUE initializes the assignment matrices U and V
# with the SDP-based procedure, FALSE (default) initializes
# them at random.
# tol: real number, small positive tolerance (convergence criterion).
# maxit: integer, maximum number of ALS iterations per run.
# r: number of runs of the cdpca algorithm; the run attaining the
# largest objective value F gives the final solution.
# cdpcaplot: logical; TRUE (default) displays the classification plots.
#
# Returns:
# iter: iterations used in the best loop for computing the best solution
# loop: best loop number
# timebestloop: computation time on the best loop
# timeallloops: computation time for all loops
# Y: the component score matrix
# Ybar: the object centroids matrix in the reduced space
# A: the component loading matrix
# U: the partition of objects
# V: the partition of variables
# F: function to maximize
# bcdev: between cluster deviance
# bcdevTotal: between cluster deviance over the total variability
# tableclass: cdpca classification
# pseudocm: pseudo confusion matrix of the real and cdpca classifications
# Enorm: error norm for the obtained cdpca model
# dorder: component variances (%), sorted in decreasing order
# Xscale: the normalized data matrix actually analysed
#
# CDPCA is applied on normalized data (mean zero and unit variance).
#############################################
# Function RandMat #
#############################################
RandMat <- function(dim1, dim2) {
  # Generates a random binary and row stochastic membership matrix.
  #
  # The first dim2 rows are set to the identity so that every column
  # (cluster) receives at least one row; each remaining row is assigned
  # to one cluster chosen uniformly at random.
  #
  # Args:
  #   dim1: number of rows; must satisfy dim1 >= dim2.
  #   dim2: number of columns.
  #
  # Returns:
  #   The random matrix (dim1xdim2) with only one nonzero element per row.
  U0 <- matrix(0, dim1, dim2)
  U0[1:dim2, 1:dim2] <- diag(dim2)
  # seq_len() yields an empty sequence when dim1 == dim2, whereas the
  # former (dim2+1):dim1 counted downwards there and produced an
  # out-of-bounds subscript error.
  for (j in dim2 + seq_len(dim1 - dim2)) {
    U0[j, sample(1:dim2, 1)] <- 1
  }
  U0
} # end RandMat function
######################################
#
# SDP initialization
#
######################################
SDPinitialization <- function(dim1,dim2,D,data){
  # Builds a binary and row stochastic membership matrix using an
  # SDP-based relaxation of the clustering problem followed by a
  # k-means refinement step.
  #
  # Args:
  #   dim1: number of rows of the result (objects I or variables J).
  #   dim2: number of columns (clusters P or Q).
  #   D: the normalized data matrix Xs (I x J).
  #   data: the original data; used only to decide whether rows
  #         (objects) or columns (variables) of D are being clustered.
  #
  # Returns:
  #   A dim1 x dim2 binary matrix with only one nonzero element per row.
  #
  # approximate solution for clustering
  em <- cbind(rep(1,dim1))
  # Centering projector I - (1/n) 1 1'
  matIem <- diag(dim1)-(1/dim1)*em%*%t(em)
  # Cluster objects (rows of D) or variables (rows of t(D)).
  if (dim1 == dim(data)[1]){
    data <- D
  }else{
    data <- t(D)
  }
  So <- matIem%*%data%*%t(data)%*%matIem
  # Leading dim2-1 right singular vectors of the centered Gram matrix.
  matF <- matrix(svd(So)$v[, 1:(dim2-1)], ncol = dim2-1)
  matFFT <- matF%*%t(matF)
  # approximate solution to the SDP-based model for clustering
  Z.bar <- matFFT + 1/dim1 *em%*%t(em)
  # refine solutions
  cent <- Z.bar%*%data
  centunique <- unique(cent)
  t <- dim(centunique)[1]
  # initial kmeans step: dim2 distinct rows sampled as starting centroids
  indcentroids <- sort(sample(1:t, dim2, replace=FALSE), decreasing=FALSE)
  centroids <- centunique[indcentroids, ]
  solkmeans <- kmeans(data, centroids, iter.max = 100000)
  # Encode the k-means cluster labels as a binary membership matrix.
  M <- matrix(0, dim1, dim2)
  for (i in 1:dim1){
    M[i, solkmeans$cluster[i]] <- 1
  }
  M
}
  # ---- Pre-processing ----------------------------------------------------
  # Standardize the data; Xs ends up with zero mean and variance computed
  # with denominator I (hence the sqrt(I/(I-1)) rescaling of scale()).
  data.sd <- data.frame(scale(data, center = TRUE, scale = TRUE))
  I <- dim(data)[1] # number of objects I
  J <- dim(data)[2] # number of variables J
  Xs <- as.matrix(data.sd*sqrt(I/(I-1))) # matrix of normalized data (with variance divided by I)
  # matrix X (I x J) (obs x vars), numeric matrix argument
  tfbest <- matrix(0, r, 1) # computational time at each loop
  # ---- ALS algorithm: run r times, keep the run with the largest F -------
  # Run CDPCA ALS algorithm r times
  for (loop in 1:r) {
    t1 <- proc.time()
    # Initialization
    iter <- 0
    # SDP initialization or not?
    if (SDPinitial){
      U <- SDPinitialization(I, P, Xs, data)
      V <- SDPinitialization(J, Q, Xs, data)
    }else{
      U <- RandMat(I, P)
      V <- RandMat(J, Q)
    }
    #Set of the variables
    X.bar <- diag(1/colSums(U))%*%t(U)%*%Xs # (PxJ) object centroid matrix. It identifies the P centroids in the variable space
    X.group <- U%*%X.bar # (IxJ) matrix. Each object in the data matrix is replaced by its centroid.
    vJ <- matrix(1:J, ncol = 1)
    zJ <- matrix(0, J, 1)
    zQ <- matrix(0, 1, Q)
    # matrix A: one loading column per variable cluster -- the leading
    # eigenvector (via powerMethod, defined elsewhere in the package) of
    # that cluster's block of the centroid-replaced data.
    A <- matrix(0, J, Q)
    for (j in 1: Q) {
      Jxx <- which(V[, j] == 1)
      len.Jxx <- length(Jxx)
      if (sum(V[, j])>1) {
        if (I >= len.Jxx) {
          # CASE 1: I >= J (more observations than variables)
          S.group <- t(X.group[, Jxx])%*%X.group[, Jxx]
          A[Jxx, j] <- powerMethod(S.group)$vector
        }else{
          # CASE 2: I < J ( more variables than observations)
          SS.group <- X.group[, Jxx]%*%t(X.group[, Jxx])
          PMinitial <- powerMethod(SS.group)
          A[Jxx, j] <- t(X.group[, Jxx])%*%PMinitial$vector/sqrt(PMinitial$value)
        } # end if
      }else{
        # Singleton cluster: the loading is trivially 1.
        A[Jxx, j] <- 1
      } # end if
    } # end for
    A0 <- A
    Y.bar <- X.bar%*%A # (PxQ) object centroid matrix. It identifies the P centroids in the reduced space of the Q principal components.
    F0 <- sum(diag(t(U%*%Y.bar)%*%U%*%Y.bar)) # Objective function to maximize.
    Fmax <- F0
    conv <- 2*tol
    # Alternate the U-update and the (V, A)-update until F stops improving.
    while (conv > tol) {
      iter <- iter+1
      Y <- Xs%*%A # (IxQ) component score matrix. It identifies the I objects in the reduced space of the Q principal components.
      U <- matrix(0, I, P)
      # update U: assign each object to the nearest centroid in the
      # reduced space (squared Euclidean distance).
      for (i in 1:I) {
        dist <- rep(0, P)
        for (p in 1:P) {
          dist[p] <- sum((Y[i, ]-Y.bar[p, ])^2)
        } # end for
        min.dist <- which.min(dist)
        U[i, min.dist] <- 1
      } # end for
      su <- colSums(U)
      # Repair empty object clusters by moving objects out of the
      # largest cluster into the empty one.
      while (sum(su == 0) > 0) {
        ind.max <- which.max(su) # p2
        su.max <- max(su) # m2
        ind.min <- which.min(su) # p1
        su.min <- min(su) # m1
        ind.nzU <- which(U[, ind.max] == 1)
        # NOTE(review): 1:floor(su.max)/2 parses as (1:floor(su.max))/2,
        # producing fractional indices that truncate to duplicates; the
        # intent was probably 1:floor(su.max/2) -- confirm before changing.
        ind.sel <- ind.nzU[1:floor(su.max)/2]
        U[ind.sel, ind.min] <- 1
        U[ind.sel, ind.max] <- 0
        su <- colSums(U)
      } # end while
      # Given U and A compute X.bar
      X.bar <- diag(1/colSums(U))%*%t(U)%*%Xs
      Y.bar <- X.bar%*%A
      X.group <- U%*%X.bar
      # Update V and A: tentatively move each variable j to every cluster
      # g and keep the reassignment only if it increases the objective F.
      for (j in 1:J) {
        posmax <- which(V[j, ] == 1)
        for (g in 1:Q) {
          V[j, ] <- diag(Q)[g, ]
          ####
          if (g!=posmax) { ### if 1
            # Only the two affected clusters (source and target) need
            # their loading columns recomputed.
            for (gg in c(g,posmax)) {
              xx <- V[, gg]
              Jxx <- which(xx == 1)
              len.Jxx <- length(Jxx)
              A[, gg] <- zJ
              if (sum(xx) > 1) {
                if (I >= len.Jxx) {
                  # CASE 1: I >= J (more observations than variables)
                  S.group <- t(X.group[, Jxx])%*%X.group[, Jxx]
                  A[Jxx, gg] <- matrix(powerMethod(S.group)$vector, nrow = len.Jxx)
                }else{
                  # CASE 2: I < J (more variables than observations)
                  SS.group <- X.group[, Jxx]%*%t(X.group[, Jxx])
                  PMgeneral <- powerMethod(SS.group)
                  A[Jxx, gg] <- matrix((t(X.group[, Jxx])%*%PMgeneral$vector/sqrt(PMgeneral$value))[, 1], nrow = len.Jxx)
                } # end if
              }else{
                if (sum(xx)==1) {
                  A[Jxx, gg] <- 1
                } # end if
              } # end if
            } # end for
          } # end ### if 1
          ####
          Y.bar <- X.bar%*%A
          F <- sum(diag(t(U%*%Y.bar)%*%U%*%Y.bar))
          if (F > Fmax) {
            # Improvement: remember this assignment and its loadings.
            Fmax <- F
            posmax <- g
            A0 <- A
          }else{
            # No improvement: roll the loadings back.
            A <- A0
          } # end if
        } # end for
        V[j, ]=diag(Q)[posmax, ]
      } # end for
      Y <- Xs%*%A
      Y.bar <- X.bar%*%A
      F <- sum(diag(t(U%*%Y.bar)%*%U%*%Y.bar))
      conv <- F-F0
      if (conv > tol) {
        F0 <- F
        A0 <- A
      }else{
        break
      } # end if
      if (iter == maxit) {
        print("Maximum iterations reached.")
        break
      } # end if
    } # end while
    # Computation time for each loop
    t2 <- proc.time()
    tfinal <- t2-t1
    tfbest[loop] <- tfinal[3]
    #Results to be observed in each run of CDpca
    # BetClusDevTotal <- F/(I*J)*100 # between cluster deviance (two different formula for the same thing)
    BetClusDev <- (F/sum(diag(t(Y)%*%Y)))*100 # between cluster deviance
    tabperloop <- data.frame(cbind(loop, iter, tfinal[3], BetClusDev, F, conv))
    rownames(tabperloop) <- c(" ")
    colnames(tabperloop) <- c("Loop", "Iter", "Loop time", "Between cluster deviance(%):", "F", "Convergence")
    print(tabperloop) # Loop display
    # Keep the best solution across loops (largest objective F); the
    # first loop always seeds the incumbent.
    if (loop == 1) {
      Vcdpca <- V
      Ucdpca <- U
      X.barcdpca <- diag(1/colSums(Ucdpca))%*%t(Ucdpca)%*%Xs
      Acdpca <- A
      Ycdpca <- Xs%*%Acdpca
      Y.barcdpca <- X.barcdpca%*%Acdpca
      Fcdpca <- F
      loopcdpca <- 1
      itercdpca <- iter
      convcdpca <- conv
    } # end if
    if (F > Fcdpca) {
      Vcdpca <- V
      Ucdpca <- U
      X.barcdpca <- diag(1/colSums(Ucdpca))%*%t(Ucdpca)%*%Xs
      Acdpca <- A
      Ycdpca <- Xs%*%Acdpca
      Y.barcdpca <- X.barcdpca%*%Acdpca
      Fcdpca <- F
      loopcdpca <- loop
      itercdpca <- iter
      convcdpca <- conv
    } # end if
  } # end for loop
  # ---- Summary statistics of the best run --------------------------------
  # Computation time for all loops
  tftotal=sum(tfbest)
  # maximum between cluster deviance
  BetClusDevTotal <- Fcdpca/(I*J)*100 # (sum(diag(var(Y)))*(I-1))*100
  BetClusDev <- (sum(diag(t(Ucdpca%*%Y.barcdpca)%*%(Ucdpca%*%Y.barcdpca)))/sum(diag(t(Ycdpca)%*%Ycdpca)))*100
  # error in cdpca model
  Enormcdpca <- 1/I*norm(Xs-Ucdpca%*%Y.barcdpca%*%t(Acdpca), "F")
  # Table Real vs CDPCA classification: cluster label of each object.
  classcdpca <- Ucdpca%*%as.matrix(1:ncol(Ucdpca))
  if (!is.null(class)){
    class <- data.frame(class)
    maxclass <- max(class)
  }
  # Variability of Ycdpca and in decreasing order
  varYcdpca <- var(Ycdpca)
  d <- round(diag(varYcdpca)*100/J, 2)
  dorder <- d[order(d, decreasing = TRUE)]
  tablevar <- data.frame(1:Q, d)
  colnames(tablevar) <- c("Dim", "Var (%)")
  # Presentation of the matrices associated to the CDPCA component sorted by their variances.
  Yorder <- t(t(rbind(d, Ycdpca))[order(d, decreasing = TRUE), ])[-1, ]
  Ybarorder <- t(t(rbind(d, Y.barcdpca))[order(d, decreasing=TRUE),])[-1, ]
  Aorder <- t(t(rbind(d, Acdpca))[order(d, decreasing = TRUE), ])[-1, ]
  Vorder <- t(t(rbind(d, Vcdpca))[order(d, decreasing = TRUE), ])[-1, ]
  #
  # We can check the model using these column sort matrices and observe that
  # Ucdpca%*%Y.barcdpca%*%t(Acdpca) = Ucdpca%*%Ybarorder%*%t(Aorder)
  if ( is.null(class) ) {
    realclasstrue <- 2
    pseudocm <- NULL
    tabrealcdpca <- data.frame(classcdpca)
    colnames(tabrealcdpca) <- c("CDPCA Class")
  }else{
    realclasstrue <- 3
    tabrealcdpca <- data.frame(class, classcdpca)
    colnames(tabrealcdpca) <- c("Real Class", "CDPCA Class")
    # pseudo-confusion matrix
    pseudocm <- table(tabrealcdpca)
  } # end if
  # PLOTS: CDPCA classification (2 panels without real classes, 3 with)
  if (cdpcaplot) {
    displaygraphics <- par(no.readonly = TRUE)
    par(mfrow = c(1, realclasstrue))
    if (realclasstrue == 3) {
      # plot1: objects in the first two components, real classes
      matplot(Yorder[, 1], Yorder[, 2], xlab = "Dim 1", ylab = "Dim 2", type = "n", main = "Real classification")
      for (i in 1:maxclass) { points(Yorder[, 1][class == i], Yorder[, 2][class == i], pch = 1, col = i+1) }
      # plot2: CDPCA classes with explained variance in the axis labels
      matplot(Yorder[, 1], Yorder[, 2], xlab = paste(" Dim 1 (", (dorder[1]), " % )"), ylab = paste(" Dim 2 (", (dorder[2]), " % )"), type = "n", main = "CDPCA classification")
      for (i in 1:P) { points(Yorder[, 1][classcdpca == i], Yorder[, 2][classcdpca == i], pch = 2, col = i+1) }
      points(Ybarorder[, 1], Ybarorder[, 2],pch = 15) # introduce the centroids into the plot
      # plot3 (legend)
      matplot(Y.barcdpca, type = "n", axes = FALSE, xlab = "", ylab = "")
      legend("topleft", pch = 1, col=c(2:(maxclass+1)), legend = c(1:maxclass), ncol = maxclass)
      legend("bottomleft", pch = 2, col=c(2:(P+1)), legend = 1:P, ncol = P)
    }else{
      # plot2: CDPCA classes with explained variance in the axis labels
      matplot(Yorder[, 1], Yorder[, 2], xlab = paste(" Dim 1 (", (dorder[1]), " % )"), ylab = paste(" Dim 2 (", (dorder[2]), " % )"),type = "n", main = "CDPCA classification")
      for (i in 1:P) { points(Yorder[, 1][classcdpca == i], Yorder[, 2][classcdpca == i], pch = 2, col = i+1)}
      points(Ybarorder[, 1],Ybarorder[, 2], pch = 15) # introduce the centroids into the plot
      # plot3 (legend)
      matplot(Y.barcdpca, type = "n", axes = FALSE, xlab = "", ylab = "")
      legend("bottomleft", pch = 2, col=c(2:(P+1)), legend = 1:P, ncol = P)
    } # end if
  } #end if cdpcaplot
  # OUTPUT
  list(timeallloops = tftotal,
       timebestloop = tfbest[loopcdpca],
       loop = loopcdpca,
       iter = itercdpca,
       bcdevTotal = BetClusDevTotal ,
       bcdev = BetClusDev ,
       V = Vorder,
       U = Ucdpca,
       A = Aorder,
       F = Fcdpca,
       Enorm = Enormcdpca,
       tableclass = tabrealcdpca,
       pseudocm = pseudocm,
       Y = Yorder,
       Ybar = Ybarorder,
       dorder = dorder,
       Xscale = Xs)
} # end CDpca function
|
/R/CDpca.R
|
no_license
|
cran/biplotbootGUI
|
R
| false
| false
| 19,809
|
r
|
#############################################
# Function CDpca #
#############################################
CDpca <- function(data, class = NULL, P, Q, SDPinitial = FALSE, tol=10^(-5), maxit, r, cdpcaplot= TRUE) {
# CDpca performs a clustering and disjoint principal components analysis
# on the given numeric data matrix and returns a list of results
#
# Args:
# data: data frame (numeric).
# class: vector (numeric) or 0, if classes of objects are unknown.
# fixAtt: vector (numeric) or 0, for a selection of attributes.
# SDPinitial: 1 or 0, for random initialization of U and V
# nnloads: 1 or 0, for nonnegative loadings
# cdpcaplot: integer: 1 or 0, if no plot is displayed.
# Q: integer, number of clusters of variables.
# P: integer, number of clusters of objects.
# tol: real number, small positive tolerance.
# maxit: integer, maximum of iterations.
# r: number of runs of the cdpca algoritm for the final solution.
#
# Returns:
# iter: iterations used in the best loop for computing the best solution
# loop: best loop number
# timebestloop: computation time on the best loop
# timeallloops: computation time for all loops
# Y: the component score matrix
# Ybar: the object centroids matrix in the reduced space
# A: the component loading matrix
# U: the partition of objects
# V: the partition of variables
# F: function to maximize
# bcdev: between cluster deviance
# bcdevTotal: between cluster deviance over the total variability
# tableclass: cdpca classification
# pseudocm: pseudo confusion matrix of the real and cdpca classifications
# Enorm: error norm for the obtained cdpca model
#CDPCA is applied on normalized data (mean zero and unit variance)
#############################################
# Function RandMat #
#############################################
RandMat <- function(dim1, dim2) {
  # Generates a random binary, row-stochastic assignment matrix.
  #
  # The first dim2 rows are the identity block, which guarantees that every
  # column (cluster) receives at least one row; each remaining row is then
  # assigned to a uniformly random column.
  #
  # Args:
  #   dim1: number of rows (must satisfy dim1 >= dim2).
  #   dim2: number of columns.
  #
  # Returns:
  #   A dim1 x dim2 0/1 matrix with exactly one nonzero element per row.
  U0 <- matrix(0, dim1, dim2)
  U0[seq_len(dim2), seq_len(dim2)] <- diag(dim2)
  # Guard the loop: when dim1 == dim2 the original range (dim2+1):dim1
  # counted DOWNWARDS (e.g. 4:3) and assigned past the last row. Only
  # assign extra rows when they actually exist.
  if (dim1 > dim2) {
    for (j in (dim2 + 1):dim1) {
      p <- sample(1:dim2, 1)
      U0[j, p] <- 1
    }
  }
  U0
} # end RandMat function
######################################
#
# SDP initialization
#
######################################
SDPinitialization <- function(dim1,dim2,D,data){
# Builds a binary, row-stochastic assignment matrix via an SDP-based
# relaxation: center the data, project onto the leading (dim2-1) singular
# directions, form the relaxed membership matrix Z.bar, and refine the
# implied centroids with a single k-means pass.
#
# Args:
#   dim1: number of rows of the assignment matrix (objects I, or variables J).
#   dim2: number of columns (clusters of objects P, or of variables Q).
#   D: normalized data matrix (I x J).
#   data: original data frame; only its row count is used below, to decide
#         whether the rows of D (objects) or the columns of D (variables)
#         are the items being clustered.
#
# Returns:
#   A dim1 x dim2 0/1 matrix with exactly one nonzero element per row.
#
# Column of ones and the centering projector I - (1/n) * 1 %*% t(1).
em <- cbind(rep(1,dim1))
matIem <- diag(dim1)-(1/dim1)*em%*%t(em)
# Cluster objects when dim1 matches the observation count; otherwise
# transpose so that the rows of `data` are the items to be clustered.
if (dim1 == dim(data)[1]){
data <- D
}else{
data <- t(D)
}
# Doubly centered Gram matrix and its leading (dim2-1)-dimensional
# singular subspace.
So <- matIem%*%data%*%t(data)%*%matIem
matF <- matrix(svd(So)$v[, 1:(dim2-1)], ncol = dim2-1)
matFFT <- matF%*%t(matF)
# Approximate solution to the SDP-based model for clustering.
Z.bar <- matFFT + 1/dim1 *em%*%t(em)
# Refine: candidate centroids implied by the relaxed solution.
cent <- Z.bar%*%data
centunique <- unique(cent)
t <- dim(centunique)[1]
# Initial k-means step seeded with dim2 distinct candidate centroids.
# NOTE(review): sample(1:t, dim2, replace=FALSE) errors when t < dim2,
# i.e. when fewer than dim2 distinct candidate centroids exist -- confirm
# this cannot happen for the intended inputs.
indcentroids <- sort(sample(1:t, dim2, replace=FALSE), decreasing=FALSE)
centroids <- centunique[indcentroids, ]
solkmeans <- kmeans(data, centroids, iter.max = 100000)
# Encode the k-means partition as a 0/1 membership matrix.
M <- matrix(0, dim1, dim2)
for (i in 1:dim1){
M[i, solkmeans$cluster[i]] <- 1
}
M
}
data.sd <- data.frame(scale(data, center = TRUE, scale = TRUE))
I <- dim(data)[1] # number of objects I
J <- dim(data)[2] # number of variables J
Xs <- as.matrix(data.sd*sqrt(I/(I-1))) # matrix of normalized data (with variance divided by I)
# matriz X (I x J) (obs x vars), numeric matrix argument
tfbest <- matrix(0, r, 1) # computational time at each loop
# Run CDPCA ALS algorithm r times
for (loop in 1:r) {
t1 <- proc.time()
# Initialization
iter <- 0
# SDP initialization or not?
if (SDPinitial){
U <- SDPinitialization(I, P, Xs, data)
V <- SDPinitialization(J, Q, Xs, data)
}else{
U <- RandMat(I, P)
V <- RandMat(J, Q)
}
#Set of the variables
X.bar <- diag(1/colSums(U))%*%t(U)%*%Xs # (PxJ) object centroid matrix. It identifies the P centroids in the variable space
X.group <- U%*%X.bar # (IxJ) matrix. Each object in the data matrix is replaced by its centroid.
vJ <- matrix(1:J, ncol = 1)
zJ <- matrix(0, J, 1)
zQ <- matrix(0, 1, Q)
# matrix A
A <- matrix(0, J, Q)
for (j in 1: Q) {
Jxx <- which(V[, j] == 1)
len.Jxx <- length(Jxx)
if (sum(V[, j])>1) {
if (I >= len.Jxx) {
# CASE 1: I >= J (more observations than variables)
S.group <- t(X.group[, Jxx])%*%X.group[, Jxx]
A[Jxx, j] <- powerMethod(S.group)$vector
}else{
# CASE 2: I < J ( more variables than observations)
SS.group <- X.group[, Jxx]%*%t(X.group[, Jxx])
PMinitial <- powerMethod(SS.group)
A[Jxx, j] <- t(X.group[, Jxx])%*%PMinitial$vector/sqrt(PMinitial$value)
} # end if
}else{
A[Jxx, j] <- 1
} # end if
} # end for
A0 <- A
Y.bar <- X.bar%*%A # (PxQ) object centroid matrix. It identifies the P centroids in the reduced space of the Q principal components.
F0 <- sum(diag(t(U%*%Y.bar)%*%U%*%Y.bar)) # Objective function to maximize.
Fmax <- F0
conv <- 2*tol
while (conv > tol) {
iter <- iter+1
Y <- Xs%*%A # (IxQ) component score matrix. It identifies the I objects in the reduced space of the Q principal components.
U <- matrix(0, I, P)
# update U
for (i in 1:I) {
dist <- rep(0, P)
for (p in 1:P) {
dist[p] <- sum((Y[i, ]-Y.bar[p, ])^2)
} # end for
min.dist <- which.min(dist)
U[i, min.dist] <- 1
} # end for
su <- colSums(U)
while (sum(su == 0) > 0) {
ind.max <- which.max(su) # p2
su.max <- max(su) # m2
ind.min <- which.min(su) # p1
su.min <- min(su) # m1
ind.nzU <- which(U[, ind.max] == 1)
ind.sel <- ind.nzU[1:floor(su.max)/2]
U[ind.sel, ind.min] <- 1
U[ind.sel, ind.max] <- 0
su <- colSums(U)
} # end while
# Given U and A compute X.bar
X.bar <- diag(1/colSums(U))%*%t(U)%*%Xs
Y.bar <- X.bar%*%A
X.group <- U%*%X.bar
# Update V and A
for (j in 1:J) {
posmax <- which(V[j, ] == 1)
for (g in 1:Q) {
V[j, ] <- diag(Q)[g, ]
####
if (g!=posmax) { ### if 1
for (gg in c(g,posmax)) {
xx <- V[, gg]
Jxx <- which(xx == 1)
len.Jxx <- length(Jxx)
A[, gg] <- zJ
if (sum(xx) > 1) {
if (I >= len.Jxx) {
# CASE 1: I >= J (more observations than variables)
S.group <- t(X.group[, Jxx])%*%X.group[, Jxx]
A[Jxx, gg] <- matrix(powerMethod(S.group)$vector, nrow = len.Jxx)
}else{
# CASE 2: I < J (more variables than observations)
SS.group <- X.group[, Jxx]%*%t(X.group[, Jxx])
PMgeneral <- powerMethod(SS.group)
A[Jxx, gg] <- matrix((t(X.group[, Jxx])%*%PMgeneral$vector/sqrt(PMgeneral$value))[, 1], nrow = len.Jxx)
} # end if
}else{
if (sum(xx)==1) {
A[Jxx, gg] <- 1
} # end if
} # end if
} # end for
} # end ### if 1
####
Y.bar <- X.bar%*%A
F <- sum(diag(t(U%*%Y.bar)%*%U%*%Y.bar))
if (F > Fmax) {
Fmax <- F
posmax <- g
A0 <- A
}else{
A <- A0
} # end if
} # end for
V[j, ]=diag(Q)[posmax, ]
} # end for
Y <- Xs%*%A
Y.bar <- X.bar%*%A
F <- sum(diag(t(U%*%Y.bar)%*%U%*%Y.bar))
conv <- F-F0
if (conv > tol) {
F0 <- F
A0 <- A
}else{
break
} # end if
if (iter == maxit) {
print("Maximum iterations reached.")
break
} # end if
} # end while
# Computation time for each loop
t2 <- proc.time()
tfinal <- t2-t1
tfbest[loop] <- tfinal[3]
#Results to be observed in each run of CDpca
# BetClusDevTotal <- F/(I*J)*100 # between cluster deviance (two different formula for the same thing)
BetClusDev <- (F/sum(diag(t(Y)%*%Y)))*100 # between cluster deviance
tabperloop <- data.frame(cbind(loop, iter, tfinal[3], BetClusDev, F, conv))
rownames(tabperloop) <- c(" ")
colnames(tabperloop) <- c("Loop", "Iter", "Loop time", "Between cluster deviance(%):", "F", "Convergence")
print(tabperloop) # Loop display
if (loop == 1) {
Vcdpca <- V
Ucdpca <- U
X.barcdpca <- diag(1/colSums(Ucdpca))%*%t(Ucdpca)%*%Xs
Acdpca <- A
Ycdpca <- Xs%*%Acdpca
Y.barcdpca <- X.barcdpca%*%Acdpca
Fcdpca <- F
loopcdpca <- 1
itercdpca <- iter
convcdpca <- conv
} # end if
if (F > Fcdpca) {
Vcdpca <- V
Ucdpca <- U
X.barcdpca <- diag(1/colSums(Ucdpca))%*%t(Ucdpca)%*%Xs
Acdpca <- A
Ycdpca <- Xs%*%Acdpca
Y.barcdpca <- X.barcdpca%*%Acdpca
Fcdpca <- F
loopcdpca <- loop
itercdpca <- iter
convcdpca <- conv
} # end if
} # end for loop
# Computation time for all loops
tftotal=sum(tfbest)
# maximum between cluster deviance
BetClusDevTotal <- Fcdpca/(I*J)*100 # (sum(diag(var(Y)))*(I-1))*100
BetClusDev <- (sum(diag(t(Ucdpca%*%Y.barcdpca)%*%(Ucdpca%*%Y.barcdpca)))/sum(diag(t(Ycdpca)%*%Ycdpca)))*100
# error in cdpca model
Enormcdpca <- 1/I*norm(Xs-Ucdpca%*%Y.barcdpca%*%t(Acdpca), "F")
# Table Real vs CDPCA classification
classcdpca <- Ucdpca%*%as.matrix(1:ncol(Ucdpca))
if (!is.null(class)){
class <- data.frame(class)
maxclass <- max(class)
}
# Variability of Ycdpca and in decreasing order
varYcdpca <- var(Ycdpca)
d <- round(diag(varYcdpca)*100/J, 2)
dorder <- d[order(d, decreasing = TRUE)]
tablevar <- data.frame(1:Q, d)
colnames(tablevar) <- c("Dim", "Var (%)")
# Presentation of the matrices associated to the CDPCA component sorted by their variances.
Yorder <- t(t(rbind(d, Ycdpca))[order(d, decreasing = TRUE), ])[-1, ]
Ybarorder <- t(t(rbind(d, Y.barcdpca))[order(d, decreasing=TRUE),])[-1, ]
Aorder <- t(t(rbind(d, Acdpca))[order(d, decreasing = TRUE), ])[-1, ]
Vorder <- t(t(rbind(d, Vcdpca))[order(d, decreasing = TRUE), ])[-1, ]
#
# We can check the model using these column sort matrices and observe that
# Ucdpca%*%Y.barcdpca%*%t(Acdpca) = Ucdpca%*%Ybarorder%*%t(Aorder)
if ( is.null(class) ) {
realclasstrue <- 2
pseudocm <- NULL
tabrealcdpca <- data.frame(classcdpca)
colnames(tabrealcdpca) <- c("CDPCA Class")
}else{
realclasstrue <- 3
tabrealcdpca <- data.frame(class, classcdpca)
colnames(tabrealcdpca) <- c("Real Class", "CDPCA Class")
# pseudo-confusion matrix
pseudocm <- table(tabrealcdpca)
} # end if
# PLOTS: CDPCA classification
if (cdpcaplot) {
displaygraphics <- par(no.readonly = TRUE)
par(mfrow = c(1, realclasstrue))
if (realclasstrue == 3) {
# plot1
matplot(Yorder[, 1], Yorder[, 2], xlab = "Dim 1", ylab = "Dim 2", type = "n", main = "Real classification")
for (i in 1:maxclass) { points(Yorder[, 1][class == i], Yorder[, 2][class == i], pch = 1, col = i+1) }
# plot2
matplot(Yorder[, 1], Yorder[, 2], xlab = paste(" Dim 1 (", (dorder[1]), " % )"), ylab = paste(" Dim 2 (", (dorder[2]), " % )"), type = "n", main = "CDPCA classification")
for (i in 1:P) { points(Yorder[, 1][classcdpca == i], Yorder[, 2][classcdpca == i], pch = 2, col = i+1) }
points(Ybarorder[, 1], Ybarorder[, 2],pch = 15) # introduce the centroids into the plot
# plot3 (legend)
matplot(Y.barcdpca, type = "n", axes = FALSE, xlab = "", ylab = "")
legend("topleft", pch = 1, col=c(2:(maxclass+1)), legend = c(1:maxclass), ncol = maxclass)
legend("bottomleft", pch = 2, col=c(2:(P+1)), legend = 1:P, ncol = P)
}else{
# plot2
matplot(Yorder[, 1], Yorder[, 2], xlab = paste(" Dim 1 (", (dorder[1]), " % )"), ylab = paste(" Dim 2 (", (dorder[2]), " % )"),type = "n", main = "CDPCA classification")
for (i in 1:P) { points(Yorder[, 1][classcdpca == i], Yorder[, 2][classcdpca == i], pch = 2, col = i+1)}
points(Ybarorder[, 1],Ybarorder[, 2], pch = 15) # introduce the centroids into the plot
# plot3 (legend)
matplot(Y.barcdpca, type = "n", axes = FALSE, xlab = "", ylab = "")
legend("bottomleft", pch = 2, col=c(2:(P+1)), legend = 1:P, ncol = P)
} # end if
} #end if cdpcaplot
# OUTPUT
list(timeallloops = tftotal,
timebestloop = tfbest[loopcdpca],
loop = loopcdpca,
iter = itercdpca,
bcdevTotal = BetClusDevTotal ,
bcdev = BetClusDev ,
V = Vorder,
U = Ucdpca,
A = Aorder,
F = Fcdpca,
Enorm = Enormcdpca,
tableclass = tabrealcdpca,
pseudocm = pseudocm,
Y = Yorder,
Ybar = Ybarorder,
dorder = dorder,
Xscale = Xs)
} # end CDpca function
|
`bioenv.numeric` <-
function(comm, env, method = "spearman", index = "bray", as.numeric = NULL,
    upto = ncol(env), trace = FALSE, partial = NULL, ...) {
    # Wrapper around vegan::bioenv() that first coerces the columns named in
    # `as.numeric` to numeric and then keeps only the numeric columns of
    # `env`, so bioenv() never sees factor/character variables.
    #
    # Args:
    #   comm: community data, forwarded to bioenv().
    #   env: data frame of environmental variables.
    #   as.numeric: character vector of column names in `env` to coerce with
    #     base::as.numeric before filtering (NULL coerces nothing). The
    #     argument name shadows base::as.numeric as a variable, but R's
    #     function-call lookup still resolves as.numeric(...) to the base
    #     coercion function.
    #   method, index, upto, trace, partial, ...: forwarded to bioenv().
    #
    # Returns:
    #   The object returned by vegan::bioenv().
    env2 <- env
    if (!is.null(as.numeric)) {
        for (i in seq_along(as.numeric)) {
            # Only coerce columns that actually exist in env.
            if (as.numeric[i] %in% names(env)) {
                env2[, as.numeric[i]] <- as.numeric(env[, as.numeric[i]])
            }
        }
    }
    # Keep numeric columns only (original order preserved). drop = FALSE
    # keeps env2 a data frame even when a single column survives; the old
    # one-at-a-time env2[, -focal.var] removal could collapse env2 to a
    # bare vector and then fail on the next iteration.
    keep <- vapply(env2, is.numeric, logical(1))
    env2 <- env2[, keep, drop = FALSE]
    env <- env2
    if (length(names(env)) < 2) {
        warning("Not enough numeric variables in environmental data set")
    }
    result <- bioenv(comm = comm, env = env, method = method, index = index,
        upto = upto, trace = trace, partial = partial, ...)
    return(result)
}
|
/R/bioenv.test.R
|
no_license
|
BRozhkov/BiodiversityR
|
R
| false
| false
| 881
|
r
|
`bioenv.numeric` <-
function(comm, env, method = "spearman", index = "bray", as.numeric = NULL,
upto = ncol(env), trace = FALSE, partial = NULL, ...) {
# Wrapper around vegan::bioenv() that keeps only numeric environmental
# variables, optionally coercing named columns to numeric first.
#
# Args:
#   comm: community data, forwarded to bioenv().
#   env: data frame of environmental variables.
#   as.numeric: character vector of column names in `env` to coerce with
#     base::as.numeric before filtering (NULL coerces nothing). The name
#     shadows base::as.numeric as a variable, but function-call lookup
#     still resolves as.numeric(...) to the base coercion function.
#   method, index, upto, trace, partial, ...: forwarded to bioenv().
#
# Returns:
#   The object returned by vegan::bioenv().
env2 <- env
# Coerce the requested columns that actually exist in env.
if (is.null(as.numeric) == F) {
for (i in 1:length(as.numeric)) {
if(any(names(env) == as.numeric[i])) {
env2[, as.numeric[i]] <- as.numeric(env[, as.numeric[i]])
}
}
}
# Drop every non-numeric column, one at a time.
# NOTE(review): env2[, -focal.var] lacks drop = FALSE, so if a removal
# leaves a single column, env2 collapses to a bare vector and the next
# iteration would fail -- confirm env always keeps >= 2 numeric columns.
vars <- names(env2)
for (i in 1:length(vars)) {
focal.var <- which(names(env2)==vars[i])
if (is.numeric(env2[, focal.var]) == F) {env2 <- env2[, -focal.var]}
}
env <- env2
# Warn (but still call bioenv) when fewer than two numeric variables remain.
if (length(names(env)) < 2) {warning("Not enough numeric variables in environmental data set")}
result <- bioenv(comm=comm, env=env, method=method, index=index,
upto=upto, trace=trace, partial=partial, ...)
return(result)
}
|
# Shared objects for the Denver crime Shiny app: load packages, read the
# raw incident CSV, and derive the date/time fields used by the UI/server.
library(tidyverse)
library(shiny)
library(shinythemes)
library(ggthemes)
library(lubridate)
library(plotly)
library(smooth)
library(leaflet)
library(leaflet.extras)

# Raw incident-level data (one row per reported incident).
crime <- read_csv("../data/crime.csv")

# Parse the report timestamp and expose the calendar fields the app
# filters and aggregates on. reportedIncidents is a unit count column so
# that sum() over any grouping yields incident totals.
df <- crime %>%
  mutate(Date = mdy_hms(REPORTED_DATE), reportedIncidents = 1) %>%
  mutate(Year = factor(year(Date)), Month = factor(month(Date)), DayOfWeek = wday(Date, label = TRUE), Hour = factor(hour(Date))) %>%
  mutate(YearMonth = factor(paste0(as.character(Year), "-", as.character(Month)))) %>%
  select(Date, Year, Month, DayOfWeek, Hour, FIRST_OCCURRENCE_DATE, LAST_OCCURRENCE_DATE, REPORTED_DATE,
         OFFENSE_TYPE_ID, OFFENSE_CATEGORY_ID,
         GEO_LAT, GEO_LON, NEIGHBORHOOD_ID, DISTRICT_ID, PRECINCT_ID,
         IS_CRIME, IS_TRAFFIC,
         reportedIncidents)

# Choices for the offense-category input widget.
OFFENSE_CATEGORY_CHOICES <- unique(df$OFFENSE_CATEGORY_ID)
|
/shinyApp/global.R
|
no_license
|
stoltzmaniac/Denver-Crime-Analysis
|
R
| false
| false
| 846
|
r
|
# Shared objects for the Denver crime Shiny app: packages, data load, and
# derived date/time fields used by ui.R and server.R.
library(tidyverse)
library(shiny)
library(shinythemes)
library(ggthemes)
library(lubridate)
library(plotly)
library(smooth)
library(leaflet)
library(leaflet.extras)
# Raw incident-level data (one row per reported incident).
crime = read_csv('../data/crime.csv')
# Parse the report timestamp and derive calendar fields; reportedIncidents
# is a unit count column so that sum() yields incident totals.
df = crime %>%
  mutate(Date = mdy_hms(REPORTED_DATE), reportedIncidents = 1) %>%
  mutate(Year = factor(year(Date)), Month = factor(month(Date)), DayOfWeek = wday(Date, label=TRUE), Hour = factor(hour(Date))) %>%
  mutate(YearMonth = factor(paste0(as.character(Year), "-", as.character(Month)))) %>%
  select(Date, Year, Month, DayOfWeek, Hour, FIRST_OCCURRENCE_DATE, LAST_OCCURRENCE_DATE, REPORTED_DATE,
         OFFENSE_TYPE_ID,OFFENSE_CATEGORY_ID,
         GEO_LAT,GEO_LON,NEIGHBORHOOD_ID, DISTRICT_ID, PRECINCT_ID,
         IS_CRIME, IS_TRAFFIC,
         reportedIncidents)
# Choices for the offense-category input widget.
OFFENSE_CATEGORY_CHOICES = unique(df$OFFENSE_CATEGORY_ID)
|
library(ggplot2)
library(plyr)
library(lme4)
####For English data --------------------------------------------------------
# Mixed-effects logistic regressions of relative-clause extraposition in the
# English corpus: read CorpusSearch codes, clean the token codes, z-score the
# numeric predictors, fit glmer models, and compare them via LRT.
####Read the file of CorpusSearch codes into an R data frame.
# header = FALSE spelled out: T/F are ordinary (reassignable) variables.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ymeb.cod.fixed.ooo", header = FALSE, sep = ":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed", "Position", "Clause", "TextOrSpeech", "Weight", "Year", "Text")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
# Excluding Year == "" is crucial: it drops codes for clauses above the one
# containing the relevant token (those were never coded for Year). In the
# "fixed" files the textid sits in the year slot, so the data are subset and
# droplevels()-ed again after Year is converted to numeric.
ex.data <- subset(foo, Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z" & Year != "NA")
library(gdata)
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
# Second pass (see note above): drop the rows whose Year became NA after the
# numeric conversion, then refresh the factor levels.
ex.data.fixed <- subset(ex.data, Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression with extraposition as the
#### binary outcome (random intercept per Text), summarize it, and compare
#### it to a model without the year terms.
"English Model"
# Standardize (z-score) the numeric predictors so the mixed-effects
# optimizer converges more reliably.
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center = TRUE, scale = TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center = TRUE, scale = TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed ~ (1 | Text) + zYear + Position + Clause + TextOrSpeech + zWeight + zYear * Position + zYear * Clause, family = binomial, data = ex.data.fixed)
ex.fit2 <- glmer(Extraposed ~ (1 | Text) + Position + Clause + TextOrSpeech + zWeight, family = binomial, data = ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
####For Icelandic data.
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ice.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text","Genre","Textid")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
#####THIS IS A COMMENT REPEATING A COMMENT ABOVE, BECAUSE OF THE NEED TO DROPLEVELS AGAIN: Note that it is crucial to make sure empty string Year is not included, because this deletes codes which correspond to clauses that are above the clause containing the relevant token. They were never coded for Year because they were not relevant. **Note that in the "fixed" sets, the textid has been put into the year place, so it is necessary to subset again and droplevels again after converting Year to numeric.**
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"Icelandic Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight*Genre, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit, ex.fit2, test = "Chisq")
anova(ex.fit, ex.fit3, test = "Chisq")
####For Middle French data.
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.fre.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"French Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
anova(ex.fit3, ex.fit, test = "Chisq")
####For Portuguese data.
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.port.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"Portuguese Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
anova(ex.fit, ex.fit3, test = "Chisq")
#See if slope is same across langs
"All Languages"
library(ggplot2)
library(plyr)
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/plotsandstats/allLangsEx.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text","Language")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Year != "z" & Year != "0" & Year != "" & Position != "z" & Weight != "z" & Language != "French" & Language != "Portuguese" & TextOrSpeech != "z")
library(gdata)
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
nrow(ex.data.fixed)
#Note that I only consider subject position below
"Note that I only consider subject position below"
#ex.crossLing.fit <- glm(Extraposed~Year*Clause*TextOrSpeech*Weight*Language, family = binomial, data=ex.data.fixed)
#summary(ex.crossLing.fit)
#anova(ex.crossLing.fit, test = "Chisq")
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
ex.crossLing.fit <- glmer(Extraposed~(1|Text)+zYear+zWeight+Language, family = binomial, data=ex.data.fixed)
summary(ex.crossLing.fit)
##Now for subject only:
####For English data
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ymeb.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year", "Text")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
#####Note that it is crucial to make sure empty string Year is not included, because this deletes codes which correspond to clauses that are above the clause containing the relevant token. They were never coded for Year because they were not relevant. Note that in the "fixed" sets, the textid has been put into the year place, so it is necessary to subset again and droplevels again after converting Year to numeric.
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z" & Year != "NA")
library(gdata)
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
#####THIS IS A COMMENT REPEATING A COMMENT ABOVE, BECAUSE OF THE NEED TO DROPLEVELS AGAIN: Note that it is crucial to make sure empty string Year is not included, because this deletes codes which correspond to clauses that are above the clause containing the relevant token. They were never coded for Year because they were not relevant. **Note that in the "fixed" sets, the textid has been put into the year place, so it is necessary to subset again and droplevels again after converting Year to numeric.**
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"English Model"
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
####For Icelandic data.
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ice.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text","Genre","Textid")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
#####THIS IS A COMMENT REPEATING A COMMENT ABOVE, BECAUSE OF THE NEED TO DROPLEVELS AGAIN: Note that it is crucial to make sure empty string Year is not included, because this deletes codes which correspond to clauses that are above the clause containing the relevant token. They were never coded for Year because they were not relevant. **Note that in the "fixed" sets, the textid has been put into the year place, so it is necessary to subset again and droplevels again after converting Year to numeric.**
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"Icelandic Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight*Genre, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
#ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit, ex.fit2, test = "Chisq")
#anova(ex.fit, ex.fit3, test = "Chisq")
####For Middle French data.
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.fre.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"French Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
#ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
#anova(ex.fit3, ex.fit, test = "Chisq")
####For Portuguese data.
####Read the file of CorpusSearch codes into an R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.port.cod.fixed.ooo",header=F,sep=":")
####Give appropriate column names to the columns
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
####Throw out all the codes that refer to tokens that are irrelevant for the study.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
####Make sure R factor groups don't include factors for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
####Make sure dates abd 0/1 codes are stored as numbers, and weights
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
####Fit a logistic regression with ex as a binary outcome, output a summary of the model, and output a model comparison to models without various factors. Leaves out Author.
"Portuguese Model"
#Zing the numeric predictors so the mixed effects model doesnt barf so much:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
#Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
#ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
#anova(ex.fit, ex.fit3, test = "Chisq")
|
/extraposition/plotsandstats/extraposition_diach_randomText.R
|
no_license
|
joelcw/tyneside
|
R
| false
| false
| 22,923
|
r
|
# Diachronic mixed-effects models of relative-clause extraposition in four
# languages (English, Icelandic, Middle French, Portuguese), each with a
# random intercept for Text.  Every language section follows the same recipe:
# read CorpusSearch codes, drop irrelevant ("z") tokens, convert codes to
# numeric, z-score the numeric predictors, then fit and compare glmer models.
# Bare quoted strings act as progress markers when the script is run with echo.
library(ggplot2)
library(plyr)
library(lme4)
#### ---- English data ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ymeb.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns.
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year", "Text")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes).
"Got up to subsetting"
##### Note that it is crucial to make sure empty-string Year is not included,
##### because this deletes codes which correspond to clauses that are above the
##### clause containing the relevant token.  They were never coded for Year
##### because they were not relevant.  Note that in the "fixed" sets the text
##### id has been put into the year place, so it is necessary to subset again
##### and droplevels again after converting Year to numeric.
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z" & Year != "NA")
# NOTE(review): no gdata function is used in the visible code; possibly a leftover.
library(gdata)
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
##### NOTE (deliberately repeated from above): empty-string Year codes must be
##### excluded; in the "fixed" sets the text id occupies the year slot, so we
##### subset and droplevels again after converting Year to numeric.
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression with extraposition as a binary
#### outcome (random intercept for Text), print a model summary, and compare
#### against a reduced model without the year terms.  Author is left out.
"English Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#m <- glmer(remission ~ IL6 + CRP + CancerStage + LengthofStay + Experience + (1 | DID), data = hdp, family = binomial, control = glmerControl(optimizer = "bobyqa"), nAGQ = 10)
#ex.fit <- glmer(Extraposed~(1|Text)+Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data)
#ex.fit <- glmer(Extraposed~(1|Text)*Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#ex.fit2 <- glmer(Extraposed~(1|Text)+Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
#anova(ex.fit2, ex.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
#### ---- Icelandic data ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ice.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns (Icelandic additionally has
#### Genre and Textid).
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text","Genre","Textid")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes).
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
##### NOTE (deliberately repeated from above): empty-string Year codes must be
##### excluded; in the "fixed" sets the text id occupies the year slot, so we
##### subset and droplevels again after converting Year to numeric.
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against reduced/alternative models.  Author is left out.
"Icelandic Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight*Genre, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit, ex.fit2, test = "Chisq")
anova(ex.fit, ex.fit3, test = "Chisq")
#### ---- Middle French data ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.fre.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns.
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes).
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
# Re-subset and droplevels after the numeric conversion (see the note above).
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against reduced/alternative models.  Author is left out.
"French Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
anova(ex.fit3, ex.fit, test = "Chisq")
#### ---- Portuguese data ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.port.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns.
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes).
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position != "z" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
# Re-subset and droplevels after the numeric conversion (see the note above).
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against reduced/alternative models.  Author is left out.
"Portuguese Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Position+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
anova(ex.fit, ex.fit3, test = "Chisq")
# ---- Cross-linguistic pooled model: see if the slope is the same across
# languages.  French and Portuguese are excluded in the subset below, so only
# the remaining languages enter this model.
"All Languages"
library(ggplot2)
library(plyr)
#### Read the pooled file of CorpusSearch codes (colon-separated, no header)
#### into an R data frame.
foo <- read.delim("~/tyneside/extraposition/plotsandstats/allLangsEx.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns (includes a Language column).
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text","Language")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes), and drop French and Portuguese from the pooled data.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Year != "z" & Year != "0" & Year != "" & Position != "z" & Weight != "z" & Language != "French" & Language != "Portuguese" & TextOrSpeech != "z")
# NOTE(review): no gdata function is used in the visible code; possibly a leftover.
library(gdata)
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
# Re-subset and droplevels after the numeric conversion.
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
nrow(ex.data.fixed)
# Only subject position is considered below (echoed as a reminder).
"Note that I only consider subject position below"
#ex.crossLing.fit <- glm(Extraposed~Year*Clause*TextOrSpeech*Weight*Language, family = binomial, data=ex.data.fixed)
#summary(ex.crossLing.fit)
#anova(ex.crossLing.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Pooled model with Language as a fixed effect and a random intercept for Text.
ex.crossLing.fit <- glmer(Extraposed~(1|Text)+zYear+zWeight+Language, family = binomial, data=ex.data.fixed)
summary(ex.crossLing.fit)
## ---- Subject position only: the four per-language analyses are repeated
## with Position fixed to "sbj", so Position drops out of the model formulas.
#### ---- English data (subject position only) ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ymeb.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns.
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year", "Text")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes), keeping only subject position.
"Got up to subsetting"
##### Note that it is crucial to make sure empty-string Year is not included,
##### because this deletes codes which correspond to clauses that are above the
##### clause containing the relevant token.  They were never coded for Year
##### because they were not relevant.  In the "fixed" sets the text id has been
##### put into the year place, so it is necessary to subset again and
##### droplevels again after converting Year to numeric.
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z" & Year != "NA")
# NOTE(review): no gdata function is used in the visible code; possibly a leftover.
library(gdata)
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
##### NOTE (deliberately repeated from above): empty-string Year codes must be
##### excluded; in the "fixed" sets the text id occupies the year slot, so we
##### subset and droplevels again after converting Year to numeric.
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against a reduced model without the year terms.  Author is left out.
"English Model"
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
#### ---- Icelandic data (subject position only) ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.ice.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns (Icelandic additionally has
#### Genre and Textid).
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text","Genre","Textid")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes), keeping only subject position.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
##### NOTE (deliberately repeated from above): empty-string Year codes must be
##### excluded; in the "fixed" sets the text id occupies the year slot, so we
##### subset and droplevels again after converting Year to numeric.
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against a reduced model without the year terms.  Author is left out.
"Icelandic Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight*Genre, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
#ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit, ex.fit2, test = "Chisq")
#anova(ex.fit, ex.fit3, test = "Chisq")
#### ---- Middle French data (subject position only) ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.fre.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns.
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes), keeping only subject position.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
# Re-subset and droplevels after the numeric conversion (see the note above).
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against a reduced model without the year terms.  Author is left out.
"French Model"
#ex.fit <- glm(Extraposed~Year*Position*Clause*TextOrSpeech*Weight, family = binomial, data=ex.data.fixed)
#summary(ex.fit)
#anova(ex.fit, test = "Chisq")
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause+zYear*zWeight, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
#ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Position+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
#anova(ex.fit3, ex.fit, test = "Chisq")
#### ---- Portuguese data (subject position only) ----
#### Read the file of CorpusSearch codes (colon-separated, no header) into an
#### R data frame.
foo <- read.delim("~/tyneside/extraposition/queriesandoutput/cprelExtrapos.port.cod.fixed.ooo",header=F,sep=":")
#### Give appropriate column names to the columns.
colnames(foo) <- c("Extraposed","Position","Clause","TextOrSpeech", "Weight","Year","Text")
#### Throw out all the codes that refer to tokens that are irrelevant for the
#### study ("z" codes), keeping only subject position.
"Got up to subsetting"
ex.data <- subset(foo,Extraposed != "z" & Clause != "z" & Position == "sbj" & Year != "z" & Year != "0" & Year != "" & Weight != "z")
#### Make sure R factor groups don't include levels for the irrelevant codes.
ex.data <- droplevels(ex.data)
"finished droplevels"
#### Make sure dates and 0/1 codes are stored as numbers, and weights too.
ex.data$Year <- as.numeric(as.character(ex.data$Year))
ex.data$Extraposed <- as.numeric(as.character(ex.data$Extraposed))
ex.data$Weight <- as.numeric(as.character(ex.data$Weight))
# Re-subset and droplevels after the numeric conversion (see the note above).
ex.data.fixed <- subset(ex.data,Year != "NA")
ex.data.fixed <- droplevels(ex.data.fixed)
"finished converting to numeric"
#### Fit a mixed-effects logistic regression (random intercept for Text) and
#### compare against a reduced model without the year terms.  Author is left out.
"Portuguese Model"
# Z-score the numeric predictors so the mixed-effects model converges more easily:
ex.data.fixed$zYear <- scale(ex.data.fixed$Year, center=TRUE, scale=TRUE)
ex.data.fixed$zWeight <- scale(ex.data.fixed$Weight, center=TRUE, scale=TRUE)
# Note that any models more complex than the first one below did not converge.
ex.fit <- glmer(Extraposed~(1|Text)+zYear+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
ex.fit2 <- glmer(Extraposed~(1|Text)+Clause+TextOrSpeech+zWeight, family = binomial, data=ex.data.fixed)
#ex.fit3 <- glmer(Extraposed~(1|Text)+zYear+Position+Clause+TextOrSpeech+zWeight+zYear*Clause, family = binomial, data=ex.data.fixed)
summary(ex.fit)
anova(ex.fit2, ex.fit, test = "Chisq")
#anova(ex.fit, ex.fit3, test = "Chisq")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Reads FARS data sets for multiple years and creates a list of tibbles.}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{a vector of integers, one for each year for which you want to
load the FARS data}
}
\value{
a list of tibbles, one for each year given
}
\description{
This function reads FARS data sets for multiple years from the current
working directory and creates a list of tibbles. The list contains as many
elements as years were given. Each tibble has two columns: \code{MONTH}
and \code{year}, where \code{year} is the given year and \code{MONTH}
contains the number of the month to which each observation belongs.
}
\details{
This function uses \code{fars_read} and \code{make_filename}. These
functions may throw errors, for example, if files with names for the
requested FARS datasets do not exist in the current working directory.
}
\examples{
\dontrun{
fars_data <- fars_read_years(c(2013, 2014))
}
}
|
/man/fars_read_years.Rd
|
no_license
|
plexxx/myAssignment
|
R
| false
| true
| 1,078
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Reads FARS data sets for multiple years and creates a list of tibbles.}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{a vector of integers, one for each year for which you want to
load the FARS data}
}
\value{
a list of tibbles, one for each year given
}
\description{
This function reads FARS data sets for multiple years from the current
working directory and creates a list of tibbles. The list contains as many
elements as years were given. Each tibble has two columns: \code{MONTH}
and \code{year}, where \code{year} is the given year and \code{MONTH}
contains the number of the month to which each observation belongs.
}
\details{
This function uses \code{fars_read} and \code{make_filename}. These
functions may throw errors, for example, if files with names for the
requested FARS datasets do not exist in the current working directory.
}
\examples{
\dontrun{
fars_data <- fars_read_years(c(2013, 2014))
}
}
|
## ----setup, include=FALSE, echo=TRUE-------------------------------------
# kNN classification tutorial: predict a student's mother's job ("mjob") from
# the UCI student-mat.csv data set, first with class::knn() and then with a
# caret-tuned kNN model.
knitr::opts_chunk$set(tidy=TRUE, echo = TRUE)
library(formatR)
getwd()
# NOTE(review): hard-coded absolute Windows path — adjust before running on
# another machine.
setwd("C:/Users/User/Documents/NCI20-21_Sem3/DWM HDSDA_JAN/Lecture 4 Classification")
## --STEP 1--echo=TRUE-----------------------------------------------------------
#read in the .csv file using the url() function
# (the file is semicolon-separated, hence sep=";")
data <- read.table("student-mat.csv",sep=";",header=TRUE)
#change all variable names to lowercase
var.names.data <-tolower(colnames(data))
colnames(data) <- var.names.data
head(data)
## --STEP 2----------------------------------------------------------------------
# NOTE(review): these installs run unconditionally on every execution; consider
# guarding with requireNamespace() or installing once interactively.
install.packages("class", dependencies = TRUE)
install.packages("caret", dependencies = TRUE)
install.packages("dplyr", dependencies = TRUE)
install.packages("e1071", dependencies = TRUE)
install.packages("FNN", dependencies = TRUE)
install.packages("gmodels", dependencies = TRUE)
install.packages("psych", dependencies = TRUE)
install.packages("data.table", dependencies=TRUE)
install.packages("BiocManager", dependencies= TRUE)
## --STEP 3--echo=TRUE-----------------------------------------------------------
#libraries needed
library(caret)
library(class)
library(dplyr)
library(e1071)
library(FNN)
library(gmodels)
library(psych)
# NOTE(review): plyr is attached after dplyr, so plyr masks several dplyr
# functions; later calls compensate by using the dplyr:: prefix explicitly.
library(plyr)
## --STEP 4--echo=TRUE-----------------------------------------------------------
# Work on a copy so the raw import stays untouched.
data_class <- data
## ----echo=TRUE-----------------------------------------------------------
# put outcome in its own object
mjob_outcome <- data_class %>% select(mjob)
# remove original variable from the data set
data_class <- data_class %>% select(-mjob)
# NOTE(review): redundant — mjob was already dropped by select(-mjob) above.
data_class$mjob <- NULL
## ---STEP 5---------------------------------------------------------------------
str(data_class)
## --STEP 6 --echo=TRUE Scale the variables-----------------------------------------------------------
# Standardise all numeric predictors (z-scores) so no single variable
# dominates the kNN distance calculation.
data_class[, c("age", "medu", "fedu", "traveltime", "studytime", "failures", "famrel", "freetime", "goout", "dalc", "walc", "health", "absences", "g1", "g2", "g3")] <- scale(data_class[, c("age", "medu", "fedu", "traveltime", "studytime", "failures", "famrel", "freetime", "goout", "dalc", "walc", "health", "absences", "g1", "g2", "g3")])
head(data_class)
## --STEP 7--echo=TRUE-----------------------------------------------------------
str(data_class)
## ----echo=TRUE-----------------------------------------------------------
# Recode the binary yes/no columns as 0/1 indicator variables.
data_class$schoolsup <- ifelse(data_class$schoolsup == "yes", 1, 0)
data_class$famsup <- ifelse(data_class$famsup == "yes", 1, 0)
data_class$paid <- ifelse(data_class$paid == "yes", 1, 0)
data_class$activities <- ifelse(data_class$activities == "yes", 1, 0)
data_class$nursery <- ifelse(data_class$nursery == "yes", 1, 0)
data_class$higher <- ifelse(data_class$higher == "yes", 1, 0)
data_class$internet <- ifelse(data_class$internet == "yes", 1, 0)
data_class$romantic <- ifelse(data_class$romantic == "yes", 1, 0)
## ----echo=TRUE-----------------------------------------------------------
# Dummy-code (one-hot) the two-level categorical predictors in place.
data_class$school <- dummy.code(data_class$school)
data_class$sex <- dummy.code(data_class$sex)
data_class$address <- dummy.code(data_class$address)
data_class$famsize <- dummy.code(data_class$famsize)
data_class$pstatus <- dummy.code(data_class$pstatus)
## ------------------------------------------------------------------------
# Multi-level categoricals become separate data frames of dummy columns,
# merged back in at STEP 8.
fjob <- as.data.frame(dummy.code(data_class$fjob))
reason <- as.data.frame(dummy.code(data_class$reason))
guardian <- as.data.frame(dummy.code(data_class$guardian))
## ------------------------------------------------------------------------
# Rename overlapping dummy-column names ("other", "health") so the cbind in
# STEP 8 does not create duplicate column names.
fjob <- dplyr::rename(fjob, other_fjob = other)
fjob <- dplyr::rename(fjob, health_fjob = health)
reason <- dplyr::rename(reason, other_reason = other)
guardian <- dplyr::rename(guardian, other_guardian = other)
## ---STEP 8---------------------------------------------------------------------
data_class <- cbind(data_class, fjob, guardian, reason)
## ---STEP 9---------------------------------------------------------------------
# Drop the original (now dummy-coded) factor columns.
data_class <- data_class %>% select(-one_of(c("fjob", "guardian", "reason")))
head(data_class)
##########################
## End of data Prep
##########################
## ------------------------------------------------------------------------
set.seed(1234) # set the seed to make the partition reproducible
# 75% of the sample size = 296
smp_size <- floor(0.75 * nrow(data_class))
train_ind <- sample(seq_len(nrow(data_class)), size = smp_size)
# creating test and training sets that contain all of the predictors
class_pred_train <- data_class[train_ind, ]
class_pred_test <- data_class[-train_ind, ]
## ------------------------------------------------------------------------
# Split the outcome vector using the same indices as the predictors.
mjob_outcome_train <- mjob_outcome[train_ind, ]
mjob_outcome_test <- mjob_outcome[-train_ind, ]
## ----tidy=TRUE, results="asis"----sq root of 296 = 17---------------------------------------
# k = 17 follows the rule of thumb k ~ sqrt(n) with n = 296 training rows.
mjob_pred_knn <- knn(train = class_pred_train, test = class_pred_test, cl = mjob_outcome_train, k=17)
## --STEP 11--results="asis"------------------------------------------------------
# put "mjob_outcome_test" in a data frame
mjob_outcome_test <- data.frame(mjob_outcome_test)
# merge "mjob_pred_knn" and "mjob_outcome_test"
class_comparison <- data.frame(mjob_pred_knn, mjob_outcome_test)
# specify column names for "class_comparison"
names(class_comparison) <- c("PredictedMjob", "ObservedMjob")
# inspect "class_comparison"
head(class_comparison)
## ----tidy=TRUE, results= "asis"------------------------------------------
# create table examining model accuracy
CrossTable(x = class_comparison$ObservedMjob, y = class_comparison$PredictedMjob, prop.chisq=FALSE, prop.c = FALSE, prop.r = FALSE, prop.t = FALSE)
## --STEP 13--tidy=TRUE, results="asis"-------------------------------------------
# Refit kNN via caret, which tunes k by resampling and applies the
# centering/scaling preprocessing itself.
mjob_pred_caret <- train(class_pred_train, mjob_outcome_train, method = "knn", preProcess = c("center","scale"))
## ----tidy=TRUE, results="asis", echo=TRUE--------------------------------
mjob_pred_caret
## ----tidy=TRUE, results="asis", echo=TRUE--------------------------------
plot(mjob_pred_caret)
## ----tidy=TRUE, results="asis", echo=TRUE--------------------------------
library(BiocManager)
library(caret)
knnPredict <- predict(mjob_pred_caret, newdata = class_pred_test)
## --STEP 14--tidy=TRUE, results="asis"-------------------------------------------
library(caret)
library(ggplot2)
library(data.table)
library(Matrix)
# Confusion matrix of caret predictions vs the held-out observed classes.
confusion_matrix <- confusionMatrix(table(knnPredict, mjob_outcome_test$mjob_outcome_test))
## ------------------------------------------------------------------------
confusion_matrix
|
/Week 4/Part 4 Student Maths - kNN Tutorial.R
|
no_license
|
kxnxchukwu/Data-and-Web-Mining
|
R
| false
| false
| 6,775
|
r
|
## ----setup, include=FALSE, echo=TRUE-------------------------------------
# kNN classification tutorial: predict a student's mother's job ("mjob") from
# the UCI student-mat.csv data set, first with class::knn() and then with a
# caret-tuned kNN model.
knitr::opts_chunk$set(tidy=TRUE, echo = TRUE)
library(formatR)
getwd()
# NOTE(review): hard-coded absolute Windows path — adjust before running on
# another machine.
setwd("C:/Users/User/Documents/NCI20-21_Sem3/DWM HDSDA_JAN/Lecture 4 Classification")
## --STEP 1--echo=TRUE-----------------------------------------------------------
#read in the .csv file using the url() function
# (the file is semicolon-separated, hence sep=";")
data <- read.table("student-mat.csv",sep=";",header=TRUE)
#change all variable names to lowercase
var.names.data <-tolower(colnames(data))
colnames(data) <- var.names.data
head(data)
## --STEP 2----------------------------------------------------------------------
# NOTE(review): these installs run unconditionally on every execution; consider
# guarding with requireNamespace() or installing once interactively.
install.packages("class", dependencies = TRUE)
install.packages("caret", dependencies = TRUE)
install.packages("dplyr", dependencies = TRUE)
install.packages("e1071", dependencies = TRUE)
install.packages("FNN", dependencies = TRUE)
install.packages("gmodels", dependencies = TRUE)
install.packages("psych", dependencies = TRUE)
install.packages("data.table", dependencies=TRUE)
install.packages("BiocManager", dependencies= TRUE)
## --STEP 3--echo=TRUE-----------------------------------------------------------
#libraries needed
library(caret)
library(class)
library(dplyr)
library(e1071)
library(FNN)
library(gmodels)
library(psych)
# NOTE(review): plyr is attached after dplyr, so plyr masks several dplyr
# functions; later calls compensate by using the dplyr:: prefix explicitly.
library(plyr)
## --STEP 4--echo=TRUE-----------------------------------------------------------
# Work on a copy so the raw import stays untouched.
data_class <- data
## ----echo=TRUE-----------------------------------------------------------
# put outcome in its own object
mjob_outcome <- data_class %>% select(mjob)
# remove original variable from the data set
data_class <- data_class %>% select(-mjob)
# NOTE(review): redundant — mjob was already dropped by select(-mjob) above.
data_class$mjob <- NULL
## ---STEP 5---------------------------------------------------------------------
str(data_class)
## --STEP 6 --echo=TRUE Scale the variables-----------------------------------------------------------
# Standardise all numeric predictors (z-scores) so no single variable
# dominates the kNN distance calculation.
data_class[, c("age", "medu", "fedu", "traveltime", "studytime", "failures", "famrel", "freetime", "goout", "dalc", "walc", "health", "absences", "g1", "g2", "g3")] <- scale(data_class[, c("age", "medu", "fedu", "traveltime", "studytime", "failures", "famrel", "freetime", "goout", "dalc", "walc", "health", "absences", "g1", "g2", "g3")])
head(data_class)
## --STEP 7--echo=TRUE-----------------------------------------------------------
str(data_class)
## ----echo=TRUE-----------------------------------------------------------
# Recode the binary yes/no columns as 0/1 indicator variables.
data_class$schoolsup <- ifelse(data_class$schoolsup == "yes", 1, 0)
data_class$famsup <- ifelse(data_class$famsup == "yes", 1, 0)
data_class$paid <- ifelse(data_class$paid == "yes", 1, 0)
data_class$activities <- ifelse(data_class$activities == "yes", 1, 0)
data_class$nursery <- ifelse(data_class$nursery == "yes", 1, 0)
data_class$higher <- ifelse(data_class$higher == "yes", 1, 0)
data_class$internet <- ifelse(data_class$internet == "yes", 1, 0)
data_class$romantic <- ifelse(data_class$romantic == "yes", 1, 0)
## ----echo=TRUE-----------------------------------------------------------
# Dummy-code (one-hot) the two-level categorical predictors in place.
data_class$school <- dummy.code(data_class$school)
data_class$sex <- dummy.code(data_class$sex)
data_class$address <- dummy.code(data_class$address)
data_class$famsize <- dummy.code(data_class$famsize)
data_class$pstatus <- dummy.code(data_class$pstatus)
## ------------------------------------------------------------------------
# Multi-level categoricals become separate data frames of dummy columns,
# merged back in at STEP 8.
fjob <- as.data.frame(dummy.code(data_class$fjob))
reason <- as.data.frame(dummy.code(data_class$reason))
guardian <- as.data.frame(dummy.code(data_class$guardian))
## ------------------------------------------------------------------------
# Rename overlapping dummy-column names ("other", "health") so the cbind in
# STEP 8 does not create duplicate column names.
fjob <- dplyr::rename(fjob, other_fjob = other)
fjob <- dplyr::rename(fjob, health_fjob = health)
reason <- dplyr::rename(reason, other_reason = other)
guardian <- dplyr::rename(guardian, other_guardian = other)
## ---STEP 8---------------------------------------------------------------------
data_class <- cbind(data_class, fjob, guardian, reason)
## ---STEP 9---------------------------------------------------------------------
# Drop the original (now dummy-coded) factor columns.
data_class <- data_class %>% select(-one_of(c("fjob", "guardian", "reason")))
head(data_class)
##########################
## End of data Prep
##########################
## ------------------------------------------------------------------------
set.seed(1234) # set the seed to make the partition reproducible
# 75% of the sample size = 296
smp_size <- floor(0.75 * nrow(data_class))
train_ind <- sample(seq_len(nrow(data_class)), size = smp_size)
# creating test and training sets that contain all of the predictors
class_pred_train <- data_class[train_ind, ]
class_pred_test <- data_class[-train_ind, ]
## ------------------------------------------------------------------------
# Split the outcome vector using the same indices as the predictors.
mjob_outcome_train <- mjob_outcome[train_ind, ]
mjob_outcome_test <- mjob_outcome[-train_ind, ]
## ----tidy=TRUE, results="asis"----sq root of 296 = 17---------------------------------------
# k = 17 follows the rule of thumb k ~ sqrt(n) with n = 296 training rows.
mjob_pred_knn <- knn(train = class_pred_train, test = class_pred_test, cl = mjob_outcome_train, k=17)
## --STEP 11--results="asis"------------------------------------------------------
# put "mjob_outcome_test" in a data frame
mjob_outcome_test <- data.frame(mjob_outcome_test)
# merge "mjob_pred_knn" and "mjob_outcome_test"
class_comparison <- data.frame(mjob_pred_knn, mjob_outcome_test)
# specify column names for "class_comparison"
names(class_comparison) <- c("PredictedMjob", "ObservedMjob")
# inspect "class_comparison"
head(class_comparison)
## ----tidy=TRUE, results= "asis"------------------------------------------
# create table examining model accuracy
CrossTable(x = class_comparison$ObservedMjob, y = class_comparison$PredictedMjob, prop.chisq=FALSE, prop.c = FALSE, prop.r = FALSE, prop.t = FALSE)
## --STEP 13--tidy=TRUE, results="asis"-------------------------------------------
# Refit kNN via caret, which tunes k by resampling and applies the
# centering/scaling preprocessing itself.
mjob_pred_caret <- train(class_pred_train, mjob_outcome_train, method = "knn", preProcess = c("center","scale"))
## ----tidy=TRUE, results="asis", echo=TRUE--------------------------------
mjob_pred_caret
## ----tidy=TRUE, results="asis", echo=TRUE--------------------------------
plot(mjob_pred_caret)
## ----tidy=TRUE, results="asis", echo=TRUE--------------------------------
library(BiocManager)
library(caret)
knnPredict <- predict(mjob_pred_caret, newdata = class_pred_test)
## --STEP 14--tidy=TRUE, results="asis"-------------------------------------------
library(caret)
library(ggplot2)
library(data.table)
library(Matrix)
# Confusion matrix of caret predictions vs the held-out observed classes.
confusion_matrix <- confusionMatrix(table(knnPredict, mjob_outcome_test$mjob_outcome_test))
## ------------------------------------------------------------------------
confusion_matrix
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_multi_pcl.R
\name{process_multi_pcl}
\alias{process_multi_pcl}
\title{Process multiple PCL transects.}
\usage{
process_multi_pcl(
data_dir,
user_height = NULL,
method = NULL,
k = NULL,
marker.spacing = NULL,
max.vai = NULL,
ht.thresh = NULL,
pavd = FALSE,
hist = FALSE,
save_output = TRUE
)
}
\arguments{
\item{data_dir}{directory where PCL .csv files are stored}
\item{user_height}{height of laser from ground based on user in meters}
\item{method}{"MH" is MacArthur-Horn and "Bohrer" is the Bohrer method}
\item{k}{correction coeff for MH method (default is 1)}
\item{marker.spacing}{space between markers in the PCL data, in meters}
\item{max.vai}{the maximum value of column VAI. The default is 8. Should be a max value, not a mean.}
\item{ht.thresh}{the height at which to filter values below}
\item{pavd}{logical input to include Plant Area Volume Density Plot from [plot_pavd], if TRUE it is included, if FALSE, it is not.}
\item{hist}{logical input to include histogram of VAI with PAVD plot, if TRUE it is included, if FALSE, it is not.}
\item{save_output}{needs to be set to true, or else you are just going to get a lot of data on the screen}
}
\value{
writes the hit matrix, summary matrix, and output variables to csv in an output folder, along with hit grid plot
}
\description{
\code{process_multi_pcl} imports and processes multiple PCL transects.
}
\details{
This is a specific function that works using the input of a data directory of .csv
files where the function cycles through the files there and processes multiple
files, producing the same output files described in \code{process_pcl}
}
\examples{
# This function works on a directory of raw PCL data
\dontrun{data_directory <- "./data/PCL_transects/" #data directory containing PCL transects
process_multi_pcl(data_directory, user_height = 1.05, marker.spacing = 10,
max.vai = 8, ht.thresh = 60, pavd = FALSE, hist = FALSE, save_output = FALSE)
process_multi_pcl("./data/PCL_transects/", user_height = 1.05, marker.spacing = 10,
max.vai = 8, ht.thresh = 60, pavd = FALSE, hist = FALSE, save_output = FALSE)
}
}
\seealso{
\code{\link{process_pcl}}
}
\keyword{file}
\keyword{import}
|
/man/process_multi_pcl.Rd
|
no_license
|
dondealban/forestr
|
R
| false
| true
| 2,274
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_multi_pcl.R
\name{process_multi_pcl}
\alias{process_multi_pcl}
\title{Process multiple PCL transects.}
\usage{
process_multi_pcl(
data_dir,
user_height = NULL,
method = NULL,
k = NULL,
marker.spacing = NULL,
max.vai = NULL,
ht.thresh = NULL,
pavd = FALSE,
hist = FALSE,
save_output = TRUE
)
}
\arguments{
\item{data_dir}{directory where PCL .csv files are stored}
\item{user_height}{height of laser from ground based on user in meters}
\item{method}{"MH" is MacArthur-Horn and "Bohrer" is the Bohrer method}
\item{k}{correction coeff for MH method (default is 1)}
\item{marker.spacing}{space between markers in the PCL data, in meters}
\item{max.vai}{the maximum value of column VAI. The default is 8. Should be a max value, not a mean.}
\item{ht.thresh}{the height at which to filter values below}
\item{pavd}{logical input to include Plant Area Volume Density Plot from [plot_pavd], if TRUE it is included, if FALSE, it is not.}
\item{hist}{logical input to include histogram of VAI with PAVD plot, if TRUE it is included, if FALSE, it is not.}
\item{save_output}{needs to be set to true, or else you are just going to get a lot of data on the screen}
}
\value{
writes the hit matrix, summary matrix, and output variables to csv in an output folder, along with hit grid plot
}
\description{
\code{process_multi_pcl} imports and processes multiple PCL transects.
}
\details{
This is a specific function that works using the input of a data directory of .csv
files where the function cycles through the files there and processes multiple
files, producing the same output files described in \code{process_pcl}
}
\examples{
# This function works on a directory of raw PCL data
\dontrun{data_directory <- "./data/PCL_transects/" #data directory containing PCL transects
process_multi_pcl(data_directory, user_height = 1.05, marker.spacing = 10,
max.vai = 8, ht.thresh = 60, pavd = FALSE, hist = FALSE, save_output = FALSE)
process_multi_pcl("./data/PCL_transects/", user_height = 1.05, marker.spacing = 10,
max.vai = 8, ht.thresh = 60, pavd = FALSE, hist = FALSE, save_output = FALSE)
}
}
\seealso{
\code{\link{process_pcl}}
}
\keyword{file}
\keyword{import}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_docx.R
\name{docx_body_xml}
\alias{docx_body_xml}
\title{Body xml document}
\usage{
docx_body_xml(x)
}
\arguments{
\item{x}{an rdocx object}
}
\description{
Get the body document as xml. This function
is not to be used by end users, it has been implemented
to allow other packages to work with officer.
}
\examples{
doc <- read_docx()
docx_body_xml(doc)
}
\keyword{internal}
|
/man/docx_body_xml.Rd
|
permissive
|
davidgohel/officer
|
R
| false
| true
| 458
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_docx.R
\name{docx_body_xml}
\alias{docx_body_xml}
\title{Body xml document}
\usage{
docx_body_xml(x)
}
\arguments{
\item{x}{an rdocx object}
}
\description{
Get the body document as xml. This function
is not to be used by end users, it has been implemented
to allow other packages to work with officer.
}
\examples{
doc <- read_docx()
docx_body_xml(doc)
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ig_E_log}
\alias{ig_E_log}
\title{Inverse Gamma E[log(x)]}
\usage{
ig_E_log(a, b)
}
\arguments{
\item{a}{shape}
\item{b}{scale}
}
\description{
Calculate and return E[log(x)] where x ~ IG(a,b).
}
|
/man/ig_E_log.Rd
|
no_license
|
jatotterdell/varapproxr
|
R
| false
| true
| 295
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ig_E_log}
\alias{ig_E_log}
\title{Inverse Gamma E[log(x)]}
\usage{
ig_E_log(a, b)
}
\arguments{
\item{a}{shape}
\item{b}{scale}
}
\description{
Calculate and return E[log(x)] where x ~ IG(a,b).
}
|
#### ENV 603 / 5-April-2021 / N.R. Sommer
# Dataset 1: Religion by region
#
# Summarise the GSS religion-by-region data (socviz::gss_sm) and plot the
# religious composition of each US census region.
# If you have not yet installed these libraries, use install.packages("")
library(tidyverse)
library(socviz)

# Create a new table called rel_by_region: counts, within-region proportions,
# and rounded percentages of each religion in each region.
rel_by_region <- gss_sm %>%
  group_by(bigregion, religion) %>%
  summarize(N = n()) %>%
  mutate(freq = N / sum(N),
         pct = round((freq * 100), 0))

# See how the pipeline above has taken the gss_sm dataframe and transformed it
# into a summary table.
View(gss_sm)   # FIX: was View(gss_sum) — that object does not exist
View(rel_by_region)

# Now let's make some plots!
# p1: dodged bar chart — percent of each religion, grouped by region.
p1 <- ggplot(rel_by_region, aes(x = bigregion, y = pct, fill = religion)) +
  geom_col(position = "dodge2") +
  labs(x = "Region", y = "Percent", fill = "Religion") +
  theme(legend.position = "top")
p1

# p2: faceted horizontal bars — one panel per region, legend suppressed.
p2 <- ggplot(rel_by_region, aes(x = religion, y = pct, fill = religion)) +
  geom_col(position = "dodge2") +
  labs(x = NULL, y = "Percent", fill = "Religion") +
  guides(fill = FALSE) +
  coord_flip() +
  facet_grid(~ bigregion)
p2

# Make modifications to either plot. Google is your friend here. A few suggestions:
# (1) Add a title
# (2) Remove the gridlines
# (3) Reorder the bars
# (4) Choose a new color scheme
library(viridis)

# FIX: assign the plot to p3 (it was built but never assigned, so the bare `p3`
# below errored); apply theme_bw() *before* the theme() tweaks so it does not
# reset legend.position; and use scale_fill_viridis(discrete = TRUE) — the bars
# map religion to the discrete *fill* aesthetic, so the previous
# scale_color_viridis(discrete = FALSE) had no visible effect.
p3 <- ggplot(rel_by_region, aes(x = bigregion, y = pct, fill = religion)) +
  geom_col(position = "dodge2") +
  labs(x = "Region", y = "Percent", fill = "Religion") +
  ggtitle("Protestants Comprise Majority of US Religious Affiliation") +
  theme_bw() +
  theme(legend.position = "top",
        panel.grid = element_blank(),
        panel.border = element_blank()) +
  scale_fill_viridis(discrete = TRUE, option = "C")
p3

?geom_col
?geom_histogram

# Once you're happy with your changes, save your plot:
ggsave("plot1.png",
       plot = last_plot(),
       dpi = 300)
|
/Exercise-1.R
|
no_license
|
DoctorTedNelson/ENV603-DataViz
|
R
| false
| false
| 1,747
|
r
|
#### ENV 603 / 5-April-2021 / N.R. Sommer
# Dataset 1: Religion by region
#
# Summarise the GSS religion-by-region data (socviz::gss_sm) and plot the
# religious composition of each US census region.
# If you have not yet installed these libraries, use install.packages("")
library(tidyverse)
library(socviz)

# Create a new table called rel_by_region: counts, within-region proportions,
# and rounded percentages of each religion in each region.
rel_by_region <- gss_sm %>%
  group_by(bigregion, religion) %>%
  summarize(N = n()) %>%
  mutate(freq = N / sum(N),
         pct = round((freq * 100), 0))

# See how the pipeline above has taken the gss_sm dataframe and transformed it
# into a summary table.
View(gss_sm)   # FIX: was View(gss_sum) — that object does not exist
View(rel_by_region)

# Now let's make some plots!
# p1: dodged bar chart — percent of each religion, grouped by region.
p1 <- ggplot(rel_by_region, aes(x = bigregion, y = pct, fill = religion)) +
  geom_col(position = "dodge2") +
  labs(x = "Region", y = "Percent", fill = "Religion") +
  theme(legend.position = "top")
p1

# p2: faceted horizontal bars — one panel per region, legend suppressed.
p2 <- ggplot(rel_by_region, aes(x = religion, y = pct, fill = religion)) +
  geom_col(position = "dodge2") +
  labs(x = NULL, y = "Percent", fill = "Religion") +
  guides(fill = FALSE) +
  coord_flip() +
  facet_grid(~ bigregion)
p2

# Make modifications to either plot. Google is your friend here. A few suggestions:
# (1) Add a title
# (2) Remove the gridlines
# (3) Reorder the bars
# (4) Choose a new color scheme
library(viridis)

# FIX: assign the plot to p3 (it was built but never assigned, so the bare `p3`
# below errored); apply theme_bw() *before* the theme() tweaks so it does not
# reset legend.position; and use scale_fill_viridis(discrete = TRUE) — the bars
# map religion to the discrete *fill* aesthetic, so the previous
# scale_color_viridis(discrete = FALSE) had no visible effect.
p3 <- ggplot(rel_by_region, aes(x = bigregion, y = pct, fill = religion)) +
  geom_col(position = "dodge2") +
  labs(x = "Region", y = "Percent", fill = "Religion") +
  ggtitle("Protestants Comprise Majority of US Religious Affiliation") +
  theme_bw() +
  theme(legend.position = "top",
        panel.grid = element_blank(),
        panel.border = element_blank()) +
  scale_fill_viridis(discrete = TRUE, option = "C")
p3

?geom_col
?geom_histogram

# Once you're happy with your changes, save your plot:
ggsave("plot1.png",
       plot = last_plot(),
       dpi = 300)
|
# zzz.R
#
# Package startup and unload functions
# Package load hook. Every option-setting statement below is commented out,
# so loading currently has no side effects; the skeleton is kept as a
# template for adding package-level options() later.
.onLoad <- function(libname, pkgname) {
  # # Make list of package parameters and add to global options
  # Example:
  #
  # # filepath of logfile
  # optRpt <- list(rpt.logfile = logFileName() )
  #
  # # add more options ...
  # optRpt[["nameOfOption"]] <- value
  #
  # # Only set options the user has not already defined, so loading the
  # # package never overrides an existing setting.
  # optionsToSet <- !(names(optRpt) %in% names(options()))
  #
  # if(any(optionsToSet)) {
  #   options(optRpt[optionsToSet])
  # }

  # Return invisibly so loading produces no console output.
  invisible()
}
.onAttach <- function(libname, pkgname) {
  # Emit a welcome banner with the package name, author(s), and maintainer
  # taken from the DESCRIPTION file.
  # Shown only once per session due to an RStudio quirk,
  # cf. https://github.com/r-lib/devtools/issues/1442
  desc <- utils::packageDescription(pkgname)
  banner <- paste0(
    sprintf("\nWelcome: this is the %s package.\n", pkgname),
    sprintf("Author(s):\n  %s\n", desc$Author),
    sprintf("Maintainer:\n  %s\n", desc$Maintainer)
  )
  packageStartupMessage(banner)
}
# .onUnload <- function(libname, pkgname) {
#
# }
# [END]
|
/R/zzz.R
|
permissive
|
raywoo32/HPA
|
R
| false
| false
| 1,079
|
r
|
# zzz.R
#
# Package startup and unload functions
# Package load hook. Every option-setting statement below is commented out,
# so loading currently has no side effects; the skeleton is kept as a
# template for adding package-level options() later.
.onLoad <- function(libname, pkgname) {
  # # Make list of package parameters and add to global options
  # Example:
  #
  # # filepath of logfile
  # optRpt <- list(rpt.logfile = logFileName() )
  #
  # # add more options ...
  # optRpt[["nameOfOption"]] <- value
  #
  # # Only set options the user has not already defined, so loading the
  # # package never overrides an existing setting.
  # optionsToSet <- !(names(optRpt) %in% names(options()))
  #
  # if(any(optionsToSet)) {
  #   options(optRpt[optionsToSet])
  # }

  # Return invisibly so loading produces no console output.
  invisible()
}
.onAttach <- function(libname, pkgname) {
  # Emit a welcome banner with the package name, author(s), and maintainer
  # taken from the DESCRIPTION file.
  # Shown only once per session due to an RStudio quirk,
  # cf. https://github.com/r-lib/devtools/issues/1442
  desc <- utils::packageDescription(pkgname)
  banner <- paste0(
    sprintf("\nWelcome: this is the %s package.\n", pkgname),
    sprintf("Author(s):\n  %s\n", desc$Author),
    sprintf("Maintainer:\n  %s\n", desc$Maintainer)
  )
  packageStartupMessage(banner)
}
# .onUnload <- function(libname, pkgname) {
#
# }
# [END]
|
path.work <- "E:/GitHub/MSDS-RegressionAnalysis/data"
path.home <- "D:/Projects/MSDS-RegressionAnalysis/data"
if (file.exists(path.home)) {
setwd(path.home)
} else {
setwd(path.work)
}
##################
mydata <- read.csv(file="ames_housing_data.csv",head=TRUE,sep=",")
str(mydata)
head(mydata)
names(mydata)
mydata$TotalFloorSF <- mydata$FirstFlrSF + mydata$SecondFlrSF
mydata$HouseAge <- mydata$YrSold - mydata$YearBuilt
mydata$QualityIndex <- mydata$OverallQual * mydata$OverallCond
mydata$logSalePrice <- log(mydata$SalePrice)
mydata$price_sqft <- mydata$SalePrice/mydata$TotalFloorSF
summary(mydata$price_sqft)
hist(mydata$price_sqft)
subdat <- subset(mydata, select=c("TotalFloorSF","HouseAge","QualityIndex",
"price_sqft", "SalePrice","LotArea",
"BsmtFinSF1","Neighborhood","HouseStyle",
"LotShape","OverallQual","logSalePrice",
"TotalBsmtSF","HouseStyle"))
str(subdat)
subdatnum <- subset(mydata, select=c("TotalFloorSF","HouseAge","QualityIndex",
"SalePrice","LotArea","OverallQual","logSalePrice"))
#####################################################################
######################### Assignment 1 ##############################
#####################################################################
#################################################################
################## univariate EDA ##############################
###############################################################
require(ggplot2)
ggplot(subdat) +
geom_bar( aes(LotShape) ) +
ggtitle("Number of houses per Lotshape") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=SalePrice)) +
geom_histogram(color="black", binwidth= 10000) +
labs(title="Distribution of Sale Price") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=TotalFloorSF)) +
geom_histogram(color="black", binwidth= 100) +
labs(title="Distribution of TotalFloorSF") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=QualityIndex)) +
geom_histogram(color="black", binwidth= 10) +
labs(title="Distribution of QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
#######################################################################
########### bivariate EDA ########################################
###################################################################
ggplot(subdat, aes(x=TotalFloorSF, y=QualityIndex)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Total Floor SF vs QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=TotalFloorSF, y=HouseAge)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Total Floor SF vs HouseAge") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=LotShape, y=HouseAge)) +
geom_boxplot(fill="blue") +
labs(title="Distribution of HouseAge") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
############################################################
################ model focussed EDA #######################
###########################################################
ggplot(subdat, aes(x=TotalFloorSF, y=SalePrice)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Sale Price vs Total Floor SF") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm, se=FALSE) ## method=lm, se=FALSE ###
ggplot(subdat, aes(x=QualityIndex, y=SalePrice)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Sale Price vs QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=LotShape, y=SalePrice)) +
geom_boxplot(fill="blue") +
labs(title="Distribution of Sale Price") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
#####################################################################
############# EDA for multiple variables ###########################
##################################################################
require(GGally)
ggpairs(subdat)
require(lattice)
pairs(subdat, pch = 21)
require(corrplot)
mcor <- cor(subdatnum)
corrplot(mcor, method="shade", shade.col=NA, tl.col="black",tl.cex=0.5)
#####################################################################
############# Define the sample data ###########################
##################################################################
subdat2 <- subdat[which(subdat$TotalFloorSF < 4000),]
###################################################################
################## Assignment 2 ################################
#################################################################
attach(subdat2)
ggplot(subdat2, aes(x=TotalFloorSF, y=SalePrice)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Sale Price vs Total Floor SF") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm,se=FALSE) ## method=lm, se=FALSE ###
ggplot(subdat2, aes(x=QualityIndex, y=SalePrice)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Sale Price vs QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm) ## method=lm, se=FALSE ###
# 3D Scatterplot with Coloring and Vertical Lines
# and Regression Plane
library(scatterplot3d)
attach(subdat2)
s3d <-scatterplot3d(TotalFloorSF,QualityIndex,SalePrice,pch=16,
highlight.3d=TRUE,type="h", main="3D Scatterplot")
fit <- lm(SalePrice ~ TotalFloorSF + QualityIndex)
s3d$plane3d(fit)
library(Rcmdr)
attach(subdat2)
scatter3d(SalePrice,TotalFloorSF,QualityIndex)
############## fitting a SLR ###################################
SLRresult = lm(SalePrice ~ TotalFloorSF, data=subdatnum)#subdat2
anova(SLRresult)
summary(SLRresult)
par(mfrow=c(1,1)) # visualize four graphs at once
pred <- as.data.frame(predict(SLRresult,subdatnum,interval="prediction"))
str(pred)
head(pred)
subdatnum <- cbind(subdatnum,pred)
str(subdatnum)
head(subdatnum)
subdatnum <- subset( subdatnum, select = -lwr)
subdatnum <- subset( subdatnum, select = -upr)
library(reshape)
# Rename the SLR fitted-value column so later model fits don't collide with it.
subdatnum <- rename(subdatnum, c(fit="fitSLR"))
############## fitting a MLR ###################################
# Two-predictor model of raw SalePrice; diagnostics via anova/summary/plot.
MLRresult = lm(SalePrice ~ TotalFloorSF+OverallQual, data=subdatnum)
anova(MLRresult)
summary(MLRresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRresult)
# Prediction intervals return fit/lwr/upr columns; only fit is kept below.
pred <- as.data.frame(predict(MLRresult,subdatnum,interval="prediction"))
str(pred)
head(pred)
subdatnum <- cbind(subdatnum,pred)
subdatnum <- subset( subdatnum, select = -lwr)
subdatnum <- subset( subdatnum, select = -upr)
str(subdatnum)
head(subdatnum)
subdatnum <- rename(subdatnum, c(fit="fitMLR"))
# Raw residual = actual - fitted.
subdatnum$res <- subdatnum$SalePrice - subdatnum$fitMLR
head(subdatnum)
###################################################################
##################### Assignment 3 ################################
#################################################################
################ MAE calculation ###################################
# Refit the same two-predictor model, this time on subdat, and compute MAE.
MLRresult = lm(SalePrice ~ TotalFloorSF+OverallQual, data=subdat)
anova(MLRresult)
summary(MLRresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRresult)
pred <- as.data.frame(predict(MLRresult,subdat))
names(pred)
library(reshape)
# predict() without interval= returns one unnamed column; rename it "prd".
pred <- rename(pred, c("predict(MLRresult, subdat)" = "prd"))
subdat$pred <- pred$prd
subdat$res <- subdat$SalePrice - subdat$pred
subdat$absres <- abs(subdat$res)
# Mean Absolute Error over the whole sample.
MAE <- mean(subdat$absres)
MAE
require(ggplot2)
# Residuals vs OverallQual: check for structure left in the residuals.
ggplot(subdat, aes(x=OverallQual, y=res)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Residual vs OverallQual") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm,se=FALSE) ## method=lm, se=FALSE ###
################################################################
############### Log Transformation #############################
# Same predictors, log-transformed response; note MAE below is on log scale.
MLRLogresult = lm(logSalePrice ~ TotalFloorSF+OverallQual, data=subdatnum)
anova(MLRLogresult)
summary(MLRLogresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRLogresult)
pred <- as.data.frame(predict(MLRLogresult,subdatnum,interval="prediction"))
str(pred)
head(pred)
subdatnum <- cbind(subdatnum,pred)
subdatnum <- subset( subdatnum, select = -lwr)
subdatnum <- subset( subdatnum, select = -upr)
str(subdatnum)
head(subdatnum)
subdatnum <- rename(subdatnum, c(fit="fitMLRLog"))
subdatnum$reslog <- subdatnum$logSalePrice - subdatnum$fitMLRLog
MAE <- mean(abs(subdatnum$reslog))
MAE
head(subdatnum)
library(car)
# Variance inflation factors: multicollinearity check.
vif(MLRLogresult)
par(mfrow=c(1,1))
influencePlot(MLRLogresult, id.method="identify", main="Influence Plot",
sub="Circle size is proportial to Cook's Distance")
summary(inflm.MLRLog <- influence.measures(MLRLogresult))
# DFFITS per observation, used below to flag influential points.
dffitslog <- dffits(MLRLogresult)
subdatnum <- cbind(subdatnum,dffitslog)
str(subdatnum)
ggplot(subdatnum, aes(x=OverallQual, y=reslog)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Residual vs OverallQual") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm,se=FALSE) ## method=lm, se=FALSE ###
################ influential points removed #######
# Keep only observations with |DFFITS| below the 0.064 cutoff, then refit.
subdatnum$absdf <- abs(subdatnum$dffitslog)
head(subdatnum)
subdatnuminf <- subdatnum[which(subdatnum$absdf < 0.064),]
MLRLogresult = lm(logSalePrice ~ TotalFloorSF+OverallQual, data=subdatnuminf)
anova(MLRLogresult)
summary(MLRLogresult)
############## analyze Neighborhood variable #########
require(ggplot2)
ggplot(subdat, aes(x=Neighborhood, y=SalePrice)) +
geom_boxplot(fill="blue") +
labs(title="Distribution of Sale Price") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
library(plyr)
# Per-neighborhood summaries: model MAE, mean price, totals for $/sqft.
subdat1 <- ddply(subdat, .(Neighborhood), summarise,
MAE = mean(absres))
subdat2 <- ddply(subdat, .(Neighborhood), summarise,
MeanPrice = mean(SalePrice))
subdat3 <- ddply(subdat, .(Neighborhood), summarise,
TotalPrice = sum(SalePrice))
subdat4 <- ddply(subdat, .(Neighborhood), summarise,
TotalSqft = sum(TotalFloorSF))
# NOTE(review): cbind here duplicates the Neighborhood column; harmless but untidy.
subdat34 <- cbind(subdat3,subdat4)
subdat34$AvgPr_Sqft <- subdat34$TotalPrice/subdat34$TotalSqft
subdatall <- subdat1
subdatall$MeanPrice <- subdat2$MeanPrice
subdatall$AvgPr_Sqft <- subdat34$AvgPr_Sqft
require(ggplot2)
ggplot(subdatall, aes(x=AvgPr_Sqft, y=MeanPrice)) +
geom_point(color="blue", shape=1,size=3) +
ggtitle("Scatter Plot") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
#### Clean up of the Neighborhood varaible ########
# Bucket each house into one of four price-per-sqft groups.
subdat$NbhdGrp <-
ifelse(subdat$price_sqft<=100, "grp1",
ifelse(subdat$price_sqft<=120, "grp2",
ifelse(subdat$price_sqft<=140, "grp3",
"grp4")))
################ include categoriacl variable in the model #######
# lm() expands the character NbhdGrp into dummy contrasts automatically.
MLRresult = lm(SalePrice ~ TotalFloorSF+OverallQual+NbhdGrp, data=subdat)
anova(MLRresult)
summary(MLRresult)
pred <- as.data.frame(predict(MLRresult,subdat))
names(pred)
library(reshape)
pred <- rename(pred, c("predict(MLRresult, subdat)" = "prd"))
subdat$pred <- pred$prd
subdat$res <- subdat$SalePrice - subdat$pred
subdat$absres <- abs(subdat$res)
MAE <- mean(subdat$absres)
MAE
################# define dummy variables ###################
# Manual 0/1 indicators for grp1-grp3 (grp4 is the implicit baseline).
subdat$NbhdGrp1 <-
ifelse(subdat$NbhdGrp == "grp1", 1, 0)
subdat$NbhdGrp2 <-
ifelse(subdat$NbhdGrp == "grp2", 1, 0)
subdat$NbhdGrp3 <-
ifelse(subdat$NbhdGrp == "grp3", 1, 0)
MLRresult4 = lm(SalePrice ~ TotalFloorSF+OverallQual+NbhdGrp1+NbhdGrp2+NbhdGrp3,
data=subdat)
anova(MLRresult4)
summary(MLRresult4)
############################################################
############## assignment 5 #############################
#########################################################
# Set the seed on the random number generator so you get the same split every time that
# you run the code.
my.data <- subdat
set.seed(123)
# Uniform draw per row drives a reproducible 70/30 train/test split.
my.data$u <- runif(n=dim(my.data)[1],min=0,max=1)
# Create train/test split;
train.df <- subset(my.data, u<0.70);
test.df <- subset(my.data, u>=0.70);
names(train.df)
# Keep only the modeling columns for the selection procedures.
train.clean <- subset(train.df, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF","Style1","Style2"))
test.clean <- subset(test.df, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF","Style1","Style2"))
# Check your data split. The sum of the parts should equal the whole.
# Do your totals add up?
dim(my.data)[1]
dim(train.df)[1]
dim(test.df)[1]
dim(train.df)[1]+dim(test.df)[1]
# Drop incomplete rows; note test.clean may now have fewer rows than test.df.
train.clean <- na.omit(train.clean)
test.clean <- na.omit(test.clean)
# Define the upper model as the FULL model
upper.lm <- lm(logSalePrice ~ .,data=train.clean);
summary(upper.lm)
# Define the lower model as the Intercept model
lower.lm <- lm(logSalePrice ~ 1,data=train.clean);
summary(lower.lm)
# Need a SLR to initialize stepwise selection
sqft.lm <- lm(logSalePrice ~ TotalFloorSF,data=train.clean);
summary(sqft.lm)
# Note: There is only one function for classical model selection in R - stepAIC();
# stepAIC() is part of the MASS library.
# The MASS library comes with the BASE R distribution, but you still need to load it;
library(MASS)
# Call stepAIC() for variable selection
forward.lm <- stepAIC(object=lower.lm,scope=list(upper=upper.lm,lower=lower.lm),
direction=c('forward'));
summary(forward.lm)
backward.lm <- stepAIC(object=upper.lm,direction=c('backward'));
summary(backward.lm)
stepwise.lm <- stepAIC(object=sqft.lm,scope=list(upper=formula(upper.lm),lower=~1),
direction=c('both'));
summary(stepwise.lm)
# Deliberately weak benchmark model for comparison.
junk.lm <- lm(logSalePrice ~ OverallQual + LotArea, data=train.clean)
summary(junk.lm)
library(car)
# Multicollinearity check on every selected model.
sort(vif(forward.lm),decreasing=TRUE)
sort(vif(backward.lm),decreasing=TRUE)
sort(vif(stepwise.lm),decreasing=TRUE)
sort(vif(junk.lm),decreasing=TRUE)
# Out-of-sample predictions on the cleaned test set.
forward.test <- predict(forward.lm,newdata=test.clean);
backward.test <- predict(backward.lm,newdata=test.clean);
stepwise.test <- predict(stepwise.lm,newdata=test.clean);
junk.test <- predict(junk.lm,newdata=test.clean);
# Training Data
# Abs Pct Error
# MAPE on the training data (log scale) for each candidate model.
forward.pct <- abs(forward.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(forward.pct)
MAPE
backward.pct <- abs(backward.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(backward.pct)
MAPE
stepwise.pct <- abs(stepwise.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(stepwise.pct)
MAPE
junk.pct <- abs(junk.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(junk.pct)
MAPE
# Test Data
# Abs Pct Error
# BUG FIX: predictions were made from test.clean (after na.omit()), but the
# actuals were previously taken from test.df (before na.omit()). If na.omit()
# dropped any rows the two vectors differ in length and R silently recycles
# the shorter one, misaligning every comparison after the first dropped row.
# Use test.clean$logSalePrice so actuals and predictions align row-for-row.
forward.testPCT <- abs(test.clean$logSalePrice-forward.test)/test.clean$logSalePrice;
MAPE <- mean(forward.testPCT)
MAPE
backward.testPCT <- abs(test.clean$logSalePrice-backward.test)/test.clean$logSalePrice;
MAPE <- mean(backward.testPCT)
MAPE
stepwise.testPCT <- abs(test.clean$logSalePrice-stepwise.test)/test.clean$logSalePrice;
MAPE <- mean(stepwise.testPCT)
MAPE
junk.testPCT <- abs(test.clean$logSalePrice-junk.test)/test.clean$logSalePrice;
MAPE <- mean(junk.testPCT)
MAPE
# Assign Prediction Grades training data;
# Bin each absolute percent error into four accuracy grades.
forward.PredictionGrade <- ifelse(forward.pct<=0.10,'Grade 1: [0.0.10]',
ifelse(forward.pct<=0.15,'Grade 2: (0.10,0.15]',
ifelse(forward.pct<=0.25,'Grade 3: (0.15,0.25]',
'Grade 4: (0.25+]')
)
)
# Share of training observations in each grade.
forward.trainTable <- table(forward.PredictionGrade)
forward.trainTable/sum(forward.trainTable)
# Assign Prediction Grades test data;
forward.testPredictionGrade <- ifelse(forward.testPCT<=0.10,'Grade 1: [0.0.10]',
ifelse(forward.testPCT<=0.15,'Grade 2: (0.10,0.15]',
ifelse(forward.testPCT<=0.25,'Grade 3: (0.15,0.25]',
'Grade 4: (0.25+]')
)
)
# Share of test observations in each grade.
forward.testTable <-table(forward.testPredictionGrade)
forward.testTable/sum(forward.testTable)
######################################################################
# Nested model comparison: full model (with Style1/Style2) vs reduced model,
# then influence diagnostics and a refit with influential points removed.
sub <- subset(subdat, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF","Style1","Style2"))
MLRresult1 = lm(logSalePrice ~ ., data=sub)
sub2 <- subset(subdat, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF"))
MLRresult2 = lm(logSalePrice ~ ., data=sub2)
# Partial F-test for the Style1/Style2 terms.
anova(MLRresult1,MLRresult2)
anova(MLRresult1)
summary(MLRresult1)
par(mfrow=c(2,2)) # visualize four graphs at once
# BUG FIX: this section previously diagnosed the stale MLRresult object (the
# dummy-variable SalePrice model fit much earlier in the script) instead of
# the MLRresult1 model fit just above. Diagnose MLRresult1 throughout.
plot(MLRresult1)
names(MLRresult1)
head(MLRresult1$df.residual)
inflm.MLRLog <- influence.measures(MLRresult1)
names(inflm.MLRLog)
str(inflm.MLRLog)
# infmat holds one diagnostic column per measure; keep DFFITS only.
inflmetrics <- as.data.frame(inflm.MLRLog$infmat)
dffit_df <- subset(inflmetrics, select= c(dffit))
# Join diagnostics back onto the data by row name.
sub$r1 <- row.names(sub)
dffit_df$r1 <- row.names(dffit_df)
subnew <- merge(sub, dffit_df, all=FALSE)
subnew <- subset(subnew, select= -c(r1))
subnew$absdffit <- abs(subnew$dffit)
# BUG FIX: the filter previously read subnew$absdf, which only worked through
# R's silent `$` partial matching against the absdffit column; spell it out.
subnewinf <- subnew[which(subnew$absdffit < 0.064),]
MLRLogresult = lm(logSalePrice ~ TotalFloorSF+OverallQual+HouseAge+
LotArea+BsmtFinSF1+TotalBsmtSF+Style1+Style2,data=subnewinf)
anova(MLRLogresult)
summary(MLRLogresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRLogresult)
|
/SkeletonCode/AmesSkeletonCode.R
|
no_license
|
anhnguyendepocen/MSDS-Supervised-Learning
|
R
| false
| false
| 18,041
|
r
|
# Working-directory bootstrap: prefer the home checkout if present, otherwise
# fall back to the work checkout. NOTE(review): setwd() in scripts is fragile;
# paths below assume one of these two machines.
path.work <- "E:/GitHub/MSDS-RegressionAnalysis/data"
path.home <- "D:/Projects/MSDS-RegressionAnalysis/data"
if (file.exists(path.home)) {
setwd(path.home)
} else {
setwd(path.work)
}
##################
# Load the Ames housing data and derive the engineered features used throughout.
mydata <- read.csv(file="ames_housing_data.csv",head=TRUE,sep=",")
str(mydata)
head(mydata)
names(mydata)
mydata$TotalFloorSF <- mydata$FirstFlrSF + mydata$SecondFlrSF
mydata$HouseAge <- mydata$YrSold - mydata$YearBuilt
mydata$QualityIndex <- mydata$OverallQual * mydata$OverallCond
mydata$logSalePrice <- log(mydata$SalePrice)
mydata$price_sqft <- mydata$SalePrice/mydata$TotalFloorSF
summary(mydata$price_sqft)
hist(mydata$price_sqft)
# Analysis subset; NOTE(review): "HouseStyle" is listed twice in this select.
subdat <- subset(mydata, select=c("TotalFloorSF","HouseAge","QualityIndex",
"price_sqft", "SalePrice","LotArea",
"BsmtFinSF1","Neighborhood","HouseStyle",
"LotShape","OverallQual","logSalePrice",
"TotalBsmtSF","HouseStyle"))
str(subdat)
# Numeric-only subset for correlation work.
subdatnum <- subset(mydata, select=c("TotalFloorSF","HouseAge","QualityIndex",
"SalePrice","LotArea","OverallQual","logSalePrice"))
#####################################################################
######################### Assignment 1 ##############################
#####################################################################
#################################################################
################## univariate EDA ##############################
###############################################################
require(ggplot2)
ggplot(subdat) +
geom_bar( aes(LotShape) ) +
ggtitle("Number of houses per Lotshape") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=SalePrice)) +
geom_histogram(color="black", binwidth= 10000) +
labs(title="Distribution of Sale Price") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=TotalFloorSF)) +
geom_histogram(color="black", binwidth= 100) +
labs(title="Distribution of TotalFloorSF") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=QualityIndex)) +
geom_histogram(color="black", binwidth= 10) +
labs(title="Distribution of QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
#######################################################################
########### bivariate EDA ########################################
###################################################################
ggplot(subdat, aes(x=TotalFloorSF, y=QualityIndex)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Total Floor SF vs QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=TotalFloorSF, y=HouseAge)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Total Floor SF vs HouseAge") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=LotShape, y=HouseAge)) +
geom_boxplot(fill="blue") +
labs(title="Distribution of HouseAge") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
############################################################
################ model focussed EDA #######################
###########################################################
ggplot(subdat, aes(x=TotalFloorSF, y=SalePrice)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Sale Price vs Total Floor SF") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm, se=FALSE) ## method=lm, se=FALSE ###
ggplot(subdat, aes(x=QualityIndex, y=SalePrice)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Sale Price vs QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
ggplot(subdat, aes(x=LotShape, y=SalePrice)) +
geom_boxplot(fill="blue") +
labs(title="Distribution of Sale Price") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
#####################################################################
############# EDA for multiple variables ###########################
##################################################################
require(GGally)
ggpairs(subdat)
require(lattice)
pairs(subdat, pch = 21)
require(corrplot)
# Correlation heat map over the numeric-only columns.
mcor <- cor(subdatnum)
corrplot(mcor, method="shade", shade.col=NA, tl.col="black",tl.cex=0.5)
#####################################################################
############# Define the sample data ###########################
##################################################################
# Drop the handful of very large (>= 4000 sqft) outlier houses.
subdat2 <- subdat[which(subdat$TotalFloorSF < 4000),]
###################################################################
################## Assignment 2 ################################
#################################################################
# NOTE(review): attach() is called three times below without detach();
# masking warnings are likely on re-runs.
attach(subdat2)
ggplot(subdat2, aes(x=TotalFloorSF, y=SalePrice)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Sale Price vs Total Floor SF") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm,se=FALSE) ## method=lm, se=FALSE ###
ggplot(subdat2, aes(x=QualityIndex, y=SalePrice)) +
geom_point(color="blue", shape=1) +
ggtitle("Scatter Plot of Sale Price vs QualityIndex") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm) ## method=lm, se=FALSE ###
# 3D Scatterplot with Coloring and Vertical Lines
# and Regression Plane
library(scatterplot3d)
attach(subdat2)
s3d <-scatterplot3d(TotalFloorSF,QualityIndex,SalePrice,pch=16,
highlight.3d=TRUE,type="h", main="3D Scatterplot")
# Overlay the fitted two-predictor regression plane.
fit <- lm(SalePrice ~ TotalFloorSF + QualityIndex)
s3d$plane3d(fit)
library(Rcmdr)
attach(subdat2)
scatter3d(SalePrice,TotalFloorSF,QualityIndex)
############## fitting a SLR ###################################
# Simple linear regression of SalePrice on TotalFloorSF.
SLRresult = lm(SalePrice ~ TotalFloorSF, data=subdatnum)#subdat2
anova(SLRresult)
summary(SLRresult)
par(mfrow=c(1,1)) # visualize four graphs at once
# Prediction intervals: fit/lwr/upr columns; only fit is kept below.
pred <- as.data.frame(predict(SLRresult,subdatnum,interval="prediction"))
str(pred)
head(pred)
subdatnum <- cbind(subdatnum,pred)
str(subdatnum)
head(subdatnum)
subdatnum <- subset( subdatnum, select = -lwr)
subdatnum <- subset( subdatnum, select = -upr)
library(reshape)
subdatnum <- rename(subdatnum, c(fit="fitSLR"))
############## fitting a MLR ###################################
# Two-predictor model of raw SalePrice; diagnostics via anova/summary/plot.
MLRresult = lm(SalePrice ~ TotalFloorSF+OverallQual, data=subdatnum)
anova(MLRresult)
summary(MLRresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRresult)
pred <- as.data.frame(predict(MLRresult,subdatnum,interval="prediction"))
str(pred)
head(pred)
subdatnum <- cbind(subdatnum,pred)
subdatnum <- subset( subdatnum, select = -lwr)
subdatnum <- subset( subdatnum, select = -upr)
str(subdatnum)
head(subdatnum)
subdatnum <- rename(subdatnum, c(fit="fitMLR"))
# Raw residual = actual - fitted.
subdatnum$res <- subdatnum$SalePrice - subdatnum$fitMLR
head(subdatnum)
###################################################################
##################### Assignment 3 ################################
#################################################################
################ MAE calculation ###################################
# Refit the two-predictor model on subdat and compute Mean Absolute Error.
MLRresult = lm(SalePrice ~ TotalFloorSF+OverallQual, data=subdat)
anova(MLRresult)
summary(MLRresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRresult)
pred <- as.data.frame(predict(MLRresult,subdat))
names(pred)
library(reshape)
# predict() without interval= returns one unnamed column; rename it "prd".
pred <- rename(pred, c("predict(MLRresult, subdat)" = "prd"))
subdat$pred <- pred$prd
subdat$res <- subdat$SalePrice - subdat$pred
subdat$absres <- abs(subdat$res)
MAE <- mean(subdat$absres)
MAE
require(ggplot2)
# Residuals vs OverallQual: check for structure left in the residuals.
ggplot(subdat, aes(x=OverallQual, y=res)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Residual vs OverallQual") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm,se=FALSE) ## method=lm, se=FALSE ###
################################################################
############### Log Transformation #############################
# Same predictors, log-transformed response; MAE below is on log scale.
MLRLogresult = lm(logSalePrice ~ TotalFloorSF+OverallQual, data=subdatnum)
anova(MLRLogresult)
summary(MLRLogresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRLogresult)
pred <- as.data.frame(predict(MLRLogresult,subdatnum,interval="prediction"))
str(pred)
head(pred)
subdatnum <- cbind(subdatnum,pred)
subdatnum <- subset( subdatnum, select = -lwr)
subdatnum <- subset( subdatnum, select = -upr)
str(subdatnum)
head(subdatnum)
subdatnum <- rename(subdatnum, c(fit="fitMLRLog"))
subdatnum$reslog <- subdatnum$logSalePrice - subdatnum$fitMLRLog
MAE <- mean(abs(subdatnum$reslog))
MAE
head(subdatnum)
library(car)
# Variance inflation factors: multicollinearity check.
vif(MLRLogresult)
par(mfrow=c(1,1))
influencePlot(MLRLogresult, id.method="identify", main="Influence Plot",
sub="Circle size is proportial to Cook's Distance")
summary(inflm.MLRLog <- influence.measures(MLRLogresult))
# DFFITS per observation, used below to flag influential points.
dffitslog <- dffits(MLRLogresult)
subdatnum <- cbind(subdatnum,dffitslog)
str(subdatnum)
ggplot(subdatnum, aes(x=OverallQual, y=reslog)) +
geom_point(color="blue", size=2) +
ggtitle("Scatter Plot of Residual vs OverallQual") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5)) +
geom_smooth(method=lm,se=FALSE) ## method=lm, se=FALSE ###
################ influential points removed #######
# Keep only observations with |DFFITS| below the 0.064 cutoff, then refit.
subdatnum$absdf <- abs(subdatnum$dffitslog)
head(subdatnum)
subdatnuminf <- subdatnum[which(subdatnum$absdf < 0.064),]
MLRLogresult = lm(logSalePrice ~ TotalFloorSF+OverallQual, data=subdatnuminf)
anova(MLRLogresult)
summary(MLRLogresult)
############## analyze Neighborhood variable #########
require(ggplot2)
ggplot(subdat, aes(x=Neighborhood, y=SalePrice)) +
geom_boxplot(fill="blue") +
labs(title="Distribution of Sale Price") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
library(plyr)
# Per-neighborhood summaries: model MAE, mean price, totals for $/sqft.
subdat1 <- ddply(subdat, .(Neighborhood), summarise,
MAE = mean(absres))
subdat2 <- ddply(subdat, .(Neighborhood), summarise,
MeanPrice = mean(SalePrice))
subdat3 <- ddply(subdat, .(Neighborhood), summarise,
TotalPrice = sum(SalePrice))
subdat4 <- ddply(subdat, .(Neighborhood), summarise,
TotalSqft = sum(TotalFloorSF))
# NOTE(review): cbind here duplicates the Neighborhood column; harmless but untidy.
subdat34 <- cbind(subdat3,subdat4)
subdat34$AvgPr_Sqft <- subdat34$TotalPrice/subdat34$TotalSqft
subdatall <- subdat1
subdatall$MeanPrice <- subdat2$MeanPrice
subdatall$AvgPr_Sqft <- subdat34$AvgPr_Sqft
require(ggplot2)
ggplot(subdatall, aes(x=AvgPr_Sqft, y=MeanPrice)) +
geom_point(color="blue", shape=1,size=3) +
ggtitle("Scatter Plot") +
theme(plot.title=element_text(lineheight=0.8, face="bold", hjust=0.5))
#### Clean up of the Neighborhood varaible ########
# Bucket each house into one of four price-per-sqft groups.
subdat$NbhdGrp <-
ifelse(subdat$price_sqft<=100, "grp1",
ifelse(subdat$price_sqft<=120, "grp2",
ifelse(subdat$price_sqft<=140, "grp3",
"grp4")))
################ include categoriacl variable in the model #######
# lm() expands the character NbhdGrp into dummy contrasts automatically.
MLRresult = lm(SalePrice ~ TotalFloorSF+OverallQual+NbhdGrp, data=subdat)
anova(MLRresult)
summary(MLRresult)
pred <- as.data.frame(predict(MLRresult,subdat))
names(pred)
library(reshape)
pred <- rename(pred, c("predict(MLRresult, subdat)" = "prd"))
subdat$pred <- pred$prd
subdat$res <- subdat$SalePrice - subdat$pred
subdat$absres <- abs(subdat$res)
MAE <- mean(subdat$absres)
MAE
################# define dummy variables ###################
# Manual 0/1 indicators for grp1-grp3 (grp4 is the implicit baseline).
subdat$NbhdGrp1 <-
ifelse(subdat$NbhdGrp == "grp1", 1, 0)
subdat$NbhdGrp2 <-
ifelse(subdat$NbhdGrp == "grp2", 1, 0)
subdat$NbhdGrp3 <-
ifelse(subdat$NbhdGrp == "grp3", 1, 0)
MLRresult4 = lm(SalePrice ~ TotalFloorSF+OverallQual+NbhdGrp1+NbhdGrp2+NbhdGrp3,
data=subdat)
anova(MLRresult4)
summary(MLRresult4)
############################################################
############## assignment 5 #############################
#########################################################
# Set the seed on the random number generator so you get the same split every time that
# you run the code.
my.data <- subdat
set.seed(123)
# Uniform draw per row drives a reproducible 70/30 train/test split.
my.data$u <- runif(n=dim(my.data)[1],min=0,max=1)
# Create train/test split;
train.df <- subset(my.data, u<0.70);
test.df <- subset(my.data, u>=0.70);
names(train.df)
# Keep only the modeling columns for the selection procedures.
train.clean <- subset(train.df, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF","Style1","Style2"))
test.clean <- subset(test.df, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF","Style1","Style2"))
# Check your data split. The sum of the parts should equal the whole.
# Do your totals add up?
dim(my.data)[1]
dim(train.df)[1]
dim(test.df)[1]
dim(train.df)[1]+dim(test.df)[1]
# Drop incomplete rows; note test.clean may now have fewer rows than test.df.
train.clean <- na.omit(train.clean)
test.clean <- na.omit(test.clean)
# Define the upper model as the FULL model
upper.lm <- lm(logSalePrice ~ .,data=train.clean);
summary(upper.lm)
# Define the lower model as the Intercept model
lower.lm <- lm(logSalePrice ~ 1,data=train.clean);
summary(lower.lm)
# Need a SLR to initialize stepwise selection
sqft.lm <- lm(logSalePrice ~ TotalFloorSF,data=train.clean);
summary(sqft.lm)
# Note: There is only one function for classical model selection in R - stepAIC();
# stepAIC() is part of the MASS library.
# The MASS library comes with the BASE R distribution, but you still need to load it;
library(MASS)
# Call stepAIC() for variable selection
forward.lm <- stepAIC(object=lower.lm,scope=list(upper=upper.lm,lower=lower.lm),
direction=c('forward'));
summary(forward.lm)
backward.lm <- stepAIC(object=upper.lm,direction=c('backward'));
summary(backward.lm)
stepwise.lm <- stepAIC(object=sqft.lm,scope=list(upper=formula(upper.lm),lower=~1),
direction=c('both'));
summary(stepwise.lm)
# Deliberately weak benchmark model for comparison.
junk.lm <- lm(logSalePrice ~ OverallQual + LotArea, data=train.clean)
summary(junk.lm)
library(car)
# Multicollinearity check on every selected model.
sort(vif(forward.lm),decreasing=TRUE)
sort(vif(backward.lm),decreasing=TRUE)
sort(vif(stepwise.lm),decreasing=TRUE)
sort(vif(junk.lm),decreasing=TRUE)
# Out-of-sample predictions on the cleaned test set.
forward.test <- predict(forward.lm,newdata=test.clean);
backward.test <- predict(backward.lm,newdata=test.clean);
stepwise.test <- predict(stepwise.lm,newdata=test.clean);
junk.test <- predict(junk.lm,newdata=test.clean);
# Training Data
# Abs Pct Error
# MAPE on the training data (log scale) for each candidate model.
forward.pct <- abs(forward.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(forward.pct)
MAPE
backward.pct <- abs(backward.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(backward.pct)
MAPE
stepwise.pct <- abs(stepwise.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(stepwise.pct)
MAPE
junk.pct <- abs(junk.lm$residuals)/train.clean$logSalePrice;
MAPE <- mean(junk.pct)
MAPE
# Test Data
# Abs Pct Error
# BUG FIX: predictions were made from test.clean (after na.omit()), but the
# actuals were previously taken from test.df (before na.omit()). If na.omit()
# dropped any rows the two vectors differ in length and R silently recycles
# the shorter one, misaligning every comparison after the first dropped row.
# Use test.clean$logSalePrice so actuals and predictions align row-for-row.
forward.testPCT <- abs(test.clean$logSalePrice-forward.test)/test.clean$logSalePrice;
MAPE <- mean(forward.testPCT)
MAPE
backward.testPCT <- abs(test.clean$logSalePrice-backward.test)/test.clean$logSalePrice;
MAPE <- mean(backward.testPCT)
MAPE
stepwise.testPCT <- abs(test.clean$logSalePrice-stepwise.test)/test.clean$logSalePrice;
MAPE <- mean(stepwise.testPCT)
MAPE
junk.testPCT <- abs(test.clean$logSalePrice-junk.test)/test.clean$logSalePrice;
MAPE <- mean(junk.testPCT)
MAPE
# Assign Prediction Grades training data;
# Bin each absolute percent error into four accuracy grades.
forward.PredictionGrade <- ifelse(forward.pct<=0.10,'Grade 1: [0.0.10]',
ifelse(forward.pct<=0.15,'Grade 2: (0.10,0.15]',
ifelse(forward.pct<=0.25,'Grade 3: (0.15,0.25]',
'Grade 4: (0.25+]')
)
)
# Share of training observations in each grade.
forward.trainTable <- table(forward.PredictionGrade)
forward.trainTable/sum(forward.trainTable)
# Assign Prediction Grades test data;
forward.testPredictionGrade <- ifelse(forward.testPCT<=0.10,'Grade 1: [0.0.10]',
ifelse(forward.testPCT<=0.15,'Grade 2: (0.10,0.15]',
ifelse(forward.testPCT<=0.25,'Grade 3: (0.15,0.25]',
'Grade 4: (0.25+]')
)
)
# Share of test observations in each grade.
forward.testTable <-table(forward.testPredictionGrade)
forward.testTable/sum(forward.testTable)
######################################################################
# Nested model comparison: full model (with Style1/Style2) vs reduced model,
# then influence diagnostics and a refit with influential points removed.
sub <- subset(subdat, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF","Style1","Style2"))
MLRresult1 = lm(logSalePrice ~ ., data=sub)
sub2 <- subset(subdat, select=c("TotalFloorSF","HouseAge",
"OverallQual","LotArea","logSalePrice","BsmtFinSF1",
"TotalBsmtSF"))
MLRresult2 = lm(logSalePrice ~ ., data=sub2)
# Partial F-test for the Style1/Style2 terms.
anova(MLRresult1,MLRresult2)
anova(MLRresult1)
summary(MLRresult1)
par(mfrow=c(2,2)) # visualize four graphs at once
# BUG FIX: this section previously diagnosed the stale MLRresult object (the
# dummy-variable SalePrice model fit much earlier in the script) instead of
# the MLRresult1 model fit just above. Diagnose MLRresult1 throughout.
plot(MLRresult1)
names(MLRresult1)
head(MLRresult1$df.residual)
inflm.MLRLog <- influence.measures(MLRresult1)
names(inflm.MLRLog)
str(inflm.MLRLog)
# infmat holds one diagnostic column per measure; keep DFFITS only.
inflmetrics <- as.data.frame(inflm.MLRLog$infmat)
dffit_df <- subset(inflmetrics, select= c(dffit))
# Join diagnostics back onto the data by row name.
sub$r1 <- row.names(sub)
dffit_df$r1 <- row.names(dffit_df)
subnew <- merge(sub, dffit_df, all=FALSE)
subnew <- subset(subnew, select= -c(r1))
subnew$absdffit <- abs(subnew$dffit)
# BUG FIX: the filter previously read subnew$absdf, which only worked through
# R's silent `$` partial matching against the absdffit column; spell it out.
subnewinf <- subnew[which(subnew$absdffit < 0.064),]
MLRLogresult = lm(logSalePrice ~ TotalFloorSF+OverallQual+HouseAge+
LotArea+BsmtFinSF1+TotalBsmtSF+Style1+Style2,data=subnewinf)
anova(MLRLogresult)
summary(MLRLogresult)
par(mfrow=c(2,2)) # visualize four graphs at once
plot(MLRLogresult)
|
# Pull ABMI taxon tables via the API helpers, normalize them, and write
# per-taxon cross tabs / mefa bundles under ROOT.
library(mefa4)
source("~/repos/abmianalytics/species/abmi-r-api.R")
#data.frame(table=get_table_names())
## settings
TAXA <- c("vplants", "mites", "mosses", "lichens")
ROOT <- "s:/AB_data_v2020"
## common stuff
# Run date; stamped into every output file name below.
DATE <- as.Date(Sys.time(), tz=Sys.timezone(location = TRUE))
gis <- read.csv("~/repos/abmianalytics/lookup/sitemetadata.csv")
# Add derived site/visit identifier columns to a raw ABMI table pull.
#
# Args:
#   res:     data frame with at least ABMISite, Year, and the column named
#            by sub_col.
#   sub_col: name of the table-specific subunit column (e.g. "Quadrant");
#            its values are copied to `subunit` and the column is dropped.
#
# Returns res with added columns: offgrid (logical, "OG"-prefixed site),
# subunit, site_year and site_year_sub (factor keys), and nearest (integer
# site number of the nearest on-grid site).
add_labels <- function(res, sub_col) {
  site_chr <- as.character(res$ABMISite)
  # Off-grid sites carry an "OG" prefix in the site label.
  res$offgrid <- startsWith(site_chr, "OG")
  # Promote the table-specific subunit column to a standard name.
  res$subunit <- res[[sub_col]]
  res[[sub_col]] <- NULL
  # Composite visit keys; unused factor combinations are dropped.
  res$site_year <- interaction(res$ABMISite, res$Year, drop = TRUE, sep = "_")
  res$site_year_sub <- interaction(res$ABMISite, res$Year, res$subunit,
    drop = TRUE, sep = "_")
  # Nearest on-grid site number: third dash-separated token for off-grid
  # labels ("OG-<type>-<nearest>-..."), else the label itself; digits only.
  pieces <- strsplit(site_chr, "-")
  res$nearest <- vapply(pieces, function(p) {
    token <- if (length(p) > 1) p[3] else p[1]
    as.integer(gsub("\\D+", "", token))
  }, integer(1))
  res
}
# Normalize taxonomy columns and derive a machine-friendly SpeciesID.
#
# Args:
#   res:        data frame with factor columns ScientificName and
#               TaxonomicResolution.
#   spgen_only: when TRUE, strip hybrid markers, truncate names to
#               "Genus species", and fold Subspecies/Variety resolution up
#               to "Species".
#
# Returns res with a SpeciesID factor whose levels are alphanumeric
# MixedCase keys (via mefa4::nameAlnum) derived from ScientificName.
normalize_species <- function(res, spgen_only = TRUE) {
  if (spgen_only) {
    # Clean the scientific-name levels in one pass; assigning the modified
    # level vector back merges any duplicates, same as repeated `levels<-`.
    lv <- levels(res$ScientificName)
    lv <- gsub("X ", "", lv)   # drop leading hybrid marker
    lv <- gsub(" x ", " ", lv) # drop infix hybrid marker
    # Keep at most the first two whitespace-separated tokens (Genus species).
    lv <- vapply(strsplit(lv, " "), function(w) {
      paste(w[1:min(2, length(w))], collapse = " ")
    }, character(1))
    levels(res$ScientificName) <- lv
    # Infraspecific ranks are treated as species-level records.
    rl <- levels(res$TaxonomicResolution)
    rl[rl %in% c("Subspecies", "Variety")] <- "Species"
    levels(res$TaxonomicResolution) <- rl
  }
  # SpeciesID: alphanumeric MixedCase key per (possibly cleaned) name.
  sp <- res$ScientificName
  levels(sp) <- nameAlnum(levels(sp), capitalize = "mixed", collapse = "")
  res$SpeciesID <- droplevels(sp)
  res
}
# cn1: per-visit attribute columns kept in the sample tables below.
cn1 <- c("ABMISite", "Year", "subunit", "site_year", "site_year_sub", "offgrid", "nearest")
# cn2: per-species attribute columns kept in the taxa tables below.
cn2 <- c("SpeciesID", "CommonName", "ScientificName", "TaxonomicResolution",
"UniqueTaxonomicIdentificationNumber")
# Main loop: for each taxon pull the raw table(s), normalize, build
# sample/species cross tabs and mefa bundles, and write csv/Rdata outputs.
for (taxon in TAXA) {
cat("taxon:", taxon, "\n - pulling and normalizing data")
flush.console()
## vascular plants -------------------------
if (taxon == "vplants") {
tab <- "T15"
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW")
allowed_resolution <- c("Genus", "Species")
# sub_max: expected max per-site count after aggregating subunits (sanity check).
sub_max <- 4
res <- get_table(tab)
res0 <- res
save_list <- "res0"
colnames(res) <- gsub(" ", "", colnames(res))
res <- add_labels(res, sub_col=sub_col)
res <- normalize_species(res)
}
## mosses -------------------------
# Two source tables: pre-2009 data has no quadrant, so it is labeled "1ha".
if (taxon == "mosses") {
tab1 <- "T19A" # T19A Moss Identification (2003-2008)
tab2 <- "T19B" # T19B Moss Identification (since 2009)
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW", "1ha")
allowed_resolution <- c("Genus", "Species")
sub_max <- 4
res1 <- get_table(tab1)
res2 <- get_table(tab2)
res01 <- res1
res02 <- res2
save_list <- c("res01", "res02")
colnames(res1) <- gsub(" ", "", colnames(res1))
res1[[sub_col]] <- as.factor("1ha")
res1 <- add_labels(res1, sub_col=sub_col)
res1 <- normalize_species(res1)
colnames(res2) <- gsub(" ", "", colnames(res2))
res2 <- add_labels(res2, sub_col=sub_col)
res2 <- normalize_species(res2)
# Stack the two eras on their shared columns only.
tmp <- intersect(colnames(res1), colnames(res2))
res <- rbind(res1[,tmp], res2[,tmp])
}
## lichens -------------------------
# Same two-era structure as mosses (T20A/T20B).
if (taxon == "lichens") {
tab1 <- "T20A" # T20A Lichen Identification (2003-2008)
tab2 <- "T20B" # T20B Lichen Identification (since 2009)
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW", "1ha")
allowed_resolution <- c("Genus", "Species")
sub_max <- 4
res1 <- get_table(tab1)
res2 <- get_table(tab2)
res01 <- res1
res02 <- res2
save_list <- c("res01", "res02")
colnames(res1) <- gsub(" ", "", colnames(res1))
res1[[sub_col]] <- as.factor("1ha")
res1 <- add_labels(res1, sub_col=sub_col)
res1 <- normalize_species(res1)
colnames(res2) <- gsub(" ", "", colnames(res2))
res2 <- add_labels(res2, sub_col=sub_col)
res2 <- normalize_species(res2)
tmp <- intersect(colnames(res1), colnames(res2))
res <- rbind(res1[,tmp], res2[,tmp])
}
## mites -------------------------
if (taxon == "mites") {
tab <- "T24A"
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW")
allowed_resolution <- c("Genus", "Species", "Subspecies")
sub_max <- 4
res <- get_table(tab)
res0 <- res
save_list <- "res0"
colnames(res) <- gsub(" ", "", colnames(res))
res <- add_labels(res, sub_col=sub_col)
res <- normalize_species(res, spgen_only=FALSE) # keep spp names as is
res$CommonName <- NA
}
cat(" --- OK\n - processing attributes and x-tabs")
flush.console()
## sample attributes
# One row per site-year and per site-year-subunit, indexed by key.
x_site_year <- nonDuplicated(res, res$site_year, TRUE)[,cn1]
x_site_year$subunit <- x_site_year$site_year_sub <- NULL
rownames(x_site_year) <- x_site_year$site_year
x_site_year_sub <- nonDuplicated(res, res$site_year_sub, TRUE)[,cn1]
rownames(x_site_year_sub) <- x_site_year_sub$site_year_sub
## species attributes
z <- nonDuplicated(res, res$SpeciesID, TRUE)[,c(cn2)]
rownames(z) <- z$SpeciesID
## sample-species cross tabs
# cdrop removes placeholder codes (none/species-not-identified/etc.).
y_site_year_sub <- Xtab(~ site_year_sub + SpeciesID, res,
cdrop=c("NONE","SNI", "VNA", "DNC", "PNA"))
y_site_year_sub01 <- y_site_year_sub
y_site_year_sub01[y_site_year_sub01 > 0] <- 1
# Plant-like taxa are detection/non-detection; mites keep raw counts.
if (taxon %in% c("vplants", "mosses", "lichens"))
y_site_year_sub <- y_site_year_sub01
## mefa bundles for sample/subunits
m_site_year_sub <- Mefa(y_site_year_sub, x_site_year_sub, z)
m_site_year_sub <- m_site_year_sub[,taxa(m_site_year_sub)$TaxonomicResolution %in%
allowed_resolution]
m_site_year_sub01 <- Mefa(y_site_year_sub01, x_site_year_sub, z)
m_site_year_sub01 <- m_site_year_sub01[,taxa(m_site_year_sub)$TaxonomicResolution %in%
allowed_resolution]
## aggregated cross tabs for binomial tables
# Sum subunit detections per site-year; values become "k of n subunits".
tmp <- m_site_year_sub01[samp(m_site_year_sub01)$subunit %in% allowed_subunits]
nn <- sum_by(rep(1, nrow(tmp)), droplevels(samp(tmp)$site_year))
y_site_year <- groupSums(xtab(tmp), 1, droplevels(samp(tmp)$site_year))
# Sanity check: no site-year can exceed the expected subunit count.
stopifnot(max(y_site_year) == sub_max)
## aggregated mefa bundles for samples
m_site_year <- Mefa(y_site_year, x_site_year, z)
samp(m_site_year)$nQuadrant <- nn[match(rownames(m_site_year), rownames(nn)),"by"]
## csv view
out_site_year <- data.frame(samp(m_site_year), as.matrix(xtab(m_site_year)))
out_site_year_sub <- data.frame(samp(m_site_year_sub), as.matrix(xtab(m_site_year_sub)))
out_species <- taxa(m_site_year)
cat(" --- OK\n - saving files")
flush.console()
## save raw input
save(list=save_list, file=file.path(ROOT, "data", "raw", "species",
paste0(taxon, "_", DATE, ".Rdata")))
## save bundles
save(m_site_year, m_site_year_sub, file=file.path(ROOT, "data", "inter", "species",
paste0(taxon, "_", DATE, ".Rdata")))
## write csv & binary
write.csv(out_site_year, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_SiteBinom_", DATE, ".csv")))
write.csv(out_site_year_sub, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_Quadrant_", DATE, ".csv")))
write.csv(out_species, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_Species_", DATE, ".csv")))
save(out_site_year, out_site_year_sub, out_species,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_out_", DATE, ".Rdata")))
## vascular plants common/dominant
# Encode abundance class per cell: 1=uncommon, 10=common, 100=dominant
# (higher class wins when a cell has multiple records).
if (taxon == "vplants") {
res$comdom <- res$Present
levels(res$comdom)[!(levels(res$comdom) %in% c("Dominant", "Common"))] <- "Uncommon"
table(res$comdom)
xt0 <- Xtab(~ site_year_sub + SpeciesID + comdom, res,
cdrop=c("NONE","SNI", "VNA", "DNC", "PNA"))
xt1 <- xt0$Uncommon
xt1[xt1>0] <- 1
xt10 <- xt0$Common * 10
xt10[xt10>0] <- 10
xt100 <- xt0$Dominant * 100
xt100[xt100>0] <- 100
y_comdom <- xt1
y_comdom[xt10>0] <- xt10[xt10>0]
y_comdom[xt100>0] <- xt100[xt100>0]
save(y_comdom, file=file.path(ROOT, "data", "inter", "species",
paste0(taxon, "_comdom_", DATE, ".Rdata")))
}
cat(" --- OK\n")
flush.console()
}
## habitat elements
# Table id -> display name, output slug, and (optional) subunit column used
# when pulling the habitat-element tables below.
HEtabs <- list(
"T11" = list(
name="Canopy cover",
slug="canopy-cover",
sub="Sub-ordinaltransect"),
"T12B" = list(
name="Cover layers",
slug="cover-layers",
sub="Quadrant"),
"T08" = list(
name="CWD",
slug="cwd",
sub="Transect"),
"T01D" = list(
name="Ground cover",
slug="ground-cover",
sub=NULL),
"T25" = list(
name="Mineral soil",
slug="mineral-soil",
sub="Quadrant"),
"T02A" = list(
name="Surface substrate",
slug="surface-substrate",
sub=NULL),
"T09" = list(
name="Trees snags",
slug="trees-snags",
sub="Quadrant"))
HabElem <- list()
# Pull each habitat-element table, label it, keep it in memory, and write csv.
for (i in seq_along(HEtabs)) {
cat("* Pulling ", HEtabs[[i]]$name, "...")
res <- get_table(names(HEtabs)[i])
colnames(res) <- gsub(" ", "", colnames(res))
# Tables without a subunit column get a placeholder "DNC" column so
# add_labels() can run uniformly.
if (is.null(HEtabs[[i]]$sub)) {
subcol <- "sub_col"
res$sub_col <- rep("DNC", nrow(res))
} else {
subcol <- HEtabs[[i]]$sub
}
res <- add_labels(res, sub_col=subcol)
HabElem[[HEtabs[[i]]$slug]] <- res
write.csv(res, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0("habitatelements_", HEtabs[[i]]$slug, "_", DATE, ".csv")))
cat(" OK\n")
}
# Bundle all habitat-element tables into one Rdata file.
save(HabElem,
file=file.path(ROOT, "data", "analysis", "species",
paste0("habitatelements_out_", DATE, ".Rdata")))
# Disabled exploratory block: probes which API tables exist and their
# column names / latest years. Kept for reference; never executed.
if (FALSE) {
tn <- c("T19A Moss Identification (2003-2008)",
"T19B Moss Identification (since 2009)",
"T20A Lichen Identification (2003-2008)",
"T20B Lichen Identification (since 2009)",
"T24A Soil Arthropods (Mites) Identification",
"T15 Vascular Plants",
"T26A Breeding Birds",
"T26B Breeding Birds (ARUs)",
"T26C ARU Deployment and Retrieval",
"T26D Breeding Birds (ARU) Abiotic")
names(tn) <- tn
tn <- sapply(strsplit(tn, " "), "[[", 1)
x <- list()
for (tt in tn) {
cat(tt, "\n");flush.console()
try(x[[tt]] <- get_table(table = tt))
}
lapply(x, colnames)
lapply(x, function(z) max(z$Year))
}
|
/species/all-taxa.R
|
no_license
|
psolymos/abmianalytics
|
R
| false
| false
| 10,995
|
r
|
# Pull ABMI taxon tables via the API helpers, normalize them, and write
# per-taxon cross tabs / mefa bundles under ROOT.
library(mefa4)
source("~/repos/abmianalytics/species/abmi-r-api.R")
#data.frame(table=get_table_names())
## settings
TAXA <- c("vplants", "mites", "mosses", "lichens")
ROOT <- "s:/AB_data_v2020"
## common stuff
# Run date; stamped into every output file name.
DATE <- as.Date(Sys.time(), tz=Sys.timezone(location = TRUE))
gis <- read.csv("~/repos/abmianalytics/lookup/sitemetadata.csv")
# Add derived site/visit label columns to a raw ABMI table.
#
# Columns added:
#   offgrid       - TRUE when ABMISite starts with "OG" (off-grid site)
#   subunit       - copy of the column named by `sub_col` (which is then removed)
#   site_year     - "<site>_<year>" factor key
#   site_year_sub - "<site>_<year>_<subunit>" factor key
#   nearest       - integer site number: the 3rd "-" token for multi-part
#                   (off-grid style) IDs, otherwise the whole ID, digits only
add_labels <- function(res, sub_col) {
  site_chr <- as.character(res$ABMISite)
  res$offgrid <- startsWith(site_chr, "OG")
  res$subunit <- res[[sub_col]]
  res[[sub_col]] <- NULL
  res$site_year <- interaction(res$ABMISite, res$Year,
    drop = TRUE, sep = "_")
  res$site_year_sub <- interaction(res$ABMISite, res$Year, res$subunit,
    drop = TRUE, sep = "_")
  id_parts <- strsplit(site_chr, "-")
  res$nearest <- vapply(id_parts, function(p) {
    token <- if (length(p) > 1) p[3] else p[1]
    as.integer(gsub("\\D+", "", token))
  }, integer(1))
  res
}
# Normalize species naming in a raw ABMI table.
#
# When `spgen_only` is TRUE, scientific names are collapsed to at most
# "Genus species" (hybrid markers "X " / " x " removed) and the
# Subspecies/Variety resolution levels are folded into "Species".
# A `SpeciesID` factor is then derived from the (possibly collapsed)
# scientific name via mefa4::nameAlnum (alphanumeric, MixedCase, no spaces).
normalize_species <- function(res, spgen_only = TRUE) {
  if (spgen_only) {
    sci <- levels(res$ScientificName)
    sci <- gsub("X ", "", sci)    # drop hybrid "X " prefix
    sci <- gsub(" x ", " ", sci)  # drop hybrid " x " infix
    # keep at most the first two whitespace-separated words
    sci <- vapply(strsplit(sci, " "), function(words) {
      paste(words[1:min(2, length(words))], collapse = " ")
    }, character(1))
    levels(res$ScientificName) <- sci  # duplicate levels get merged here
    res_lv <- levels(res$TaxonomicResolution)
    levels(res$TaxonomicResolution)[res_lv %in%
      c("Subspecies", "Variety")] <- "Species"
  }
  res$SpeciesID <- res$ScientificName
  levels(res$SpeciesID) <- nameAlnum(levels(res$SpeciesID),
    capitalize = "mixed", collapse = "")
  res$SpeciesID <- droplevels(res$SpeciesID)
  res
}
cn1 <- c("ABMISite", "Year", "subunit", "site_year", "site_year_sub", "offgrid", "nearest")
cn2 <- c("SpeciesID", "CommonName", "ScientificName", "TaxonomicResolution",
"UniqueTaxonomicIdentificationNumber")
for (taxon in TAXA) {
cat("taxon:", taxon, "\n - pulling and normalizing data")
flush.console()
## vascular plants -------------------------
if (taxon == "vplants") {
tab <- "T15"
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW")
allowed_resolution <- c("Genus", "Species")
sub_max <- 4
res <- get_table(tab)
res0 <- res
save_list <- "res0"
colnames(res) <- gsub(" ", "", colnames(res))
res <- add_labels(res, sub_col=sub_col)
res <- normalize_species(res)
}
## mosses -------------------------
if (taxon == "mosses") {
tab1 <- "T19A" # T19A Moss Identification (2003-2008)
tab2 <- "T19B" # T19B Moss Identification (since 2009)
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW", "1ha")
allowed_resolution <- c("Genus", "Species")
sub_max <- 4
res1 <- get_table(tab1)
res2 <- get_table(tab2)
res01 <- res1
res02 <- res2
save_list <- c("res01", "res02")
colnames(res1) <- gsub(" ", "", colnames(res1))
res1[[sub_col]] <- as.factor("1ha")
res1 <- add_labels(res1, sub_col=sub_col)
res1 <- normalize_species(res1)
colnames(res2) <- gsub(" ", "", colnames(res2))
res2 <- add_labels(res2, sub_col=sub_col)
res2 <- normalize_species(res2)
tmp <- intersect(colnames(res1), colnames(res2))
res <- rbind(res1[,tmp], res2[,tmp])
}
## lichens -------------------------
if (taxon == "lichens") {
tab1 <- "T20A" # T20A Lichen Identification (2003-2008)
tab2 <- "T20B" # T20B Lichen Identification (since 2009)
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW", "1ha")
allowed_resolution <- c("Genus", "Species")
sub_max <- 4
res1 <- get_table(tab1)
res2 <- get_table(tab2)
res01 <- res1
res02 <- res2
save_list <- c("res01", "res02")
colnames(res1) <- gsub(" ", "", colnames(res1))
res1[[sub_col]] <- as.factor("1ha")
res1 <- add_labels(res1, sub_col=sub_col)
res1 <- normalize_species(res1)
colnames(res2) <- gsub(" ", "", colnames(res2))
res2 <- add_labels(res2, sub_col=sub_col)
res2 <- normalize_species(res2)
tmp <- intersect(colnames(res1), colnames(res2))
res <- rbind(res1[,tmp], res2[,tmp])
}
## mites -------------------------
if (taxon == "mites") {
tab <- "T24A"
sub_col <- "Quadrant"
allowed_subunits <- c("NE", "NW", "SE", "SW")
allowed_resolution <- c("Genus", "Species", "Subspecies")
sub_max <- 4
res <- get_table(tab)
res0 <- res
save_list <- "res0"
colnames(res) <- gsub(" ", "", colnames(res))
res <- add_labels(res, sub_col=sub_col)
res <- normalize_species(res, spgen_only=FALSE) # keep spp names as is
res$CommonName <- NA
}
cat(" --- OK\n - processing attributes and x-tabs")
flush.console()
## sample attributes
x_site_year <- nonDuplicated(res, res$site_year, TRUE)[,cn1]
x_site_year$subunit <- x_site_year$site_year_sub <- NULL
rownames(x_site_year) <- x_site_year$site_year
x_site_year_sub <- nonDuplicated(res, res$site_year_sub, TRUE)[,cn1]
rownames(x_site_year_sub) <- x_site_year_sub$site_year_sub
## species attributes
z <- nonDuplicated(res, res$SpeciesID, TRUE)[,c(cn2)]
rownames(z) <- z$SpeciesID
## sample-species cross tabs
y_site_year_sub <- Xtab(~ site_year_sub + SpeciesID, res,
cdrop=c("NONE","SNI", "VNA", "DNC", "PNA"))
y_site_year_sub01 <- y_site_year_sub
y_site_year_sub01[y_site_year_sub01 > 0] <- 1
if (taxon %in% c("vplants", "mosses", "lichens"))
y_site_year_sub <- y_site_year_sub01
## mefa bundles for sample/subunits
m_site_year_sub <- Mefa(y_site_year_sub, x_site_year_sub, z)
m_site_year_sub <- m_site_year_sub[,taxa(m_site_year_sub)$TaxonomicResolution %in%
allowed_resolution]
m_site_year_sub01 <- Mefa(y_site_year_sub01, x_site_year_sub, z)
m_site_year_sub01 <- m_site_year_sub01[,taxa(m_site_year_sub)$TaxonomicResolution %in%
allowed_resolution]
## aggregated cross tabs for binomial tables
tmp <- m_site_year_sub01[samp(m_site_year_sub01)$subunit %in% allowed_subunits]
nn <- sum_by(rep(1, nrow(tmp)), droplevels(samp(tmp)$site_year))
y_site_year <- groupSums(xtab(tmp), 1, droplevels(samp(tmp)$site_year))
stopifnot(max(y_site_year) == sub_max)
## aggregated mefa bundles for samples
m_site_year <- Mefa(y_site_year, x_site_year, z)
samp(m_site_year)$nQuadrant <- nn[match(rownames(m_site_year), rownames(nn)),"by"]
## csv view
out_site_year <- data.frame(samp(m_site_year), as.matrix(xtab(m_site_year)))
out_site_year_sub <- data.frame(samp(m_site_year_sub), as.matrix(xtab(m_site_year_sub)))
out_species <- taxa(m_site_year)
cat(" --- OK\n - saving files")
flush.console()
## save raw input
save(list=save_list, file=file.path(ROOT, "data", "raw", "species",
paste0(taxon, "_", DATE, ".Rdata")))
## save bundles
save(m_site_year, m_site_year_sub, file=file.path(ROOT, "data", "inter", "species",
paste0(taxon, "_", DATE, ".Rdata")))
## write csv & binary
write.csv(out_site_year, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_SiteBinom_", DATE, ".csv")))
write.csv(out_site_year_sub, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_Quadrant_", DATE, ".csv")))
write.csv(out_species, row.names=FALSE,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_Species_", DATE, ".csv")))
save(out_site_year, out_site_year_sub, out_species,
file=file.path(ROOT, "data", "analysis", "species",
paste0(taxon, "_out_", DATE, ".Rdata")))
## vascular plants common/dominant
if (taxon == "vplants") {
res$comdom <- res$Present
levels(res$comdom)[!(levels(res$comdom) %in% c("Dominant", "Common"))] <- "Uncommon"
table(res$comdom)
xt0 <- Xtab(~ site_year_sub + SpeciesID + comdom, res,
cdrop=c("NONE","SNI", "VNA", "DNC", "PNA"))
xt1 <- xt0$Uncommon
xt1[xt1>0] <- 1
xt10 <- xt0$Common * 10
xt10[xt10>0] <- 10
xt100 <- xt0$Dominant * 100
xt100[xt100>0] <- 100
y_comdom <- xt1
y_comdom[xt10>0] <- xt10[xt10>0]
y_comdom[xt100>0] <- xt100[xt100>0]
save(y_comdom, file=file.path(ROOT, "data", "inter", "species",
paste0(taxon, "_comdom_", DATE, ".Rdata")))
}
cat(" --- OK\n")
flush.console()
}
## habitat elements
## Lookup of habitat-element tables to pull, keyed by ABMI table code:
##   name - human-readable label (progress messages)
##   slug - file-name token for the exported CSV
##   sub  - name of the subunit column, or NULL when the table has none
HEtabs <- list(
    "T11" = list(
        name="Canopy cover",
        slug="canopy-cover",
        sub="Sub-ordinaltransect"),
    "T12B" = list(
        name="Cover layers",
        slug="cover-layers",
        sub="Quadrant"),
    "T08" = list(
        name="CWD",
        slug="cwd",
        sub="Transect"),
    "T01D" = list(
        name="Ground cover",
        slug="ground-cover",
        sub=NULL),
    "T25" = list(
        name="Mineral soil",
        slug="mineral-soil",
        sub="Quadrant"),
    "T02A" = list(
        name="Surface substrate",
        slug="surface-substrate",
        sub=NULL),
    "T09" = list(
        name="Trees snags",
        slug="trees-snags",
        sub="Quadrant"))
## Pull each habitat-element table, label it, keep it in memory, and
## export one date-stamped CSV per table.
HabElem <- list()
for (i in seq_along(HEtabs)) {
    cat("* Pulling ", HEtabs[[i]]$name, "...")
    res <- get_table(names(HEtabs)[i])
    # strip blanks from column names so they are valid R identifiers
    colnames(res) <- gsub(" ", "", colnames(res))
    if (is.null(HEtabs[[i]]$sub)) {
        # table has no subunit column: add a constant placeholder ("DNC")
        # so add_labels() can treat all tables the same way
        subcol <- "sub_col"
        res$sub_col <- rep("DNC", nrow(res))
    } else {
        subcol <- HEtabs[[i]]$sub
    }
    res <- add_labels(res, sub_col=subcol)
    HabElem[[HEtabs[[i]]$slug]] <- res
    write.csv(res, row.names=FALSE,
        file=file.path(ROOT, "data", "analysis", "species",
        paste0("habitatelements_", HEtabs[[i]]$slug, "_", DATE, ".csv")))
    cat(" OK\n")
}
save(HabElem,
file=file.path(ROOT, "data", "analysis", "species",
paste0("habitatelements_out_", DATE, ".Rdata")))
if (FALSE) {
tn <- c("T19A Moss Identification (2003-2008)",
"T19B Moss Identification (since 2009)",
"T20A Lichen Identification (2003-2008)",
"T20B Lichen Identification (since 2009)",
"T24A Soil Arthropods (Mites) Identification",
"T15 Vascular Plants",
"T26A Breeding Birds",
"T26B Breeding Birds (ARUs)",
"T26C ARU Deployment and Retrieval",
"T26D Breeding Birds (ARU) Abiotic")
names(tn) <- tn
tn <- sapply(strsplit(tn, " "), "[[", 1)
x <- list()
for (tt in tn) {
cat(tt, "\n");flush.console()
try(x[[tt]] <- get_table(table = tt))
}
lapply(x, colnames)
lapply(x, function(z) max(z$Year))
}
|
testlist <- list(score = NULL, id = NULL, item_score = integer(0), person_id = c(-61696L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::im_booklet_score,testlist)
str(result)
|
/dexterMST/inst/testfiles/im_booklet_score/AFL_im_booklet_score/im_booklet_score_valgrind_files/1615945831-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 294
|
r
|
# Auto-generated fuzz/valgrind regression input for the internal C++ entry
# point dexterMST:::im_booklet_score (exercises it with NULL score/id,
# an empty item_score vector, and a person_id vector with one extreme value).
testlist <- list(score = NULL, id = NULL, item_score = integer(0), person_id = c(-61696L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::im_booklet_score,testlist)
# print the structure so a crash or shape change is visible in the test log
str(result)
|
#' Modified from: "https://github.com/rstudio/keras/blob/master/vignettes/
#' examples/cifar10_cnn.R"
#'
#' Train a simple deep CNN on the CIFAR10 small images dataset.
#'
#' It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50
#' epochs, though it is still underfitting at that point.
library(keras)
library(azuremlsdk)
# Parameters --------------------------------------------------------------
args <- commandArgs(trailingOnly = TRUE)
batch_size <- as.numeric(args[2])
log_metric_to_run("batch_size", batch_size)
epochs <- as.numeric(args[4])
log_metric_to_run("epochs", epochs)
lr <- as.numeric(args[6])
log_metric_to_run("lr", lr)
decay <- as.numeric(args[8])
log_metric_to_run("decay", decay)
data_augmentation <- TRUE
# Data Preparation --------------------------------------------------------
# See ?dataset_cifar10 for more info
cifar10 <- dataset_cifar10()
# Feature scale RGB values in test and train inputs
x_train <- cifar10$train$x/255
x_test <- cifar10$test$x/255
y_train <- to_categorical(cifar10$train$y, num_classes = 10)
y_test <- to_categorical(cifar10$test$y, num_classes = 10)
# Defining Model ----------------------------------------------------------
# Initialize sequential model
model <- keras_model_sequential()
model %>%
# Start with hidden 2D convolutional layer being fed 32x32 pixel images
layer_conv_2d(
filter = 32, kernel_size = c(3,3), padding = "same",
input_shape = c(32, 32, 3)
) %>%
layer_activation("relu") %>%
# Second hidden layer
layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
layer_activation("relu") %>%
# Use max pooling
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(0.25) %>%
# 2 additional hidden 2D convolutional layers
layer_conv_2d(filter = 32, kernel_size = c(3, 3), padding = "same") %>%
layer_activation("relu") %>%
layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
layer_activation("relu") %>%
# Use max pooling once more
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(0.25) %>%
# Flatten max filtered output into feature vector
# and feed into dense layer
layer_flatten() %>%
layer_dense(512) %>%
layer_activation("relu") %>%
layer_dropout(0.5) %>%
# Outputs from dense layer are projected onto 10 unit output layer
layer_dense(10) %>%
layer_activation("softmax")
opt <- optimizer_rmsprop(lr, decay)
model %>%
compile(loss = "categorical_crossentropy",
optimizer = opt,
metrics = "accuracy"
)
# Training ----------------------------------------------------------------
if (!data_augmentation){
model %>%
fit(x_train,
y_train,
batch_size = batch_size,
epochs = epochs,
validation_data = list(x_test, y_test),
shuffle = TRUE
)
} else {
datagen <- image_data_generator(rotation_range = 20,
width_shift_range = 0.2,
height_shift_range = 0.2,
horizontal_flip = TRUE
)
datagen %>% fit_image_data_generator(x_train)
results <- evaluate(model, x_train, y_train, batch_size)
log_metric_to_run("Loss", results[[1]])
cat("Loss: ", results[[1]], "\n")
cat("Accuracy: ", results[[2]], "\n")
}
|
/vignettes/hyperparameter-tune-with-keras/cifar10_cnn.R
|
permissive
|
Rajesh16702/azureml-sdk-for-r
|
R
| false
| false
| 3,308
|
r
|
#' Modified from: "https://github.com/rstudio/keras/blob/master/vignettes/
#' examples/cifar10_cnn.R"
#'
#' Train a simple deep CNN on the CIFAR10 small images dataset.
#'
#' It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50
#' epochs, though it is still underfitting at that point.
library(keras)
library(azuremlsdk)
# Parameters --------------------------------------------------------------
# Arguments are assumed to arrive as "--flag value" pairs in a fixed order
# (batch_size, epochs, lr, decay), hence the even indices below.
args <- commandArgs(trailingOnly = TRUE)
batch_size <- as.numeric(args[2])
log_metric_to_run("batch_size", batch_size)
epochs <- as.numeric(args[4])
log_metric_to_run("epochs", epochs)
lr <- as.numeric(args[6])
log_metric_to_run("lr", lr)
decay <- as.numeric(args[8])
log_metric_to_run("decay", decay)
data_augmentation <- TRUE
# Data Preparation --------------------------------------------------------
# See ?dataset_cifar10 for more info
cifar10 <- dataset_cifar10()
# Feature scale RGB values in test and train inputs
x_train <- cifar10$train$x/255
x_test <- cifar10$test$x/255
y_train <- to_categorical(cifar10$train$y, num_classes = 10)
y_test <- to_categorical(cifar10$test$y, num_classes = 10)
# Defining Model ----------------------------------------------------------
# Initialize sequential model
model <- keras_model_sequential()
model %>%
  # Start with hidden 2D convolutional layer being fed 32x32 pixel images
  layer_conv_2d(
    filter = 32, kernel_size = c(3,3), padding = "same",
    input_shape = c(32, 32, 3)
  ) %>%
  layer_activation("relu") %>%
  # Second hidden layer
  layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
  layer_activation("relu") %>%
  # Use max pooling
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(0.25) %>%
  # 2 additional hidden 2D convolutional layers
  layer_conv_2d(filter = 32, kernel_size = c(3, 3), padding = "same") %>%
  layer_activation("relu") %>%
  layer_conv_2d(filter = 32, kernel_size = c(3, 3)) %>%
  layer_activation("relu") %>%
  # Use max pooling once more
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(0.25) %>%
  # Flatten max filtered output into feature vector
  # and feed into dense layer
  layer_flatten() %>%
  layer_dense(512) %>%
  layer_activation("relu") %>%
  layer_dropout(0.5) %>%
  # Outputs from dense layer are projected onto 10 unit output layer
  layer_dense(10) %>%
  layer_activation("softmax")
# BUG FIX: arguments must be named; passed positionally, `decay` landed in
# the `rho` slot (signature is optimizer_rmsprop(lr, rho, epsilon, decay, ...)).
opt <- optimizer_rmsprop(lr = lr, decay = decay)
model %>%
  compile(loss = "categorical_crossentropy",
          optimizer = opt,
          metrics = "accuracy"
  )
# Training ----------------------------------------------------------------
if (!data_augmentation){
  model %>%
    fit(x_train,
        y_train,
        batch_size = batch_size,
        epochs = epochs,
        validation_data = list(x_test, y_test),
        shuffle = TRUE
    )
} else {
  datagen <- image_data_generator(rotation_range = 20,
                                  width_shift_range = 0.2,
                                  height_shift_range = 0.2,
                                  horizontal_flip = TRUE
  )
  datagen %>% fit_image_data_generator(x_train)
  # BUG FIX: previously the model was evaluated without ever being trained
  # in this branch; train on augmented batches first.
  model %>%
    fit_generator(
      flow_images_from_data(x_train, y_train, datagen, batch_size = batch_size),
      steps_per_epoch = ceiling(nrow(x_train) / batch_size),
      epochs = epochs,
      validation_data = list(x_test, y_test)
    )
  results <- evaluate(model, x_train, y_train, batch_size)
  log_metric_to_run("Loss", results[[1]])
  log_metric_to_run("Accuracy", results[[2]])  # log accuracy to the run too
  cat("Loss: ", results[[1]], "\n")
  cat("Accuracy: ", results[[2]], "\n")
}
|
##' .onAttach
##'
##' A message shown when attaching the package, to make clear what the
##' main data objects are.
##' @title .onAttach
##' @param libname gssr
##' @param pkgname gssr
##' @return Message
##' @author Kieran Healy
##' @keywords internal
.onAttach <- function(libname, pkgname) {
packageStartupMessage("Package loaded. To attach the GSS data, type data(gss_all) at the console.\nFor the codebook, type data(gss_doc).\nFor the panel data and documentation, type e.g. data(gss_panel08_long) and data(gss_panel_doc).")
}
|
/R/onLoad.r
|
permissive
|
kjhealy/gssr
|
R
| false
| false
| 539
|
r
|
##' .onAttach
##'
##' A message shown when attaching the package, to make clear what the
##' main data objects are.
##' @title .onAttach
##' @param libname Path to the library the package was loaded from (unused).
##' @param pkgname Name of the package being attached (unused).
##' @return Called for its side effect; emits a startup message.
##' @author Kieran Healy
##' @keywords internal
.onAttach <- function(libname, pkgname) {
  # packageStartupMessage (not message) so users can suppress it with
  # suppressPackageStartupMessages()
  packageStartupMessage("Package loaded. To attach the GSS data, type data(gss_all) at the console.\nFor the codebook, type data(gss_doc).\nFor the panel data and documentation, type e.g. data(gss_panel08_long) and data(gss_panel_doc).")
}
|
library(shiny)
library(mcr)
library(shinydashboard)
library(rhandsontable)
library(rmarkdown)
shinyServer(function(input, output, session) {
datasetInput <- reactive({
# Statistical results should appear here
})
#text output for sens, spec, and overall agreement
output$sens <- renderText({
paste(round((input$TP / (input$TP + input$FN))*100, digits = 2), "%", sep = "")
})
output$spec <- renderText({
paste(round((input$TN / (input$TN + input$FP))*100, digits = 2), "%", sep = "")
})
output$overall <- renderText({
paste(round((input$TP+input$TN)/(input$TP + input$FP + input$TN + input$FN)*100, digits = 2), "%", sep = "")
})
#Q value functions
{
#functions for Q1, 2, and 3 for sens
sensQ1 <- reactive({
2*input$TP + 3.84
})
sensQ2 <- reactive({
1.96*sqrt(3.84+4*input$TP*input$FN/(input$TP+input$FN))
})
sensQ3 <- reactive({
2*(input$TP+input$FN)+7.68
})
#functions for Q1, 2, and 3 for spec
specQ1 <- reactive({
2*input$TN + 3.84
})
specQ2 <- reactive({
1.96*sqrt(3.84+4*input$TN*input$FP/(input$TN+input$FP))
})
specQ3 <- reactive({
2*(input$TN+input$FP)+7.68
})
#overall Q1, 2, and 3 values
overallQ1 <- reactive({
2*(input$TN + input$TP) + 3.84
})
overallQ2 <- reactive({
1.96*sqrt(3.84+4*(input$TP + input$TN)*(input$FP + input$FN)/(input$TP + input$FP + input$TN + input$FN))
})
overallQ3 <- reactive({
2*(input$TP + input$FP + input$TN + input$FN)+7.68
})
}
# text outputs for lo and hi limits
output$sens_lo <- renderText({
paste(round(100*((sensQ1()-sensQ2())/sensQ3()), digits = 2), "%", sep = "")
})
output$sens_hi <- renderText({
paste(round(100*((sensQ1()+sensQ2())/sensQ3()), digits = 2), "%", sep = "")
})
output$spec_lo <- renderText({
paste(round(100*((specQ1()-specQ2())/specQ3()), digits = 2), "%", sep = "")
})
output$spec_hi <- renderText({
paste(round(100*((specQ1()+specQ2())/specQ3()), digits = 2), "%", sep = "")
})
output$overall_lo <- renderText({
paste(round(100*((overallQ1()-overallQ2())/overallQ3()), digits = 2), "%", sep = "")
})
output$overall_hi <- renderText({
paste(round(100*((overallQ1()+overallQ2())/overallQ3()), digits = 2), "%", sep = "")
})
#Calculates the totals of each row and the table
{
#Total positive candidate tests
output$totposcand <- renderText({
input$TP + input$FP
})
#Total negative candidate tests
output$totnegcand <- renderText({
input$TN + input$FN
})
#Total positive comparative tests
output$totposcomp <- renderText({
input$TP + input$FN
})
#Total negative comparative tests
output$totnegcomp <- renderText({
input$TN + input$FP
})
#Total tests
output$tottest <- renderText({
input$TP + input$FP + input$TN + input$FN
})
}
output$downloadReport <- downloadHandler(
filename = function() {
paste(paste(input$m1,'vs.',input$m2, '@', Sys.Date()), sep = '.', switch(
input$format, PDF = 'pdf', HTML = 'html'
))
},
content = function(file) {
src <- normalizePath('report.Rmd')
owd <- setwd(tempdir())
on.exit(setwd(owd))
file.copy(src, 'report.Rmd')
out <- rmarkdown::render('report.Rmd', switch(
input$format,
PDF = pdf_document(), HTML = html_document(), Word = word_document()
))
file.rename(out, file)
}
)
})
|
/server.R
|
permissive
|
Natani16/method_compare
|
R
| false
| false
| 3,453
|
r
|
library(shiny)
library(mcr)
library(shinydashboard)
library(rhandsontable)
library(rmarkdown)
shinyServer(function(input, output, session) {
datasetInput <- reactive({
# Statistical results should appear here
})
#text output for sens, spec, and overall agreement
output$sens <- renderText({
paste(round((input$TP / (input$TP + input$FN))*100, digits = 2), "%", sep = "")
})
output$spec <- renderText({
paste(round((input$TN / (input$TN + input$FP))*100, digits = 2), "%", sep = "")
})
output$overall <- renderText({
paste(round((input$TP+input$TN)/(input$TP + input$FP + input$TN + input$FN)*100, digits = 2), "%", sep = "")
})
#Q value functions
{
#functions for Q1, 2, and 3 for sens
sensQ1 <- reactive({
2*input$TP + 3.84
})
sensQ2 <- reactive({
1.96*sqrt(3.84+4*input$TP*input$FN/(input$TP+input$FN))
})
sensQ3 <- reactive({
2*(input$TP+input$FN)+7.68
})
#functions for Q1, 2, and 3 for spec
specQ1 <- reactive({
2*input$TN + 3.84
})
specQ2 <- reactive({
1.96*sqrt(3.84+4*input$TN*input$FP/(input$TN+input$FP))
})
specQ3 <- reactive({
2*(input$TN+input$FP)+7.68
})
#overall Q1, 2, and 3 values
overallQ1 <- reactive({
2*(input$TN + input$TP) + 3.84
})
overallQ2 <- reactive({
1.96*sqrt(3.84+4*(input$TP + input$TN)*(input$FP + input$FN)/(input$TP + input$FP + input$TN + input$FN))
})
overallQ3 <- reactive({
2*(input$TP + input$FP + input$TN + input$FN)+7.68
})
}
# text outputs for lo and hi limits
output$sens_lo <- renderText({
paste(round(100*((sensQ1()-sensQ2())/sensQ3()), digits = 2), "%", sep = "")
})
output$sens_hi <- renderText({
paste(round(100*((sensQ1()+sensQ2())/sensQ3()), digits = 2), "%", sep = "")
})
output$spec_lo <- renderText({
paste(round(100*((specQ1()-specQ2())/specQ3()), digits = 2), "%", sep = "")
})
output$spec_hi <- renderText({
paste(round(100*((specQ1()+specQ2())/specQ3()), digits = 2), "%", sep = "")
})
output$overall_lo <- renderText({
paste(round(100*((overallQ1()-overallQ2())/overallQ3()), digits = 2), "%", sep = "")
})
output$overall_hi <- renderText({
paste(round(100*((overallQ1()+overallQ2())/overallQ3()), digits = 2), "%", sep = "")
})
#Calculates the totals of each row and the table
{
#Total positive candidate tests
output$totposcand <- renderText({
input$TP + input$FP
})
#Total negative candidate tests
output$totnegcand <- renderText({
input$TN + input$FN
})
#Total positive comparative tests
output$totposcomp <- renderText({
input$TP + input$FN
})
#Total negative comparative tests
output$totnegcomp <- renderText({
input$TN + input$FP
})
#Total tests
output$tottest <- renderText({
input$TP + input$FP + input$TN + input$FN
})
}
output$downloadReport <- downloadHandler(
filename = function() {
paste(paste(input$m1,'vs.',input$m2, '@', Sys.Date()), sep = '.', switch(
input$format, PDF = 'pdf', HTML = 'html'
))
},
content = function(file) {
src <- normalizePath('report.Rmd')
owd <- setwd(tempdir())
on.exit(setwd(owd))
file.copy(src, 'report.Rmd')
out <- rmarkdown::render('report.Rmd', switch(
input$format,
PDF = pdf_document(), HTML = html_document(), Word = word_document()
))
file.rename(out, file)
}
)
})
|
\name{bigGP.exit}
\alias{bigGP.exit}
\alias{bigGP.quit}
\title{
Exit bigGP Environment
}
\description{
\code{bigGP.exit} terminates the package's execution environment and
detaches the package. After that, you can still work in R.
\code{bigGP.quit} terminates the package's execution environment and quits R.
}
\usage{
bigGP.exit()
bigGP.quit(save = "no")
}
\arguments{
\item{save}{
the same argument as \code{quit}, but defaulting to "no".}
}
\details{
These functions should be used to safely leave the \pkg{"bigGP"}
execution context, specifically MPI, when R is started via MPI such as
by calling mpirun or analogous executables. They close the slave
processes and then invoke either \code{mpi.exit} or \code{mpi.quit}.
If leaving R altogether, one simply uses \code{bigGP.quit}.
}
\seealso{
\code{\link{mpi.exit}}
\code{\link{mpi.quit}}
}
\keyword{utilities}
|
/bigGP/man/bigGP.exit.Rd
|
no_license
|
paciorek/bigGP
|
R
| false
| false
| 892
|
rd
|
\name{bigGP.exit}
\alias{bigGP.exit}
\alias{bigGP.quit}
\title{
Exit bigGP Environment
}
\description{
\code{bigGP.exit} terminates the package's execution environment and
detaches the package. After that, you can still work in R.
\code{bigGP.quit} terminates the package's execution environment and quits R.
}
\usage{
bigGP.exit()
bigGP.quit(save = "no")
}
\arguments{
\item{save}{
the same argument as \code{quit}, but defaulting to "no".}
}
\details{
These functions should be used to safely leave the \pkg{"bigGP"}
execution context, specifically MPI, when R is started via MPI such as
by calling mpirun or analogous executables. They close the slave
processes and then invoke either \code{mpi.exit} or \code{mpi.quit}.
If leaving R altogether, one simply uses \code{bigGP.quit}.
}
\seealso{
\code{\link{mpi.exit}}
\code{\link{mpi.quit}}
}
\keyword{utilities}
|
xpathSApply(rootNode, "//name", xmlValue)
##> xpathSApply(rootNode, "//name", xmlValue)
##[1] "Belgian Waffles" "Strawberry Belgian Waffles"
##[3] "Berry-Berry Belgian Waffles" "French Toast"
##[5] "Homestyle Breakfast"
## load xml data from vebsite
## Notice!!
## To access https webpage use RCurl package
##> library(RCurl)
##> xdata <- getURL(fileUrl)
##> doc <- xmlTreeParse(xdata, useInternalNodes = TRUE)
##> rootNode <- xmlRoot(doc)
##> xmlName(rootNode)
fileUrl <- "https://www.espn.com/nfl/team/_/name/bal/baltimore-ravens"
xdata <- getURL(fileUrl)
doc <- htmlTreeParse(xdata, useInternal = TRUE)
scores <- xpathSApply(doc, "//li[@class='score']", xmlValue)
teams <- xpathSApply(doc, "//li[@class='team-name']", xmlValue)
|
/XMLreading.R
|
no_license
|
NikitaSoftware/datasciencecoursera
|
R
| false
| false
| 762
|
r
|
## Course notes: reading XML and scraping HTML with the XML package.
##
## The first example assumed a `rootNode` built from an XML document parsed
## earlier in the lesson; it is kept commented out so this script runs
## standalone (BUG FIX: as active code it errored immediately on the
## undefined `rootNode`):
## xpathSApply(rootNode, "//name", xmlValue)
##> xpathSApply(rootNode, "//name", xmlValue)
##[1] "Belgian Waffles" "Strawberry Belgian Waffles"
##[3] "Berry-Berry Belgian Waffles" "French Toast"
##[5] "Homestyle Breakfast"
## load xml data from website
## Notice!!
## To access https webpage use RCurl package
##> library(RCurl)
##> xdata <- getURL(fileUrl)
##> doc <- xmlTreeParse(xdata, useInternalNodes = TRUE)
##> rootNode <- xmlRoot(doc)
##> xmlName(rootNode)
# BUG FIX: load the packages the script actually uses (getURL is from RCurl;
# htmlTreeParse/xpathSApply are from XML); previously nothing was attached.
library(XML)
library(RCurl)
fileUrl <- "https://www.espn.com/nfl/team/_/name/bal/baltimore-ravens"
xdata <- getURL(fileUrl)  # RCurl handles the https fetch
# spell the argument out in full instead of relying on partial matching
doc <- htmlTreeParse(xdata, useInternalNodes = TRUE)
scores <- xpathSApply(doc, "//li[@class='score']", xmlValue)
teams <- xpathSApply(doc, "//li[@class='team-name']", xmlValue)
|
#' @describeIn QUBIC Performs a QUalitative BIClustering.
#'
#' @usage qubic(i, R = FALSE, F = FALSE, d = FALSE, f = 0.85, k = 13, c = 0.90, o = 5000)
#'
#' @importFrom Rcpp evalCpp
#' @export
qubic <- function(i, N = FALSE, R = FALSE, F = FALSE, d = FALSE, D = FALSE, n = FALSE, f = 0.85, k = 13, c = 0.90, o = 5000) {
vec <- c("./qubic", "-i", i)
if(N) vec <- c(vec, "-N")
if(R) vec <- c(vec, "-R")
if(F) vec <- c(vec, "-F")
if(d) vec <- c(vec, "-d")
if(D) vec <- c(vec, "-D")
if(n) vec <- c(vec, "-n")
vec <- c(vec, "-f", as.character(f))
vec <- c(vec, "-k", as.character(k))
vec <- c(vec, "-c", as.character(c))
vec <- c(vec, "-o", as.character(o))
unloadNamespace("BRIC")
ret <- .main(vec)
if(ret == 42) return(BRIC::qubic(paste0(i, ".chars"), d = TRUE))
return (ret)
}
.onUnload <- function (libpath) {
library.dynam.unload("BRIC", libpath)
}
|
/R/bric.R
|
no_license
|
OSU-BMBL/BRIC
|
R
| false
| false
| 889
|
r
|
#' @describeIn QUBIC Performs a QUalitative BIClustering.
#'
#' @usage qubic(i, N = FALSE, R = FALSE, F = FALSE, d = FALSE, D = FALSE,
#'   n = FALSE, f = 0.85, k = 13, c = 0.90, o = 5000)
#'
#' @param i Path to the input data file.
#' @param N,R,F,d,D,n Logical command-line switches forwarded to the
#'   compiled QUBIC binary as \code{-N}, \code{-R}, \code{-F}, \code{-d},
#'   \code{-D}, \code{-n} when \code{TRUE}.
#' @param f,k,c,o Numeric tuning options forwarded as \code{-f}, \code{-k},
#'   \code{-c}, \code{-o}.
#'
#' @importFrom Rcpp evalCpp
#' @export
qubic <- function(i, N = FALSE, R = FALSE, F = FALSE, d = FALSE, D = FALSE, n = FALSE, f = 0.85, k = 13, c = 0.90, o = 5000) {
  # Build an argv-style character vector for the compiled entry point.
  vec <- c("./qubic", "-i", i)
  if(N) vec <- c(vec, "-N")
  if(R) vec <- c(vec, "-R")
  if(F) vec <- c(vec, "-F")
  if(d) vec <- c(vec, "-d")
  if(D) vec <- c(vec, "-D")
  if(n) vec <- c(vec, "-n")
  vec <- c(vec, "-f", as.character(f))
  vec <- c(vec, "-k", as.character(k))
  vec <- c(vec, "-c", as.character(c))
  vec <- c(vec, "-o", as.character(o))
  # NOTE(review): unloading the package's own namespace before calling the
  # compiled .main looks intentional (fresh state for the C++ side) -- confirm.
  unloadNamespace("BRIC")
  ret <- .main(vec)
  # Exit code 42 appears to signal that a discretized "<i>.chars" file was
  # written; re-run on it in discrete mode (assumption -- verify in C++ source).
  if(ret == 42) return(BRIC::qubic(paste0(i, ".chars"), d = TRUE))
  return (ret)
}
# Unload the package's compiled shared library when the namespace is unloaded.
.onUnload <- function (libpath) {
  library.dynam.unload("BRIC", libpath)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acquisition.R
\name{prob_improve}
\alias{prob_improve}
\alias{exp_improve}
\alias{conf_bound}
\title{Acquisition function for scoring parameter combinations}
\usage{
prob_improve(trade_off = 0, eps = .Machine$double.eps)
exp_improve(trade_off = 0, eps = .Machine$double.eps)
conf_bound(kappa = 0.1)
}
\arguments{
\item{trade_off}{A number or function that describes the trade-off between
exploitation and exploration. Smaller values favor exploitation.}
\item{eps}{A small constant to avoid division by zero.}
\item{kappa}{A positive number (or function) that corresponds to the
multiplier of the standard deviation in a confidence bound (e.g. 1.96 in
normal-theory 95 percent confidence intervals). Smaller values lean more
towards exploitation.}
}
\value{
An object of class \code{prob_improve}, \code{exp_improve}, or \code{conf_bounds}
along with an extra class of \code{acquisition_function}.
}
\description{
These functions can be used to score candidate tuning parameter combinations
as a function of their predicted mean and variation.
}
\details{
The acquisition functions often combine the mean and variance
predictions from the Gaussian process model into an objective to be
optimized.
For this documentation, we assume that the metric in question is better when
\emph{maximized} (e.g. accuracy, the coefficient of determination, etc).
The expected improvement of a point \code{x} is based on the predicted mean and
variation at that point as well as the current best value (denoted here as
\code{x_b}). The vignette linked below contains the formulas for this acquisition
function. When the \code{trade_off} parameter is greater than zero, the
acquisition function will down-play the effect of the \emph{mean} prediction and
give more weight to the variation. This has the effect of searching for new
parameter combinations that are in areas that have yet to be sampled.
Note that for \code{exp_improve()} and \code{prob_improve()}, the \code{trade_off} value is
in the units of the outcome. The functions are parameterized so that the
\code{trade_off} value should always be non-negative.
The confidence bound function does not take into account the current best
results in the data.
If a function is passed to \code{exp_improve()} or \code{prob_improve()}, the function
can have multiple arguments but only the first (the current iteration number)
is given to the function. In other words, the function argument should have
defaults for all but the first argument. See \code{expo_decay()} as an example of
a function.
}
\examples{
prob_improve()
}
|
/man/prob_improve.Rd
|
no_license
|
NanaAkwasiAbayieBoateng/tune
|
R
| false
| true
| 2,650
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acquisition.R
\name{prob_improve}
\alias{prob_improve}
\alias{exp_improve}
\alias{conf_bound}
\title{Acquisition function for scoring parameter combinations}
\usage{
prob_improve(trade_off = 0, eps = .Machine$double.eps)
exp_improve(trade_off = 0, eps = .Machine$double.eps)
conf_bound(kappa = 0.1)
}
\arguments{
\item{trade_off}{A number or function that describes the trade-off between
exploitation and exploration. Smaller values favor exploitation.}
\item{eps}{A small constant to avoid division by zero.}
\item{kappa}{A positive number (or function) that corresponds to the
multiplier of the standard deviation in a confidence bound (e.g. 1.96 in
normal-theory 95 percent confidence intervals). Smaller values lean more
towards exploitation.}
}
\value{
An object of class \code{prob_improve}, \code{exp_improve}, or \code{conf_bound}
along with an extra class of \code{acquisition_function}.
}
\description{
These functions can be used to score candidate tuning parameter combinations
as a function of their predicted mean and variation.
}
\details{
The acquisition functions often combine the mean and variance
predictions from the Gaussian process model into an objective to be
optimized.
For this documentation, we assume that the metric in question is better when
\emph{maximized} (e.g. accuracy, the coefficient of determination, etc).
The expected improvement of a point \code{x} is based on the predicted mean and
variation at that point as well as the current best value (denoted here as
\code{x_b}). The vignette linked below contains the formulas for this acquisition
function. When the \code{trade_off} parameter is greater than zero, the
acquisition function will down-play the effect of the \emph{mean} prediction and
give more weight to the variation. This has the effect of searching for new
parameter combinations that are in areas that have yet to be sampled.
Note that for \code{exp_improve()} and \code{prob_improve()}, the \code{trade_off} value is
in the units of the outcome. The functions are parameterized so that the
\code{trade_off} value should always be non-negative.
The confidence bound function does not take into account the current best
results in the data.
If a function is passed to \code{exp_improve()} or \code{prob_improve()}, the function
can have multiple arguments but only the first (the current iteration number)
is given to the function. In other words, the function argument should have
defaults for all but the first argument. See \code{expo_decay()} as an example of
a function.
}
\examples{
prob_improve()
}
|
#To predict the churn rate of the customer
#install packages
install.packages("dplyr")
#load the library
library('dplyr') # data manipulation
#set the working directory
setwd("E:\\IMS Course Content\\Course Content\\Data Science Term 1\\Capstone Project\\Data")
#read the data working directory
Tele.Train <- read.csv('Train_tele.csv', header = TRUE, stringsAsFactors = F)
Tele.Test <- read.csv('Test_tele.csv', header = TRUE, stringsAsFactors = F)
#------------------------------------------------------------------------------------------------------------------------------------------
#Exploratory Data Analysis
#Summary of the data
summary(Tele.Train)
summary(Tele.Test)
str(Tele.Train)
str(Tele.Test)
#Bind the data Train and Test
Tele.Full <- bind_rows(Tele.Train, Tele.Test)
summary(Tele.Full)
str(Tele.Full)
#Filling the NA for Number Vmail Msgs with 0 as the customer doenst have a voice mail plan
Tele.Full[79,]
Tele.Full[79,]$number.vmail.messages <- 0
#Filling the Mean NA for Total Day Calls
mean(Tele.Full$total.day.calls)
TotDayCall <- Tele.Full[c(-30,-90),8:9]
mean(TotDayCall$total.day.calls)
Tele.Full[c(30,90),8] <- 100
#Filling the Mean NA for Total Day Charge based on State
TotDayCharge <- Tele.Full[,c(1,9,17)]
mean(TotDayCharge$total.day.charge, na.rm = TRUE)
#Total day Charge for IN State
TotDayChargeIN <- Tele.Full[Tele.Full$state=='IN',c(1,9)]
mean(TotDayChargeIN$total.day.charge, na.rm = TRUE)
Tele.Full[11,9] <- 33.57
#Total day Charge for OK State
TotDayChargeOK <- Tele.Full[Tele.Full$state=='OK',c(1,9)]
mean(TotDayChargeOK$total.day.charge, na.rm = TRUE)
Tele.Full[35,9] <- 30.59
#Total day Charge for NM State
TotDayChargeNM <- Tele.Full[Tele.Full$state=='NM',c(1,9)]
mean(TotDayChargeNM$total.day.charge, na.rm = TRUE)
Tele.Full[73,9] <- 28.92
#Filling the NA for Total Eve
TotEve <- Tele.Full[,c(1,10,11)]
#Filling NA for Total Eve Call for MA State
TotEveCallMA <- TotEve[TotEve$state=='MA',c(1,2)]
mean(TotEveCallMA$total.eve.calls, na.rm = TRUE)
mean(Tele.Full[Tele.Full$state=='MA',10], na.rm = TRUE)
Tele.Full[7,10]
Tele.Full[7,10] <- 98
#Filling NA for Total Eve Call for OR State
mean(Tele.Full[Tele.Full$state=='OR',10], na.rm = TRUE)
Tele.Full[46,10]
Tele.Full[46,10] <- 98
#Filling NA for Total Eve Charge for SC State
mean(Tele.Full[Tele.Full$state=='SC',11], na.rm = TRUE)
Tele.Full[24,11]
Tele.Full[24,11] <- 17.73
#Filling NA for Total Eve Charge for MD State
mean(Tele.Full[Tele.Full$state=='MD',11], na.rm = TRUE)
Tele.Full[42,11]
Tele.Full[42,11] <- 16.65
#Filling NA for Total Eve Charge for OR State
mean(Tele.Full[Tele.Full$state=='OR',11], na.rm = TRUE)
Tele.Full[82,11]
Tele.Full[82,11] <- 16.65
#Filling NA for Total Eve Charge for ID State
mean(Tele.Full[Tele.Full$state=='ID',11], na.rm = TRUE)
Tele.Full[100,11]
Tele.Full[100,11] <- 16.52
#Filling the NA rows for Total night based on state wise
#Filling the NA for Total Night Calls for IA State and 415 Area Code
mean(Tele.Full[Tele.Full$state=='IA' & Tele.Full$area.code==415,12], na.rm = TRUE)
Tele.Full[15,12]
Tele.Full[15,12] <- 97
#Filling the NA for Total Night Calls for WI State and 415 Area Code
mean(Tele.Full[Tele.Full$state=='WI' & Tele.Full$area.code==415,12], na.rm = TRUE)
Tele.Full[59,12]
Tele.Full[59,12] <- 101
#Filling the NA for Total Night Calls for LA State and 415 Area Code
mean(Tele.Full[Tele.Full$state=='LA' & Tele.Full$area.code==415,12], na.rm = TRUE)
Tele.Full[92,12]
Tele.Full[92,12] <- 98
#Filling NA for Total Night Charge
mean(Tele.Full[Tele.Full$state=='IA' & Tele.Full$area.code==510,13], na.rm = TRUE)
Tele.Full[101,13]
Tele.Full[101,13] <- 9.63
#Filling the NA Values for Total International
#Filling the NA for Total International Call for SC State and Area Code
mean(Tele.Full[Tele.Full$state=='SC' & Tele.Full$area.code==415,14], na.rm = TRUE)
Tele.Full[24,14]
Tele.Full[24,14] <- 4
#Filling the NA for Total International Call for WI State and Area Code
mean(Tele.Full[Tele.Full$state=='WI' & Tele.Full$area.code==415,14], na.rm = TRUE)
Tele.Full[59,14]
Tele.Full[59,14] <- 4
#Filling the NA for Total International Charge for IN State and Area Code
mean(Tele.Full[Tele.Full$state=='IN' & Tele.Full$area.code==415,15], na.rm = TRUE)
Tele.Full[11,15]
Tele.Full[11,15] <- 2.77
#Filling the NA for Total International Charge for VT State and Area Code
mean(Tele.Full[Tele.Full$state=='VT' & Tele.Full$area.code==510,15], na.rm = TRUE)
Tele.Full[18,15]
Tele.Full[18,15] <- 2.37
#Filling the NA for Total International Charge for VT State and Area Code
mean(Tele.Full[Tele.Full$state=='WY' & Tele.Full$area.code==415,15], na.rm = TRUE)
Tele.Full[55,15]
Tele.Full[55,15] <- 2.75
#Filling the NA for Customer Service Call
mean(Tele.Full[Tele.Full$state=='NM' & Tele.Full$area.code==510,16], na.rm = TRUE)
Tele.Full[73,16]
Tele.Full[73,16] <- 1
#------------------------------------------------------------------------------------------------------------------------------------------
# #converting Variables to factors..
# #Converting Internation Plan to factors
# Tele.Full$international.plan <- ifelse(Tele.Full$international.plan == 'yes', 1, 0)
# #Tele.Full$international.plan <- as.factor(Tele.Full$international.plan)
# Tele.Full$international.plan <- as.numeric(Tele.Full$international.plan)
#
# #Converting Voice Mail Plan to factors
# Tele.Full$voice.mail.plan <- ifelse(Tele.Full$voice.mail.plan == 'yes', 1, 0)
# #Tele.Full$voice.mail.plan <- as.factor(Tele.Full$voice.mail.plan)
# Tele.Full$voice.mail.plan <- as.numeric(Tele.Full$voice.mail.plan)
#
# #Converting Churn to factors
# Tele.Full$churn <- ifelse(Tele.Full$churn == TRUE, 1, 0)
#
# #Converting State to factor and then numbers
# Tele.Full$state <- as.factor(Tele.Full$state)
# Tele.Full$state <- as.numeric(Tele.Full$state)
#
# #Converting Area Code to factor and then numbers
# Tele.Full$area.code <- as.factor(Tele.Full$area.code)
# Tele.Full$area.code <- as.numeric(Tele.Full$area.code)
#
# #converting Account length to numeric
# Tele.Full$account.length <- as.numeric(Tele.Full$account.length)
#-------------------------------------------------------------------------------------------------------------------------------------
#Create the Training data
TrainTele <- Tele.Full[1:2850,]
summary(TrainTele)
View(TrainTele)
#---------------------------------------------------------------------------------------------------------------------------------------
#Visualisation of the Training Dataset with GGPLOT2
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
#------------------------------------------------------------------------------------------------------------------------------------
#Create the Test dataset
TestTele <- Tele.Full[2851:3333,]
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Logistic regression Approach to find the churn rate of the telecom Customer
install.packages('stringi')
install.packages('caret')
library('caret')
library('stringi')
#split the Training date for Train and Test
set.seed(1234)
inTrain = createDataPartition(TrainTele$churn, p=0.8, list = FALSE)
TrainingData = TrainTele[inTrain,]
TestingData = TrainTele[-inTrain,]
str(TrainingData)
str(TestingData)
#To find the Dimension of the Data
dim(TrainingData)
dim(TestingData)
summary(Tele.Full)
#To check for the Mulitcolinearity of the Variables in the Dataset
# multicollinearity a phenomenon in which one predictor variable in a multiple regression model can be linearly predicted from the others with a
#substantial degree of accuracy.
#variance inflation factor (VIF) is the ratio of variance in a model with multiple terms, divided by the variance of a model with one term alone
library('usdm')
vifstep(Tele.Full[,c(-4,-17)], th=2)
#Akaike Information Criterion
#is an estimator of the relative quality of statistical models for a given set of data. Given a collection of models for the data, AIC estimates
#the quality of each model, relative to each of the other models. Thus, AIC provides a means for model selection.
#To check for the correlation of the variable and the graph
#dependence or association is any statistical relationship, whether causal or not, between two random variables or bivariate data.
#Correlation is any of a broad class of statistical relationships involving dependence, though in common usage it most often refers to how close
#two variables are to having a linear relationship with each other
library('corrplot')
cr <- cor(Tele.Full[,c(-4,-17)])
corrplot(cr, type = "lower")
#Fitting the model with the VIF value below 2
fit0 <- glm(churn ~ state + account.length + area.code + international.plan + number.vmail.messages + total.day.calls + total.day.charge +
total.eve.calls + total.eve.charge + total.night.calls + total.night.charge + total.intl.calls + total.intl.charge + customer.service.calls,
data = TrainingData, family = binomial(link = "logit"))
summary(fit0)
#Select the model with the VIF variable for the next stepwise regression and get the minimum AIC Value
library('MASS')
step = stepAIC(fit0, direction = "both")
#Optimizing the model
#Null Deviance is the deviance of the actual value of the dataset - not using the independent variable only using the intercept - should be the same for all the model
#Residual Deviance - including the independent variable the value is less showing the deviance from the dependent variable - should be less
#AIC - should be less
#Degree of Freedom - The number of independent ways by which a dynamic system can move, without violating any constraint imposed on it, is called
#number of degrees of freedom. In other words, the number of degrees of freedom can be defined as the minimum number of independent coordinates that
#can specify the position of the system completely.
#Fitting the model with the lowest AIC value
fit1 <- glm(churn ~ international.plan + number.vmail.messages + total.day.charge +
total.eve.charge + total.night.charge + total.intl.calls +
total.intl.charge + customer.service.calls, data = TrainingData, family = binomial(link = "logit"))
summary(fit1)
#Fit1 is the best fit with the AIC score of 1529.3
# Refit the model without total.intl.calls to compare against fit1
fit2 <- update(fit1, .~. -total.intl.calls, data = TrainingData)
# Bug fix: R is case-sensitive — `Summary(fit2)` invoked the S4 group
# generic "Summary" (which cannot be called directly and errors) instead
# of the intended summary() of the refitted model.
summary(fit2)
#Predicting the Model with the Test Dataset
#To get the probability value of the response
Pred <- predict(fit1, newdata=TestingData[,-17], type = "response")
Pred
View(Pred)
#Converting the Predicted to 0 and 1 with .5 and the threshold
Preds <- ifelse(Pred<0.5, 0, 1)
#Confusion Matrix is a special kind of contingency table, with two dimensions ("actual" and "predicted"), and identical sets of "classes" in both
#dimensions (each combination of dimension and class is a variable in the contingency table).
#TN FP
#FN TP
confusionMatrix(table(TestingData$churn,Preds,dnn=list('Actual', 'Predicted')))
#Predicted with 89% Accuracy
((486+21)/(486+10+53+21))*100
#-------------------------------------------------------------------------
#ROC Curve to find the threshold
#Receiver Operating Characteristic(ROC) Curve
#The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings.
res <- predict(fit1, newdata=TrainingData, type = "response")
install.packages('ROCR')
library(ROCR)
ROCRPred <- prediction(res,TrainingData$churn)
#Performance of the ROC curve from the TPR and FPR
ROCRPref <- performance(ROCRPred, "tpr", "fpr")
#plot the ROCR Graph to find the TPR and FPR
plot(ROCRPref, colorize = TRUE, print.cutoffs.at=seq(0.1, by=0.1))
PredROCR1 <- ifelse(Pred<0.5, 0, 1)
confusionMatrix(table(TestingData$churn,PredROCR1,dnn=list('Actual', 'Predicted')))
#Area under the ROC Curve
install.packages("InformationValue")
library(InformationValue)
plotROC(actuals=TrainingData$churn,predictedScores=as.numeric(fitted(fit1)))
#-------------------------------------------------------------------------------
## chart measures the performance of classification models :- Kolmogorov Smirnov test
ks_plot(actuals=TrainingData$churn,predictedScores=as.numeric(fitted(fit1)))
### Kolmogorov Smirnov Statistic :- Higher the value , better the model
ks_stat(actuals=TrainingData$churn,predictedScores=as.numeric(fitted(fit1)))
#model is not that efficient in capturing the responders
#-------------------------------------------------------------------------
#Goodness of Fit – Hosmer Lemeshow Test
#The test assesses whether or not the observed event rates match expected event rates in subgroups of the model population.
install.packages("ResourceSelection")
library(ResourceSelection)
hoslem.test(TrainingData$churn,fitted(fit1),g=10)
#----------------------------------------------------------------------------
#Wald Test
# if explanatory variables in a model are significant.
install.packages("survey")
library(survey)
regTermTest(fit1,"international.plan")
#-----------------------------------------------------------------------------
## Pseudo R2 statistic :- Mc Fadden test
install.packages("pscl")
library(pscl)
pR2(fit1)
#----------------------------------------------------------------------------
#Plotting the graph for logistic regression
library(ggplot2)
ggplot(TrainingData, aes(state,churn)) + geom_point() + geom_smooth(method = "glm", se = FALSE, method.args = list(family = "binomial"))
#--------------------------------------------------------------------------
#Predicting the value for the test data
Tele.test1 <- TestTele[,-17]
summary(Tele.test1)
PredTest <- predict(fit1, newdata=Tele.test1, type = "response")
PredTest
#Converting the Predicted to 0 and 1 with .5 and the threshold
churn <- ifelse(PredTest<0.5, 0, 1)
churn
TelecomPredicted <- cbind(Tele.test1,churn)
write.csv(TelecomPredicted, file = 'Telecom Churn Logistic Regression.csv', row.names = F)
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Naive Bayes Algorithm Approach to find the churn rate of the telecom Customer
#Bayesian Theorem
#used when the dimensions of the variables are high
# Bug fix: install.packages() takes the package name as a character
# string; the unquoted `e1071` was evaluated as a (non-existent) object
# and raised "object 'e1071' not found". Also only install when missing,
# so re-running the script does not reinstall every time.
if (!requireNamespace("e1071", quietly = TRUE)) {
  install.packages("e1071")
}
library('e1071')
tele_nb <- naiveBayes(churn ~ state + account.length + area.code + international.plan + number.vmail.messages + total.day.calls + total.day.charge +
total.eve.calls + total.eve.charge + total.night.calls + total.night.charge + total.intl.calls + total.intl.charge + customer.service.calls,
data = Tele.Train)
tele_nb
PredNB <- predict(tele_nb, Tele.Test[,-17], type = "class")
#To get the raw probability data
PredNB <- predict(tele_nb, Tele.Test[,-17], type = "raw")
PredNB
PredNBB <- ifelse(PredNB == TRUE, 1, 0)
#Model Tuning
#Kappa
#Sensitivity
#Specificity
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Decision Tree Algorithm Approach to find the churn rate of the telecom Customer
#Building the Decision Tree
library(rpart)
#rpart - recursive partition
Tele_DT <- rpart(churn ~ state + account.length + area.code + international.plan + number.vmail.messages + total.day.calls + total.day.charge +
total.eve.calls + total.eve.charge + total.night.calls + total.night.charge + total.intl.calls + total.intl.charge + customer.service.calls,
data = Tele.Train)
Tele_DT
#plot the Decision Tree
plot(Tele_DT, margin = 0.1)
text(Tele_DT, use.n = TRUE, pretty = TRUE, cex=0.8)
#Predict the Decision Tree
#Confusion Matrix with the Test Dataset
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Random Forest Classification Algorithm Approach to find the churn rate of the telecom Customer
#Load the Random Forest Library
library(randomForest)
Tele_RF <- randomForest(churn ~ ., data = Tele.Train)
|
/Churn_Prediction.R
|
no_license
|
blessondensil294/Telecom_Churn
|
R
| false
| false
| 17,026
|
r
|
#To predict the churn rate of the customer
#install packages
install.packages("dplyr")
#load the library
library('dplyr') # data manipulation
#set the working directory
setwd("E:\\IMS Course Content\\Course Content\\Data Science Term 1\\Capstone Project\\Data")
#read the data working directory
Tele.Train <- read.csv('Train_tele.csv', header = TRUE, stringsAsFactors = F)
Tele.Test <- read.csv('Test_tele.csv', header = TRUE, stringsAsFactors = F)
#------------------------------------------------------------------------------------------------------------------------------------------
#Exploratory Data Analysis
#Summary of the data
summary(Tele.Train)
summary(Tele.Test)
str(Tele.Train)
str(Tele.Test)
#Bind the data Train and Test
Tele.Full <- bind_rows(Tele.Train, Tele.Test)
summary(Tele.Full)
str(Tele.Full)
#Filling the NA for Number Vmail Msgs with 0 as the customer doenst have a voice mail plan
Tele.Full[79,]
Tele.Full[79,]$number.vmail.messages <- 0
#Filling the Mean NA for Total Day Calls
mean(Tele.Full$total.day.calls)
TotDayCall <- Tele.Full[c(-30,-90),8:9]
mean(TotDayCall$total.day.calls)
Tele.Full[c(30,90),8] <- 100
#Filling the Mean NA for Total Day Charge based on State
TotDayCharge <- Tele.Full[,c(1,9,17)]
mean(TotDayCharge$total.day.charge, na.rm = TRUE)
#Total day Charge for IN State
TotDayChargeIN <- Tele.Full[Tele.Full$state=='IN',c(1,9)]
mean(TotDayChargeIN$total.day.charge, na.rm = TRUE)
Tele.Full[11,9] <- 33.57
#Total day Charge for OK State
TotDayChargeOK <- Tele.Full[Tele.Full$state=='OK',c(1,9)]
mean(TotDayChargeOK$total.day.charge, na.rm = TRUE)
Tele.Full[35,9] <- 30.59
#Total day Charge for NM State
TotDayChargeNM <- Tele.Full[Tele.Full$state=='NM',c(1,9)]
mean(TotDayChargeNM$total.day.charge, na.rm = TRUE)
Tele.Full[73,9] <- 28.92
#Filling the NA for Total Eve
TotEve <- Tele.Full[,c(1,10,11)]
#Filling NA for Total Eve Call for MA State
TotEveCallMA <- TotEve[TotEve$state=='MA',c(1,2)]
mean(TotEveCallMA$total.eve.calls, na.rm = TRUE)
mean(Tele.Full[Tele.Full$state=='MA',10], na.rm = TRUE)
Tele.Full[7,10]
Tele.Full[7,10] <- 98
#Filling NA for Total Eve Call for OR State
mean(Tele.Full[Tele.Full$state=='OR',10], na.rm = TRUE)
Tele.Full[46,10]
Tele.Full[46,10] <- 98
#Filling NA for Total Eve Charge for SC State
mean(Tele.Full[Tele.Full$state=='SC',11], na.rm = TRUE)
Tele.Full[24,11]
Tele.Full[24,11] <- 17.73
#Filling NA for Total Eve Charge for MD State
mean(Tele.Full[Tele.Full$state=='MD',11], na.rm = TRUE)
Tele.Full[42,11]
Tele.Full[42,11] <- 16.65
#Filling NA for Total Eve Charge for OR State
mean(Tele.Full[Tele.Full$state=='OR',11], na.rm = TRUE)
Tele.Full[82,11]
Tele.Full[82,11] <- 16.65
#Filling NA for Total Eve Charge for ID State
mean(Tele.Full[Tele.Full$state=='ID',11], na.rm = TRUE)
Tele.Full[100,11]
Tele.Full[100,11] <- 16.52
#Filling the NA rows for Total night based on state wise
#Filling the NA for Total Night Calls for IA State and 415 Area Code
mean(Tele.Full[Tele.Full$state=='IA' & Tele.Full$area.code==415,12], na.rm = TRUE)
Tele.Full[15,12]
Tele.Full[15,12] <- 97
#Filling the NA for Total Night Calls for WI State and 415 Area Code
mean(Tele.Full[Tele.Full$state=='WI' & Tele.Full$area.code==415,12], na.rm = TRUE)
Tele.Full[59,12]
Tele.Full[59,12] <- 101
#Filling the NA for Total Night Calls for LA State and 415 Area Code
mean(Tele.Full[Tele.Full$state=='LA' & Tele.Full$area.code==415,12], na.rm = TRUE)
Tele.Full[92,12]
Tele.Full[92,12] <- 98
#Filling NA for Total Night Charge
mean(Tele.Full[Tele.Full$state=='IA' & Tele.Full$area.code==510,13], na.rm = TRUE)
Tele.Full[101,13]
Tele.Full[101,13] <- 9.63
#Filling the NA Values for Total International
#Filling the NA for Total International Call for SC State and Area Code
mean(Tele.Full[Tele.Full$state=='SC' & Tele.Full$area.code==415,14], na.rm = TRUE)
Tele.Full[24,14]
Tele.Full[24,14] <- 4
#Filling the NA for Total International Call for WI State and Area Code
mean(Tele.Full[Tele.Full$state=='WI' & Tele.Full$area.code==415,14], na.rm = TRUE)
Tele.Full[59,14]
Tele.Full[59,14] <- 4
#Filling the NA for Total International Charge for IN State and Area Code
mean(Tele.Full[Tele.Full$state=='IN' & Tele.Full$area.code==415,15], na.rm = TRUE)
Tele.Full[11,15]
Tele.Full[11,15] <- 2.77
#Filling the NA for Total International Charge for VT State and Area Code
mean(Tele.Full[Tele.Full$state=='VT' & Tele.Full$area.code==510,15], na.rm = TRUE)
Tele.Full[18,15]
Tele.Full[18,15] <- 2.37
#Filling the NA for Total International Charge for VT State and Area Code
mean(Tele.Full[Tele.Full$state=='WY' & Tele.Full$area.code==415,15], na.rm = TRUE)
Tele.Full[55,15]
Tele.Full[55,15] <- 2.75
#Filling the NA for Customer Service Call
mean(Tele.Full[Tele.Full$state=='NM' & Tele.Full$area.code==510,16], na.rm = TRUE)
Tele.Full[73,16]
Tele.Full[73,16] <- 1
#------------------------------------------------------------------------------------------------------------------------------------------
# #converting Variables to factors..
# #Converting Internation Plan to factors
# Tele.Full$international.plan <- ifelse(Tele.Full$international.plan == 'yes', 1, 0)
# #Tele.Full$international.plan <- as.factor(Tele.Full$international.plan)
# Tele.Full$international.plan <- as.numeric(Tele.Full$international.plan)
#
# #Converting Voice Mail Plan to factors
# Tele.Full$voice.mail.plan <- ifelse(Tele.Full$voice.mail.plan == 'yes', 1, 0)
# #Tele.Full$voice.mail.plan <- as.factor(Tele.Full$voice.mail.plan)
# Tele.Full$voice.mail.plan <- as.numeric(Tele.Full$voice.mail.plan)
#
# #Converting Churn to factors
# Tele.Full$churn <- ifelse(Tele.Full$churn == TRUE, 1, 0)
#
# #Converting State to factor and then numbers
# Tele.Full$state <- as.factor(Tele.Full$state)
# Tele.Full$state <- as.numeric(Tele.Full$state)
#
# #Converting Area Code to factor and then numbers
# Tele.Full$area.code <- as.factor(Tele.Full$area.code)
# Tele.Full$area.code <- as.numeric(Tele.Full$area.code)
#
# #converting Account length to numeric
# Tele.Full$account.length <- as.numeric(Tele.Full$account.length)
#-------------------------------------------------------------------------------------------------------------------------------------
#Create the Training data
TrainTele <- Tele.Full[1:2850,]
summary(TrainTele)
View(TrainTele)
#---------------------------------------------------------------------------------------------------------------------------------------
#Visualisation of the Training Dataset with GGPLOT2
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
#------------------------------------------------------------------------------------------------------------------------------------
#Create the Test dataset
TestTele <- Tele.Full[2851:3333,]
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Logistic regression Approach to find the churn rate of the telecom Customer
install.packages('stringi')
install.packages('caret')
library('caret')
library('stringi')
#split the Training date for Train and Test
set.seed(1234)
inTrain = createDataPartition(TrainTele$churn, p=0.8, list = FALSE)
TrainingData = TrainTele[inTrain,]
TestingData = TrainTele[-inTrain,]
str(TrainingData)
str(TestingData)
#To find the Dimension of the Data
dim(TrainingData)
dim(TestingData)
summary(Tele.Full)
#To check for the Mulitcolinearity of the Variables in the Dataset
# multicollinearity a phenomenon in which one predictor variable in a multiple regression model can be linearly predicted from the others with a
#substantial degree of accuracy.
#variance inflation factor (VIF) is the ratio of variance in a model with multiple terms, divided by the variance of a model with one term alone
library('usdm')
vifstep(Tele.Full[,c(-4,-17)], th=2)
#Akaike Information Criterion
#is an estimator of the relative quality of statistical models for a given set of data. Given a collection of models for the data, AIC estimates
#the quality of each model, relative to each of the other models. Thus, AIC provides a means for model selection.
#To check for the correlation of the variable and the graph
#dependence or association is any statistical relationship, whether causal or not, between two random variables or bivariate data.
#Correlation is any of a broad class of statistical relationships involving dependence, though in common usage it most often refers to how close
#two variables are to having a linear relationship with each other
library('corrplot')
cr <- cor(Tele.Full[,c(-4,-17)])
corrplot(cr, type = "lower")
#Fitting the model with the VIF value below 2
fit0 <- glm(churn ~ state + account.length + area.code + international.plan + number.vmail.messages + total.day.calls + total.day.charge +
total.eve.calls + total.eve.charge + total.night.calls + total.night.charge + total.intl.calls + total.intl.charge + customer.service.calls,
data = TrainingData, family = binomial(link = "logit"))
summary(fit0)
#Select the model with the VIF variable for the next stepwise regression and get the minimum AIC Value
library('MASS')
step = stepAIC(fit0, direction = "both")
#Optimizing the model
#Null Deviance is the deviance of the actual value of the dataset - not using the independent variable only using the intercept - should be the same for all the model
#Residual Deviance - including the independent variable the value is less showing the deviance from the dependent variable - should be less
#AIC - should be less
#Degree of Freedom - The number of independent ways by which a dynamic system can move, without violating any constraint imposed on it, is called
#number of degrees of freedom. In other words, the number of degrees of freedom can be defined as the minimum number of independent coordinates that
#can specify the position of the system completely.
#Fitting the model with the lowest AIC value
fit1 <- glm(churn ~ international.plan + number.vmail.messages + total.day.charge +
total.eve.charge + total.night.charge + total.intl.calls +
total.intl.charge + customer.service.calls, data = TrainingData, family = binomial(link = "logit"))
summary(fit1)
#Fit1 is the best fit with the AIC score of 1529.3
# Refit the model without total.intl.calls to compare against fit1
fit2 <- update(fit1, .~. -total.intl.calls, data = TrainingData)
# Bug fix: R is case-sensitive — `Summary(fit2)` invoked the S4 group
# generic "Summary" (which cannot be called directly and errors) instead
# of the intended summary() of the refitted model.
summary(fit2)
#Predicting the Model with the Test Dataset
#To get the probability value of the response
Pred <- predict(fit1, newdata=TestingData[,-17], type = "response")
Pred
View(Pred)
#Converting the Predicted to 0 and 1 with .5 and the threshold
Preds <- ifelse(Pred<0.5, 0, 1)
#Confusion Matrix is a special kind of contingency table, with two dimensions ("actual" and "predicted"), and identical sets of "classes" in both
#dimensions (each combination of dimension and class is a variable in the contingency table).
#TN FP
#FN TP
confusionMatrix(table(TestingData$churn,Preds,dnn=list('Actual', 'Predicted')))
#Predicted with 89% Accuracy
((486+21)/(486+10+53+21))*100
#-------------------------------------------------------------------------
#ROC Curve to find the threshold
#Receiver Operating Characteristic(ROC) Curve
#The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings.
res <- predict(fit1, newdata=TrainingData, type = "response")
install.packages('ROCR')
library(ROCR)
ROCRPred <- prediction(res,TrainingData$churn)
#Performance of the ROC curve from the TPR and FPR
ROCRPref <- performance(ROCRPred, "tpr", "fpr")
#plot the ROCR Graph to find the TPR and FPR
plot(ROCRPref, colorize = TRUE, print.cutoffs.at=seq(0.1, by=0.1))
PredROCR1 <- ifelse(Pred<0.5, 0, 1)
confusionMatrix(table(TestingData$churn,PredROCR1,dnn=list('Actual', 'Predicted')))
#Area under the ROC Curve
install.packages("InformationValue")
library(InformationValue)
plotROC(actuals=TrainingData$churn,predictedScores=as.numeric(fitted(fit1)))
#-------------------------------------------------------------------------------
## chart measures the performance of classification models :- Kolmogorov Smirnov test
ks_plot(actuals=TrainingData$churn,predictedScores=as.numeric(fitted(fit1)))
### Kolmogorov Smirnov Statistic :- Higher the value , better the model
ks_stat(actuals=TrainingData$churn,predictedScores=as.numeric(fitted(fit1)))
#model is not that efficient in capturing the responders
#-------------------------------------------------------------------------
#Goodness of Fit – Hosmer Lemeshow Test
#The test assesses whether or not the observed event rates match expected event rates in subgroups of the model population.
install.packages("ResourceSelection")
library(ResourceSelection)
hoslem.test(TrainingData$churn,fitted(fit1),g=10)
#----------------------------------------------------------------------------
#Wald Test
# if explanatory variables in a model are significant.
install.packages("survey")
library(survey)
regTermTest(fit1,"international.plan")
#-----------------------------------------------------------------------------
## Pseudo R2 statistic :- Mc Fadden test
install.packages("pscl")
library(pscl)
pR2(fit1)
#----------------------------------------------------------------------------
#Plotting the graph for logistic regression
library(ggplot2)
ggplot(TrainingData, aes(state,churn)) + geom_point() + geom_smooth(method = "glm", se = FALSE, method.args = list(family = "binomial"))
#--------------------------------------------------------------------------
#Predicting the value for the test data
Tele.test1 <- TestTele[,-17]
summary(Tele.test1)
PredTest <- predict(fit1, newdata=Tele.test1, type = "response")
PredTest
#Converting the Predicted to 0 and 1 with .5 and the threshold
churn <- ifelse(PredTest<0.5, 0, 1)
churn
TelecomPredicted <- cbind(Tele.test1,churn)
write.csv(TelecomPredicted, file = 'Telecom Churn Logistic Regression.csv', row.names = F)
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Naive Bayes Algorithm Approach to find the churn rate of the telecom Customer
#Bayesian Theorem
#used when the dimensions of the variables are high
# Bug fix: install.packages() takes the package name as a character
# string; the unquoted `e1071` was evaluated as a (non-existent) object
# and raised "object 'e1071' not found". Also only install when missing,
# so re-running the script does not reinstall every time.
if (!requireNamespace("e1071", quietly = TRUE)) {
  install.packages("e1071")
}
library('e1071')
tele_nb <- naiveBayes(churn ~ state + account.length + area.code + international.plan + number.vmail.messages + total.day.calls + total.day.charge +
total.eve.calls + total.eve.charge + total.night.calls + total.night.charge + total.intl.calls + total.intl.charge + customer.service.calls,
data = Tele.Train)
tele_nb
PredNB <- predict(tele_nb, Tele.Test[,-17], type = "class")
#To get the raw probability data
PredNB <- predict(tele_nb, Tele.Test[,-17], type = "raw")
PredNB
PredNBB <- ifelse(PredNB == TRUE, 1, 0)
#Model Tuning
#Kappa
#Sensitivity
#Specificity
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Decision Tree Algorithm Approach to find the churn rate of the telecom Customer
#Building the Decision Tree
library(rpart)
#rpart - recursive partition
Tele_DT <- rpart(churn ~ state + account.length + area.code + international.plan + number.vmail.messages + total.day.calls + total.day.charge +
total.eve.calls + total.eve.charge + total.night.calls + total.night.charge + total.intl.calls + total.intl.charge + customer.service.calls,
data = Tele.Train)
Tele_DT
#plot the Desicion Tree
plot(Tele_DT, margin = 0.1)
text(Tele_DT, use.n = TRUE, pretty = TRUE, cex=0.8)
#Predict the Decision Tree
#Confusion Matrix with the Test Dataset
#------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
#Random Forest Classiofication Algorithm Approach to find the churn rate of the telecom Customer
#Load the Random Forest Library
library(randomForest)
Tele_RF <- randomForest(churn ~ ., data = Tele.Train)
|
# Web handler: draw a basic scatter of 1:100 into a temp PNG and stream the
# bytes back as the HTTP response.
# NOTE(review): setContentType()/sendBin() look like the rApache server API --
# confirm which framework provides them before reuse.
setContentType(type='image/png')
# Open a PNG graphics device writing to a fixed temp path.
png("/tmp/1.png", width=800, height=500)
obj <- plot(1:100)  # plot() returns NULL invisibly; assignment kept as-is
dev.off()
t <- "/tmp/1.png"
# Read the finished file as raw bytes and send it as the response body.
sendBin(object=readBin(t,'raw',n=file.info(t)$size))
|
/utils/jirastat/html/test.R
|
no_license
|
euriion/code_snippets
|
R
| false
| false
| 174
|
r
|
# Web handler: draw a basic scatter of 1:100 into a temp PNG and stream the
# bytes back as the HTTP response.
# NOTE(review): setContentType()/sendBin() look like the rApache server API --
# confirm which framework provides them before reuse.
setContentType(type='image/png')
# Open a PNG graphics device writing to a fixed temp path.
png("/tmp/1.png", width=800, height=500)
obj <- plot(1:100)  # plot() returns NULL invisibly; assignment kept as-is
dev.off()
t <- "/tmp/1.png"
# Read the finished file as raw bytes and send it as the response body.
sendBin(object=readBin(t,'raw',n=file.info(t)$size))
|
#!usr/bin/env R
# Count up from zero, reporting each value, and leave the loop at ten.
i <- 0
repeat {
  if (i == 10) {
    break  # exit the (otherwise endless) loop once i hits 10
  }
  cat("i equals ", i, " \n")
  i <- i + 1
}
|
/Week3/Code/break.R
|
no_license
|
rte19/CMEECoursework
|
R
| false
| false
| 188
|
r
|
#!usr/bin/env R
# Count up from zero, reporting each value, and leave the loop at ten.
i <- 0
repeat {
  if (i == 10) {
    break  # exit the (otherwise endless) loop once i hits 10
  }
  cat("i equals ", i, " \n")
  i <- i + 1
}
|
#' Significance testing for wavelet transforms (Torrence & Compo, 1998).
#'
#' Computes significance levels of wavelet power against a red-noise (AR1)
#' background spectrum.
#'
#' @param d Observed series: a vector, or a two-column matrix of
#'   (time, value), in which case \code{dt} may be NULL and is recovered
#'   from the time column.
#' @param dt Sampling interval.
#' @param scale Vector of wavelet scales (must have at least two elements).
#' @param sig.test 0 = point-wise, 1 = time-averaged, 2 = scale-averaged test.
#' @param sig.level Significance level (default 0.95).
#' @param dof Degrees of freedom; for \code{sig.test = 2} a length-2 vector
#'   c(S1, S2) giving the range of scales to average over.
#' @param lag1 AR1 coefficient of the background noise; estimated from the
#'   data via \code{arima()} when NULL.
#' @param mother Mother wavelet: "morlet", "paul" or "dog".
#' @param param Mother-wavelet parameter; -1 selects the conventional default.
#' @param sigma2 Series variance; computed from the data when NULL.
#' @return A list with \code{signif} (significance levels) and
#'   \code{fft.theor} (theoretical red-noise spectrum).
wt.sig <-
function (d, dt, scale, sig.test=0, sig.level=0.95, dof=2, lag1=NULL,
          mother=c("morlet", "paul", "dog"), param=-1, sigma2=NULL) {
  ## Resolve the mother wavelet once (the original matched it twice).
  mother <- match.arg(tolower(mother), c("morlet", "paul", "dog"))
  ## Center the series; recover dt from a (time, value) matrix if needed.
  ## BUG FIX: scalar && instead of the vectorized & in a scalar if().
  if (is.null(dt) && NCOL(d) > 1) {
    dt <- diff(d[, 1])[1]
    x <- d[, 2] - mean(d[, 2])
  } else {
    x <- d - mean(d)
  }
  ## AR1 coefficient of the red-noise background, unless supplied.
  if (is.null(lag1))
    lag1 <- arima(x, order = c(1, 0, 0))$coef[1]
  J1 <- length(scale) - 1
  dj <- log(scale[2] / scale[1]) / log(2)
  if (is.null(sigma2))
    sigma2 <- var(x)
  ## Empirical mother-wavelet constants [Torrence & Compo 1998, Table 2].
  if (mother == "morlet") {
    if (param == -1)
      param <- 6
    k0 <- param
    fourier.factor <- (4 * pi) / (k0 + sqrt(2 + k0^2))
    empir <- c(2, -1, -1, -1)
    if (k0 == 6)
      empir[2:4] <- c(0.776, 2.32, 0.60)
  } else if (mother == "paul") {
    if (param == -1)
      param <- 4
    m <- param
    fourier.factor <- 4 * pi / (2 * m + 1)
    empir <- c(2, -1, -1, -1)
    if (m == 4)
      empir[2:4] <- c(1.132, 1.17, 1.5)
  } else { ## "dog" -- match.arg() above guarantees one of the three
    if (param == -1)
      param <- 2
    m <- param
    fourier.factor <- 2 * pi * sqrt(2 / (2 * m + 1))
    empir <- c(1, -1, -1, -1)
    if (m == 2)
      empir[2:4] <- c(3.541, 1.43, 1.4)
    if (m == 6)
      empir[2:4] <- c(1.966, 1.37, 0.97)
  }
  period <- scale * fourier.factor
  dofmin <- empir[1]     ## degrees of freedom with no smoothing
  Cdelta <- empir[2]     ## reconstruction factor
  gamma.fac <- empir[3]  ## time-decorrelation factor
  dj0 <- empir[4]        ## scale-decorrelation factor
  freq <- dt / period    ## normalized frequency
  ## Theoretical AR1 (red-noise) spectrum, scaled by the series variance.
  fft.theor <- sigma2 * (1 - lag1^2) /
    (1 - 2 * lag1 * cos(freq * 2 * pi) + lag1^2)
  signif <- fft.theor
  ## ROBUSTNESS: only treat dof == -1 as "use dofmin" for scalar dof; the
  ## original `if (dof == -1)` fails when sig.test = 2 passes a length-2 dof.
  if (length(dof) == 1 && dof == -1)
    dof <- dofmin
  if (sig.test == 0) {          ## no smoothing, DOF = dofmin
    dof <- dofmin
    chisquare <- qchisq(sig.level, dof) / dof
    signif <- fft.theor * chisquare
  } else if (sig.test == 1) {   ## time-averaged significance
    if (length(dof) == 1)
      dof <- rep(dof, J1 + 1)
    truncate <- which(dof < 1)
    dof[truncate] <- rep(1, length(truncate))
    dof <- dofmin * sqrt(1 + (dof * dt / gamma.fac / scale)^2)
    truncate <- which(dof < dofmin)
    dof[truncate] <- dofmin * rep(1, length(truncate))  ## minimum DOF is dofmin
    for (a1 in seq_len(J1 + 1)) {
      chisquare <- qchisq(sig.level, dof[a1]) / dof[a1]
      signif[a1] <- fft.theor[a1] * chisquare
    }
  } else if (sig.test == 2) {   ## scale-averaged significance
    if (length(dof) != 2)
      stop('DOF must be set to [S1,S2], the range of scale-averages')
    if (Cdelta == -1) {
      stop(paste('Cdelta & dj0 not defined for', mother, 'with param=', param))
    }
    s1 <- dof[1]
    s2 <- dof[2]
    avg <- which((scale >= s1) & (scale <= s2))  ## scales between S1 & S2
    navg <- length(avg)
    if (navg == 0)
      stop(paste("No valid scales between", s1, 'and', s2))
    Savg <- 1 / sum(1 / scale[avg])
    Smid <- exp((log(s1) + log(s2)) / 2)  ## power-of-two midpoint
    dof <- (dofmin * navg * Savg / Smid) * sqrt(1 + (navg * dj / dj0)^2)
    fft.theor <- Savg * sum(fft.theor[avg] / scale[avg])
    chisquare <- qchisq(sig.level, dof) / dof
    signif <- (dj * dt / Cdelta / Savg) * fft.theor * chisquare
  } else {
    stop('sig.test must be 0, 1, or 2')
  }
  list(signif = signif, fft.theor = fft.theor)
}
|
/R/wt.sig.R
|
no_license
|
wafels/biwavelet
|
R
| false
| false
| 3,599
|
r
|
#' Significance testing for wavelet transforms (Torrence & Compo, 1998).
#'
#' Computes significance levels of wavelet power against a red-noise (AR1)
#' background spectrum.
#'
#' @param d Observed series: a vector, or a two-column matrix of
#'   (time, value), in which case \code{dt} may be NULL and is recovered
#'   from the time column.
#' @param dt Sampling interval.
#' @param scale Vector of wavelet scales (must have at least two elements).
#' @param sig.test 0 = point-wise, 1 = time-averaged, 2 = scale-averaged test.
#' @param sig.level Significance level (default 0.95).
#' @param dof Degrees of freedom; for \code{sig.test = 2} a length-2 vector
#'   c(S1, S2) giving the range of scales to average over.
#' @param lag1 AR1 coefficient of the background noise; estimated from the
#'   data via \code{arima()} when NULL.
#' @param mother Mother wavelet: "morlet", "paul" or "dog".
#' @param param Mother-wavelet parameter; -1 selects the conventional default.
#' @param sigma2 Series variance; computed from the data when NULL.
#' @return A list with \code{signif} (significance levels) and
#'   \code{fft.theor} (theoretical red-noise spectrum).
wt.sig <-
function (d, dt, scale, sig.test=0, sig.level=0.95, dof=2, lag1=NULL,
          mother=c("morlet", "paul", "dog"), param=-1, sigma2=NULL) {
  ## Resolve the mother wavelet once (the original matched it twice).
  mother <- match.arg(tolower(mother), c("morlet", "paul", "dog"))
  ## Center the series; recover dt from a (time, value) matrix if needed.
  ## BUG FIX: scalar && instead of the vectorized & in a scalar if().
  if (is.null(dt) && NCOL(d) > 1) {
    dt <- diff(d[, 1])[1]
    x <- d[, 2] - mean(d[, 2])
  } else {
    x <- d - mean(d)
  }
  ## AR1 coefficient of the red-noise background, unless supplied.
  if (is.null(lag1))
    lag1 <- arima(x, order = c(1, 0, 0))$coef[1]
  J1 <- length(scale) - 1
  dj <- log(scale[2] / scale[1]) / log(2)
  if (is.null(sigma2))
    sigma2 <- var(x)
  ## Empirical mother-wavelet constants [Torrence & Compo 1998, Table 2].
  if (mother == "morlet") {
    if (param == -1)
      param <- 6
    k0 <- param
    fourier.factor <- (4 * pi) / (k0 + sqrt(2 + k0^2))
    empir <- c(2, -1, -1, -1)
    if (k0 == 6)
      empir[2:4] <- c(0.776, 2.32, 0.60)
  } else if (mother == "paul") {
    if (param == -1)
      param <- 4
    m <- param
    fourier.factor <- 4 * pi / (2 * m + 1)
    empir <- c(2, -1, -1, -1)
    if (m == 4)
      empir[2:4] <- c(1.132, 1.17, 1.5)
  } else { ## "dog" -- match.arg() above guarantees one of the three
    if (param == -1)
      param <- 2
    m <- param
    fourier.factor <- 2 * pi * sqrt(2 / (2 * m + 1))
    empir <- c(1, -1, -1, -1)
    if (m == 2)
      empir[2:4] <- c(3.541, 1.43, 1.4)
    if (m == 6)
      empir[2:4] <- c(1.966, 1.37, 0.97)
  }
  period <- scale * fourier.factor
  dofmin <- empir[1]     ## degrees of freedom with no smoothing
  Cdelta <- empir[2]     ## reconstruction factor
  gamma.fac <- empir[3]  ## time-decorrelation factor
  dj0 <- empir[4]        ## scale-decorrelation factor
  freq <- dt / period    ## normalized frequency
  ## Theoretical AR1 (red-noise) spectrum, scaled by the series variance.
  fft.theor <- sigma2 * (1 - lag1^2) /
    (1 - 2 * lag1 * cos(freq * 2 * pi) + lag1^2)
  signif <- fft.theor
  ## ROBUSTNESS: only treat dof == -1 as "use dofmin" for scalar dof; the
  ## original `if (dof == -1)` fails when sig.test = 2 passes a length-2 dof.
  if (length(dof) == 1 && dof == -1)
    dof <- dofmin
  if (sig.test == 0) {          ## no smoothing, DOF = dofmin
    dof <- dofmin
    chisquare <- qchisq(sig.level, dof) / dof
    signif <- fft.theor * chisquare
  } else if (sig.test == 1) {   ## time-averaged significance
    if (length(dof) == 1)
      dof <- rep(dof, J1 + 1)
    truncate <- which(dof < 1)
    dof[truncate] <- rep(1, length(truncate))
    dof <- dofmin * sqrt(1 + (dof * dt / gamma.fac / scale)^2)
    truncate <- which(dof < dofmin)
    dof[truncate] <- dofmin * rep(1, length(truncate))  ## minimum DOF is dofmin
    for (a1 in seq_len(J1 + 1)) {
      chisquare <- qchisq(sig.level, dof[a1]) / dof[a1]
      signif[a1] <- fft.theor[a1] * chisquare
    }
  } else if (sig.test == 2) {   ## scale-averaged significance
    if (length(dof) != 2)
      stop('DOF must be set to [S1,S2], the range of scale-averages')
    if (Cdelta == -1) {
      stop(paste('Cdelta & dj0 not defined for', mother, 'with param=', param))
    }
    s1 <- dof[1]
    s2 <- dof[2]
    avg <- which((scale >= s1) & (scale <= s2))  ## scales between S1 & S2
    navg <- length(avg)
    if (navg == 0)
      stop(paste("No valid scales between", s1, 'and', s2))
    Savg <- 1 / sum(1 / scale[avg])
    Smid <- exp((log(s1) + log(s2)) / 2)  ## power-of-two midpoint
    dof <- (dofmin * navg * Savg / Smid) * sqrt(1 + (navg * dj / dj0)^2)
    fft.theor <- Savg * sum(fft.theor[avg] / scale[avg])
    chisquare <- qchisq(sig.level, dof) / dof
    signif <- (dj * dt / Cdelta / Savg) * fft.theor * chisquare
  } else {
    stop('sig.test must be 0, 1, or 2')
  }
  list(signif = signif, fft.theor = fft.theor)
}
|
#' Add a data validation rule to a cell range
#'
#' @description
#' *Note: not yet exported, still very alpha. Usage still requires using
#' low-level helpers.*
#'
#' `range_add_validation()` adds a data validation rule to a range of cells.
#'
#' @eval param_ss()
#' @eval param_sheet()
#' @param range Cells to apply data validation to. This `range` argument has
#' important similarities and differences to `range` elsewhere (e.g.
#' [range_read()]):
#' * Similarities: Can be a cell range, using A1 notation ("A1:D3") or using
#' the helpers in [`cell-specification`]. Can combine sheet name and cell
#' range ("Sheet1!A5:A") or refer to a sheet by name (`range = "Sheet1"`,
#' although `sheet = "Sheet1"` is preferred for clarity).
#' * Difference: Can NOT be a named range.
#' @param rule An instance of `googlesheets4_schema_DataValidationRule`, which
#' implements the
#' [DataValidationRule](https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/cells#datavalidationrule)
#' schema.
#'
#' @template ss-return
#' @seealso Makes a `SetDataValidationRequest`:
#' * <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/request#setdatavalidationrequest>
#'
#' @keywords internal
#' @noRd
#'
#' @examplesIf gs4_has_token()
#' # create a data frame to use as initial data
#' df <- data.frame(
#' id = 1:3,
#' "Hungry?" = NA,
#' ice_cream = NA,
#' check.names = FALSE
#' )
#'
#' # create Sheet
#' ss <- gs4_create("range-add-validation-demo", sheets = list(df))
#'
#' # create a column that presents as a basic TRUE/FALSE checkbox
#' rule_checkbox <- googlesheets4:::new(
#' "DataValidationRule",
#' condition = googlesheets4:::new_BooleanCondition(type = "BOOLEAN"),
#' inputMessage = "Please let us know if you are hungry.",
#' strict = TRUE,
#' showCustomUi = TRUE
#' )
#' googlesheets4:::range_add_validation(
#' ss,
#' range = "Sheet1!B2:B", rule = rule_checkbox
#' )
#'
#' # create a column that presents as a dropdown list
#' rule_dropdown_list <- googlesheets4:::new(
#' "DataValidationRule",
#' condition = googlesheets4:::new_BooleanCondition(
#' type = "ONE_OF_LIST", values = c("vanilla", "chocolate", "strawberry")
#' ),
#' inputMessage = "Which ice cream flavor do you want?",
#' strict = TRUE,
#' showCustomUi = TRUE
#' )
#' googlesheets4:::range_add_validation(
#' ss,
#' range = "Sheet1!C2:C", rule = rule_dropdown_list
#' )
#'
#' read_sheet(ss)
#'
#' # clean up
#' gs4_find("range-add-validation-demo") %>%
#' googledrive::drive_trash()
# Internal: add a data validation rule to a cell range (full contract in the
# roxygen block above). Sends a single SetDataValidationRequest via the
# Sheets batchUpdate endpoint.
range_add_validation <- function(ss,
                                 sheet = NULL,
                                 range = NULL,
                                 rule) {
  ssid <- as_sheets_id(ss)
  # Validate inputs early, before any API traffic.
  maybe_sheet(sheet)
  check_range(range)
  if (!is.null(rule)) {
    stopifnot(inherits(rule, "googlesheets4_schema_DataValidationRule"))
  }
  # Fetch spreadsheet metadata (sheet names, named ranges) for range lookup.
  x <- gs4_get(ssid)
  gs4_bullets(c(v = "Editing {.s_sheet {x$name}}."))
  # determine (work)sheet ------------------------------------------------------
  range_spec <- as_range_spec(
    range,
    sheet = sheet,
    sheets_df = x$sheets, nr_df = x$named_ranges
  )
  # Fall back to the first visible sheet when no sheet was resolved.
  range_spec$sheet_name <- range_spec$sheet_name %||% first_visible_name(x$sheets)
  s <- lookup_sheet(range_spec$sheet_name, sheets_df = x$sheets)
  gs4_bullets(c(v = "Editing sheet {.w_sheet {range_spec$sheet_name}}."))
  # form batch update request --------------------------------------------------
  sdv_req <- list(setDataValidation = new(
    "SetDataValidationRequest",
    range = as_GridRange(range_spec),
    rule = rule
  ))
  # do it ----------------------------------------------------------------------
  req <- request_generate(
    "sheets.spreadsheets.batchUpdate",
    params = list(
      spreadsheetId = ssid,
      requests = list(sdv_req)
    )
  )
  resp_raw <- request_make(req)
  gargle::response_process(resp_raw)
  # Return the spreadsheet id invisibly so the call pipes cleanly.
  invisible(ssid)
}
# helpers ----
# Construct a BooleanCondition schema object for data validation.
# `type` must be one of the enum values declared in the schema; the optional
# `values` become conditionValue entries, coerced to character.
new_BooleanCondition <- function(type = "NOT_BLANK", values = NULL) {
  out <- new("BooleanCondition", type = type)
  # TODO: build enum checking into our schema-based construction
  schema <- attr(out, "schema")
  enum <- schema$enum[[which(schema$property == "type")]]
  stopifnot(type %in% enum$enum)
  # Conditions such as NOT_BLANK take no values; return early.
  if (length(values) < 1) {
    return(out)
  }
  # These date conditions require a relativeDate payload, which is not
  # implemented yet -- refuse rather than send a malformed request.
  needs_relative_date <- c(
    "DATE_BEFORE", "DATE_AFTER", "DATE_ON_OR_BEFORE", "DATE_ON_OR_AFTER"
  )
  if (type %in% needs_relative_date) {
    gs4_abort(
      "{.field relativeDate} not yet supported as a {.code conditionValue}.",
      .internal = TRUE
    )
  }
  patch(out, values = map(values, ~ list(userEnteredValue = as.character(.x))))
}
|
/R/range_add_validation.R
|
no_license
|
cran/googlesheets4
|
R
| false
| false
| 4,665
|
r
|
#' Add a data validation rule to a cell range
#'
#' @description
#' *Note: not yet exported, still very alpha. Usage still requires using
#' low-level helpers.*
#'
#' `range_add_validation()` adds a data validation rule to a range of cells.
#'
#' @eval param_ss()
#' @eval param_sheet()
#' @param range Cells to apply data validation to. This `range` argument has
#' important similarities and differences to `range` elsewhere (e.g.
#' [range_read()]):
#' * Similarities: Can be a cell range, using A1 notation ("A1:D3") or using
#' the helpers in [`cell-specification`]. Can combine sheet name and cell
#' range ("Sheet1!A5:A") or refer to a sheet by name (`range = "Sheet1"`,
#' although `sheet = "Sheet1"` is preferred for clarity).
#' * Difference: Can NOT be a named range.
#' @param rule An instance of `googlesheets4_schema_DataValidationRule`, which
#' implements the
#' [DataValidationRule](https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/cells#datavalidationrule)
#' schema.
#'
#' @template ss-return
#' @seealso Makes a `SetDataValidationRequest`:
#' * <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/request#setdatavalidationrequest>
#'
#' @keywords internal
#' @noRd
#'
#' @examplesIf gs4_has_token()
#' # create a data frame to use as initial data
#' df <- data.frame(
#' id = 1:3,
#' "Hungry?" = NA,
#' ice_cream = NA,
#' check.names = FALSE
#' )
#'
#' # create Sheet
#' ss <- gs4_create("range-add-validation-demo", sheets = list(df))
#'
#' # create a column that presents as a basic TRUE/FALSE checkbox
#' rule_checkbox <- googlesheets4:::new(
#' "DataValidationRule",
#' condition = googlesheets4:::new_BooleanCondition(type = "BOOLEAN"),
#' inputMessage = "Please let us know if you are hungry.",
#' strict = TRUE,
#' showCustomUi = TRUE
#' )
#' googlesheets4:::range_add_validation(
#' ss,
#' range = "Sheet1!B2:B", rule = rule_checkbox
#' )
#'
#' # create a column that presents as a dropdown list
#' rule_dropdown_list <- googlesheets4:::new(
#' "DataValidationRule",
#' condition = googlesheets4:::new_BooleanCondition(
#' type = "ONE_OF_LIST", values = c("vanilla", "chocolate", "strawberry")
#' ),
#' inputMessage = "Which ice cream flavor do you want?",
#' strict = TRUE,
#' showCustomUi = TRUE
#' )
#' googlesheets4:::range_add_validation(
#' ss,
#' range = "Sheet1!C2:C", rule = rule_dropdown_list
#' )
#'
#' read_sheet(ss)
#'
#' # clean up
#' gs4_find("range-add-validation-demo") %>%
#' googledrive::drive_trash()
# Internal: add a data validation rule to a cell range (full contract in the
# roxygen block above). Sends a single SetDataValidationRequest via the
# Sheets batchUpdate endpoint.
range_add_validation <- function(ss,
                                 sheet = NULL,
                                 range = NULL,
                                 rule) {
  ssid <- as_sheets_id(ss)
  # Validate inputs early, before any API traffic.
  maybe_sheet(sheet)
  check_range(range)
  if (!is.null(rule)) {
    stopifnot(inherits(rule, "googlesheets4_schema_DataValidationRule"))
  }
  # Fetch spreadsheet metadata (sheet names, named ranges) for range lookup.
  x <- gs4_get(ssid)
  gs4_bullets(c(v = "Editing {.s_sheet {x$name}}."))
  # determine (work)sheet ------------------------------------------------------
  range_spec <- as_range_spec(
    range,
    sheet = sheet,
    sheets_df = x$sheets, nr_df = x$named_ranges
  )
  # Fall back to the first visible sheet when no sheet was resolved.
  range_spec$sheet_name <- range_spec$sheet_name %||% first_visible_name(x$sheets)
  s <- lookup_sheet(range_spec$sheet_name, sheets_df = x$sheets)
  gs4_bullets(c(v = "Editing sheet {.w_sheet {range_spec$sheet_name}}."))
  # form batch update request --------------------------------------------------
  sdv_req <- list(setDataValidation = new(
    "SetDataValidationRequest",
    range = as_GridRange(range_spec),
    rule = rule
  ))
  # do it ----------------------------------------------------------------------
  req <- request_generate(
    "sheets.spreadsheets.batchUpdate",
    params = list(
      spreadsheetId = ssid,
      requests = list(sdv_req)
    )
  )
  resp_raw <- request_make(req)
  gargle::response_process(resp_raw)
  # Return the spreadsheet id invisibly so the call pipes cleanly.
  invisible(ssid)
}
# helpers ----
# Construct a BooleanCondition schema object for data validation.
# `type` must be one of the enum values declared in the schema; the optional
# `values` become conditionValue entries, coerced to character.
new_BooleanCondition <- function(type = "NOT_BLANK", values = NULL) {
  out <- new("BooleanCondition", type = type)
  # TODO: build enum checking into our schema-based construction
  schema <- attr(out, "schema")
  enum <- schema$enum[[which(schema$property == "type")]]
  stopifnot(type %in% enum$enum)
  # Conditions such as NOT_BLANK take no values; return early.
  if (length(values) < 1) {
    return(out)
  }
  # These date conditions require a relativeDate payload, which is not
  # implemented yet -- refuse rather than send a malformed request.
  needs_relative_date <- c(
    "DATE_BEFORE", "DATE_AFTER", "DATE_ON_OR_BEFORE", "DATE_ON_OR_AFTER"
  )
  if (type %in% needs_relative_date) {
    gs4_abort(
      "{.field relativeDate} not yet supported as a {.code conditionValue}.",
      .internal = TRUE
    )
  }
  patch(out, values = map(values, ~ list(userEnteredValue = as.character(.x))))
}
|
# Auto-extracted example for HoRM::poly2form (expands a design matrix based
# on a list of orthogonal polynomials).
library(HoRM)
### Name: poly2form
### Title: Expands Design Matrix Based on Polynomials
### Aliases: poly2form
### Keywords: file
### ** Examples
## Evaluating the order 5 Legendre polynomials.
# NOTE(review): require() returns FALSE instead of erroring when the
# package is missing; library(orthopolynom) would fail fast.
require(orthopolynom)
px <- legendre.polynomials(n = 5, normalized = FALSE)
lx <- poly2form(poly.out = px, x = 1:10)
lx
|
/data/genthat_extracted_code/HoRM/examples/poly2form.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 326
|
r
|
# Auto-extracted example for HoRM::poly2form (expands a design matrix based
# on a list of orthogonal polynomials).
library(HoRM)
### Name: poly2form
### Title: Expands Design Matrix Based on Polynomials
### Aliases: poly2form
### Keywords: file
### ** Examples
## Evaluating the order 5 Legendre polynomials.
# NOTE(review): require() returns FALSE instead of erroring when the
# package is missing; library(orthopolynom) would fail fast.
require(orthopolynom)
px <- legendre.polynomials(n = 5, normalized = FALSE)
lx <- poly2form(poly.out = px, x = 1:10)
lx
|
#' Computes upper identification interval with symmetry constraints.
#'
#' @param X The observed data.
#' @param sampling.ratio Bound on the sampling weights gamma.
#' @param xmin Used to construct histogram representation.
#' @param xmax Used to construct histogram representation.
#' @param buckets Used to construct histogram representation.
#' @param alpha Significance level used for KS bounds.
#'
#' @return A list with two named elements:
#' \itemize{
#'   \item \code{mu.bound:} the upper bound for mu(x).
#'   \item \code{raw:} a data.frame with \code{xvals} (evaluation points),
#'     \code{Xhat} (unweighted empirical CDF of the data) and
#'     \code{Xhat.weighted} (weighted CDF that maximizes mu, subject to
#'     symmetry).
#' }
#'
#' @export bounds.symmetric.internal
bounds.symmetric.internal <- function(X, sampling.ratio = 5,
    xmin = NULL, xmax = NULL, buckets = 1000, alpha = 1/sqrt(length(X))) {
  n <- length(X)
  if (is.null(xmin)) {
    xmin <- min(X)
  }
  if (is.null(xmax)) {
    xmax <- max(X)
  }
  ## BUG FIX: scalar || instead of the vectorized | in a scalar if().
  if (xmin > min(X) || xmax < max(X)) {
    stop("support too short")
  }
  ## Evaluation grid and the unweighted empirical CDF on it.
  xvals <- seq(xmin, xmax, length.out = buckets + 1)
  Xhat <- ecdf(X)(xvals)
  ## Width of the KS-type confidence band at level alpha.
  delta <- qnorm(1 - alpha) * sqrt((1 + sampling.ratio) * (1 + 1/sampling.ratio) / 4 / n)
  ## Candidate symmetry centers: a quantile grid feasible under the
  ## sampling-ratio bound.
  center.candidates <- quantile(X, seq(1/sampling.ratio/2, 1 - 1/sampling.ratio/2, length.out = 10))
  ## hajek.constrained.symmetric() is defined elsewhere in the package.
  Xhat.candidates <- lapply(center.candidates, function(center) {
    hajek.constrained.symmetric(Xhat, xvals, sampling.ratio, center, delta)
  })
  ## Mean implied by each candidate CDF: sum of x * dF(x).
  ## (Renamed the lambda argument -- the original shadowed the outer `Xhat`;
  ## vapply() replaces sapply() for a guaranteed numeric result.)
  mu.bound <- vapply(Xhat.candidates, function(cdf) {
    sum(xvals * diff(c(0, cdf)))
  }, numeric(1))
  opt.idx <- which.max(mu.bound)
  list(mu.bound = mu.bound[opt.idx],
       raw = data.frame(
         xvals = xvals,
         Xhat = Xhat,
         Xhat.weighted = Xhat.candidates[[opt.idx]]
       ))
}
|
/R/symmetric.R
|
no_license
|
jrzubizarreta/scbounds
|
R
| false
| false
| 1,811
|
r
|
#' Computes upper identification interval with symmetry constraints.
#'
#' @param X The observed data.
#' @param sampling.ratio Bound on the sampling weights gamma.
#' @param xmin Used to construct histogram representation.
#' @param xmax Used to construct histogram representation.
#' @param buckets Used to construct histogram representation.
#' @param alpha Significance level used for KS bounds.
#'
#' @return A list with two named elements:
#' \itemize{
#'   \item \code{mu.bound:} the upper bound for mu(x).
#'   \item \code{raw:} a data.frame with \code{xvals} (evaluation points),
#'     \code{Xhat} (unweighted empirical CDF of the data) and
#'     \code{Xhat.weighted} (weighted CDF that maximizes mu, subject to
#'     symmetry).
#' }
#'
#' @export bounds.symmetric.internal
bounds.symmetric.internal <- function(X, sampling.ratio = 5,
    xmin = NULL, xmax = NULL, buckets = 1000, alpha = 1/sqrt(length(X))) {
  n <- length(X)
  if (is.null(xmin)) {
    xmin <- min(X)
  }
  if (is.null(xmax)) {
    xmax <- max(X)
  }
  ## BUG FIX: scalar || instead of the vectorized | in a scalar if().
  if (xmin > min(X) || xmax < max(X)) {
    stop("support too short")
  }
  ## Evaluation grid and the unweighted empirical CDF on it.
  xvals <- seq(xmin, xmax, length.out = buckets + 1)
  Xhat <- ecdf(X)(xvals)
  ## Width of the KS-type confidence band at level alpha.
  delta <- qnorm(1 - alpha) * sqrt((1 + sampling.ratio) * (1 + 1/sampling.ratio) / 4 / n)
  ## Candidate symmetry centers: a quantile grid feasible under the
  ## sampling-ratio bound.
  center.candidates <- quantile(X, seq(1/sampling.ratio/2, 1 - 1/sampling.ratio/2, length.out = 10))
  ## hajek.constrained.symmetric() is defined elsewhere in the package.
  Xhat.candidates <- lapply(center.candidates, function(center) {
    hajek.constrained.symmetric(Xhat, xvals, sampling.ratio, center, delta)
  })
  ## Mean implied by each candidate CDF: sum of x * dF(x).
  ## (Renamed the lambda argument -- the original shadowed the outer `Xhat`;
  ## vapply() replaces sapply() for a guaranteed numeric result.)
  mu.bound <- vapply(Xhat.candidates, function(cdf) {
    sum(xvals * diff(c(0, cdf)))
  }, numeric(1))
  opt.idx <- which.max(mu.bound)
  list(mu.bound = mu.bound[opt.idx],
       raw = data.frame(
         xvals = xvals,
         Xhat = Xhat,
         Xhat.weighted = Xhat.candidates[[opt.idx]]
       ))
}
|
# Build a U-Net-style segmentation model on a frozen VGG16 encoder (R keras).
library(keras)
#load vgg16 as base
# ImageNet weights, classifier head removed, fixed 224x224 RGB input.
conv_base <- application_vgg16(weights = "imagenet", include_top = FALSE, input_shape = c (224,224,3))
# optionally freeze to prevent new training
freeze_weights(conv_base)
#create first part of own model only consisting of the first few layers from vgg16 + a dropout for preventing overfitting
# NOTE(review): layers[[7]] / [[6]] / [[3]] index VGG16 layers by position --
# confirm these are the intended block outputs for the skip connections.
test_model <- conv_base$layers[[7]]$output %>% layer_dropout(rate = 0.5)
conclayer1 <- conv_base$layers[[6]]$output
conclayer2 <- conv_base$layers[[3]]$output
# add the second part of 'U' for segmentation (decoder path)
#conv
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 256, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 256, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#up-convolution 1 (doubles spatial resolution)
test_model <- layer_conv_2d_transpose(test_model, dtype = "float32", filters = 128, kernel_size = 2, strides = 2, padding = "same", data_format = "channels_last", activation = "linear")
# concatenation 1 (skip connection from the encoder)
test_model <- layer_concatenate(list(conclayer1, test_model))
# convolution
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 128, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 128, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#up-convolution 2
test_model <- layer_conv_2d_transpose(test_model, dtype = "float32", filters = 64, kernel_size = 2, strides = 2, padding = "same", data_format = "channels_last", activation = "linear")
# concatenation 2
test_model <- layer_concatenate(list(conclayer2, test_model))
#final convolution
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#output: 1-channel sigmoid map for binary per-pixel segmentation
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 1, kernel_size = 1, padding = "valid", data_format = "channels_last", activation = "sigmoid")
#add input and create model
test_model <- keras_model(inputs = conv_base$input, outputs = test_model)
test_model
#compile
# NOTE(review): newer keras uses learning_rate= instead of the deprecated
# lr= argument -- verify against the installed package version.
test_model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr = 2e-5),
  metrics = c("accuracy")
)
######smaller version of that model with only one max pool (here I add dropout after the conv. layer in order to keep these layers trained as in vgg)####
# this time I use the first 6 layers of vgg16, I do not add own conv layers, but use layers
# 5 and 6, and add dropout afterwards
# NOTE(review): this reassigns test_model, discarding the model built above.
test_model <- conv_base$layers[[6]]$output %>% layer_dropout(rate = 0.5)
conclayer <- conv_base$layers[[3]]$output
#up-convolution
test_model <- layer_conv_2d_transpose(test_model, dtype = "float32", filters = 64, kernel_size = 2, strides = 2, padding = "same", data_format = "channels_last", activation = "linear")
# concatenation 1
test_model <- layer_concatenate(list(conclayer, test_model))
# convolution
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#output
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 1, kernel_size = 1, padding = "valid", data_format = "channels_last", activation = "sigmoid")
#add input and create model
test_model <- keras_model(inputs = conv_base$input, outputs = test_model)
test_model
|
/create_own_unet.R
|
no_license
|
DaChro/cannons_at_marmots
|
R
| false
| false
| 4,220
|
r
|
# Build a U-Net-style segmentation model on a frozen VGG16 encoder (R keras).
library(keras)
#load vgg16 as base
# ImageNet weights, classifier head removed, fixed 224x224 RGB input.
conv_base <- application_vgg16(weights = "imagenet", include_top = FALSE, input_shape = c (224,224,3))
# optionally freeze to prevent new training
freeze_weights(conv_base)
#create first part of own model only consisting of the first few layers from vgg16 + a dropout for preventing overfitting
# NOTE(review): layers[[7]] / [[6]] / [[3]] index VGG16 layers by position --
# confirm these are the intended block outputs for the skip connections.
test_model <- conv_base$layers[[7]]$output %>% layer_dropout(rate = 0.5)
conclayer1 <- conv_base$layers[[6]]$output
conclayer2 <- conv_base$layers[[3]]$output
# add the second part of 'U' for segmentation (decoder path)
#conv
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 256, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 256, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#up-convolution 1 (doubles spatial resolution)
test_model <- layer_conv_2d_transpose(test_model, dtype = "float32", filters = 128, kernel_size = 2, strides = 2, padding = "same", data_format = "channels_last", activation = "linear")
# concatenation 1 (skip connection from the encoder)
test_model <- layer_concatenate(list(conclayer1, test_model))
# convolution
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 128, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 128, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#up-convolution 2
test_model <- layer_conv_2d_transpose(test_model, dtype = "float32", filters = 64, kernel_size = 2, strides = 2, padding = "same", data_format = "channels_last", activation = "linear")
# concatenation 2
test_model <- layer_concatenate(list(conclayer2, test_model))
#final convolution
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#output: 1-channel sigmoid map for binary per-pixel segmentation
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 1, kernel_size = 1, padding = "valid", data_format = "channels_last", activation = "sigmoid")
#add input and create model
test_model <- keras_model(inputs = conv_base$input, outputs = test_model)
test_model
#compile
# NOTE(review): newer keras uses learning_rate= instead of the deprecated
# lr= argument -- verify against the installed package version.
test_model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr = 2e-5),
  metrics = c("accuracy")
)
######smaller version of that model with only one max pool (here I add dropout after the conv. layer in order to keep these layers trained as in vgg)####
# this time I use the first 6 layers of vgg16, I do not add own conv layers, but use layers
# 5 and 6, and add dropout afterwards
# NOTE(review): this reassigns test_model, discarding the model built above.
test_model <- conv_base$layers[[6]]$output %>% layer_dropout(rate = 0.5)
conclayer <- conv_base$layers[[3]]$output
#up-convolution
test_model <- layer_conv_2d_transpose(test_model, dtype = "float32", filters = 64, kernel_size = 2, strides = 2, padding = "same", data_format = "channels_last", activation = "linear")
# concatenation 1
test_model <- layer_concatenate(list(conclayer, test_model))
# convolution
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 64, kernel_size = 3, padding = "same", data_format = "channels_last", activation = "relu", kernel_initializer = "VarianceScaling" )
#output
test_model <- layer_conv_2d(test_model, dtype = "float32", filters = 1, kernel_size = 1, padding = "valid", data_format = "channels_last", activation = "sigmoid")
#add input and create model
test_model <- keras_model(inputs = conv_base$input, outputs = test_model)
test_model
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fe.prov.R
\name{fe.prov}
\alias{fe.prov}
\title{Fit logistic fixed-effect model with high-dimensional predictors}
\usage{
fe.prov(data, Y.char, Z.char, prov.char, tol = 1e-05, null = "median")
}
\arguments{
\item{data}{prepared \code{data.frame}. Use \code{\link{fe.data.prep}} to prepare the raw data}
\item{Y.char}{name of the response variable from \code{data} as a character string}
\item{Z.char}{names of covariates from \code{data} as vector of character strings}
\item{prov.char}{name of provider IDs variable as a character string}
\item{tol}{tolerance level for convergence. Default is \code{1e-5}}
\item{null}{use median for null comparison}
}
\value{
An object of class \code{fe.prov}, which is just a \code{List} object with the following named elements:
\itemize{
\item \code{beta:} a vector of fixed effect estimates
\item \code{Obs:} a vector of responses for included providers
\item \code{Exp:} a vector of expected probabilities of readmission within 30 days of discharge
\item \code{iter:} number of iterations needed for convergence
\item \code{beta.max.diff:} value of the stopping criterion
\item \code{df.prov:}
}
\code{df.prov} is a \code{data.frame} of provider-level information with the following items:
\itemize{
\item \code{Obs:} provider-level observed number of readmissions within 30 days
\item \code{Exp:} expected number of readmissions within 30 days
\item \code{SRR:} standardized readmission ratios for each hospital
\item \code{gamma:} a vector of provider effect estimates for included hospitals
}
}
\description{
\code{fe.prov} fits a fixed-effect logistic model using structured profile
likelihood algorithm. Standardized readmission ratios (SRRs) are also computed.
Go to \href{https://github.com/umich-biostatistics/FEprovideR}{Github} for
a tutorial.
}
\examples{
# Name input variables and other parameters
# a small positive number specifying stopping
# criterion of Newton-Raphson algorithm
tol <- 1e-5
Y.char <- 'Y'
prov.char <- 'prov.ID'
Z.char <- paste0('z', 1:3)
data(hospital_prepared) # built-in data set
fe.ls <- fe.prov(hospital_prepared, Y.char, Z.char, prov.char, tol) # model fitting
}
\references{
He, K., Kalbfleisch, J.D., Li, Y. and Li, Y., 2013. Evaluating hospital
readmission rates in dialysis facilities; adjusting for hospital effects. Lifetime data
analysis, 19(4), pp.490-512.
}
\seealso{
\code{\link{fe.data.prep}}, \code{\link{test.fe.prov}},
\code{\link{funnel.SRR}}, \code{\link{confint.fe.prov}}
}
|
/man/fe.prov.Rd
|
no_license
|
cran/FEprovideR
|
R
| false
| true
| 2,626
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fe.prov.R
\name{fe.prov}
\alias{fe.prov}
\title{Fit logistic fixed-effect model with high-dimensional predictors}
\usage{
fe.prov(data, Y.char, Z.char, prov.char, tol = 1e-05, null = "median")
}
\arguments{
\item{data}{prepared \code{data.frame}. Use \code{\link{fe.data.prep}} to prepare the raw data}
\item{Y.char}{name of the response variable from \code{data} as a character string}
\item{Z.char}{names of covariates from \code{data} as vector of character strings}
\item{prov.char}{name of provider IDs variable as a character string}
\item{tol}{tolerance level for convergence. Default is \code{1e-5}}
\item{null}{use median for null comparison}
}
\value{
An object of class \code{fe.prov}, which is just a \code{List} object with the following named elements:
\itemize{
\item \code{beta:} a vector of fixed effect estimates
\item \code{Obs:} a vector of responses for included providers
\item \code{Exp:} a vector of expected probabilities of readmission within 30 days of discharge
\item \code{iter:} number of iterations needed for convergence
\item \code{beta.max.diff:} value of the stopping criterion
\item \code{df.prov:}
}
\code{df.prov} is a \code{data.frame} of provider-level information with the following items:
\itemize{
\item \code{Obs:} provider-level observed number of readmissions within 30 days
\item \code{Exp:} expected number of readmissions within 30 days
\item \code{SRR:} standardized readmission ratios for each hospital
\item \code{gamma:} a vector of provider effect estimates for included hospitals
}
}
\description{
\code{fe.prov} fits a fixed-effect logistic model using structured profile
likelihood algorithm. Standardized readmission ratios (SRRs) are also computed.
Go to \href{https://github.com/umich-biostatistics/FEprovideR}{Github} for
a tutorial.
}
\examples{
# Name input variables and other parameters
# a small positive number specifying stopping
# criterion of Newton-Raphson algorithm
tol <- 1e-5
Y.char <- 'Y'
prov.char <- 'prov.ID'
Z.char <- paste0('z', 1:3)
data(hospital_prepared) # built-in data set
fe.ls <- fe.prov(hospital_prepared, Y.char, Z.char, prov.char, tol) # model fitting
}
\references{
He, K., Kalbfleisch, J.D., Li, Y. and Li, Y., 2013. Evaluating hospital
readmission rates in dialysis facilities; adjusting for hospital effects. Lifetime data
analysis, 19(4), pp.490-512.
}
\seealso{
\code{\link{fe.data.prep}}, \code{\link{test.fe.prov}},
\code{\link{funnel.SRR}}, \code{\link{confint.fe.prov}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Filter.R
\docType{class}
\name{tiledb_filter-class}
\alias{tiledb_filter-class}
\title{An S4 class for a TileDB filter}
\description{
An S4 class for a TileDB filter
}
\section{Slots}{
\describe{
\item{\code{ptr}}{External pointer to the underlying implementation}
}}
|
/man/tiledb_filter-class.Rd
|
permissive
|
aaronwolen/TileDB-R
|
R
| false
| true
| 348
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Filter.R
\docType{class}
\name{tiledb_filter-class}
\alias{tiledb_filter-class}
\title{An S4 class for a TileDB filter}
\description{
An S4 class for a TileDB filter
}
\section{Slots}{
\describe{
\item{\code{ptr}}{External pointer to the underlying implementation}
}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_taxonomy_table.R
\name{mk_table}
\alias{mk_table}
\title{Helper function to create a taxonomy table}
\usage{
mk_table(intable, taxon_ranks)
}
\description{
Helper function to create a taxonomy table
}
|
/man/mk_table.Rd
|
no_license
|
hjfan527/MetaScope
|
R
| false
| true
| 288
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_taxonomy_table.R
\name{mk_table}
\alias{mk_table}
\title{Helper function to create a taxonomy table}
\usage{
mk_table(intable, taxon_ranks)
}
\description{
Helper function to create a taxonomy table
}
|
#---------------------------------------------#
#---# Mass Shootings in the United States #---#
#---------------------------------------------#
library("shinydashboard")
library("dplyr")
library("lubridate")
library("leaflet")
library("magrittr")
library("forcats")
library("ggplot2")
library("tidyr")
library("shinyWidgets")
library("DT")
library("viridis")
library("plotly")
library("stringr")
#---# original data #---#
# Raw incident records, one row per mass shooting event.
shootings <- read.csv('mass-shootingsupdate.csv')
#---# clean the data #---#
# Normalize messy factor levels coming from the source data (stray newlines,
# blank spaces, inconsistent capitalization), derive `state` from the last
# word of `location`, and expand the free-text `weapon_type` field into one
# 0/1 indicator column per weapon category. Keep only the columns the app uses.
mass_shootings <- shootings %>%
  mutate(date = mdy(date)
         # Recode shooting-site levels containing embedded newlines.
         , site = fct_recode(location.1
                             , "Workplace" = "\nWorkplace"
                             , "Other" = "Other\n")
         , race = gsub(" ", "", race, fixed = TRUE) # eliminate blank spaces
         , race = fct_recode(race
                             , "unclear" = "-"
                             , "Black" = "black"
                             , "White" = "white")
         # State is the last word of the "City, State" location string.
         , state = word(location, -1)
         , mental_health_issues = gsub(" ", "", prior_signs_mental_health_issues, fixed = TRUE) # eliminate blank spaces
         , mental_health_issues = fct_recode(mental_health_issues
                                             , "Unclear" = "-"
                                             , "Yes" ="yes"
                                             , "TBD" = "Unknown")
         # First word of the free-text answer ("Yes", "No", ...).
         , legal_weapons = word(weapons_obtained_legally, 1)
         , legal_weapons = fct_recode(legal_weapons
                                      , "Yes" = "\nYes"
                                      , "Unknown" = "-"
                                      , "Unknown" = "TBD"
                                      , "Unknown" = "Kelley")
         , gender = fct_recode(gender
                               , "M" = "Male"
                               , "F" = "Female"
                               , "Other" = "-"
                               , "M & F" = "Male & Female")
         , age_of_shooter = as.numeric(age_of_shooter)
         # Ages below 10 get +10 -- presumably repairing truncated/typo ages
         # in the source data; TODO confirm against the raw CSV.
         , age_of_shooter = ifelse(age_of_shooter < 10, age_of_shooter + 10, age_of_shooter )
         , weapon_type = fct_recode(weapon_type
                                    , "handgun" = "Handgun"
                                    , "shotgun" = "Shotgun"
                                    , "rifle" = "Rifle")
         # Indicator columns: a shooter may carry several weapon types at once.
         , handgun = ifelse(str_detect(weapon_type, "handgun"), 1, 0)
         , rifle = ifelse(str_detect(weapon_type, "rifle"), 1, 0)
         , revolver = ifelse(str_detect(weapon_type, "revolver"), 1, 0)
         , shotgun = ifelse(str_detect(weapon_type, "shotgun"), 1, 0)) %>%
  select('date','location', 'state', 'site', 'fatalities', 'injured', 'total_victims'
         , 'handgun', 'rifle', 'revolver', 'shotgun', 'weapon_type', 'age_of_shooter'
         , 'mental_health_issues', 'legal_weapons', 'race', 'gender', 'latitude', 'longitude', 'summary')
#----------------#
#---# HEADER #---#
#----------------#
header <- dashboardHeader(title ="Mass Shootings in the USA", titleWidth = 350)
#-----------------#
#---# SIDEBAR #---#
#-----------------#
# Navigation menu (one entry per tab of the body) plus two global inputs that
# the server uses to filter the data: a minimum-fatalities slider and a date
# range. Input IDs: nb_fatalities, date_range, show_about.
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem("Geographic location"
             , tabName = "map"
             , icon = icon("globe-americas")
    )
    , menuItem("Location"
               , tabName = "location"
               , icon = icon("map-pin")
    )
    , menuItem("Perpetrators's profile"
               , tabName = "profile"
               , icon = icon("user-alt")
    )
    , menuItem("Type of weapon"
               , tabName = "weapon"
               , icon = icon("unlock-alt")
    )
    , menuItem("Data"
               , tabName = "data"
               , icon = icon("table")
    )
    # Slider bounds come from the observed range of fatalities in the data.
    , sliderInput(inputId = 'nb_fatalities'
                  , label = 'Minimum Fatalities'
                  , min = min(mass_shootings$fatalities)
                  , max = max(mass_shootings$fatalities)
                  , value = min(mass_shootings$fatalities)
                  , step = 1
    )
    # Date range defaults to the full span of the data set.
    , dateRangeInput(inputId = 'date_range'
                     , label = 'Select Date'
                     , start = min(mass_shootings$date)
                     , end = max(mass_shootings$date)
    )
    , br()
    , actionButton(inputId = 'show_about' , label = 'About')
  )
)
#--------------#
#---# BODY #---#
#--------------#
# One tabItem per sidebar entry. Output IDs referenced here (map, valueBoxes,
# plotly plots, DT table, download button) must match render* calls in the
# server function (defined elsewhere).
body <- dashboardBody(
  tabItems(
    #-----------------#
    #---# MAP TAB #---#
    #-----------------#
    # Leaflet map of incidents with three summary value boxes on top.
    tabItem(tabName = "map"
            , textOutput("summary_inf")
            , fluidRow(valueBoxOutput("victimsBox")
                       , valueBoxOutput("FatalitiesBox")
                       , valueBoxOutput("InjuredBox"))
            , leaflet::leafletOutput('map',width = "100%", height = 600)
            # Inline CSS styling the summary text above the map.
            , tags$head(tags$style("#summary_inf{color: black;
                                 font-size: 22px;
                                 font-style: bold;
                                 }"
            )
            )
    )
    #----------------------#
    #---# LOCATION TAB #---#
    #----------------------#
    # Where attacks happen: site type, top-10 states, and a yearly time series.
    , tabItem(tabName = "location"
              , fluidRow(box(em("The most common locations of public shooting are retail
                                establishments such as restaurants and stores.")
                             , br()
                             , strong("shooting location.")
                             , plotlyOutput('shooting_location', height = 300))
                         , box(strong("The 10 most affected states from suffering attacks.")
                               , plotlyOutput('shooting_state', height = 300))
              )
              , fluidRow(box(width = 12
                             , strong("The number of attacks has risen dramatically,
                                      and many of the deadliest shootings have occurred within the past few years.")
                             , plotlyOutput('shooting_by_year', height = 300)))
    )
    #---------------------#
    #---# PROFILE TAB #---#
    #---------------------#
    # Who the perpetrators are: race, age-by-race, and mental-health history,
    # each paired with a summary value box.
    , tabItem(tabName = "profile"
              , fluidRow(column(width = 12
                                , box(strong("Race of the shooters."), plotlyOutput('race_shooters', height = 300))
                                , box(strong("Age by race of the shooters."), plotOutput('age_by_race', height = 300))
              )
              , column(width = 12
                       , box(em('Some of these mass shooters were known to have violent tendencies or criminal pasts.')
                             , br()
                             , strong("Did the shooter had mental health issues?")
                             , plotlyOutput('mental_health', height = 300)
                       )
                       , valueBoxOutput("mental_health_info", width = 6)
                       , valueBoxOutput("race_shooters_info", width = 6)
                       , valueBoxOutput("median_age_info", width = 6))
              )
    )
    #--------------------#
    #---# WEAPON TAB #---#
    #--------------------#
    # How weapons were obtained and what types were carried.
    , tabItem(tabName = "weapon"
              , fluidRow(
                column(width = 12
                       , box(strong("Did weapons were obtained legally?")
                             , plotlyOutput('legal_weapon', height = 300))
                       , box(strong("Type of weapon ")
                             , plotlyOutput('type_weapon', height = 300))
                )
                , column(width = 12
                         , box(em('Shooters often carried more than one weapon.')
                               , br()
                               , strong("How many types of weapons did the shooter possed?")
                               , plotlyOutput('several_types', height = 300))
                         , valueBoxOutput("legal_weapon_info", width = 6)
                         , valueBoxOutput("type_weapon_info", width = 6)
                         , valueBoxOutput("several_types_info", width = 6))
              )
    )
    #------------------#
    #---# DATA TAB #---#
    #------------------#
    # Interactive table of the filtered data plus a CSV download button.
    , tabItem(tabName = "data"
              , downloadButton(outputId = "download_data", label = "Download")
              , br()
              , br()
              , DT::DTOutput("table")
              , actionButton(inputId = 'learn_more', label = 'Learn more')
              , tags$style(" #download_data {
                           /* Change the background color of the download button to orange. */
                           background: orange;}"
              )
    )
  )
)
#-----------------#
#---# UI call #---#
#-----------------#
# Assemble the three dashboard pieces into the final UI object.
ui <- dashboardPage(skin= 'purple'
                    , header = header
                    , sidebar=sidebar
                    , body = body)
|
/Shiny App/ui.R
|
no_license
|
DanaeMirel/usa_shootings_dashboard
|
R
| false
| false
| 8,140
|
r
|
#---------------------------------------------#
#---# Mass Shootings in the United States #---#
#---------------------------------------------#
library("shinydashboard")
library("dplyr")
library("lubridate")
library("leaflet")
library("magrittr")
library("forcats")
library("ggplot2")
library("tidyr")
library("shinyWidgets")
library("DT")
library("viridis")
library("plotly")
library("stringr")
#---# original data #---#
shootings <- read.csv('mass-shootingsupdate.csv')
#---# clean the data #---#
mass_shootings <- shootings %>%
mutate(date = mdy(date)
, site = fct_recode(location.1
, "Workplace" = "\nWorkplace"
, "Other" = "Other\n")
, race = gsub(" ", "", race, fixed = TRUE) # eliminate blank spaces
, race = fct_recode(race
, "unclear" = "-"
, "Black" = "black"
, "White" = "white")
, state = word(location, -1)
, mental_health_issues = gsub(" ", "", prior_signs_mental_health_issues, fixed = TRUE) # eliminate blank spaces
, mental_health_issues = fct_recode(mental_health_issues
, "Unclear" = "-"
, "Yes" ="yes"
, "TBD" = "Unknown")
, legal_weapons = word(weapons_obtained_legally, 1)
, legal_weapons = fct_recode(legal_weapons
, "Yes" = "\nYes"
, "Unknown" = "-"
, "Unknown" = "TBD"
, "Unknown" = "Kelley")
, gender = fct_recode(gender
, "M" = "Male"
, "F" = "Female"
, "Other" = "-"
, "M & F" = "Male & Female")
, age_of_shooter = as.numeric(age_of_shooter)
, age_of_shooter = ifelse(age_of_shooter < 10, age_of_shooter + 10, age_of_shooter )
, weapon_type = fct_recode(weapon_type
, "handgun" = "Handgun"
, "shotgun" = "Shotgun"
, "rifle" = "Rifle")
, handgun = ifelse(str_detect(weapon_type, "handgun"), 1, 0)
, rifle = ifelse(str_detect(weapon_type, "rifle"), 1, 0)
, revolver = ifelse(str_detect(weapon_type, "revolver"), 1, 0)
, shotgun = ifelse(str_detect(weapon_type, "shotgun"), 1, 0)) %>%
select('date','location', 'state', 'site', 'fatalities', 'injured', 'total_victims'
, 'handgun', 'rifle', 'revolver', 'shotgun', 'weapon_type', 'age_of_shooter'
, 'mental_health_issues', 'legal_weapons', 'race', 'gender', 'latitude', 'longitude', 'summary')
#----------------#
#---# HEADER #---#
#----------------#
header <- dashboardHeader(title ="Mass Shootings in the USA", titleWidth = 350)
#-----------------#
#---# SIDEBAR #---#
#-----------------#
sidebar <- dashboardSidebar(
sidebarMenu(
menuItem("Geographic location"
, tabName = "map"
, icon = icon("globe-americas")
)
, menuItem("Location"
, tabName = "location"
, icon = icon("map-pin")
)
, menuItem("Perpetrators's profile"
, tabName = "profile"
, icon = icon("user-alt")
)
, menuItem("Type of weapon"
, tabName = "weapon"
, icon = icon("unlock-alt")
)
, menuItem("Data"
, tabName = "data"
, icon = icon("table")
)
, sliderInput(inputId = 'nb_fatalities'
, label = 'Minimum Fatalities'
, min = min(mass_shootings$fatalities)
, max = max(mass_shootings$fatalities)
, value = min(mass_shootings$fatalities)
, step = 1
)
, dateRangeInput(inputId = 'date_range'
, label = 'Select Date'
, start = min(mass_shootings$date)
, end = max(mass_shootings$date)
)
, br()
, actionButton(inputId = 'show_about' , label = 'About')
)
)
#--------------#
#---# BODY #---#
#--------------#
body <- dashboardBody(
tabItems(
#-----------------#
#---# MAP TAB #---#
#-----------------#
tabItem(tabName = "map"
, textOutput("summary_inf")
, fluidRow(valueBoxOutput("victimsBox")
, valueBoxOutput("FatalitiesBox")
, valueBoxOutput("InjuredBox"))
, leaflet::leafletOutput('map',width = "100%", height = 600)
, tags$head(tags$style("#summary_inf{color: black;
font-size: 22px;
font-style: bold;
}"
)
)
)
#----------------------#
#---# LOCATION TAB #---#
#----------------------#
, tabItem(tabName = "location"
, fluidRow(box(em("The most common locations of public shooting are retail
establishments such as restaurants and stores.")
, br()
, strong("shooting location.")
, plotlyOutput('shooting_location', height = 300))
, box(strong("The 10 most affected states from suffering attacks.")
, plotlyOutput('shooting_state', height = 300))
)
, fluidRow(box(width = 12
, strong("The number of attacks has risen dramatically,
and many of the deadliest shootings have occurred within the past few years.")
, plotlyOutput('shooting_by_year', height = 300)))
)
#---------------------#
#---# PROFILE TAB #---#
#---------------------#
, tabItem(tabName = "profile"
, fluidRow(column(width = 12
, box(strong("Race of the shooters."), plotlyOutput('race_shooters', height = 300))
, box(strong("Age by race of the shooters."), plotOutput('age_by_race', height = 300))
)
, column(width = 12
, box(em('Some of these mass shooters were known to have violent tendencies or criminal pasts.')
, br()
, strong("Did the shooter had mental health issues?")
, plotlyOutput('mental_health', height = 300)
)
, valueBoxOutput("mental_health_info", width = 6)
, valueBoxOutput("race_shooters_info", width = 6)
, valueBoxOutput("median_age_info", width = 6))
)
)
#--------------------#
#---# WEAPON TAB #---#
#--------------------#
, tabItem(tabName = "weapon"
, fluidRow(
column(width = 12
, box(strong("Did weapons were obtained legally?")
, plotlyOutput('legal_weapon', height = 300))
, box(strong("Type of weapon ")
, plotlyOutput('type_weapon', height = 300))
)
, column(width = 12
, box(em('Shooters often carried more than one weapon.')
, br()
, strong("How many types of weapons did the shooter possed?")
, plotlyOutput('several_types', height = 300))
, valueBoxOutput("legal_weapon_info", width = 6)
, valueBoxOutput("type_weapon_info", width = 6)
, valueBoxOutput("several_types_info", width = 6))
)
)
#------------------#
#---# DATA TAB #---#
#------------------#
, tabItem(tabName = "data"
, downloadButton(outputId = "download_data", label = "Download")
, br()
, br()
, DT::DTOutput("table")
, actionButton(inputId = 'learn_more', label = 'Learn more')
, tags$style(" #download_data {
/* Change the background color of the download button to orange. */
background: orange;}"
)
)
)
)
#-----------------#
#---# UI call #---#
#-----------------#
ui <- dashboardPage(skin= 'purple'
, header = header
, sidebar=sidebar
, body = body)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/window.r
\name{create_windows}
\alias{create_windows}
\title{Bin a Dataframe of Genomic Ranges into Windows}
\usage{
create_windows(.data, width)
}
\arguments{
\item{.data}{Dataframe with columns \code{chrom}, \code{start}, and
\code{end}.}
\item{width}{Width of window in base pairs.}
}
\description{
Simple strand-ignoring binning procedure for genomic ranges stored in a
dataframe. The dataframe must have columns \code{chrom}, \code{start},
\code{end}.
}
|
/man/create_windows.Rd
|
no_license
|
vsbuffalo/gplyr
|
R
| false
| true
| 565
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/window.r
\name{create_windows}
\alias{create_windows}
\title{Bin a Dataframe of Genomic Ranges into Windows}
\usage{
create_windows(.data, width)
}
\arguments{
\item{.data}{Dataframe with columns \code{chrom}, \code{start}, and
\code{end}.}
\item{width}{Width of window in base pairs.}
}
\description{
Simple strand-ignoring binning procedure for genomic ranges stored in a
dataframe. The dataframe must have columns \code{chrom}, \code{start},
\code{end}.
}
|
# Unit tests (testthat 3e) for url_parse(), url_build(), and query_parse().
test_that("can parse special cases", {
  # Scheme-relative URL: no scheme, host only.
  url <- url_parse("//google.com")
  expect_equal(url$scheme, NULL)
  expect_equal(url$hostname, "google.com")
  # file: URL with an empty authority and an absolute path.
  url <- url_parse("file:///tmp")
  expect_equal(url$scheme, "file")
  expect_equal(url$path, "/tmp")
  # Bare path with neither scheme nor host.
  url <- url_parse("/")
  expect_equal(url$scheme, NULL)
  expect_equal(url$path, "/")
})
test_that("can round trip urls", {
  # url_build(url_parse(x)) must reproduce x exactly across a spread of URL
  # shapes: paths, queries, ports, fragments, userinfo, and exotic schemes.
  urls <- list(
    "/",
    "//google.com",
    "file:///",
    "http://google.com/",
    "http://google.com/path",
    "http://google.com/path?a=1&b=2",
    "http://google.com:80/path?a=1&b=2",
    "http://google.com:80/path?a=1&b=2#frag",
    "http://user@google.com:80/path?a=1&b=2",
    "http://user:pass@google.com:80/path?a=1&b=2",
    "svn+ssh://my.svn.server/repo/trunk"
  )
  expect_equal(map(urls, ~ url_build(url_parse(.x))), urls)
})
test_that("can print all url details", {
  # Snapshot of the print method using a URL with every component populated.
  expect_snapshot(
    url_parse("http://user:pass@example.com:80/path?a=1&b=2#frag")
  )
})
# query -------------------------------------------------------------------
test_that("missing query values become empty strings", {
  # "?q=", "?q", and bare names all normalize to an empty-string value.
  expect_equal(query_parse("?q="), list(q = ""))
  expect_equal(query_parse("?q"), list(q = ""))
  expect_equal(query_parse("?a&q"), list(a = "", q = ""))
})
test_that("empty queries become NULL", {
  expect_equal(query_parse("?"), NULL)
  expect_equal(query_parse(""), NULL)
})
|
/tests/testthat/test-url.R
|
permissive
|
adithirgis/httr2
|
R
| false
| false
| 1,379
|
r
|
test_that("can parse special cases", {
url <- url_parse("//google.com")
expect_equal(url$scheme, NULL)
expect_equal(url$hostname, "google.com")
url <- url_parse("file:///tmp")
expect_equal(url$scheme, "file")
expect_equal(url$path, "/tmp")
url <- url_parse("/")
expect_equal(url$scheme, NULL)
expect_equal(url$path, "/")
})
test_that("can round trip urls", {
urls <- list(
"/",
"//google.com",
"file:///",
"http://google.com/",
"http://google.com/path",
"http://google.com/path?a=1&b=2",
"http://google.com:80/path?a=1&b=2",
"http://google.com:80/path?a=1&b=2#frag",
"http://user@google.com:80/path?a=1&b=2",
"http://user:pass@google.com:80/path?a=1&b=2",
"svn+ssh://my.svn.server/repo/trunk"
)
expect_equal(map(urls, ~ url_build(url_parse(.x))), urls)
})
test_that("can print all url details", {
expect_snapshot(
url_parse("http://user:pass@example.com:80/path?a=1&b=2#frag")
)
})
# query -------------------------------------------------------------------
test_that("missing query values become empty strings", {
expect_equal(query_parse("?q="), list(q = ""))
expect_equal(query_parse("?q"), list(q = ""))
expect_equal(query_parse("?a&q"), list(a = "", q = ""))
})
test_that("empty queries become NULL", {
expect_equal(query_parse("?"), NULL)
expect_equal(query_parse(""), NULL)
})
|
# Jags-Ymet-XmetSsubj-MrobustHierQuadWt.R
# Accompanies the book:
# Kruschke, J. K. (2015). Doing Bayesian Data Analysis, Second Edition:
# A Tutorial with R, JAGS, and Stan. Academic Press / Elsevier.
source("DBDA2E-utilities.R")
#===============================================================================
# Fit a hierarchical robust quadratic regression in JAGS and return the
# posterior sample as a coda mcmc.list.
#
# Each subject j gets its own intercept/slope/quadratic coefficients
# (zbeta0[j], zbeta1[j], zbeta2[j]) drawn from group-level normals; the
# likelihood is a Student-t (robust to outliers) whose scale is multiplied
# by per-case weights.
#
# data   : data.frame holding the response, predictor, subject ID, and
#          (optionally) case weights.
# xName, yName, sName : column names (character strings) of the predictor,
#          response, and subject identifier.
# wName  : optional column name of case weights; when NULL every case gets
#          weight 1.
# numSavedSteps, thinSteps : total saved MCMC steps (across chains) and
#          thinning interval.
# saveName : optional filename stem; when non-NULL the coda sample is saved
#          to "<saveName>Mcmc.Rdata".
# runjagsMethod, nChains : forwarded to run.jags(); the defaults
#          (runjagsMethodDefault, nChainsDefault) come from DBDA2E-utilities.R.
genMCMC = function( data , xName="x" , yName="y" , sName="s" , wName=NULL ,
                    numSavedSteps=10000 , thinSteps = 1 , saveName=NULL ,
                    runjagsMethod=runjagsMethodDefault ,
                    nChains=nChainsDefault ) {
  #-----------------------------------------------------------------------------
  # THE DATA.
  y = data[,yName]
  x = data[,xName]
  s = as.numeric(data[,sName])
  if ( !is.null(wName) ) {
    w = data[,wName]
  } else {
    w = rep(1,length(y))
  }
  # Do some checking that data make sense:
  if ( any( !is.finite(y) ) ) { stop("All y values must be finite.") }
  if ( any( !is.finite(x) ) ) { stop("All x values must be finite.") }
  #Ntotal = length(y)
  # Specify the data in a list, for later shipment to JAGS:
  dataList = list(
    x = x ,
    y = y ,
    s = s ,
    w = w ,
    Nsubj = max(s) # should equal length(unique(s))
  )
  #-----------------------------------------------------------------------------
  # THE MODEL.
  # JAGS standardizes x/y internally (data block) and transforms the
  # coefficients back to the original scale at the end of the model block.
  # NOTE: this is a runtime string passed to JAGS -- do not edit casually.
  modelString = "
  # Standardize the data:
  data {
    Ntotal <- length(y)
    xm <- mean(x)
    ym <- mean(y)
    wm <- mean(w)
    xsd <- sd(x)
    ysd <- sd(y)
    for ( i in 1:length(y) ) {
      zx[i] <- ( x[i] - xm ) / xsd
      zy[i] <- ( y[i] - ym ) / ysd
      zw[i] <- w[i] / wm
    }
  }
  # Specify the model for standardized data:
  model {
    for ( i in 1:Ntotal ) {
      zy[i] ~ dt( zbeta0[s[i]] + zbeta1[s[i]] * zx[i] + zbeta2[s[i]] * zx[i]^2 ,
                  1/(zw[i]*zsigma)^2 , nu )
    }
    for ( j in 1:Nsubj ) {
      zbeta0[j] ~ dnorm( zbeta0mu , 1/(zbeta0sigma)^2 )
      zbeta1[j] ~ dnorm( zbeta1mu , 1/(zbeta1sigma)^2 )
      zbeta2[j] ~ dnorm( zbeta2mu , 1/(zbeta2sigma)^2 )
    }
    # Priors vague on standardized scale:
    zbeta0mu ~ dnorm( 0 , 1/(10)^2 )
    zbeta1mu ~ dnorm( 0 , 1/(10)^2 )
    zbeta2mu ~ dnorm( 0 , 1/(10)^2 )
    zsigma ~ dunif( 1.0E-3 , 1.0E+3 )
    zbeta0sigma ~ dunif( 1.0E-3 , 1.0E+3 )
    zbeta1sigma ~ dunif( 1.0E-3 , 1.0E+3 )
    zbeta2sigma ~ dunif( 1.0E-3 , 1.0E+3 )
    nu <- nuMinusOne+1
    nuMinusOne ~ dexp(1/29.0)
    # Transform to original scale:
    for ( j in 1:Nsubj ) {
      beta2[j] <- zbeta2[j]*ysd/xsd^2
      beta1[j] <- zbeta1[j]*ysd/xsd - 2*zbeta2[j]*xm*ysd/xsd^2
      beta0[j] <- zbeta0[j]*ysd + ym - zbeta1[j]*xm*ysd/xsd + zbeta2[j]*xm^2*ysd/xsd^2
    }
    beta2mu <- zbeta2mu*ysd/xsd^2
    beta1mu <- zbeta1mu*ysd/xsd - 2*zbeta2mu*xm*ysd/xsd^2
    beta0mu <- zbeta0mu*ysd + ym - zbeta1mu*xm*ysd/xsd + zbeta2mu*xm^2*ysd/xsd^2
    sigma <- zsigma * ysd
  }
  " # close quote for modelString
  # Write out modelString to a text file
  writeLines( modelString , con="TEMPmodel.txt" )
  #-----------------------------------------------------------------------------
  # INTIALIZE THE CHAINS.
  # Use lm() to find reasonable coefficients overall, then start all individual
  # units and overall at those values.
  # N.B. THIS DOES NOT ALWAYS WORK AND DOES NOT ALWAYS IMPROVE THE MCMC SAMPLE.
  # IF IT'S A PROBLEM, COMMENT OUT THE inits ARGUMENT IN THE run.jags COMMAND.
  zx = ( x - mean(x) ) / sd(x)
  zxsq = zx^2
  zy = ( y - mean(y) ) / sd(y)
  lmInfo = lm( zy ~ zx + zxsq )
  b0init = lmInfo$coef[1]
  b1init = lmInfo$coef[2]
  b2init = lmInfo$coef[3]
  sigmaInit = sqrt(mean(lmInfo$res^2))
  nuInit = 10 # arbitrary
  initsList = list(
    zsigma=sigmaInit ,
    nuMinusOne=nuInit ,
    zbeta0mu=b0init ,
    zbeta1mu=b1init ,
    zbeta2mu=b2init ,
    zbeta0=rep(b0init,max(s)) ,
    zbeta1=rep(b1init,max(s)) ,
    zbeta2=rep(b2init,max(s)) # other params filled in by JAGS
  )
  #-----------------------------------------------------------------------------
  # RUN THE CHAINS
  # Monitor both original-scale and standardized parameters so downstream
  # plotting/summary code can use either parameterization.
  parameters = c( "beta0" , "beta1" , "beta2" ,
                  "beta0mu" , "beta1mu" , "beta2mu" ,
                  "zbeta0" , "zbeta1" , "zbeta2" ,
                  "zbeta0mu" , "zbeta1mu" , "zbeta2mu" ,
                  "sigma" , "nu" ,
                  "zsigma", "zbeta0sigma" , "zbeta1sigma", "zbeta2sigma" )
  adaptSteps = 1000 # Number of steps to "tune" the samplers
  burnInSteps = 10000
  runJagsOut <- run.jags( method=runjagsMethod ,
                          model="TEMPmodel.txt" ,
                          monitor=parameters ,
                          data=dataList ,
                          inits=initsList ,
                          n.chains=nChains ,
                          adapt=adaptSteps ,
                          burnin=burnInSteps ,
                          sample=ceiling(numSavedSteps/nChains) ,
                          thin=thinSteps ,
                          summarise=FALSE ,
                          plots=FALSE )
  codaSamples = as.mcmc.list( runJagsOut )
  # resulting codaSamples object has these indices:
  #   codaSamples[[ chainIdx ]][ stepIdx , paramIdx ]
  if ( !is.null(saveName) ) {
    save( codaSamples , file=paste(saveName,"Mcmc.Rdata",sep="") )
  }
  return( codaSamples )
} # end function
#===============================================================================
# Summarize every monitored parameter of an MCMC sample.
#
# codaSamples : coda mcmc.list, as returned by genMCMC().
# saveName    : optional filename stem; when non-NULL the summary table is
#               also written to "<saveName>SummaryInfo.csv".
# Returns a matrix with one row per parameter (rownames = parameter names)
# and the summary statistics produced by summarizePost() (DBDA2E-utilities.R)
# as columns.
smryMCMC = function( codaSamples ,
                     saveName=NULL ) {
  mcmcMat = as.matrix(codaSamples,chains=FALSE)
  paramNames = colnames(mcmcMat)
  # Build all rows at once instead of growing the matrix with rbind() inside
  # a loop, which copies the whole object on every iteration (O(n^2)).
  summaryInfo = do.call( rbind ,
                         lapply( paramNames ,
                                 function(pName) summarizePost( mcmcMat[,pName] ) ) )
  rownames(summaryInfo) = paramNames
  if ( !is.null(saveName) ) {
    write.csv( summaryInfo , file=paste(saveName,"SummaryInfo.csv",sep="") )
  }
  return( summaryInfo )
}
#===============================================================================
plotMCMC = function( codaSamples , data ,
xName="x" , yName="y" , sName="s" , wName="w" ,
compValBeta0=NULL , ropeBeta0=NULL ,
compValBeta1=NULL , ropeBeta1=NULL ,
compValSigma=NULL , ropeSigma=NULL ,
showCurve=FALSE , pairsPlot=FALSE ,
saveName=NULL , saveType="jpg" ) {
# showCurve is TRUE or FALSE and indicates whether the posterior should
# be displayed as a histogram (by default) or by an approximate curve.
# pairsPlot is TRUE or FALSE and indicates whether scatterplots of pairs
# of parameters should be displayed.
#-----------------------------------------------------------------------------
y = data[,yName]
x = data[,xName]
s = factor(data[,sName])
nSubj = length(unique(s)) # should be same as max(s)
mcmcMat = as.matrix(codaSamples,chains=TRUE)
chainLength = NROW( mcmcMat )
beta0mu = mcmcMat[,"beta0mu"]
beta1mu = mcmcMat[,"beta1mu"]
beta2mu = mcmcMat[,"beta2mu"]
zbeta0mu = mcmcMat[,"zbeta0mu"]
zbeta1mu = mcmcMat[,"zbeta1mu"]
zbeta2mu = mcmcMat[,"zbeta2mu"]
sigma = mcmcMat[,"sigma"]
nu = mcmcMat[,"nu"]
log10nu = log10(nu)
#-----------------------------------------------------------------------------
if ( pairsPlot ) {
# Plot the parameters pairwise, to see correlations:
openGraph()
nPtToPlot = 1000
plotIdx = floor(seq(1,chainLength,by=chainLength/nPtToPlot))
panel.cor = function(x, y, digits=2, prefix="", cex.cor, ...) {
usr = par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r = (cor(x, y))
txt = format(c(r, 0.123456789), digits=digits)[1]
txt = paste(prefix, txt, sep="")
if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
text(0.5, 0.5, txt, cex=1.25 ) # was cex=cex.cor*r
}
pairs( cbind( beta0mu , beta1mu , beta2mu , sigma , log10nu )[plotIdx,] ,
labels=c( expression(mu[beta*0]) , expression(mu[beta*1]) ,
expression(mu[beta*2]) ,
expression(sigma) , expression(log10(nu)) ) ,
lower.panel=panel.cor , col="skyblue" )
if ( !is.null(saveName) ) {
saveGraph( file=paste(saveName,"PostPairs",sep=""), type=saveType)
}
}
#-----------------------------------------------------------------------------
# Marginal histograms:
# Set up window and layout:
nPtToPlot = 1000
plotIdx = floor(seq(1,chainLength,by=chainLength/nPtToPlot))
openGraph(width=8,height=8)
layout( matrix( 1:9 , nrow=3, byrow=TRUE ) )
par( mar=c(4,4,2.5,0.5) , mgp=c(2.5,0.7,0) )
histInfo = plotPost( beta0mu , cex.lab = 1.75 , showCurve=showCurve ,
compVal=compValBeta0 , ROPE=ropeBeta0 ,
xlab=bquote(mu[beta*0]) , main=paste("Intercept, Group Level") )
histInfo = plotPost( beta1mu , cex.lab = 1.75 , showCurve=showCurve ,
compVal=compValBeta1 , ROPE=ropeBeta1 ,
xlab=bquote(mu[beta*1]) , main=paste("Slope, Group Level") )
histInfo = plotPost( beta2mu , cex.lab = 1.75 , showCurve=showCurve ,
#compVal=compValBeta1 , ROPE=ropeBeta1 ,
xlab=bquote(mu[beta*2]) , main=paste("Quad, Group Level") )
histInfo = plotPost( zbeta0mu , cex.lab = 1.75 , showCurve=showCurve ,
#compVal=compValBeta0 , ROPE=ropeBeta0 ,
xlab=bquote(zmu[beta*0]) , main=paste("Intercept, Group Level") )
histInfo = plotPost( zbeta1mu , cex.lab = 1.75 , showCurve=showCurve ,
#compVal=compValBeta1 , ROPE=ropeBeta1 ,
xlab=bquote(zmu[beta*1]) , main=paste("Slope, Group Level") )
histInfo = plotPost( zbeta2mu , cex.lab = 1.75 , showCurve=showCurve ,
#compVal=compValBeta1 , ROPE=ropeBeta1 ,
xlab=bquote(zmu[beta*2]) , main=paste("Quad, Group Level") )
#plot( beta1mu[plotIdx] , beta0mu[plotIdx] ,
# xlab=bquote(mu[beta*1]) , ylab=bquote(mu[beta*0]) ,
# col="skyblue" , cex.lab = 1.75 )
histInfo = plotPost( sigma , cex.lab = 1.75 , showCurve=showCurve ,
compVal=compValSigma , ROPE=ropeSigma ,
xlab=bquote(sigma) , main=paste("Scale, Subj Level") )
histInfo = plotPost( log10nu , cex.lab = 1.75 , showCurve=showCurve ,
compVal=NULL , ROPE=NULL ,
xlab=bquote(log10(nu)) , main=paste("Normality, Subj Level") )
plot( log10nu[plotIdx] , sigma[plotIdx] ,
xlab=bquote(log10(nu)) ,ylab=bquote(sigma) ,
col="skyblue" , cex.lab = 1.75 )
if ( !is.null(saveName) ) {
saveGraph( file=paste(saveName,"PostMarg",sep=""), type=saveType)
}
#-----------------------------------------------------------------------------
# Data with superimposed regression lines and noise distributions:
nPanels=25
nPlots = ceiling(nSubj/nPanels)
for ( plotIdx in 1:nPlots ) {
openGraph()
par( mar=c(2,2,1,0)+.5 , mgp=c(1.5,0.5,0) )
layout(matrix(1:nPanels,nrow=5,byrow=TRUE))
xRang = max(x)-min(x)
yRang = max(y)-min(y)
xLimMult = 0.1
yLimMult = 0.1
xLim= c( min(x)-xLimMult*xRang , max(x)+xLimMult*xRang )
yLim= c( min(y)-yLimMult*yRang , max(y)+yLimMult*yRang )
#for ( sIdx in unique(ceiling(seq(1,nSubj,length=nPanels))) ) {
for ( sIdx in ((plotIdx-1)*nPanels+1):min(nSubj,(plotIdx-1)*nPanels+nPanels)) {
thisSrows = (as.numeric(s)==sIdx)
plot( x[thisSrows] , y[thisSrows] ,
cex=1.0 , lwd=1 , col="black" , xlim=xLim , ylim=yLim ,
xlab=xName , ylab=yName , cex.lab=1.0 ,
main=paste0("Unit: ",levels(s)[sIdx]) ,
cex.main=1.0 )
# Superimpose a smattering of believable regression lines:
nPredCurves=30
xComb = seq(xLim[1],xLim[2],length=301)
for ( i in floor(seq(1,chainLength,length=nPredCurves)) ) {
b0 = mcmcMat[i,paste0("beta0[",sIdx,"]")]
b1 = mcmcMat[i,paste0("beta1[",sIdx,"]")]
b2 = mcmcMat[i,paste0("beta2[",sIdx,"]")]
lines( xComb , b0+b1*xComb+b2*xComb^2 , col="skyblue" )
}
points( x[thisSrows] , y[thisSrows] , pch=19 )
}
if ( !is.null(saveName) ) {
saveGraph( file=paste0(saveName,"PostPredSubj",plotIdx), type=saveType)
}
}
#-----------------------------------------------------------------------------
# Data with superimposed regression lines and noise distributions:
openGraph()
par( mar=c(2,2,1,0)+.5 , mgp=c(1.5,0.5,0) )
# Plot data values:
xRang = max(x)-min(x)
yRang = max(y)-min(y)
xLimMult = 0.2
yLimMult = 0.2
xLim= c( min(x)-xLimMult*xRang , max(x)+xLimMult*xRang )
yLim= c( min(y)-yLimMult*yRang , max(y)+yLimMult*yRang )
plot( x , y , pch="" , cex=1.0 , col="black" ,
xlim=xLim , ylim=yLim ,
xlab=xName , ylab=yName , cex.lab=1.0 ,
main="All Units" , cex.main=1.0 )
# Superimpose a smattering of believable regression lines:
nPredCurves=70
xComb = seq(xLim[1],xLim[2],length=301)
for ( i in floor(seq(1,chainLength,length=nPredCurves)) ) {
b0 = mcmcMat[i,paste0("beta0mu")]
b1 = mcmcMat[i,paste0("beta1mu")]
b2 = mcmcMat[i,paste0("beta2mu")]
lines( xComb , b0+b1*xComb+b2*xComb^2 , col="skyblue" )
}
for ( sIdx in 1:nSubj ) {
thisSrows = (as.numeric(s)==sIdx)
lines( x[thisSrows] , y[thisSrows] , type="o" , pch=19 ) #, pch=sIdx , col=sIdx )
}
#
if ( !is.null(saveName) ) {
saveGraph( file=paste(saveName,"PostPredAll",sep=""), type=saveType)
}
}
#===============================================================================
|
/DBDA2Eprograms/Jags-Ymet-XmetSsubj-MrobustHierQuadWt.R
|
no_license
|
davidkretch/dbda
|
R
| false
| false
| 13,588
|
r
|
# Jags-Ymet-XmetSsubj-MrobustHierQuadWt.R
# Accompanies the book:
# Kruschke, J. K. (2015). Doing Bayesian Data Analysis, Second Edition:
# A Tutorial with R, JAGS, and Stan. Academic Press / Elsevier.
source("DBDA2E-utilities.R")
#===============================================================================
# Generate an MCMC sample from the hierarchical robust quadratic-trend model.
#
# The model regresses y on x and x^2 separately for each subject (grouping
# column sName), with the subject-level coefficients drawn from group-level
# normal distributions, and t-distributed (outlier-robust) noise whose
# normality parameter nu is estimated from the data.  Optional case weights
# (wName) rescale the noise SD observation by observation.
#
# Args:
#   data           : data frame containing the columns named below.
#   xName , yName  : names of the predictor and criterion columns.
#   sName          : name of the subject/unit identifier column.
#   wName          : optional name of a case-weight column; NULL means
#                    every case gets weight 1.
#   numSavedSteps  : total number of MCMC steps to keep (across chains).
#   thinSteps      : thinning interval passed to run.jags().
#   saveName       : optional filename prefix; when non-NULL the chains are
#                    saved to "<saveName>Mcmc.Rdata".
#   runjagsMethod , nChains : forwarded to run.jags(); the defaults are
#                    defined in DBDA2E-utilities.R.
#
# Returns: an mcmc.list (coda) holding the monitored parameters.
genMCMC = function( data , xName="x" , yName="y" , sName="s" , wName=NULL ,
                    numSavedSteps=10000 , thinSteps = 1 , saveName=NULL ,
                    runjagsMethod=runjagsMethodDefault ,
                    nChains=nChainsDefault ) {
  #-----------------------------------------------------------------------------
  # THE DATA.
  y = data[,yName]
  x = data[,xName]
  s = as.numeric(data[,sName])  # subject ids assumed coded 1..Nsubj -- see Nsubj below
  if ( !is.null(wName) ) {
    w = data[,wName]
  } else {
    w = rep(1,length(y))  # unweighted: every case counts equally
  }
  # Do some checking that data make sense:
  if ( any( !is.finite(y) ) ) { stop("All y values must be finite.") }
  if ( any( !is.finite(x) ) ) { stop("All x values must be finite.") }
  #Ntotal = length(y)
  # Specify the data in a list, for later shipment to JAGS:
  dataList = list(
    x = x ,
    y = y ,
    s = s ,
    w = w ,
    Nsubj = max(s) # should equal length(unique(s))
  )
  #-----------------------------------------------------------------------------
  # THE MODEL.
  # NOTE: standardization of x, y, and w happens inside the JAGS data block so
  # that the vague priors below are sensible regardless of the data's scale;
  # coefficients are transformed back to the original scale at the end.
  modelString = "
  # Standardize the data:
  data {
    Ntotal <- length(y)
    xm <- mean(x)
    ym <- mean(y)
    wm <- mean(w)
    xsd <- sd(x)
    ysd <- sd(y)
    for ( i in 1:length(y) ) {
      zx[i] <- ( x[i] - xm ) / xsd
      zy[i] <- ( y[i] - ym ) / ysd
      zw[i] <- w[i] / wm
    }
  }
  # Specify the model for standardized data:
  model {
    for ( i in 1:Ntotal ) {
      zy[i] ~ dt( zbeta0[s[i]] + zbeta1[s[i]] * zx[i] + zbeta2[s[i]] * zx[i]^2 ,
                  1/(zw[i]*zsigma)^2 , nu )
    }
    for ( j in 1:Nsubj ) {
      zbeta0[j] ~ dnorm( zbeta0mu , 1/(zbeta0sigma)^2 )
      zbeta1[j] ~ dnorm( zbeta1mu , 1/(zbeta1sigma)^2 )
      zbeta2[j] ~ dnorm( zbeta2mu , 1/(zbeta2sigma)^2 )
    }
    # Priors vague on standardized scale:
    zbeta0mu ~ dnorm( 0 , 1/(10)^2 )
    zbeta1mu ~ dnorm( 0 , 1/(10)^2 )
    zbeta2mu ~ dnorm( 0 , 1/(10)^2 )
    zsigma ~ dunif( 1.0E-3 , 1.0E+3 )
    zbeta0sigma ~ dunif( 1.0E-3 , 1.0E+3 )
    zbeta1sigma ~ dunif( 1.0E-3 , 1.0E+3 )
    zbeta2sigma ~ dunif( 1.0E-3 , 1.0E+3 )
    nu <- nuMinusOne+1
    nuMinusOne ~ dexp(1/29.0)
    # Transform to original scale:
    for ( j in 1:Nsubj ) {
      beta2[j] <- zbeta2[j]*ysd/xsd^2
      beta1[j] <- zbeta1[j]*ysd/xsd - 2*zbeta2[j]*xm*ysd/xsd^2
      beta0[j] <- zbeta0[j]*ysd + ym - zbeta1[j]*xm*ysd/xsd + zbeta2[j]*xm^2*ysd/xsd^2
    }
    beta2mu <- zbeta2mu*ysd/xsd^2
    beta1mu <- zbeta1mu*ysd/xsd - 2*zbeta2mu*xm*ysd/xsd^2
    beta0mu <- zbeta0mu*ysd + ym - zbeta1mu*xm*ysd/xsd + zbeta2mu*xm^2*ysd/xsd^2
    sigma <- zsigma * ysd
  }
  " # close quote for modelString
  # Write out modelString to a text file
  writeLines( modelString , con="TEMPmodel.txt" )
  #-----------------------------------------------------------------------------
  # INITIALIZE THE CHAINS.
  # Use lm() to find reasonable coefficients overall, then start all individual
  # units and overall at those values.
  # N.B. THIS DOES NOT ALWAYS WORK AND DOES NOT ALWAYS IMPROVE THE MCMC SAMPLE.
  # IF IT'S A PROBLEM, COMMENT OUT THE inits ARGUMENT IN THE run.jags COMMAND.
  zx = ( x - mean(x) ) / sd(x)
  zxsq = zx^2
  zy = ( y - mean(y) ) / sd(y)
  lmInfo = lm( zy ~ zx + zxsq )   # pooled least-squares fit on standardized data
  b0init = lmInfo$coef[1]
  b1init = lmInfo$coef[2]
  b2init = lmInfo$coef[3]
  sigmaInit = sqrt(mean(lmInfo$res^2))
  nuInit = 10 # arbitrary
  initsList = list(
    zsigma=sigmaInit ,
    nuMinusOne=nuInit ,
    zbeta0mu=b0init ,
    zbeta1mu=b1init ,
    zbeta2mu=b2init ,
    zbeta0=rep(b0init,max(s)) ,
    zbeta1=rep(b1init,max(s)) ,
    zbeta2=rep(b2init,max(s)) # other params filled in by JAGS
  )
  #-----------------------------------------------------------------------------
  # RUN THE CHAINS
  parameters = c( "beta0" , "beta1" , "beta2" ,
                  "beta0mu" , "beta1mu" , "beta2mu" ,
                  "zbeta0" , "zbeta1" , "zbeta2" ,
                  "zbeta0mu" , "zbeta1mu" , "zbeta2mu" ,
                  "sigma" , "nu" ,
                  "zsigma", "zbeta0sigma" , "zbeta1sigma", "zbeta2sigma" )
  adaptSteps = 1000  # Number of steps to "tune" the samplers
  burnInSteps = 10000
  runJagsOut <- run.jags( method=runjagsMethod ,
                          model="TEMPmodel.txt" ,
                          monitor=parameters ,
                          data=dataList ,
                          inits=initsList ,
                          n.chains=nChains ,
                          adapt=adaptSteps ,
                          burnin=burnInSteps ,
                          sample=ceiling(numSavedSteps/nChains) ,
                          thin=thinSteps ,
                          summarise=FALSE ,
                          plots=FALSE )
  codaSamples = as.mcmc.list( runJagsOut )
  # resulting codaSamples object has these indices:
  #   codaSamples[[ chainIdx ]][ stepIdx , paramIdx ]
  if ( !is.null(saveName) ) {
    save( codaSamples , file=paste(saveName,"Mcmc.Rdata",sep="") )
  }
  return( codaSamples )
} # end function
#===============================================================================
# Summarize the posterior sample for every monitored parameter.
#
# Args:
#   codaSamples : mcmc.list (as returned by genMCMC) holding the chains
#                 of all monitored parameters.
#   saveName    : optional filename prefix; when non-NULL the summary is
#                 also written to "<saveName>SummaryInfo.csv".
#
# Returns: a matrix with one row per parameter (row names are the
#   parameter names) and the columns produced by summarizePost() from
#   DBDA2E-utilities.R.
smryMCMC = function(  codaSamples ,
                      saveName=NULL ) {
  mcmcMat = as.matrix(codaSamples,chains=FALSE)
  paramNames = colnames(mcmcMat)
  # Build all summary rows first, then bind once; this avoids the quadratic
  # cost of growing the matrix with rbind() inside a loop.
  summaryInfo = do.call( rbind ,
                         lapply( paramNames ,
                                 function(pName) summarizePost( mcmcMat[,pName] ) ) )
  rownames(summaryInfo) = paramNames
  if ( !is.null(saveName) ) {
    write.csv( summaryInfo , file=paste(saveName,"SummaryInfo.csv",sep="") )
  }
  return( summaryInfo )
}
#===============================================================================
# Display posterior distributions and posterior-predictive plots for the
# hierarchical robust quadratic-trend model sampled by genMCMC().
#
# Produces (and, when saveName is non-NULL, saves via saveGraph):
#   * optional pairwise scatterplots of the group-level parameters,
#   * a 3x3 grid of marginal posteriors: group-level intercept/slope/quad
#     on the original and standardized scales, sigma, log10(nu), and a
#     sigma-vs-log10(nu) scatterplot,
#   * per-subject data panels (25 per page) with credible regression
#     curves superimposed,
#   * one "All Units" plot with group-level credible curves.
#
# Args:
#   codaSamples : mcmc.list returned by genMCMC().
#   data        : the original data frame; xName/yName/sName as in genMCMC.
#   wName       : accepted for interface symmetry with genMCMC; not used here.
#   compVal*/rope* : comparison values and ROPEs forwarded to plotPost().
#   showCurve   : show approximate density curve instead of histogram.
#   pairsPlot   : whether to draw the pairwise parameter scatterplots.
#   saveName , saveType : filename prefix and graphics format for saveGraph().
#
# Called for its graphical side effects; its return value is not meaningful.
plotMCMC = function( codaSamples , data ,
                     xName="x" , yName="y" , sName="s" , wName="w" ,
                     compValBeta0=NULL , ropeBeta0=NULL ,
                     compValBeta1=NULL , ropeBeta1=NULL ,
                     compValSigma=NULL , ropeSigma=NULL ,
                     showCurve=FALSE , pairsPlot=FALSE ,
                     saveName=NULL , saveType="jpg" ) {
  # showCurve is TRUE or FALSE and indicates whether the posterior should
  # be displayed as a histogram (by default) or by an approximate curve.
  # pairsPlot is TRUE or FALSE and indicates whether scatterplots of pairs
  # of parameters should be displayed.
  #-----------------------------------------------------------------------------
  y = data[,yName]
  x = data[,xName]
  s = factor(data[,sName])
  nSubj = length(unique(s)) # should be same as max(s)
  mcmcMat = as.matrix(codaSamples,chains=TRUE)
  chainLength = NROW( mcmcMat )
  # Extract the group-level parameter chains by column name:
  beta0mu = mcmcMat[,"beta0mu"]
  beta1mu = mcmcMat[,"beta1mu"]
  beta2mu = mcmcMat[,"beta2mu"]
  zbeta0mu = mcmcMat[,"zbeta0mu"]
  zbeta1mu = mcmcMat[,"zbeta1mu"]
  zbeta2mu = mcmcMat[,"zbeta2mu"]
  sigma = mcmcMat[,"sigma"]
  nu = mcmcMat[,"nu"]
  log10nu = log10(nu)  # nu is plotted on the log10 scale throughout
  #-----------------------------------------------------------------------------
  if ( pairsPlot ) {
    # Plot the parameters pairwise, to see correlations:
    openGraph()
    # Thin the chain to at most nPtToPlot points so the scatterplots stay legible:
    nPtToPlot = 1000
    plotIdx = floor(seq(1,chainLength,by=chainLength/nPtToPlot))
    # Lower-panel function that prints the correlation coefficient:
    panel.cor = function(x, y, digits=2, prefix="", cex.cor, ...) {
      usr = par("usr"); on.exit(par(usr))  # restore par() when done
      par(usr = c(0, 1, 0, 1))
      r = (cor(x, y))
      txt = format(c(r, 0.123456789), digits=digits)[1]
      txt = paste(prefix, txt, sep="")
      if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
      text(0.5, 0.5, txt, cex=1.25 ) # was cex=cex.cor*r
    }
    pairs( cbind( beta0mu , beta1mu , beta2mu , sigma , log10nu )[plotIdx,] ,
           labels=c( expression(mu[beta*0]) , expression(mu[beta*1]) ,
                     expression(mu[beta*2]) ,
                     expression(sigma) , expression(log10(nu)) ) ,
           lower.panel=panel.cor , col="skyblue" )
    if ( !is.null(saveName) ) {
      saveGraph( file=paste(saveName,"PostPairs",sep=""), type=saveType)
    }
  }
  #-----------------------------------------------------------------------------
  # Marginal histograms:
  # Set up window and layout:
  nPtToPlot = 1000
  plotIdx = floor(seq(1,chainLength,by=chainLength/nPtToPlot))
  openGraph(width=8,height=8)
  layout( matrix( 1:9 , nrow=3, byrow=TRUE ) )
  par( mar=c(4,4,2.5,0.5) , mgp=c(2.5,0.7,0) )
  histInfo = plotPost( beta0mu , cex.lab = 1.75 , showCurve=showCurve ,
                       compVal=compValBeta0 , ROPE=ropeBeta0 ,
                       xlab=bquote(mu[beta*0]) , main=paste("Intercept, Group Level") )
  histInfo = plotPost( beta1mu , cex.lab = 1.75 , showCurve=showCurve ,
                       compVal=compValBeta1 , ROPE=ropeBeta1 ,
                       xlab=bquote(mu[beta*1]) , main=paste("Slope, Group Level") )
  # No compVal/ROPE argument exists for the quadratic term; the forwarding is
  # left commented out (it would reuse the slope's compVal otherwise).
  histInfo = plotPost( beta2mu , cex.lab = 1.75 , showCurve=showCurve ,
                       #compVal=compValBeta1 , ROPE=ropeBeta1 ,
                       xlab=bquote(mu[beta*2]) , main=paste("Quad, Group Level") )
  # Standardized-scale counterparts of the three coefficients:
  histInfo = plotPost( zbeta0mu , cex.lab = 1.75 , showCurve=showCurve ,
                       #compVal=compValBeta0 , ROPE=ropeBeta0 ,
                       xlab=bquote(zmu[beta*0]) , main=paste("Intercept, Group Level") )
  histInfo = plotPost( zbeta1mu , cex.lab = 1.75 , showCurve=showCurve ,
                       #compVal=compValBeta1 , ROPE=ropeBeta1 ,
                       xlab=bquote(zmu[beta*1]) , main=paste("Slope, Group Level") )
  histInfo = plotPost( zbeta2mu , cex.lab = 1.75 , showCurve=showCurve ,
                       #compVal=compValBeta1 , ROPE=ropeBeta1 ,
                       xlab=bquote(zmu[beta*2]) , main=paste("Quad, Group Level") )
  #plot( beta1mu[plotIdx] , beta0mu[plotIdx] ,
  #      xlab=bquote(mu[beta*1]) , ylab=bquote(mu[beta*0]) ,
  #      col="skyblue" , cex.lab = 1.75 )
  histInfo = plotPost( sigma , cex.lab = 1.75 , showCurve=showCurve ,
                       compVal=compValSigma , ROPE=ropeSigma ,
                       xlab=bquote(sigma) , main=paste("Scale, Subj Level") )
  histInfo = plotPost( log10nu , cex.lab = 1.75 , showCurve=showCurve ,
                       compVal=NULL , ROPE=NULL ,
                       xlab=bquote(log10(nu)) , main=paste("Normality, Subj Level") )
  plot( log10nu[plotIdx] , sigma[plotIdx] ,
        xlab=bquote(log10(nu)) ,ylab=bquote(sigma) ,
        col="skyblue" , cex.lab = 1.75 )
  if ( !is.null(saveName) ) {
    saveGraph( file=paste(saveName,"PostMarg",sep=""), type=saveType)
  }
  #-----------------------------------------------------------------------------
  # Data with superimposed regression lines and noise distributions:
  # Per-subject panels, nPanels per page, across as many pages as needed.
  nPanels=25
  nPlots = ceiling(nSubj/nPanels)
  # NOTE: this loop variable shadows the thinning index `plotIdx` used above.
  for ( plotIdx in 1:nPlots ) {
    openGraph()
    par( mar=c(2,2,1,0)+.5 , mgp=c(1.5,0.5,0) )
    layout(matrix(1:nPanels,nrow=5,byrow=TRUE))
    # Common axis limits across all panels, padded by 10% of the data range:
    xRang = max(x)-min(x)
    yRang = max(y)-min(y)
    xLimMult = 0.1
    yLimMult = 0.1
    xLim= c( min(x)-xLimMult*xRang , max(x)+xLimMult*xRang )
    yLim= c( min(y)-yLimMult*yRang , max(y)+yLimMult*yRang )
    #for ( sIdx in unique(ceiling(seq(1,nSubj,length=nPanels))) ) {
    for ( sIdx in ((plotIdx-1)*nPanels+1):min(nSubj,(plotIdx-1)*nPanels+nPanels)) {
      thisSrows = (as.numeric(s)==sIdx)
      plot( x[thisSrows] , y[thisSrows] ,
            cex=1.0 , lwd=1 , col="black" , xlim=xLim , ylim=yLim ,
            xlab=xName , ylab=yName , cex.lab=1.0 ,
            main=paste0("Unit: ",levels(s)[sIdx]) ,
            cex.main=1.0 )
      # Superimpose a smattering of believable regression lines:
      nPredCurves=30
      xComb = seq(xLim[1],xLim[2],length=301)
      for ( i in floor(seq(1,chainLength,length=nPredCurves)) ) {
        b0 = mcmcMat[i,paste0("beta0[",sIdx,"]")]
        b1 = mcmcMat[i,paste0("beta1[",sIdx,"]")]
        b2 = mcmcMat[i,paste0("beta2[",sIdx,"]")]
        lines( xComb , b0+b1*xComb+b2*xComb^2 , col="skyblue" )
      }
      # Re-plot the data points on top of the credible curves:
      points( x[thisSrows] , y[thisSrows] , pch=19 )
    }
    if ( !is.null(saveName) ) {
      saveGraph( file=paste0(saveName,"PostPredSubj",plotIdx), type=saveType)
    }
  }
  #-----------------------------------------------------------------------------
  # Data with superimposed regression lines and noise distributions:
  # One plot of all units together, with curves from the group-level means.
  openGraph()
  par( mar=c(2,2,1,0)+.5 , mgp=c(1.5,0.5,0) )
  # Plot data values:
  xRang = max(x)-min(x)
  yRang = max(y)-min(y)
  xLimMult = 0.2
  yLimMult = 0.2
  xLim= c( min(x)-xLimMult*xRang , max(x)+xLimMult*xRang )
  yLim= c( min(y)-yLimMult*yRang , max(y)+yLimMult*yRang )
  # Empty plot frame (pch="" draws nothing); curves and lines are added below:
  plot( x , y , pch="" , cex=1.0 , col="black" ,
        xlim=xLim , ylim=yLim ,
        xlab=xName , ylab=yName , cex.lab=1.0 ,
        main="All Units" , cex.main=1.0 )
  # Superimpose a smattering of believable regression lines:
  nPredCurves=70
  xComb = seq(xLim[1],xLim[2],length=301)
  for ( i in floor(seq(1,chainLength,length=nPredCurves)) ) {
    b0 = mcmcMat[i,paste0("beta0mu")]
    b1 = mcmcMat[i,paste0("beta1mu")]
    b2 = mcmcMat[i,paste0("beta2mu")]
    lines( xComb , b0+b1*xComb+b2*xComb^2 , col="skyblue" )
  }
  # Connect each subject's data points so units are visually distinguishable:
  for ( sIdx in 1:nSubj ) {
    thisSrows = (as.numeric(s)==sIdx)
    lines( x[thisSrows] , y[thisSrows] , type="o" , pch=19 ) #, pch=sIdx , col=sIdx )
  }
  #
  if ( !is.null(saveName) ) {
    saveGraph( file=paste(saveName,"PostPredAll",sep=""), type=saveType)
  }
}
#===============================================================================
|
## Part 2: pseudo-time / trajectory inference with Monocle 3, Slingshot and TSCAN.
## Input : the preprocessed Seurat object saved by part 1 ('part1.rds').
## Output: one "final_*.txt" table per method (cell, pseudo-time, ground truth).
setwd(r"(D:\OneDrive - HKUST Connect\Courses\CSIC5011\Project\Project2\data)")
### read in preprocessed data
# NOTE(review): the rest of the script refers to this object as `seurat.obj`;
# the original assigned it to `seurat_obj` and then used the undefined name.
seurat.obj = readRDS('part1.rds')
# define output path
output.path = 'outputs_part2'
# get meta data
meta.data = seurat.obj@meta.data
# convert gestational-week string to a number, e.g., "GW08" -> 8
week.num = as.numeric(gsub("GW", "", meta.data$week))
names(week.num) = rownames(meta.data)
week.info = as.data.frame(week.num)
week.info$sample_name = rownames(week.info)
############# Monocle 3 ############
library("monocle3")
### generate gt_anno.txt
# anno = read_excel(file.path(data.path, 'GSE104276_readme_sample_barcode.xlsx'),sheet = 'SampleInfo')
# colnames(anno)[1] = 'name'
# write.table(anno, file.path(out.path, 'gt_anno.txt'), quote = F, row.names = F, col.names = T, sep = '\t')
# read in ground truth annotation for the cells.
anno_gt <- read.table("gt_anno.txt", header = T, sep = "\t", row.names = 1)
dim(anno_gt)
anno <- read.table("./meta.data.Serat.Cluster.txt", header = T, sep = "\t", row.names = 1)
# keep only cells that have both a ground-truth annotation and expression data
# (the original intersected against the undefined `pbmc.data`)
CellS <- intersect(rownames(anno_gt), colnames(seurat.obj@assays$RNA@counts))
expression_matrix <- seurat.obj@assays$RNA@counts[, CellS]
dim(expression_matrix)
anno <- anno[CellS, ]
dim(anno)
# minimal gene-level metadata: monocle3 requires a gene_short_name column
gene_annotation <- data.frame(Gene = rownames(expression_matrix))
rownames(gene_annotation) <- rownames(expression_matrix)
gene_annotation$gene_short_name <- gene_annotation$Gene
row.names(anno) = colnames(expression_matrix)
cds <- new_cell_data_set(as.matrix(expression_matrix),
                         cell_metadata = anno,
                         gene_metadata = gene_annotation)
# drop cells with zero total counts before size-factor estimation
cds <- cds[, Matrix::colSums(exprs(cds)) != 0]
cds <- estimate_size_factors(cds)
cds <- preprocess_cds(cds, num_dim = 50)
# Reduce dimensionality
cds <- reduce_dimension(cds, reduction_method = "UMAP")
# Clustering cells
cds <- cluster_cells(cds, reduction_method = "UMAP")
# learn_graph
cds <- learn_graph(cds)
# Order the cells in pseudotime; order_cells() prompts for the root node(s)
cds <- order_cells(cds)
saveRDS(cds, 'cds.rds')
# save plots (the original wrote to the undefined `pre.path`; use output.path)
plot_cells(cds, label_groups_by_cluster=T, color_cells_by = "CellType", label_branch_points = F, group_label_size = 3, label_leaves = T, label_roots=F)
ggsave(file.path(output.path, "celltype.png"), width = 1000, height = 600, units = 'px', dpi=200)
plot_cells(cds, label_groups_by_cluster=T, color_cells_by = "week", label_branch_points = F, group_label_size = 3, label_leaves = F, label_roots=F)
ggsave(file.path(output.path, "week.png"), width = 1000, height = 600, units = 'px', dpi=200)
#### save pseudo time
mnc.df <- cds@principal_graph_aux@listData[["UMAP"]][["pseudotime"]]
mnc.df <- as.data.frame(mnc.df)
rownames(mnc.df) = colnames(cds)
# attach cohort (ground-truth week) info; all=TRUE keeps unmatched rows,
# which are removed by na.omit() below
mnc.df = merge(mnc.df, week.info, by = "row.names", all = TRUE)
head(mnc.df)
# quick sanity checks on missingness before dropping incomplete rows:
sum(is.na(mnc.df$sample_name))
sum(is.na(mnc.df$week.num))
dim(mnc.df)
mnc.df <- na.omit(mnc.df)
dim(mnc.df)
head(mnc.df)
colnames(mnc.df) = c('cell', 'pseudo_time', 'gt', 'sample_name')
write.table(mnc.df, file.path(output.path, 'final_mnc.txt'), quote = F, sep = '\t', row.names = T, col.names = T)
################ Slingshot #################
library(SingleCellExperiment)
library(Seurat)
library(slingshot)
library(Polychrome)
# convert from seurat
deng_SCE = as.SingleCellExperiment(seurat.obj)
# run slingshot on the PCA embedding, using the 'anno' column as cluster labels
deng_SCE <- slingshot(deng_SCE, clusterLabels = 'anno', reducedDim = "PCA",
                      allow.breaks = FALSE)
# get summary
summary(deng_SCE$slingPseudotime_1)
lnes <- getLineages(reducedDim(deng_SCE, "PCA"),
                    deng_SCE$anno)
# plot trajectory
my_color <- createPalette(10, c("#010101", "#ff0000"), M = 1000)
plot(reducedDims(deng_SCE)$PCA, col = my_color[as.factor(deng_SCE$anno)],
     pch = 16,
     asp = 1)
legend("bottomleft", legend = names(my_color[levels(deng_SCE$anno)]),
       fill = my_color[levels(deng_SCE$anno)])
lines(SlingshotDataSet(deng_SCE), lwd = 2, type = 'lineages', col = c("black"))
slingshot_df = as.data.frame(deng_SCE[[c('slingPseudotime_1')]])
rownames(slingshot_df) = colnames(deng_SCE)
slingshot_df = na.omit(slingshot_df)
colnames(slingshot_df) = c('pseudo_time')
# attach cohort info; merge() needs a data frame on both sides, so use
# week.info rather than the bare week.num vector (bug in the original)
sls.merged = merge(slingshot_df, week.info, by = "row.names", all = F)
colnames(sls.merged) = c('cell', 'pseudo_time', 'gt', 'sample_name')
write.table(sls.merged, file.path(output.path, 'final_slingshot.txt'), quote = F, sep = '\t', row.names = T, col.names = T)
################ TSCAN #################
library(ggplot2)
library(TSCAN)
library(ggbeeswarm)
# get counts from preprocessed data
seuratdf <- as.matrix(seurat.obj@assays$RNA@counts)
# preprocess (cvcutoff = 0 keeps all genes)
procdata <- preprocess(seuratdf, cvcutoff = 0)
dim(procdata)
# model-based clustering
lpsmclust <- exprmclust(procdata)
# show clusters
plotmclust(lpsmclust, show_cell_names = F)
# order cells along the cluster tree
lpsorder <- TSCANorder(lpsmclust)
### output pseudo time (columns: sample_name, State, Pseudotime)
tscan.df = TSCANorder(lpsmclust, flip = FALSE, orderonly = FALSE)
rownames(tscan.df) = tscan.df$sample_name
tscan.merged = merge(tscan.df, week.info, by = "sample_name", all = TRUE)
head(tscan.merged)
# quick sanity checks on missingness before dropping incomplete rows:
sum(is.na(tscan.merged$sample_name))
sum(is.na(tscan.merged$State))
sum(is.na(tscan.merged$Pseudotime))
sum(is.na(tscan.merged$week.num))
dim(tscan.merged)
tscan.merged <- na.omit(tscan.merged)
dim(tscan.merged)
head(tscan.merged)
colnames(tscan.merged) = c('cell', 'cluster_num', 'pseudo_time', 'gt')
write.table(tscan.merged, file.path(output.path, 'final_tscan.txt'), quote = F, sep = '\t', row.names = T, col.names = T)
|
/course/csic5011/2023/project2/MA_TANG_RUAN_HUANG/code/part2_1_trajectory_inference.R
|
no_license
|
yao-lab/yao-lab.github.io
|
R
| false
| false
| 5,732
|
r
|
setwd(r"(D:\OneDrive - HKUST Connect\Courses\CSIC5011\Project\Project2\data)")
### read in preprocessed data
seurat_obj = readRDS('part1.rds')
# define output path
output.path = 'outputs_part2'
# get meta data
meta.data = seurat.obj@meta.data
# convert week string to number, e.g., "week08" -> 8
week.num = numeric_value <- as.numeric(gsub("GW", "", meta.data$week))
names(week.num) = rownames(meta.data)
week.info = as.data.frame(week.num)
week.info$sample_name = rownames(week.info)
############# Monocle 3 ############
library("monocle3")
### generate gt_anno.txt
# anno = read_excel(file.path(data.path, 'GSE104276_readme_sample_barcode.xlsx'),sheet = 'SampleInfo')
# colnames(anno)[1] = 'name'
# write.table(anno, file.path(out.path, 'gt_anno.txt'), quote = F, row.names = F, col.names = T, sep = '\t')
# read in ground truth annotation for the cells.
anno_gt<- read.table("gt_anno.txt",header = T,sep = "\t",row.names = 1)
dim(anno_gt)
anno<- read.table("./meta.data.Serat.Cluster.txt",header = T,sep = "\t",row.names = 1)
CellS<- intersect(rownames(anno_gt),colnames(pbmc.data))
expression_matrix<- seurat.obj@assays$RNA@counts[,CellS]
dim(expression_matrix)
anno<- anno[CellS,]
dim(anno)
gene_annotation<- data.frame(Gene=rownames(expression_matrix))
rownames(gene_annotation)<- rownames(expression_matrix)
gene_annotation$gene_short_name<- gene_annotation$Gene
row.names(anno) = colnames(expression_matrix)
cds <- new_cell_data_set(as.matrix(expression_matrix),
cell_metadata = anno,
gene_metadata = gene_annotation)
cds <- cds[,Matrix::colSums(exprs(cds)) != 0]
cds <- estimate_size_factors(cds)
cds <- preprocess_cds(cds, num_dim = 50)
# Reduce dimensionality
cds <- reduce_dimension(cds,reduction_method="UMAP")
# Clustering cells
cds <- cluster_cells(cds,reduction_method="UMAP")
# learn_graph
cds <- learn_graph(cds)
# Order the cells in pseudotime and determined the starting point
cds <- order_cells(cds)
saveRDS(cds, 'cds.rds')
# save plots
plot_cells(cds, label_groups_by_cluster=T, color_cells_by = "CellType", label_branch_points = F, group_label_size = 3, label_leaves = T, label_roots=F)
ggsave(file.path(pre.path, "celltype.png"), width = 1000, height = 600, units = 'px', dpi=200,)
plot_cells(cds, label_groups_by_cluster=T, color_cells_by = "week",label_branch_points = F, group_label_size = 3, label_leaves = F, label_roots=F)
ggsave(file.path(pre.path, "week.png"), width = 1000, height = 600, units = 'px', dpi=200,)
#### save pseudo time
mnc.df<- cds@principal_graph_aux@listData[["UMAP"]][["pseudotime"]]
mnc.df<- as.data.frame(mnc.df)
rownames(mnc.df) = colnames(cds)
length(interaction(names(week.num), rownames(mnc.df)))
mnc.df = merge(mnc.df, week.info, by = "row.names", all = TRUE)
head(mnc.df)
sum(is.na(mnc.df$sample_name))
sum(is.na(mnc.df$State))
sum(is.na(mnc.df$Pseudotime))
sum(is.na(mnc.df$week.num))
dim(mnc.df)
mnc.df <- na.omit(mnc.df)
dim(mnc.df)
head(mnc.df)
colnames(mnc.df) = c('cell','pseudo_time','gt','sample_name')
write.table(mnc.df, file.path(output.path, 'final_mnc.txt') , quote = F, sep = '\t', row.names = T, col.names = T)
################ Slingshot #################
library(SingleCellExperiment)
library(Seurat)
library(slingshot)
library(Polychrome)
# conver from seurat
deng_SCE = as.SingleCellExperiment(seurat.obj)
# run slingshot
deng_SCE <- slingshot(deng_SCE, clusterLabels = 'anno',reducedDim = "PCA",
allow.breaks = FALSE)
# get summary
summary(deng_SCE$slingPseudotime_1)
lnes <- getLineages(reducedDim(deng_SCE,"PCA"),
deng_SCE$anno)
# plot trajectory
my_color <- createPalette(10, c("#010101", "#ff0000"), M=1000)
plot(reducedDims(deng_SCE)$PCA, col = my_color[as.factor(deng_SCE$anno)],
pch=16,
asp = 1)
legend("bottomleft",legend = names(my_color[levels(deng_SCE$anno)]),
fill = my_color[levels(deng_SCE$anno)])
lines(SlingshotDataSet(deng_SCE), lwd=2, type = 'lineages', col = c("black"))
slingshot_df = as.data.frame(deng_SCE[[c('slingPseudotime_1')]])
rownames(slingshot_df) = colnames(deng_SCE)
slingshot_df = na.omit(slingshot_df)
colnames(slingshot_df) = c('pseudo_time')
sls.merged = merge(slingshot_df, week.num, by = "row.names", all = F)
colnames(sls.merged) = c('cell','pseudo_time','gt')
write.table(sls.merged, file.path(output.path, 'final_slingshot.txt') , quote = F, sep = '\t', row.names = T, col.names = T)
################ TSCAN #################
library(ggplot2)
library(TSCAN)
library(ggbeeswarm)
# get counts from preprocessed data
seuratdf<-as.matrix(seurat.obj@assays$RNA@counts)
# preprocess
procdata <- preprocess(seuratdf,cvcutoff = 0)
dim(procdata)
# clustering
lpsmclust <- exprmclust(procdata)
# show clusters
plotmclust(lpsmclust,show_cell_names = F)
# order cells
lpsorder <- TSCANorder(lpsmclust)
### output pseudo time
tscan.df = TSCANorder(lpsmclust,flip=FALSE,orderonly=FALSE)
rownames(tscan.df) = tscan.df$sample_name
length(interaction(names(week.num), tscan.df$sample_name))
week.info$sample_name = rownames(week.info)
tscan.merged = merge(tscan.df, week.info, by = "sample_name", all = TRUE)
head(tscan.merged)
sum(is.na(tscan.merged$sample_name))
sum(is.na(tscan.merged$State))
sum(is.na(tscan.merged$Pseudotime))
sum(is.na(tscan.merged$week.num))
dim(tscan.merged)
tscan.merged <- na.omit(tscan.merged)
dim(tscan.merged)
head(tscan.merged)
colnames(tscan.merged) = c('cell','cluster_num','pseudo_time','gt')
write.table(tscan.merged, file.path(output.path, 'final_tscan.txt') , quote = F, sep = '\t', row.names = T, col.names = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup_operations.R
\name{backup_list_backup_vaults}
\alias{backup_list_backup_vaults}
\title{Returns a list of recovery point storage containers along with
information about them}
\usage{
backup_list_backup_vaults(NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{NextToken}{The next item following a partial list of returned items. For example,
if a request is made to return \code{maxResults} number of items, \code{NextToken}
allows you to return more items in your list starting at the location
pointed to by the next token.}
\item{MaxResults}{The maximum number of items to be returned.}
}
\description{
Returns a list of recovery point storage containers along with information about them.
See \url{https://www.paws-r-sdk.com/docs/backup_list_backup_vaults/} for full documentation.
}
\keyword{internal}
|
/cran/paws.storage/man/backup_list_backup_vaults.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 898
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup_operations.R
\name{backup_list_backup_vaults}
\alias{backup_list_backup_vaults}
\title{Returns a list of recovery point storage containers along with
information about them}
\usage{
backup_list_backup_vaults(NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{NextToken}{The next item following a partial list of returned items. For example,
if a request is made to return \code{maxResults} number of items, \code{NextToken}
allows you to return more items in your list starting at the location
pointed to by the next token.}
\item{MaxResults}{The maximum number of items to be returned.}
}
\description{
Returns a list of recovery point storage containers along with information about them.
See \url{https://www.paws-r-sdk.com/docs/backup_list_backup_vaults/} for full documentation.
}
\keyword{internal}
|
#' Sex Ratio and the Price of Agricultural Crops in China
#'
#' Demographic and agricultural crop data for individual counties in China.
#'
#' @format A data frame with 51766 rows and 9 variables:
#' \describe{
#' \item{ admin }{ integer: unique county identifier }
#' \item{ biryr }{ integer: year of cohort (birth year) }
#' \item{ birpop }{ integer: birth population in a given year }
#' \item{ han }{ numeric: proportion of Han Chinese in the birth cohort }
#' \item{ sex }{ numeric: proportion of males in the birth cohort }
#' \item{ teasown }{ numeric: quantity of tea sown in the county }
#' \item{ orch }{ numeric: quantity of orchard-type crops planted in the county }
#' \item{ cashcrop }{ numeric: quantity of cash crops planted in the county }
#' \item{ post }{ integer: indicator variable for the introduction of price reforms }
#' }
#'
#'
#' @details
#' See \emph{QSS} Table 7.4.
#'
#'
#' @references
#' \itemize{
#' \item{ Imai, Kosuke. 2017. \emph{Quantitative Social Science: An Introduction}.
#' Princeton University Press. \href{http://press.princeton.edu/titles/11025.html}{URL}. }
#' \item{ Qian, Nancy. 2008. “Missing women and the price of tea in China: The effect
#' of sex-specific earnings on sex imbalance.” \emph{Quarterly Journal of Economics},
#' vol. 123, no. 3, pp. 1251–1285. }
#'}
"chinawomen"
|
/R/chinawomen.R
|
no_license
|
Musaab-Farooqui/qss-package
|
R
| false
| false
| 1,276
|
r
|
#' Sex Ratio and the Price of Agricultural Crops in China
#'
#' Demographic and agricultural crop data for individual counties in China.
#'
#' @format A data frame with 51766 rows and 9 variables:
#' \describe{
#' \item{ admin }{ integer: unique county identifier }
#' \item{ biryr }{ integer: year of cohort (birth year) }
#' \item{ birpop }{ integer: birth population in a given year }
#' \item{ han }{ numeric }
#' \item{ sex }{ numeric: proportion of males in the birth cohort }
#' \item{ teasown }{ numeric: quantity of tea sown in the county }
#' \item{ orch }{ numeric: quantity of orchard-type crops planted in the county }
#' \item{ cashcrop }{ numeric: quantity of cash crops planted in the county }
#' \item{ post }{ integer: indicator variable for the introduction of price reforms }
#' }
#'
#'
#' @details
#' See \emph{QSS} Table 7.4.
#'
#'
#' @references
#' \itemize{
#' \item{ Imai, Kosuke. 2017. \emph{Quantitative Social Science: An Introduction}.
#' Princeton University Press. \href{http://press.princeton.edu/titles/11025.html}{URL}. }
#' \item { Missing women and the price of tea in China: The effect of sex-specific earnings
#' on sex imbalance.” \emph{Quarterly Journal of Economics}, vol. 123, no. 3, pp. 1251–1285.
#' }
#'}
"chinawomen"
|
library(stringr)
library(httr)
library(jsonlite)
library(ethr)
library(ether)
library(DescTools)
library(curl)
# hereinafter, 'smart contracts' will be referred to as 'addresses'
# retrieving a list of Ponzi addresses (for now, just Ponzi addresses; later both Ponzi and non-Ponzi addresses)
ponzi.addresses <- read.csv("https://raw.githubusercontent.com//sfdk29//2021Ethereum//main//ponzi-addresses.csv",header = FALSE,stringsAsFactors=FALSE,colClasses=c("character","character"))
ponzi.addresses <- ponzi.addresses[,1] #these are the smart contract addresses
#we need an Etherscan API in order to retrieve information relating to each such address
#API <- ???
#Now, using such API and the function 'GET' from the package 'httr', we will retrive information relating to:
#(i) 'normal' transactions of each address (these are exclusively incoming payments [i.e. amounts of Ethereum deposited into the smart contract])
#(ii) 'internal' transactions of each address (these are either incoming or outgoing payments [i.e. amounts of Ethereum deposited into or distributed by the smart contract])
#(iii) opcodes of each smart contract
#(i) 'normal' transactions
#we first need to define the url specific to each address in order to retrieve information on the 'normal' transactions relating to each address
urls <- sapply(ponzi.addresses,function(u) paste0("https://api.etherscan.io/api?module=account&action=txlist&address=",u,"&startblock=0&endblock=99999999&sort=asc&apikey=",API))
#creating a list which will store the information retrieved from Etherscan.io
strings.list <- as.list(NA)
for (j in 1:length(urls)){
obj <- content(GET(urls[j]),"text")
obj <- strsplit(obj,"\\{")
obj <- unlist(obj) #check if this should actually remain a list
strings.list[[j]] <- obj
}
#saving the list for convenience (since the API endpoint is throttled to 2 calls/second)
save(strings.list,file="C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\strings.list.Rdata")
load("C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\strings.list.Rdata")
#each element of the strings.list object is a vector of strings that relate to the 'normal' transactions of a smart contract
#for instance, for smart contract no. 4, we have
str(strings.list[[4]])
#note that the first three elements of this vector do not relate to actual incoming transactions:
strings.list[[4]][1:3]
#whereas all other elements of the vector do relate to actual incoming transactions
#e.g.
strings.list[[4]][4]
strings.list[[4]][length(strings.list[[4]])]
strings.list2 <- as.list(NA)
for(i in 1:length(strings.list)){
strings.list2[i] <- ifelse(length(strings.list[[i]])>3,strings.list[i],NA)
}
list.obj <- as.list(rep(NA,length(strings.list2)))
for (j in 1:length(strings.list2)){
obj <- strings.list2[[j]]
if (length(obj)>3) {
obj <- obj[-(1:3)]
m <- matrix(NA,nrow=length(obj),ncol = 10)
for (i in 1:length(obj)){
from <- as.character(str_match(obj[i],"\"from\":\"\\s*(.*?)\\s*\""))
from <- from[2] #no idea why but I need to not use a comma here
to <- as.character(str_match(obj[i],"\"to\":\"\\s*(.*?)\\s*\""))
to <- to[2] #no idea why but I need to not use a comma here
a <- str_match(obj[i],"blockNumber\":\"\\s*(.*?)\\s*\"")
a <- as.numeric(a[,2]) #once inserted into the matrix, will be forced to character vector
b <- str_match(obj[i],"timeStamp\":\"\\s*(.*?)\\s*\"")
b <- as.numeric(b[,2]) #once inserted into the matrix, will be forced to character vector
b.asdate <- as.POSIXct(b, origin="1970-01-01")
c <- str_match(obj[i],"value\":\"\\s*(.*?)\\s*\"")
c <- as.numeric(c[,2]) #once inserted into the matrix, will be forced to character vector
d <- str_match(obj[i],"gas\":\"\\s*(.*?)\\s*\"")
d <- as.numeric(d[,2]) #once inserted into the matrix, will be forced to character vector
e <- str_match(obj[i],"gasPrice\":\"\\s*(.*?)\\s*\"")
e <- as.numeric(e[,2]) #once inserted into the matrix, will be forced to character vector
f <- str_match(obj[i],"cumulativeGasUsed\":\"\\s*(.*?)\\s*\"")
f <- as.numeric(f[,2]) #once inserted into the matrix, will be forced to character vector
g <- str_match(obj[i],"gasUsed\":\"\\s*(.*?)\\s*\"")
g <- as.numeric(g[,2]) #once inserted into the matrix, will be forced to character vector
m[i,] <- c(from,to,a,b,b.asdate,c,d,e,f,g)
}
d <- data.frame(m[,1],m[,2],as.numeric(m[,3]),as.numeric(m[,4]),as.numeric(m[,5]),as.numeric(m[,6]),as.numeric(m[,7]),as.numeric(m[,8]),as.numeric(m[,9]),as.numeric(m[,10]))
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasPrice","cumulativeGasUsed","gasUsed")
list.obj[[j]] <- d
}
else {
d <- data.frame(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasPrice","cumulativeGasUsed","gasUsed")
list.obj[[j]] <- d
}
}
#list.obj is a list whose elements are datasframes of 10 columns and as many rows as the number of 'normal' transactions to the smart contract
str(list.obj)
#e.g.
list.obj[183] #no transactions at all
list.obj[184] #only five transactions
list.obj[2] #many transactions ('successful' Ponzi smart contract)
#note the class of variables within each such dataframe
class(list.obj[[184]][1,"from"]) #good
class(list.obj[[184]][1,"value"]) #good
#(ii) 'internal' transactions
#retrievial process as in (i), yet keep in mind that 'internal' transactions might refer to both incoming and outcoming payments
urls.internaltx <- sapply(ponzi.addresses,function(u) paste0("https://api.etherscan.io/api?module=account&action=txlistinternal&address=",u,"&startblock=0&endblock=2702578&sort=asc&apikey=",API))
strings.list.internaltx <- as.list(NA)
for (j in 1:length(urls.internaltx)){
obj <- content(GET(urls.internaltx[j]),"text")
obj <- strsplit(obj,"\\{")
obj <- unlist(obj) #check if this should actually remain a list
strings.list.internaltx[[j]] <- obj
}
save(strings.list.internaltx,file="C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\strings.list.internaltx.Rdata")
#in this case, only the first two transactions are not relevant (whereas in the case of 'normal' transactions, the first three are not relevant)
strings.list.internaltx[[4]][1:2] #not relevant
strings.list.internaltx[[4]][3] #first relevant
strings.list.internaltx2 <- as.list(NA)
for(i in 1:length(strings.list.internaltx)){
strings.list.internaltx2[i] <- ifelse(length(strings.list.internaltx[[i]])>2,strings.list.internaltx[i],NA)
}
list.obj.internaltx <- as.list(rep(NA,length(strings.list.internaltx2)))
for (j in 1:length(strings.list.internaltx2)){
obj <- strings.list.internaltx2[[j]]
if (length(obj)>2) {
obj <- obj[-(1:2)]
m <- as.data.frame(matrix(NA,nrow=length(obj),ncol = 8)) #I need to use a data.frame as opposed to a matrix so to store both character and numeric values
for (i in 1:length(obj)){
from <- str_match(obj[i],"\"from\":\"\\s*(.*?)\\s*\"")
from <- from[2] #no idea why but I need to not use a comma here
to <- str_match(obj[i],"\"to\":\"\\s*(.*?)\\s*\"")
to <- to[2] #no idea why but I need to not use a comma here
a <- str_match(obj[i],"blockNumber\":\"\\s*(.*?)\\s*\"")
a <- as.numeric(a[,2])
b <- str_match(obj[i],"timeStamp\":\"\\s*(.*?)\\s*\"")
b <- as.numeric(b[,2])
b.asdate <- as.POSIXct(b, origin="1970-01-01")
c <- str_match(obj[i],"value\":\"\\s*(.*?)\\s*\"")
c <- as.numeric(c[,2])
d <- str_match(obj[i],"gas\":\"\\s*(.*?)\\s*\"")
d <- as.numeric(d[,2])
e <- str_match(obj[i],"gasUsed\":\"\\s*(.*?)\\s*\"")
e <- as.numeric(e[,2])
m[i,] <- c(from,to,a,b,b.asdate,c,d,e)
}
d <- data.frame(m[,1],m[,2],as.numeric(m[,3]),as.numeric(m[,4]),as.numeric(m[,5]),as.numeric(m[,6]),as.numeric(m[,7]),as.numeric(m[,8]))
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasUsed")
list.obj.internaltx[[j]] <- d
}
else {
d <- data.frame(NA,NA,NA,NA,NA,NA,NA,NA)
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasUsed")
list.obj.internaltx[[j]] <- d
}
}
class(list.obj.internaltx[[184]][1,"from"]) #good
class(list.obj.internaltx[[184]][1,"value"]) #good
###############
#Now we combine the information of both (i) and (ii) through certain indexes (e.g. Gini coefficient).
#the variables obtained will be the 'behavioural' variables that we'll feed into the classifier models
dat <- matrix(NA,nrow = length(list.obj),ncol=4)
for (i in 1:length(list.obj)){
if (!is.na(list.obj[[i]][1,1]) | !is.na(list.obj.internaltx[[i]][1,1])) {
#gini index
incoming.amounts.normal <- list.obj[[i]][,"value"]
incoming.amounts.internal <- list.obj.internaltx[[i]][,"value"] [ list.obj.internaltx[[i]][,"to"]==ponzi.addresses[i]]
incoming.amounts <- c(incoming.amounts.normal,incoming.amounts.internal)
dat[i,1] <- if(!is.na(Gini(incoming.amounts,unbiased = FALSE,na.rm = TRUE))) {Gini(incoming.amounts,unbiased = FALSE,na.rm = TRUE)} else {NA}
#lifetime
incoming.time.normal <- list.obj[[i]][,"timeStamp"]
incoming.time.internal <- list.obj.internaltx[[i]][,"timeStamp"] [ list.obj.internaltx[[i]][,"to"]==ponzi.addresses[i]]
incoming.time <- c(incoming.time.normal,incoming.time.internal)
incoming.time <- ifelse(is.na(incoming.time),NA,sort(incoming.time)) #as I sort the vector, the NA is already excluded (and the length reduced by one)
incomingtx.lifetime <- ifelse(is.na(incoming.time[1]),NA,range(incoming.time,na.rm = TRUE)[2] - range(incoming.time,na.rm = TRUE)[1])
dat[i,2] <- incomingtx.lifetime #could be zero if we only have one transaction. Do we keep it 0 or force it to NA?
#avereage time btw two transactions
dat[i,3] <- if (!is.nan(mean(diff(incoming.time)))) {mean(diff(incoming.time))} else {NA}
#average gas expenditure #note that this is relevant only for 'normal' transactions, I supppose
gas <- list.obj[[i]][,"gas"]
gas.price <- list.obj[[i]][,"gasPrice"]
dat[i,4] <- mean(gas*gas.price)
}
else {
dat[i,1] <- NA
dat[i,2] <- NA
dat[i,3] <- NA
dat[i,4] <- NA
}
}
colnames(dat) <- c("gini","lifespan","ave.time.btw.tx","gas.expenditure")
head(dat)
#(iii) retrieving opcodes
###########
#we need an API from infura.io
#infura.API <- ???
set_rpc_address(infura.API)
#list of opcodes (available at https://ethervm.io/)
opcodes <- c("STOP", "ADD", "MUL", "SUB", "DIV", "SDIV", "MOD", "SMOD", "ADDMOD", "MULMOD", "EXP", "SIGNEXTEND", "LT", "GT", "SLT", "SGT", "EQ", "ISZERO", "AND", "OR", "XOR", "NOT", "BYTE", "SHL", "SHR", "SAR", "SHA3", "ADDRESS", "BALANCE", "ORIGIN", "CALLER", "CALLVALUE", "CALLDATALOAD", "CALLDATASIZE", "CALLDATACOPY", "CODESIZE", "CODECOPY", "GASPRICE", "EXTCODESIZE", "EXTCODECOPY", "RETURNDATASIZE", "RETURNDATACOPY", "EXTCODEHASH", "BLOCKHASH", "COINBASE", "TIMESTAMP", "NUMBER", "DIFFICULTY", "GASLIMIT", "POP", "MLOAD", "MSTORE", "MSTORE8", "SLOAD", "SSTORE", "JUMP", "JUMPI", "PC", "MSIZE", "GAS", "JUMPDEST", "PUSH1", "PUSH2", "PUSH3", "PUSH4", "PUSH5", "PUSH6", "PUSH7", "PUSH8", "PUSH9", "PUSH10", "PUSH11", "PUSH12", "PUSH13", "PUSH14", "PUSH15", "PUSH16", "PUSH17", "PUSH18", "PUSH19", "PUSH20", "PUSH21", "PUSH22", "PUSH23", "PUSH24", "PUSH25", "PUSH26", "PUSH27", "PUSH28", "PUSH29", "PUSH30", "PUSH31", "PUSH32", "DUP1", "DUP2", "DUP3", "DUP4", "DUP5", "DUP6", "DUP7", "DUP8", "DUP9", "DUP10", "DUP11", "DUP12", "DUP13", "DUP14", "DUP15", "DUP16", "SWAP1", "SWAP2", "SWAP3", "SWAP4", "SWAP5", "SWAP6", "SWAP7", "SWAP8", "SWAP9", "SWAP10", "SWAP11", "SWAP12", "SWAP13", "SWAP14", "SWAP15", "SWAP16", "LOG0", "LOG1", "LOG2", "LOG3", "LOG4", "PUSH", "DUP", "SWAP", "CREATE", "CALL", "CALLCODE", "RETURN", "DELEGATECALL", "CREATE2", "STATICCALL", "REVERT", "SELFDESTRUCT")
save(opcodes,file="C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\opcodes.Rdata")
m <- matrix(NA,nrow = length(ponzi.addresses),ncol = length(opcodes))
for (i in 1:length(ponzi.addresses)){
address <- ponzi.addresses[i]
url <- paste0("https://ethervm.io/decompile/",address,"#disassembly")
obj <- content(GET(url),"text")
vector.of.opcodes.counts <- str_count(obj,opcodes)
m[i,] <- vector.of.opcodes.counts
}
colnames(m) <- opcodes
head(m)
opcodes.count.matrix <- m
save(opcodes.count.matrix,file="C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\opcodes.count.matrix.Rdata")
load("C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\opcodes.count.matrix.Rdata")
dat <- cbind(dat,opcodes.count.matrix)
head(dat)
|
/retrieving data (no API).R
|
no_license
|
sfdk29/2021Ethereum
|
R
| false
| false
| 13,060
|
r
|
library(stringr)
library(httr)
library(jsonlite)
library(ethr)
library(ether)
library(DescTools)
library(curl)

# Hereinafter, 'smart contracts' are referred to as 'addresses'.
# Load the list of known Ponzi smart-contract addresses (first CSV column).
ponzi.addresses <- read.csv("https://raw.githubusercontent.com//sfdk29//2021Ethereum//main//ponzi-addresses.csv",
                            header = FALSE, stringsAsFactors = FALSE,
                            colClasses = c("character", "character"))
ponzi.addresses <- ponzi.addresses[, 1] # the smart contract addresses

# An Etherscan API key is required to query transaction data.
# API <- ???  (set your Etherscan API key here before running)

# Using that key and httr::GET we retrieve, per address:
#   (i)   'normal' transactions  (exclusively incoming ETH deposits)
#   (ii)  'internal' transactions (incoming or outgoing payments)
#   (iii) the opcodes of each smart contract

# (i) 'normal' transactions: build one Etherscan request URL per address.
urls <- vapply(
  ponzi.addresses,
  function(u) paste0("https://api.etherscan.io/api?module=account&action=txlist&address=", u, "&startblock=0&endblock=99999999&sort=asc&apikey=", API),
  character(1)
)

# Fetch each address' transaction listing and split the JSON text on "{" so
# that every transaction record becomes one element of a character vector.
strings.list <- vector("list", length(urls)) # preallocate instead of growing as.list(NA)
for (j in seq_along(urls)) {
  raw <- content(GET(urls[j]), "text")
  strings.list[[j]] <- unlist(strsplit(raw, "\\{"))
}
# Saving the list for convenience (the API endpoint is throttled to 2 calls/second).
save(strings.list, file = "C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\strings.list.Rdata")
load("C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\strings.list.Rdata")

# Each element of strings.list is a vector of strings describing the 'normal'
# transactions of one smart contract; e.g. for smart contract no. 4:
str(strings.list[[4]])
# The first three elements are API response metadata, not actual transactions:
strings.list[[4]][1:3]
# All remaining elements are actual incoming transactions, e.g.
strings.list[[4]][4]
strings.list[[4]][length(strings.list[[4]])]

# Keep only contracts that have at least one real transaction (> 3 elements);
# contracts without transactions are marked NA.
strings.list2 <- vector("list", length(strings.list))
for (i in seq_along(strings.list)) {
  # plain if/else instead of ifelse(): ifelse() is designed for vectors and is
  # fragile when its branches are list elements
  strings.list2[[i]] <- if (length(strings.list[[i]]) > 3) strings.list[[i]] else NA
}
# Parse each contract's raw 'normal'-transaction strings into a data frame.
# list.obj[[j]] is a 10-column data frame (one row per transaction), or a
# single all-NA row when the contract had no transactions.
list.obj <- as.list(rep(NA,length(strings.list2)))
for (j in 1:length(strings.list2)){
obj <- strings.list2[[j]]
if (length(obj)>3) {
# drop the first three elements (API response metadata, not transactions)
obj <- obj[-(1:3)]
m <- matrix(NA,nrow=length(obj),ncol = 10)
for (i in 1:length(obj)){
# str_match() returns a matrix: full match in column 1, capture group in
# column 2. as.character() flattens it, so linear index [2] (not [,2])
# picks the captured value.
from <- as.character(str_match(obj[i],"\"from\":\"\\s*(.*?)\\s*\""))
from <- from[2] # capture group: the sender address
to <- as.character(str_match(obj[i],"\"to\":\"\\s*(.*?)\\s*\""))
to <- to[2] # capture group: the recipient address
a <- str_match(obj[i],"blockNumber\":\"\\s*(.*?)\\s*\"")
a <- as.numeric(a[,2]) #once inserted into the matrix, will be forced to character vector
b <- str_match(obj[i],"timeStamp\":\"\\s*(.*?)\\s*\"")
b <- as.numeric(b[,2]) #once inserted into the matrix, will be forced to character vector
# NOTE(review): b.asdate is stored in a character matrix, so the POSIXct
# class is lost; column 5 ends up holding the numeric epoch, not a date.
b.asdate <- as.POSIXct(b, origin="1970-01-01")
c <- str_match(obj[i],"value\":\"\\s*(.*?)\\s*\"")
c <- as.numeric(c[,2]) #once inserted into the matrix, will be forced to character vector
# NOTE(review): locals c, d, e, f, g shadow base functions of the same name;
# the call c(...) below still works because R resolves functions separately,
# but renaming these would be safer.
d <- str_match(obj[i],"gas\":\"\\s*(.*?)\\s*\"")
d <- as.numeric(d[,2]) #once inserted into the matrix, will be forced to character vector
e <- str_match(obj[i],"gasPrice\":\"\\s*(.*?)\\s*\"")
e <- as.numeric(e[,2]) #once inserted into the matrix, will be forced to character vector
f <- str_match(obj[i],"cumulativeGasUsed\":\"\\s*(.*?)\\s*\"")
f <- as.numeric(f[,2]) #once inserted into the matrix, will be forced to character vector
g <- str_match(obj[i],"gasUsed\":\"\\s*(.*?)\\s*\"")
g <- as.numeric(g[,2]) #once inserted into the matrix, will be forced to character vector
# the character matrix coerces every field to character; the numeric columns
# are converted back when the data frame is built below
m[i,] <- c(from,to,a,b,b.asdate,c,d,e,f,g)
}
d <- data.frame(m[,1],m[,2],as.numeric(m[,3]),as.numeric(m[,4]),as.numeric(m[,5]),as.numeric(m[,6]),as.numeric(m[,7]),as.numeric(m[,8]),as.numeric(m[,9]),as.numeric(m[,10]))
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasPrice","cumulativeGasUsed","gasUsed")
list.obj[[j]] <- d
}
else {
# contract without transactions: keep a single all-NA row with the same columns
d <- data.frame(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasPrice","cumulativeGasUsed","gasUsed")
list.obj[[j]] <- d
}
}
#list.obj is a list whose elements are dataframes of 10 columns and as many rows as the number of 'normal' transactions to the smart contract
str(list.obj)
#e.g.
list.obj[183] #no transactions at all
list.obj[184] #only five transactions
list.obj[2] #many transactions ('successful' Ponzi smart contract)
#note the class of variables within each such dataframe
class(list.obj[[184]][1,"from"]) #good
class(list.obj[[184]][1,"value"]) #good
#(ii) 'internal' transactions
# Retrieval process as in (i); keep in mind 'internal' transactions may refer
# to both incoming and outgoing payments.
urls.internaltx <- sapply(ponzi.addresses,function(u) paste0("https://api.etherscan.io/api?module=account&action=txlistinternal&address=",u,"&startblock=0&endblock=2702578&sort=asc&apikey=",API))
strings.list.internaltx <- as.list(NA)
for (j in 1:length(urls.internaltx)){
obj <- content(GET(urls.internaltx[j]),"text")
obj <- strsplit(obj,"\\{")
obj <- unlist(obj) #check if this should actually remain a list
strings.list.internaltx[[j]] <- obj
}
save(strings.list.internaltx,file="C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\strings.list.internaltx.Rdata")
# Here only the first two elements are response metadata (for 'normal'
# transactions the first three were).
strings.list.internaltx[[4]][1:2] #not relevant
strings.list.internaltx[[4]][3] #first relevant
# keep only contracts with at least one real internal transaction, NA otherwise
strings.list.internaltx2 <- as.list(NA)
for(i in 1:length(strings.list.internaltx)){
strings.list.internaltx2[i] <- ifelse(length(strings.list.internaltx[[i]])>2,strings.list.internaltx[i],NA)
}
# Parse each contract's internal-transaction strings into an 8-column data
# frame (or a single all-NA row when there are none); mirrors the 'normal'
# parsing loop above.
list.obj.internaltx <- as.list(rep(NA,length(strings.list.internaltx2)))
for (j in 1:length(strings.list.internaltx2)){
obj <- strings.list.internaltx2[[j]]
if (length(obj)>2) {
obj <- obj[-(1:2)]
m <- as.data.frame(matrix(NA,nrow=length(obj),ncol = 8)) #a data.frame (not a matrix) so character and numeric values can coexist
for (i in 1:length(obj)){
# str_match() returns a 1-row matrix; linear index [2] picks the capture group
from <- str_match(obj[i],"\"from\":\"\\s*(.*?)\\s*\"")
from <- from[2] # capture group: the sender address
to <- str_match(obj[i],"\"to\":\"\\s*(.*?)\\s*\"")
to <- to[2] # capture group: the recipient address
a <- str_match(obj[i],"blockNumber\":\"\\s*(.*?)\\s*\"")
a <- as.numeric(a[,2])
b <- str_match(obj[i],"timeStamp\":\"\\s*(.*?)\\s*\"")
b <- as.numeric(b[,2])
b.asdate <- as.POSIXct(b, origin="1970-01-01")
c <- str_match(obj[i],"value\":\"\\s*(.*?)\\s*\"")
c <- as.numeric(c[,2])
d <- str_match(obj[i],"gas\":\"\\s*(.*?)\\s*\"")
d <- as.numeric(d[,2])
e <- str_match(obj[i],"gasUsed\":\"\\s*(.*?)\\s*\"")
e <- as.numeric(e[,2])
m[i,] <- c(from,to,a,b,b.asdate,c,d,e)
}
d <- data.frame(m[,1],m[,2],as.numeric(m[,3]),as.numeric(m[,4]),as.numeric(m[,5]),as.numeric(m[,6]),as.numeric(m[,7]),as.numeric(m[,8]))
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasUsed")
list.obj.internaltx[[j]] <- d
}
else {
d <- data.frame(NA,NA,NA,NA,NA,NA,NA,NA)
colnames(d) <- c("from","to","blockNumber","timeStamp","timeStamp.asdate","value","gas","gasUsed")
list.obj.internaltx[[j]] <- d
}
}
class(list.obj.internaltx[[184]][1,"from"]) #good
class(list.obj.internaltx[[184]][1,"value"]) #good
###############
# Combine (i) and (ii) into per-contract 'behavioural' features (Gini index of
# incoming amounts, lifespan, average inter-arrival time, average gas
# expenditure) that will be fed into the classifier models.
dat <- matrix(NA_real_, nrow = length(list.obj), ncol = 4)
for (i in seq_along(list.obj)) {
  # Contracts with neither normal nor internal transactions keep their all-NA
  # row (the matrix is NA-initialised, so no explicit else branch is needed).
  if (!is.na(list.obj[[i]][1, 1]) || !is.na(list.obj.internaltx[[i]][1, 1])) {
    # Gini index over all incoming amounts (normal + internal deposits to this address).
    incoming.amounts.normal <- list.obj[[i]][, "value"]
    incoming.amounts.internal <- list.obj.internaltx[[i]][, "value"][list.obj.internaltx[[i]][, "to"] == ponzi.addresses[i]]
    incoming.amounts <- c(incoming.amounts.normal, incoming.amounts.internal)
    # Gini() already returns NA when it cannot be computed, so assign directly
    # (the original if(!is.na(Gini(...))) wrapper computed it twice for nothing).
    dat[i, 1] <- Gini(incoming.amounts, unbiased = FALSE, na.rm = TRUE)
    # Lifespan: time between first and last incoming transaction.
    incoming.time.normal <- list.obj[[i]][, "timeStamp"]
    incoming.time.internal <- list.obj.internaltx[[i]][, "timeStamp"][list.obj.internaltx[[i]][, "to"] == ponzi.addresses[i]]
    # sort() drops NAs by default (na.last = NA). The original
    # ifelse(is.na(.), NA, sort(.)) misaligned the sorted values against the
    # NA mask whenever NAs were present, because sort() shortens the vector.
    incoming.time <- sort(c(incoming.time.normal, incoming.time.internal))
    # Could be 0 with a single transaction; kept as 0 (not forced to NA).
    dat[i, 2] <- if (length(incoming.time) == 0) NA else diff(range(incoming.time))
    # Average time between two consecutive incoming transactions
    # (NaN when fewer than 2 transactions; mapped to NA).
    avg.gap <- mean(diff(incoming.time))
    dat[i, 3] <- if (is.nan(avg.gap)) NA else avg.gap
    # Average gas expenditure (relevant only for 'normal' transactions).
    gas <- list.obj[[i]][, "gas"]
    gas.price <- list.obj[[i]][, "gasPrice"]
    dat[i, 4] <- mean(gas * gas.price)
  }
}
colnames(dat) <- c("gini", "lifespan", "ave.time.btw.tx", "gas.expenditure")
head(dat)
#(iii) retrieving opcodes
###########
#we need an API from infura.io
#infura.API <- ???
set_rpc_address(infura.API)
#list of opcodes (available at https://ethervm.io/)
opcodes <- c("STOP", "ADD", "MUL", "SUB", "DIV", "SDIV", "MOD", "SMOD", "ADDMOD", "MULMOD", "EXP", "SIGNEXTEND", "LT", "GT", "SLT", "SGT", "EQ", "ISZERO", "AND", "OR", "XOR", "NOT", "BYTE", "SHL", "SHR", "SAR", "SHA3", "ADDRESS", "BALANCE", "ORIGIN", "CALLER", "CALLVALUE", "CALLDATALOAD", "CALLDATASIZE", "CALLDATACOPY", "CODESIZE", "CODECOPY", "GASPRICE", "EXTCODESIZE", "EXTCODECOPY", "RETURNDATASIZE", "RETURNDATACOPY", "EXTCODEHASH", "BLOCKHASH", "COINBASE", "TIMESTAMP", "NUMBER", "DIFFICULTY", "GASLIMIT", "POP", "MLOAD", "MSTORE", "MSTORE8", "SLOAD", "SSTORE", "JUMP", "JUMPI", "PC", "MSIZE", "GAS", "JUMPDEST", "PUSH1", "PUSH2", "PUSH3", "PUSH4", "PUSH5", "PUSH6", "PUSH7", "PUSH8", "PUSH9", "PUSH10", "PUSH11", "PUSH12", "PUSH13", "PUSH14", "PUSH15", "PUSH16", "PUSH17", "PUSH18", "PUSH19", "PUSH20", "PUSH21", "PUSH22", "PUSH23", "PUSH24", "PUSH25", "PUSH26", "PUSH27", "PUSH28", "PUSH29", "PUSH30", "PUSH31", "PUSH32", "DUP1", "DUP2", "DUP3", "DUP4", "DUP5", "DUP6", "DUP7", "DUP8", "DUP9", "DUP10", "DUP11", "DUP12", "DUP13", "DUP14", "DUP15", "DUP16", "SWAP1", "SWAP2", "SWAP3", "SWAP4", "SWAP5", "SWAP6", "SWAP7", "SWAP8", "SWAP9", "SWAP10", "SWAP11", "SWAP12", "SWAP13", "SWAP14", "SWAP15", "SWAP16", "LOG0", "LOG1", "LOG2", "LOG3", "LOG4", "PUSH", "DUP", "SWAP", "CREATE", "CALL", "CALLCODE", "RETURN", "DELEGATECALL", "CREATE2", "STATICCALL", "REVERT", "SELFDESTRUCT")
save(opcodes,file="C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\opcodes.Rdata")
# Count how many times each EVM opcode appears in each contract's disassembly.
m <- matrix(NA_integer_, nrow = length(ponzi.addresses), ncol = length(opcodes))
for (i in seq_along(ponzi.addresses)) {
  address <- ponzi.addresses[i]
  url <- paste0("https://ethervm.io/decompile/", address, "#disassembly")
  obj <- content(GET(url), "text")
  # \b word boundaries prevent substring over-counting: the plain
  # str_count(obj, opcodes) also counted e.g. "ADD" inside "ADDRESS"/"ADDMOD"
  # and "PUSH1" inside "PUSH10".."PUSH19".
  m[i, ] <- str_count(obj, paste0("\\b", opcodes, "\\b"))
}
colnames(m) <- opcodes
head(m)
opcodes.count.matrix <- m
save(opcodes.count.matrix, file = "C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\opcodes.count.matrix.Rdata")
load("C:\\Users\\zenob\\OneDrive\\Documenti\\ETH thesis\\opcodes.count.matrix.Rdata")
# Final feature matrix: behavioural features + opcode counts.
dat <- cbind(dat, opcodes.count.matrix)
head(dat)
|
library(shiny)

# UI definition for the slider demo: a single slider input whose current
# value the server echoes back as text.
shinyUI(
  fluidPage(
    titlePanel("Slider App"),
    sidebarLayout(
      sidebarPanel(
        h1("Move the Slider!"),
        sliderInput(
          inputId = "slider1",
          label = "Slide Me!",
          min = 0,
          max = 100,
          value = 0
        )
      ),
      mainPanel(
        h3("Slider Value:"),
        # rendered by the server as output$text
        textOutput("text")
      )
    )
  )
)
|
/jhu-9-developing-data-products/shiny-2/ui.R
|
no_license
|
MicheleVNG/DataScience-Reference-and-Notes
|
R
| false
| false
| 244
|
r
|
# ui.R — UI definition for a minimal Shiny slider demo.
library(shiny)
shinyUI(fluidPage(
    titlePanel("Slider App"),
    sidebarLayout(
        sidebarPanel(
            h1("Move the Slider!"),
            # slider input "slider1": min 0, max 100, initial value 0
            sliderInput("slider1", "Slide Me!", 0, 100, 0)
        ),
        mainPanel(
            h3("Slider Value:"),
            # the server is expected to render output$text with the slider value
            textOutput("text")
        )
    )
))
|
# TOOL bwa-mem-paired-end-with-index-building.R: "BWA MEM for paired-end reads and own genome" (Aligns reads to genomes using the BWA MEM algorithm. Results are sorted and indexed BAM files, which are ready for viewing in the Chipster genome browser.
# Note that this BWA MEM tool requires that you have imported the reference genome to Chipster in fasta format. If you would like to align paired-end reads against publicly available genomes, please use the tool \"BWA MEM for paired-end reads\".)
# INPUT reads1.txt: "Reads to align" TYPE GENERIC
# INPUT reads2.txt: "Reads to align" TYPE GENERIC
# INPUT genome.txt: "Reference genome" TYPE GENERIC
# OUTPUT bwa.bam
# OUTPUT bwa.log
# OUTPUT OPTIONAL bwa.bam.bai
# PARAMETER OPTIONAL index.file: "Create index file" TYPE [index_file: "Create index file", no_index: "No index file"] DEFAULT no_index (Creates index file for BAM. By default no index file.)
# PARAMETER mode: "Data source" TYPE [ normal: " Illumina, 454, IonTorrent reads longer than 70 base pairs", pacbio: "PacBio subreads"] DEFAULT normal (Defining the type of reads will instruct the tool to use a predefined set of parameters optimized for that read type.)
# RUNTIME R-4.1.1
# KM 11.11.2014
# Uncompress the inputs if they are gzip files.
source(file.path(chipster.common.lib.path, "zip-utils.R"))
unzipIfGZipFile("reads1.txt")
unzipIfGZipFile("reads2.txt")
unzipIfGZipFile("genome.txt")
# Paths to the BWA MEM binary and the index-building helper script.
bwa.binary <- file.path(chipster.tools.path, "bwa", "bwa mem")
bwa.index.binary <- file.path(chipster.module.path, "shell", "check_bwa_index.sh")
command.start <- paste("bash -c '", bwa.binary)
# Build (or reuse) the BWA index for the reference genome; the helper prints
# the path of the indexed genome on its last output line.
print("Indexing the genome...")
runExternal("echo Indexing the genome... > bwa.log")
check.command <- paste(bwa.index.binary, "genome.txt| tail -1 ")
# genome.dir <- system(check.command, intern = TRUE)
# bwa.genome <- file.path( genome.dir , "genome.txt")
bwa.genome <- system(check.command, intern = TRUE)
# "-x pacbio" switches BWA MEM to its PacBio preset; empty for short reads.
mode.parameters <- ifelse(mode == "pacbio", "-x pacbio", "")
# command ending: stdout (SAM) and stderr (appended to the log) handled by the shell
command.end <- paste(bwa.genome, "reads1.txt reads2.txt 1> alignment.sam 2>> bwa.log'")
# run bwa alignment
bwa.command <- paste(command.start, mode.parameters, command.end)
# NOTE(review): this log line says "reads.txt" although the actual inputs are
# reads1.txt and reads2.txt, and the single '>' overwrites the earlier
# "Indexing the genome..." message in bwa.log.
echo.command <- paste("echo '", bwa.binary, mode.parameters, bwa.genome, "reads.txt ' > bwa.log")
# stop(paste('CHIPSTER-NOTE: ', bwa.command))
runExternal(echo.command)
runExternal(bwa.command)
# samtools binary
samtools.binary <- c(file.path(chipster.tools.path, "samtools", "bin", "samtools"))
# convert sam to bam
runExternal(paste(samtools.binary, "view -bS alignment.sam -o alignment.bam"))
# sort bam
runExternal(paste(samtools.binary, "sort alignment.bam -o alignment.sorted.bam"))
# index bam
runExternal(paste(samtools.binary, "index alignment.sorted.bam"))
# rename result files; the .bai index is exposed as an output only on request
runExternal("mv alignment.sorted.bam bwa.bam")
if (index.file == "index_file") {
    runExternal("mv alignment.sorted.bam.bai bwa.bam.bai")
}
# Handle output names: derive the user-visible BAM/BAI names from the input read files.
source(file.path(chipster.common.lib.path, "tool-utils.R"))
# read input names
inputnames <- read_input_definitions()
# Determine base name
base1 <- strip_name(inputnames$reads1.txt)
base2 <- strip_name(inputnames$reads2.txt)
basename <- paired_name(base1, base2)
# Make a matrix of output names (internal name -> user-visible name)
outputnames <- matrix(NA, nrow = 2, ncol = 2)
outputnames[1, ] <- c("bwa.bam", paste(basename, ".bam", sep = ""))
outputnames[2, ] <- c("bwa.bam.bai", paste(basename, ".bam.bai", sep = ""))
# Write output definitions file
write_output_definitions(outputnames)
|
/tools/ngs/R/bwa-mem-paired-end-with-index-building.R
|
permissive
|
chipster/chipster-tools
|
R
| false
| false
| 3,520
|
r
|
# TOOL bwa-mem-paired-end-with-index-building.R: "BWA MEM for paired-end reads and own genome" (Aligns reads to genomes using the BWA MEM algorithm. Results are sorted and indexed BAM files, which are ready for viewing in the Chipster genome browser.
# Note that this BWA MEM tool requires that you have imported the reference genome to Chipster in fasta format. If you would like to align paired-end reads against publicly available genomes, please use the tool \"BWA MEM for paired-end reads\".)
# INPUT reads1.txt: "Reads to align" TYPE GENERIC
# INPUT reads2.txt: "Reads to align" TYPE GENERIC
# INPUT genome.txt: "Reference genome" TYPE GENERIC
# OUTPUT bwa.bam
# OUTPUT bwa.log
# OUTPUT OPTIONAL bwa.bam.bai
# PARAMETER OPTIONAL index.file: "Create index file" TYPE [index_file: "Create index file", no_index: "No index file"] DEFAULT no_index (Creates index file for BAM. By default no index file.)
# PARAMETER mode: "Data source" TYPE [ normal: " Illumina, 454, IonTorrent reads longer than 70 base pairs", pacbio: "PacBio subreads"] DEFAULT normal (Defining the type of reads will instruct the tool to use a predefined set of parameters optimized for that read type.)
# RUNTIME R-4.1.1
# KM 11.11.2014
# Uncompress the inputs if they are gzip files.
source(file.path(chipster.common.lib.path, "zip-utils.R"))
unzipIfGZipFile("reads1.txt")
unzipIfGZipFile("reads2.txt")
unzipIfGZipFile("genome.txt")

# Paths to the BWA MEM binary and the index-building helper script.
bwa.binary <- file.path(chipster.tools.path, "bwa", "bwa mem")
bwa.index.binary <- file.path(chipster.module.path, "shell", "check_bwa_index.sh")
command.start <- paste("bash -c '", bwa.binary)

# Build (or reuse) the BWA index for the reference genome; the helper prints
# the path of the indexed genome on its last output line.
print("Indexing the genome...")
runExternal("echo Indexing the genome... > bwa.log")
check.command <- paste(bwa.index.binary, "genome.txt| tail -1 ")
bwa.genome <- system(check.command, intern = TRUE)

# "-x pacbio" switches BWA MEM to its PacBio preset; empty for short reads.
# (plain if/else: ifelse() is meant for vectors, the condition here is scalar)
mode.parameters <- if (mode == "pacbio") "-x pacbio" else ""

# Assemble and run the alignment command; stdout -> SAM, stderr appended to the log.
command.end <- paste(bwa.genome, "reads1.txt reads2.txt 1> alignment.sam 2>> bwa.log'")
bwa.command <- paste(command.start, mode.parameters, command.end)
# Log the exact command line. Fixed: record the real inputs
# reads1.txt/reads2.txt instead of the stale "reads.txt".
echo.command <- paste("echo '", bwa.binary, mode.parameters, bwa.genome, "reads1.txt reads2.txt ' > bwa.log")
runExternal(echo.command)
runExternal(bwa.command)

# samtools binary (redundant c() wrapper removed: file.path() already returns a character vector)
samtools.binary <- file.path(chipster.tools.path, "samtools", "bin", "samtools")
# SAM -> BAM, sort, index.
runExternal(paste(samtools.binary, "view -bS alignment.sam -o alignment.bam"))
runExternal(paste(samtools.binary, "sort alignment.bam -o alignment.sorted.bam"))
runExternal(paste(samtools.binary, "index alignment.sorted.bam"))

# Rename result files; the .bai index is exposed as an output only on request.
runExternal("mv alignment.sorted.bam bwa.bam")
if (index.file == "index_file") {
    runExternal("mv alignment.sorted.bam.bai bwa.bam.bai")
}

# Handle output names: derive the user-visible BAM/BAI names from the input read files.
source(file.path(chipster.common.lib.path, "tool-utils.R"))
inputnames <- read_input_definitions()
base1 <- strip_name(inputnames$reads1.txt)
base2 <- strip_name(inputnames$reads2.txt)
basename <- paired_name(base1, base2)
# Matrix of output names: internal name -> user-visible name.
outputnames <- matrix(NA, nrow = 2, ncol = 2)
outputnames[1, ] <- c("bwa.bam", paste(basename, ".bam", sep = ""))
outputnames[2, ] <- c("bwa.bam.bai", paste(basename, ".bam.bai", sep = ""))
write_output_definitions(outputnames)
|
# OEML - REST API
#
# This section will provide necessary information about the `CoinAPI OEML REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540)
#
# The version of the OpenAPI document: v1
# Contact: support@coinapi.io
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title Fills
#'
#' @description Fills Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field time character [optional]
#'
#' @field price numeric [optional]
#'
#' @field amount numeric [optional]
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Fills: one order fill (time / price / amount) with hand-rolled JSON
# (de)serialization helpers. Generated-SDK-style R6 class; all fields optional.
Fills <- R6::R6Class(
  'Fills',
  public = list(
    `time` = NULL,
    `price` = NULL,
    `amount` = NULL,
    # Constructor: all fields optional; unknown extra arguments are collected
    # and ignored.
    initialize = function(
      `time`=NULL, `price`=NULL, `amount`=NULL, ...
    ) {
      local.optional.var <- list(...)
      if (!is.null(`time`)) {
        self$`time` <- `time`
      }
      if (!is.null(`price`)) {
        self$`price` <- `price`
      }
      if (!is.null(`amount`)) {
        self$`amount` <- `amount`
      }
    },
    # Return the non-NULL fields as a named list (suitable for jsonlite::toJSON).
    toJSON = function() {
      FillsObject <- list()
      if (!is.null(self$`time`)) {
        FillsObject[['time']] <-
          self$`time`
      }
      if (!is.null(self$`price`)) {
        FillsObject[['price']] <-
          self$`price`
      }
      if (!is.null(self$`amount`)) {
        FillsObject[['amount']] <-
          self$`amount`
      }
      FillsObject
    },
    # Populate this object from a JSON string; returns self (chainable).
    fromJSON = function(FillsJson) {
      FillsObject <- jsonlite::fromJSON(FillsJson)
      if (!is.null(FillsObject$`time`)) {
        self$`time` <- FillsObject$`time`
      }
      if (!is.null(FillsObject$`price`)) {
        self$`price` <- FillsObject$`price`
      }
      if (!is.null(FillsObject$`amount`)) {
        self$`amount` <- FillsObject$`amount`
      }
      self
    },
    # Hand-built JSON serialization of the non-NULL fields.
    # Bug fix: price/amount previously used the "%d" sprintf format, which
    # errors in R for non-integer numerics (sprintf("%d", 1.5) fails); "%s"
    # renders any numeric value unquoted, as JSON numbers require.
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`time`)) {
          sprintf(
          '"time":
            "%s"
                    ',
          self$`time`
          )},
        if (!is.null(self$`price`)) {
          sprintf(
          '"price":
            %s
                    ',
          self$`price`
          )},
        if (!is.null(self$`amount`)) {
          sprintf(
          '"amount":
            %s
                    ',
          self$`amount`
          )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Deserialize from a JSON string without per-field NULL checks; returns self.
    fromJSONString = function(FillsJson) {
      FillsObject <- jsonlite::fromJSON(FillsJson)
      self$`time` <- FillsObject$`time`
      self$`price` <- FillsObject$`price`
      self$`amount` <- FillsObject$`amount`
      self
    }
  )
)
|
/oeml-sdk/r/R/fills.R
|
permissive
|
franklili3/coinapi-sdk
|
R
| false
| false
| 2,983
|
r
|
# OEML - REST API
#
# This section will provide necessary information about the `CoinAPI OEML REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540)
#
# The version of the OpenAPI document: v1
# Contact: support@coinapi.io
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title Fills
#'
#' @description Fills Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field time character [optional]
#'
#' @field price numeric [optional]
#'
#' @field amount numeric [optional]
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Fills: one order fill (time / price / amount) with hand-rolled JSON
# (de)serialization helpers. Generated-SDK-style R6 class; all fields optional.
Fills <- R6::R6Class(
  'Fills',
  public = list(
    `time` = NULL,
    `price` = NULL,
    `amount` = NULL,
    # Constructor: all fields optional; unknown extra arguments are collected
    # and ignored.
    initialize = function(
      `time`=NULL, `price`=NULL, `amount`=NULL, ...
    ) {
      local.optional.var <- list(...)
      if (!is.null(`time`)) {
        self$`time` <- `time`
      }
      if (!is.null(`price`)) {
        self$`price` <- `price`
      }
      if (!is.null(`amount`)) {
        self$`amount` <- `amount`
      }
    },
    # Return the non-NULL fields as a named list (suitable for jsonlite::toJSON).
    toJSON = function() {
      FillsObject <- list()
      if (!is.null(self$`time`)) {
        FillsObject[['time']] <-
          self$`time`
      }
      if (!is.null(self$`price`)) {
        FillsObject[['price']] <-
          self$`price`
      }
      if (!is.null(self$`amount`)) {
        FillsObject[['amount']] <-
          self$`amount`
      }
      FillsObject
    },
    # Populate this object from a JSON string; returns self (chainable).
    fromJSON = function(FillsJson) {
      FillsObject <- jsonlite::fromJSON(FillsJson)
      if (!is.null(FillsObject$`time`)) {
        self$`time` <- FillsObject$`time`
      }
      if (!is.null(FillsObject$`price`)) {
        self$`price` <- FillsObject$`price`
      }
      if (!is.null(FillsObject$`amount`)) {
        self$`amount` <- FillsObject$`amount`
      }
      self
    },
    # Hand-built JSON serialization of the non-NULL fields.
    # NOTE(review): the "%d" format errors in R for non-integer numerics
    # (sprintf("%d", 1.5) fails), so fractional price/amount values will make
    # this method raise; "%s" or "%g" would be safer — confirm against the
    # generator template before changing.
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`time`)) {
          sprintf(
          '"time":
            "%s"
                    ',
          self$`time`
          )},
        if (!is.null(self$`price`)) {
          sprintf(
          '"price":
            %d
                    ',
          self$`price`
          )},
        if (!is.null(self$`amount`)) {
          sprintf(
          '"amount":
            %d
                    ',
          self$`amount`
          )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Deserialize from a JSON string without per-field NULL checks; returns self.
    fromJSONString = function(FillsJson) {
      FillsObject <- jsonlite::fromJSON(FillsJson)
      self$`time` <- FillsObject$`time`
      self$`price` <- FillsObject$`price`
      self$`amount` <- FillsObject$`amount`
      self
    }
  )
)
|
\name{lmerControl}
\title{Control of Mixed Model Fitting}
\alias{glmerControl}
\alias{lmerControl}
\alias{nlmerControl}
\alias{.makeCC}
\description{
Construct control structures for mixed model fitting. All arguments
have defaults, and can be grouped into
\itemize{
\item general control parameters, most importantly \code{optimizer},
further \code{restart_edge}, etc;
\item model- or data-checking specifications, in short
\dQuote{checking options}, such as \code{check.nobs.vs.rankZ}, or
\code{check.rankX} (currently not for \code{nlmerControl});
\item all the parameters to be passed to the optimizer, e.g.,
maximal number of iterations, passed via the \code{optCtrl} list
argument.
}
}
\usage{
lmerControl(optimizer = "bobyqa",% was "Nelder_Mead" till Dec.2013
restart_edge = TRUE,
boundary.tol = 1e-5,
calc.derivs=TRUE,
use.last.params=FALSE,
sparseX = FALSE,
## input checking options
check.nobs.vs.rankZ = "warningSmall",
check.nobs.vs.nlev = "stop",
check.nlev.gtreq.5 = "ignore",
check.nlev.gtr.1 = "stop",
check.nobs.vs.nRE="stop",
check.rankX = c("message+drop.cols", "silent.drop.cols", "warn+drop.cols",
"stop.deficient", "ignore"),
check.scaleX = "warning",
check.formula.LHS = "stop",
## convergence checking options
check.conv.grad = .makeCC("warning", tol = 2e-3, relTol = NULL),
check.conv.singular = .makeCC(action = "ignore", tol = 1e-4),
check.conv.hess = .makeCC(action = "warning", tol = 1e-6),
## optimizer args
optCtrl = list())
glmerControl(optimizer = c("bobyqa", "Nelder_Mead"),
restart_edge = FALSE,
boundary.tol = 1e-5,
calc.derivs=TRUE,
use.last.params=FALSE,
sparseX = FALSE,
tolPwrss=1e-7,
compDev=TRUE,
## input checking options
check.nobs.vs.rankZ = "warningSmall",
check.nobs.vs.nlev = "stop",
check.nlev.gtreq.5 = "ignore",
check.nlev.gtr.1 = "stop",
check.nobs.vs.nRE="stop",
check.rankX = c("message+drop.cols", "silent.drop.cols", "warn+drop.cols",
"stop.deficient", "ignore"),
check.scaleX = "warning",
check.formula.LHS = "stop",
## convergence checking options
check.conv.grad = .makeCC("warning", tol = 1e-3, relTol = NULL),
check.conv.singular = .makeCC(action = "ignore", tol = 1e-4),
check.conv.hess = .makeCC(action = "warning", tol = 1e-6),
## optimizer args
optCtrl = list())
nlmerControl(optimizer = "Nelder_Mead", tolPwrss = 1e-10,
optCtrl = list())
.makeCC(action, tol, relTol, \dots)
}
\arguments{
\item{optimizer}{character - name of optimizing
function(s). A character vector or list of functions: length 1 for
    \code{lmer} or \code{glmer} (possibly length 2 for \code{glmer}).
The built-in optimizers are \code{\link{Nelder_Mead}} and
\code{\link[minqa]{bobyqa}} (from the \pkg{minqa} package). Any
minimizing function that allows box constraints can be used provided
that it
\describe{
\item{(1)}{takes input parameters \code{fn} (function to be
optimized), \code{par} (starting parameter values), \code{lower}
(lower bounds) and \code{control} (control parameters, passed
through from the \code{control} argument) and}
\item{(2)}{returns a list with (at least) elements \code{par}
(best-fit parameters), \code{fval} (best-fit function value),
\code{conv} (convergence code, equal to zero for
successful convergence) and (optionally) \code{message}
(informational message, or explanation of convergence failure).}
}
Special provisions are made for \code{\link{bobyqa}},
\code{\link{Nelder_Mead}}, and optimizers wrapped in the
\pkg{optimx} package; to use the \pkg{optimx} optimizers (including
\code{L-BFGS-B} from base \code{\link{optim}} and
\code{\link{nlminb}}), pass the \code{method} argument to
\code{optim} in the \code{optCtrl} argument (you may also
need to load the \code{optimx} package manually using
\code{\link{library}(optimx)} or \code{\link{require}(optimx)}).
For \code{glmer}, if \code{length(optimizer)==2}, the first element
will be used for the preliminary (random effects parameters only)
optimization, while the second will be used for the final (random
effects plus fixed effect parameters) phase. See
\code{\link{modular}} for more information on these two phases.
}
\item{calc.derivs}{logical - compute gradient and Hessian of nonlinear
optimization solution?}
\item{use.last.params}{logical - should the last value of the
parameters evaluated (\code{TRUE}), rather than the value of the
parameters corresponding to the minimum deviance, be returned?
This is a "backward bug-compatibility" option; use \code{TRUE}
only when trying to match previous results.}
\item{sparseX}{logical - should a sparse model matrix be
used for the fixed-effects terms?
Currently inactive.}
\item{restart_edge}{logical - should the optimizer
attempt a restart when it finds a solution at the
boundary (i.e. zero random-effect variances or perfect
+/-1 correlations)? (Currently only implemented for
\code{lmerControl}.)}
\item{boundary.tol}{numeric - within what distance of
a boundary should the boundary be checked for a better fit?
(Set to zero to disable boundary checking.)}
\item{tolPwrss}{numeric scalar - the tolerance for declaring
convergence in the penalized iteratively weighted residual
sum-of-squares step.}
\item{compDev}{logical scalar - should compiled code be
used for the deviance evaluation during the optimization
of the parameter estimates?}
\item{check.nlev.gtreq.5}{character - rules for
checking whether all random effects have >= 5 levels.
See \code{action}.}
\item{check.nlev.gtr.1}{character - rules for checking
whether all random effects have > 1 level. See \code{action}.}
\item{check.nobs.vs.rankZ}{character - rules for
checking whether the number of observations is greater
than (or greater than or equal to) the rank of the random
effects design matrix (Z), usually necessary for
identifiable variances. As for \code{action}, with
the addition of \code{"warningSmall"} and \code{"stopSmall"}, which run
the test only if the dimensions of \code{Z} are < 1e6.
\code{nobs > rank(Z)} will be tested for LMMs and GLMMs with
estimated scale parameters; \code{nobs >= rank(Z)} will be tested
for GLMMs with fixed scale parameter.
The rank test is done using the
\code{method="qr"} option of the \code{\link[Matrix]{rankMatrix}}
function.
}
\item{check.nobs.vs.nlev}{ character - rules for checking whether the
number of observations is less than (or less than or equal to) the
number of levels of every grouping factor, usually necessary for
identifiable variances. As for \code{action}.
\code{nobs<nlevels} will be tested for LMMs and GLMMs with estimated
scale parameters; \code{nobs<=nlevels} will be tested for GLMMs with
fixed scale parameter.}
\item{check.nobs.vs.nRE}{character - rules for
checking whether the number of observations is greater
than (or greater than or equal to) the number of random-effects
levels for each term, usually necessary for identifiable variances.
As for \code{check.nobs.vs.nlev}.}
\item{check.conv.grad}{rules for checking the gradient of the deviance
function for convergence. A list as returned
by \code{.makeCC}, or a character string with only the action.}
\item{check.conv.singular}{rules for checking for a singular fit,
i.e. one where some parameters are on the boundary of the feasible
space (for example, random effects variances equal to 0 or
correlations between random effects equal to +/- 1.0);
as for \code{check.conv.grad} above.}
\item{check.conv.hess}{rules for checking the Hessian of the deviance
    function for convergence; as for \code{check.conv.grad}
above.}
\item{check.rankX}{character - specifying if \code{\link[Matrix]{rankMatrix}(X)}
should be compared with \code{ncol(X)} and if columns from the design
matrix should possibly be dropped to ensure that it has full rank.
Sometimes needed to make the model identifiable. The options can be
abbreviated; the three \code{"*.drop.cols"} options all do drop
columns, \code{"stop.deficient"} gives an error when the rank is
smaller than the number of columns where \code{"ignore"} does no
rank computation, and will typically lead to less easily
understandable errors, later.}
\item{check.scaleX}{character - check for problematic scaling of
columns of fixed-effect model matrix, e.g. parameters measured on
very different scales.}
\item{check.formula.LHS}{ check whether specified formula has
a left-hand side. Primarily for internal use within
\code{simulate.merMod};
\emph{use at your own risk} as it may allow the generation
of unstable \code{merMod} objects}
\item{optCtrl}{a \code{\link{list}} of additional arguments to be
passed to the nonlinear optimizer (see \code{\link{Nelder_Mead}},
\code{\link[minqa]{bobyqa}}). In particular, both
\code{Nelder_Mead} and \code{bobyqa} use \code{maxfun} to
specify the maximum number of function evaluations they
will try before giving up - in contrast to
\code{\link{optim}} and \code{optimx}-wrapped optimizers,
which use \code{maxit}.}
\item{action}{character - generic choices for the severity level
of any test. "ignore": skip the test. "warning": warn if test fails.
"stop": throw an error if test fails.}
\item{tol}{numeric - tolerance for check }
\item{relTol}{numeric - tolerance for checking relative variation}
\item{\dots}{other elements to include in check specification}
}
\value{
The \code{*Control} functions return a list (inheriting from class
\code{"merControl"}) containing
\enumerate{
\item general control parameters, such as \code{optimizer}, \code{restart_edge};
\item (currently not for \code{nlmerControl}:)
\code{"checkControl"}, a \code{\link{list}} of data-checking
specifications, e.g., \code{check.nobs.vs.rankZ};
\item parameters to be passed to the optimizer, i.e., the \code{optCtrl}
list, which may contain \code{maxiter}.
}
\code{.makeCC} returns a list containing the check specification
(action, tolerance, and optionally relative tolerance).
}
\details{
Note that (only!) the pre-fitting \dQuote{checking options}
(i.e., all those starting with \code{"check."} but \emph{not}
including the convergence checks (\code{"check.conv.*"}) or
rank-checking (\code{"check.rank*"}) options)
may also be set globally via \code{\link{options}}.
In that case, \code{(g)lmerControl} will use them rather than the
default values, but will \emph{not} override values that are passed as
explicit arguments.
For example, \code{options(lmerControl=list(check.nobs.vs.rankZ = "ignore"))}
will suppress warnings that the number of observations is less than
the rank of the random effects model matrix \code{Z}.
}
\examples{
str(lmerControl())
str(glmerControl())
\dontrun{
## fit with default Nelder-Mead algorithm ...
fm0 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy)
fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
## or with minqa::bobyqa ...
fm1_bobyqa <- update(fm1,control=lmerControl(optimizer="bobyqa"))
## or with the nlminb function used in older (<1.0) versions of lme4;
## this will usually replicate older results
require(optimx)
fm1_nlminb <- update(fm1,control=lmerControl(optimizer="optimx",
optCtrl=list(method="nlminb")))
## The other option here is method="L-BFGS-B".
## Or we can wrap base::optim():
optimwrap <- function(fn,par,lower,upper,control=list(),
...) {
if (is.null(control$method)) stop("must specify method in optCtrl")
method <- control$method
control$method <- NULL
## "Brent" requires finite upper values (lower bound will always
## be zero in this case)
if (method=="Brent") upper <- pmin(1e4,upper)
res <- optim(par=par,fn=fn,lower=lower,upper=upper,
control=control,method=method,...)
with(res,list(par=par,
fval=value,
feval=counts[1],
conv=convergence,
message=message))
}
fm0_brent <- update(fm0,control=lmerControl(optimizer="optimwrap",
optCtrl=list(method="Brent")))
## You can also use functions from the nloptr package.
## You must run library(nloptr) here ... it is commented out
## to avoid making lme4 dependent on nloptr
defaultControl <- list(algorithm="NLOPT_LN_BOBYQA",
xtol_rel=1e-6,maxeval=1e5)
nloptwrap2 <- function(fn,par,lower,upper,control=list(),...) {
for (n in names(defaultControl))
if (is.null(control[[n]])) control[[n]] <- defaultControl[[n]]
res <- nloptr(x0=par,eval_f=fn,lb=lower,ub=upper,opts=control,...)
with(res,list(par=solution,
fval=objective,
feval=iterations,
conv=if (status>0) 0 else status,
message=message))
}
fm1_nloptr <- update(fm1,control=lmerControl(optimizer="nloptwrap2"))
fm1_nloptr_NM <- update(fm1,control=lmerControl(optimizer="nloptwrap2",
optCtrl=list(algorithm="NLOPT_LN_NELDERMEAD")))
## other algorithm options include NLOPT_LN_COBYLA, NLOPT_LN_SBPLX
}
}
|
/man/lmerControl.Rd
|
no_license
|
TotallyBullshit/lme4
|
R
| false
| false
| 13,828
|
rd
|
\name{lmerControl}
\title{Control of Mixed Model Fitting}
\alias{glmerControl}
\alias{lmerControl}
\alias{nlmerControl}
\alias{.makeCC}
\description{
Construct control structures for mixed model fitting. All arguments
have defaults, and can be grouped into
\itemize{
\item general control parameters, most importantly \code{optimizer},
further \code{restart_edge}, etc;
\item model- or data-checking specifications, in short
\dQuote{checking options}, such as \code{check.nobs.vs.rankZ}, or
\code{check.rankX} (currently not for \code{nlmerControl});
\item all the parameters to be passed to the optimizer, e.g.,
maximal number of iterations, passed via the \code{optCtrl} list
argument.
}
}
\usage{
lmerControl(optimizer = "bobyqa",% was "Nelder_Mead" till Dec.2013
restart_edge = TRUE,
boundary.tol = 1e-5,
calc.derivs=TRUE,
use.last.params=FALSE,
sparseX = FALSE,
## input checking options
check.nobs.vs.rankZ = "warningSmall",
check.nobs.vs.nlev = "stop",
check.nlev.gtreq.5 = "ignore",
check.nlev.gtr.1 = "stop",
check.nobs.vs.nRE="stop",
check.rankX = c("message+drop.cols", "silent.drop.cols", "warn+drop.cols",
"stop.deficient", "ignore"),
check.scaleX = "warning",
check.formula.LHS = "stop",
## convergence checking options
check.conv.grad = .makeCC("warning", tol = 2e-3, relTol = NULL),
check.conv.singular = .makeCC(action = "ignore", tol = 1e-4),
check.conv.hess = .makeCC(action = "warning", tol = 1e-6),
## optimizer args
optCtrl = list())
glmerControl(optimizer = c("bobyqa", "Nelder_Mead"),
restart_edge = FALSE,
boundary.tol = 1e-5,
calc.derivs=TRUE,
use.last.params=FALSE,
sparseX = FALSE,
tolPwrss=1e-7,
compDev=TRUE,
## input checking options
check.nobs.vs.rankZ = "warningSmall",
check.nobs.vs.nlev = "stop",
check.nlev.gtreq.5 = "ignore",
check.nlev.gtr.1 = "stop",
check.nobs.vs.nRE="stop",
check.rankX = c("message+drop.cols", "silent.drop.cols", "warn+drop.cols",
"stop.deficient", "ignore"),
check.scaleX = "warning",
check.formula.LHS = "stop",
## convergence checking options
check.conv.grad = .makeCC("warning", tol = 1e-3, relTol = NULL),
check.conv.singular = .makeCC(action = "ignore", tol = 1e-4),
check.conv.hess = .makeCC(action = "warning", tol = 1e-6),
## optimizer args
optCtrl = list())
nlmerControl(optimizer = "Nelder_Mead", tolPwrss = 1e-10,
optCtrl = list())
.makeCC(action, tol, relTol, \dots)
}
\arguments{
\item{optimizer}{character - name of optimizing
function(s). A character vector or list of functions: length 1 for
    \code{lmer} or \code{glmer} (possibly length 2 for \code{glmer}).
The built-in optimizers are \code{\link{Nelder_Mead}} and
\code{\link[minqa]{bobyqa}} (from the \pkg{minqa} package). Any
minimizing function that allows box constraints can be used provided
that it
\describe{
\item{(1)}{takes input parameters \code{fn} (function to be
optimized), \code{par} (starting parameter values), \code{lower}
(lower bounds) and \code{control} (control parameters, passed
through from the \code{control} argument) and}
\item{(2)}{returns a list with (at least) elements \code{par}
(best-fit parameters), \code{fval} (best-fit function value),
\code{conv} (convergence code, equal to zero for
successful convergence) and (optionally) \code{message}
(informational message, or explanation of convergence failure).}
}
Special provisions are made for \code{\link{bobyqa}},
\code{\link{Nelder_Mead}}, and optimizers wrapped in the
\pkg{optimx} package; to use the \pkg{optimx} optimizers (including
\code{L-BFGS-B} from base \code{\link{optim}} and
\code{\link{nlminb}}), pass the \code{method} argument to
\code{optim} in the \code{optCtrl} argument (you may also
need to load the \code{optimx} package manually using
\code{\link{library}(optimx)} or \code{\link{require}(optimx)}).
For \code{glmer}, if \code{length(optimizer)==2}, the first element
will be used for the preliminary (random effects parameters only)
optimization, while the second will be used for the final (random
effects plus fixed effect parameters) phase. See
\code{\link{modular}} for more information on these two phases.
}
\item{calc.derivs}{logical - compute gradient and Hessian of nonlinear
optimization solution?}
\item{use.last.params}{logical - should the last value of the
parameters evaluated (\code{TRUE}), rather than the value of the
parameters corresponding to the minimum deviance, be returned?
This is a "backward bug-compatibility" option; use \code{TRUE}
only when trying to match previous results.}
\item{sparseX}{logical - should a sparse model matrix be
used for the fixed-effects terms?
Currently inactive.}
\item{restart_edge}{logical - should the optimizer
attempt a restart when it finds a solution at the
boundary (i.e. zero random-effect variances or perfect
+/-1 correlations)? (Currently only implemented for
\code{lmerControl}.)}
\item{boundary.tol}{numeric - within what distance of
a boundary should the boundary be checked for a better fit?
(Set to zero to disable boundary checking.)}
\item{tolPwrss}{numeric scalar - the tolerance for declaring
convergence in the penalized iteratively weighted residual
sum-of-squares step.}
\item{compDev}{logical scalar - should compiled code be
used for the deviance evaluation during the optimization
of the parameter estimates?}
\item{check.nlev.gtreq.5}{character - rules for
checking whether all random effects have >= 5 levels.
See \code{action}.}
\item{check.nlev.gtr.1}{character - rules for checking
whether all random effects have > 1 level. See \code{action}.}
\item{check.nobs.vs.rankZ}{character - rules for
checking whether the number of observations is greater
than (or greater than or equal to) the rank of the random
effects design matrix (Z), usually necessary for
identifiable variances. As for \code{action}, with
the addition of \code{"warningSmall"} and \code{"stopSmall"}, which run
the test only if the dimensions of \code{Z} are < 1e6.
\code{nobs > rank(Z)} will be tested for LMMs and GLMMs with
estimated scale parameters; \code{nobs >= rank(Z)} will be tested
for GLMMs with fixed scale parameter.
The rank test is done using the
\code{method="qr"} option of the \code{\link[Matrix]{rankMatrix}}
function.
}
\item{check.nobs.vs.nlev}{ character - rules for checking whether the
number of observations is less than (or less than or equal to) the
number of levels of every grouping factor, usually necessary for
identifiable variances. As for \code{action}.
\code{nobs<nlevels} will be tested for LMMs and GLMMs with estimated
scale parameters; \code{nobs<=nlevels} will be tested for GLMMs with
fixed scale parameter.}
\item{check.nobs.vs.nRE}{character - rules for
checking whether the number of observations is greater
than (or greater than or equal to) the number of random-effects
levels for each term, usually necessary for identifiable variances.
As for \code{check.nobs.vs.nlev}.}
\item{check.conv.grad}{rules for checking the gradient of the deviance
function for convergence. A list as returned
by \code{.makeCC}, or a character string with only the action.}
\item{check.conv.singular}{rules for checking for a singular fit,
i.e. one where some parameters are on the boundary of the feasible
space (for example, random effects variances equal to 0 or
correlations between random effects equal to +/- 1.0);
as for \code{check.conv.grad} above.}
\item{check.conv.hess}{rules for checking the Hessian of the deviance
    function for convergence; as for \code{check.conv.grad}
above.}
\item{check.rankX}{character - specifying if \code{\link[Matrix]{rankMatrix}(X)}
should be compared with \code{ncol(X)} and if columns from the design
matrix should possibly be dropped to ensure that it has full rank.
Sometimes needed to make the model identifiable. The options can be
abbreviated; the three \code{"*.drop.cols"} options all do drop
columns, \code{"stop.deficient"} gives an error when the rank is
smaller than the number of columns where \code{"ignore"} does no
rank computation, and will typically lead to less easily
understandable errors, later.}
\item{check.scaleX}{character - check for problematic scaling of
columns of fixed-effect model matrix, e.g. parameters measured on
very different scales.}
\item{check.formula.LHS}{ check whether specified formula has
a left-hand side. Primarily for internal use within
\code{simulate.merMod};
\emph{use at your own risk} as it may allow the generation
of unstable \code{merMod} objects}
\item{optCtrl}{a \code{\link{list}} of additional arguments to be
passed to the nonlinear optimizer (see \code{\link{Nelder_Mead}},
\code{\link[minqa]{bobyqa}}). In particular, both
\code{Nelder_Mead} and \code{bobyqa} use \code{maxfun} to
specify the maximum number of function evaluations they
will try before giving up - in contrast to
\code{\link{optim}} and \code{optimx}-wrapped optimizers,
which use \code{maxit}.}
\item{action}{character - generic choices for the severity level
of any test. "ignore": skip the test. "warning": warn if test fails.
"stop": throw an error if test fails.}
\item{tol}{numeric - tolerance for check }
\item{relTol}{numeric - tolerance for checking relative variation}
\item{\dots}{other elements to include in check specification}
}
\value{
The \code{*Control} functions return a list (inheriting from class
\code{"merControl"}) containing
\enumerate{
\item general control parameters, such as \code{optimizer}, \code{restart_edge};
\item (currently not for \code{nlmerControl}:)
\code{"checkControl"}, a \code{\link{list}} of data-checking
specifications, e.g., \code{check.nobs.vs.rankZ};
\item parameters to be passed to the optimizer, i.e., the \code{optCtrl}
list, which may contain \code{maxiter}.
}
\code{.makeCC} returns a list containing the check specification
(action, tolerance, and optionally relative tolerance).
}
\details{
Note that (only!) the pre-fitting \dQuote{checking options}
(i.e., all those starting with \code{"check."} but \emph{not}
including the convergence checks (\code{"check.conv.*"}) or
rank-checking (\code{"check.rank*"}) options)
may also be set globally via \code{\link{options}}.
In that case, \code{(g)lmerControl} will use them rather than the
default values, but will \emph{not} override values that are passed as
explicit arguments.
For example, \code{options(lmerControl=list(check.nobs.vs.rankZ = "ignore"))}
will suppress warnings that the number of observations is less than
the rank of the random effects model matrix \code{Z}.
}
\examples{
str(lmerControl())
str(glmerControl())
\dontrun{
## fit with default Nelder-Mead algorithm ...
fm0 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy)
fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
## or with minqa::bobyqa ...
fm1_bobyqa <- update(fm1,control=lmerControl(optimizer="bobyqa"))
## or with the nlminb function used in older (<1.0) versions of lme4;
## this will usually replicate older results
require(optimx)
fm1_nlminb <- update(fm1,control=lmerControl(optimizer="optimx",
optCtrl=list(method="nlminb")))
## The other option here is method="L-BFGS-B".
## Or we can wrap base::optim():
optimwrap <- function(fn,par,lower,upper,control=list(),
...) {
if (is.null(control$method)) stop("must specify method in optCtrl")
method <- control$method
control$method <- NULL
## "Brent" requires finite upper values (lower bound will always
## be zero in this case)
if (method=="Brent") upper <- pmin(1e4,upper)
res <- optim(par=par,fn=fn,lower=lower,upper=upper,
control=control,method=method,...)
with(res,list(par=par,
fval=value,
feval=counts[1],
conv=convergence,
message=message))
}
fm0_brent <- update(fm0,control=lmerControl(optimizer="optimwrap",
optCtrl=list(method="Brent")))
## You can also use functions from the nloptr package.
## You must run library(nloptr) here ... it is commented out
## to avoid making lme4 dependent on nloptr
defaultControl <- list(algorithm="NLOPT_LN_BOBYQA",
xtol_rel=1e-6,maxeval=1e5)
nloptwrap2 <- function(fn,par,lower,upper,control=list(),...) {
for (n in names(defaultControl))
if (is.null(control[[n]])) control[[n]] <- defaultControl[[n]]
res <- nloptr(x0=par,eval_f=fn,lb=lower,ub=upper,opts=control,...)
with(res,list(par=solution,
fval=objective,
feval=iterations,
conv=if (status>0) 0 else status,
message=message))
}
fm1_nloptr <- update(fm1,control=lmerControl(optimizer="nloptwrap2"))
fm1_nloptr_NM <- update(fm1,control=lmerControl(optimizer="nloptwrap2",
optCtrl=list(algorithm="NLOPT_LN_NELDERMEAD")))
## other algorithm options include NLOPT_LN_COBYLA, NLOPT_LN_SBPLX
}
}
|
# T2funcrep: a single run of an alternating least squares (ALS) fit of a
# Tucker model to an n x m x p three-way array, supplied matricized as the
# n x (m*p) matrix X.  r1, r2, r3 are the numbers of components for the
# A-, B- and C-modes.  `model` selects which mode (if any) is left
# uncompressed (fixed to the identity):
#   model == 1: C = I   (Tucker2 in modes A and B)
#   model == 2: B = I   (Tucker2 in modes A and C)
#   model == 3: A = I   (Tucker2 in modes B and C)
#   otherwise : full Tucker3 (A, B and C all estimated)
# `start` chooses the initialization: 0 = rational start via
# eigendecompositions, 1 = random orthonormal matrices, 2 = use the
# user-supplied A, B, C (and core H).  `conv` is the relative tolerance on
# the loss f used as the stopping criterion.
# Depends on the package helpers permnew() (cyclic permutation of a
# matricized three-way array) and orth() (orthonormal basis) -- defined
# elsewhere in the package, not in this file.
# Returns a list with the component matrices A, B, C, the core H, the final
# loss f, the fit percentage fp, the iteration count, the CPU time of the
# ALS run, and the "intrinsic eigenvalue" matrices La, Lb, Lc per mode.
T2funcrep <-
function(X,n,m,p,r1,r2,r3,start,conv,model,A,B,C,H){
X=as.matrix(X)
# Fix the unreduced mode (if any) to the identity matrix.
if (model==1){
	C=diag(r3)
}
if (model==2){
	B=diag(r2)
}
if (model==3){
	A=diag(r1)
}
# Time the whole initialization + ALS loop; reported as out$cputime.
cputime=system.time({
	# initialize A, B and C
	ss=sum(X^2)	# total sum of squares of the data (baseline for the loss)
	dys=0
	if (start==0){
		# rational starts via eigendecompositions
		if (model!=3){
			EIG=eigen(X%*%t(X))
			A=EIG$vectors[,1:r1]
		}
		Z=permnew(X,n,m,p)	# yields m x p x n array
		if (model!=2){
			EIG=eigen(Z%*%t(Z))
			B=EIG$vectors[,1:r2]
		}
		Z=permnew(Z,m,p,n)	# yields p x n x m array
		if (model!=1){
			EIG=eigen(Z%*%t(Z))
			C=EIG$vectors[,1:r3]
		}
	}
	if (start==1){
		# Random orthonormal starts; when a mode has fewer levels than
		# components, orthonormalize a square matrix and truncate its rows.
		if (model!=3){
			if (n>=r1){
				A=orth(matrix(runif(n*r1,0,1),n,r1)-.5)
			} else{
				A=orth(matrix(runif(r1*r1,0,1),r1,r1)-.5)
				A=A[1:n,]
			}
		}
		if (model!=2){
			if (m>=r2){
				B=orth(matrix(runif(m*r2,0,1),m,r2)-.5)
			} else{
				B=orth(matrix(runif(r2*r2,0,1),r2,r2)-.5)
				B=B[1:m,]
			}
		}
		if (model!=1){
			if (p>=r3){
				C=orth(matrix(runif(p*r3,0,1),p,r3)-.5)
			} else{
				C=orth(matrix(runif(r3*r3,0,1),r3,r3)-.5)
				C=C[1:p,]
			}
		}
	}
	# Update Core: H = X projected onto the current A, B, C bases.
	if (start!=2){
		Z=permnew(t(A)%*%X,r1,m,p)
		Z=permnew(t(B)%*%Z,r2,p,r1)
		H=permnew(t(C)%*%Z,r3,r1,r2)
	}
	# Evaluate f
	if (start==2){
		# User-supplied start: reconstruct Xhat explicitly because the given
		# core H may not be optimal for the supplied A, B, C.
		Z=B%*%permnew(A%*%H,n,r2,r3)
		Z=C%*%permnew(Z,m,r3,n)
		Z=permnew(Z,p,n,m)	# Z = Xhat, nxmxp
		f=sum((X-Z)^2)	# use full formula, taking into account possibility of nonoptimal core in start
	} else{
		# With orthonormal A, B, C and optimal core, loss = ss - ||H||^2.
		f=ss-sum(H^2)
	}
	iter=0
	# Seed fold so the first iteration of the while loop always runs.
	fold=f+2*conv*f
	# ALS: cycle through A-, B-, C-updates until the relative decrease of f
	# falls below conv.
	while (fold-f>f*conv){
		iter=iter+1
		fold=f
		if (model!=3){
			# update A (Z=X*C'x B' - GS Z*Z'*A)
			Z=permnew(X,n,m,p)
			Z=permnew(t(B)%*%Z,r2,p,n)
			Z=permnew(t(C)%*%Z,r3,n,r2)	# yields n x r2 x r3 array
			A=qr.Q(qr(Z%*%(t(Z)%*%A)),complete=FALSE)
		}
		if (model!=2){
			# update B
			Z=permnew(X,n,m,p)
			Z=permnew(Z,m,p,n)
			Z=permnew(t(C)%*%Z,r3,n,m)
			Z=permnew(t(A)%*%Z,r1,m,r3)	# yields m x r3 x r1 array
			B=qr.Q(qr(Z%*%(t(Z)%*%B)),complete=FALSE)
		}
		if (model!=1){
			# update C
			Z=permnew(t(A)%*%X,r1,m,p)
			Z=permnew(t(B)%*%Z,r2,p,r1)	# yields p x r1 x r2 array
			C=qr.Q(qr(Z%*%(t(Z)%*%C)),complete=FALSE)
		}
		# Update Core
		Z=permnew(t(A)%*%X,r1,m,p)
		Z=permnew(t(B)%*%Z,r2,p,r1)
		H=permnew(t(C)%*%Z,r3,r1,r2)
		# Evaluate f
		f=ss-sum(H^2)
	}
})
ss=sum(X^2)
# Fit percentage: share of the data's sum of squares explained by the model.
fp=100*(ss-f)/ss
# compute "intrinsic eigenvalues"
# eigenvalues for A-mode:
La=H%*%t(H)
Y=permnew(H,r1,r2,r3)
Lb=Y%*%t(Y)	# B-mode
Y=permnew(Y,r2,r3,r1)
Lc=Y%*%t(Y)	# C-mode
# Assemble the result list.
out=list()
out$A=A
out$B=B
out$C=C
out$H=H
out$f=f
out$fp=fp
out$iter=iter
out$cputime=cputime[1]
out$La=La
out$Lb=Lb
out$Lc=Lc
return(out)
}
|
/ThreeWay/R/T2funcrep.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,781
|
r
|
# T2funcrep: a single run of an alternating least squares (ALS) fit of a
# Tucker model to an n x m x p three-way array, supplied matricized as the
# n x (m*p) matrix X.  r1, r2, r3 are the numbers of components for the
# A-, B- and C-modes.  `model` selects which mode (if any) is fixed to the
# identity: 1 -> C = I, 2 -> B = I, 3 -> A = I; any other value fits the
# full Tucker3 model.  `start` chooses the initialization (0 = rational
# start via eigendecompositions, 1 = random orthonormal, 2 = user-supplied
# A, B, C and H).  `conv` is the relative convergence tolerance on the loss.
# Relies on the package helpers permnew() and orth(), defined elsewhere.
# Returns a list with A, B, C, core H, loss f, fit percentage fp, iteration
# count, CPU time, and per-mode "intrinsic eigenvalue" matrices La, Lb, Lc.
T2funcrep <-
function(X,n,m,p,r1,r2,r3,start,conv,model,A,B,C,H){
X=as.matrix(X)
if (model==1){
	C=diag(r3)
}
if (model==2){
	B=diag(r2)
}
if (model==3){
	A=diag(r1)
}
cputime=system.time({
	# initialize A, B and C
	ss=sum(X^2)
	dys=0
	if (start==0){
		# rational starts via eigendecompositions
		if (model!=3){
			EIG=eigen(X%*%t(X))
			A=EIG$vectors[,1:r1]
		}
		Z=permnew(X,n,m,p)	# yields m x p x n array
		if (model!=2){
			EIG=eigen(Z%*%t(Z))
			B=EIG$vectors[,1:r2]
		}
		Z=permnew(Z,m,p,n)	# yields p x n x m array
		if (model!=1){
			EIG=eigen(Z%*%t(Z))
			C=EIG$vectors[,1:r3]
		}
	}
	if (start==1){
		if (model!=3){
			if (n>=r1){
				A=orth(matrix(runif(n*r1,0,1),n,r1)-.5)
			} else{
				A=orth(matrix(runif(r1*r1,0,1),r1,r1)-.5)
				A=A[1:n,]
			}
		}
		if (model!=2){
			if (m>=r2){
				B=orth(matrix(runif(m*r2,0,1),m,r2)-.5)
			} else{
				B=orth(matrix(runif(r2*r2,0,1),r2,r2)-.5)
				B=B[1:m,]
			}
		}
		if (model!=1){
			if (p>=r3){
				C=orth(matrix(runif(p*r3,0,1),p,r3)-.5)
			} else{
				C=orth(matrix(runif(r3*r3,0,1),r3,r3)-.5)
				C=C[1:p,]
			}
		}
	}
	# Update Core
	if (start!=2){
		Z=permnew(t(A)%*%X,r1,m,p)
		Z=permnew(t(B)%*%Z,r2,p,r1)
		H=permnew(t(C)%*%Z,r3,r1,r2)
	}
	# Evaluate f
	if (start==2){
		Z=B%*%permnew(A%*%H,n,r2,r3)
		Z=C%*%permnew(Z,m,r3,n)
		Z=permnew(Z,p,n,m)	# Z = Xhat, nxmxp
		f=sum((X-Z)^2)	# use full formula, taking into account possibility of nonoptimal core in start
	} else{
		f=ss-sum(H^2)
	}
	iter=0
	fold=f+2*conv*f
	while (fold-f>f*conv){
		iter=iter+1
		fold=f
		if (model!=3){
			# update A (Z=X*C'x B' - GS Z*Z'*A)
			Z=permnew(X,n,m,p)
			Z=permnew(t(B)%*%Z,r2,p,n)
			Z=permnew(t(C)%*%Z,r3,n,r2)	# yields n x r2 x r3 array
			A=qr.Q(qr(Z%*%(t(Z)%*%A)),complete=FALSE)
		}
		if (model!=2){
			# update B
			Z=permnew(X,n,m,p)
			Z=permnew(Z,m,p,n)
			Z=permnew(t(C)%*%Z,r3,n,m)
			Z=permnew(t(A)%*%Z,r1,m,r3)	# yields m x r3 x r1 array
			B=qr.Q(qr(Z%*%(t(Z)%*%B)),complete=FALSE)
		}
		if (model!=1){
			# update C
			Z=permnew(t(A)%*%X,r1,m,p)
			Z=permnew(t(B)%*%Z,r2,p,r1)	# yields p x r1 x r2 array
			C=qr.Q(qr(Z%*%(t(Z)%*%C)),complete=FALSE)
		}
		# Update Core
		Z=permnew(t(A)%*%X,r1,m,p)
		Z=permnew(t(B)%*%Z,r2,p,r1)
		H=permnew(t(C)%*%Z,r3,r1,r2)
		# Evaluate f
		f=ss-sum(H^2)
	}
})
ss=sum(X^2)
fp=100*(ss-f)/ss
# compute "intrinsic eigenvalues"
# eigenvalues for A-mode:
La=H%*%t(H)
Y=permnew(H,r1,r2,r3)
Lb=Y%*%t(Y)
Y=permnew(Y,r2,r3,r1)
Lc=Y%*%t(Y)
out=list()
out$A=A
out$B=B
out$C=C
out$H=H
out$f=f
out$fp=fp
out$iter=iter
out$cputime=cputime[1]
out$La=La
out$Lb=Lb
out$Lc=Lc
return(out)
}
|
# Mean-variance portfolio analysis script: downloads bond-ETF price history,
# samples random feasible portfolios, traces the efficient frontier, and
# plots the feasible set + frontier with plotly.
# NOTE(review): getSymbols() requires network access (Yahoo Finance by
# default) and the results of random_portfolios() depend on the RNG state,
# so repeated runs differ unless set.seed() is called first.
library(PortfolioAnalytics)
library(quantmod)
library(PerformanceAnalytics)
library(zoo)
library(plotly)
# Get data
getSymbols(c("TLT", "SHY", "HYG", "LQD", "MBB"))
# Assign to dataframe
# Get adjusted prices (column 6 of each getSymbols result is the adjusted close)
prices.data <- merge.zoo(TLT[,6], SHY[,6], HYG[,6], LQD[,6], MBB[,6])
# Calculate returns (first row is NA by construction, hence na.omit below)
returns.data <- CalculateReturns(prices.data)
returns.data <- na.omit(returns.data)
# Set names
colnames(returns.data) <- c("TLT", "SHY", "HYG", "LQD", "MBB")
# Save mean return vector and sample covariance matrix
meanReturns <- colMeans(returns.data)
covMat <- cov(returns.data)
# Start with the names of the assets
port <- portfolio.spec(assets = c("TLT", "SHY", "HYG", "LQD", "MBB"))
# Box constraint: every asset weight must lie in [0.05, 0.8]
port <- add.constraint(port, type = "box", min = 0.05, max = 0.8)
# Leverage: weights must sum to 1 (fully invested, no leverage)
port <- add.constraint(portfolio = port, type = "full_investment")
# Generate random portfolios satisfying the constraints above
rportfolios <- random_portfolios(port, permutations = 100000, rp_method = "sample")
# Get minimum variance portfolio
minvar.port <- add.objective(port, type = "risk", name = "var")
# Optimize over the pre-generated random portfolios
minvar.opt <- optimize.portfolio(returns.data, minvar.port, optimize_method = "random",
                                 rp = rportfolios)
# Generate maximum return portfolio
maxret.port <- add.objective(port, type = "return", name = "mean")
# Optimize
maxret.opt <- optimize.portfolio(returns.data, maxret.port, optimize_method = "random",
                                 rp = rportfolios)
# Generate vector of target returns spanning 0 .. max attainable mean return
minret <- 0
maxret <- maxret.opt$weights %*% meanReturns
vec <- seq(minret, maxret, length.out = nrow(rportfolios))
# Preallocate frontier storage: one row per target return
eff.frontier <- data.frame(Risk = rep(NA, length(vec)),
                           Return = rep(NA, length(vec)),
                           Sharperatio = rep(NA, length(vec)))
frontier.weights <- mat.or.vec(nr = length(vec), nc = ncol(returns.data))
colnames(frontier.weights) <- colnames(returns.data)
# For each target return, solve the mean-variance problem with the ROI
# (quadratic programming) backend and record risk, return and weights.
# NOTE(review): the Sharpe ratio here assumes a zero risk-free rate.
for(i in 1:length(vec)){
  eff.port <- add.constraint(port, type = "return", name = "mean", return_target = vec[i])
  eff.port <- add.objective(eff.port, type = "risk", name = "var")
  # eff.port <- add.objective(eff.port, type = "weight_concentration", name = "HHI",
  #                            conc_aversion = 0.001)
  eff.port <- optimize.portfolio(returns.data, eff.port, optimize_method = "ROI")
  eff.frontier$Risk[i] <- sqrt(t(eff.port$weights) %*% covMat %*% eff.port$weights)
  eff.frontier$Return[i] <- eff.port$weights %*% meanReturns
  eff.frontier$Sharperatio[i] <- eff.frontier$Return[i] / eff.frontier$Risk[i]
  frontier.weights[i,] = eff.port$weights
  print(paste(round(i/length(vec) * 100, 0), "% done..."))
}
# Risk (standard deviation) of each random feasible portfolio: sqrt(w' S w)
feasible.sd <- apply(rportfolios, 1, function(x){
  return(sqrt(matrix(x, nrow = 1) %*% covMat %*% matrix(x, ncol = 1)))
})
# Expected return of each random feasible portfolio: w' mu
feasible.means <- apply(rportfolios, 1, function(x){
  return(x %*% meanReturns)
})
feasible.sr <- feasible.means / feasible.sd
# Scatter the feasible cloud colored by Sharpe ratio, overlay the frontier.
p <- plot_ly(x = feasible.sd, y = feasible.means, color = feasible.sr,
             mode = "markers", type = "scattergl", showlegend = F,
             marker = list(size = 3, opacity = 0.5,
                           colorbar = list(title = "Sharpe Ratio"))) %>%
  add_trace(data = eff.frontier, x = ~Risk, y = ~Return, mode = "markers",
            type = "scattergl", showlegend = F,
            marker = list(color = "#F7C873", size = 5)) %>%
  layout(title = "Random Portfolios with Plotly",
         yaxis = list(title = "Mean Returns"),
         xaxis = list(title = "Standard Deviation"),
         # plot_bgcolor = "#434343",
         # paper_bgcolor = "#F8F8F8",
         annotations = list(
           list(x = 0.004, y = 0.000075,
                ax = -30, ay = -30,
                text = "Efficient frontier",
                font = list(color = "#F6E7C1", size = 15),
                arrowcolor = "white")
         ))
# Render an htmlwidget in a standalone Chromium app window.
# Writes the widget to a temporary .html file, opens that file with
# chromium-browser, and returns the file path (invisibly useful for reuse).
print_app <- function(widget) {
  # Build a unique .html path under the session temp directory.
  html_path <- paste(tempfile('plotly'), 'html', sep = '.')
  # selfcontained = TRUE produced files that froze the browser, so keep
  # the widget's dependencies external.
  htmlwidgets::saveWidget(widget, html_path, selfcontained = FALSE)
  # Open the file in Chromium's app (chromeless) mode.
  system(sprintf("chromium-browser -app=file://%s", html_path))
  # Hand the path back for any other purpose.
  html_path
}
library(ggplot2)
# --- Interactive rendering and introspection of the plotly object ----------
ggplotly(p)
plotly_json(p)
# use plotly_build() to get at the plotly.js definition
# behind *any* plotly object
b <- plotly_build(p)
# Count the traces in the built figure
length(b$x$data)
# Extract the `name` of each trace. plotly.js uses `name` to
# populate legend entries and tooltips
purrr::map_chr(b$x$data, "name")
# Inspect the distinct trace types used
unique(purrr::map_chr(b$x$data, "type"))
# Long format: Var1 = frontier index, Var2 = asset, value = weight
frontier.weights.melt <- reshape2::melt(frontier.weights)
# Stacked bar chart of asset weights along the efficient frontier
q <- plot_ly(frontier.weights.melt, x = ~Var1, y = ~value, split = ~Var2, type = "bar") %>%
  layout(title = "Portfolio weights across frontier", barmode = "stack",
         xaxis = list(title = "Index"),
         yaxis = list(title = "Weights(%)", tickformat = ".0%"))
q
|
/Mean Variance.R
|
no_license
|
avalxxhj/Portfolio-Allocation
|
R
| false
| false
| 5,208
|
r
|
library(PortfolioAnalytics)
library(quantmod)
library(PerformanceAnalytics)
library(zoo)
library(plotly)
# --- Data: download bond-ETF prices and compute returns --------------------
getSymbols(c("TLT", "SHY", "HYG", "LQD", "MBB"))
# Assign to dataframe
# Get adjusted prices (column 6 of each xts object)
prices.data <- merge.zoo(TLT[,6], SHY[,6], HYG[,6], LQD[,6], MBB[,6])
# Calculate simple returns and drop leading NA row(s)
returns.data <- CalculateReturns(prices.data)
returns.data <- na.omit(returns.data)
# Set names
colnames(returns.data) <- c("TLT", "SHY", "HYG", "LQD", "MBB")
# Save mean return vector and sample covariance matrix
meanReturns <- colMeans(returns.data)
covMat <- cov(returns.data)
# --- Portfolio specification ------------------------------------------------
# Start with the names of the assets
port <- portfolio.spec(assets = c("TLT", "SHY", "HYG", "LQD", "MBB"))
# Box constraint: each weight between 5% and 80%
port <- add.constraint(port, type = "box", min = 0.05, max = 0.8)
# Leverage: weights must sum to 1 (fully invested)
port <- add.constraint(portfolio = port, type = "full_investment")
# Generate random feasible portfolios used by the "random" optimizer below
rportfolios <- random_portfolios(port, permutations = 100000, rp_method = "sample")
# Get minimum variance portfolio
minvar.port <- add.objective(port, type = "risk", name = "var")
# Optimize
minvar.opt <- optimize.portfolio(returns.data, minvar.port, optimize_method = "random",
                                 rp = rportfolios)
# Generate maximum return portfolio
maxret.port <- add.objective(port, type = "return", name = "mean")
# Optimize
maxret.opt <- optimize.portfolio(returns.data, maxret.port, optimize_method = "random",
                                 rp = rportfolios)
# Generate vector of target returns from 0 up to the max-return portfolio
minret <- 0
maxret <- maxret.opt$weights %*% meanReturns
vec <- seq(minret, maxret, length.out = nrow(rportfolios))
# Pre-allocate the frontier results (one row per target return)
eff.frontier <- data.frame(Risk = rep(NA, length(vec)),
                           Return = rep(NA, length(vec)),
                           Sharperatio = rep(NA, length(vec)))
frontier.weights <- mat.or.vec(nr = length(vec), nc = ncol(returns.data))
colnames(frontier.weights) <- colnames(returns.data)
# Trace the efficient frontier: minimum variance subject to each target
# return, solved with the ROI solver; record risk/return/Sharpe/weights.
for(i in 1:length(vec)){
  eff.port <- add.constraint(port, type = "return", name = "mean", return_target = vec[i])
  eff.port <- add.objective(eff.port, type = "risk", name = "var")
  # eff.port <- add.objective(eff.port, type = "weight_concentration", name = "HHI",
  #                            conc_aversion = 0.001)
  eff.port <- optimize.portfolio(returns.data, eff.port, optimize_method = "ROI")
  eff.frontier$Risk[i] <- sqrt(t(eff.port$weights) %*% covMat %*% eff.port$weights)
  eff.frontier$Return[i] <- eff.port$weights %*% meanReturns
  eff.frontier$Sharperatio[i] <- eff.frontier$Return[i] / eff.frontier$Risk[i]
  frontier.weights[i,] = eff.port$weights
  print(paste(round(i/length(vec) * 100, 0), "% done..."))
}
# Risk (standard deviation) of every random feasible portfolio
feasible.sd <- apply(rportfolios, 1, function(x){
  return(sqrt(matrix(x, nrow = 1) %*% covMat %*% matrix(x, ncol = 1)))
})
# Mean return of every random feasible portfolio
feasible.means <- apply(rportfolios, 1, function(x){
  return(x %*% meanReturns)
})
feasible.sr <- feasible.means / feasible.sd
# Feasible cloud coloured by Sharpe ratio, frontier overlaid in orange
p <- plot_ly(x = feasible.sd, y = feasible.means, color = feasible.sr,
             mode = "markers", type = "scattergl", showlegend = F,
             marker = list(size = 3, opacity = 0.5,
                           colorbar = list(title = "Sharpe Ratio"))) %>%
  add_trace(data = eff.frontier, x = ~Risk, y = ~Return, mode = "markers",
            type = "scattergl", showlegend = F,
            marker = list(color = "#F7C873", size = 5)) %>%
  layout(title = "Random Portfolios with Plotly",
         yaxis = list(title = "Mean Returns"),
         xaxis = list(title = "Standard Deviation"),
         # plot_bgcolor = "#434343",
         # paper_bgcolor = "#F8F8F8",
         annotations = list(
           list(x = 0.004, y = 0.000075,
                ax = -30, ay = -30,
                text = "Efficient frontier",
                font = list(color = "#F6E7C1", size = 15),
                arrowcolor = "white")
         ))
# Render an htmlwidget in a standalone Chromium app window.
# Writes the widget to a temporary .html file, opens that file with
# chromium-browser, and returns the file path (invisibly useful for reuse).
print_app <- function(widget) {
  # Build a unique .html path under the session temp directory.
  html_path <- paste(tempfile('plotly'), 'html', sep = '.')
  # selfcontained = TRUE produced files that froze the browser, so keep
  # the widget's dependencies external.
  htmlwidgets::saveWidget(widget, html_path, selfcontained = FALSE)
  # Open the file in Chromium's app (chromeless) mode.
  system(sprintf("chromium-browser -app=file://%s", html_path))
  # Hand the path back for any other purpose.
  html_path
}
library(ggplot2)
# --- Interactive rendering and introspection of the plotly object ----------
ggplotly(p)
plotly_json(p)
# use plotly_build() to get at the plotly.js definition
# behind *any* plotly object
b <- plotly_build(p)
# Count the traces in the built figure
length(b$x$data)
# Extract the `name` of each trace. plotly.js uses `name` to
# populate legend entries and tooltips
purrr::map_chr(b$x$data, "name")
# Inspect the distinct trace types used
unique(purrr::map_chr(b$x$data, "type"))
# Long format: Var1 = frontier index, Var2 = asset, value = weight
frontier.weights.melt <- reshape2::melt(frontier.weights)
# Stacked bar chart of asset weights along the efficient frontier
q <- plot_ly(frontier.weights.melt, x = ~Var1, y = ~value, split = ~Var2, type = "bar") %>%
  layout(title = "Portfolio weights across frontier", barmode = "stack",
         xaxis = list(title = "Index"),
         yaxis = list(title = "Weights(%)", tickformat = ".0%"))
q
|
\name{scatter}
\alias{scatter}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Functions to split a file over several parts}
\description{
Takes a file as input and pushes it through a mapreduce job that writes it out over a number of parts (system dependent — specifically, the number of reducers). This helps
with parallelization of the next map phase}
\usage{
scatter(input, output = NULL)
}
\arguments{
\item{input}{
The input file}
\item{output}{
Output, defaults to the same as \code{\link{mapreduce}} output}
}
\value{
Same as for \code{\link{mapreduce}}.
}
|
/rmr/pkg/man/scatter.Rd
|
no_license
|
fengzanfeng/RHadoop
|
R
| false
| false
| 591
|
rd
|
\name{scatter}
\alias{scatter}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Functions to split a file over several parts}
\description{
Takes a file as input and pushes it through a mapreduce job that writes it out over a number of parts (system dependent — specifically, the number of reducers). This helps
with parallelization of the next map phase}
\usage{
scatter(input, output = NULL)
}
\arguments{
\item{input}{
The input file}
\item{output}{
Output, defaults to the same as \code{\link{mapreduce}} output}
}
\value{
Same as for \code{\link{mapreduce}}.
}
|
#*****************************************************************************************
# *
# [To Airbnb?] Custom functions for working with APM and AirDNA data in the *
# *
#*****************************************************************************************
### Calculate the booking status ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Summarise a property's daily booking records into one row of statistics.
#
# Args:
#   book_data: data.frame with a `date` column and a `status` column whose
#              values begin with 'R' (reserved), 'A' (available) or
#              'B' (blocked), one row per day.
#
# Returns: one-row data.frame with the date range, total days, blocked and
#   available rates, blocked-spell statistics and booking count. With fewer
#   than two daily records every statistic is NA.
abbCalcBookStr <- function(book_data){

  # If property has more than one status (one daily booking) find summaries
  if (nrow(book_data) > 1){

    # Find min and max date
    id_min <- min(book_data$date)
    id_max <- max(book_data$date)

    # Run-length encode the status sequence: consecutive identical statuses
    # are concatenated; a '.' marks each status change.
    st_v <- book_data$status[1]
    for(ss in 2:length(book_data$status)){
      if(book_data$status[ss] == book_data$status[[ss - 1]]){
        st_v <- paste0(st_v, book_data$status[[ss]])
      } else {
        st_v <- paste0(st_v, '.' ,book_data$status[[ss]])
      }
    }

    # Split the encoded string into one element per run
    ss_. <- strsplit(st_v, '[.]')[[1]]

    # Grab the first character (status code) of each run
    sg_. <- substr(ss_.[[1]], 1, 1)
    for(sg in 1:length(ss_.)){
      sg_.[sg] <- substr(ss_.[[sg]], 1, 1)
    }

    # Locate the runs of each of the three status types
    id_B <- which(unlist(sg_.) == 'B')
    id_R <- which(unlist(sg_.) == 'R')
    id_A <- which(unlist(sg_.) == 'A')

    # Reserved runs: total booked days
    if (length(id_R) > 0){
      r_. <- ss_.[id_R]
      bookings <- sum(nchar(unlist(r_.)))
    } else {
      bookings <- 0
    }

    # Available runs: share of days available
    if (length(id_A) > 0){
      a_. <- ss_.[id_A]
      avails <- unlist(lapply(a_., nchar))
      avail_rate <- sum(avails) / length(book_data$status)
    } else {
      avail_rate <- 0
    }

    # Blocked runs: share of days blocked plus spell statistics
    if (length(id_B) > 0){
      b_. <- ss_.[id_B]
      blocks <- unlist(lapply(b_., nchar))
      block_rate <- sum(blocks) / length(book_data$status)
      longest_block <- max(blocks)
      med_block <- median(blocks)
      nbr_block <- length(id_B)
    } else {
      block_rate <- 0
      longest_block <- 0
      med_block <- 0
      nbr_block <- 0
    }
    total_days <- length(book_data$status)

  } else {
    # Too few observations to summarise: return all-NA statistics.
    block_rate <- NA
    longest_block <- NA
    med_block <- NA
    nbr_block <- NA
    # BUG FIX: this branch previously assigned `days <- NA`, leaving
    # `total_days` undefined so the data.frame() call below errored.
    total_days <- NA
    id_min <- NA
    id_max <- NA
    avail_rate <- NA
    bookings <- NA
  }

  ## Return Values
  return(data.frame(min_date=id_min,
                    max_date=id_max,
                    total_days=total_days,
                    block_rate=round(block_rate, 3),
                    avail_rate=round(avail_rate, 3),
                    longest_block=longest_block,
                    nbr_block=nbr_block,
                    med_block=med_block,
                    bookings=bookings))
}
### Correct the APM data dates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parse APM-style date strings ("d/m/Y[ 0:00]") into Date objects.
#
# Args:
#   x_date: vector of dates as character, factor or other coercible type,
#           e.g. "15/08/2015 0:00" or "15/08/15".
#
# Returns: a Date vector (day/month/year interpretation).
fixAPMDates <- function(x_date){

  ## Coerce factors/numerics to character before string surgery
  ## (is.character() is safer than `class(x) != 'character'`, which breaks
  ## on objects with more than one class)
  if (!is.character(x_date)) x_date <- as.character(x_date)

  ## Remove the constant " 0:00" time suffix. Base gsub() with fixed = TRUE
  ## replaces the previous stringr dependency for these literal patterns.
  x_date <- gsub(' 0:00', '', x_date, fixed = TRUE)

  ## Collapse 4-digit years in the 2000s to 2 digits ("/2015" -> "/15")
  x_date <- gsub('/20', '/', x_date, fixed = TRUE)

  ## Return values as standardized, British/Australian date format
  as.Date(x_date, "%d/%m/%y")
}
### Set the cleaning counter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialise a "clean" counter object from the current sizes of the global
# `str_tdf` and `ltr_tdf` data frames. The object carries an operations log
# (seeded with one 'initial' row) plus running row counts that later
# countCleaning() calls diff against.
setCleanCount <- function(){

  # Snapshot current row counts of the two (global) working tables
  n_str <- nrow(str_tdf)
  n_ltr <- nrow(ltr_tdf)

  # Seed the operations log with the starting sizes
  count_log <- data.frame(operation='initial',
                          str=n_str,
                          ltr=n_ltr)

  # Bundle log + running totals into an object of class "clean"
  structure(list(count_df = count_log,
                 str_running = n_str,
                 ltr_running = n_ltr),
            class='clean')
}
### Cleaning counting updater ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Append a cleaning-step record to a "clean" counter object.
#
# Args:
#   clean_obj: object of class "clean" from setCleanCount() or a previous
#              countCleaning() call.
#   operation: label describing the cleaning step just performed.
#
# Returns: updated "clean" object whose count_df gains one row holding the
#   number of rows removed from the global `str_tdf` / `ltr_tdf` tables
#   since the last snapshot; NULL (with a message) on bad input.
countCleaning <- function(clean_obj, operation){

  # Guard: refuse anything that is not a "clean" counter
  if (!inherits(clean_obj, 'clean')){
    message('"clean_obj" not an object of class "clean"')
    return(NULL)
  }

  # Rows removed since the previous snapshot
  cut_str <- clean_obj$str_running - nrow(str_tdf)
  cut_ltr <- clean_obj$ltr_running - nrow(ltr_tdf)

  # One log row for this operation
  step_df <- data.frame(operation=operation,
                        str=cut_str,
                        ltr=cut_ltr)

  # Rebuild the object with the extended log and fresh running totals
  structure(list(count_df = rbind(clean_obj$count_df, step_df),
                 str_running = nrow(str_tdf),
                 ltr_running = nrow(ltr_tdf)),
            class='clean')
}
### Impute likely bookings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Estimate "likely" total bookings for one property by scaling the market
# booking rate on the property's UNOBSERVED dates by the property's booking
# rate relative to the market on its observed dates.
#
# Args:
#   prop_df: nested tibble for one property; after unnesting it must carry
#            `date`, `block_rate`, `avail_rate`, `bookings` and
#            `property_id` columns.
#   rate_summ: market summary, one row per `date`, with `obs` (number of
#              observed properties) and `rate` (booking rate) columns.
#
# Returns: one-row tibble with `property_id` and `lik_bookings`
#          (observed bookings + imputed bookings for unobserved dates).
impLikelyBookings <- function(prop_df,
                              rate_summ){

  # Unnest the str prop data
  prop_df <- prop_df %>%
    tidyr::unnest()

  # Get relative booking rate (prop versus other props on those days):
  # weighted market rate over the property's observed dates...
  rsp <- rate_summ %>%
    dplyr::filter(date %in% prop_df$date)
  w_obs <- sum(rsp$obs * rsp$rate) / sum(rsp$obs)
  # ...then the property's own booking share relative to that market rate
  # (> 1 means the property books more than the market average).
  rel_rate <- (1 - prop_df$block_rate - prop_df$avail_rate) / w_obs

  # Estimate likely bookings over the dates NOT observed for this property
  rso <- rate_summ %>%
    dplyr::filter(!date %in% prop_df$date)
  lik_book <- round(sum(rso$obs * rso$rate) / sum(rso$obs) * nrow(rso) * rel_rate, 0)

  # Add imputed bookings to observed bookings; keep a single summary row
  p_df <- prop_df %>%
    dplyr::mutate(lik_bookings = bookings + lik_book) %>%
    dplyr::select(property_id, lik_bookings) %>%
    dplyr::slice(1)

  # Return
  p_df
}
### Impute likely bookings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Estimate "potential" total bookings for one property: the likely bookings
# (from impLikelyBookings, expected in `lik_bookings`) plus the bookings the
# property would plausibly have earned on its BLOCKED days, scaled by its
# booking rate relative to the market on non-blocked days.
#
# Args:
#   prop_df: nested tibble for one property; after unnesting it must carry
#            `date`, `status`, `block_rate`, `avail_rate`, `lik_bookings`
#            and `property_id` columns.
#   rate_summ: market summary, one row per `date`, with `obs` and `rate`.
#
# Returns: one-row tibble with `property_id` and `pot_bookings`.
impPotentialBookings <- function(prop_df,
                                 rate_summ){

  prop_df <- prop_df %>% tidyr::unnest()

  # Split daily records into non-blocked and blocked days
  # (the extra unnest() calls are no-ops after the line above)
  notblock_df <- prop_df %>%
    tidyr::unnest() %>%
    dplyr::filter(status != 'B')
  block_df <- prop_df %>%
    tidyr::unnest() %>%
    dplyr::filter(status == 'B')

  # Get relative booking rate (prop versus other props on those days)
  rsp <- rate_summ %>%
    dplyr::filter(date %in% notblock_df$date)
  w_obs <- sum(rsp$obs * rsp$rate) / sum(rsp$obs)
  rel_rate <- unique((1 - prop_df$block_rate - prop_df$avail_rate) / w_obs)

  # Expected bookings over the property's blocked dates
  rso <- rate_summ %>%
    dplyr::filter(date %in% block_df$date)
  pot_book <- round(sum(rso$obs * rso$rate) / sum(rso$obs) * nrow(rso) * rel_rate, 0)

  # Add to likely bookings; keep a single summary row
  p_df <- prop_df %>%
    dplyr::mutate(pot_bookings = lik_bookings + pot_book) %>%
    dplyr::select(property_id, pot_bookings) %>%
    dplyr::slice(1)

  # Return
  p_df
}
### Wrapper function to handle all of the imputation and comparison ~~~~~~~~~~~~~~~~~~~~~~
# Wrapper that (optionally per level of `split.field`) attaches percentile
# ranks, an imputed long-term rent, imputed days-on-market and an imputed
# annual LTR revenue to each STR property.
#
# Args:
#   str.df: short-term (airbnb) property data.
#   ltr.df: long-term rental data.
#   mod.spec: formula passed to imputeLtrRents().
#   match.factor: optional factor columns to align between the two data sets.
#   split.field: optional column name to split the analysis by; NULL = global.
#   verbose: emit progress messages.
#
# Returns: data.frame of property_id plus only the newly added columns.
abbImputeCompare <- function(str.df,
                             ltr.df,
                             mod.spec,
                             match.factor=NULL,
                             split.field=NULL,
                             verbose=FALSE){

  # Remember the incoming columns so only new ones are returned at the end
  init_names <- names(str.df)

  ## Split data by field
  # If field is specified
  if (!is.null(split.field)){
    str.list <- split(str.df, str.df[ ,split.field])
    ltr.list <- split(ltr.df, ltr.df[ ,split.field])
    # If no field specified
  } else {
    str.list <- list(str.df)
    ltr.list <- list(ltr.df)
    if(verbose){
      cat('Data analyzed at global level')
    }
  }

  ## Loop through split dfs
  # Set up capture list
  imp.list <- list()

  # Run Loop
  for (il in 1:length(str.list)){

    #if(verbose) cat('Imputing and Comparing: ', split.levels[il], '\n')

    # Percentile ranks (1..101) of rate, occupancy and potential occupancy
    str.list[[il]]$rate_qtl <- makeWtdQtl(str.list[[il]]$med_rate,
                                          return.type='rank')
    str.list[[il]]$occ_qtl <- makeWtdQtl(str.list[[il]]$occ_rate,
                                         return.type='rank')
    str.list[[il]]$pot_occ_qtl <- makeWtdQtl(str.list[[il]]$pot_occ_rate,
                                             return.type='rank')

    # Impute long term rents
    # NOTE(review): this passes the FULL ltr.df/str.df rather than the
    # current split subsets (ltr.list[[il]]/str.list[[il]]) — confirm
    # whether per-split imputation was intended here.
    imp.temp <- imputeLtrRents(ltr.df=ltr.df,
                               str.df=str.df,
                               mod.spec=mod.spec,
                               match.factor=match.factor)

    # Add imputed LTRs to the STR data
    str.list[[il]] <- merge(str.list[[il]], imp.temp$imp_rent, by='property_id')
    imp.list[[il]] <- imp.temp

    # Impute days on market (weighted median of the LTR subset)
    str.list[[il]]$imp_dom <- imputeDOM(str.list[[il]],
                                        ltr.list[[il]],
                                        calc.type='median')

    # Imputed annual LTR revenue: weekly rent times weeks not on market
    str.list[[il]]$ltr_imp_revenue <- (str.list[[il]]$imp_rent *
                                         (52 - str.list[[il]]$imp_dom / 7))

    # Compare revenues
    #comp.revs <- compareRevenues(str.list[[il]])

    # Add revenue comparison fields to str data
    #str.list[[il]] <- merge(str.list[[il]],
    #                         comp.revs,
    #                         by='property_id')
  }

  ## Convert list into a df
  str.df <- plyr::rbind.fill(str.list)

  ## Add indicator of which field was the split based on
  str.df$split_field <- split.field

  ## Return Values
  return(str.df[, c('property_id', names(str.df)[!names(str.df) %in% init_names])])
}
### Assign quartile values based on a give vector, weighted if necessary ~~~~~~~~~~~~~~~~~
# Map each element of `data.vec` onto its (weighted) percentile bin.
#
# Args:
#   data.vec: numeric vector to rank.
#   wgts: observation weights (defaults to equal weights).
#   return.type: 'rank' returns each element's percentile-bin index
#                (probs 0, 0.01, ..., 1 give up to 101 bins); any other
#                value returns the weighted quantile breakpoints themselves.
#
# Notes: a small jitter proportional to the mean is added so that cut()
#   receives strictly increasing breaks even with heavily tied data.
makeWtdQtl <- function(data.vec,
                       wgts=rep(1,length(data.vec)),
                       return.type='rank')
{

  ## Ensure the Hmisc namespace is available (used via Hmisc:: below).
  ## requireNamespace() avoids attaching the package to the search path,
  ## unlike the previous bare require() call.
  if (!requireNamespace("Hmisc", quietly = TRUE)) {
    stop("makeWtdQtl() requires the 'Hmisc' package", call. = FALSE)
  }

  ## Set the adjustment jitter to prevent identical breaks
  adj.jit <- abs(mean(data.vec) / 100000)

  ## Calculate the weighted quantiles at probs 0, 0.01, ..., 1
  wtd.qtl <- Hmisc::wtd.quantile(data.vec + runif(length(data.vec), 0, adj.jit),
                                 weights=wgts,
                                 probs=seq(0, 1, .01))

  ## Widen the end breakpoints so min/max observations fall inside the bins
  # Minimum
  if(wtd.qtl[1] > min(data.vec)){
    wtd.qtl[1] <- min(data.vec) - adj.jit
  }
  # Maximum
  if(wtd.qtl[length(wtd.qtl)] < max(data.vec)){
    wtd.qtl[length(wtd.qtl)] <- max(data.vec) + adj.jit
  }

  ## Convert to a vector of percentile-bin indicators
  qtl.vec <- as.numeric(as.factor(cut(data.vec,
                                      breaks=(wtd.qtl + seq(0, 1, .01) * adj.jit))))

  ## Return value
  if(return.type == 'rank'){
    return(qtl.vec)
  } else {
    return(wtd.qtl)
  }
}
### Cross impute rates and rents ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Fit a (log-)rent model on the long-term rental data and use it to predict
# an imputed rent for every short-term property.
#
# Args:
#   ltr.df: data.frame of long term rental observations.
#   str.df: data.frame of airbnb (short-term) properties.
#   mod.spec: model formula for the rent regression (log response expected,
#             since predictions are exponentiated).
#   match.factor: optional vector of factor column names; rows whose levels
#                 are not present in BOTH data sets are dropped from each.
#
# Returns: list with
#   imp_rent - data.frame(property_id, imp_rent) of rounded predictions
#   model    - the fitted lm object.
imputeLtrRents <- function(ltr.df,
                           str.df,
                           mod.spec,
                           match.factor=NULL)
{

  ## Keep only observations whose matching-factor levels occur in both sets
  if(!is.null(match.factor)){
    for(fac in match.factor){
      # Locate the matching column in each data set
      ltr.col <- which(names(ltr.df) == fac)
      str.col <- which(names(str.df) == fac)
      # Filter each side down to the levels the other side contains
      ltr.df <- ltr.df[ltr.df[[ltr.col]] %in% str.df[[str.col]], ]
      str.df <- str.df[str.df[[str.col]] %in% ltr.df[[ltr.col]], ]
    }
  }

  ## Pin the STR observations to a single month level for prediction
  str.df$month <- '2015_8'

  ## Fit the rent model on the long-term data
  rent.fit <- lm(mod.spec, data=ltr.df)

  ## Predict (log) rents for the STR properties and back-transform
  pred.rent <- exp(predict(rent.fit, str.df))

  ## Return Values
  list(imp_rent=data.frame(property_id=str.df$property_id,
                           imp_rent=round(pred.rent, 0)),
       model=rent.fit)
}
# Impute weekly (and annual) rents for STR properties from the k nearest
# long-term rentals within the same bedrooms / bathrooms / type cell.
#
# Args:
#   ltr_df: long-term rental data (longitude, latitude, price plus the
#           bedrooms/bathrooms/type split columns).
#   str_df: short-term property data (same split columns plus coordinates
#           and property_id).
#   knns: vector of k values; per-k medians are averaged into `mean`.
#
# Returns: wide data.frame with one k_<k> column per k, their row mean,
#          and `year` = mean weekly rent * 52.
knnImputeWrap <- function(ltr_df,
                          str_df,
                          knns = c(3, 5, 7)){

  # Split both data sets into matching bed/bath/type cells
  ltr_ <- plyr::dlply(ltr_df, .variables = c('bedrooms', 'bathrooms',
                                             'type'))
  # BUG FIX: this previously split the global `str_tdf` instead of the
  # `str_df` argument, silently ignoring the caller's data.
  str_ <- plyr::dlply(str_df, .variables = c('bedrooms', 'bathrooms',
                                             'type'))

  # Drop LTR cells with no STR counterpart
  # NOTE(review): str_ is not symmetrically filtered to names(ltr_); if the
  # STR data has cells absent from the LTR data the map2() pairing below
  # may misalign — confirm the inputs always share the same cells.
  ltr_ <- ltr_[names(ltr_) %in% names(str_)]

  # One knnImpute() pass per k, pairing matching cells positionally
  k_ <- list()
  for (j in knns){
    k_[[j]] <- purrr::map2(.x = str_,
                           .y = ltr_,
                           .f = knnImpute,
                           k = j) %>%
      dplyr::bind_rows()
  }

  # Wide format: one k_<k> column per k value
  x <- tidyr::spread(k_ %>% bind_rows(), k, weekly_rent)

  # Average across k values (single k: just copy the column)
  if (length(unique(knns)) > 1){
    x$mean <- rowMeans(x[,grepl('k_', names(x))])
  } else {
    x$mean <- x[,grepl('k_', names(x))]
  }
  # Annualise the mean weekly rent
  x$year <- x$mean * 52
  x
}
# For one bed/bath/type cell: find the k nearest LTR listings (by raw
# longitude/latitude) to each STR property and take the median of their
# prices as the property's imputed weekly rent.
#
# Args:
#   s_df: STR properties (longitude, latitude, property_id).
#   l_df: LTR listings (longitude, latitude, price).
#   k: number of nearest neighbours.
#
# Returns: data.frame(property_id, weekly_rent, k = "k_<k>").
knnImpute <- function(s_df, l_df, k){
  # k-nearest-neighbour search on raw coordinates
  # NOTE(review): Euclidean distance on degrees, not great-circle distance —
  # adequate at city scale, but confirm for wider geographies.
  nn <- RANN::nn2(data=l_df[, c('longitude', 'latitude')],
                  query = s_df[, c('longitude', 'latitude')],
                  k = k)
  # Median LTR price across each property's k neighbours (one row per query)
  getMed <- function(x,y){median(y$price[x])}
  kp_df <- plyr::adply(nn$nn.idx, 1, getMed, y=l_df)

  return(data.frame(property_id = s_df$property_id,
                    weekly_rent = kp_df$V1,
                    k = paste0('k_', k)))
}
### Impute days on market for the airbnb properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Impute days-on-market for STR properties from the LTR distribution.
# With calc.type = 'median' every property receives the weighted median of
# ltr.df$dom (breakpoint 51 of the 101 percentile breaks returned by
# makeWtdQtl); the 'model' branch is a placeholder that yields no value.
imputeDOM <- function(str.df,
                      ltr.df,
                      calc.type='median'){

  if(calc.type == 'median'){
    # Percentile breakpoints of LTR days-on-market; element 51 is the
    # median, since probs run 0, 0.01, ..., 1.
    dom.breaks <- makeWtdQtl(ltr.df$dom, return.type='raw')
    str.df$imp.dom <- dom.breaks[51]
  }

  if(calc.type == 'model'){
    # Placeholder: model-based imputation not yet implemented.
  }

  ## One rounded imputed DOM value per row of str.df
  return(round(str.df$imp.dom, 0))
}
# Place multiple ggplots into a configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Arrange multiple ggplot objects on one page using grid viewports.
#
# Args:
#   ...: individual ggplot objects.
#   plotlist: optional list of additional plots, appended to `...`.
#   file: unused — kept for backward compatibility with existing callers.
#   cols: number of columns when no explicit layout is supplied.
#   layout: optional matrix of plot indices; cell (i, j) holds the index of
#           the plot drawn at row i, column j (overrides `cols`).
ggMultiPlots <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  require(grid)

  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)

  numPlots = length(plots)

  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }

  if (numPlots==1) {
    # Single plot: draw it directly, no grid layout needed
    print(plots[[1]])

  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))

      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Group-wise summary as a tidy data.frame instead of tapply()'s named array:
# one row per level of `byField`, with columns `idName` (the level) and
# `newName` (the computed statistic).
#
# Args:
#   xData: vector being summarised.
#   byField: grouping vector (same length as xData).
#   xFunc: summary function; receives `na.rm` as an extra argument.
#   newName: name of the statistic column.
#   idName: name of the group-label column.
#   na.rm: forwarded to xFunc.
tapply2DF <- function(xData,                 # Vector being tapply'd
                      byField,               # Field to split vector by
                      xFunc,                 # Function to apply
                      newName='Var',         # Name of new variable
                      idName='ID',
                      na.rm=FALSE)           # Name of identification field
{

  ## Group-wise summary, coerced from named array to data.frame
  grp.stats <- as.data.frame(tapply(xData, byField, xFunc, na.rm=na.rm))
  names(grp.stats) <- newName

  ## Promote the group labels from rownames into a proper ID column
  grp.stats[[idName]] <- rownames(grp.stats)
  rownames(grp.stats) <- seq_len(nrow(grp.stats))

  ## ID column first, statistic second
  grp.stats[, c(idName, newName)]
}
### Create comparison table ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build a long-format table of mean revenue-preference rates (Actual,
# Likely, Potential), either globally or split by a grouping field.
#
# Args:
#   ic.df: imputation/comparison data with act_pref, lik_pref and pot_pref
#          columns (plus the split column when one is used).
#   split.field: NULL or 'none' for a global summary, otherwise the name of
#                the column to cross-tabulate by.
#
# Returns: data.frame with columns ID, var (the mean rate) and rev.type.
abbCreateCompTable <- function(ic.df,
                               split.field=NULL){

  # Global summary: one mean per revenue type.
  # Accepts NULL to match the default argument — the previous bare
  # `split.field == 'none'` comparison errored out on NULL.
  if(is.null(split.field) || split.field == 'none'){
    str.act <- mean(ic.df$act_pref)
    str.lik <- mean(ic.df$lik_pref)
    # BUG FIX: previously read pot_pref from an undefined `ltr.df`
    str.pot <- mean(ic.df$pot_pref)
    rate.table <- data.frame(ID='all',
                             var=c(str.act, str.lik, str.pot),
                             rev.type=c('Actual',
                                        'Likely',
                                        'Potential'))
  } else {

    # Calculate cross-tab values
    str.act <- tapply2DF(ic.df$act_pref, ic.df[ ,split.field], mean)
    str.lik <- tapply2DF(ic.df$lik_pref, ic.df[ ,split.field], mean)
    str.pot <- tapply2DF(ic.df$pot_pref, ic.df[, split.field], mean)

    # Add names
    str.act$rev.type <- 'Actual'
    str.lik$rev.type <- 'Likely'
    str.pot$rev.type <- 'Potential'

    # Combine into table
    rate.table <- rbind(str.act, str.lik, str.pot)

    # Reorder factors for common split fields
    if(split.field == 'geo_mrkt'){
      rate.table$ID <- factor(rate.table$ID,
                              levels=c('city-core', 'city', 'beach',
                                       'suburban', 'rural'))
    }
    if(split.field == 'host_type'){
      rate.table$ID <- factor(rate.table$ID,
                              levels=c('Profit Seeker', 'Opportunistic Sharer',
                                       'Multi-Platform User', 'Unknown'))
    }
  }

  ## Return Values
  return(rate.table)
}
### Create a vector of significance stars for regression results ~~~~~~~~~~~~~~~~~~~~~~~~~
# Map p-values to fixed-width significance-star strings:
#   p <= .01 -> '***', .01 < p <= .05 -> '** ', .05 < p <= .1 -> '*  ',
#   otherwise blanks. Input is coerced with as.numeric() first.
makeStatSig <- function(x){
  p.vals <- as.numeric(x)
  # Start at the highest significance and overwrite downwards
  stars <- rep('***', length(p.vals))
  stars[p.vals > .01] <- '** '
  stars[p.vals > .05] <- '*  '
  stars[p.vals > .1]  <- '   '
  stars
}
### Diagnostics for logistic regression models ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute AIC, log-likelihood and AUC (via ROCR) for a fitted logistic model.
#
# Args:
#   log.model: fitted binomial glm.
#   data: data to predict on.
#   resp.var: observed binary response aligned with `data`.
#
# Returns: list(AIC, logLik, auc) where `auc` is a ROCR performance object
#   (the numeric AUC lives in auc@y.values).
logDx <- function(log.model, data, resp.var){
  # ROCR prediction object: predicted probabilities vs observed labels
  pred <- prediction(predict(log.model, data, type='response'), resp.var)
  auc <- performance(pred, measure='auc')
  ll <- logLik(log.model)
  # NOTE: the local name `AIC` shadows stats::AIC after this line
  AIC <- AIC(log.model)
  return(list(AIC=AIC,
              logLik=ll,
              auc=auc))
}
|
/functions/abb_Functions.R
|
no_license
|
andykrause/to_airbnb
|
R
| false
| false
| 18,982
|
r
|
#*****************************************************************************************
# *
# [To Airbnb?] Custom functions for working with APM and AirDNA data in the *
# *
#*****************************************************************************************
### Calculate the booking status ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Summarise a property's daily booking records into one row of statistics.
#
# Args:
#   book_data: data.frame with a `date` column and a `status` column whose
#              values begin with 'R' (reserved), 'A' (available) or
#              'B' (blocked), one row per day.
#
# Returns: one-row data.frame with the date range, total days, blocked and
#   available rates, blocked-spell statistics and booking count. With fewer
#   than two daily records every statistic is NA.
abbCalcBookStr <- function(book_data){

  # If property has more than one status (one daily booking) find summaries
  if (nrow(book_data) > 1){

    # Find min and max date
    id_min <- min(book_data$date)
    id_max <- max(book_data$date)

    # Run-length encode the status sequence: consecutive identical statuses
    # are concatenated; a '.' marks each status change.
    st_v <- book_data$status[1]
    for(ss in 2:length(book_data$status)){
      if(book_data$status[ss] == book_data$status[[ss - 1]]){
        st_v <- paste0(st_v, book_data$status[[ss]])
      } else {
        st_v <- paste0(st_v, '.' ,book_data$status[[ss]])
      }
    }

    # Split the encoded string into one element per run
    ss_. <- strsplit(st_v, '[.]')[[1]]

    # Grab the first character (status code) of each run
    sg_. <- substr(ss_.[[1]], 1, 1)
    for(sg in 1:length(ss_.)){
      sg_.[sg] <- substr(ss_.[[sg]], 1, 1)
    }

    # Locate the runs of each of the three status types
    id_B <- which(unlist(sg_.) == 'B')
    id_R <- which(unlist(sg_.) == 'R')
    id_A <- which(unlist(sg_.) == 'A')

    # Reserved runs: total booked days
    if (length(id_R) > 0){
      r_. <- ss_.[id_R]
      bookings <- sum(nchar(unlist(r_.)))
    } else {
      bookings <- 0
    }

    # Available runs: share of days available
    if (length(id_A) > 0){
      a_. <- ss_.[id_A]
      avails <- unlist(lapply(a_., nchar))
      avail_rate <- sum(avails) / length(book_data$status)
    } else {
      avail_rate <- 0
    }

    # Blocked runs: share of days blocked plus spell statistics
    if (length(id_B) > 0){
      b_. <- ss_.[id_B]
      blocks <- unlist(lapply(b_., nchar))
      block_rate <- sum(blocks) / length(book_data$status)
      longest_block <- max(blocks)
      med_block <- median(blocks)
      nbr_block <- length(id_B)
    } else {
      block_rate <- 0
      longest_block <- 0
      med_block <- 0
      nbr_block <- 0
    }
    total_days <- length(book_data$status)

  } else {
    # Too few observations to summarise: return all-NA statistics.
    block_rate <- NA
    longest_block <- NA
    med_block <- NA
    nbr_block <- NA
    # BUG FIX: this branch previously assigned `days <- NA`, leaving
    # `total_days` undefined so the data.frame() call below errored.
    total_days <- NA
    id_min <- NA
    id_max <- NA
    avail_rate <- NA
    bookings <- NA
  }

  ## Return Values
  return(data.frame(min_date=id_min,
                    max_date=id_max,
                    total_days=total_days,
                    block_rate=round(block_rate, 3),
                    avail_rate=round(avail_rate, 3),
                    longest_block=longest_block,
                    nbr_block=nbr_block,
                    med_block=med_block,
                    bookings=bookings))
}
### Correct the APM data dates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parse APM-style date strings ("d/m/Y[ 0:00]") into Date objects.
#
# Args:
#   x_date: vector of dates as character, factor or other coercible type,
#           e.g. "15/08/2015 0:00" or "15/08/15".
#
# Returns: a Date vector (day/month/year interpretation).
fixAPMDates <- function(x_date){

  ## Coerce factors/numerics to character before string surgery
  ## (is.character() is safer than `class(x) != 'character'`, which breaks
  ## on objects with more than one class)
  if (!is.character(x_date)) x_date <- as.character(x_date)

  ## Remove the constant " 0:00" time suffix. Base gsub() with fixed = TRUE
  ## replaces the previous stringr dependency for these literal patterns.
  x_date <- gsub(' 0:00', '', x_date, fixed = TRUE)

  ## Collapse 4-digit years in the 2000s to 2 digits ("/2015" -> "/15")
  x_date <- gsub('/20', '/', x_date, fixed = TRUE)

  ## Return values as standardized, British/Australian date format
  as.Date(x_date, "%d/%m/%y")
}
### Set the cleaning counter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialise a "clean" counter object from the current sizes of the global
# `str_tdf` and `ltr_tdf` data frames. The object carries an operations log
# (seeded with one 'initial' row) plus running row counts that later
# countCleaning() calls diff against.
setCleanCount <- function(){

  # Snapshot current row counts of the two (global) working tables
  n_str <- nrow(str_tdf)
  n_ltr <- nrow(ltr_tdf)

  # Seed the operations log with the starting sizes
  count_log <- data.frame(operation='initial',
                          str=n_str,
                          ltr=n_ltr)

  # Bundle log + running totals into an object of class "clean"
  structure(list(count_df = count_log,
                 str_running = n_str,
                 ltr_running = n_ltr),
            class='clean')
}
### Cleaning counting updater ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Append a cleaning-step record to a "clean" counter object.
#
# Args:
#   clean_obj: object of class "clean" from setCleanCount() or a previous
#              countCleaning() call.
#   operation: label describing the cleaning step just performed.
#
# Returns: updated "clean" object whose count_df gains one row holding the
#   number of rows removed from the global `str_tdf` / `ltr_tdf` tables
#   since the last snapshot; NULL (with a message) on bad input.
countCleaning <- function(clean_obj, operation){

  # Guard: refuse anything that is not a "clean" counter
  if (!inherits(clean_obj, 'clean')){
    message('"clean_obj" not an object of class "clean"')
    return(NULL)
  }

  # Rows removed since the previous snapshot
  cut_str <- clean_obj$str_running - nrow(str_tdf)
  cut_ltr <- clean_obj$ltr_running - nrow(ltr_tdf)

  # One log row for this operation
  step_df <- data.frame(operation=operation,
                        str=cut_str,
                        ltr=cut_ltr)

  # Rebuild the object with the extended log and fresh running totals
  structure(list(count_df = rbind(clean_obj$count_df, step_df),
                 str_running = nrow(str_tdf),
                 ltr_running = nrow(ltr_tdf)),
            class='clean')
}
### Impute likely bookings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Estimate "likely" total bookings for one property by scaling the market
# booking rate on the property's UNOBSERVED dates by the property's booking
# rate relative to the market on its observed dates.
#
# Args:
#   prop_df: nested tibble for one property; after unnesting it must carry
#            `date`, `block_rate`, `avail_rate`, `bookings` and
#            `property_id` columns.
#   rate_summ: market summary, one row per `date`, with `obs` (number of
#              observed properties) and `rate` (booking rate) columns.
#
# Returns: one-row tibble with `property_id` and `lik_bookings`
#          (observed bookings + imputed bookings for unobserved dates).
impLikelyBookings <- function(prop_df,
                              rate_summ){

  # Unnest the str prop data
  prop_df <- prop_df %>%
    tidyr::unnest()

  # Get relative booking rate (prop versus other props on those days):
  # weighted market rate over the property's observed dates...
  rsp <- rate_summ %>%
    dplyr::filter(date %in% prop_df$date)
  w_obs <- sum(rsp$obs * rsp$rate) / sum(rsp$obs)
  # ...then the property's own booking share relative to that market rate
  # (> 1 means the property books more than the market average).
  rel_rate <- (1 - prop_df$block_rate - prop_df$avail_rate) / w_obs

  # Estimate likely bookings over the dates NOT observed for this property
  rso <- rate_summ %>%
    dplyr::filter(!date %in% prop_df$date)
  lik_book <- round(sum(rso$obs * rso$rate) / sum(rso$obs) * nrow(rso) * rel_rate, 0)

  # Add imputed bookings to observed bookings; keep a single summary row
  p_df <- prop_df %>%
    dplyr::mutate(lik_bookings = bookings + lik_book) %>%
    dplyr::select(property_id, lik_bookings) %>%
    dplyr::slice(1)

  # Return
  p_df
}
### Impute likely bookings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Estimate "potential" total bookings for one property: the likely bookings
# (from impLikelyBookings, expected in `lik_bookings`) plus the bookings the
# property would plausibly have earned on its BLOCKED days, scaled by its
# booking rate relative to the market on non-blocked days.
#
# Args:
#   prop_df: nested tibble for one property; after unnesting it must carry
#            `date`, `status`, `block_rate`, `avail_rate`, `lik_bookings`
#            and `property_id` columns.
#   rate_summ: market summary, one row per `date`, with `obs` and `rate`.
#
# Returns: one-row tibble with `property_id` and `pot_bookings`.
impPotentialBookings <- function(prop_df,
                                 rate_summ){

  prop_df <- prop_df %>% tidyr::unnest()

  # Split daily records into non-blocked and blocked days
  # (the extra unnest() calls are no-ops after the line above)
  notblock_df <- prop_df %>%
    tidyr::unnest() %>%
    dplyr::filter(status != 'B')
  block_df <- prop_df %>%
    tidyr::unnest() %>%
    dplyr::filter(status == 'B')

  # Get relative booking rate (prop versus other props on those days)
  rsp <- rate_summ %>%
    dplyr::filter(date %in% notblock_df$date)
  w_obs <- sum(rsp$obs * rsp$rate) / sum(rsp$obs)
  rel_rate <- unique((1 - prop_df$block_rate - prop_df$avail_rate) / w_obs)

  # Expected bookings over the property's blocked dates
  rso <- rate_summ %>%
    dplyr::filter(date %in% block_df$date)
  pot_book <- round(sum(rso$obs * rso$rate) / sum(rso$obs) * nrow(rso) * rel_rate, 0)

  # Add to likely bookings; keep a single summary row
  p_df <- prop_df %>%
    dplyr::mutate(pot_bookings = lik_bookings + pot_book) %>%
    dplyr::select(property_id, pot_bookings) %>%
    dplyr::slice(1)

  # Return
  p_df
}
### Wrapper function to handle all of the imputation and comparison ~~~~~~~~~~~~~~~~~~~~~~
# Wrapper that (optionally per level of `split.field`) attaches percentile
# ranks, an imputed long-term rent, imputed days-on-market and an imputed
# annual LTR revenue to each STR property.
#
# Args:
#   str.df: short-term (airbnb) property data.
#   ltr.df: long-term rental data.
#   mod.spec: formula passed to imputeLtrRents().
#   match.factor: optional factor columns to align between the two data sets.
#   split.field: optional column name to split the analysis by; NULL = global.
#   verbose: emit progress messages.
#
# Returns: data.frame of property_id plus only the newly added columns.
abbImputeCompare <- function(str.df,
                             ltr.df,
                             mod.spec,
                             match.factor=NULL,
                             split.field=NULL,
                             verbose=FALSE){

  # Remember the incoming columns so only new ones are returned at the end
  init_names <- names(str.df)

  ## Split data by field
  # If field is specified
  if (!is.null(split.field)){
    str.list <- split(str.df, str.df[ ,split.field])
    ltr.list <- split(ltr.df, ltr.df[ ,split.field])
    # If no field specified
  } else {
    str.list <- list(str.df)
    ltr.list <- list(ltr.df)
    if(verbose){
      cat('Data analyzed at global level')
    }
  }

  ## Loop through split dfs
  # Set up capture list
  imp.list <- list()

  # Run Loop
  for (il in 1:length(str.list)){

    #if(verbose) cat('Imputing and Comparing: ', split.levels[il], '\n')

    # Percentile ranks (1..101) of rate, occupancy and potential occupancy
    str.list[[il]]$rate_qtl <- makeWtdQtl(str.list[[il]]$med_rate,
                                          return.type='rank')
    str.list[[il]]$occ_qtl <- makeWtdQtl(str.list[[il]]$occ_rate,
                                         return.type='rank')
    str.list[[il]]$pot_occ_qtl <- makeWtdQtl(str.list[[il]]$pot_occ_rate,
                                             return.type='rank')

    # Impute long term rents
    # NOTE(review): this passes the FULL ltr.df/str.df rather than the
    # current split subsets (ltr.list[[il]]/str.list[[il]]) — confirm
    # whether per-split imputation was intended here.
    imp.temp <- imputeLtrRents(ltr.df=ltr.df,
                               str.df=str.df,
                               mod.spec=mod.spec,
                               match.factor=match.factor)

    # Add imputed LTRs to the STR data
    str.list[[il]] <- merge(str.list[[il]], imp.temp$imp_rent, by='property_id')
    imp.list[[il]] <- imp.temp

    # Impute days on market (weighted median of the LTR subset)
    str.list[[il]]$imp_dom <- imputeDOM(str.list[[il]],
                                        ltr.list[[il]],
                                        calc.type='median')

    # Imputed annual LTR revenue: weekly rent times weeks not on market
    str.list[[il]]$ltr_imp_revenue <- (str.list[[il]]$imp_rent *
                                         (52 - str.list[[il]]$imp_dom / 7))

    # Compare revenues
    #comp.revs <- compareRevenues(str.list[[il]])

    # Add revenue comparison fields to str data
    #str.list[[il]] <- merge(str.list[[il]],
    #                         comp.revs,
    #                         by='property_id')
  }

  ## Convert list into a df
  str.df <- plyr::rbind.fill(str.list)

  ## Add indicator of which field was the split based on
  str.df$split_field <- split.field

  ## Return Values
  return(str.df[, c('property_id', names(str.df)[!names(str.df) %in% init_names])])
}
### Assign quartile values based on a give vector, weighted if necessary ~~~~~~~~~~~~~~~~~
makeWtdQtl <- function(data.vec,
wgts=rep(1,length(data.vec)),
return.type='rank')
{
## Load required library
require(Hmisc)
## Set the adjustment jitter to prevent identical breaks
adj.jit <- abs(mean(data.vec) / 100000)
## Calculate the weighted quantiles 0 to 1000
wtd.qtl <- Hmisc::wtd.quantile(data.vec + runif(length(data.vec), 0, adj.jit),
weights=wgts,
probs=seq(0, 1, .01))
## Fix the ends
# Minimum
if(wtd.qtl[1] > min(data.vec)){
wtd.qtl[1] <- min(data.vec) - adj.jit
}
# Maximum
if(wtd.qtl[length(wtd.qtl)] < max(data.vec)){
wtd.qtl[length(wtd.qtl)] <- max(data.vec) + adj.jit
}
## Convert to a vector of quantile indicators
qtl.vec <- as.numeric(as.factor(cut(data.vec,
breaks=(wtd.qtl + seq(0, 1, .01) * adj.jit))))
## Return value
if(return.type == 'rank'){
return(qtl.vec)
} else {
return(wtd.qtl)
}
}
### Cross impute rates and rents ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
imputeLtrRents <- function(ltr.df,
str.df,
mod.spec,
match.factor=NULL)
{
## Arguments
# ltr.df: data.frame of long term rental observations
# str.df: data.frame of airbnb properties
# ltr.mod.spec: specification for rent price model
# str.mod.spec: specification for airbnb properties
# clip.field: field to ensure factors match between rent and str
## Remove those within the clip field that isn't present in both
if(!is.null(match.factor)){
for(i.cf in 1:length(match.factor)){
# Find the fields that are used to clip
l.cf <- which(names(ltr.df) == match.factor[i.cf])
s.cf <- which(names(str.df) == match.factor[i.cf])
# Get IDs for those to be removed
ltr.df <- ltr.df[ltr.df[[l.cf]] %in% str.df[[s.cf]], ]
str.df <- str.df[str.df[[s.cf]] %in% ltr.df[[l.cf]], ]
# id.l <- ltr.df[ ,l.cf] %in% names(table(as.character(str.df[ ,s.cf])))
# id.s <- str.df[ ,s.cf] %in% names(table(as.character(ltr.df[ ,l.cf])))
#
# # Filter out obs missing matched factors
# ltr.df <- ltr.df[id.l, ]
# str.df <- str.df[id.s, ]
}
}
## Add the monthly factors
str.df$month <- '2015_8'
## Build regression models for rental values
ltr.mod <- lm(mod.spec, data=ltr.df)
## Add the predicted values to the short term data
imp.rent <- exp(predict(ltr.mod, str.df))
## Return Values
return(list(imp_rent=data.frame(property_id=str.df$property_id,
imp_rent=round(imp.rent, 0)),
model=ltr.mod))
}
### Wrapper: impute weekly rents for STR properties via k-nearest LTR neighbours ~~~~~~~~~
# ltr_df: long-term listings with longitude/latitude/price plus grouping fields
# str_df: short-term properties with longitude/latitude/property_id plus grouping fields
# knns: vector of neighbour counts; one imputation per k, averaged into a 'mean' column
# Returns: wide data.frame with one k_* column per k, plus 'mean' and annualized 'year'
knnImputeWrap <- function(ltr_df,
                          str_df,
                          knns = c(3, 5, 7)){
  # Split both data sets by structural type so neighbours are like-for-like
  # (BUG FIX: the STR split previously referenced the undefined object 'str_tdf')
  ltr_ <- plyr::dlply(ltr_df, .variables = c('bedrooms', 'bathrooms',
                                             'type'))
  str_ <- plyr::dlply(str_df, .variables = c('bedrooms', 'bathrooms',
                                             'type'))
  # Keep only groups present in BOTH data sets, and align ltr_ to str_'s order
  # so map2() pairs corresponding groups
  str_ <- str_[names(str_) %in% names(ltr_)]
  ltr_ <- ltr_[names(str_)]
  k_ <- list()
  for (j in knns){
    # One neighbour-median imputation per value of k
    k_[[j]] <- purrr::map2(.x = str_,
                           .y = ltr_,
                           .f = knnImpute,
                           k = j) %>%
      dplyr::bind_rows()
  }
  # Wide table: one k_* column per neighbour count
  x <- tidyr::spread(k_ %>% bind_rows(), k, weekly_rent)
  # Average across the k columns (single k: use it directly)
  if (length(unique(knns)) > 1){
    x$mean <- rowMeans(x[,grepl('k_', names(x))])
  } else {
    x$mean <- x[,grepl('k_', names(x))]
  }
  # Annualize the weekly figure
  x$year <- x$mean * 52
  x
}
# Impute a weekly rent for each STR property as the median price of its k
# nearest long-term listings (by longitude/latitude).
knnImpute <- function(s_df, l_df, k){
  # Locate the k nearest long-term listings for every short-term property
  neighbours <- RANN::nn2(data = l_df[, c('longitude', 'latitude')],
                          query = s_df[, c('longitude', 'latitude')],
                          k = k)
  # Median asking price across each property's neighbour set
  med_price <- apply(neighbours$nn.idx, 1,
                     function(idx) median(l_df$price[idx]))
  data.frame(property_id = s_df$property_id,
             weekly_rent = med_price,
             k = paste0('k_', k))
}
### Impute days on market for the airbnb properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
imputeDOM <- function(str.df,
ltr.df,
calc.type='median'){
## If median type
if(calc.type == 'median'){
dom.qtl <- makeWtdQtl(ltr.df$dom, return.type='raw')
str.df$imp.dom <- dom.qtl[51]
}
## if model type
if(calc.type == 'model'){
# Save for later
}
## Return Values
return(round(str.df$imp.dom, 0))
}
# Place multiple ggplots into a configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ggMultiPlots <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
require(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tapply2DF <- function(xData, # Vector being tapply'd
byField, # Field to split vector by
xFunc, # Function to apply
newName='Var', # Name of new variable
idName='ID',
na.rm=FALSE) # Name of identification field
{
## Execute tapply()
xTable <- as.data.frame(tapply(xData, byField, xFunc, na.rm=na.rm))
## Add names and new fields
# Give calculated field a name
names(xTable) <- newName
# Add id field and give it a name
xTable[ ,2] <- rownames(xTable)
names(xTable)[2] <- idName
# Reorder columns
xTable <- xTable[ ,c(2, 1)]
# Remove existing field names
rownames(xTable) <- 1:nrow(xTable)
## Return values
return(xTable)
}
### Create comparison table ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
abbCreateCompTable <- function(ic.df,
split.field=NULL){
if(split.field == 'none'){
str.act <- mean(ic.df$act_pref)
str.lik <- mean(ic.df$lik_pref)
str.pot <- mean(ltr.df$pot_pref)
rate.table <- data.frame(ID='all',
var=c(str.act, str.lik, str.pot),
rev.type=c('Actual',
'Likely',
'Potential'))
} else {
# Calculate cross-tab values
str.act <- tapply2DF(ic.df$act_pref, ic.df[ ,split.field], mean)
str.lik <- tapply2DF(ic.df$lik_pref, ic.df[ ,split.field], mean)
str.pot <- tapply2DF(ic.df$pot_pref, ic.df[, split.field], mean)
# Add names
str.act$rev.type <- 'Actual'
str.lik$rev.type <- 'Likely'
str.pot$rev.type <- 'Potential'
# Combine into table
rate.table <- rbind(str.act, str.lik, str.pot)
# Reorder factors for common split fields
if(split.field == 'geo_mrkt'){
rate.table$ID <- factor(rate.table$ID,
levels=c('city-core', 'city', 'beach',
'suburban', 'rural'))
}
if(split.field == 'host_type'){
rate.table$ID <- factor(rate.table$ID,
levels=c('Profit Seeker', 'Opportunistic Sharer',
'Multi-Platform User', 'Unknown'))
}
}
## Return Values
return(rate.table)
}
### Create a vector of significance stars for regression results ~~~~~~~~~~~~~~~~~~~~~~~~~
makeStatSig <- function(x){
x <- as.numeric(x)
y <- rep('***', length(x))
y[x > .01] <- '** '
y[x > .05] <- '* '
y[x > .1] <- ' '
y
}
### Diagnostics for logistic regression models ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
logDx <- function(log.model, data, resp.var){
pred <- prediction(predict(log.model, data, type='response'), resp.var)
auc <- performance(pred, measure='auc')
ll <- logLik(log.model)
AIC <- AIC(log.model)
return(list(AIC=AIC,
logLik=ll,
auc=auc))
}
|
library(utility)
### Name: utility.aggregate.bonusmalus
### Title: Bonus-malus aggregation of values or utilities
### Aliases: utility.aggregate.bonusmalus
### ** Examples
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,NA,1))
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,1,NA))
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,NA,-1))
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,-1,NA))
|
/data/genthat_extracted_code/utility/examples/utility.aggregate.bonusmalus.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 405
|
r
|
library(utility)
### Name: utility.aggregate.bonusmalus
### Title: Bonus-malus aggregation of values or utilities
### Aliases: utility.aggregate.bonusmalus
### ** Examples
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,NA,1))
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,1,NA))
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,NA,-1))
utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,-1,NA))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/envDataRead.R
\name{envDataRead}
\alias{envDataRead}
\title{Reading the environmental data from a file.}
\usage{
envDataRead(
file.in = stop("'file.in' must be given!"),
dir.in = ".",
file.out = NULL,
dir.out = ".",
sep = ",",
cont = TRUE,
header = TRUE,
rownames = TRUE,
overwrite = NULL
)
}
\arguments{
\item{file.in}{The name of the file with environmental data.}
\item{dir.in}{The path to the directory where the 'file.in' resides.}
\item{file.out}{The base name for the output files (see Details).}
\item{dir.out}{The path to the directory where the output files will be saved.}
\item{sep}{The separator character that separates values in each line of the
file; "," by default (as in a csv file).}
\item{cont}{Logical - are the values continuous (TRUE, default) or categories
(FALSE)? See Details.}
\item{header}{Logical indicating whether the first line of the file is a header;
default TRUE.}
\item{rownames}{Default (TRUE) indicates that the first column of the file
includes names of rows. If a character vector is given here, these names
are used as rownames; if FALSE, no rownames are used.}
\item{overwrite}{Logical: if a file with the given name exists, should it be
overwritten or not? If NULL, the user will be prompted for input.}
}
\value{
A list of ffdf objects with the environmental data in numeric format.
}
\description{
This function reads in the environmental data that accompanies the genetic
data read in with \link{genDataRead}.
}
\details{
The environmental data such as methylation data can be large if the information
is stored on per-SNP basis. Thus, when data is large, this function reads it
in andcreates a special ff object that stores the data without limiting the
memory available. This can take time but needs to be performed only once.
Later on, one can use the \link{envDataLoad} function to load the appropriate
data from \code{.ffData} file saved to disk, which is a quick process.
}
\section{Details}{
If 'file.out' is not given, the default is NULL and the output filenames are
constructed based on the input filenames. The '_env' suffix is added to the
base name and the \code{.ffData} file is written to disk. This file contains
all the information needed to restore the ffdf object by calling
\link{envDataLoad} function later on.
If 'cont' is TRUE (default), the output data will be a list of ff matrices
containing single-precision values. However, before using this data as
stratification values, the user needs to create categories - this can be
done manually or with the provided \link{envDataCategorize} function.
}
|
/man/envDataRead.Rd
|
no_license
|
jromanowska/HaplinMethyl
|
R
| false
| true
| 2,691
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/envDataRead.R
\name{envDataRead}
\alias{envDataRead}
\title{Reading the environmental data from a file.}
\usage{
envDataRead(
file.in = stop("'file.in' must be given!"),
dir.in = ".",
file.out = NULL,
dir.out = ".",
sep = ",",
cont = TRUE,
header = TRUE,
rownames = TRUE,
overwrite = NULL
)
}
\arguments{
\item{file.in}{The name of the file with environmental data.}
\item{dir.in}{The path to the directory where the 'file.in' resides.}
\item{file.out}{The base name for the output files (see Details).}
\item{dir.out}{The path to the directory where the output files will be saved.}
\item{sep}{The separator character that separates values in each line of the
file; "," by default (as in a csv file).}
\item{cont}{Logical - are the values continuous (TRUE, default) or categories
(FALSE)? See Details.}
\item{header}{Logical indicating whether the first line of the file is a header;
default TRUE.}
\item{rownames}{Default (TRUE) indicates that the first column of the file
includes names of rows. If a character vector is given here, these names
are used as rownames; if FALSE, no rownames are used.}
\item{overwrite}{Logical: if a file with the given name exists, should it be
overwritten or not? If NULL, the user will be prompt for input.}
}
\value{
A list of ffdf objects with the environmental data in numeric format.
}
\description{
This function reads in the environmental data that accompanies the genetic
data read in with \link{genDataRead}.
}
\details{
The environmental data such as methylation data can be large if the information
is stored on per-SNP basis. Thus, when data is large, this function reads it
in andcreates a special ff object that stores the data without limiting the
memory available. This can take time but needs to be performed only once.
Later on, one can use the \link{envDataLoad} function to load the appropriate
data from \code{.ffData} file saved to disk, which is a quick process.
}
\section{Details}{
If 'file.out' is not given, the default is NULL and the output filenames are
constructed based on the input filenames. The '_env' suffix is added to the
base name and the \code{.ffData} file is written to disk. This file contains
all the information needed to restore the ffdf object by calling
\link{envDataLoad} function later on.
If 'cont' is TRUE (default), the output data will be a list of ff matrices
containing single-precision values. However, before using this data as
stratification values, the user needs to create categories - this can be
done manually or with the provided \link{envDataCategorize} function.
}
|
coef.CARBayes <- function(object,...)
{
    #### Posterior median of each regression coefficient, or NULL when the
    #### model contains no sampled beta matrix
    beta.samples <- object$samples$beta
    if(is.null(nrow(beta.samples)))
    {
        return(NULL)
    }
    post.medians <- apply(beta.samples, 2, median)
    names(post.medians) <- colnames(object$X)
    post.medians
}
|
/R/coef.CARBayes.R
|
no_license
|
duncanplee/CARBayes
|
R
| false
| false
| 297
|
r
|
coef.CARBayes <- function(object,...)
{
    #### Posterior median of each regression coefficient, or NULL when the
    #### model contains no sampled beta matrix
    beta.samples <- object$samples$beta
    if(is.null(nrow(beta.samples)))
    {
        return(NULL)
    }
    post.medians <- apply(beta.samples, 2, median)
    names(post.medians) <- colnames(object$X)
    post.medians
}
|
#' `d3.js` topogram
#'
#' Continuous area cartograms with `d3.js`
#'
#' @param data A \code{data.frame} with at least two variables : the geo id and the value associated
#' @param key_var A character vector of length one or more, or a named list. The value to represent on the map
#' @param shape Geographical shape to use, should be one of \code{france-reg}, \code{france-reg-2016}, \code{france-dep}, \code{usa-states}
#' @param geo_id Name of variable containing the geographical id
#' @param geo_lab Name of variable containing the geographical label
#' @param colors A vector of color to use on the map
#' @param origin For France only, a numeric vector of length two for centering the map
#' @param scale For France only, a numeric for sizing the map
#' @param width,height height and width of widget
#' @param elementId string id as a valid CSS element id.
#'
#' @examples
#' library("topogRam")
#' topogRam(data = frRegPop, key_var = "P13_POP", geo_lab = "region")
#'
#'
#'
#' @import htmlwidgets
#' @import jsonlite
#' @importFrom stats runif
#'
#' @export
# Build the d3.js cartogram htmlwidget: normalizes the input data's id/label
# columns, validates the shape choice, JSON-encodes everything for the JS side,
# and hands off to htmlwidgets::createWidget().
topogRam <- function(data, key_var, shape = "france-reg", geo_id = "id", geo_lab = NULL,
                     colors, origin = NULL, scale = NULL, width = 500, height = 500, elementId = NULL) {
  # Default to a 5-step red sequential palette when none supplied
  if (missing(colors))
    colors <- c("#FEE5D9", "#FCAE91", "#FB6A4A", "#DE2D26", "#A50F15")
  # The JS layer expects the geo id column to be called 'id'
  if (geo_id != "id")
    names(data)[names(data) == geo_id] <- "id"
  # ...and the label column (if any) to be called 'NAME'
  if (!is.null(geo_lab))
    names(data)[names(data) == geo_lab] <- "NAME"
  # A single field-spec list gets wrapped so key_var is always a list of specs
  if (is.list(key_var) && !is.list(unlist(key_var, recursive = FALSE)))
    key_var <- list(key_var)
  # Bare character names become minimal field specs with empty name/format/lab
  if (!is.list(key_var) & is.character(key_var))
    key_var <- lapply(key_var, function(x) list(key = x, name = "", format = "", lab = ""))
  # Restrict shape to the bundled topojson layers
  shape <- match.arg(
    arg = shape,
    choices = c("france-reg", "france-reg-2016", "france-dep", "france-dep-2", "usa-states", "sweden-1", "nz-reg","spain-regions")
  )
  # Default map centering/sizing (documented as France-oriented defaults)
  if (is.null(origin))
    origin <- c(8, 45.5)
  if (is.null(scale))
    scale <- 2500
  # shapejs <- switch(
  #   shape,
  #   "france-reg" = 'frReg',
  #   "france-dep" = 'frDep',
  #   "france-dep-2" = 'frDep2',
  #   "france-reg-2016" = 'frReg2016',
  #   "usa-states" = 'usaStates'
  # )
  # forward options using x (everything JSON-encoded for the JS binding;
  # idSelect gets a random suffix so multiple widgets on one page don't clash)
  x = list(
    data = jsonlite::toJSON(x = data),
    colors = jsonlite::toJSON(x = colors),
    fields = jsonlite::toJSON(x = key_var, auto_unbox = TRUE),
    shape = shape, #shapejs = shapejs,
    addSelect = jsonlite::toJSON(length(key_var) > 1, auto_unbox = TRUE),
    idSelect = paste0("selectfield", round(runif(1,1e6,9e6))),
    origin = jsonlite::toJSON(x = origin), scale = jsonlite::toJSON(x = scale)
  )
  # shapesDisp <- list(
  #   "france-reg" = 'france-regions.topojson',
  #   "france-dep" = 'france-departements.topojson',
  #   "france-dep-2" = 'france-departements-2.topojson',
  #   "france-reg-2016" = 'france-regions-2016.topojson',
  #   "usa-states" = 'usa-states.topojson'
  # )
  #
  # # Dependancies
  # shapeDep <- htmltools::htmlDependency(
  #   name = "shapes",
  #   version = '1.0',
  #   src = system.file('htmlwidgets/lib/shapes', package = 'topogRam'),
  #   attachment = shapesDisp[shape]
  # )
  # create widget
  htmlwidgets::createWidget(
    name = 'topogRam',
    x,
    width = width,
    height = height,
    # dependencies = shapeDep,
    package = 'topogRam',
    elementId = elementId
  )
}
#' Shiny bindings for topogRam
#'
#' Output and render functions for using topogRam within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a topogRam
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name topogRam-shiny
#'
#' @export
# Shiny output binding: placeholder element the server-side renderTopogRam fills in.
topogRamOutput <- function(outputId, width = '500px', height = '500px'){
  htmlwidgets::shinyWidgetOutput(outputId, 'topogRam', width, height, package = 'topogRam')
}
#' @rdname topogRam-shiny
#' @export
# Shiny render function: captures the widget-producing expression and wires it
# to the matching topogRamOutput() placeholder.
renderTopogRam <- function(expr, env = parent.frame(), quoted = FALSE) {
  if (!quoted) { expr <- substitute(expr) } # force quoted
  htmlwidgets::shinyRenderWidget(expr, topogRamOutput, env, quoted = TRUE)
}
|
/R/topogRam.R
|
no_license
|
DATAUNIRIO/topogRam
|
R
| false
| false
| 4,491
|
r
|
#' `d3.js` topogram
#'
#' Continuous area cartograms with `d3.js`
#'
#' @param data A \code{data.frame} with at least two variables : the geo id and the value associated
#' @param key_var A character vector of length one or more, or a named list. The value to represent on the map
#' @param shape Geographical shape to use, should be one of \code{france-reg}, \code{france-reg-2016}, \code{france-dep}, \code{usa-states}
#' @param geo_id Name of variable containing the geographical id
#' @param geo_lab Name of variable containing the geographical label
#' @param colors A vector of color to use on the map
#' @param origin For France only, a numeric vector of length two for centering the map
#' @param scale For France only, a numeric for sizing the map
#' @param width,height height and width of widget
#' @param elementId string id as a valid CSS element id.
#'
#' @examples
#' library("topogRam")
#' topogRam(data = frRegPop, key_var = "P13_POP", geo_lab = "region")
#'
#'
#'
#' @import htmlwidgets
#' @import jsonlite
#' @importFrom stats runif
#'
#' @export
topogRam <- function(data, key_var, shape = "france-reg", geo_id = "id", geo_lab = NULL,
colors, origin = NULL, scale = NULL, width = 500, height = 500, elementId = NULL) {
if (missing(colors))
colors <- c("#FEE5D9", "#FCAE91", "#FB6A4A", "#DE2D26", "#A50F15")
if (geo_id != "id")
names(data)[names(data) == geo_id] <- "id"
if (!is.null(geo_lab))
names(data)[names(data) == geo_lab] <- "NAME"
if (is.list(key_var) && !is.list(unlist(key_var, recursive = FALSE)))
key_var <- list(key_var)
if (!is.list(key_var) & is.character(key_var))
key_var <- lapply(key_var, function(x) list(key = x, name = "", format = "", lab = ""))
shape <- match.arg(
arg = shape,
choices = c("france-reg", "france-reg-2016", "france-dep", "france-dep-2", "usa-states", "sweden-1", "nz-reg","spain-regions")
)
if (is.null(origin))
origin <- c(8, 45.5)
if (is.null(scale))
scale <- 2500
# shapejs <- switch(
# shape,
# "france-reg" = 'frReg',
# "france-dep" = 'frDep',
# "france-dep-2" = 'frDep2',
# "france-reg-2016" = 'frReg2016',
# "usa-states" = 'usaStates'
# )
# forward options using x
x = list(
data = jsonlite::toJSON(x = data),
colors = jsonlite::toJSON(x = colors),
fields = jsonlite::toJSON(x = key_var, auto_unbox = TRUE),
shape = shape, #shapejs = shapejs,
addSelect = jsonlite::toJSON(length(key_var) > 1, auto_unbox = TRUE),
idSelect = paste0("selectfield", round(runif(1,1e6,9e6))),
origin = jsonlite::toJSON(x = origin), scale = jsonlite::toJSON(x = scale)
)
# shapesDisp <- list(
# "france-reg" = 'france-regions.topojson',
# "france-dep" = 'france-departements.topojson',
# "france-dep-2" = 'france-departements-2.topojson',
# "france-reg-2016" = 'france-regions-2016.topojson',
# "usa-states" = 'usa-states.topojson'
# )
#
# # Dependancies
# shapeDep <- htmltools::htmlDependency(
# name = "shapes",
# version = '1.0',
# src = system.file('htmlwidgets/lib/shapes', package = 'topogRam'),
# attachment = shapesDisp[shape]
# )
# create widget
htmlwidgets::createWidget(
name = 'topogRam',
x,
width = width,
height = height,
# dependencies = shapeDep,
package = 'topogRam',
elementId = elementId
)
}
#' Shiny bindings for topogRam
#'
#' Output and render functions for using topogRam within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a topogRam
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name topogRam-shiny
#'
#' @export
topogRamOutput <- function(outputId, width = '500px', height = '500px'){
htmlwidgets::shinyWidgetOutput(outputId, 'topogRam', width, height, package = 'topogRam')
}
#' @rdname topogRam-shiny
#' @export
renderTopogRam <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
htmlwidgets::shinyRenderWidget(expr, topogRamOutput, env, quoted = TRUE)
}
|
\name{read.DVH}
\alias{read.DVH}
\title{
Read DVH data from input file
}
\description{
Function to extract DVH data from input file(s). Supported file formats include Aria (v8-13), DICOM-RT, CadPlan, Monaco, RayStation, and TomoTherapy.
}
\usage{
read.DVH(file, type=NA, verbose=TRUE, collapse=TRUE)
}
\arguments{
\item{file}{
Location of file containing relevant DVH information. One or more files may be specified. If multiple files are specified, all will be imported simultaneously.
}
\item{type}{
Character vector specifying the DVH file format corresponding to each element in \code{file}. Value(s) must be one of \code{"aria8"}, \code{"aria10"}, \code{"aria11"}, \code{"aria13"}, \code{"dicom"}, \code{"cadplan"}, \code{"monaco"}, \code{"raystation"}, or \code{"tomo"} (default is \code{NA}). Note that multiple different types can be processed so long as the length of \code{type} corresponds to the number of files specified.
}
\item{verbose}{
Single logical value indicating whether or not to output verbose information and status in text
}
\item{collapse}{
Single logical value indicating whether or not to collapse input from multiple files into a single \code{DVH.list} with all structures (default) or to output a nested list of individual \code{DVH.list} objects with each one corresponding to a single input file
}
}
\value{
Returns a single \code{DVH.list} object containing all DVH information from a single input file. If multiple files are specified, a list of \code{DVH.list} objects will be returned.
}
\author{
Reid F. Thompson (\email{reid.thompson@gmail.com})
}
\seealso{
\code{\link[RadOnc:DVH-class]{DVH}}, \code{\link[RadOnc:DVH.list-class]{DVH.list}}, \code{\link{new}}
}
\examples{
# Read two DVH example files
file1 <- system.file("extdata/John_Doe.dvh", package="RadOnc")
johndoe <- read.DVH(file=file1, type="aria10", verbose=TRUE)
file2 <- system.file("extdata/Jane_Doe.dvh", package="RadOnc")
janedoe <- read.DVH(file=file2, type="aria10", verbose=TRUE)
combined <- read.DVH(file=c(file1, file2), type="aria10", collapse=TRUE)
}
\keyword{ file }
|
/man/read.DVH.Rd
|
no_license
|
beverlyvictoriateresita/RadOnc
|
R
| false
| false
| 2,088
|
rd
|
\name{read.DVH}
\alias{read.DVH}
\title{
Read DVH data from input file
}
\description{
Function to extract DVH data from input file(s). Supported file formats include Aria (v8-13), DICOM-RT, CadPlan, Monaco, RayStation, and TomoTherapy.
}
\usage{
read.DVH(file, type=NA, verbose=TRUE, collapse=TRUE)
}
\arguments{
\item{file}{
Location of file containing relevant DVH information. One or more files may be specified. If multiple files are specified, all will be imported simultaneously.
}
\item{type}{
Character vector specifying the DVH file format corresonding to each element in \code{file}. Value(s) must be one of \code{"aria8"}, \code{"aria10"}, \code{"aria11"}, \code{"aria13"}, \code{"dicom"}, \code{"cadplan"}, \code{"monaco"}, \code{"raystation"}, or \code{"tomo"} (default is \code{NA}). Note that multiple different types can be processed so long as the length of \code{type} corresponds to the number of files specified.
}
\item{verbose}{
Single logical value indicating whether or not to output verbose information and status in text
}
\item{collapse}{
Single logical value indicating whether or not to collapse input from multiple files into a single \code{DVH.list} with all structures (default) or to output a nested list of individual \code{DVH.list} objects with each one corresponding to a single input file
}
}
\value{
Returns a single \code{DVH.list} object containing all DVH information from a single input file. If multiple files are specified, a list of \code{DVH.list} objects will be returned.
}
\author{
Reid F. Thompson (\email{reid.thompson@gmail.com})
}
\seealso{
\code{\link[RadOnc:DVH-class]{DVH}}, \code{\link[RadOnc:DVH.list-class]{DVH.list}}, \code{\link{new}}
}
\examples{
# Read two DVH example files
file1 <- system.file("extdata/John_Doe.dvh", package="RadOnc")
johndoe <- read.DVH(file=file1, type="aria10", verbose=TRUE)
file2 <- system.file("extdata/Jane_Doe.dvh", package="RadOnc")
janedoe <- read.DVH(file=file2, type="aria10", verbose=TRUE)
combined <- read.DVH(file=c(file1, file2), type="aria10", collapse=TRUE)
}
\keyword{ file }
|
# Intro script: attach coordinates and a CRS to the built-in US states data.
# NOTE(review): install.packages() in a script runs on every execution —
# consider guarding with requireNamespace(); left as-is here.
install.packages("rgdal")
library(rgdal)
data(state)
# Combine the state statistics with the state center coordinates
states <- data.frame(state.x77, state.center)
# Keep only states whose center longitude is east of -121
states <- states[states$x > -121,]
# Promote to a SpatialPointsDataFrame using x/y as coordinates
coordinates(states) <- c("x", "y")
proj4string(states) <- CRS("+proj=longlat +ellps=clrk66")
summary(states)
states
#Adding a comment line
|
/intro.r
|
no_license
|
larhyp/erboreus
|
R
| false
| false
| 275
|
r
|
install.packages("rgdal")
library(rgdal)
data(state)
states <- data.frame(state.x77, state.center)
states <- states[states$x > -121,]
coordinates(states) <- c("x", "y")
proj4string(states) <- CRS("+proj=longlat +ellps=clrk66")
summary(states)
states
#Adding a comment line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4_metaClustering.R
\name{MetaclusterCVs}
\alias{MetaclusterCVs}
\title{MetaclusterCVs}
\usage{
MetaclusterCVs(fsom)
}
\arguments{
\item{fsom}{Result of calling the FlowSOM function}
}
\value{
Metacluster CVs
}
\description{
Compute the coefficient of variation for the metaclusters
}
\examples{
fileName <- system.file("extdata","lymphocytes.fcs",package="FlowSOM")
ff <- flowCore::read.FCS(fileName)
ff <- flowCore::compensate(ff,ff@description$SPILL)
ff <- flowCore::transform(ff,
flowCore::transformList(colnames(ff@description$SPILL),
flowCore::logicleTransform()))
flowSOM.res <- FlowSOM(ff,scale=TRUE,colsToUse=c(9,12,14:18), nClus=10)
cvs <- MetaclusterCVs(flowSOM.res)
}
|
/man/MetaclusterCVs.Rd
|
no_license
|
AbhivKoladiya/FlowSOM
|
R
| false
| true
| 798
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4_metaClustering.R
\name{MetaclusterCVs}
\alias{MetaclusterCVs}
\title{MetaclusterCVs}
\usage{
MetaclusterCVs(fsom)
}
\arguments{
\item{fsom}{Result of calling the FlowSOM function}
}
\value{
Metacluster CVs
}
\description{
Compute the coefficient of variation for the metaclusters
}
\examples{
fileName <- system.file("extdata","lymphocytes.fcs",package="FlowSOM")
ff <- flowCore::read.FCS(fileName)
ff <- flowCore::compensate(ff,ff@description$SPILL)
ff <- flowCore::transform(ff,
flowCore::transformList(colnames(ff@description$SPILL),
flowCore::logicleTransform()))
flowSOM.res <- FlowSOM(ff,scale=TRUE,colsToUse=c(9,12,14:18), nClus=10)
cvs <- MetaclusterCVs(flowSOM.res)
}
|
# Parse the JSON-like bounds string copied from leafem into a spex polygon.
# x: the copied text; when missing, it is read from the system clipboard
#    (cannot work on RStudio Server — hence the explicit argument).
# NOTE(review): parsing is positional on ':'-split tokens rather than real JSON
# parsing — it assumes the exact leafem output shape shown in the comment below.
parse_leaf_extent <- function(x) {
  if (missing(x)) {
    # Clipboard read can fail (e.g. headless session); surface a clear error
    x <- try(readLines("clipboard", warn = FALSE), silent = TRUE)
    if (inherits(x, "try-error")) {
      stop("cannot read from clipboard")
    }
    if (!grepl("^'\\{\"\\_southWest", x)) stop("clipboard contents does not look like leafem copy output")
  }
  # Expected input shape:
  #{"_southWest":{"lat":-1.307259612275665,"lng":23.411865234375},"_northEast":{"lat":6.937332868878443,"lng":31.904296875000004}}'
  # Tokens 4 and 7 carry the longitudes, tokens 3 and 6 the latitudes
  parts <- unlist(strsplit(x, ":")[[1]][c(4, 7, 3, 6)])
  # Strip the trailing '}' / ',' delimiters and coerce to numeric
  lon <- as.numeric(unlist(lapply(strsplit(parts[1:2], "\\}"), "[", 1)))
  lat <- as.numeric(unlist(lapply(strsplit(parts[3:4], ","), "[", 1)))
  # Leaflet bounds are always WGS84 longlat
  spex(raster::extent(lon, lat), crs = "+proj=longlat +datum=WGS84")
}
#' Polygon extent
#'
#' Create Spatial Polygons with projection metadata from a 'Spatial Extent'.
#'
#' Called with no arguments will return the extent of the current 'par("usr")' setting.
#'
#' Called with a matrix, list, or data frame it will create an extent from a two columned thing.
#'
#' Called with `clipboard = TRUE` and `x` will be treated as the JSON-ic output of the clipboard copy from
#' leafem (WIP). If x is missing, it will be attempted to be read from the clipboard. Clipboard read cannot
#' work on RStudio Server, so we allow the text value to be passed in.
#' I.e. `spex(clipboard = TRUE)` will
#' read from the clipboard, `spex(tx, clipboard = TRUE)` will read from tx with value like
#' \code{'{"_southWest":{"lat":-1.307259612275665,"lng":23.411865234375},"_north...}"'}.
#'
#'
#' This function is to replace a common pattern in spatial packages which is
#' \itemize{
#' \item create an \code{\link[raster]{Extent-class}}, a bounding box in xmin,xmax,ymin,ymax but without projection metadata
#' \item coerce the Extent to \code{\link[sp]{SpatialPolygons}}
#' \item restore the 'CRS', the "coordinate reference system", i.e. projection metadata
#' \item elevate the object to be a \code{\link[sp]{SpatialPolygonsDataFrame-class}}.
#' }
#'
#' In short, this pattern exists because there is no projection metadata stored
#' with either sp's \code{\link[sp]{bbox}} or raster's \code{\link[raster]{Extent-class}}.
#'
#' @param x any object with a \code{\link[raster]{Extent-class}}
#' @param byid return a separate object for every input sub-object (not yet implemented)
#' @param .id optional name for output attribute name
#' @param ... arguments for methods
#' @param crs a projection string
#' @param clipboard WIP this special-case allows x to be the result of the leafem clipboard copy process
#' @importFrom methods as
#' @importFrom raster projection<- projection extent
#' @importFrom sp SpatialPolygonsDataFrame
#' @importFrom stats setNames
#' @return 'SpatialPolygonsDataFrame'
#' @section Warning: Please note that an extent converted to polygons consists
#' of only four unique coordinates, and so this is not necessarily suited for
#' projection transformations.
#' @examples
#' library(raster)
#' data(lux)
#' exlux <- spex(lux)
#'
#' plot(lux)
#' plot(exlux, add = TRUE)
#'
#' ## put an extent and a CRS together
#' spex(extent(0, 1, 0, 1), crs = "+proj=laea +ellps=WGS84")
#' @export
#' @seealso This pattern is displayed in the example code for \code{\link[raster]{cover}}.
# S3 generic: dispatch to spex.default / spex.sf / spex.sfc based on class(x).
spex <- function(x, crs, byid = FALSE, .id, ..., clipboard = FALSE) {
  UseMethod("spex")
}
#' @export
#' @name spex
#' @export
#' @name spex
# Default method: build a SpatialPolygonsDataFrame from anything with an
# extent, or from a 2-column matrix/list/data.frame of coordinates. With no
# arguments, uses the current plot region (par("usr")).
spex.default <- function(x, crs = NULL, byid = FALSE, .id, ..., clipboard = FALSE) {
  # Special case: parse a leafem clipboard-copy string (or the clipboard itself)
  if (clipboard) {
    out <- if (missing(x)) parse_leaf_extent() else parse_leaf_extent(x)
    return(out)
  }
  # No input: fall back to the current base-graphics plot extent
  if (missing(x)) x <- raster::extent(graphics::par("usr"))
  # Remember the input class for the default attribute name below
  cls <- class(x)[1L]
  # Recover CRS metadata from the object when not supplied explicitly
  if (is.null(crs)) {
    #crs <- raster::projection(x)
    crs <- crsmeta::crs_proj(x)
    if (is.na(crs)) {
      crs <- crsmeta::crs_input(x)
    }
  }
  if (is.na(crs)) crs <- NA_character_
  #if (missing(crs) && raster::couldBeLonLat(x)) crs <- "+proj=longlat +datum=WGS84 +no_defs"
  # Coerce tabular/list coordinate input to a 2-column matrix
  if (is.data.frame(x)) x<- as.matrix(x)
  if (is.list(x)) x <- do.call(cbind, x)
  if (is.numeric(x)) {
    x <- as.matrix(x)
    if (ncol(x) < 2) stop("matrix of 2 columns required")
    if (ncol(x) > 2) warning("only 2 columns used from input")
  }
  if (byid) {
    stop("byid option not yet implemented")
    #lapply(split(x, seq(nrow(x))), raster::extent)
  } else {
    # Single polygon covering the extent
    p <- as(extent(x), 'SpatialPolygons')
  }
  # Default attribute name records the class the extent came from
  if (missing(.id)) {
    .id <- sprintf("%s_extent", cls)
  }
  # Attach the CRS (string form goes through sp::CRS without validation)
  if (is.character(crs)) {
    raster::crs(p) <- sp::CRS(crs, doCheckCRSArgs = FALSE)
  } else {
    raster::crs(p) <- crs
  }
  # Promote to a one-row SpatialPolygonsDataFrame
  SpatialPolygonsDataFrame(p, setNames(data.frame(1L), .id))
}
#' Extent of simple features
#'
#' This is the simplest of the missing "raster support" for the sf package,
#' here using the xmin, xmax, ymin, ymax convention used by raster rather than
#' the transpose version favoured in sp and sf.
#' @param x object with an extent
#' @param ... unused
#' @name extent
#' @aliases Extent
#' @importFrom raster extent
extent_sf <- function(x, ...) {
  ## An sf bbox is ordered xmin, ymin, xmax, ymax; reorder to raster's
  ## xmin, xmax, ymin, ymax before constructing the Extent.
  raster::extent(attr(x[[attr(x, "sf_column")]], "bbox")[c(1, 3, 2, 4)])
}
## Register the S3 class "sf" with the S4 system so that raster's S4
## generic extent() can dispatch on sf objects.
setOldClass("sf")
setMethod(f = "extent", signature = "sf", definition = extent_sf)
#' @export
#' @name spex
spex.sf <- function(x, crs, byid = FALSE, .id, ..., clipboard = FALSE) {
  ## When no CRS is supplied, fall back to the object's own metadata:
  ## the proj string first, then the raw input string if that is NA.
  if (missing(crs)) {
    proj_crs <- crsmeta::crs_proj(x)
    crs <- if (is.na(proj_crs)) crsmeta::crs_input(x) else proj_crs
  }
  spex(extent(x), crs = crs)
}
#' @export
#' @name spex
spex.sfc <- function(x, crs, byid = FALSE, .id, ..., clipboard = FALSE) {
  ## When no CRS is supplied, fall back to the object's own metadata:
  ## the proj string first, then the raw input string if that is NA.
  if (missing(crs)) {
    proj_crs <- crsmeta::crs_proj(x)
    crs <- if (is.na(proj_crs)) crsmeta::crs_input(x) else proj_crs
  }
  ## Build the extent straight from the sfc bbox attribute, reordered to
  ## raster's xmin, xmax, ymin, ymax convention.
  bb <- attr(x, "bbox")
  spex(extent(bb[c("xmin", "xmax", "ymin", "ymax")]), crs = crs)
}
#' Axis ranges from extent
#'
#' Functions `xlim` and `ylim` return the two-value counterparts of an extent.
#'
#' Any projection metadata is dropped since this is a one-dimensional entity.
#' @param x any object with an extent understood by `spex`
#' @param ... reserved for future methods
#' @export
xlim <- function(x, ...) UseMethod("xlim")
#' @export
#' @name xlim
xlim.default <- function(x, ...) {
  ## Take the extent polygon and report its x range as c(min, max).
  ext <- spex(x)
  c(raster::xmin(ext), raster::xmax(ext))
}
#' @export
#' @name xlim
ylim <- function(x, ...) UseMethod("ylim")
#' @export
#' @name xlim
ylim.default <- function(x, ...) {
  ## Take the extent polygon and report its y range as c(min, max).
  ext <- spex(x)
  c(raster::ymin(ext), raster::ymax(ext))
}
|
/R/spex.R
|
no_license
|
cran/spex
|
R
| false
| false
| 6,340
|
r
|
## Parse the JSON-ish bounding-box text produced by leafem's "copy extent"
## clipboard action into a SpatialPolygonsDataFrame via spex().
## If `x` is missing, the text is read from the system clipboard; reading
## the clipboard cannot work on RStudio Server, which is why callers may
## pass the copied text in directly.
parse_leaf_extent <- function(x) {
  if (missing(x)) {
    x <- try(readLines("clipboard", warn = FALSE), silent = TRUE)
    if (inherits(x, "try-error")) {
      stop("cannot read from clipboard")
    }
    ## Sanity check: leafem output starts with '{"_southWest (note the
    ## leading single quote in the copied text).
    if (!grepl("^'\\{\"\\_southWest", x)) stop("clipboard contents does not look like leafem copy output")
  }
  #{"_southWest":{"lat":-1.307259612275665,"lng":23.411865234375},"_northEast":{"lat":6.937332868878443,"lng":31.904296875000004}}'
  ## Split on ":"; fields 4 and 7 hold the two lng values, fields 3 and 6
  ## the two lat values, each followed by a "}" or "," delimiter that is
  ## stripped below to recover the bare numbers.
  parts <- unlist(strsplit(x, ":")[[1]][c(4, 7, 3, 6)])
  lon <- as.numeric(unlist(lapply(strsplit(parts[1:2], "\\}"), "[", 1)))
  lat <- as.numeric(unlist(lapply(strsplit(parts[3:4], ","), "[", 1)))
  ## NOTE(review): assumes leaflet's default longlat/WGS84 CRS -- confirm
  ## against the leafem copy feature if that ever changes.
  spex(raster::extent(lon, lat), crs = "+proj=longlat +datum=WGS84")
}
#' Polygon extent
#'
#' Create Spatial Polygons with projection metadata from a 'Spatial Extent'.
#'
#' Called with no arguments will return the extent of the current 'par("usr")' setting.
#'
#' Called with a matrix, list, or data frame it will create an extent from a two columned thing.
#'
#' Called with `clipboard = TRUE` and `x` will be treated as the JSON-ic output of the clipboard copy from
#' leafem (WIP). If x is missing, it will be attempted to be read from the clipboard. Clipboard read cannot
#' work on RStudio Server, so we allow the text value to be passed in.
#' I.e. `spex(clipboard = TRUE)` will
#' read from the clipboard, `spex(tx, clipboard = TRUE)` will read from tx with value like
#' \code{'{"_southWest":{"lat":-1.307259612275665,"lng":23.411865234375},"_north...}"'}.
#'
#'
#' This function is to replace a common pattern in spatial packages which is
#' \itemize{
#' \item create an \code{\link[raster]{Extent-class}}, a bounding box in xmin,xmax,ymin,ymax but without projection metadata
#' \item coerce the Extent to \code{\link[sp]{SpatialPolygons}}
#' \item restore the 'CRS', the "coordinate reference system", i.e. projection metadata
#' \item elevate the object to be a \code{\link[sp]{SpatialPolygonsDataFrame-class}}.
#' }
#'
#' In short, this pattern exists because there is no projection metadata stored
#' with either sp's \code{\link[sp]{bbox}} or raster's \code{\link[raster]{Extent-class}}.
#'
#' @param x any object with a \code{\link[raster]{Extent-class}}
#' @param byid return a separate object for every input sub-object (not yet implemented)
#' @param .id optional name for output attribute name
#' @param ... arguments for methods
#' @param crs a projection string
#' @param clipboard WIP this special-case allows x to be the result of the leafem clipboard copy process
#' @importFrom methods as
#' @importFrom raster projection<- projection extent
#' @importFrom sp SpatialPolygonsDataFrame
#' @importFrom stats setNames
#' @return 'SpatialPolygonsDataFrame'
#' @section Warning: Please note that an extent converted to polygons consists
#' of only four unique coordinates, and so this is not necessarily suited for
#' projection transformations.
#' @examples
#' library(raster)
#' data(lux)
#' exlux <- spex(lux)
#'
#' plot(lux)
#' plot(exlux, add = TRUE)
#'
#' ## put an extent and a CRS together
#' spex(extent(0, 1, 0, 1), crs = "+proj=laea +ellps=WGS84")
#' @export
#' @seealso This pattern is displayed in the example code for \code{\link[raster]{cover}}.
# S3 generic: dispatch on the class of `x`; see spex.default and the
# sf / sfc methods below for the concrete implementations.
spex <- function(x, crs, byid = FALSE, .id, ..., clipboard = FALSE) UseMethod("spex")
#' @export
#' @name spex
spex.default <- function(x, crs = NULL, byid = FALSE, .id, ..., clipboard = FALSE) {
  ## Special case: parse a leafem clipboard-copy extent instead of using `x`.
  if (clipboard) {
    out <- if (missing(x)) parse_leaf_extent() else parse_leaf_extent(x)
    return(out)
  }
  ## With no input at all, use the extent of the current plot region.
  if (missing(x)) x <- raster::extent(graphics::par("usr"))
  ## Remember the input class for the default attribute name below.
  cls <- class(x)[1L]
  ## Resolve the CRS from the object's metadata when not supplied:
  ## the proj string first, falling back to the raw input string.
  if (is.null(crs)) {
    #crs <- raster::projection(x)
    crs <- crsmeta::crs_proj(x)
    if (is.na(crs)) {
      crs <- crsmeta::crs_input(x)
    }
  }
  ## Normalise "no CRS" to an NA character string.
  if (is.na(crs)) crs <- NA_character_
  #if (missing(crs) && raster::couldBeLonLat(x)) crs <- "+proj=longlat +datum=WGS84 +no_defs"
  ## Coerce tabular / list / numeric input to a two-column coordinate matrix
  ## (data frames are handled first since they are also lists).
  if (is.data.frame(x)) x<- as.matrix(x)
  if (is.list(x)) x <- do.call(cbind, x)
  if (is.numeric(x)) {
    x <- as.matrix(x)
    if (ncol(x) < 2) stop("matrix of 2 columns required")
    if (ncol(x) > 2) warning("only 2 columns used from input")
  }
  if (byid) {
    ## Per-sub-object extents are not implemented yet; fail loudly.
    stop("byid option not yet implemented")
    #lapply(split(x, seq(nrow(x))), raster::extent)
  } else {
    ## Coerce the Extent to SpatialPolygons (a single rectangle).
    p <- as(extent(x), 'SpatialPolygons')
  }
  ## Default attribute name records which class the extent came from.
  if (missing(.id)) {
    .id <- sprintf("%s_extent", cls)
  }
  ## Restore projection metadata; doCheckCRSArgs = FALSE skips CRS validation.
  if (is.character(crs)) {
    raster::crs(p) <- sp::CRS(crs, doCheckCRSArgs = FALSE)
  } else {
    raster::crs(p) <- crs
  }
  SpatialPolygonsDataFrame(p, setNames(data.frame(1L), .id))
}
#' Extent of simple features
#'
#' This is the simplest of the missing "raster support" for the sf package,
#' here using the xmin, xmax, ymin, ymax convention used by raster rather than
#' the transpose version favoured in sp and sf.
#' @param x object with an extent
#' @param ... unused
#' @name extent
#' @aliases Extent
#' @importFrom raster extent
extent_sf <- function(x, ...) {
  ## An sf bbox is ordered xmin, ymin, xmax, ymax; reorder to raster's
  ## xmin, xmax, ymin, ymax before constructing the Extent.
  raster::extent(attr(x[[attr(x, "sf_column")]], "bbox")[c(1, 3, 2, 4)])
}
## Register the S3 class "sf" with the S4 system so that raster's S4
## generic extent() can dispatch on sf objects.
setOldClass("sf")
setMethod(f = "extent", signature = "sf", definition = extent_sf)
#' @export
#' @name spex
spex.sf <- function(x, crs, byid = FALSE, .id, ..., clipboard = FALSE) {
  ## When no CRS is supplied, fall back to the object's own metadata:
  ## the proj string first, then the raw input string if that is NA.
  if (missing(crs)) {
    proj_crs <- crsmeta::crs_proj(x)
    crs <- if (is.na(proj_crs)) crsmeta::crs_input(x) else proj_crs
  }
  spex(extent(x), crs = crs)
}
#' @export
#' @name spex
spex.sfc <- function(x, crs, byid = FALSE, .id, ..., clipboard = FALSE) {
  ## When no CRS is supplied, fall back to the object's own metadata:
  ## the proj string first, then the raw input string if that is NA.
  if (missing(crs)) {
    proj_crs <- crsmeta::crs_proj(x)
    crs <- if (is.na(proj_crs)) crsmeta::crs_input(x) else proj_crs
  }
  ## Build the extent straight from the sfc bbox attribute, reordered to
  ## raster's xmin, xmax, ymin, ymax convention.
  bb <- attr(x, "bbox")
  spex(extent(bb[c("xmin", "xmax", "ymin", "ymax")]), crs = crs)
}
#' Axis ranges from extent
#'
#' Functions `xlim` and `ylim` return the two-value counterparts of an extent.
#'
#' Any projection metadata is dropped since this is a one-dimensional entity.
#' @param x any object with an extent understood by `spex`
#' @param ... reserved for future methods
#' @export
xlim <- function(x, ...) UseMethod("xlim")
#' @export
#' @name xlim
xlim.default <- function(x, ...) {
  ## Take the extent polygon and report its x range as c(min, max).
  ext <- spex(x)
  c(raster::xmin(ext), raster::xmax(ext))
}
#' @export
#' @name xlim
ylim <- function(x, ...) UseMethod("ylim")
#' @export
#' @name xlim
ylim.default <- function(x, ...) {
  ## Take the extent polygon and report its y range as c(min, max).
  ext <- spex(x)
  c(raster::ymin(ext), raster::ymax(ext))
}
|
####16 S rRNA Amplicon sequencing Analysis on Ollie####
# Script by: Batuhan Cagri Yapan.
# contact: byapan@mpi-bremen.de
# Last update: 31/05/2021
# Based on David Benito's and Marwa Baloza's scripts & DADA2 tutorial: https://benjjneb.github.io/dada2/tutorial.html
# Script works in R environment (R 4.0.6) on Linux Distro CentOS.
# Cutadapt is working on terminal; an R function does it automatically.
# R 4.0 and Cutadapt 3.2 are installed on Ollie, they must be called on BASH by module function e.g.:
# module load bio/R/4.0.0
# module load bio/cutadapt/3.2
# Working interactively is not recommended on Ollie's login nodes (ollie0 or ollie1)
# To work interactively ask for a proper node e.g.:
# salloc -N 1-1 -c 36 -p fat --mem 425G
# Then you can start R and work, but beware that it is hard to see graphs etc.
# If you want to put your script on queue please check script "amplicon_analysis.sl"
# Then send it to queue via:
# sbatch amplicon_analysis.sl
##########################################################################
### Four main parts in this script:
### 1)Cutadapt - for trimming primers and adapters of paired-end Illumina reads.
### 2)DADA2 - 16 S analysis from paired-end Illumina reads
### 3)Phyloseq - Analyses of microbial community structure !!not completed yet!!
##########################################################################
### Part 1 - Cutadapt
### for trimming primers and adapters of paired-end Illumina reads.
#Install and call required libraries. dada2, ggplot are already installed on Ollie.
#Install BiocManager for maintaining Bioconductor Packages' compatibility
print(Sys.time()) #to check runtime of the script
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
#call dada2
if (!is.loaded("dada2")) {
library(dada2)
}
#call ShortRead
if (!is.loaded("ShortRead")) {
library(ShortRead)
}
#call Biostrings
if (!is.loaded("Biostrings")) {
library(Biostrings)
}
#Install phyloseq; it will be called later.
BiocManager::install("phyloseq")
dir <- "/work/ollie/byapan/playground/bact_1088/Renamed" # path to the directory containing the raw sequences in fastq.gz format
#Ollie works much faster if working directory is on "/work/ollie" directory.
setwd(dir)
getwd()
list.files(dir) #all raw readings (forward and reverse) should be listed.
## Create vectors for forward and reverse readings of the samples.
# If forward and reverse fastq filenames have a name format:
# SAMPLENAME_R1_001.fastq.gz and SAMPLENAME_R2_001.fastq.gz
# If they do not; they could be arranged by file_naming_210401.sh
# Then continue as below:
# forward readings:
R1_list <- sort(list.files(dir, pattern="_R1.fastq.gz", full.names = TRUE))
# reverse readings:
R2_list <- sort(list.files(dir, pattern="_R2.fastq.gz", full.names = TRUE))
## Define forward and reverse primers.
# In order to learn primer sets used by DSET group you could check the txt file:
# primers_list_16S_tag_seq_HABITAT.txt
# In Deep-sea Mining Project primers and target regions are listed below.
# DO NOT FORGET comment out the redundant primers.
FWD <- "CCTACGGGNGGCWGCAG" ## 341F Bacteria V3-V4 forward primer sequence.
#FWD <- "GYGCASCAGKCGMGAAW" ## 349F Archaea V3-V5 forward primer sequence.
REV <- "GACTACHVGGGTATCTAATCC" ## 785R Bacteria V3-V4 reverse primer sequence.
#REV <- "GTGCTCCCCCGCCAATTCCT" ## 915R Archaea V3-V5 reverse primer sequence.
## Arrange orientation of primer sequences.
allOrients <- function(primer) {
  # Return the four orientations (forward, complement, reverse,
  # reverse-complement) of a primer as a named character vector.
  require(Biostrings)
  # Biostrings operates on DNAString objects rather than plain characters.
  seq_obj <- DNAString(primer)
  variants <- c(
    Forward    = seq_obj,
    Complement = complement(seq_obj),
    Reverse    = reverse(seq_obj),
    RevComp    = reverseComplement(seq_obj)
  )
  # Convert each DNAString back to a character string for downstream use.
  sapply(variants, toString)
}
FWD.orients <- allOrients(FWD)
REV.orients <- allOrients(REV)
FWD.orients
REV.orients
#Printing checkpoints may make it easier to see the failure point if you run the script on Ollie via the queue.
print("checkpoint0")
print(Sys.time())
### Cutadapt part
# After arranging primers it is time to make arrangements for using Cutadapt.
cutadapt <- "/global/AWIsoft/bio/cutadapt/3.2/bin/cutadapt" # This is the path for Cutadapt on AWI servers
system2(cutadapt, args = "--version") # Run shell commands from R
dir.cut <- file.path(dir, "cutadapt")
if(!dir.exists(dir.cut)) dir.create(dir.cut) #Create a subfolder for trimmed sequences.
R1_list.cut <- file.path(dir.cut, basename(R1_list))
R2_list.cut <- file.path(dir.cut, basename(R2_list))
FWD.RC <- dada2:::rc(FWD)
REV.RC <- dada2:::rc(REV)
#Trim FWD and the reverse-complement of REV off of R1 (forward reads)
# Flags define options and variables for cutadapt function.
R1.flags <- paste("-g", FWD, "-a", REV.RC)
#Trim REV and the reverse-complement of FWD off of R2 (reverse reads)
# Flags define options and variables for cutadapt function.
R2.flags <- paste("-G", REV, "-A", FWD.RC)
#Run Cutadapt
for(i in seq_along(R1_list)) {
system2(cutadapt, args = c(R1.flags, R2.flags, "-n", 2, # -n 2 required to remove FWD and REV from reads
"-o", R1_list.cut[i], "-p", R2_list.cut[i], # output files
R1_list[i], R2_list[i])) # input files
}
#rbind(FWD.ForwardReads = sapply(FWD.orients, primerHits, fn = fnFs.cut[[1]]),
# FWD.ReverseReads = sapply(FWD.orients, primerHits, fn = fnRs.cut[[1]]),
# REV.ForwardReads = sapply(REV.orients, primerHits, fn = fnFs.cut[[1]]),
# REV.ReverseReads = sapply(REV.orients, primerHits, fn = fnRs.cut[[1]]))
print("checkpoint 1")
print(Sys.time())
R1_list.cut <- sort(list.files(dir.cut, pattern = "_R1.fastq.gz", full.names = TRUE))
R2_list.cut <- sort(list.files(dir.cut, pattern = "_R2.fastq.gz", full.names = TRUE))
save.image(file="reseq_amplicon.RData")
#plotQualityProfile(R1_list.cut[1:2])
# Write a read-quality profile page for every reverse (R2) file.
# Bug fixes: `for (i in length(R2_list.cut))` iterated exactly once
# (only at i == length), and the body plotted the forward (R1) file even
# though the PDF is named for R2 and the loop is sized by the R2 list.
pdf(file="quality_graph_r2.pdf")
for (i in seq_along(R2_list.cut))
{
  plotQualityProfile(R2_list.cut[i])
}
dev.off()
#Forward and reverse fastq filenames have the format respectively:
#SAMPLE_NAME_1_R1_001.fastq.gz
#SAMPLE_NAME_1_R2_001.fastq.gz
#Extract sample names
get.sample.name <- function(fname) strsplit(basename(fname), "_")[[1]][1]
sample.names <- unname(sapply(R1_list.cut, get.sample.name))
R1_filtered <- file.path(dir.cut, "filtered", basename(R1_list.cut))
R2_filtered <- file.path(dir.cut, "filtered", basename(R2_list.cut))
#FILTER and TRIM. In this step sequences are trimmed from the end side.
#Parameters for trimming are set manually according to quality of reads.
#The quality should be checked by checking fastqc results.
#FIGARO package could help on parameter decision: https://github.com/Zymo-Research/figaro#figaro
#In order to speed up downstream processes maxEE (max number of expected errors)
#could be tightened; if the number of passing reads is too low (could be checked by parameter "out" after running)
#maxEE parameter could be increased
#For more information on error rates: https://academic.oup.com/bioinformatics/article/31/21/3476/194979
out <- filterAndTrim(R1_list.cut, R1_filtered, R2_list.cut, R2_filtered, truncLen=c(260,200),
maxN=0, maxEE=c(1,1), truncQ=2, rm.phix=TRUE,
compress=TRUE, multithread=TRUE)
# Error rate calculation is one of the most time consuming part of the run
# If you work on a HPC, multithread=TRUE makes job distributed among cores and makes it faster.
# You should be sure you have configured high number of cores and high memory
# in slurm script amplicon_analysis.sl
R1_error <- learnErrors(R1_filtered, multithread=TRUE)
R2_error <- learnErrors(R2_filtered, multithread=TRUE)
#Error rates could be plotted.
#error_plot_R1 <- plotErrors(R1_error, nominalQ=TRUE); error_plot_R1
#error_plot_R2 <- plotErrors(R2_error, nominalQ=TRUE); error_plot_R2
save.image(file="reseq_amplicon.RData")
print("checkpoint 2")
print(Sys.time())
# DEREPLICATION. To decrease memory and CPU demand for large datasets and reduce time.
# In dereplication, identical sequences counted as one unique sequence and
# according to their number of those sequence a "abundance" value is given them.
# Further steps are done by using unique sequences and abundance value,
# By that way computational resources and time are saved.
# For further information:
# see https://rdrr.io/bioc/dada2/man/derepFastq.html
# see https://benjjneb.github.io/dada2/tutorial_1_8.html
R1_dereplicated <- derepFastq(R1_filtered, verbose=TRUE)
R2_dereplicated <- derepFastq(R2_filtered, verbose=TRUE)
# Use the same sample names in the dereplicated data:
names(R1_dereplicated) <- sample.names
names(R2_dereplicated) <- sample.names
# DADA; starring of the show. Sample inference algorithm:
# dada infers sequences and resolves differences as fine as 1 nucleotide difference.
# For further information (beware paywall): https://www.nature.com/articles/nmeth.3869#methods
# pool is a very important parameter for determination of ASVs and time/resources management.
# pool=FALSE - by default samples are processed individually
# pool=TRUE - pooling to increase sensitivity (https://benjjneb.github.io/dada2/pool.html#pooling-for-sample-inference)
# pool="pseudo" - pseudo-pooled where samples are still processed individually (https://benjjneb.github.io/dada2/pseudo.html#Pseudo-pooling)
# Pooling might be important in cases such as samples from different environments are studied together.
# A sequence which is rare in an environment could be abundant in others; however, if pooling is not applied
# that rare taxa might be missed.
# if pooling is allowed by pool=TRUE it might take so long time to process high number of reads.
# pseudo pooling option could be the best option; time could be saved while keeping inference relatively sensitive
R1_dada <- dada(R1_dereplicated, err=R1_error, multithread=TRUE, pool=TRUE)
R2_dada <- dada(R2_dereplicated, err=R2_error, multithread=TRUE, pool=TRUE)
R1_dada[[1]] # To access sample "1"
R2_dada[[1]]
save.image(file="reseq_amplicon.RData")
print("checkpoint 3")
print(Sys.time())
# MERGING. Merging of matching forward and reverse reads to obtain sequence of
# the region of interest.
# In case of low number of merging reads; the parameters
# maxMismatch could be increased, minOverlap could be decreased (default is 20)
# justConcatenate could be witched to TRUE.
R1R2_merged <- mergePairs(R1_dada, R1_dereplicated, R2_dada, R2_dereplicated, justConcatenate=FALSE, verbose=TRUE)
# Inspect the merged data.frame from the first sample
head(R1R2_merged[[1]])
# Construct the amplicon sequence variant table (ASV)
# This is analogous to an OTU table but with resolution up to single-nucleotide level.
ASV_table <- makeSequenceTable(R1R2_merged)
dim(ASV_table)
# Inspect distribution of sequence lengths
table(nchar(getSequences(ASV_table)))
#hist(nchar(getSequences(ASV_table)))
# Remove chimaeras
ASV_nochim <- removeBimeraDenovo(ASV_table, method="consensus", multithread=TRUE, verbose=TRUE)
dim(ASV_nochim)
# Proportion of non-chimaeric sequences:
sum(ASV_nochim)/sum(ASV_table)
#Read counts throughout the pipeline:
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(R1_dada, getN), sapply(R2_dada, getN), sapply(R1R2_merged, getN), rowSums(ASV_nochim))
# If processing a single sample, remove the sapply calls: e.g. replace sapply(dadaFs, getN) with getN(dadaFs)
colnames(track) <- c("input", "filtered", "denoisedF", "denoisedR", "merged", "nonchim")
rownames(track) <- sample.names
#track
write.csv(track,"track_reads.csv")
saveRDS(ASV_nochim, "/work/ollie/byapan/playground/asv_bact_1088.rds")
save.image(file="reseq_amplicon.RData")
print("checkpoint 4")
print(Sys.time())
# For the studies which comprised samples from different sequencing runs
# it is better to recall ASV_tables saved as rds and continue the steps after here
# by merging them.
# TAXONOMIC ANNOTATION
# The database in the correct format can be found in the dada2 website.
# You should download the database to a directory you chhose and call it from there.
unite.ref <- "/work/ollie/byapan/playground/silva/silva_nr99_v138.1_train_set.fa.gz"
ASV_taxonomy <- assignTaxonomy(ASV_nochim,unite.ref, multithread=TRUE, verbose=TRUE)
# Add species:
unite.ref_sp <-"/work/ollie/byapan/playground/silva/silva_species_assignment_v138.1.fa.gz"
ASV_sp <- addSpecies(ASV_taxonomy, unite.ref_sp, verbose=TRUE)
saveRDS(ASV_sp, "/work/ollie/byapan/playground/asv_sp_bact_1088.rds")
#ASV_taxonomy_check <- ASV_taxonomy
#rownames(ASV_taxonomy_check) <- NULL
save.image(file="bacteria_amplicon.RData")
print("checkpoint 5")
print(Sys.time())
q()
##############################################################################################
# Analyse and plot results with phyloseq.
# Import results to phyloseq:
#library(phyloseq); packageVersion("phyloseq")
#library(ggplot2); packageVersion("ggplot2")
#theme_set(theme_bw())
# Extract the sample and ASV names:
#samples.out <- rownames(ASV_nochim)
#ASVs <- colnames(ASV_nochim)
# ASVs ID table:
#ASVs_ID <- cbind(ASVs, paste("asv", c(1:ncol(ASV_nochim)), sep=""))
# rename the ASV to asv#:
#colnames(ASV_nochim) <- paste("asv", c(1:ncol(ASV_nochim)), sep="")
#rownames(ASV_taxonomy) <- paste("asv", c(1:nrow(ASV_taxonomy)), sep="")
#ASV_taxonomy[is.na(ASV_taxonomy[,1])] <- "Unclassified" # Replace empty taxons (domain/kingdom level) with "Unclassified".
# Add sample names:
#head (samples.out)
#samples.out3<- #cbind(samples.out,c("nod111","nod112","nod113","nod117","nod118","nod119","nod173","nod75","no76","nod77","nod81","nod83","dn1","dn2","w49","w151","w159","120","121","127","128","134","135","2","22","23","43","44","50","51","57","58","78","79","80","85",,"86","9","92","93","dis05","dis06","dis07","dis08","dis14","dis15"),c("BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","GSR_ref","GSR_ref","GSR_ref","GSR_ref","GSR_ref","DISCOL","DISCOL","GSR_tri","GSR_ref","BGR_ref","BGR_r#ef","BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","GSR_tri","GSR_tri","BGR_tri","GSR_tri","GSR_tri","BGR_tri","GSR_tri","GSR_tri","DIS","DIS","DIS","DIS","DIS","DIS"))
#colnames(samples.out3) <- c("ID", "Sample","Location")
#rownames(samples.out3) <- samples.out3[,1] # Row names are samples IDs.
#samples.out3 <- as.data.frame(samples.out3)
#OTU_phyloseq3 <- otu_table(ASV_nochim, taxa_are_rows = FALSE)
#OTU_phyloseq3<- filter_taxa(OTU_phyloseq3, function (x) {sum(x > 0) > 1}, prune=TRUE) #Remove singletons
#SAMPLE_phyloseq3 <- sample_data(samples.out3)
#TAX_phyloseq3 <- tax_table(ASV_taxonomy)
#TAX_phyloseq3<- subset_taxa(TAX_phyloseq3, Kingdom=="Archaea") #Remove archaeal reads
#dada2_phyloseq4<- phyloseq(OTU_phyloseq3, TAX_phyloseq3, SAMPLE_phyloseq3) # Create a phyloseq object.
#phyloseq-class experiment-level object w/ singletons
#otu_table() OTU Table: [ 34916 taxa and 30 samples ]
#sample_data() Sample Data: [ 30 samples by 3 sample variables ]
#tax_table() Taxonomy Table: [ 34916 taxa by 6 taxonomic ranks ]
#phyloseq-class experiment-level object wtihout singletons
#otu_table() OTU Table: [ 15682 taxa and 30 samples ]
#sample_data() Sample Data: [ 30 samples by 3 sample variables ]
#tax_table() Taxonomy Table: [ 15682 taxa by 6 taxonomic ranks ]
#phy = dada2_phyloseq4
#NameTax <- function(x, ind){
# if(is.na(x[ind])){
# x[ind] <- x[ind]
# } else {
# if(ind==1){x[ind] <- paste("d", x[ind], sep="__")} else{ # Domain
# if(ind==2){x[ind] <- paste("p", x[ind], sep="__")} else{ # Phylum
# if(ind==3){x[ind] <- paste("c", x[ind], sep="__")} else{ # Class
# if(ind==4){x[ind] <- paste("o", x[ind], sep="__")} else{ # Order
# if(ind==5){x[ind] <- paste("f", x[ind], sep="__")} else{ # Family
# if(ind==6){x[ind] <- paste("g", paste(x[ind-1], x[ind], sep="_"), sep="__")} # Genus
# }
# }
# }
# }
# }
# }
# }
#}
#tax.tab <- data.frame(tax_table(phy))
#for (i in 1:7) {
# tax_table(phy)[,i] <- apply(tax.tab, 1, NameTax, ind=i)
#}
#ModifyTax <- function(x,ind){
# # xth row in the dataframe
# # ind taxonomy level to change
# if(is.na(x[ind])){
# nonNa <- which(!is.na(x[-ind])) # which taxa are not NA excepting the one we're interested in.
# maxNonNa <- max(nonNa)
# x[ind] <- x[maxNonNa]
# }else{x[ind] <- x[ind]}
#}
#for (i in 1:7) {
# tax_table(phy)[,i] <- apply(tax.tab,1,ModifyTax,ind=i)
#}
#phy_rare <- phy
#wh0 = genefilter_sample(phy_samples, filterfun_sample(function(x) x > 5), A=0.5*nsamples(phy_samples))
#GP1 = prune_taxa(wh0, phy_samples)
#GP2 = transform_sample_counts(GP1, function(x) 100 * x/sum(x))
#GP20 <- transform_sample_counts(phy_rare, function(x) sqrt(x / sum(x)))
#GP30 <- transform_sample_counts(phy_rare, function(x) if (x>=1) {x=1} else {x=0} )
#GP2 = transform_sample_counts(GP1, decostand(otu_table(GP1),"hellinger"))
#GP.ord_rare <- ordinate(phy_rare, "NMDS", "Jaccard")
#Run 17 stress 0.06218149
#... Procrustes: rmse 8.369671e-05 max resid 0.0003976089
#... Similar to previous best
#Run 18 stress 0.06218146
#... Procrustes: rmse 4.971322e-05 max resid 0.0002354726
#... Similar to previous best
#Run 19 stress 0.230557
#Run 20 stress 0.0645415
#*** Solution reached
#p11 = plot_ordination(GP2, GP.ord2, type="taxa", color="Phylum", title="taxa")
#p20 = plot_ordination(GP20, GP.ord_rare, type="samples", color="location")
#vegan_otu <- function(GP2) {
# OTU <- otu_table(GP2)
# if (taxa_are_rows(OTU)) {
# OTU <- t(OTU)
# }
# return(as(OTU, "matrix"))
#}
#metadata<- as(sample_data(phy_rare), "data.frame")
#sim_GP_rare <- anosim(otu_table(phy_rare), metadata$location, permutations = 999, distance = "jaccard", strata = NULL)
#p42 <- plot_richness(phy_rare, x="location", measures=c("Observed", "Shannon"),color="location")+
#scale_x_discrete(limits=c("DIS","BGR_ref","BGR_tri","GSR_ref","GSR_tri"))
#Call:
#anosim(x = otu_table(GP20), grouping = metadata$location, permutations = 999, distance = "bray", strata = NULL)
#Dissimilarity: bray
#ANOSIM statistic R: 0.8224
# Significance: 0.001
#Permutation: free
#Number of permutations: 999
#Upper quantiles of permutations (null model):
# 90% 95% 97.5% 99%
#0.0764 0.1067 0.1296 0.1481
#Dissimilarity ranks between and within classes:
# 0% 25% 50% 75% 100% N
#Between 4 159.75 254.5 345.25 435 360
#BGR_ref 47 72.50 92.0 101.50 118 15
#BGR_tri 75 93.50 139.0 171.50 256 15
#DIS 1 18.00 29.0 36.00 58 15
#GSR_ref 2 8.50 32.0 48.50 67 15
#GSR_tri 16 32.50 56.0 87.50 154 15
#TopNOTUs <- names(sort(taxa_sums(GP2), TRUE)[1:10])
#ent10 <- prune_taxa(TopNOTUs, GP2)
#ent10 = transform_sample_counts(ent10, function(x) 100 * x/sum(x))
#sample_names(ent10) <- c("neg0","neg1","neg2","1","120","121","127","128","134","135","2","22","23","43","44","50","51","57","58","78","79","8","85","86","9","92","93","d05","d06","d07","d08","d14","d15")
#p35=plot_bar(ent10, fill="Order", x="sample_Sample")
#print("checkpoint6")
#save.image(file="archaea_0.RData")
|
/trial/amplicon_analysis_pipeline.r
|
no_license
|
cagriyapan/hello-world
|
R
| false
| false
| 19,323
|
r
|
####16 S rRNA Amplicon sequencing Analysis on Ollie####
# Script by: Batuhan Cagri Yapan.
# contact: byapan@mpi-bremen.de
# Last update: 31/05/2021
# Based on David Benito's and Marwa Baloza's scripts & DADA2 tutorial: https://benjjneb.github.io/dada2/tutorial.html
# Script works in R environment (R 4.0.6) on Linux Distro CentOS.
# Cutadapt is working on terminal; an R function does it automatically.
# R 4.0 and Cutadapt 3.2 are installed on Ollie, they must be called on BASH by module function e.g.:
# module load bio/R/4.0.0
# module load bio/cutadapt/3.2
# Working interactively is not recommended on Ollie's login nodes (ollie0 or ollie1)
# To work interactively ask for a proper node e.g.:
# salloc -N 1-1 -c 36 -p fat --mem 425G
# Then you can start R and work, but beware that it is hard to see graphs etc.
# If you want to put your script on queue please check script "amplicon_analysis.sl"
# Then send it to queue via:
# sbatch amplicon_analysis.sl
##########################################################################
### Four main parts in this script:
### 1)Cutadapt - for trimming primers and adapters of paired-end Illumina reads.
### 2)DADA2 - 16 S analysis from paired-end Illumina reads
### 3)Phyloseq - Analyses of microbial community structure !!not completed yet!!
##########################################################################
### Part 1 - Cutadapt
### for trimming primers and adapters of paired-end Illumina reads.
#Install and call required libraries. dada2, ggplot are already installed on Ollie.
#Install BiocManager for maintaining Bioconductor Packages' compatibility
print(Sys.time()) #to check runtime of the script
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
#call dada2
if (!is.loaded("dada2")) {
library(dada2)
}
#call ShortRead
if (!is.loaded("ShortRead")) {
library(ShortRead)
}
#call Biostrings
if (!is.loaded("Biostrings")) {
library(Biostrings)
}
#Install phyloseq; it will be called later.
BiocManager::install("phyloseq")
dir <- "/work/ollie/byapan/playground/bact_1088/Renamed" # path to the directory containing the raw sequences in fastq.gz format
#Ollie works much faster if working directory is on "/work/ollie" directory.
setwd(dir)
getwd()
list.files(dir) #all raw readings (forward and reverse) should be listed.
## Create vectors for forward and reverse readings of the samples.
# If forward and reverse fastq filenames have a name format:
# SAMPLENAME_R1_001.fastq.gz and SAMPLENAME_R2_001.fastq.gz
# If they do not; they could be arranged by file_naming_210401.sh
# Then continue as below:
# forward readings:
R1_list <- sort(list.files(dir, pattern="_R1.fastq.gz", full.names = TRUE))
# reverse readings:
R2_list <- sort(list.files(dir, pattern="_R2.fastq.gz", full.names = TRUE))
## Define forward and reverse primers.
# In order to learn primer sets used by DSET group you could check the txt file:
# primers_list_16S_tag_seq_HABITAT.txt
# In Deep-sea Mining Project primers and target regions are listed below.
# DO NOT FORGET comment out the redundant primers.
FWD <- "CCTACGGGNGGCWGCAG" ## 341F Bacteria V3-V4 forward primer sequence.
#FWD <- "GYGCASCAGKCGMGAAW" ## 349F Archaea V3-V5 forward primer sequence.
REV <- "GACTACHVGGGTATCTAATCC" ## 785R Bacteria V3-V4 reverse primer sequence.
#REV <- "GTGCTCCCCCGCCAATTCCT" ## 915R Archaea V3-V5 reverse primer sequence.
## Arrange orientation of primer sequences.
allOrients <- function(primer) {
  # Return the four orientations (forward, complement, reverse,
  # reverse-complement) of a primer as a named character vector.
  require(Biostrings)
  # Biostrings operates on DNAString objects rather than plain characters.
  seq_obj <- DNAString(primer)
  variants <- c(
    Forward    = seq_obj,
    Complement = complement(seq_obj),
    Reverse    = reverse(seq_obj),
    RevComp    = reverseComplement(seq_obj)
  )
  # Convert each DNAString back to a character string for downstream use.
  sapply(variants, toString)
}
FWD.orients <- allOrients(FWD)
REV.orients <- allOrients(REV)
FWD.orients
REV.orients
#Printing checkpoints may make it easier to see the failure point if you run the script on Ollie via the queue.
print("checkpoint0")
print(Sys.time())
### Cutadapt part
# After arranging primers it is time to make arrangements for using Cutadapt.
cutadapt <- "/global/AWIsoft/bio/cutadapt/3.2/bin/cutadapt" # This is the path for Cutadapt on AWI servers
system2(cutadapt, args = "--version") # Run shell commands from R
dir.cut <- file.path(dir, "cutadapt")
if(!dir.exists(dir.cut)) dir.create(dir.cut) #Create a subfolder for trimmed sequences.
R1_list.cut <- file.path(dir.cut, basename(R1_list))
R2_list.cut <- file.path(dir.cut, basename(R2_list))
FWD.RC <- dada2:::rc(FWD)
REV.RC <- dada2:::rc(REV)
#Trim FWD and the reverse-complement of REV off of R1 (forward reads)
# Flags define options and variables for cutadapt function.
R1.flags <- paste("-g", FWD, "-a", REV.RC)
#Trim REV and the reverse-complement of FWD off of R2 (reverse reads)
# Flags define options and variables for cutadapt function.
R2.flags <- paste("-G", REV, "-A", FWD.RC)
#Run Cutadapt
for(i in seq_along(R1_list)) {
system2(cutadapt, args = c(R1.flags, R2.flags, "-n", 2, # -n 2 required to remove FWD and REV from reads
"-o", R1_list.cut[i], "-p", R2_list.cut[i], # output files
R1_list[i], R2_list[i])) # input files
}
# Optional sanity check (kept from the dada2 ITS tutorial): count remaining
# primer hits after trimming. NOTE(review): it references fnFs.cut/fnRs.cut
# and primerHits, which are not defined in this script as written.
#rbind(FWD.ForwardReads = sapply(FWD.orients, primerHits, fn = fnFs.cut[[1]]),
# FWD.ReverseReads = sapply(FWD.orients, primerHits, fn = fnRs.cut[[1]]),
# REV.ForwardReads = sapply(REV.orients, primerHits, fn = fnFs.cut[[1]]),
# REV.ReverseReads = sapply(REV.orients, primerHits, fn = fnRs.cut[[1]]))
print("checkpoint 1")
print(Sys.time())
# Re-list the primer-trimmed reads written by cutadapt.
R1_list.cut <- sort(list.files(dir.cut, pattern = "_R1.fastq.gz", full.names = TRUE))
R2_list.cut <- sort(list.files(dir.cut, pattern = "_R2.fastq.gz", full.names = TRUE))
save.image(file="reseq_amplicon.RData")
#plotQualityProfile(R1_list.cut[1:2])
# Write per-file quality profiles for the reverse (R2) reads to a PDF.
# Bug fix: the original loop was `for (i in length(R2_list.cut))`, which
# iterates over the single scalar length(R2_list.cut) and so plotted only one
# file; it also plotted R1_list.cut[i] into a file named "..._r2". Iterate
# over every index and plot the R2 reads the filename promises.
pdf(file="quality_graph_r2.pdf")
for (i in seq_along(R2_list.cut)) {
  plotQualityProfile(R2_list.cut[i])
}
dev.off()
#Forward and reverse fastq filenames have the format respectively:
#SAMPLE_NAME_1_R1_001.fastq.gz
#SAMPLE_NAME_1_R2_001.fastq.gz
#Extract sample names: everything before the first underscore of the basename.
get.sample.name <- function(fname) strsplit(basename(fname), "_")[[1]][1]
sample.names <- unname(sapply(R1_list.cut, get.sample.name))
# Destination paths for quality-filtered reads (subfolder "filtered").
R1_filtered <- file.path(dir.cut, "filtered", basename(R1_list.cut))
R2_filtered <- file.path(dir.cut, "filtered", basename(R2_list.cut))
#FILTER and TRIM. In this step sequences are trimmed from the end side.
#Parameters for trimming are set manually according to quality of reads.
#The quality should be checked by checking fastqc results.
#FIGARO package could help on parameter decision: https://github.com/Zymo-Research/figaro#figaro
#In order to speed up downstream processes maxEE (max number of expected errors)
#could be tightened; if the number of passing reads is too low (could be checked by parameter "out" after running)
#maxEE parameter could be increased
#For more information on error rates: https://academic.oup.com/bioinformatics/article/31/21/3476/194979
# truncLen=c(260,200): forward reads truncated at 260 bp, reverse at 200 bp.
# `out` is a matrix with reads.in / reads.out counts per sample.
out <- filterAndTrim(R1_list.cut, R1_filtered, R2_list.cut, R2_filtered, truncLen=c(260,200),
maxN=0, maxEE=c(1,1), truncQ=2, rm.phix=TRUE,
compress=TRUE, multithread=TRUE)
# Error rate calculation is one of the most time consuming part of the run
# If you work on a HPC, multithread=TRUE makes job distributed among cores and makes it faster.
# You should be sure you have configured high number of cores and high memory
# in slurm script amplicon_analysis.sl
R1_error <- learnErrors(R1_filtered, multithread=TRUE)
R2_error <- learnErrors(R2_filtered, multithread=TRUE)
#Error rates could be plotted.
#error_plot_R1 <- plotErrors(R1_error, nominalQ=TRUE); error_plot_R1
#error_plot_R2 <- plotErrors(R2_error, nominalQ=TRUE); error_plot_R2
# Checkpoint the workspace so a crashed run can resume from here.
save.image(file="reseq_amplicon.RData")
print("checkpoint 2")
print(Sys.time())
# DEREPLICATION. To decrease memory and CPU demand for large datasets and reduce time.
# In dereplication, identical sequences are counted as one unique sequence and
# an "abundance" value is recorded for each unique sequence.
# Further steps operate on unique sequences plus their abundance values,
# which saves computational resources and time.
# For further information:
# see https://rdrr.io/bioc/dada2/man/derepFastq.html
# see https://benjjneb.github.io/dada2/tutorial_1_8.html
R1_dereplicated <- derepFastq(R1_filtered, verbose=TRUE)
R2_dereplicated <- derepFastq(R2_filtered, verbose=TRUE)
# Use the same sample names in the dereplicated data:
names(R1_dereplicated) <- sample.names
names(R2_dereplicated) <- sample.names
# DADA; star of the show. Sample inference algorithm:
# dada infers sequences and resolves differences as fine as 1 nucleotide difference.
# For further information (beware paywall): https://www.nature.com/articles/nmeth.3869#methods
# pool is a very important parameter for determination of ASVs and time/resources management.
# pool=FALSE - by default samples are processed individually
# pool=TRUE - pooling to increase sensitivity (https://benjjneb.github.io/dada2/pool.html#pooling-for-sample-inference)
# pool="pseudo" - pseudo-pooled where samples are still processed individually (https://benjjneb.github.io/dada2/pseudo.html#Pseudo-pooling)
# Pooling might be important in cases such as samples from different environments are studied together.
# A sequence which is rare in an environment could be abundant in others; however, if pooling is not applied
# that rare taxa might be missed.
# if pooling is allowed by pool=TRUE it might take so long time to process high number of reads.
# pseudo pooling option could be the best option; time could be saved while keeping inference relatively sensitive
# NOTE(review): the comments above recommend pool="pseudo" for speed, but the
# calls below use pool=TRUE (full pooling) -- confirm this is intended.
R1_dada <- dada(R1_dereplicated, err=R1_error, multithread=TRUE, pool=TRUE)
R2_dada <- dada(R2_dereplicated, err=R2_error, multithread=TRUE, pool=TRUE)
R1_dada[[1]] # To access sample "1"
R2_dada[[1]]
# Checkpoint the workspace before the merging step.
save.image(file="reseq_amplicon.RData")
print("checkpoint 3")
print(Sys.time())
# MERGING. Merging of matching forward and reverse reads to obtain sequence of
# the region of interest.
# In case of low number of merging reads; the parameters
# maxMismatch could be increased, minOverlap could be decreased (default is 20)
# justConcatenate could be switched to TRUE.
R1R2_merged <- mergePairs(R1_dada, R1_dereplicated, R2_dada, R2_dereplicated, justConcatenate=FALSE, verbose=TRUE)
# Inspect the merged data.frame from the first sample
head(R1R2_merged[[1]])
# Construct the amplicon sequence variant table (ASV)
# This is analogous to an OTU table but with resolution up to single-nucleotide level.
ASV_table <- makeSequenceTable(R1R2_merged)
dim(ASV_table)
# Inspect distribution of sequence lengths
table(nchar(getSequences(ASV_table)))
#hist(nchar(getSequences(ASV_table)))
# Remove chimaeras
ASV_nochim <- removeBimeraDenovo(ASV_table, method="consensus", multithread=TRUE, verbose=TRUE)
dim(ASV_nochim)
# Proportion of non-chimaeric sequences (close to 1 is good):
sum(ASV_nochim)/sum(ASV_table)
#Read counts throughout the pipeline:
# getN: total number of reads represented in a dada/merger object.
getN <- function(x) sum(getUniques(x))
# `out` contributes the input/filtered columns; the remaining four columns
# track reads surviving denoising (F/R), merging and chimera removal.
track <- cbind(out, sapply(R1_dada, getN), sapply(R2_dada, getN), sapply(R1R2_merged, getN), rowSums(ASV_nochim))
# If processing a single sample, remove the sapply calls: e.g. replace sapply(dadaFs, getN) with getN(dadaFs)
colnames(track) <- c("input", "filtered", "denoisedF", "denoisedR", "merged", "nonchim")
rownames(track) <- sample.names
#track
write.csv(track,"track_reads.csv")
# Persist the chimera-free ASV table for later cross-run merging.
saveRDS(ASV_nochim, "/work/ollie/byapan/playground/asv_bact_1088.rds")
save.image(file="reseq_amplicon.RData")
print("checkpoint 4")
print(Sys.time())
# For the studies which comprised samples from different sequencing runs
# it is better to recall ASV_tables saved as rds and continue the steps after here
# by merging them.
# TAXONOMIC ANNOTATION
# The database in the correct format can be found in the dada2 website.
# You should download the database to a directory you choose and call it from there.
unite.ref <- "/work/ollie/byapan/playground/silva/silva_nr99_v138.1_train_set.fa.gz"
ASV_taxonomy <- assignTaxonomy(ASV_nochim,unite.ref, multithread=TRUE, verbose=TRUE)
# Add species: exact-matching species-level assignment on top of the
# genus-level taxonomy table.
unite.ref_sp <-"/work/ollie/byapan/playground/silva/silva_species_assignment_v138.1.fa.gz"
ASV_sp <- addSpecies(ASV_taxonomy, unite.ref_sp, verbose=TRUE)
saveRDS(ASV_sp, "/work/ollie/byapan/playground/asv_sp_bact_1088.rds")
#ASV_taxonomy_check <- ASV_taxonomy
#rownames(ASV_taxonomy_check) <- NULL
# NOTE(review): every earlier checkpoint saved to "reseq_amplicon.RData" but
# this one writes "bacteria_amplicon.RData" -- confirm the change is intended.
save.image(file="bacteria_amplicon.RData")
print("checkpoint 5")
print(Sys.time())
# q() terminates the R session here; everything below this line is
# commented-out downstream (phyloseq) analysis and is never executed.
q()
##############################################################################################
# Analyse and plot results with phyloseq.
# Import results to phyloseq:
#library(phyloseq); packageVersion("phyloseq")
#library(ggplot2); packageVersion("ggplot2")
#theme_set(theme_bw())
# Extract the sample and ASV names:
#samples.out <- rownames(ASV_nochim)
#ASVs <- colnames(ASV_nochim)
# ASVs ID table:
#ASVs_ID <- cbind(ASVs, paste("asv", c(1:ncol(ASV_nochim)), sep=""))
# rename the ASV to asv#:
#colnames(ASV_nochim) <- paste("asv", c(1:ncol(ASV_nochim)), sep="")
#rownames(ASV_taxonomy) <- paste("asv", c(1:nrow(ASV_taxonomy)), sep="")
#ASV_taxonomy[is.na(ASV_taxonomy[,1])] <- "Unclassified" # Replace empty taxons (domain/kingdom level) with "Unclassified".
# Add sample names:
#head (samples.out)
#samples.out3<- #cbind(samples.out,c("nod111","nod112","nod113","nod117","nod118","nod119","nod173","nod75","no76","nod77","nod81","nod83","dn1","dn2","w49","w151","w159","120","121","127","128","134","135","2","22","23","43","44","50","51","57","58","78","79","80","85",,"86","9","92","93","dis05","dis06","dis07","dis08","dis14","dis15"),c("BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","GSR_ref","GSR_ref","GSR_ref","GSR_ref","GSR_ref","DISCOL","DISCOL","GSR_tri","GSR_ref","BGR_ref","BGR_r#ef","BGR_ref","BGR_ref","BGR_ref","BGR_ref","BGR_ref","GSR_tri","GSR_tri","BGR_tri","GSR_tri","GSR_tri","BGR_tri","GSR_tri","GSR_tri","DIS","DIS","DIS","DIS","DIS","DIS"))
#colnames(samples.out3) <- c("ID", "Sample","Location")
#rownames(samples.out3) <- samples.out3[,1] # Row names are samples IDs.
#samples.out3 <- as.data.frame(samples.out3)
#OTU_phyloseq3 <- otu_table(ASV_nochim, taxa_are_rows = FALSE)
#OTU_phyloseq3<- filter_taxa(OTU_phyloseq3, function (x) {sum(x > 0) > 1}, prune=TRUE) #Remove singletons
#SAMPLE_phyloseq3 <- sample_data(samples.out3)
#TAX_phyloseq3 <- tax_table(ASV_taxonomy)
#TAX_phyloseq3<- subset_taxa(TAX_phyloseq3, Kingdom=="Archaea") #Remove archaeal reads
#dada2_phyloseq4<- phyloseq(OTU_phyloseq3, TAX_phyloseq3, SAMPLE_phyloseq3) # Create a phyloseq object.
#phyloseq-class experiment-level object w/ singletons
#otu_table() OTU Table: [ 34916 taxa and 30 samples ]
#sample_data() Sample Data: [ 30 samples by 3 sample variables ]
#tax_table() Taxonomy Table: [ 34916 taxa by 6 taxonomic ranks ]
#phyloseq-class experiment-level object wtihout singletons
#otu_table() OTU Table: [ 15682 taxa and 30 samples ]
#sample_data() Sample Data: [ 30 samples by 3 sample variables ]
#tax_table() Taxonomy Table: [ 15682 taxa by 6 taxonomic ranks ]
#phy = dada2_phyloseq4
#NameTax <- function(x, ind){
# if(is.na(x[ind])){
# x[ind] <- x[ind]
# } else {
# if(ind==1){x[ind] <- paste("d", x[ind], sep="__")} else{ # Domain
# if(ind==2){x[ind] <- paste("p", x[ind], sep="__")} else{ # Phylum
# if(ind==3){x[ind] <- paste("c", x[ind], sep="__")} else{ # Class
# if(ind==4){x[ind] <- paste("o", x[ind], sep="__")} else{ # Order
# if(ind==5){x[ind] <- paste("f", x[ind], sep="__")} else{ # Family
# if(ind==6){x[ind] <- paste("g", paste(x[ind-1], x[ind], sep="_"), sep="__")} # Genus
# }
# }
# }
# }
# }
# }
# }
#}
#tax.tab <- data.frame(tax_table(phy))
#for (i in 1:7) {
# tax_table(phy)[,i] <- apply(tax.tab, 1, NameTax, ind=i)
#}
#ModifyTax <- function(x,ind){
# # xth row in the dataframe
# # ind taxonomy level to change
# if(is.na(x[ind])){
# nonNa <- which(!is.na(x[-ind])) # which taxa are not NA excepting the one we're interested in.
# maxNonNa <- max(nonNa)
# x[ind] <- x[maxNonNa]
# }else{x[ind] <- x[ind]}
#}
#for (i in 1:7) {
# tax_table(phy)[,i] <- apply(tax.tab,1,ModifyTax,ind=i)
#}
#phy_rare <- phy
#wh0 = genefilter_sample(phy_samples, filterfun_sample(function(x) x > 5), A=0.5*nsamples(phy_samples))
#GP1 = prune_taxa(wh0, phy_samples)
#GP2 = transform_sample_counts(GP1, function(x) 100 * x/sum(x))
#GP20 <- transform_sample_counts(phy_rare, function(x) sqrt(x / sum(x)))
#GP30 <- transform_sample_counts(phy_rare, function(x) if (x>=1) {x=1} else {x=0} )
#GP2 = transform_sample_counts(GP1, decostand(otu_table(GP1),"hellinger"))
#GP.ord_rare <- ordinate(phy_rare, "NMDS", "Jaccard")
#Run 17 stress 0.06218149
#... Procrustes: rmse 8.369671e-05 max resid 0.0003976089
#... Similar to previous best
#Run 18 stress 0.06218146
#... Procrustes: rmse 4.971322e-05 max resid 0.0002354726
#... Similar to previous best
#Run 19 stress 0.230557
#Run 20 stress 0.0645415
#*** Solution reached
#p11 = plot_ordination(GP2, GP.ord2, type="taxa", color="Phylum", title="taxa")
#p20 = plot_ordination(GP20, GP.ord_rare, type="samples", color="location")
#vegan_otu <- function(GP2) {
# OTU <- otu_table(GP2)
# if (taxa_are_rows(OTU)) {
# OTU <- t(OTU)
# }
# return(as(OTU, "matrix"))
#}
#metadata<- as(sample_data(phy_rare), "data.frame")
#sim_GP_rare <- anosim(otu_table(phy_rare), metadata$location, permutations = 999, distance = "jaccard", strata = NULL)
#p42 <- plot_richness(phy_rare, x="location", measures=c("Observed", "Shannon"),color="location")+
#scale_x_discrete(limits=c("DIS","BGR_ref","BGR_tri","GSR_ref","GSR_tri"))
#Call:
#anosim(x = otu_table(GP20), grouping = metadata$location, permutations = 999, distance = "bray", strata = NULL)
#Dissimilarity: bray
#ANOSIM statistic R: 0.8224
# Significance: 0.001
#Permutation: free
#Number of permutations: 999
#Upper quantiles of permutations (null model):
# 90% 95% 97.5% 99%
#0.0764 0.1067 0.1296 0.1481
#Dissimilarity ranks between and within classes:
# 0% 25% 50% 75% 100% N
#Between 4 159.75 254.5 345.25 435 360
#BGR_ref 47 72.50 92.0 101.50 118 15
#BGR_tri 75 93.50 139.0 171.50 256 15
#DIS 1 18.00 29.0 36.00 58 15
#GSR_ref 2 8.50 32.0 48.50 67 15
#GSR_tri 16 32.50 56.0 87.50 154 15
#TopNOTUs <- names(sort(taxa_sums(GP2), TRUE)[1:10])
#ent10 <- prune_taxa(TopNOTUs, GP2)
#ent10 = transform_sample_counts(ent10, function(x) 100 * x/sum(x))
#sample_names(ent10) <- c("neg0","neg1","neg2","1","120","121","127","128","134","135","2","22","23","43","44","50","51","57","58","78","79","8","85","86","9","92","93","d05","d06","d07","d08","d14","d15")
#p35=plot_bar(ent10, fill="Order", x="sample_Sample")
#print("checkpoint6")
#save.image(file="archaea_0.RData")
|
library(testthat)
library(AdapteR)
##' Run a single testthat file, honouring skip/runonly filters.
##'
##' @param f the test file to run
##' @param ask if TRUE, interactively confirm before running the file
##' @param runonly a regexp on the filename; files NOT matching it are skipped
##' @param skip a regexp on the filename; matching files are skipped
##' @param ... passed on to test_file
##' @return the test_file report, or an empty data.frame when the file was
##'   skipped or the user declined to run it
runMyTestFile <- function(f, ask=FALSE, runonly=NULL, skip=NULL, ...) {
    # Skip files matching `skip`, or not matching `runonly`, so that the
    # aggregated results stay a list of data.frame-like objects.
    if ((!is.null(skip) && grepl(skip, f)) ||
        (!is.null(runonly) && !grepl(runonly, f))) {
        cat(paste0("skipped ", f, "\n"))
        return(data.frame())
    }
    cat(paste0("testing ", f, "\n"))
    if (ask && readline("start (y?)") != "y") {
        # Bug fix: the original returned invisible NULL here, inconsistent
        # with the data.frame() returned on the skip paths above.
        return(data.frame())
    }
    # Silence SQL debugging while tests run; restore the previous value on
    # exit instead of leaking options(debugSQL=FALSE) into the session.
    old <- options(debugSQL = FALSE)
    on.exit(options(old), add = TRUE)
    tryCatch(
        test_file(f, ...),
        error = function(e) print(e))
}
# Regexp selecting which test files to (re-)run; ".*" runs everything.
checkagain <- ".*"
results <- list()
# Bug fix: the original used plyr::llply(), but plyr is never attached (only
# testthat and AdapteR are loaded at the top of this script), so these calls
# would fail with "could not find function 'llply'" unless a dependency
# happens to attach plyr. base::lapply() is a drop-in replacement here
# (llply without .progress behaves exactly like lapply).
results$testthat <- lapply(
    find_test_scripts("testthat"),
    runMyTestFile,
    ask=FALSE,
    runonly=checkagain)
results$testsuite <- lapply(
    find_test_scripts("testsuite"),
    runMyTestFile,
    ask=FALSE,
    runonly=checkagain)
results$limitations <- lapply(find_test_scripts("limitations"),
                              runMyTestFile)
|
/tests/alltests.R
|
no_license
|
amalshri/AdapteR
|
R
| false
| false
| 1,275
|
r
|
library(testthat)
library(AdapteR)
##' Run a single testthat file, honouring skip/runonly filters.
##'
##' @param f the test file to run
##' @param ask if TRUE, interactively confirm before running the file
##' @param runonly a regexp on the filename; files NOT matching it are skipped
##' @param skip a regexp on the filename; matching files are skipped
##' @param ... passed on to test_file
##' @return the test_file report, or an empty data.frame when the file was
##'   skipped or the user declined to run it
runMyTestFile <- function(f, ask=FALSE, runonly=NULL, skip=NULL, ...) {
    # Skip files matching `skip`, or not matching `runonly`, so that the
    # aggregated results stay a list of data.frame-like objects.
    if ((!is.null(skip) && grepl(skip, f)) ||
        (!is.null(runonly) && !grepl(runonly, f))) {
        cat(paste0("skipped ", f, "\n"))
        return(data.frame())
    }
    cat(paste0("testing ", f, "\n"))
    if (ask && readline("start (y?)") != "y") {
        # Bug fix: the original returned invisible NULL here, inconsistent
        # with the data.frame() returned on the skip paths above.
        return(data.frame())
    }
    # Silence SQL debugging while tests run; restore the previous value on
    # exit instead of leaking options(debugSQL=FALSE) into the session.
    old <- options(debugSQL = FALSE)
    on.exit(options(old), add = TRUE)
    tryCatch(
        test_file(f, ...),
        error = function(e) print(e))
}
# Regexp selecting which test files to (re-)run; ".*" runs everything.
checkagain <- ".*"
results <- list()
# Bug fix: the original used plyr::llply(), but plyr is never attached (only
# testthat and AdapteR are loaded at the top of this script), so these calls
# would fail with "could not find function 'llply'" unless a dependency
# happens to attach plyr. base::lapply() is a drop-in replacement here
# (llply without .progress behaves exactly like lapply).
results$testthat <- lapply(
    find_test_scripts("testthat"),
    runMyTestFile,
    ask=FALSE,
    runonly=checkagain)
results$testsuite <- lapply(
    find_test_scripts("testsuite"),
    runMyTestFile,
    ask=FALSE,
    runonly=checkagain)
results$limitations <- lapply(find_test_scripts("limitations"),
                              runMyTestFile)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.