## ---- /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615765557-test.R (repo: akhikolla/updatedatatype-list3) ----
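## These inputs appear to be auto-generated fuzz-test cases (AFL + valgrind runs,
## likely produced by RcppDeepState) that feed extreme double values into the
## compiled routine exercised below.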
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.5033142666043e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)

## ---- /dependencies/secrets_example.R (repo: Sillson/friaR) ----
#######################
## Example SQL Creds ##
#######################
sql_db <-"some-db"
sql_usr <-"huxley"
sql_pw<-"!" | /dependencies/secrets_example.R | no_license | Sillson/friaR | R | false | false | 121 | r | #######################
## Example SQL Creds ##
#######################
sql_db <-"some-db"
sql_usr <-"huxley"
sql_pw<-"!" |
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31582586514531e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)

## ---- /R/mi.anova.R (repo: stefvanbuuren/miceadds) ----
## File Name: mi.anova.R
## File Version: 0.34
mi.anova <- function( mi.res, formula, type=2 ){
# INPUT:
# mi.res ... mids object (from mice imputation function)
# formula ... formula for ANOVA model (variable names must be in colnames(mi.list[[1]]), ...
# converting MICE object to mi.list
if (type==3){
TAM::require_namespace_msg("car")
}
mi.list <- mi.res
if( class(mi.list)=="mids.1chain" ){
mi.list <- mi.list$midsobj
}
if( class(mi.list)=="mids" ){
# number of imputations
m <- mi.list$m
# list of completed datasets
h1 <- list( rep("", m ))
for (ii in 1:m){
h1[[ii]] <- as.data.frame( mice::complete( mi.list, ii ) )
}
mi.list <- h1
}
# converting mi.norm objects
if (class(mi.res)=="mi.norm" ){
mi.list <- mi.list$imp.data
}
#**** type II sum of squares
if ( type==2){
anova.imp0 <- lapply( mi.list, FUN=function(dat){
stats::lm( formula, data=dat ) } )
anova.imp <- lapply( anova.imp0, FUN=function( obj){
summary( stats::aov(obj))
} )
}
#**** type III sum of squares
if (type==3){
Nimp <- length(mi.list)
vars <- all.vars( stats::as.formula( formula ))[-1]
VV <- length(vars)
# define contrasts
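        # sum-to-zero contrasts (contr.sum) are assigned to the factors so that
        # the type III sums of squares computed below are meaningful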
ma_contrasts <- as.list(1:VV)
names(ma_contrasts) <- vars
dat <- mi.list[[1]]
for (vv in 1:VV){
ma_contrasts[[ vars[vv] ]] <- "contr.sum"
if ( ! is.factor( dat[, vars[vv] ] ) ){
ma_contrasts[[ vars[vv] ]] <- NULL
}
}
# estimate linear model
anova.imp0 <- lapply( as.list( 1:Nimp), FUN=function(ii){
dat <- mi.list[[ii]]
mod1 <- stats::lm( formula, data=dat, contrasts=ma_contrasts)
return(mod1)
} )
# compute summary
anova.imp <- lapply( as.list( 1:Nimp), FUN=function( ii ){
obj <- anova.imp0[[ii]]
car::Anova(obj, type=3)
}
)
}
# number of F statistics to be evaluated
if (type==2){
FF <- nrow( anova.imp[[1]][[1]] ) - 1
}
if (type==3){
FF <- nrow(anova.imp[[1]]["F value"])-2
}
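    # pool the F statistics across the imputed datasets; micombine.F (miceadds)
    # implements a D2-type combination of the repeated F values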
anova.imp.inf <- t( sapply( 1:FF, FUN=function(ff){
micombine.F( sapply( 1:( length(anova.imp) ), FUN=function(ii){
if ( type==2 ){
r1 <- anova.imp[[ii]][[1]]$'F value'[ff]
}
if ( type==3 ){
r1 <- anova.imp[[ii]]$'F value'[ff+1]
}
return(r1)
} ),
df1=ifelse (type==2, anova.imp[[1]][[1]]$Df[ff],
anova.imp[[1]]["Df"][ff+1,1] ),
display=FALSE )
#Revalpr("anova.imp[[1]][[1]]$Df[ff]")
} ) )
# ANOVA results
res <- anova.imp.inf[, c(3,4,1,2) ]
res <- matrix( res, ncol=4 )
res[,3] <- round( res[,3], 4 )
res[,4] <- round( res[,4], 6 )
g1 <- rownames( anova.imp[[1]][[1]] )[1:FF]
if (type==3){ g1 <- rownames( anova.imp[[1]] )[1 + 1:FF] }
rownames(res) <- g1
res <- data.frame(res)
# compute eta squared and partial eta squared coefficients
if (type==2){
SS <- rowMeans( matrix( unlist( lapply( anova.imp, FUN=function(ll){
ll[[1]][,2] } ) ), ncol=length(mi.list) ) )
}
if (type==3){
SS <- rowMeans( matrix( unlist( lapply( anova.imp, FUN=function(ll){
l2 <- ll["Sum Sq"][-1,1]
return(l2)
} ) ), ncol=length(mi.list) ) )
}
# calculate (average) R squared
r.squared <- sum(SS[ - (FF+1) ]) / sum(SS)
res$eta2 <- round( SS[ - ( FF + 1 ) ] / sum( SS ), 6 )
res$partial.eta2 <- round( SS[ - (FF+1) ] / ( SS[ - (FF+1) ] + SS[ FF + 1 ] ), 6 )
g1 <- c("F value", "Pr(>F)" )
colnames(res)[3:4] <- g1
colnames(res)[1:2] <- c("df1", "df2")
c1 <- colnames(res)
res <- rbind( res, res[1,] )
rownames( res)[ nrow(res) ] <- "Residual"
res[ nrow(res), ] <- NA
res <- data.frame( "SSQ"=SS, res )
colnames(res)[-1] <- c1
cat("Univariate ANOVA for Multiply Imputed Data",
paste0("(Type ", type, ")" ), " \n\n")
cat("lm Formula: ", formula )
cat( paste( "\nR^2=", round(r.squared, 4 ), sep=""), "\n" )
cat("..........................................................................\n")
cat("ANOVA Table \n" )
print( round( res,5) )
invisible( list( "r.squared"=r.squared, "anova.table"=res, type=type ) )
}
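
## Illustrative usage (sketch only, not part of the package source; assumes the
## 'nhanes2' example data shipped with the mice package):
## imp <- mice::mice(mice::nhanes2, m = 5, printFlag = FALSE)
## mi.anova(mi.res = imp, formula = "bmi ~ age + hyp", type = 2)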
## ---- /Final_Project_R.R (repo: siddharth0305/Diabetes) ----
library(class)
library(caret)
# install.packages('ggplot2')   # run once if ggplot2 is not yet installed
library(ggplot2)
library(rpart)
library(rpart.plot)
library(randomForest)
library(e1071)
set.seed(123)
path = "E:\\Imarticus\\R Final Project\\dataset_diabetes\\diabetic_data.csv "
diab = read.csv(path,header=T)
colnames(diab)
head(diab)
ncol(diab)
nrow(diab)
str(diab)
# check for Nulls, Zeroes for all columns
col_name = colnames(diab) [apply(diab, 2, function(n) any(is.na(n)))]
if(length(col_name) > 0) print("NULLs present") else print("No NULLs")
col_name = colnames(diab) [apply(diab, 2, function(n) any(n == ""))]
if(length(col_name) > 0) print("Blanks present") else print("No Blanks")
col_name = colnames(diab) [apply(diab, 2, function(n) any(n==0))]
if(length(col_name) > 0) print("0's Present") else print("No 0's")
print(col_name)
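# note: zeros are legitimate values in several count columns (e.g. number_emergency,
# number_outpatient), so the 0-check above is informational rather than a data problem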
#according to the data description, admission_type_id, admission_source_id and discharge_disposition_id are categorical
#therefore converting them to factor format
diab$admission_type_id=as.factor(diab$admission_type_id)
diab$discharge_disposition_id=as.factor(diab$discharge_disposition_id)
diab$admission_source_id=as.factor(diab$admission_source_id)
#according to discharge_disposition_id, some of
#the categories correspond to expired patients, therefore that data is irrelevant
#Expired patient data is removed
diab <-diab[!(diab$discharge_disposition_id %in% c(11,12,19,20,21)),]
#Univariate Analysis
namesCol=colnames(diab)
#For Race
levels(diab$race)
table(diab$race)
#since '?' denotes unknown race, we put it in the Other category
levels(diab$race)[levels(diab$race)=="?"] <- "Other"
#Gender
table(diab$gender)
#Since there are very few Unknown/Invalid values we can replace them with the mode (Female)
levels(diab$gender)[levels(diab$gender)=="Unknown/Invalid"] <- "Female"
#Age
table(diab$age)
#Weight
table(diab$weight)
#since most of the weight values are missing we will delete this column
diab$weight=NULL
#payer code
table(diab$payer_code)
levels(diab$payer_code)[levels(diab$payer_code)=="?"] <- "Other"
#medical Speciality
table(diab$medical_specialty)
levels(diab$medical_specialty)[levels(diab$medical_specialty)=="?"] <- "Unknown"
#reducing level based on IDS mapping
diab$admission_type_id=as.factor(diab$admission_type_id)
levels(diab$admission_type_id)[levels(diab$admission_type_id)=='6' | levels(diab$admission_type_id)=='8']= '5'
levels(diab$admission_type_id)[levels(diab$admission_type_id)=='1' | levels(diab$admission_type_id)=='2' | levels(diab$admission_type_id)=='4']= '7'
diab$admission_source_id=as.factor(diab$admission_source_id)
#diab$time_in_hospital=as.factor(diab$time_in_hospital) # converted it to factor variable because of only 14 values present
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='15' | levels(diab$admission_source_id)=='17' | levels(diab$admission_source_id)=='20' | levels(diab$admission_source_id)=='21']='9'
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='2' | levels(diab$admission_source_id)=='3']='1'
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='11' | levels(diab$admission_source_id)=='23' | levels(diab$admission_source_id)=='24']='8'
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='12' | levels(diab$admission_source_id)=='13' | levels(diab$admission_source_id)=='14']='7'
levels(diab$admission_source_id)[levels(diab$admission_source_id)!='1' & levels(diab$admission_source_id)!='8' & levels(diab$admission_source_id)!='7'& levels(diab$admission_source_id)!='9']='4'
table(diab$admission_source_id)
diab$discharge_disposition_id=as.factor(diab$discharge_disposition_id)
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id)=='13']='1'
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id) %in% c('19','20','21')]='11'
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id) %in% c('25','26')]='18'
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id) %in% c('3','4','5','6','8','12','15','10','14','16','17','22','23','24','30','27','28','29')]='2'
table(diab$discharge_disposition_id)
levels(diab$medical_specialty)
100*prop.table(table(diab$medical_specialty))
str(diab)
str(diab$num_medications)
# removing columns which are not required
diab$encounter_id = NULL
diab$patient_nbr = NULL
diab$weight = NULL
diab$payer_code = NULL
diab$medical_specialty = NULL
diab$citoglipton = NULL
diab$examide = NULL
ncol(diab)
str(diab)
cor(diab[8:13])
#Diagnosis 1
table(diab$diag_1) #since it has too many levels we will group them
levels(diab$diag_1)[levels(diab$diag_1) %in% c(390:459, 785)] <- "Circulatory"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(460:519, 786)] <- "Respiratory"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(520:579, 787)] <- "Digestive"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(seq.default(from = 250,to = 250.99,by =0.01))] <- "Diabetes"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(800:999)] <- "Injury"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(710:739)] <- "Musculoskeletal"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(580:629,788)] <- "Genitourinary"
levels(diab$diag_1)[levels(diab$diag_1) %in% c(140:239,780,781,784,790:799,240:249,251:279,680:709,782,001:139)] <- "Neoplasms"
Defined=c("Circulatory","Respiratory","Digestive","Diabetes","Injury","Musculoskeletal","Genitourinary","Neoplasms")
levels(diab$diag_1)[!(levels(diab$diag_1) %in% Defined)] <- "Other"
table(diab$diag_1)#Grouped levels by ICD9 codes
#Diagnosis 2
table(diab$diag_2) #since it has too many levels we will group them
levels(diab$diag_2)[levels(diab$diag_2) %in% c(390:459, 785)] <- "Circulatory"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(460:519, 786)] <- "Respiratory"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(520:579, 787)] <- "Digestive"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(seq.default(from = 250,to = 250.99,by =0.01))] <- "Diabetes"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(800:999)] <- "Injury"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(710:739)] <- "Musculoskeletal"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(580:629,788)] <- "Genitourinary"
levels(diab$diag_2)[levels(diab$diag_2) %in% c(140:239,780,781,784,790:799,240:249,251:279,680:709,782,001:139)] <- "Neoplasms"
Defined=c("Circulatory","Respiratory","Digestive","Diabetes","Injury","Musculoskeletal","Genitourinary","Neoplasms")
levels(diab$diag_2)[!(levels(diab$diag_2) %in% Defined)] <- "Other"
table(diab$diag_2)#Grouped levels by ICD9 codes
#Diagnosis 3
table(diab$diag_3) #since it has too many levels we will group them
levels(diab$diag_3)[levels(diab$diag_3) %in% c(390:459, 785)] <- "Circulatory"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(460:519, 786)] <- "Respiratory"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(520:579, 787)] <- "Digestive"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(seq.default(from = 250,to = 250.99,by =0.01))] <- "Diabetes"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(800:999)] <- "Injury"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(710:739)] <- "Musculoskeletal"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(580:629,788)] <- "Genitourinary"
levels(diab$diag_3)[levels(diab$diag_3) %in% c(140:239,780,781,784,790:799,240:249,251:279,680:709,782,001:139)] <- "Neoplasms"
Defined=c("Circulatory","Respiratory","Digestive","Diabetes","Injury","Musculoskeletal","Genitourinary","Neoplasms")
levels(diab$diag_3)[!(levels(diab$diag_3) %in% Defined)] <- "Other"
table(diab$diag_3)#Grouped levels by ICD9 codes
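# Optional refactor (illustrative sketch, not part of the original script): the identical
# ICD-9 grouping is applied to diag_1, diag_2 and diag_3 above, so a helper avoids the copy-paste.
group_icd9 <- function(x) {
  levels(x)[levels(x) %in% c(390:459, 785)] <- "Circulatory"
  levels(x)[levels(x) %in% c(460:519, 786)] <- "Respiratory"
  levels(x)[levels(x) %in% c(520:579, 787)] <- "Digestive"
  levels(x)[levels(x) %in% c(seq.default(from = 250, to = 250.99, by = 0.01))] <- "Diabetes"
  levels(x)[levels(x) %in% c(800:999)] <- "Injury"
  levels(x)[levels(x) %in% c(710:739)] <- "Musculoskeletal"
  levels(x)[levels(x) %in% c(580:629, 788)] <- "Genitourinary"
  levels(x)[levels(x) %in% c(140:239, 780, 781, 784, 790:799, 240:249, 251:279,
                             680:709, 782, 001:139)] <- "Neoplasms"
  Defined <- c("Circulatory", "Respiratory", "Digestive", "Diabetes", "Injury",
               "Musculoskeletal", "Genitourinary", "Neoplasms")
  levels(x)[!(levels(x) %in% Defined)] <- "Other"
  x
}
# e.g. diab$diag_1 <- group_icd9(diab$diag_1)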
# randomly shuffle the dataset
grp = runif(nrow(diab))
diab = diab[order(grp),]
View(diab)
ind = sample(seq_len(nrow(diab)), floor(nrow(diab)*0.7) )
train = diab[ind,]
test = diab[-ind,]
colnames(train)
train$X = NULL
train$X.1 = NULL
train$X.2 = NULL
ncol(train)
nrow(train)
nrow(test)
head(train,3)
head(test,3)
# to check the count of each value of a factor variable against the Y-variable
# ----------------------------------------------------------------------------
100*prop.table(table(train$readmitted))
100*prop.table(table(test$readmitted))
100*prop.table(table(diab$readmitted))
train_x = train[,1:42]
colnames(train_x)
train_y = train[,43]
colnames(train_y)
levels(train_y)
colnames(train_x)
head(train_y)
rf1 = randomForest(train_x, factor(train_y))
rf1
summary(rf1)
test$X=NULL
test$X.1=NULL
test$X.2=NULL
# predict the Classification for the Testing data
# ------------------------------------------------
pdct_rf1 = predict(rf1, test)
table(predicted=pdct_rf1,actual=test$readmitted)
pdct_rf1
confusionMatrix(pdct_rf1,test$readmitted,positive='positive')
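# note: 'readmitted' has three levels in this dataset ("<30", ">30", "NO"), so the
# 'positive' argument has no effect here (confusionMatrix only uses it for two-class outcomes)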
### Feature selection
# --------------------
# variables used by the randomforest()
varUsed(rf1, by.tree = F, count=F)
# importance of features/attributes.
# higher the value, more important it is
# used for feature selection to optimise other algorithm
# uses the MeanDecreaseGini
# -------------------------------------------------------------
importance(rf1)
# variable importance - for feature selection
# ----------------------------------------------
varImpPlot(rf1)
### </Feature selection>
# Model with imp variables
# -----------------------------------------
diab_new = diab
head(diab_new)
diab_new$X = NULL
diab_new$X.1 = NULL
diab_new$X.2 = NULL
diab_new$age = NULL
diab_new$time_in_hospital = NULL
diab_new$num_procedures = NULL
diab_new$number_inpatient = NULL
diab_new$diag_1 = NULL
diab_new$diag_2 = NULL
diab_new$diag_3 = NULL
diab_new$number_diagnoses = NULL
diab_new$max_glu_serum = NULL
diab_new$repaglinide = NULL
diab_new$chlorpropamide = NULL
diab_new$tolbutamide = NULL
diab_new$tolazamide = NULL
diab_new$insulin = NULL
diab_new$glipizide.metformin = NULL
diab_new$glimepiride.pioglitazone = NULL
diab_new$metformin.rosiglitazone = NULL
diab_new$metformin.pioglitazone = NULL
# randomly shuffle the dataset
grp = runif(nrow(diab_new))
diab_new = diab_new[order(grp),]
View(diab_new)
ind = sample(seq_len(nrow(diab_new)), floor(nrow(diab_new)*0.7) )
train = diab_new[ind,]
test = diab_new[-ind,]
ncol(train)
nrow(train)
nrow(test)
head(train,3)
head(test,3)
# to check the count of each value of a factor variable against the Y-variable
# ----------------------------------------------------------------------------
100*prop.table(table(train$readmitted))
100*prop.table(table(test$readmitted))
100*prop.table(table(diab_new$readmitted))
train_x = train[,1:24]
colnames(train_x)
train_y = train[,25]
colnames(train_y)
levels(train_y)
colnames(train_x)
head(train_y)
rf2 = randomForest(train_x, factor(train_y))
rf2
summary(rf2)
colnames(test)
test$X=NULL
test$X.1=NULL
test$X.2=NULL
# predict the Classification for the Testing data
# ------------------------------------------------
pdct_rf2= predict(rf2,test)
table(predicted=pdct_rf2,actual=test$readmitted)
pdct_rf2
confusionMatrix(pdct_rf2,test$readmitted,positive='positive')
# ----------------------------
# The accuracy decreased to 55.45 from 57.87 after
# building the model only with the important variables.
## ---- /tools/ngs/R/deseq2.R (repo: edwardtao/chipster-tools, permissive license) ----
# TOOL deseq2.R: "Differential expression using DESeq2" (Differential expression analysis using the DESeq2 Bioconductor package. You can create the input count table and phenodata file using the tool \"Utilities - Define NGS experiment\". If you have more than two experimental groups, note that the output figures sum up information from all pairwise comparisons.)
# INPUT data.tsv: "Count table" TYPE GENERIC
# INPUT META phenodata.tsv: "Phenodata file" TYPE GENERIC
# OUTPUT OPTIONAL de-list-deseq2.tsv
# OUTPUT OPTIONAL summary.txt
# OUTPUT OPTIONAL deseq2_report.pdf
# OUTPUT OPTIONAL de-list-deseq2.bed
# PARAMETER column: "Column describing groups" TYPE METACOLUMN_SEL DEFAULT group (Phenodata column describing the groups to test.)
# PARAMETER OPTIONAL ad_factor: "Column describing additional experimental factor" TYPE METACOLUMN_SEL DEFAULT EMPTY (Phenodata column describing an additional experimental factor. If given, p-values in the output table are from a likelihood ratio test of a model including the experimental groups and experimental factor, vs a model which only includes the experimental factor.)
# PARAMETER OPTIONAL p.value.cutoff: "Cutoff for the adjusted P-value" TYPE DECIMAL FROM 0 TO 1 DEFAULT 0.05 (The cutoff for Benjamini-Hochberg adjusted p-value. Note that the developers of DESeq2 use 0.1 as a default cut-off.)
# MK 15.04.2014, added the possibility to use DESeq2 in dea-deseq.R
# AMS 17.06.2014, split the DESeq2 part to a separate tool
# EK 1.7.2014, clarified the script before moving it to production, and fixed a bug that disabled DESeq2's automatic independent filtering
# EK 9.2.2015, updated to R3.1.2, changed the MA plot, added summary
# AMS 7.4.2015, Join pdf outputs to one
# ML 25.6.2015, Fixed some problems with S4vectors
# EK 10.2.2016, Added alpha to results()
# ML 4.5.2016, Changed the direction of the comparison when more than 2 groups
# ML+SS 18.10.2016, Fixed plotting & rounding and formatting when more than 2 groups
# ML+SS 08.05.2018, comparison possible also when >9 groups
#column <-"group"
#ad_factor<-"EMPTY"
#p.value.cutoff<-0.05
# Load the library
source(file.path(chipster.common.path, "bed-utils.R"))
library(DESeq2)
# Load the counts data and extract expression value columns
dat <- read.table("data.tsv", header=T, sep="\t", row.names=1)
dat2 <- dat[,grep("chip", names(dat))]
# Get the experimental group information from phenodata
phenodata <- read.table("phenodata.tsv", header=T, sep="\t")
groups <- as.character (phenodata[,pmatch(column,colnames(phenodata))])
group_levels <- levels(as.factor(groups))
# Read the additional experimental factor from phenodata and construct a design matrix from this information
exp_factor <- NULL
if(ad_factor != "EMPTY") {
exp_factor <- as.character (phenodata[,pmatch(ad_factor,colnames(phenodata))])
design <- data.frame(condition=as.factor(groups), exp_factor=exp_factor)
rownames(design) <- colnames(dat2)
}
# Create a DESeqDataSet object
if (ad_factor == "EMPTY") {
dds <- DESeqDataSetFromMatrix(countData=dat2, colData=data.frame(condition=groups), design = ~ condition)
} else if (ad_factor != "EMPTY") {
dds <- DESeqDataSetFromMatrix(countData=dat2, colData=design, design = ~ exp_factor + condition)
}
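# note: 'condition' is the last term in the design formula, so results() called without a
# contrast (the two-group case below) extracts the condition effect, adjusted for exp_factor when present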
# Vector / variable that holds comparison names
results_name <- NULL
dds <- DESeq(dds)
# Calculate statistic for differential expression, merge with original data table, keep significant DEGs, remove NAs and sort by FDR. If there are more than 2 groups, get pairwise results for each comparison.
if (length(unique(groups)) == 2) {
res <- results(dds,alpha=p.value.cutoff)
sig <- cbind(dat, res)
sig <- as.data.frame(sig)
#sig <- sig[! (is.na(sig$padj)), ]
sig <- sig[sig$padj <= p.value.cutoff, ]
sig <- sig[! (is.na(sig$padj)), ]
sig <- sig[ order(sig$padj), ]
# Open pdf file for output
pdf(file="deseq2_report.pdf")
plotMA(dds,alpha=p.value.cutoff,main=c("DESeq2 MA-plot, FDR =", p.value.cutoff),ylim=c(-2,2))
sink("summary.txt")
summary(res, alpha=p.value.cutoff)
sink()
# Output significant DEGs
if (dim(sig)[1] > 0) {
ndat <- ncol(dat)
nmax <- ncol(sig)
write.table(cbind(sig[,1:ndat], round(sig[, (ndat+1):(nmax-2)], digits=2), format(sig[, (nmax-1):nmax], digits=4, scientific=T)), file="de-list-deseq2.tsv", sep="\t", row.names=T, col.names=T, quote=F)
}
} else if (length(unique(groups)) > 2){
test_results <- dds
res <- NULL
# going through all the pairwise comparisons (i vs j):
conditions <- colData(test_results)$condition
# i goes from the first group to the one before last:
for (i in 1: (nlevels(conditions)-1) ) {
# j goes through all groups starting from i:
for (j in (i+1) : nlevels(conditions) ) {
i_label <- levels(conditions)[i]
j_label <- levels(conditions)[j]
pairwise_results <- as.data.frame(results(test_results, contrast=c("condition",j_label,i_label))) # note: j,i => j-i
# building the table:
if(is.null(res)) res <- pairwise_results else res <- cbind(res, pairwise_results)
results_name <- c(results_name, paste(i_label,"_vs_", j_label, sep=""))
}
}
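	# each pairwise results() data frame contributes 6 columns (baseMean, log2FoldChange,
	# lfcSE, stat, pvalue, padj), hence rep(results_name, each=6) in the labelling below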
colnames(res) <- paste(colnames(res), rep(results_name,each=6), sep=".")
min_padj <- apply(res[, grep("padj", colnames(res))], 1, min)
sig <- cbind(dat, res, min_padj=min_padj)
sig <- sig[ (sig$min_padj <= p.value.cutoff), ]
sig <- sig[! (is.na(sig$min_padj)), ]
sig <- sig[ order(sig$min_padj), ]
sig <- sig[, -grep("min_padj", colnames(sig))]
# Open pdf file for output
pdf(file="deseq2_report.pdf")
plotMA(dds,alpha=p.value.cutoff,main=c("DESeq2 MA-plot, FDR =", p.value.cutoff),ylim=c(-2,2))
sink("summary.txt")
summary(res, alpha=p.value.cutoff)
sink()
# Output significant DEGs
if (dim(sig)[1] > 0) {
ndat <- ncol(dat)
npairdat <- ncol(pairwise_results) # number of columns in one comparison
ncomp <- choose(nlevels(conditions),2) # number of comparisons
rounded_sig <- sig
for (i in 1:ncomp) {
skip <- ndat+ (i-1)*npairdat # how many columns to skip
round2 <- skip +1:(npairdat-2) # everything except the last two columns (p-value & padj), beginning after skipped cols
round4 <- skip +(npairdat-1):npairdat # the last two columns (p-value & padj), beginning after skipped cols
rounded_sig[,round2] <- round(rounded_sig[,round2], digits=2) # round to 2 digits everything except p-value & padj
rounded_sig[,round4] <- format(rounded_sig[,round4], digits=4, scientific=T) # round to 4 digits p-value & padj
}
write.table(rounded_sig, file="de-list-deseq2.tsv", sep="\t", row.names=T, col.names=T, quote=F)
}
}
# Create a template output table for plotting. With N genes and 3 comparisons, this conversion results in a data matrix with Nx3 rows
output_table <- NULL
colnames(res) <- gsub("\\..*$", "", colnames(res))
for(i in grep("baseMean$", colnames(res))) {
col_size <- grep("padj", colnames(res))[1] - grep("baseMean", colnames(res))[1]
output_table <- rbind(output_table, cbind(dat, res[, (i:(i+col_size))]))
}
rownames(output_table) <- make.names(rep(rownames(res), length(grep("baseMean$", colnames(res)))), unique=T)
# If genomic coordinates are present, output a sorted BED file for genome browser visualization and region matching tools
if("chr" %in% colnames(dat)) {
if (dim(sig)[1] > 0) {
bed <- output_table[,c("chr","start","end")]
bed <- as.data.frame(bed)
if(is.null(results_name)) {
gene_names <- rownames(res)
} else {
gene_names <- paste(rep(results_name, each=nrow(res)), rownames(res), sep="")
}
bed <- cbind(bed, name=gene_names) #name
bed <- cbind(bed, score=output_table[, "log2FoldChange"]) #score
bed <- bed[(output_table$padj <= p.value.cutoff & (! (is.na(output_table$padj)))), ]
bed <- sort.bed(bed)
write.table(bed, file="de-list-deseq2.bed", sep="\t", row.names=F, col.names=F, quote=F)
}
}
# Make dispersion plot
plotDispEsts(dds, main="Dispersion plot", cex=0.2)
legend(x="topright", legend="fitted dispersion", col="red", cex=1, pch="-")
# Make histogram of p-values with overlaid significance cutoff. When more than two groups, min.pvalue is taken over all comparisons for genes
hist(output_table$pval, breaks=100, col="blue", border="slateblue", freq=FALSE, main="P-value distribution", xlab="p-value", ylab="proportion (%)")
hist(output_table$padj, breaks=100, col="red", border="slateblue", add=TRUE, freq=FALSE)
abline(v=p.value.cutoff, lwd=2, lty=2, col="black")
legend (x="topright", legend=c("p-values","adjusted p-values", "significance cutoff"), col=c("blue","red","black"), cex=1, pch=15)
# Close pdf
dev.off()
# MA-plot when there are more than 2 groups. Define function for making MA-plot.
# plotDE <- function(res)
# plot(res$baseMean, res$log2FoldChange,
# log="x", pch=20, cex=.25, col = ifelse( res$padj < p.value.cutoff, "red", "black"),
# main="MA plot", xlab="mean counts", ylab="log2(fold change)")
# Make MA-plot
# pdf(file="ma-plot-deseq2.pdf")
# plotDE(unique(output_table))
# legend (x="topleft", legend=c("significant","not significant"), col=c("red","black"), cex=1, pch=19)
# abline(h = c(-1, 0, 1), col = c("dodgerblue", "darkgreen", "dodgerblue"), lwd = 2)
# dev.off()
# EOF
## ---- emaxsimB.R ----
"emaxsimB" <-
function(nsim, genObj, prior, modType=4,binary=FALSE,seed=12357,
check=FALSE,nproc=parallel::detectCores(),
negEmax=FALSE,ed50contr=NULL, lambdacontr=NULL,testMods=NULL,
idmax=length(doselev),mcmc=mcmc.control(),customCode=NULL,
customParms=NULL,
description="")
{
#### assumes dose levels are sorted on input
#### placebo (comparator) is in the first position
#### primary test dose is in position idmax
if(! modType %in% c(3,4))stop("modType must be 3 or 4")
if(length(ed50contr)!=length(lambdacontr))stop('The number of ED50 and Lambda defining contrasts must be equal')
if(isTRUE( binary!=prior$binary ))stop('Binary specification in prior and model do not match')
if(inherits(prior,'emaxPrior'))localParm<-TRUE else localParm<-FALSE
if(exists('.Random.seed'))save.seed<-.Random.seed
save.rng<-RNGkind()[1]
on.exit( RNGkind(save.rng))
on.exit(if(exists('save.seed')).Random.seed<<-save.seed,add=TRUE)
### extract design parameters from genObj
n<-genObj$genP$n
doselev<-genObj$genP$doselev ## doselev should be sorted/unique
Ndose<-length(doselev)
if(!binary)ddf<-sum(n)-Ndose else ddf<-Inf
nfrac<-n/sum(n) ##allocate fractional obs for stability
nfrac2<-0.5*nfrac
dose<-genObj$genP$dose
if(localParm)estan<-selEstan('mrmodel') else estan<-selEstan('basemodel')
### set up emax-model contrasts for null hypothesis test
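	### (Mods and optContr are from the DoseFinding package: Mods defines the candidate
	### sigmoid-Emax shapes and optContr computes the corresponding optimal MCP-Mod contrasts)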
contMat<-NULL
if(missing(testMods)){
if(is.null(ed50contr)){
if(Ndose<=4){
ed50contr<-c((doselev[1]+doselev[2])/2,
(doselev[Ndose-1]+doselev[Ndose])/2)
lambdacontr<-rep(1,2)
}else{
ed50contr<-c((doselev[1]+doselev[2])/2,median(doselev),
(doselev[Ndose-1]+doselev[Ndose])/2)
lambdacontr<-rep(1,3)
}
}
parmscontr<-cbind(ed50contr,lambdacontr)
testMods<-Mods(sigEmax=parmscontr,doses=doselev, placEff = 0,
maxEff=1-2*(negEmax))
}
if(!binary) contMat <-optContr(testMods,w=n)
### simulation result holders
if(modType==3){nparm<-3
}else {nparm<-4}
### posterior intervals to compute and store
llev<-c(0.025,0.05,0.1)
ulev<-c(0.975,0.95,0.9)
### set up independent stream of random numbers for
### each simulation iteration.
RNGkind("L'Ecuyer-CMRG")
set.seed(seed)
rseed<-matrix(integer(nsim*7),ncol=7)
rseed[1,]<-as.integer(.Random.seed)
if(nsim>1){
for(i in 2:nsim){
rseed[i,]<-nextRNGStream(rseed[i-1,])
}
}
if(isTRUE(.Platform$OS.type=='unix') && missing(nproc))
stop('nproc must be specified on multi-user unix machines')
if(check)nproc<-1 else{
if(nproc>detectCores())stop("The number of processes requested exceeds the number of processors available.")
if(nproc>nsim){
		warning(paste('The number of processors requested exceeded the number of simulations\n',
			'This is likely a mistake. nproc set to 1'))
nproc<-1
}
}
if(mcmc$chains>1 && !check)stop('The number of chains should be 1 except when testing.')
### set up indices for computing consecutive blocks
### of simulations
nblock<-as.integer(trunc(nsim/nproc))
nleft<-nsim%%nproc
indmat<-matrix(integer(nproc*2),ncol=2)
indmat[1,1]<-1
indmat[1,2]<-nblock+1*(1<=nleft)
if(nproc>=2){
for (i in 2:nproc){
indmat[i,1]<-indmat[i-1,2]+1
indmat[i,2]<-indmat[i,1]+nblock-1 + 1*(i<=nleft)
}
}
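	### each row of indmat holds the first and last simulation index assigned to one worker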
inlist<-list(indmat=indmat,rseed=rseed,Ndose=Ndose,dose=dose,
ddf=ddf,doselev=doselev,nparm=nparm,modType=modType,
binary=binary,genObj=genObj,testMods=testMods,
contMat=contMat,negEmax=negEmax,
check=check,estan=estan,prior=prior,mcmc=mcmc,
customCode=customCode,customParms=customParms,
nfrac=nfrac,nfrac2=nfrac2,n=n,
ulev=ulev,llev=llev,localParm=localParm)
if(nproc==1){
simout<-simrepB(1,inlist)
if(check)return(simout)
simout<-list(simout)
}else{
cl<-makeCluster(nproc)
registerDoParallel(cl)
simout<-foreach(j=1:nproc, .packages='clinDR') %dopar%{
simrepB(j,inlist)
}
stopCluster(cl)
}
####################################
### assign output to matrices/vectors
predpop <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
fitpredv <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
fitdifv <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
sepredv <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
sedifv <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
mv <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
msSat<-rep(NA,nsim)
pVal<-rep(NA,nsim)
divergence<-rep(NA,nsim)
gofP<-rep(NA,nsim)
selContrast<-rep(NA,nsim)
colnames(predpop)<-doselev
colnames(fitpredv)<-doselev
colnames(fitdifv)<-doselev
colnames(sepredv)<-doselev
colnames(sedifv)<-doselev
colnames(mv)<-doselev
residSD<-numeric(nsim)
est<-matrix( rep(NA,nsim*nparm),ncol=nparm )
if(modType==3){
colnames(est)<-c("led50","emax","e0")
}else{
colnames(est)<-c("led50","lambda","emax","e0")
}
if(localParm) est<-cbind(est,difTarget=rep(NA,nsim))
np1<-ncol(est)
nlev<-length(llev)
estlb<-array(numeric(np1*nsim*nlev),dim=c(np1,nsim,nlev))
estub<-array(numeric(np1*nsim*nlev),dim=c(np1,nsim,nlev))
dimnames(estlb)<-list(colnames(est),1:nsim,llev)
dimnames(estub)<-list(colnames(est),1:nsim,ulev)
if(!binary){
sdv <- matrix(rep(NA, nsim * Ndose), ncol = Ndose)
}else sdv<-NULL
nd1<-Ndose-1
lb<-array(numeric(nd1*nsim*nlev),dim=c(nd1,nsim,nlev))
ub<-array(numeric(nd1*nsim*nlev),dim=c(nd1,nsim,nlev))
dimnames(lb)<-list(doselev[2:Ndose],1:nsim,llev)
dimnames(ub)<-list(doselev[2:Ndose],1:nsim,ulev)
pop<-NULL
popSD<-NULL
if(is.null(customCode))customOut<-NULL
else customOut<-vector("list",nsim)
for(j in 1:nproc){
predpop[indmat[j,1]:indmat[j,2],]<-simout[[j]]$predpop
fitpredv[indmat[j,1]:indmat[j,2],]<-simout[[j]]$fitpredv
fitdifv[indmat[j,1]:indmat[j,2],]<-simout[[j]]$fitdifv
sepredv[indmat[j,1]:indmat[j,2],]<-simout[[j]]$sepredv
sedifv[indmat[j,1]:indmat[j,2],]<-simout[[j]]$sedifv
mv[indmat[j,1]:indmat[j,2],]<-simout[[j]]$mv
sdv[indmat[j,1]:indmat[j,2],]<-simout[[j]]$sdv
msSat[indmat[j,1]:indmat[j,2]]<-simout[[j]]$msSat
pVal[indmat[j,1]:indmat[j,2]]<-simout[[j]]$pVal
divergence[indmat[j,1]:indmat[j,2]]<-simout[[j]]$divergence
gofP[indmat[j,1]:indmat[j,2]]<-simout[[j]]$gofP
selContrast[indmat[j,1]:indmat[j,2]]<-simout[[j]]$selContrast
residSD[indmat[j,1]:indmat[j,2]]<-simout[[j]]$residSD
est[indmat[j,1]:indmat[j,2],]<-simout[[j]]$est
estlb[,indmat[j,1]:indmat[j,2],]<-simout[[j]]$estlb
estub[,indmat[j,1]:indmat[j,2],]<-simout[[j]]$estub
lb[,indmat[j,1]:indmat[j,2],]<-simout[[j]]$lb
ub[,indmat[j,1]:indmat[j,2],]<-simout[[j]]$ub
customOut[indmat[j,1]:indmat[j,2]]<-simout[[j]]$customOut
pop<-rbind(pop,simout[[j]]$pop)
popSD<-c(popSD,simout[[j]]$popSD)
}
return(structure(list(description=description,localParm=localParm,
binary=binary,modType=modType,genObj=genObj,
pop=pop,popSD=popSD,mcmc=mcmc,prior=prior,
est=est,estlb=estlb,estub=estub,residSD=residSD,
pVal=pVal,selContrast=selContrast,
testMods=testMods,gofP=gofP,
negEmax=negEmax,
predpop=predpop,
mv = mv, sdv = sdv, msSat=msSat, fitpredv = fitpredv,
sepredv = sepredv, fitdifv = fitdifv, sedifv = sedifv,
lb=lb,ub=ub,divergence=divergence,
rseed=rseed, idmax=idmax,customOut=customOut
),class="emaxsimB") )
}
simrepB<-function(j,inlist)
{
indmat<-inlist$indmat
rseed<-inlist$rseed
Ndose<-inlist$Ndose
dose<-inlist$dose
ddf<-inlist$ddf
doselev<-inlist$doselev
nparm<-inlist$nparm
modType<-inlist$modType
binary<-inlist$binary
genObj<-inlist$genObj
testMods<-inlist$testMods
contMat<-inlist$contMat
negEmax<-inlist$negEmax
check<-inlist$check
estan<-inlist$estan
prior<-inlist$prior
mcmc<-inlist$mcmc
customCode<-inlist$customCode
customParms<-inlist$customParms
nfrac<-inlist$nfrac
nfrac2<-inlist$nfrac2
n<-inlist$n
llev<-inlist$llev
ulev<-inlist$ulev
localParm<-inlist$localParm
nrep<-indmat[j,2]-indmat[j,1]+1
### set up input dose variable
if(binary)din<-c(doselev,doselev) else din<-doselev
if(binary)sigsim<-1 ### default placeholder
predpop <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
fitpredv <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
fitdifv <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
sepredv <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
sedifv <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
mv <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
msSat<-rep(NA,nrep)
pVal<-rep(NA,nrep)
divergence<-rep(NA,nrep)
selContrast<-rep(NA,nrep)
colnames(predpop)<-doselev
colnames(fitpredv)<-doselev
colnames(fitdifv)<-doselev
colnames(sepredv)<-doselev
colnames(sedifv)<-doselev
colnames(mv)<-doselev
residSD<-numeric(nrep)
gofP<-numeric(nrep)
est<-matrix( rep(NA,nrep*nparm),ncol=nparm )
if(modType==3){
colnames(est)<-c("led50","emax","e0")
}else{
colnames(est)<-c("led50","lambda","emax","e0")
}
if(localParm) est<-cbind(est,difTarget=rep(NA,nrep))
np1<-ncol(est)
nlev<-length(llev)
estlb<-array(numeric(np1*nrep*nlev),dim=c(np1,nrep,nlev))
estub<-array(numeric(np1*nrep*nlev),dim=c(np1,nrep,nlev))
dimnames(estlb)<-list(colnames(est),1:nrep,llev)
dimnames(estub)<-list(colnames(est),1:nrep,ulev)
if(!binary){
sdv <- matrix(rep(NA, nrep * Ndose), ncol = Ndose)
}else sdv<-NULL
nd1<-Ndose-1
lb<-array(numeric(nd1*nrep*nlev),dim=c(nd1,nrep,nlev))
ub<-array(numeric(nd1*nrep*nlev),dim=c(nd1,nrep,nlev))
dimnames(lb)<-list(doselev[2:Ndose],1:nrep,llev)
dimnames(ub)<-list(doselev[2:Ndose],1:nrep,ulev)
pop<-NULL
popSD<-NULL
if(is.null(customCode))customOut<-NULL
else customOut<-vector("list",nrep)
if(!negEmax)trend<-'positive' else trend<-'negative'
for(i in 1:nrep) {
ioff<-i+indmat[j,1]-1
### generate simulated data set by calling
### the function in genObj
.Random.seed<<-rseed[ioff,]
### simulate design and data
gendat <- genObj$genFun(genObj$genP)
predpop[i,]<-gendat[['meanlev']]
pop<-rbind(pop,gendat[['parm']])
popSD<-c(popSD,gendat[['resSD']])
y<-gendat[['y']]
#### set up normal approximation to logit(phat) if
#### binary data
####
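#### (delta-method variance of logit(phat) is approximately 1/(n*phat*(1-phat)),
#### which is what the diagonal of V holds below)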
if(binary){
phat<-as.numeric(tapply(y,dose,sum))
phat<-(phat+nfrac2)/(n+nfrac) ###shrink to .5 for stability
yhat<-qlogis(phat)
V<-diag(1/(phat*(1-phat)*n))
contMat <-optContr(testMods,S=V)
}else{
yhat<-as.numeric(tapply(y,dose,mean))
ms<-summary(lm(y~factor(dose)))$sigma
msSat[i]<-ms^2
V<-diag(msSat[i]/n)
}
### compute p-value for preliminary test
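### (the contrast with the largest t-statistic is selected and its p-value stored)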
holdC<-MCTtest(doselev, yhat, contMat = contMat,
S=V,df=ddf,type='general')$tStat
holdP<-attr(holdC,'pVal')
orderh<-order(holdC)
ncontr<-length(orderh)
pVal[i]<-holdP[orderh[ncontr]]
selContrast[i]<-orderh[ncontr]
### format data as counts if binary
if(binary){
cin<-tapply(y,dose,sum)
cin<- c(cin,n-cin)
yin<-c(rep(1,Ndose),rep(0,Ndose))
}else{
cin<-n
yin<-yhat
}
bfit<-fitEmaxB(yin,din,prior,modType,count=cin,binary=binary,
msSat=msSat[i],mcmc=mcmc,estan=estan,
diagnostics=check,nproc=1)
### return mcmc for preliminary checking
if(check){
if(binary){
return(list(estanfit=bfit$estanfit,parms=coef(bfit),pVal=pVal[i],dose=dose,y=y))
}else{
return(list(estanfit=bfit$estanfit,parms=coef(bfit),residSD=sigma(bfit),pVal=pVal[i],dose=dose,y=y))
}
}
### assign simulation output
sampler <- get_sampler_params(bfit$estanfit, inc_warmup = FALSE)
sampler<-sampler[[1]]
divergence[i] <- mean(sampler[,'divergent__'])
mv[i, ] <- tapply(y,dose,mean)
if(!binary){
sdv[i, ] <- sqrt(tapply(y,dose,var))
}
simout<-predict(bfit,doselev)
fitpredv[i,]<-simout$predMed
fitdifv[i,]<-simout$fitdifMed
sepredv[i, ] <-simout$se
sedifv[i, ] <-simout$sedif
### extract generated parameters
parms<-coef(bfit)
if(!binary){
sigsim<- sigma(bfit)
residSD[i]<-median(sigsim)
}
if(localParm){
parms1<- cbind(parms,coef(bfit,local=TRUE))
}else parms1<-parms
est[i,]<- apply(parms1,2,median)
simdif<-simout$simResp[,2:Ndose]-simout$simResp[,1]
for(k in 1:nlev){
qtile<-apply(simdif,2,quantile,c(llev[k],ulev[k]))
lb[,i,k]<-qtile[1,]
ub[,i,k]<-qtile[2,]
qtile<-apply(parms1,2,quantile,c(llev[k],ulev[k]))
estlb[,i,k]<-qtile[1,]
estub[,i,k]<-qtile[2,]
}
### compute gof test
gofP[i]<-bpchkMonoEmax(bfit,trend=trend)
### execute customized code
if(!is.null(customCode)){
if(!is.null(customParms)){
if(binary)customOut[[i]]<-customCode(parms,pVal[i],dose,y,
customParms=customParms )
else customOut[[i]]<-customCode(parms,sigsim,pVal[i],dose,y,
customParms=customParms )
}else{
if(binary)customOut[[i]]<-customCode(parms,pVal[i],dose,y)
else customOut[[i]]<-customCode(parms,sigsim,pVal[i],dose,y)
}
}
}
return(list(pop=pop,popSD=popSD,
est=est,estlb=estlb,estub=estub,residSD=residSD,
pVal=pVal,selContrast=selContrast,
gofP=gofP,predpop=predpop,
mv = mv, sdv = sdv, msSat=msSat, fitpredv = fitpredv,
fitdifv = fitdifv,sepredv = sepredv, sedifv = sedifv, lb=lb,ub=ub,
divergence=divergence,
customOut=customOut
))
}
| /R/emaxsimB.R | no_license | cran/clinDR | R | false | false | 14,019 | r |
|
### TERN LANDSCAPES
# Soil pH 4A1 [geoms]
# Author: Brendan Malone
# Email: brendan.malone@csiro.au
# created: 13.9.21
# modified: 16.9.21
# CODE PURPOSE
# Mosaic prediction geom tiles
##
## libraries
library(sp);library(rgdal);library(raster)
# root directories
root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/predictions/"
root.tiles<- paste0(root, "tiles/")
slurm.out<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/mosaic/slurm/"
#r.code<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/mosaic/tile_mosaic_4a1_median_d1.R"
# tiles
fols<- as.numeric(list.files(root.tiles))
fols<- sort(fols)
fols
length(fols)
### variables
vart<- "pH_4a1"
vart2<- "geomsimsFP"
vart3<- "labdat"
depth<- "d6"
#file name output
f.name<- paste0(root, depth, "/", vart, "/", vart, "_", vart2, "_",vart3,"_", depth, ".tif")
f.name
#file name input
f.name.in<- paste0(vart, "_", depth, "_", vart2, "_", vart3,"_modelfit.tif")
f.name.in
# initialise the list of rasters
raster_list <- list()
# cycle through each tile and append to list
for (i in 1:length(fols)){
fpath1<- paste0(root.tiles,fols[i], "/", depth,"/",vart,"/")
r1<- raster(list.files(fpath1, pattern = f.name.in, full.names=TRUE))
raster_list <- append(raster_list, r1)
print(i)
}
# SLURM output
slurm.out1<- paste0(slurm.out,f.name.in, "_tilemos_begin.txt")
itOuts<- c(as.character(Sys.time()))
write.table(itOuts,
file = slurm.out1,
row.names = F, col.names = F, sep=",")
#raster_list
raster_list$filename <- f.name
raster_list$datatype <- "FLT4S"
raster_list$format <- "GTiff"
raster_list$overwrite <- TRUE
raster_list$na.rm <- TRUE
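# do.call(merge, raster_list) below treats the unnamed list elements as the
# rasters to mosaic and the named elements above (filename, datatype, format,
# overwrite, na.rm) as arguments to raster::merge()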
# do the mosaic
mos <- do.call(merge, raster_list)
# SLURM output
slurm.out2<- paste0(slurm.out,f.name.in, "_tilemos_end.txt")
itOuts<- c(as.character(Sys.time()))
write.table(itOuts,
file = slurm.out2,
row.names = F, col.names = F, sep=",")
| /Production/DSM/pH/digital_soil_mapping/mosaic/tile_mosaic_4a1_geoms_labdat_d6.R | permissive | AusSoilsDSM/SLGA | R | false | false | 2,046 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_next_version.R
\name{get_next_version}
\alias{get_next_version}
\title{get the next version of a data package}
\usage{
get_next_version(
provided_scope = "knb-lter-cap",
provided_identifier,
display_message = FALSE
)
}
\arguments{
\item{provided_scope}{(character) scope of data package (defaults to CAP LTER: knb-lter-cap)}
\item{provided_identifier}{(integer) identifier of data package}
\item{display_message}{(boolean) indicates whether to display a message if an existing dataset is
not identified in the repository (thus returning a version number 1)}
}
\value{
integer reflecting the next version of a data package
}
\description{
get_next_version will access the EDI API to identify and return
the next version of a data package. If the data package does not exist
(i.e., we are constructing a new package), get_next_version will return `1`
and display a corresponding note.
}
\details{
get_next_version is a helper function designed to aid construction
of a package identifier (e.g., the version `5` of `knb-lter-cap.624.5`). As
such, the most common use case is that the function will be called
internally from capeml::createDataset but the function can be called
directly.
}
\examples{
\dontrun{
get_next_version(
provided_scope = "knb-lter-cap",
provided_identifier = 624
)
}
}
| /man/get_next_version.Rd | no_license | CAPLTER/capeml | R | false | true | 1,393 | rd |
|
#' Recursive density function describing the probability of \eqn{(W(T)-\mu T; T)}
#'
#' Recursive density function describing the probability of \eqn{(W(T)-\mu T; T)}
#'
#' @param u Value of \eqn{W(T)-\mu T} at which the density is evaluated.
#' @param i Index of interim look, e.g., \code{i=2} for the second interim look.
#' \code{i=length(t_vec)} for the final analysis.
#' @param sigmas Vector of \eqn{\sigma_i}, same length as \code{cstars} and \code{bstars}.
#' @param cstars Vector of upper boundaries for \eqn{W(T)-\mu T}.
#' @param bstars Vector of lower boundaries for \eqn{W(T)-\mu T}.
#'
#' @return Numeric value of the recursive density evaluated at \code{u}.
#' @export
#'
#' @examples
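#' # Illustrative call only; the sigma and boundary values below are made-up
#' # placeholders, not taken from any particular design:
#' \dontrun{
#' redensity(u = 0, i = 2, sigmas = c(1, 1.4), cstars = c(2, 3), bstars = c(-2, -3))
#' }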
redensity <- function(u,i,sigmas,cstars,bstars){
sigma <- sigmas[i]
if(i == 1){
return(dnorm(u/sigma)/sigma)
} else {
cstar <- cstars[i-1]
bstar <- bstars[i-1]
integrand <- function(x){sigma^(-1) * dnorm(u/sigma - x/sigma) * redensity(u=x,i=i-1,sigmas=sigmas,cstars=cstars,bstars=bstars)}
outt <- cubature::adaptIntegrate(f = integrand,lowerLimit = bstar,upperLimit = cstar)$integral
return(outt)
}
}
| /R/func_redensity.R | permissive | fzhang8/gsdbias | R | false | false | 1,020 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codeartifact_operations.R
\name{codeartifact_update_package_versions_status}
\alias{codeartifact_update_package_versions_status}
\title{Updates the status of one or more versions of a package}
\usage{
codeartifact_update_package_versions_status(domain, domainOwner,
repository, format, namespace, package, versions, versionRevisions,
expectedStatus, targetStatus)
}
\arguments{
\item{domain}{[required] The domain that contains the repository that contains the package
versions with a status to be updated.}
\item{domainOwner}{The 12-digit account number of the AWS account that owns the domain. It
does not include dashes or spaces.}
\item{repository}{[required] The repository that contains the package versions with the status you
want to update.}
\item{format}{[required] A format that specifies the type of the package with the statuses to
update. The valid values are:
\itemize{
\item \code{npm}
\item \code{pypi}
\item \code{maven}
\item \code{nuget}
}}
\item{namespace}{The namespace of the package. The package component that specifies its
namespace depends on its type. For example:
\itemize{
\item The namespace of a Maven package is its \code{groupId}.
\item The namespace of an npm package is its \code{scope}.
\item A Python package does not contain a corresponding component, so
Python packages do not have a namespace.
\item A NuGet package does not contain a corresponding component, so NuGet
packages do not have a namespace.
}}
\item{package}{[required] The name of the package with the version statuses to update.}
\item{versions}{[required] An array of strings that specify the versions of the package with the
statuses to update.}
\item{versionRevisions}{A map of package versions and package version revisions. The map \code{key}
is the package version (for example, \verb{3.5.2}), and the map \code{value} is
the package version revision.}
\item{expectedStatus}{The package version’s expected status before it is updated. If
\code{expectedStatus} is provided, the package version's status is updated
only if its status at the time \code{UpdatePackageVersionsStatus} is called
matches \code{expectedStatus}.}
\item{targetStatus}{[required] The status you want to change the package version status to.}
}
\description{
Updates the status of one or more versions of a package.
}
\section{Request syntax}{
\preformatted{svc$update_package_versions_status(
domain = "string",
domainOwner = "string",
repository = "string",
format = "npm"|"pypi"|"maven"|"nuget",
namespace = "string",
package = "string",
versions = list(
"string"
),
versionRevisions = list(
"string"
),
expectedStatus = "Published"|"Unfinished"|"Unlisted"|"Archived"|"Disposed"|"Deleted",
targetStatus = "Published"|"Unfinished"|"Unlisted"|"Archived"|"Disposed"|"Deleted"
)
}
}
\keyword{internal}
| /paws/man/codeartifact_update_package_versions_status.Rd | permissive | sanchezvivi/paws | R | false | true | 2,909 | rd |
|
# Title: Generate csv of centroids MODIS
# date: nov 2016
# Author: Pérez-Luque AJ @ajpelu
# ------
# Load packages
library('rgdal')
library('sp')
library('tidyverse')
# ------
# ------
# Set dir
mymachine <- '/Users/ajpelu/'
# mymachine <- '/Users/ajpeluLap/'
di <- paste0(mymachine, 'Dropbox/phd/phd_repos/qpyr_distribution/')
# ------
# Read modis iv pixels (centroid)
modis_iv <- rgdal::readOGR(dsn=paste0(di, '/data_raw/geoinfo/'),
layer = 'iv_malla_modis_qp_centroid', verbose = FALSE, encoding = "UTF-8")
# Get coordinates
iv_modis_coord_qp <- as.data.frame(cbind(coordinates(modis_iv), modis_iv@data))
# Rename
names(iv_modis_coord_qp) <- c('longitude', 'latitude', 'iv_malla_modi_id', 'lng', 'lat', 'pop')
# Format data (factor as numeric )
iv_modis_coord_qp$iv_malla_modi_id <- as.numeric(levels(iv_modis_coord_qp$iv_malla_modi_id))
iv_modis_coord_qp$pop <- unclass(iv_modis_coord_qp$pop) %>% as.numeric
# Select columns of interest
iv_modis_coord_qp <- iv_modis_coord_qp %>%
select(iv_malla_modi_id, longitude, latitude, pop)
# Export
write.csv(iv_modis_coord_qp, file=paste0(di, '/data/iv_malla_modis_qp_centroid.csv'), row.names = FALSE)
| /R/generate_csv_modis_centroid.R | no_license | ajpelu/qpyr_distribution | R | false | false | 1,201 | r |
|
# install.packages("V8")
library(V8)
source("R_functions/Sub_Functions_For_App.R")
source("R_functions/Main_Function_For_App.R")
library(mlbench)
library(shinyjs)
library(shiny)
jsResetCode <- "shinyjs.reset = function() {history.go(0)}" # Define the js method that resets the page
ui <- navbarPage(title="TreeMaster",
id = "Navbar",
tabPanel(title = 'Read Data',
value = 'page1',
sidebarLayout(
sidebarPanel(
fileInput(inputId = "file1", label="Select CSV File",
accept =c("text/csv","text/comma-separated-values,text/plain",".csv")),
checkboxInput(inputId = "header", label= "Header", value=TRUE),
#to determine whether the file contains the names of the variables as its first line
useShinyjs(), # Include shinyjs in the UI
extendShinyjs(text = jsResetCode), # Add the js code to the page
actionButton(inputId = "reset", label="Reset Data",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
tags$hr(),#a horizontal rule separate the file input and other options
selectInput(inputId = 'default', label="Load Default Data",width="100%",
choices = c("( Please select the default data )"="def",
"Regression: Boston Housing"="reg",
"Classification: Pima Indians Diabetes"="cla"),selected="def"),
#user choose to load the default regression data or the default classification data
tags$hr(),
actionButton(inputId = "regression", label="Regression",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
actionButton(inputId = "classification", label="Classification",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
),
mainPanel(tableOutput("contents"))
)
), # end of page 1
####################################################
#2nd page:regression
tabPanel(title="Regression",
value = 'page2',
sidebarLayout(
sidebarPanel(
radioButtons("model", label="Regression Models:",
choices = c("Random Forest","Decision Tree"),selected = "Random Forest"),
conditionalPanel("input.model == 'Random Forest'",
sliderInput("train_RF",label="Training set ratio: ",
min= 0.0,max = 1.0, step=0.05,value = 0.75),
htmlOutput("variable_rf"),
sliderInput("num_trees", label="Number of trees: ",min = 1,max = 500,value = 100),
radioButtons("importance", label="Importance measure:",
choices = c("impurity","accuracy"),selected = "impurity")
),
conditionalPanel("input.model == 'Decision Tree'",
sliderInput(inputId="train_DT",label="Training set ratio: ",min = 0.0,max = 1.0,step=0.05,value = 0.75),
htmlOutput("variable_dt"))
),
#3 plots could be chosen for Regression
mainPanel(
conditionalPanel("input.model == 'Random Forest'",
tabsetPanel(
tabPanel("Variable Importance",plotOutput("VarImportancePlot"),textOutput("RMSE_RF")),
tabPanel("Tree Number Tuning",
sliderInput("RegParamTuneCenter","Specify the center of number of tree",
min=1,max=100,value=50,step=1),
sliderInput("RegParamTuneRange","Specify how many neighbors to test",
min=2,max=100,value=50,step=2),
plotOutput("num_trees_tune"))
)),
conditionalPanel("input.model == 'Decision Tree'",
tabsetPanel(
tabPanel("Tree Visualization",plotOutput("Treeplot"),textOutput("RMSE_DT"))
))
))
),
############# Classification ##################
tabPanel(title = 'Classification',
value = 'page3',
sidebarLayout(
################ User input ###################
sidebarPanel(
# Warning the user only this model can only handle binary classification
h4("Error will appear for data without any binary factor variable."),
# Selecting models
radioButtons("ClfModel","Classification Model:",
choices=list("Random Forest","Decision Tree")),
# Selecting Training set proportion
sliderInput("TrainProp","Proportion of training set:",
min=0.05,max=0.95,value=0.75, step=0.05),
# Selecting binary response name
htmlOutput("BinaryResp"),
# Select which level should be considered as positive class in
# classification, will affect the confusion matrix output
htmlOutput("BinaryLevel"),
conditionalPanel(
condition = "input.ClfModel == 'Decision Tree'",
# Decision Tree Only: max-depth parameter
sliderInput("MaxDepth","Max Depth of tree (Decision Tree only)",
min=1,max=20,value=4, step=1)
),
conditionalPanel(
condition = "input.ClfModel == 'Random Forest'",
# Select split criteria
radioButtons("SplitCriteria","Importance Measure Criteria:",
choices=list("impurity",
"accuracy")),
# Select number of trees for random forest classifier
sliderInput("NumTree","Number of trees in random forest:",
min=1,max=500,value=100, step=1)
)
), # end of sidebar panel
############### UI Output ##########################
mainPanel(
############ Random Forest ###################
conditionalPanel(
condition = "input.ClfModel == 'Random Forest'",
tabsetPanel(
# Variable importance
tabPanel(
title = "Variable Importance",
plotOutput("RfImpPlot"),
textOutput("RfAcc")
),
# ROC plot
tabPanel(
title = "Receiver Operating Characteristic(ROC) Curve",
plotOutput("RfRoc"),
),
# KS plot
tabPanel(
title = "Kolmogorov-Smirnov(KS) Plot",
plotOutput("RfKs"),
),
# Confusion matrix
tabPanel(
title = "Confusion Matrix",
htmlOutput("RfPosText"),
htmlOutput("RfNegText"),
plotOutput("RfConfMat")
),
# Parameter Tuning
tabPanel(
title = "Tree Number Tuning",
sliderInput("ParamTuneCenter",
"Specify the center of number of tree",
min=1,max=500,value=100,step=1),
sliderInput("ParamTuneRange","Specify how many neighbors to test",
min=2,max=100,value=50,step=2),
plotOutput("RfParamTune")
)
)# end of tabset panel
), # end condition panel
############### Decision Tree #################
conditionalPanel(
condition = "input.ClfModel == 'Decision Tree'",
tabsetPanel(
# Tree Plot
tabPanel(
title = "Tree Visualization",
plotOutput("DtTreePlot"),
textOutput("DtAcc")
),
# ROC plot
tabPanel(
title = "Receiver Operating Characteristic(ROC) Curve",
plotOutput("DtRoc"),
),
# KS plot
tabPanel(
title = "Kolmogorov-Smirnov(KS) Plot",
plotOutput("DtKs"),
),
# Confusion matrix
tabPanel(
title = "Confusion Matrix",
htmlOutput("DtPosText"),
htmlOutput("DtNegText"),
plotOutput("DtConfMat")
)
)# end of tabset panel
) # end condition panel
) # end main panel
)# end of sidebar layout
),# end of tab panel3/page3
#############################################
#4th page:help page
tabPanel(title="Help",
value='page4',
htmlOutput("inc")
)
)# end of navbarPage
server <- function(input, output,session) {
##1st page server
#read data reactive function
loaddt <- reactive({
if(input$default=="def"){
inFile<-input$file1
if(is.null(inFile)) return(NULL)#not to get warning messages before reading data
data <- read.csv(file = inFile$datapath, header = input$header)
}
if(input$default=="reg") {
data("BostonHousing")
data <- BostonHousing
}
if(input$default=="cla") {
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes
}
return(data)
})
########## observeEvent functions #############
observeEvent(input$regression, {
updateNavbarPage(session, "Navbar",selected = "page2")
})
observeEvent(input$classification, {
updateNavbarPage(session, "Navbar",selected = "page3")
})
observeEvent(input$file1, {
output$contents <- renderTable({
data <- loaddt()
head(data)
})
})
observeEvent(input$default, {
output$contents <- renderTable({
data <- loaddt()
head(data)
})
})
# reset page for file uploading
observeEvent(input$reset, {
js$reset()
})
###################### Regression ################################
output$variable_rf <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(), data.frame())) return(NULL)
# Response variable selection:
selectInput("variable_rf", "variable",
names(loaddt()), multiple =FALSE)
})
output$variable_dt <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(), data.frame())) return(NULL)
# Response variable selection:
selectInput("variable_dt", "variable",
names(loaddt()), multiple =FALSE)
})
###random forest model
model.rf<-reactive({
data<-loaddt()
fit.rf <- randomforest_build(data, m = input$train_RF, response_name =input$variable_rf,
indicator = "regression", num_trees = input$num_trees,
importance_measure = input$importance)
return(fit.rf)})
###plot
output$VarImportancePlot = renderPlot({model.rf()$VarImportancePlot})
output$RMSE_RF <- renderText({paste("RMSE: ", model.rf()$RMSE)})
###parameter tuning function
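### (the tuning sliders supply a center and a width; tree counts from
### center - range/2 up to center + range/2 are evaluated, floored at 1)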
model.tune<-reactive({
data<-loaddt()
min_tree<-max(1,input$RegParamTuneCenter-(input$RegParamTuneRange/2))
max_tree<-input$RegParamTuneCenter+(input$RegParamTuneRange/2)
fit.tune<-num_trees_tune(data, m = input$train_RF, response_name =input$variable_rf,
indicator = "regression",
max_trees = max_tree,
min_trees=min_tree,
importance_measure = "accuracy")
return(fit.tune)})
###tuning plot
output$num_trees_tune = renderPlot({model.tune()})
###decision tree model
model.dt<-reactive({
data<-loaddt()
fit.dt<-decision_tree(data, m = input$train_DT, response_name =input$variable_dt,
indicator = "regression")
return(fit.dt)}
)
###plot
output$Treeplot = renderPlot({Tree_plot(model.dt()$PlotObject)})
output$RMSE_DT <- renderText({paste("RMSE: ", model.dt()$RMSE)})
###################### Classification ################################
################# Random Forest classifier ###########################
# helper function to get the names of binary factor variables
binary_response_name <- function(data){
factor_var_names <- names(Filter(is.factor, data))
binary_var_name <- c()
for (var_name in factor_var_names){
if (length(unique(data[,var_name])) == 2){
binary_var_name <- c(binary_var_name, var_name)
}
}
return(binary_var_name)
}
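# helper to list the two level labels of the chosen binary response
# (used to populate the positive-class selector and the confusion-matrix labels)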
binary_response_level <- function(data, resp_name){
as.character(unique(data[,resp_name]))
}
# updated binary response name
output$BinaryResp <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(),data.frame())) return(NULL)
# Binary Variable selection:
binary_vars <- binary_response_name(loaddt())
selectInput("ResponseName","Select a binary variable as the response variable:",
binary_vars, multiple =FALSE)
})
# updated binary reponse levels
output$BinaryLevel <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(),data.frame())) return(NULL)
# Binary level selection
binary_levels <- binary_response_level(loaddt(),input$ResponseName)
selectInput("PosLevel","Select the positive level",
binary_levels, selected=binary_levels[1], multiple =FALSE)
})
# The user input for positive labels of confusion matrix
output$RfPosText <- renderUI({
textInput("RfPosText","Enter positive level label",value=input$PosLevel)
})
# The user input for negative labels of confusion matrix
output$RfNegText <- renderUI({
binary_levels <- binary_response_level(loaddt(),input$ResponseName)
neg_label <- binary_levels[binary_levels!=input$PosLevel]
textInput("RfNegText", "Enter negative level label", value=neg_label)
})
# output list from random forest
RfClfResult <- reactive({
result <- randomforest_build(loaddt(),
m = input$TrainProp,
response_name = input$ResponseName,
indicator = "classification",
num_trees = input$NumTree,
importance_measure = input$SplitCriteria,
positive_class_name = input$PosLevel)
return(result)
})
# Plot showing accuracies over a range of number of trees in random forest
output$RfParamTune <- renderPlot({
min_tree <- max(1, input$ParamTuneCenter - input$ParamTuneRange/2)
max_tree <- input$ParamTuneCenter + input$ParamTuneRange/2
num_trees_tune(loaddt(), m = input$TrainProp, response_name = input$ResponseName,
indicator = "classification", max_trees = max_tree,
min_trees = min_tree, importance_measure = "accuracy",
positive_class_name = "pos")
})
# Random forest classifier variable importance plot
output$RfImpPlot <- renderPlot({
RfClfResult()$VarImportancePlot
})
# Random forest classifier ROC curve
output$RfRoc <- renderPlot({
RfClfResult()$ROCPlot
})
# Random forest classifier KS plot
output$RfKs <- renderPlot({
RfClfResult()$KSPlot
})
# Random forest classifier confusino matrix
output$RfConfMat <- renderPlot({
confusion_matrix_plot(cm_matrix = RfClfResult()$ConfusionMatrix,
positive = input$RfPosText,
negative = input$RfNegText)
})
# Random forest classifier accuracy
output$RfAcc <- renderText({
paste("Current Accuracy:",RfClfResult()$Accuracy)
})
################## Decision Tree Classifier #######################
DtClfResult <- reactive({
result <- decision_tree(loaddt(), m = input$TrainProp,
response_name = input$ResponseName,
indicator = "classification",
positive_class_name = input$PosLevel,
maxdepth = input$MaxDepth)
return(result)
})
# The user input for positive labels of confusion matrix
output$DtPosText <- renderUI({
textInput("DtPosText","Enter positive level label",value=input$PosLevel)
})
# The user input for negative labels of confusion matrix
output$DtNegText <- renderUI({
binary_levels <- binary_response_level(loaddt(),input$ResponseName)
neg_label <- binary_levels[binary_levels!=input$PosLevel]
textInput("DtNegText", "Enter negative level label", value=neg_label)
})
# Decision Tree classifier ROC curve
output$DtRoc <- renderPlot({
DtClfResult()$ROCPlot
})
# Decision Tree classifier KS plot
output$DtKs <- renderPlot({
DtClfResult()$KSPlot
})
# Decision Tree classifier confusino matrix
output$DtConfMat <- renderPlot({
confusion_matrix_plot(cm_matrix = DtClfResult()$ConfusionMatrix,
positive = input$DtPosText,
negative = input$DtNegText)
})
# Decision Tree classifier accuracy
output$DtAcc <- renderText({
paste("Current Accuracy:",DtClfResult()$Accuracy)
})
# Decision Tree classifier tree plot
output$DtTreePlot <- renderPlot({
Tree_plot(DtClfResult()$PlotObject)
})
############################ Help Page #######################
getPage<-function() {return(includeHTML("User-Guidance.html"))}
output$inc<-renderUI({getPage()})
}
shinyApp(ui = ui, server = server) | /TreeMaster.R | no_license | linchen-deng/TreeMasterShiny | R | false | false | 22,398 | r |
# install.packages("V8")
library(V8)
source("R_functions/Sub_Functions_For_App.R")
source("R_functions/Main_Function_For_App.R")
library(mlbench)
library(shinyjs)
library(shiny)
jsResetCode <- "shinyjs.reset = function() {history.go(0)}" # Define the js method that resets the page
ui <- navbarPage(title="TreeMaster",
id = "Navbar",
tabPanel(title = 'Read Data',
value = 'page1',
sidebarLayout(
sidebarPanel(
fileInput(inputId = "file1", label="Select CSV File",
accept =c("text/csv","text/comma-separated-values,text/plain",".csv")),
checkboxInput(inputId = "header", label= "Header", value=TRUE),
#to determine whether the file contains the names of the variables as its first line
useShinyjs(), # Include shinyjs in the UI
extendShinyjs(text = jsResetCode), # Add the js code to the page
actionButton(inputId = "reset", label="Reset Data",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
tags$hr(),#a horizontal rule separate the file input and other options
selectInput(inputId = 'default', label="Load Default Data",width="100%",
choices = c("( Please select the default data )"="def",
"Regression: Boston Housing"="reg",
"Classification: Pima Indians Diabetes"="cla"),selected="def"),
#user choose to load the default regression data or the default classification data
tags$hr(),
actionButton(inputId = "regression", label="Regression",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
actionButton(inputId = "classification", label="Classification",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
),
mainPanel(tableOutput("contents"))
)
), # end of page 1
####################################################
#2nd page:regression
tabPanel(title="Regression",
value = 'page2',
sidebarLayout(
sidebarPanel(
radioButtons("model", label="Regression Models:",
choices = c("Random Forest","Decision Tree"),selected = "Random Forest"),
conditionalPanel("input.model == 'Random Forest'",
sliderInput("train_RF",label="Training set ratio: ",
min= 0.0,max = 1.0, step=0.05,value = 0.75),
htmlOutput("variable_rf"),
sliderInput("num_trees", label="Number of trees: ",min = 1,max = 500,value = 100),
radioButtons("importance", label="Importance measure:",
choices = c("impurity","accuracy"),selected = "impurity")
),
conditionalPanel("input.model == 'Decision Tree'",
sliderInput(inputId="train_DT",label="Training set ratio: ",min = 0.0,max = 1.0,step=0.05,value = 0.75),
htmlOutput("variable_dt"))
),
#3 plots could be chosen for Regression
mainPanel(
conditionalPanel("input.model == 'Random Forest'",
tabsetPanel(
tabPanel("Variable Importance",plotOutput("VarImportancePlot"),textOutput("RMSE_RF")),
tabPanel("Tree Number Tuning",
sliderInput("RegParamTuneCenter","Specify the center of number of tree",
min=1,max=100,value=50,step=1),
sliderInput("RegParamTuneRange","Specify how many neighbors to test",
min=2,max=100,value=50,step=2),
plotOutput("num_trees_tune"))
)),
conditionalPanel("input.model == 'Decision Tree'",
tabsetPanel(
tabPanel("Tree Visualization",plotOutput("Treeplot"),textOutput("RMSE_DT"))
))
))
),
############# Classification ##################
tabPanel(title = 'Classification',
value = 'page3',
sidebarLayout(
################ User input ###################
sidebarPanel(
# Warning the user only this model can only handle binary classification
h4("Error will appear for data without any binary factor variable."),
# Selecting models
radioButtons("ClfModel","Classification Model:",
choices=list("Random Forest","Decision Tree")),
# Selecting Training set proportion
sliderInput("TrainProp","Proportion of training set:",
min=0.05,max=0.95,value=0.75, step=0.05),
# Selecting binary response name
htmlOutput("BinaryResp"),
# Select which level should be considered as positive class in
# classification, will affect the confusion matrix output
htmlOutput("BinaryLevel"),
conditionalPanel(
condition = "input.ClfModel == 'Decision Tree'",
# Decision Tree Only: max-depth parameter
sliderInput("MaxDepth","Max Depth of tree (Decision Tree only)",
min=1,max=20,value=4, step=1)
),
conditionalPanel(
condition = "input.ClfModel == 'Random Forest'",
# Select split criteria
radioButtons("SplitCriteria","Importance Measure Criteria:",
choices=list("impurity",
"accuracy")),
# Select number of trees for random forest classifier
sliderInput("NumTree","Number of trees in random forest:",
min=1,max=500,value=100, step=1)
)
), # end of sidebar panel
############### UI Output ##########################
mainPanel(
############ Random Forest ###################
conditionalPanel(
condition = "input.ClfModel == 'Random Forest'",
tabsetPanel(
# Variable importance
tabPanel(
title = "Variable Importance",
plotOutput("RfImpPlot"),
textOutput("RfAcc")
),
# ROC plot
tabPanel(
title = "Receiver Operating Characteristic(ROC) Curve",
plotOutput("RfRoc"),
),
# KS plot
tabPanel(
title = "Kolmogorov-Smirnov(KS) Plot",
plotOutput("RfKs"),
),
# Confusion matrix
tabPanel(
title = "Confusion Matrix",
htmlOutput("RfPosText"),
htmlOutput("RfNegText"),
plotOutput("RfConfMat")
),
# Parameter Tuning
tabPanel(
title = "Tree Number Tuning",
sliderInput("ParamTuneCenter",
"Specify the center of number of tree",
min=1,max=500,value=100,step=1),
sliderInput("ParamTuneRange","Specify how many neighbors to test",
min=2,max=100,value=50,step=2),
plotOutput("RfParamTune")
)
)# end of tabset panel
), # end condition panel
############### Decision Tree #################
conditionalPanel(
condition = "input.ClfModel == 'Decision Tree'",
tabsetPanel(
# Tree Plot
tabPanel(
title = "Tree Visualization",
plotOutput("DtTreePlot"),
textOutput("DtAcc")
),
# ROC plot
tabPanel(
title = "Receiver Operating Characteristic(ROC) Curve",
plotOutput("DtRoc"),
),
# KS plot
tabPanel(
title = "Kolmogorov-Smirnov(KS) Plot",
plotOutput("DtKs"),
),
# Confusion matrix
tabPanel(
title = "Confusion Matrix",
htmlOutput("DtPosText"),
htmlOutput("DtNegText"),
plotOutput("DtConfMat")
)
)# end of tabset panel
) # end condition panel
) # end main panel
)# end of sidebar layout
),# end of tab panel3/page3
#############################################
#4th page:help page
tabPanel(title="Help",
value='page4',
htmlOutput("inc")
)
)# end of navbarPage
server <- function(input, output,session) {
##1st page server
#read data reactive function
loaddt <- reactive({
if(input$default=="def"){
inFile<-input$file1
if(is.null(inFile)) return(NULL)#not to get warning messages before reading data
data <- read.csv(file = inFile$datapath, header = input$header)
}
if(input$default=="reg") {
data("BostonHousing")
data <- BostonHousing
}
if(input$default=="cla") {
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes
}
return(data)
})
########## observeEvent functions #############
observeEvent(input$regression, {
updateNavbarPage(session, "Navbar",selected = "page2")
})
observeEvent(input$classification, {
updateNavbarPage(session, "Navbar",selected = "page3")
})
observeEvent(input$file1, {
output$contents <- renderTable({
data <- loaddt()
head(data)
})
})
observeEvent(input$default, {
output$contents <- renderTable({
data <- loaddt()
head(data)
})
})
# reset page for file uploading
observeEvent(input$reset, {
js$reset()
})
###################### Regression ################################
output$variable_rf <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(),data.frame())) return(NULL)
# Independent Variable selection:
selectInput("variable_rf", "variable",
names(loaddt()), multiple =FALSE)
})
output$variable_dt <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(),data.frame())) return(NULL)
# Independent Variable selection:
selectInput("variable_dt", "variable",
names(loaddt()), multiple =FALSE)
})
###random forest model
model.rf<-reactive({
data<-loaddt()
fit.rf <- randomforest_build(data, m = input$train_RF, response_name =input$variable_rf,
indicator = "regression", num_trees = input$num_trees,
importance_measure = input$importance)
return(fit.rf)})
###plot
output$VarImportancePlot = renderPlot({model.rf()$VarImportancePlot})
output$RMSE_RF <- renderText({paste("RMSE: ", model.rf()$RMSE)})
###parameter tuning function
model.tune<-reactive({
data<-loaddt()
min_tree<-max(1,input$RegParamTuneCenter-(input$RegParamTuneRange/2))
max_tree<-input$RegParamTuneCenter+(input$RegParamTuneRange/2)
fit.tune<-num_trees_tune(data, m = input$train_RF, response_name =input$variable_rf,
indicator = "regression",
max_trees = max_tree,
min_trees=min_tree,
importance_measure = "accuracy")
return(fit.tune)})
###tuning plot
output$num_trees_tune = renderPlot({model.tune()})
###decision tree model
model.dt<-reactive({
data<-loaddt()
fit.dt<-decision_tree(data, m = input$train_DT, response_name =input$variable_dt,
indicator = "regression")
return(fit.dt)}
)
###plot
output$Treeplot = renderPlot({Tree_plot(model.dt()$PlotObject)})
output$RMSE_DT <- renderText({paste("RMSE: ", model.dt()$RMSE)})
###################### Classification ################################
################# Random Forest classifier ###########################
# helper functions to get binary variable names
binary_response_name <- function(data){
factor_var_names <- names(Filter(is.factor, data))
binary_var_name <- c()
for (var_name in factor_var_names){
if (length(unique(data[,var_name])) == 2){
binary_var_name <- c(binary_var_name, var_name)
}
}
return(binary_var_name)
}
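# Illustrative note (not in the original app): with the bundled PimaIndiansDiabetes
# demo data, the helper should find the single two-level factor column, e.g.
# binary_response_name(PimaIndiansDiabetes)   # expected: "diabetes"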
binary_response_level <- function(data, resp_name){
as.character(unique(data[,resp_name]))
}
# updated binary response name
output$BinaryResp <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(),data.frame())) return(NULL)
# Binary Variable selection:
binary_vars <- binary_response_name(loaddt())
selectInput("ResponseName","Select a binary variable as the response variable:",
binary_vars, multiple =FALSE)
})
# updated binary response levels
output$BinaryLevel <- renderUI({
if (identical(loaddt(), '') || identical(loaddt(),data.frame())) return(NULL)
# Binary level selection
binary_levels <- binary_response_level(loaddt(),input$ResponseName)
selectInput("PosLevel","Select the positive level",
binary_levels, selected=binary_levels[1], multiple =FALSE)
})
# The user input for positive labels of confusion matrix
output$RfPosText <- renderUI({
textInput("RfPosText","Enter positive level label",value=input$PosLevel)
})
# The user input for negative labels of confusion matrix
output$RfNegText <- renderUI({
binary_levels <- binary_response_level(loaddt(),input$ResponseName)
neg_label <- binary_levels[binary_levels!=input$PosLevel]
textInput("RfNegText", "Enter negative level label", value=neg_label)
})
# output list from random forest
RfClfResult <- reactive({
result <- randomforest_build(loaddt(),
m = input$TrainProp,
response_name = input$ResponseName,
indicator = "classification",
num_trees = input$NumTree,
importance_measure = input$SplitCriteria,
positive_class_name = input$PosLevel)
return(result)
})
# Plot showing accuracies over a range of number of trees in random forest
output$RfParamTune <- renderPlot({
min_tree <- max(1, input$ParamTuneCenter - input$ParamTuneRange/2)
max_tree <- input$ParamTuneCenter + input$ParamTuneRange/2
num_trees_tune(loaddt(), m = input$TrainProp, response_name = input$ResponseName,
indicator = "classification", max_trees = max_tree,
min_trees = min_tree, importance_measure = "accuracy",
positive_class_name = "pos")
})
# Random forest classifier variable importance plot
output$RfImpPlot <- renderPlot({
RfClfResult()$VarImportancePlot
})
# Random forest classifier ROC curve
output$RfRoc <- renderPlot({
RfClfResult()$ROCPlot
})
# Random forest classifier KS plot
output$RfKs <- renderPlot({
RfClfResult()$KSPlot
})
# Random forest classifier confusion matrix
output$RfConfMat <- renderPlot({
confusion_matrix_plot(cm_matrix = RfClfResult()$ConfusionMatrix,
positive = input$RfPosText,
negative = input$RfNegText)
})
# Random forest classifier accuracy
output$RfAcc <- renderText({
paste("Current Accuracy:",RfClfResult()$Accuracy)
})
################## Decision Tree Classifier #######################
DtClfResult <- reactive({
result <- decision_tree(loaddt(), m = input$TrainProp,
response_name = input$ResponseName,
indicator = "classification",
positive_class_name = input$PosLevel,
maxdepth = input$MaxDepth)
return(result)
})
# The user input for positive labels of confusion matrix
output$DtPosText <- renderUI({
textInput("DtPosText","Enter positive level label",value=input$PosLevel)
})
# The user input for negative labels of confusion matrix
output$DtNegText <- renderUI({
binary_levels <- binary_response_level(loaddt(),input$ResponseName)
neg_label <- binary_levels[binary_levels!=input$PosLevel]
textInput("DtNegText", "Enter negative level label", value=neg_label)
})
# Decision Tree classifier ROC curve
output$DtRoc <- renderPlot({
DtClfResult()$ROCPlot
})
# Decision Tree classifier KS plot
output$DtKs <- renderPlot({
DtClfResult()$KSPlot
})
# Decision Tree classifier confusion matrix
output$DtConfMat <- renderPlot({
confusion_matrix_plot(cm_matrix = DtClfResult()$ConfusionMatrix,
positive = input$DtPosText,
negative = input$DtNegText)
})
# Decision Tree classifier accuracy
output$DtAcc <- renderText({
paste("Current Accuracy:",DtClfResult()$Accuracy)
})
# Decision Tree classifier tree plot
output$DtTreePlot <- renderPlot({
Tree_plot(DtClfResult()$PlotObject)
})
############################ Help Page #######################
getPage<-function() {return(includeHTML("User-Guidance.html"))}
output$inc<-renderUI({getPage()})
}
shinyApp(ui = ui, server = server) |
###################################
# Package installation script
# R version 4.1.1
##################################
## Use the following commands to install the packages that are on CRAN (do this only if these packages are NOT already installed).
## Note: the installation only needs to be done once.
install.packages("raster")
install.packages("sp")
install.packages("devtools")
install.packages("remotes")
install.packages("rgdal")
install.packages("maptools")
## Loading the packages required for the installation.
# Each time an R session is started, the packages that will be used must be loaded, as below:
library(remotes)
library(devtools)
## Installing modleR
# modleR is not yet on CRAN, so its installation is different
# Installation with vignettes (package examples). Recommended*
remotes::install_github("Model-R/modleR",
build = TRUE,
dependencies = TRUE,
build_opts = c("--no-resave-data", "--no-manual"),
build_vignettes = TRUE)
# Installation without vignettes (without the examples)
remotes::install_github("Model-R/modleR", build = TRUE)
| /R/1-Scripts _Pre_Modelagem/1-install_packages.R | no_license | mariapaaiva/projeto_tfunalis | R | false | false | 1,213 | r |
testlist <- list(Beta = 0, CVLinf = -1.36287613609745e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829288-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r |
/separate_scripts/02.glm.project.R | no_license | jjvanderwal/MQ_JCU_work | R | false | false | 5,444 | r | ||
source("config/install-r-libraries.R")
source("legendas.R")
source("candidatos.R")
source("votos.R")
source("consolidacao-eleicao.R")
cat(green("Iniciando atualização de dados\n"))
if (!dir.exists(DATA_DIR)) {
dir.create(DATA_DIR)
}
con = getCon()
tryCatch({
main_atualiza_dados_consolidacao_eleicao()
main_atualiza_dados_legendas()
main_atualiza_dados_candidatos()
main_atualiza_dados_votos()
}, finally = dbDisconnect(con))
| /atualizacao-cache/main.R | no_license | fredpolicarpo/de-olho-no-BR | R | false | false | 458 | r |
##################
# STAT775
# Midterm
# Problem 03
#
#
##################
#
# Initial Setup
#
setwd("C:/Users/Terence/Documents/GitHub/STAT775/Midterm/")
# setwd("~/STAT775/HW03/")
DATA.FILE.NAME <- "../DataSets/south.african.heart.disease/data.disease"
#
# Data Handling
#
shuffle.data <- function(x) {
# x: a numeric matrix
for (i in 1:nrow(x)) {
one <- round(runif(1, 1, nrow(x)))
other <- round(runif(1, 1, nrow(x)))
temp <- x[one, ]
x[one, ] <- x[other, ]
x[other, ] <- temp
}
return(x)
}
code.data <- function(dat.a) {
family.history <- rep(0.0, nrow(dat.a))
data <- data.frame(dat.a[, 1:4], family.history, dat.a[6:10])
# data <- data.frame(dat.a[, 'tobacco'], dat.a[, 'ldl'], family.history, dat.a[, 'age'], dat.a[, 'chd'])
for (i in 1:nrow(data)) {
if (dat.a[i, 'famhist'] == 'Present') {
data[i, 'family.history'] <- 1.0
}
}
return(shuffle.data(data.matrix(data)))
}
get.data.tuples <- function(data) {
last.column <- ncol(data)
return(list(
observations = (data.matrix(data[, -last.column])),
targets = data.matrix(data[, last.column])
))
}
#
# Logistic Regression
#
logistic.predict <- function(x, b) {
#
# Args:
# x: a p x 1 column vector of observation values
# b: a p X 1 column vector of model weights (betas)
e <- exp(matrix(b, nrow = 1) %*% rbind(1.0, matrix(x, ncol = 1)))
return(e / (1.0 + e))
}
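# Illustrative example (values assumed, not from the assignment): with weights
# b = (intercept -1, slopes 0.5, 0.25) and observation x = (1, 2), the linear
# predictor is -1 + 0.5*1 + 0.25*2 = 0, so the predicted probability is 0.5.
# logistic.predict(x = c(1, 2), b = c(-1, 0.5, 0.25))   # 0.5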
newton.raphson.iteration <- function(X, y, old.betas, step.size = 0.5) {
#
# Args:
# X: N x p matrix of observations
# y: column vector of N target labels
# old.betas: column vector of (p + 1) model parameters
# step.size: a rate controlling parameter
p <- matrix(0, nrow = nrow(y), ncol = 1)
for (i in 1:nrow(y)) {
p[i, 1] <- logistic.predict(x = data.matrix(X[i, ]), b = old.betas)
}
W <- diag(1, nrow(y), nrow(y))
for (i in 1:nrow(y)) {
W[i, i] <- p[i, 1] * (1.0 - p[i, 1])
}
X.b <- cbind(1.0, X) # X with 1s prepended for bias term
z <- (X.b %*% old.betas) +
(solve(W + diag(0.001, nrow(W), ncol(W))) %*% (y - p))
step <- solve((t(X.b) %*% W %*% X.b) + diag(0.001, ncol(X.b), ncol(X.b))) %*%
t(X.b) %*% (y - p)
step <- step * step.size
return(old.betas + step)
}
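# For reference (added comment): this implements one iteratively reweighted
# least squares step, beta_new = beta_old + step.size * (X'WX)^{-1} X'(y - p),
# with a small ridge term added for numerical stability.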
train.logistic.model <- function(X, y, n.iterations = 400, step.size = 0.2) {
#
# Args:
# X: N x p matrix of observations, observations are rows
# y: N x 1 column vector of class labels {0, 1}
# n.iterations: the number of approximation iterations to perform
# step.size: used to slow the rate of convergence to prevent overshooting
# start with model parameters at 0 - arbitrary starting point
betas <- matrix(0, nrow = ncol(X) + 1, 1)
# iterate until parameter updates become arbitrarily small
old.betas <- betas + 1.0
i <- 0
while (i < n.iterations) {
betas <- newton.raphson.iteration(
X = X,
y = y,
old.betas = betas,
step.size = step.size
)
# TODO: use method for finding convergence, not just arbitrary iterations
i <- i + 1
}
return(betas)
}
#
# Neural Net
#
sigmoid <- function(x) {
#
# Args:
# x: a numeric or vector; the function is applied element-wise
return(1.0 / (1.0 + exp(-x)))
}
sigmoid.derivative <- function(x) {
#
# Args:
# x: a numeric or vector
return(sigmoid(x) * (1.0 - sigmoid(x)))
}
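# Quick sanity check (added; not part of the original solution): the logistic
# function equals 0.5 at zero and its derivative peaks there at 0.25.
stopifnot(isTRUE(all.equal(sigmoid(0), 0.5)),
          isTRUE(all.equal(sigmoid.derivative(0), 0.25)))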
construct.neural.net <- function(
topology = c(2, 2, 1),
activation = sigmoid,
activation.derivative = sigmoid.derivative,
debug = F) {
#
# Args:
# topology: a list or vector of the dimensions of each layer
# activation: a function to be used for the activation of each unit
# activation.derivative: a function that is the derivative of activation
layer.weights <- list()
derivative.matrices <- list()
outputs <- list()
previous.layer.dim <- 1
next.layer.dim <- 1
for (i in 1:(length(topology) - 1)) {
previous.layer.dim <- topology[[i]] + 1 # +1 for bias
next.layer.dim <- topology[[i + 1]]
num.elements <- (previous.layer.dim) * next.layer.dim
layer.weights[[i]] <- matrix(
if(debug) {rep(1, num.elements)}
else {runif(n = num.elements, min = -0.001, max = 0.001)},
nrow = previous.layer.dim,
ncol = next.layer.dim
)
outputs[[i]] <- matrix(0, nrow = next.layer.dim, ncol = 1)
derivative.matrices[[i]] <- diag(0, next.layer.dim)
}
return(list(
input.dim = topology[[1]],
output.dim = next.layer.dim, # should be dim of last layer
n.layers = length(layer.weights),
activation = activation,
activation.deriv = activation.derivative,
input = matrix(0, nrow = 1, ncol = topology[[1]]),
output = matrix(0, nrow = tail(topology, 1)[[1]], 1),
weights = layer.weights,
outputs = outputs,
derivatives = derivative.matrices
))
}
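# Illustrative usage (assumed sizes, not part of the original solution):
# a 2-input, 2-hidden-unit, 1-output network with sigmoid activations.
# toy.net <- construct.neural.net(topology = c(2, 2, 1),
#                                 activation = sigmoid,
#                                 activation.derivative = sigmoid.derivative)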
apply.inputs <- function(net, x, for.training = T) {
#
# Args:
# x: a 1 x n vector of inputs; n should be the same as for net
# net: a structure with all of the appropriate data for a neural network,
# as created by construct.neural.net()
# for.training[T]: currently unused
net$input <- matrix(x, nrow = 1)
previous.output <- cbind(net$input, 1)
for (i in 1:net$n.layers) {
weighted.sums <- previous.output %*% net$weights[[i]]
net$outputs[[i]] <- net$activation(weighted.sums)
net$derivatives[[i]] <- diag(
as.list(net$activation.deriv(weighted.sums)),
length(net$outputs[[i]])
)
previous.output <- cbind(net$outputs[[i]], 1)
}
net$output <- t(tail(net$outputs, 1)[[1]])
return(net)
}
backprop.weight.update <- function(net, target, learning.rate = 2) {
#
# Args:
# net: a neural net object that has had inputs applied and derivatives stored
# target: a column vector; the target output that should have been observed
# learning.rate: the learning rate of the network
# TODO: refactor learning.rate to be less hacky, allow for
# advanced techniques
last.index <- net$n.layers
deltas <- list()
error <- matrix(net$output, ncol = 1) - matrix(target, ncol = 1)
deltas[[last.index + 1]] <- error
W <- diag(1, nrow = nrow(error))
D <- net$derivatives[[last.index]]
for (i in last.index:1) {
deltas[[i]] <- D %*% W %*% deltas[[i + 1]]
if (i > 1) {
D <- net$derivatives[[i - 1]]
W <- net$weights[[i]]
W <- W[1:(nrow(W) - 1), ]
}
}
weight.updates <- list()
o.hat <- cbind(net$input, 1)
for (i in 1:last.index) {
weight.updates[[i]] <- -learning.rate * t(deltas[[i]] %*% o.hat)
if (i < last.index) {
o.hat <- cbind(net$outputs[[i]], 1)
}
}
for (i in 1:length(weight.updates)) {
net$weights[[i]] <- net$weights[[i]] + weight.updates[[i]]
}
return(net)
}
######
# MAIN
######
data.fram.e <- read.table(
"http://www-stat.stanford.edu/~tibs/ElemStatLearn/datasets/SAheart.data",
sep = ",",
head = T,
row.names = 1
)
data <- code.data(data.fram.e)
boundary <- 2 * nrow(data) / 3
train <- get.data.tuples(data[1:boundary, ])
test <- get.data.tuples(data[-(1:boundary), ])
# Logistic
logistic.model <- train.logistic.model(X = train$observations, y = train$targets)
print(logistic.model)
glm.out <- glm(train$targets ~ ., family=binomial(logit), data=data.frame(train$observations))
# logistic.model <- matrix(c(-4.204, 0.081, 0.168, 0.924, 0.044), ncol = 1)
logistic.confusion <- matrix(0, nrow = 3, ncol = 3)
for (i in 1:nrow(test$targets)) {
prediction <- round(logistic.predict(x = test$observations[i, ], b = logistic.model))
label <- test$targets[[i]]
logistic.confusion[label + 1, prediction + 1] <-
logistic.confusion[label + 1, prediction + 1] + 1
}
# Neural Net
NUM.EPOCHS <- 700
chd.net <- construct.neural.net(
topology = c(9, 5, 1),
activation = sigmoid,
activation.deriv = sigmoid.derivative
)
for (t in 1:NUM.EPOCHS) {
for (i in 1:nrow(train$targets)) {
chd.net <- apply.inputs(
net = chd.net,
x = matrix(train$observations[i, ], nrow = 1)
)
chd.net <- backprop.weight.update(
net = chd.net,
target = matrix(train$targets[i, ], ncol = 1),
learning.rate = 1.2
)
}
}
net.confusion <- matrix(0, 3, 3)
for (i in 1:nrow(test$observations)) {
prediction <- round(
apply.inputs(
net = chd.net,
x = matrix(test$observations[i, ], nrow = 1),
for.training = F
)$output
)
label <- test$targets[[i]]
net.confusion[label + 1, prediction + 1] <-
net.confusion[label + 1, prediction + 1] + 1
}
# library('nnet')
# b.net <- nnet(x = train$observations, y = train$targets, size = 3, maxit = 1000)
# pr <- round(predict(b.net, test$observations))
# net.confusion <- matrix(0, 3, 3)
# for (i in 1:nrow(pr)) {
# prediction <- pr[[1]]
# label <- test$targets[[i]]
#
# net.confusion[label + 1, prediction + 1] <-
# net.confusion[label + 1, prediction + 1] + 1
# }
# Results
compute.confusion.percentages <- function(confusion) {
rownames(confusion) <- c('Actual No CHD', 'Actual CHD', '%.Correct')
colnames(confusion) <- c('Predicted No CHD', 'Predicted CHD', '%.Correct')
row.sums <- rowSums(confusion)
col.sums <- colSums(confusion)
correct <- 0
for (i in 1:2) {
confusion[i, 3] <- confusion[i, i] / row.sums[[i]]
confusion[3, i] <- confusion[i, i] / col.sums[[i]]
correct <- correct + confusion[i, i]
}
confusion[[3,3]] <- correct / sum(row.sums)
return(confusion)
}
logistic.confusion <-compute.confusion.percentages(logistic.confusion)
net.confusion <-compute.confusion.percentages(net.confusion)
print(logistic.confusion, digits = 2)
print(net.confusion, digits = 2)
| /STAT775/Midterm/Prolem_03.R | permissive | T-R0D/Past-Courses | R | false | false | 9,678 | r |
############################################
## Intro. to Bayesian Inference - Problem Set 2
## -> Question 6
############################################
library(TeachingDemos)
library(ggplot2)
library(latex2exp)
library(pscl)
library(R2OpenBUGS)
library(coda)
library(lattice)
library(gridExtra)
# A) Modelling the prior distribution
# ----------------------------------------------------------
mu = 0.4; sd2 <- 0.001;
a <- ((1-mu)/sd2 - 1/mu)*mu^2
b <- a*(1/mu - 1)
# Finds the variance that leads to P(theta < 60%) ~= 95%
while(pbeta(0.6, a, b) > 0.95) {
sd2 <- sd2*1.0001;
a <- ((1-mu)/sd2 - 1/mu)*mu^2
b <- a*(1/mu - 1)
}
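# Illustrative check (added, not in the original script): the fitted Beta(a, b)
# prior should keep the mean at 0.4 and put roughly 95% of its mass below 0.6.
print(c(a = a, b = b, prior_mean = a / (a + b), p_below_0.6 = pbeta(0.6, a, b)))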
# B) Simulating the BUGS model
# ----------------------------------------------------------
data <- list(x=sample(c(rep(1,15), rep(0,85))), n=100,
a=a, b=b)
params <- c("theta")
out <- bugs(data, NULL, params, model.file="model_Q6.txt",
n.iter=25000, n.chains=1, codaPkg=TRUE)
codaOut <- read.bugs(out)
| /Masters/CCP9016 - INTRODUÇÃO À INFERÊNCIA BAYESIANA/Exercises/02/Q6.r | no_license | TioMinho/UFC_Courses | R | false | false | 970 | r |
#' SSmase() computes MASE for one-step ahead hindcasting cross-validations of indices
#'
#' Computes MASE from the prediction residuals of one-step-ahead hindcasting cross-validations.
#' MASE is calculated as the ratio of the mean absolute error (MAE) of the prediction residuals (MAE.PR) to the MAE of the naive predictions (MAE.base).
#' MASE.adj sets MAE.base to a minimum of MAE.base.adj (default = 0.1).
#' MASE.adj therefore allows passing (MASE < 1) if MAE.PR < 0.1, and thus accurate predictions, when the observations show extremely little variation.
#'
#' @param retroSummary List created by r4ss::SSsummarize()
#' @param quants data type c("cpue","len","age)
#' @param models Optional subset of the models described in
#' r4ss function summaryoutput(). Either "all" or a vector of numbers indicating
#' columns in summary tables.
#' @param Season option to specify Season - Default uses first available, i.e. usual Seas = 1
#' @param endyrvec Optional single year or vector of years representing the
#' final year of values to show for each model. By default it is set to the
#' ending year specified in each model.
#' @param indexselect = Vector of fleet numbers for each model for which to compare
#' @param indexfleets CHECK IF NEEDED or how to adjust indexfleets
#' @param MAE.base.adj minimum MASE denominator (naive predictions) for MASE.adj (default = 0.1)
#' @param verbose Report progress to R GUI?
#' @return MASE and hcxval statistic
#' @author Henning Winker (JRC-EC) and Laurence Kell (Sea++)
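#' @examples
#' \dontrun{
#' # Illustrative sketch (paths and objects assumed): summarize a reference run
#' # and its retrospective peels with r4ss, then compute MASE for the indices.
#' retroModels <- r4ss::SSgetoutput(dirvec = file.path("retrospectives",
#'                                                     paste0("retro", 0:-5)))
#' retroSummary <- r4ss::SSsummarize(retroModels)
#' SSmase(retroSummary, quants = "cpue")
#' }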
#' @export
SSmase<- function(retroSummary,quants=c("cpue","len","age"),Season="default",
models="all",endyrvec="default",indexselect = NULL,MAE.base.adj=0.1,residuals=FALSE,
verbose=FALSE
){
hcruns =retroSummary #added for now
xmin = NULL
subplots = quants[1]
if(is.null(hcruns$indices) & subplots[1] == "cpue"){
stop("Require input object from r4ss::SSsummarize()")
}
if(subplots[1] %in% c("len","age")){
if(is.null(hcruns$age) & is.null(hcruns$len)){
stop("Require input object from ss3diags::SSdiagsComps")
}}
if(subplots[1]=="len"){
if(is.null(hcruns$len)) stop("No Length Comps found")
hcruns$indices = hcruns$len
}
if(subplots[1]=="age"){
if(is.null(hcruns$age)) stop("No Age Comps found")
hcruns$indices = hcruns$age
}
# subset if indexselect is specified
if(is.null(indexselect) ==F & is.numeric(indexselect)){
iname = unique(hcruns$indices$Fleet_name)[indexselect]
if(TRUE %in% is.na(iname)) stop("One or more index numbers exceed number of available indices")
hcruns$indices = hcruns$indices[hcruns$indices$Fleet_name%in%iname,]
}
log=FALSE #(no option to plot on log scale)
mase <- function(indexfleets=1){
#-------------------------------------------------------------
# mase function
#-------------------------------------------------------------
# get stuff from summary output (minimized)
n <- hcruns$n
startyrs <- hcruns$startyrs
endyrs <- hcruns$endyrs
indices <- hcruns$indices
if(models[1]=="all") models <- 1:n
nlines <- length(models)
if(endyrvec[1]=="default"){
endyrvec <- endyrs-seq(0,n-1,1)
}
if(length(endyrvec)==1){
stop("SSplotHCxval requires a minimum of one reference and one retro peel")
}
# check length of indexfleets
if(!is.null(indexfleets) && length(indexfleets) < n){
if(length(indexfleets)==1){
indexfleets <- rep(indexfleets, n)
}else{
warning("'indexfleets' needs to have length either 1 or n=",n,"\n",
"with each value a fleet number for the index to compare.\n")
indexfleets <- NULL
}
}
# Exclude all Time steps not use in reference run replist1
if(subplots[1]%in%c("len","age")){
indices$Use = ifelse(is.na(indices$Like),-1,1)
}
RefUse = indices[indices$imodel==1 & indices$Use==1,]
RefUse = paste0(RefUse$Fleet_name,".",RefUse$Time)
indices = indices[paste0(indices$Fleet_name,".",indices$Time)%in%RefUse,]
indices2 <- NULL
for(iline in 1:nlines){
imodel <- models[iline]
subset1 <- indices$imodel==imodel & !is.na(indices$Like) & indices$Use == 1
subset2 <- indices$imodel==imodel #& indices$Use == 1 #><>
if(length(unique(indices$Fleet[subset1])) > 1){
if(!is.null(indexfleets[imodel])){
ifleet <- indexfleets[imodel]
indices2 <- rbind(indices2,indices[subset2 & indices$Fleet==ifleet,])
}else{
if(verbose){cat("some models have multiple indices, 'indexfleets' required\n",
"to compare fits to indices.\n")}
return()
}
}else{
indices2 <- rbind(indices2,indices[subset2,])
}
}
# Subset by month
if(Season=="default"){
Season = unique(indices2$Seas)[1]
if(verbose & length(unique(indices2$Seas))>1){cat("Taking Season",Season,"by default for Index",unique(indices2$Fleet_name))}
} else {
Season = as.numeric(Season)[1]
if(is.na(Season)) stop("Season must a default or and or the integer of indices$Seas 1,2,3,4")
}
indices <- indices[indices$Seas==Season,]
indices2 <- indices2[indices2$Seas==Season,]
# get quantities for plot
yr <- indices2$Yr
obs <- indices2$Obs
exp <- indices2$Exp
imodel <- indices2$imodel
Q <- indices2$Calc_Q
meanQ <- rep(NA,nlines)
imodel <- models[which(endyrvec==max(endyrvec))[1]]
xmin = min(endyrvec)-5
subset <- indices2$imodel==imodel & !is.na(indices2$Like) & yr>= xmin
### make plot of index fits
# calculate ylim (excluding dummy observations from observed but not expected)
sub <- !is.na(indices2$Like) & yr>= xmin
# hcxval section
yr.eval <- c(endyrvec)
yr.eval <- (sort(yr.eval))
yr.obs <- yr.eval%in%yr
pe.eval = which(yr.eval%in%yr)
if(length(which(yr.eval%in%yr))-length(pe.eval)<1){
pe.eval = pe.eval[-1]
}
npe <- length(pe.eval) # number of prediction errors
obs.eval <- rep(NA,length(yr.eval))
obs.eval[yr.eval%in%yr] = obs[subset][yr[subset] %in%yr.eval]
#if(length(obs.eval)>length(pe.eval)) obs.eval=obs.eval[-1] # first non NA = NA
nhc = length(endyrvec)-1
if(length(endyrvec[yr%in%endyrvec])>0 & length(which(yr.eval%in%yr))>1){ # ><>
if(verbose) cat(paste("\n","Computing MASE with",ifelse(npe<(length(endyrvec)-1),"only","all"),
npe,"of",length(endyrvec)-1," prediction residuals for Index",indices2$Fleet_name[1]),"\n")
if(verbose & npe<(length(endyrvec)-1))cat(paste("\n","Warning: Unequal spacing of naive predictions residuals may influence the interpretation of MASE","\n"))
index.i = unique(indices2$Fleet_name)
pred.resid = NULL # Note Prediction Residuals
for(iline in (2:nlines)){
imodel <- models[iline]
subset <- indices2$imodel==imodel & yr <= endyrvec[iline]+1 & yr>=xmin
subset.ref <- indices2$imodel==imodel
if(endyrvec[iline-1]%in%yr){
x <- yr[subset]
y <- exp[subset]
yobs = obs[subset]
pred.resid = c(pred.resid,log(y[length(x)])-log(yobs[length(x)])) # add log() for v1.1
}
}
#}
if(length(pred.resid)>length(pe.eval)) pred.resid=pred.resid[-1]
maepr = mean(abs(pred.resid))
#nhc = length(endyrvec)-1
#naive.eval = log(obs.eval[1:nhc])-log(obs.eval[2:(nhc+1)]) # add log for v1.1
#npe <- length(naive.eval[is.na(naive.eval)==F]) # number of prection errors
naive.eval=log(obs.eval[pe.eval])-log(obs.eval[is.na(obs.eval)==F][-(npe+1)])
scaler = mean(abs(naive.eval))
mase=maepr/scaler
mase.adj = maepr/max(scaler,MAE.base.adj)
MASE.i = res.i = NULL
MASE.i = data.frame(Index=unique(indices2$Fleet_name)[1],Season=Season, MASE=mase,MAE.PR=maepr,MAE.base=scaler,MASE.adj=mase.adj,n.eval=npe)
res.i = data.frame(Index=rep(unique(indices2$Fleet_name)[1],length(pred.resid)),Season=rep(Season,length(pred.resid)),Year=yr.eval[pe.eval],Pred.Res=pred.resid,Native.Res=naive.eval,n.eval=npe)
} else {
if(verbose) cat(paste0("\n","No observations in evaluation years to compute prediction residuals for Index ",indices2$Fleet_name[1]),"\n")
MASE.i = res.i = NULL
MASE.i = data.frame(Index=unique(indices2$Fleet_name)[1],Season=Season, MASE=NA,MAE.PR=NA,MAE.base=NA,MASE.adj=NA,n.eval=0) }
out = list(MASE=MASE.i,Residuals=res.i)
return(out)
} # End of mase function
#------------------------------------------------------------
# LOOP through fleets
nfleets=length(unique(hcruns$indices$Fleet))
MASE = Residuals = NULL
for(fi in 1:nfleets){
indexfleets = unique(hcruns$indices$Fleet)[fi]
get_mase = mase(indexfleets)
MASE = rbind(MASE,get_mase$MASE)
Residuals = rbind(Residuals,get_mase$Residuals)
} # End of Fleet Loop
# Add new joint MASE
jstats = apply(abs(Residuals[c("Pred.Res","Native.Res")]),2,mean)
joint = data.frame(Index="joint",Season="",
MASE=jstats[1]/jstats[2],MAE.PR=jstats[1],MAE.base=jstats[2],
MASE.adj=jstats[1]/max(jstats[2],MAE.base.adj),n.eval=nrow(Residuals))
MASE = rbind(MASE,joint)
rownames(MASE) = 1:nrow(MASE)
if(verbose) cat(paste0("\n","MASE stats by Index:","\n"))
ret = MASE
if(residuals) ret = list(MASE=MASE,Residuals=Residuals)
return(ret)
} # end of SSmase()
#-----------------------------------------------------------------------------------------
| /R/SSmase.R | no_license | MOshima-PIFSC/ss3diags | R | false | false | 9,904 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adp.sontek.R
\name{read.adp.sontek}
\alias{read.adp.sontek}
\title{Read a Sontek ADP File}
\usage{
read.adp.sontek(file, from = 1, to, by = 1, tz = getOption("oceTz"),
longitude = NA, latitude = NA, type = c("adp", "pcadp"),
monitor = FALSE, despike = FALSE, processingLog,
debug = getOption("oceDebug"), ...)
}
\arguments{
\item{file}{a connection or a character string giving the name of the file
to load. (For \code{read.adp.sontek.serial}, this is generally a list of
files, which will be concatenated.)}
\item{from}{indication of the first profile to read. This can be an
integer, the sequence number of the first profile to read, or a POSIXt time
before which profiles should be skipped, or a character string that converts
to a POSIXt time (assuming UTC timezone). See \dQuote{Examples}, and make
careful note of the use of the \code{tz} argument. If \code{from} is not
supplied, it defaults to 1.}
\item{to}{an optional indication of the last profile to read, in a
format as described for \code{from}. As a special case, \code{to=0} means
to read the file to the end. If \code{to} is not supplied, then it defaults
to 0.}
\item{by}{an optional indication of the stride length to use while walking through
the file. If this is an integer, then \code{by-1} profiles are skipped
between each pair of profiles that is read, e.g. the default \code{by=1}
means to read all the data. (For RDI files \emph{only}, there are some
extra features to avoid running out of memory; see \dQuote{Memory considerations}.)}
\item{tz}{character string indicating time zone to be assumed in the data.}
\item{longitude}{optional signed number indicating the longitude in degrees
East.}
\item{latitude}{optional signed number indicating the latitude in degrees
North.}
\item{type}{A character string indicating the type of instrument.}
\item{monitor}{boolean, set to \code{TRUE} to provide an indication of progress
in reading the file, either by printing a dot for each profile or by writing
a textual progress bar with \code{\link{txtProgressBar}}.}
\item{despike}{if \code{TRUE}, \code{\link{despike}} will be used to clean
anomalous spikes in heading, etc.}
\item{processingLog}{if provided, the action item to be stored in the log.
(Typically only provided for internal calls; the default that it provides is
better for normal calls by a user.)}
\item{debug}{a flag that turns on debugging. Set to 1 to get a moderate
amount of debugging information, or to 2 to get more.}
\item{\dots}{additional arguments, passed to called routines.}
}
\value{
An \code{adp} object, i.e. one inheriting from \code{\link{adp-class}}.
}
\description{
Read a Sontek acoustic-Doppler profiler file [1].
}
\references{
1. Information about Sontek profilers is available at
\url{http://www.sontek.com}.
}
\seealso{
Other things related to \code{adp} data: \code{\link{[[,adp-method}},
\code{\link{[[<-,adp-method}}, \code{\link{adp-class}},
\code{\link{adpEnsembleAverage}}, \code{\link{adp}},
\code{\link{as.adp}}, \code{\link{beamName}},
\code{\link{beamToXyzAdp}}, \code{\link{beamToXyzAdv}},
\code{\link{beamToXyz}}, \code{\link{beamUnspreadAdp}},
\code{\link{binmapAdp}}, \code{\link{enuToOtherAdp}},
\code{\link{enuToOther}}, \code{\link{plot,adp-method}},
\code{\link{read.ad2cp}}, \code{\link{read.adp.nortek}},
\code{\link{read.adp.rdi}},
\code{\link{read.adp.sontek.serial}},
\code{\link{read.adp}}, \code{\link{read.aquadoppHR}},
\code{\link{read.aquadoppProfiler}},
\code{\link{read.aquadopp}},
\code{\link{subset,adp-method}},
\code{\link{summary,adp-method}}, \code{\link{toEnuAdp}},
\code{\link{toEnu}}, \code{\link{velocityStatistics}},
\code{\link{xyzToEnuAdp}}, \code{\link{xyzToEnu}}
}
\author{
Dan Kelley and Clark Richards
}
| /man/read.adp.sontek.Rd | no_license | yuanjisun/oce | R | false | true | 3,853 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adp.sontek.R
\name{read.adp.sontek}
\alias{read.adp.sontek}
\title{Read a Sontek ADP File}
\usage{
read.adp.sontek(file, from = 1, to, by = 1, tz = getOption("oceTz"),
longitude = NA, latitude = NA, type = c("adp", "pcadp"),
monitor = FALSE, despike = FALSE, processingLog,
debug = getOption("oceDebug"), ...)
}
\arguments{
\item{file}{a connection or a character string giving the name of the file
to load. (For \code{read.adp.sontek.serial}, this is generally a list of
files, which will be concatenated.)}
\item{from}{indication of the first profile to read. This can be an
integer, the sequence number of the first profile to read, or a POSIXt time
before which profiles should be skipped, or a character string that converts
to a POSIXt time (assuming UTC timezone). See \dQuote{Examples}, and make
careful note of the use of the \code{tz} argument. If \code{from} is not
supplied, it defaults to 1.}
\item{to}{an optional indication of the last profile to read, in a
format as described for \code{from}. As a special case, \code{to=0} means
to read the file to the end. If \code{to} is not supplied, then it defaults
to 0.}
\item{by}{an optional indication of the stride length to use while walking through
the file. If this is an integer, then \code{by-1} profiles are skipped
between each pair of profiles that is read, e.g. the default \code{by=1}
means to read all the data. (For RDI files \emph{only}, there are some
extra features to avoid running out of memory; see \dQuote{Memory considerations}.)}
\item{tz}{character string indicating time zone to be assumed in the data.}
\item{longitude}{optional signed number indicating the longitude in degrees
East.}
\item{latitude}{optional signed number indicating the latitude in degrees
North.}
\item{type}{A character string indicating the type of instrument.}
\item{monitor}{boolean, set to \code{TRUE} to provide an indication of progress
in reading the file, either by printing a dot for each profile or by writing
a textual progress bar with \code{\link{txtProgressBar}}.}
\item{despike}{if \code{TRUE}, \code{\link{despike}} will be used to clean
anomalous spikes in heading, etc.}
\item{processingLog}{if provided, the action item to be stored in the log.
(Typically only provided for internal calls; the default that it provides is
better for normal calls by a user.)}
\item{debug}{a flag that turns on debugging. Set to 1 to get a moderate
amount of debugging information, or to 2 to get more.}
\item{\dots}{additional arguments, passed to called routines.}
}
\value{
An \code{adp} object, i.e. one inheriting from \code{\link{adp-class}}.
}
\description{
Read a Sontek acoustic-Dopplerprofiler file [1].
}
\references{
1. Information about Sontek profilers is available at
\url{http://www.sontek.com}.
}
\seealso{
Other things related to \code{adp} data: \code{\link{[[,adp-method}},
\code{\link{[[<-,adp-method}}, \code{\link{adp-class}},
\code{\link{adpEnsembleAverage}}, \code{\link{adp}},
\code{\link{as.adp}}, \code{\link{beamName}},
\code{\link{beamToXyzAdp}}, \code{\link{beamToXyzAdv}},
\code{\link{beamToXyz}}, \code{\link{beamUnspreadAdp}},
\code{\link{binmapAdp}}, \code{\link{enuToOtherAdp}},
\code{\link{enuToOther}}, \code{\link{plot,adp-method}},
\code{\link{read.ad2cp}}, \code{\link{read.adp.nortek}},
\code{\link{read.adp.rdi}},
\code{\link{read.adp.sontek.serial}},
\code{\link{read.adp}}, \code{\link{read.aquadoppHR}},
\code{\link{read.aquadoppProfiler}},
\code{\link{read.aquadopp}},
\code{\link{subset,adp-method}},
\code{\link{summary,adp-method}}, \code{\link{toEnuAdp}},
\code{\link{toEnu}}, \code{\link{velocityStatistics}},
\code{\link{xyzToEnuAdp}}, \code{\link{xyzToEnu}}
}
\author{
Dan Kelley and Clark Richards
}
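% Illustrative usage sketch (not part of the original Rd, which references an
% \dQuote{Examples} section that is absent here). The file name and coordinates
% are placeholders, hence the \dontrun wrapper.
\examples{
\dontrun{
library(oce)
## Read every second profile of a local Sontek ADP file (placeholder name),
## supplying the mooring position; to = 0 means read to the end of the file.
adp <- read.adp.sontek("adp_sontek.adp", from = 1, to = 0, by = 2,
                       latitude = 47.88, longitude = -69.73)
summary(adp)
plot(adp)
}
}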
|
# Created on
# Course work:
# @author:
# Source:
vector1 <- c(1:3)
vector2 <- c(9:14)
result <- array(c(vector1,vector2),dim = c(3,3,2))
# print 1st element of the 3rd column from the first matrix
ele2 <- result[1,3,1]
print(ele2) | /chaaya/accessing_array_element.r | no_license | tactlabs/r-samples | R | false | false | 235 | r | # Created on
# Course work:
# @author:
# Source:
vector1 <- c(1:3)
vector2 <- c(9:14)
result <- array(c(vector1,vector2),dim = c(3,3,2))
# print 1st element of the 3rd column from the first matrix
ele2 <- result[1,3,1]
print(ele2) |
species_vector <- c('Bufo bufo',
'BUbo bubo',
'B. bubo',
'Buteo sp','Buteosp','buteosp1','buteo sp1','Buteo_sp1','Buteo_sp 1',
'Buteo buteo 1758 Linnée','Buteo buteo (linnaeus)',
'Buteo buteo meridionalis',
'Buteo buteo aff meridionalis', 'Buteo morphospecies4','Buteo morphospecies 4',
'Buteo morpho 1','Buteo morpho 10',
"Buteo buteo.us", 'Buteo buteo.de','Buteo buteo.fr'
)
grouping_vector <- rep("Site 1", length(species_vector))
| /data/species_names_examples.r | no_license | AlbanSagouis/iClean | R | false | false | 596 | r | species_vector <- c('Bufo bufo',
'BUbo bubo',
'B. bubo',
'Buteo sp','Buteosp','buteosp1','buteo sp1','Buteo_sp1','Buteo_sp 1',
'Buteo buteo 1758 Linnée','Buteo buteo (linnaeus)',
'Buteo buteo meridionalis',
'Buteo buteo aff meridionalis', 'Buteo morphospecies4','Buteo morphospecies 4',
'Buteo morpho 1','Buteo morpho 10',
"Buteo buteo.us", 'Buteo buteo.de','Buteo buteo.fr'
)
grouping_vector <- rep("Site 1", length(species_vector))
|
# ---- fit-1pl-hier-stan ----
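# Note: the data objects I, J and y passed to sampling(), and the true values b,
# theta, mu_b, sigma_b and sigma_theta printed below, are assumed to be created
# in earlier chunks of the source document; this chunk is not self-contained.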
library(rstan);
model_hier <- stan_model("irt_1pl_hier.stan");
fit_hier <-sampling(model_hier, data = c("I", "J", "y"),
refresh=2000, seed=1234)
options("width"=100);
print(fit_hier, c(paste("b[", 1:5, "]"), paste("theta[", 1:5, "]"),
"mu_b", "sigma_b", "sigma_theta", "lp__"),
probs=c(0.10, 0.5, 0.90));
print(b[1:10], digits=2);
print(theta[1:10], digits=2);
print("mu_b = 1; sigma_b = 2; sigma_theta=1");
| /test/integration/example-models/knitr/irt/irt-1pl-hier-fit.R | permissive | nhuurre/stanc3 | R | false | false | 488 | r | # ---- fit-1pl-hier-stan ----
library(rstan);
model_hier <- stan_model("irt_1pl_hier.stan");
fit_hier <-sampling(model_hier, data = c("I", "J", "y"),
refresh=2000, seed=1234)
options("width"=100);
print(fit_hier, c(paste("b[", 1:5, "]"), paste("theta[", 1:5, "]"),
"mu_b", "sigma_b", "sigma_theta", "lp__"),
probs=c(0.10, 0.5, 0.90));
print(b[1:10], digits=2);
print(theta[1:10], digits=2);
print("mu_b = 1; sigma_b = 2; sigma_theta=1");
|
help(Distributions) # for a full list
## for each:
## “d” returns the height of the probability density function
## “p” returns the cumulative density function
## “q” returns the inverse cumulative density function (quantiles)
## “r” returns randomly generated numbers
help(Normal)
## examples from help:
require(graphics)
dnorm(0) == 1/sqrt(2*pi) # T
dnorm(1) == exp(-1/2)/sqrt(2*pi) # T
dnorm(1) == 1/sqrt(2*pi*exp(1)) # T
help(par)
par(mfrow = c(2,1)) # 2 below each other
plot(function(x) dnorm(x, log = TRUE), -60, 50,
main = "log { Normal density }")
curve(log(dnorm(x)), add = TRUE, col = "red", lwd = 2)
mtext("dnorm(x, log=TRUE)", adj = 0)
mtext("log(dnorm(x))", col = "red", adj = 1)
plot(function(x) pnorm(x, log.p = TRUE), -50, 10,
main = "log { Normal Cumulative }")
curve(log(pnorm(x)), add = TRUE, col = "red", lwd = 2)
mtext("pnorm(x, log=TRUE)", adj = 0)
mtext("log(pnorm(x))", col = "red", adj = 1)
## back to tutorial
## the height of the probability distribution at each point
dnorm(0) # mean of zero, sd == 1
dnorm(0)*sqrt(2*pi)
1/sqrt(2*pi)
dnorm(0,mean=4)
dnorm(0,mean=4,sd=10)
v <- 0:2
dnorm(v)
x <- seq(-20,20,by=.1)
y <- dnorm(x)
plot(x,y,main = "Normal distribution")
mtext("mean=0, sd=1", adj = 0)
y <- dnorm(x,mean=2.5,sd=0.1)
plot(x,y,main = "Normal distribution")
mtext("mean=2.5, sd=0.1", adj = 0)
y <- dnorm(x,mean=2.5,sd=5)
plot(x,y,main = "Normal distribution")
mtext("mean=2.5, sd=5", adj = 0)
## the probability that a normally distributed random number will be less than that number
# sd = 1, mean = 0
pnorm(0) # 0.5
pnorm(1) # 0.8413447
pnorm(0,mean=2) # 0.02275013
pnorm(0,mean=2,sd=3) # 0.2524925
pnorm(v) # 0.5000000 0.8413447 0.9772499
y <- pnorm(x)
plot(x,y,main = "Probability of Less than x at Normal Distribution")
mtext("mean=0, sd=1", adj = 0)
y <- pnorm(x,mean=3,sd=4)
plot(x,y,main = "Probability of Less than x at Normal Distribution")
mtext("mean=3, sd=4", adj = 0)
## the probability that a number is larger than the given number
pnorm(0,lower.tail=FALSE)
pnorm(1,lower.tail=FALSE)
pnorm(0,mean=2,lower.tail=FALSE)
y <- pnorm(x,lower.tail=F)
plot(x,y,main = "Probability of Larger than x at Normal Distribution")
mtext("mean=0, sd=1", adj = 0)
## inverse of pnorm, cumulative distribution matches the probability
qnorm(0.5) # 0
qnorm(0.5,mean=1) # 1
qnorm(0.5,mean=1,sd=2) # 1
qnorm(0.5,mean=2,sd=2) # 2
qnorm(0.5,mean=2,sd=4) # 2
qnorm(0.25,mean=2,sd=2) # 0.6510205
qnorm(0.333) # -0.4316442
qnorm(0.333,sd=3) # -1.294933
qnorm(0.75,mean=5,sd=2) # 6.34898
v = c(0.1,0.3,0.75)
qnorm(v) # -1.2815516 -0.5244005 0.6744898
x <- seq(0,1,by=.05)
y <- qnorm(x)
plot(x,y,main = "Cummulative Probability at Normal Distribution")
mtext("mean=0, sd=1", adj = 0)
y <- qnorm(x,mean=3,sd=2)
plot(x,y,main = "Cummulative Probability at Normal Distribution")
mtext("mean=3, sd=2", adj = 0)
y <- qnorm(x,mean=3,sd=0.1)
plot(x,y,main = "Cummulative Probability at Normal Distribution")
mtext("mean=3, sd=0.1", adj = 0)
## T, Binomial and Chi distributions are skipped
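## Added sketch (not part of the original tutorial): the same d/p/q/r naming
## pattern applies to the binomial distribution, e.g. 10 trials with prob = 0.5
dbinom(5, size = 10, prob = 0.5)   # P(X == 5), height of the mass function
pbinom(5, size = 10, prob = 0.5)   # P(X <= 5), cumulative probability
qbinom(0.5, size = 10, prob = 0.5) # smallest x with P(X <= x) >= 0.5
rbinom(3, size = 10, prob = 0.5)   # three random draws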
help(par)
randoms <- function(num) {
par(mfrow=c(3,2)) # square region
plot(runif(num,1,10),main=paste0("Uniform Distribution (n = ",num,")"))
plot(sort(runif(num,1,10)),main=paste0("Sorted Uniform Distribution (n = ",num,")"))
plot(rnorm(num,0,5),main=paste0("Normal Distribution (n = ",num,")"))
plot(sort(rnorm(num,0,5)),main=paste0("Sorted Normal Distribution (n = ",num,")"))
plot(sample(seq(1,num*100,1),num),main=paste0("Sampling from 100x (n = ",num,")"))
plot(sort(sample(seq(1,num*100,1),num)),main=paste0("Sorted Sampling from 100x (n = ",num,")"))
par(mfrow=c(1,1))
}
randoms(30)
randoms(50)
randoms(100)
randoms(200)
randoms(500)
| /tutorial4_basic_probability.r | no_license | tiborh/r | R | false | false | 4,175 | r | help(Distributions) # for a full list
## for each:
## “d” returns the height of the probability density function
## “p” returns the cumulative density function
## “q” returns the inverse cumulative density function (quantiles)
## “r” returns randomly generated numbers
help(Normal)
## examples from help:
require(graphics)
dnorm(0) == 1/sqrt(2*pi) # T
dnorm(1) == exp(-1/2)/sqrt(2*pi) # T
dnorm(1) == 1/sqrt(2*pi*exp(1)) # T
help(par)
par(mfrow = c(2,1)) # 2 below each other
plot(function(x) dnorm(x, log = TRUE), -60, 50,
main = "log { Normal density }")
curve(log(dnorm(x)), add = TRUE, col = "red", lwd = 2)
mtext("dnorm(x, log=TRUE)", adj = 0)
mtext("log(dnorm(x))", col = "red", adj = 1)
plot(function(x) pnorm(x, log.p = TRUE), -50, 10,
main = "log { Normal Cumulative }")
curve(log(pnorm(x)), add = TRUE, col = "red", lwd = 2)
mtext("pnorm(x, log=TRUE)", adj = 0)
mtext("log(pnorm(x))", col = "red", adj = 1)
## back to tutorial
## the height of the probability distribution at each point
dnorm(0) # mean of zero, sd == 1
dnorm(0)*sqrt(2*pi)
1/sqrt(2*pi)
dnorm(0,mean=4)
dnorm(0,mean=4,sd=10)
v <- 0:2
dnorm(v)
x <- seq(-20,20,by=.1)
y <- dnorm(x)
plot(x,y,main = "Normal distribution")
mtext("mean=0, sd=1", adj = 0)
y <- dnorm(x,mean=2.5,sd=0.1)
plot(x,y,main = "Normal distribution")
mtext("mean=2.5, sd=0.1", adj = 0)
y <- dnorm(x,mean=2.5,sd=5)
plot(x,y,main = "Normal distribution")
mtext("mean=2.5, sd=5", adj = 0)
## the probability that a normally distributed random number will be less than that number
# sd = 1, mean = 0
pnorm(0) # 0.5
pnorm(1) # 0.8413447
pnorm(0,mean=2) # 0.02275013
pnorm(0,mean=2,sd=3) # 0.2524925
pnorm(v) # 0.5000000 0.8413447 0.9772499
y <- pnorm(x)
plot(x,y,main = "Probability of Less than x at Normal Distribution")
mtext("mean=0, sd=1", adj = 0)
y <- pnorm(x,mean=3,sd=4)
plot(x,y,main = "Probability of Less than x at Normal Distribution")
mtext("mean=3, sd=4", adj = 0)
## the probability that a number is larger than the given number
pnorm(0,lower.tail=FALSE)
pnorm(1,lower.tail=FALSE)
pnorm(0,mean=2,lower.tail=FALSE)
y <- pnorm(x,lower.tail=F)
plot(x,y,main = "Probability of Larger than x at Normal Distribution")
mtext("mean=0, sd=1", adj = 0)
## inverse of pnorm, cumulative distribution matches the probability
qnorm(0.5) # 0
qnorm(0.5,mean=1) # 1
qnorm(0.5,mean=1,sd=2) # 1
qnorm(0.5,mean=2,sd=2) # 2
qnorm(0.5,mean=2,sd=4) # 2
qnorm(0.25,mean=2,sd=2) # 0.6510205
qnorm(0.333) # -0.4316442
qnorm(0.333,sd=3) # -1.294933
qnorm(0.75,mean=5,sd=2) # 6.34898
v = c(0.1,0.3,0.75)
qnorm(v) # -1.2815516 -0.5244005 0.6744898
x <- seq(0,1,by=.05)
y <- qnorm(x)
plot(x,y,main = "Cummulative Probability at Normal Distribution")
mtext("mean=0, sd=1", adj = 0)
y <- qnorm(x,mean=3,sd=2)
plot(x,y,main = "Cummulative Probability at Normal Distribution")
mtext("mean=3, sd=2", adj = 0)
y <- qnorm(x,mean=3,sd=0.1)
plot(x,y,main = "Cummulative Probability at Normal Distribution")
mtext("mean=3, sd=0.1", adj = 0)
## T, Binomial and Chi distributions are skipped
help(par)
randoms <- function(num) {
par(mfrow=c(3,2)) # square region
plot(runif(num,1,10),main=paste0("Uniform Distribution (n = ",num,")"))
plot(sort(runif(num,1,10)),main=paste0("Sorted Uniform Distribution (n = ",num,")"))
plot(rnorm(num,0,5),main=paste0("Normal Distribution (n = ",num,")"))
plot(sort(rnorm(num,0,5)),main=paste0("Sorted Normal Distribution (n = ",num,")"))
plot(sample(seq(1,num*100,1),num),main=paste0("Sampling from 100x (n = ",num,")"))
plot(sort(sample(seq(1,num*100,1),num)),main=paste0("Sorted Sampling from 100x (n = ",num,")"))
par(mfrow=c(1,1))
}
randoms(30)
randoms(50)
randoms(100)
randoms(200)
randoms(500)
|
\name{mlest}
\alias{mlest}
\title{ML Estimation of Multivariate Normal Data}
\description{
Finds the maximum likelihood estimates of the mean vector and
variance-covariance matrix for multivariate normal data with
(potentially) missing values.
}
\usage{
mlest(data, ...)
}
\arguments{
\item{data}{A data frame or matrix containing multivariate normal
data. Each row should correspond to an observation, and each
column to a component of the multivariate vector. Missing values
should be coded by 'NA'.}
\item{\dots}{Optional arguments to be passed to the nlm optimization routine.}
}
\details{
The estimate of the variance-covariance matrix returned by
\code{mlest} is necessarily positive semi-definite. Internally,
\code{nlm} is used to minimize the negative log-likelihood, so
optional arguments may be passed to \code{nlm} which modify the
details of the minimization algorithm, such as \code{iterlim}. The
likelihood is specified in terms of the inverse of the Cholesky factor
of the variance-covariance matrix (see Pinheiro and Bates 2000).
\code{mlest} cannot handle data matrices with more than 50 variables.
Each variable must also be observed at least once.
}
\value{
\item{muhat}{MLE of the mean vector.}
\item{sigmahat}{MLE of the variance-covariance matrix.}
\item{value}{The objective function that is minimized by \code{nlm}.
It is proportional to twice the negative log-likelihood.}
\item{gradient}{The curvature of the likelihood surface at the MLE, in
the parameterization used internally by the optimization
algorithm. This parameterization is: mean vector first, followed
by the log of the diagonal elements of the inverse of the Cholesky
factor, and then the elements of the inverse of the Cholesky
factor above the main diagonal. These off-diagonal elements are
ordered by column (left to right), and then by row within column
(top to bottom).}
\item{stop.code}{The stop code returned by \code{nlm}.}
\item{iterations}{The number of iterations used by \code{nlm}.}
}
\references{
Little, R. J. A., and Rubin, D. B. (1987) \emph{Statistical Analysis
with Missing Data}. New York: Wiley.
Pinheiro, J. C., and Bates, D. M. (1996) Unconstrained
parametrizations for variance-covariance matrices.
\emph{Statistics and Computing} \bold{6}, 289--296.
Pinheiro, J. C., and Bates, D. M. (2000) \emph{Mixed-effects models in
S and S-PLUS}. New York: Springer.
}
\seealso{ \code{\link{nlm}} }
\examples{
library(mvnmle)
data(apple)
mlest(apple)
data(missvals)
mlest(missvals, iterlim=400)
}
\keyword{multivariate}
| /man/mlest.Rd | no_license | cortrudolph/mvnmle | R | false | false | 2,693 | rd | \name{mlest}
\alias{mlest}
\title{ML Estimation of Multivariate Normal Data}
\description{
Finds the maximum likelihood estimates of the mean vector and
variance-covariance matrix for multivariate normal data with
(potentially) missing values.
}
\usage{
mlest(data, ...)
}
\arguments{
\item{data}{A data frame or matrix containing multivariate normal
data. Each row should correspond to an observation, and each
column to a component of the multivariate vector. Missing values
should be coded by 'NA'.}
\item{\dots}{Optional arguments to be passed to the nlm optimization routine.}
}
\details{
The estimate of the variance-covariance matrix returned by
\code{mlest} is necessarily positive semi-definite. Internally,
\code{nlm} is used to minimize the negative log-likelihood, so
optional arguments may be passed to \code{nlm} which modify the
details of the minimization algorithm, such as \code{iterlim}. The
likelihood is specified in terms of the inverse of the Cholesky factor
of the variance-covariance matrix (see Pinheiro and Bates 2000).
\code{mlest} cannot handle data matrices with more than 50 variables.
Each variable must also be observed at least once.
}
\value{
\item{muhat}{MLE of the mean vector.}
\item{sigmahat}{MLE of the variance-covariance matrix.}
\item{value}{The objective function that is minimized by \code{nlm}.
It is proportional to twice the negative log-likelihood.}
\item{gradient}{The curvature of the likelihood surface at the MLE, in
the parameterization used internally by the optimization
algorithm. This parameterization is: mean vector first, followed
by the log of the diagonal elements of the inverse of the Cholesky
factor, and then the elements of the inverse of the Cholesky
factor above the main diagonal. These off-diagonal elements are
ordered by column (left to right), and then by row within column
(top to bottom).}
\item{stop.code}{The stop code returned by \code{nlm}.}
\item{iterations}{The number of iterations used by \code{nlm}.}
}
\references{
Little, R. J. A., and Rubin, D. B. (1987) \emph{Statistical Analysis
with Missing Data}. New York: Wiley.
Pinheiro, J. C., and Bates, D. M. (1996) Unconstrained
parametrizations for variance-covariance matrices.
\emph{Statistics and Computing} \bold{6}, 289--296.
Pinheiro, J. C., and Bates, D. M. (2000) \emph{Mixed-effects models in
S and S-PLUS}. New York: Springer.
}
\seealso{ \code{\link{nlm}} }
\examples{
library(mvnmle)
data(apple)
mlest(apple)
data(missvals)
mlest(missvals, iterlim=400)
}
\keyword{multivariate}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Report-methods.R
\name{rnb.add.section}
\alias{rnb.add.section}
\title{rnb.add.section}
\usage{
rnb.add.section(report, title, description, level = 1L, collapsed = FALSE)
}
\arguments{
\item{report}{Report to write the text to.}
\item{title}{Section header. This must be a single-element \code{character} vector.}
\item{description}{Human-readable paragraph text of the section in the form of a \code{character} vector. Elements
of this vector are concatenated without a separator to form the full description. Set this to
\code{NULL} if the section does not (yet) contain text.}
\item{level}{Section level as a single \code{integer}. It must be one of \code{1}, \code{2} or \code{3},
denoting section, subsection and sub-subsection, respectively.}
\item{collapsed}{Flag indicating if the contents of this section is to be initially collapsed. Possible values are
\code{TRUE} (the section is not visible), \code{FALSE} (default, the section is expanded) and
\code{"never"} (the section cannot be collapsed or expanded).}
}
\value{
The modified report.
}
\description{
Generates HTML code for a new section in the specified report.
}
\examples{
\donttest{
report <- createReport("example.html", "Example", init.configuration = TRUE)
report <- rnb.add.section(report, "Introduction", "This is how it's done.")
}
}
\seealso{
\code{\linkS4class{Report}} for other functions adding contents to an HTML report
}
\author{
Yassen Assenov
}
| /man/rnb.add.section.Rd | no_license | epigen/RnBeads | R | false | true | 1,514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Report-methods.R
\name{rnb.add.section}
\alias{rnb.add.section}
\title{rnb.add.section}
\usage{
rnb.add.section(report, title, description, level = 1L, collapsed = FALSE)
}
\arguments{
\item{report}{Report to write the text to.}
\item{title}{Section header. This must be a single-element \code{character} vector.}
\item{description}{Human-readable paragraph text of the section in the form of a \code{character} vector. Elements
of this vector are concatenated without a separator to form the full description. Set this to
\code{NULL} if the section does not (yet) contain text.}
\item{level}{Section level as a single \code{integer}. It must be one of \code{1}, \code{2} or \code{3},
denoting section, subsection and sub-subsection, respectively.}
\item{collapsed}{Flag indicating if the contents of this section is to be initially collapsed. Possible values are
\code{TRUE} (the section is not visible), \code{FALSE} (default, the section is expanded) and
\code{"never"} (the section cannot be collapsed or expanded).}
}
\value{
The modified report.
}
\description{
Generates HTML code for a new section in the specified report.
}
\examples{
\donttest{
report <- createReport("example.html", "Example", init.configuration = TRUE)
report <- rnb.add.section(report, "Introduction", "This is how it's done.")
}
}
\seealso{
\code{\linkS4class{Report}} for other functions adding contents to an HTML report
}
\author{
Yassen Assenov
}
|
# Vignettes that depend on internet access have been precompiled
library(knitr)
oldwd = getwd()
setwd("vignettes/")
knit("orthophotomap.Rmd.orig", "orthophotomap.Rmd")
knit("DEM.Rmd.orig", "DEM.Rmd")
knit("topodb.Rmd.orig", "topodb.Rmd")
setwd(oldwd)
| /vignettes/PRECOMPILE.R | permissive | kadyb/rgugik | R | false | false | 252 | r | # Vignettes that depend on internet access have been precompiled
library(knitr)
oldwd = getwd()
setwd("vignettes/")
knit("orthophotomap.Rmd.orig", "orthophotomap.Rmd")
knit("DEM.Rmd.orig", "DEM.Rmd")
knit("topodb.Rmd.orig", "topodb.Rmd")
setwd(oldwd)
|
context("wflow_build")
# Setup ------------------------------------------------------------------------
# start project in a tempdir
site_dir <- tempfile("test-wflow_build-")
suppressMessages(wflow_start(site_dir, change_wd = FALSE, user.name = "Test Name",
user.email = "test@email"))
on.exit(unlink(site_dir, recursive = TRUE, force = TRUE))
site_dir <- workflowr:::absolute(site_dir)
s <- wflow_status(project = site_dir)
rmd <- rownames(s$status)
stopifnot(length(rmd) > 0)
# Expected html files
html <- workflowr:::to_html(rmd, outdir = s$docs)
# Test wflow_build -------------------------------------------------------------
test_that("wflow_build builds the specified files", {
skip_on_cran()
# Dry run for file 1
expect_silent(actual <- wflow_build(rmd[1], dry_run = TRUE,
project = site_dir))
expect_identical(actual$built, rmd[1])
expect_false(fs::file_exists(html[1]))
# Build file 1
expect_message(actual <- wflow_build(rmd[1], view = FALSE, dry_run = FALSE,
project = site_dir),
rmd[1])
expect_identical(actual$built, rmd[1])
expect_true(fs::file_exists(html[1]))
# Dry run for files 2 & 3
expect_silent(actual <- wflow_build(rmd[2:3], dry_run = TRUE,
project = site_dir))
expect_identical(actual$built, rmd[2:3])
expect_false(any(fs::file_exists(html[2:3])))
# Build files 2 & 3
expect_message(actual <- wflow_build(rmd[2:3], view = FALSE, dry_run = FALSE,
project = site_dir),
rmd[2])
expect_identical(actual$built, rmd[2:3])
expect_true(all(fs::file_exists(html[2:3])))
})
test_that("wflow_build can run in 'make' mode", {
skip_on_cran()
# Reset modifications of rmd files. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd)
expect_silent(actual <- wflow_build(dry_run = TRUE, project = site_dir))
expect_identical(actual$built, rmd)
expect_true(actual$make)
expect_message(actual <- wflow_build(view = FALSE, project = site_dir),
rmd[1])
expect_identical(actual$built, rmd)
# No file should be built now.
expect_silent(actual <- wflow_build(view = FALSE, project = site_dir))
expect_identical(actual$built, character(0))
# Reset modification of file 1 only. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd[1])
expect_message(actual <- wflow_build(view = FALSE, project = site_dir),
rmd[1])
expect_identical(actual$built, rmd[1])
})
# Fixed error in which 'make' didn't work with relative paths from the root
# directory. This set of tests ensures that this won't happen again.
test_that("wflow_build can run in 'make' mode from within project", {
skip_on_cran()
cwd <- getwd()
setwd(site_dir)
on.exit(setwd(cwd))
rmd_local <- Sys.glob("analysis/*Rmd")
html_local <- workflowr:::to_html(rmd_local, outdir = "docs")
# Reset modifications of rmd files. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd_local)
expect_silent(actual <- wflow_build(dry_run = TRUE))
expect_identical(actual$built, rmd_local)
expect_true(actual$make)
expect_message(actual <- wflow_build(view = FALSE), rmd_local[1])
expect_identical(actual$built, rmd_local)
# No file should be built now.
expect_silent(actual <- wflow_build())
expect_identical(actual$built, character(0))
# Reset modification of file 1 only. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd_local[1])
expect_message(actual <- wflow_build(view = FALSE), rmd_local[1])
expect_identical(actual$built, rmd_local[1])
})
test_that("wflow_build update builds published files with modifications", {
skip_on_cran()
# Publish the files
suppressMessages(wflow_publish(files = rmd, view = FALSE, project = site_dir))
cat("edit", file = rmd[1], append = TRUE)
wflow_git_commit(rmd[1], project = site_dir)
expect_silent(actual <- wflow_build(update = TRUE, dry_run = TRUE,
project = site_dir))
expect_identical(actual$built, rmd[1])
expect_true(actual$update)
expect_message(actual <- wflow_build(update = TRUE, view = FALSE,
project = site_dir),
rmd[1])
expect_identical(actual$built, rmd[1])
})
test_that("wflow_build republish builds all published files", {
skip_on_cran()
wflow_build(view = FALSE, project = site_dir)
html_mtime_pre <- file.mtime(html)
Sys.sleep(2)
expect_message(actual <- wflow_build(view = FALSE, republish = TRUE,
project = site_dir),
rmd[1])
expect_true(actual$republish)
expect_identical(actual$built, rmd)
html_mtime_post <- file.mtime(html)
expect_true(all(html_mtime_post > html_mtime_pre))
})
# The default is to build a file in its own separate R session to avoid
# conflicts in the variable names and loaded packages between files. However, it
# may be useful for debugging to build the file directly in the R console. To
# test the difference, the file `local.Rmd` has an undefined variable, and it
# should only be able to access it from the global environment when built
# locally.
test_that("Only locally built files can access variables in the global environment", {
skip_on_cran()
fs::file_copy("files/test-wflow_build/global-variable.Rmd", s$analysis)
rmd_local <- file.path(s$analysis, "global-variable.Rmd")
html_local <- workflowr:::to_html(rmd_local, outdir = s$docs)
on.exit(fs::file_delete(c(rmd_local, html_local)))
# Create a variable in the global environment
# https://stackoverflow.com/a/25096276/2483477
env <- globalenv()
env$global_variable <- 1
stopifnot(exists("global_variable", envir = env))
expect_error(utils::capture.output(wflow_build(rmd_local, view = FALSE,
project = site_dir)),
"object 'global_variable' not found")
expect_false(fs::file_exists(html_local))
utils::capture.output(wflow_build(rmd_local, local = TRUE, view = FALSE,
project = site_dir))
expect_true(fs::file_exists(html_local))
# Remove the global variable
rm("global_variable", envir = env)
stopifnot(!exists("global_variable", envir = env))
})
# The test file local.Rmd loads the package "tools" and defines the variable
# `local_variable`.
test_that("Only locally built files add packages/variables to global environment", {
skip_on_cran()
fs::file_copy("files/test-wflow_build/local.Rmd", s$analysis)
rmd_local <- file.path(s$analysis, "local.Rmd")
html_local <- workflowr:::to_html(rmd_local, outdir = s$docs)
on.exit(fs::file_delete(c(rmd_local, html_local)))
on.exit(detach("package:tools"), add = TRUE)
# Build file externally
utils::capture.output(wflow_build(rmd_local, view = FALSE,
project = site_dir))
expect_false("package:tools" %in% search())
expect_false(exists("local_variable", envir = .GlobalEnv))
# Build file locally
utils::capture.output(wflow_build(rmd_local, local = TRUE, view = FALSE,
project = site_dir))
expect_true("package:tools" %in% search())
expect_true(exists("local_variable", envir = .GlobalEnv))
# Remove `local_variable`
rm("local_variable", envir = .GlobalEnv)
stopifnot(!exists("global_variable", envir = .GlobalEnv))
})
test_that("wflow_build only builds files starting with _ when specified", {
skip_on_cran()
rmd_ignore <- file.path(s$analysis, "_ignore.Rmd")
fs::file_copy("files/example.Rmd", rmd_ignore)
html_ignore <- workflowr:::to_html(rmd_ignore, outdir = s$docs)
# Ignored by default "make"-mode
expect_silent(actual <- wflow_build(view = FALSE, project = site_dir))
expect_false(fs::file_exists(html_ignore))
expect_equal(length(actual$built), 0)
# Built when directly specified
expect_message(actual <- wflow_build(rmd_ignore, view = FALSE,
project = site_dir),
rmd_ignore)
expect_true(fs::file_exists(html_ignore))
expect_identical(actual$built, rmd_ignore)
})
test_that("wflow_build uses tempdir() to save log files by default", {
skip_on_cran()
expected <- workflowr:::absolute(file.path(tempdir(), "workflowr"))
actual <- wflow_build(rmd[1], view = FALSE, project = site_dir)
expect_identical(expected, actual$log_dir)
})
test_that("wflow_build accepts custom directory to save log files", {
skip_on_cran()
expected <- workflowr:::absolute(file.path(site_dir, "log"))
actual <- wflow_build(rmd[1], view = FALSE, log_dir = expected,
project = site_dir)
expect_true(fs::dir_exists(expected))
expect_identical(expected, actual$log_dir)
})
test_that("wflow_build removes unused figure files if clean_fig_files = TRUE", {
skip_on_cran()
# Build a file that has 2 plots from 2 unnamed chunks
file_w_figs <- file.path(s$analysis, "fig.Rmd")
fs::file_copy("files/test-wflow_build/figure-v01.Rmd", file_w_figs)
build_v01 <- wflow_build(file_w_figs, view = FALSE, clean_fig_files = TRUE,
project = site_dir)
figs_analysis_v01 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v01))) # moved by wflow_site()
figs_docs_v01 <- file.path(s$docs, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_true(all(fs::file_exists(figs_docs_v01)))
# Update the file such that the previous 2 chunks are now named, plus add a
# 3rd plot chunk
fs::file_copy("files/test-wflow_build/figure-v02.Rmd", file_w_figs, overwrite = TRUE)
build_v02 <- wflow_build(file_w_figs, view = FALSE, clean_fig_files = TRUE,
project = site_dir)
expect_false(all(fs::file_exists(figs_analysis_v01)))
expect_false(all(fs::file_exists(figs_docs_v01)))
figs_analysis_v02 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v02))) # moved by wflow_site()
figs_docs_v02 <- file.path(s$docs, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_true(all(fs::file_exists(figs_docs_v02)))
# Cleanup
wflow_remove(file_w_figs, project = site_dir)
})
test_that("wflow_build does not remove unused figure files if clean_fig_files = FALSE", {
skip_on_cran()
# Build a file that has 2 plots from 2 unnamed chunks
file_w_figs <- file.path(s$analysis, "fig.Rmd")
fs::file_copy("files/test-wflow_build/figure-v01.Rmd", file_w_figs)
build_v01 <- wflow_build(file_w_figs, view = FALSE, project = site_dir)
figs_analysis_v01 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v01))) # moved by wflow_site()
figs_docs_v01 <- file.path(s$docs, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_true(all(fs::file_exists(figs_docs_v01)))
# Update the file such that the previous 2 chunks are now named, plus add a
# 3rd plot chunk
fs::file_copy("files/test-wflow_build/figure-v02.Rmd", file_w_figs, overwrite = TRUE)
build_v02 <- wflow_build(file_w_figs, view = FALSE, project = site_dir)
expect_false(all(fs::file_exists(figs_analysis_v01)))
# This line is the critical difference from the previous test. The outdated
# figure files are still there since clean_fig_files = FALSE by default.
expect_true(all(fs::file_exists(figs_docs_v01)))
figs_analysis_v02 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v02))) # moved by wflow_site()
figs_docs_v02 <- file.path(s$docs, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_true(all(fs::file_exists(figs_docs_v02)))
# Cleanup
wflow_remove(file_w_figs, project = site_dir)
})
test_that("wflow_build deletes cache when delete_cache = TRUE", {
skip_on_cran()
skip_on_os("windows") # Avoid errors due to long filenames
# Build a file that has cached chunks
file_w_cache <- file.path(s$analysis, "cache.Rmd")
fs::file_copy("files/test-wflow_html/cache-all-chunks.Rmd", file_w_cache)
build_v01 <- wflow_build(file_w_cache, view = FALSE, project = site_dir)
dir_cache <- fs::path_ext_remove(file_w_cache)
dir_cache <- glue::glue("{dir_cache}_cache")
expect_true(fs::dir_exists(dir_cache))
# By default, cache directory is not affected
dir_cache_mod_pre <- fs::file_info(dir_cache)$modification_time
expect_message(
build_v02 <- wflow_build(file_w_cache, view = FALSE, project = site_dir),
" - Note: This file has a cache directory"
)
expect_false(build_v02$delete_cache)
expect_true(fs::dir_exists(dir_cache))
dir_cache_mod_post <- fs::file_info(dir_cache)$modification_time
expect_equal(dir_cache_mod_post, dir_cache_mod_pre)
# delete_cache deletes cache directory prior to building (it gets re-created)
dir_cache_mod_pre <- fs::file_info(dir_cache)$modification_time
expect_message(
build_v03 <- wflow_build(file_w_cache, view = FALSE, delete_cache = TRUE,
project = site_dir),
" - Note: Deleted the cache directory before building"
)
expect_true(build_v03$delete_cache)
expect_true(fs::dir_exists(dir_cache))
dir_cache_mod_post <- fs::file_info(dir_cache)$modification_time
expect_true(dir_cache_mod_post > dir_cache_mod_pre)
# Cleanup
wflow_remove(file_w_cache, project = site_dir)
})
test_that("wflow_build can display build log directly in R console with verbose", {
skip_on_cran()
x <- utils::capture.output(
build <- wflow_build(rmd[2], view = FALSE, verbose = TRUE, project = site_dir))
expect_true(build$verbose)
expect_true(length(x) > 0)
})
test_that("wflow_build reports working and knit directories", {
skip_on_cran()
expect_message(
wflow_build(rmd[2], view = FALSE, project = site_dir),
getwd())
expect_message(
wflow_build(rmd[2], view = FALSE, project = site_dir),
sprintf("Building %s in %s", rmd[2], site_dir))
# Should not output knit directory if it's the same as working directory
cwd <- getwd()
on.exit(setwd(cwd))
setwd(site_dir)
site_dir_new <- getwd()
rmd_new <- "analysis/index.Rmd"
expect_message(
wflow_build(rmd_new, view = FALSE, project = site_dir),
site_dir_new)
expect_message(
wflow_build(rmd_new, view = FALSE, project = site_dir),
sprintf("Building %s", rmd_new))
})
test_that("wflow_build can combine files to build using the intersection of the provided args", {
skip_on_cran()
# Reset modification of first rmd file. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(1)
system2("touch", args = rmd[1])
# With combine == "or" (default), make-mode should be build rmd[1] in addition
# to rmd[2]
actual <- wflow_build(files = rmd[2], make = TRUE, combine = "or",
dry_run = TRUE, project = site_dir)
# rmd[2] is first since it was specified in the argument `files`
expect_identical(actual$built, rmd[2:1])
# With combine == "and" (default), make-mode should only consider rmd[2], and
# thus build no files
actual <- wflow_build(files = rmd[2], make = TRUE, combine = "and",
dry_run = TRUE, project = site_dir)
expect_identical(actual$built, character(0))
})
# Test error handling ----------------------------------------------------------
test_that("wflow_build fails early for bad files", {
expect_error(wflow_build(character(), project = site_dir),
"vector with length 0")
expect_error(wflow_build(s$analysis, project = site_dir),
"files cannot include a path to a directory")
expect_error(wflow_build("", project = site_dir),
"Not all files exist. Check the paths to the files")
fs::file_create(file.path(s$analysis, "invalid.R"))
expect_error(wflow_build(file.path(s$analysis, "invalid.R"), project = site_dir),
"Only files with extension Rmd or rmd")
})
test_that("wflow_build throws error if given directory input", {
d <- file.path(site_dir, "toplevel")
fs::dir_create(d)
on.exit(unlink(d, recursive = TRUE, force = TRUE))
expect_error(wflow_build(d, project = site_dir),
"files cannot include a path to a directory")
})
test_that("wflow_build fails if file outside of analysis/", {
# If pandoc is *not* installed, the error message will be about this.
if(!rmarkdown::pandoc_available())
skip("skipped because pandoc is *not* installed")
rmd_outside <- file.path(s$root, "outside.Rmd")
fs::file_create(rmd_outside)
# When passing one invalid file
expect_error(wflow_build(rmd_outside, project = site_dir),
"Only files in the analysis directory can be built with wflow_build.")
# When passing one invalid file with other valid files
expect_error(wflow_build(c(rmd, rmd_outside), project = site_dir),
"Only files in the analysis directory can be built with wflow_build.")
})
test_that("wflow_build throws error if pandoc is not installed", {
if(rmarkdown::pandoc_available())
skip("skipped because pandoc is installed")
expect_error(wflow_build(project = site_dir),
'Pandoc is not installed.')
})
test_that("wflow_build throws error if combine=and but no files specified", {
# If pandoc isn't available, that error is thrown first
if(!rmarkdown::pandoc_available())
skip("skipped because pandoc is not installed")
expect_error(wflow_build(combine = "and", dry_run = TRUE, project = site_dir),
"can only be used when explicitly specifying Rmd files")
expect_error(wflow_build(republish = TRUE, combine = "and", dry_run = TRUE,
project = site_dir),
"can only be used when explicitly specifying Rmd files")
})
| /tests/testthat/test-wflow_build.R | permissive | workflowr/workflowr | R | false | false | 18,789 | r | context("wflow_build")
# Setup ------------------------------------------------------------------------
# start project in a tempdir
site_dir <- tempfile("test-wflow_build-")
suppressMessages(wflow_start(site_dir, change_wd = FALSE, user.name = "Test Name",
user.email = "test@email"))
on.exit(unlink(site_dir, recursive = TRUE, force = TRUE))
site_dir <- workflowr:::absolute(site_dir)
s <- wflow_status(project = site_dir)
rmd <- rownames(s$status)
stopifnot(length(rmd) > 0)
# Expected html files
html <- workflowr:::to_html(rmd, outdir = s$docs)
# Test wflow_build -------------------------------------------------------------
test_that("wflow_build builds the specified files", {
skip_on_cran()
# Dry run for file 1
expect_silent(actual <- wflow_build(rmd[1], dry_run = TRUE,
project = site_dir))
expect_identical(actual$built, rmd[1])
expect_false(fs::file_exists(html[1]))
# Build file 1
expect_message(actual <- wflow_build(rmd[1], view = FALSE, dry_run = FALSE,
project = site_dir),
rmd[1])
expect_identical(actual$built, rmd[1])
expect_true(fs::file_exists(html[1]))
# Dry run for files 2 & 3
expect_silent(actual <- wflow_build(rmd[2:3], dry_run = TRUE,
project = site_dir))
expect_identical(actual$built, rmd[2:3])
expect_false(any(fs::file_exists(html[2:3])))
# Build files 2 & 3
expect_message(actual <- wflow_build(rmd[2:3], view = FALSE, dry_run = FALSE,
project = site_dir),
rmd[2])
expect_identical(actual$built, rmd[2:3])
expect_true(all(fs::file_exists(html[2:3])))
})
test_that("wflow_build can run in 'make' mode", {
skip_on_cran()
# Reset modifications of rmd files. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd)
expect_silent(actual <- wflow_build(dry_run = TRUE, project = site_dir))
expect_identical(actual$built, rmd)
expect_true(actual$make)
expect_message(actual <- wflow_build(view = FALSE, project = site_dir),
rmd[1])
expect_identical(actual$built, rmd)
# No file should be built now.
expect_silent(actual <- wflow_build(view = FALSE, project = site_dir))
expect_identical(actual$built, character(0))
# Reset modification of file 1 only. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd[1])
expect_message(actual <- wflow_build(view = FALSE, project = site_dir),
rmd[1])
expect_identical(actual$built, rmd[1])
})
# Fixed error in which 'make' didn't work with relative paths from the root
# directory. This set of tests ensures that this won't happen again.
test_that("wflow_build can run in 'make' mode from within project", {
skip_on_cran()
cwd <- getwd()
setwd(site_dir)
on.exit(setwd(cwd))
rmd_local <- Sys.glob("analysis/*Rmd")
html_local <- workflowr:::to_html(rmd_local, outdir = "docs")
# Reset modifications of rmd files. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd_local)
expect_silent(actual <- wflow_build(dry_run = TRUE))
expect_identical(actual$built, rmd_local)
expect_true(actual$make)
expect_message(actual <- wflow_build(view = FALSE), rmd_local[1])
expect_identical(actual$built, rmd_local)
# No file should be built now.
expect_silent(actual <- wflow_build())
expect_identical(actual$built, character(0))
# Reset modification of file 1 only. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(2)
system2("touch", args = rmd_local[1])
expect_message(actual <- wflow_build(view = FALSE), rmd_local[1])
expect_identical(actual$built, rmd_local[1])
})
test_that("wflow_build update builds published files with modifications", {
skip_on_cran()
# Publish the files
suppressMessages(wflow_publish(files = rmd, view = FALSE, project = site_dir))
cat("edit", file = rmd[1], append = TRUE)
wflow_git_commit(rmd[1], project = site_dir)
expect_silent(actual <- wflow_build(update = TRUE, dry_run = TRUE,
project = site_dir))
expect_identical(actual$built, rmd[1])
expect_true(actual$update)
expect_message(actual <- wflow_build(update = TRUE, view = FALSE,
project = site_dir),
rmd[1])
expect_identical(actual$built, rmd[1])
})
test_that("wflow_build republish builds all published files", {
skip_on_cran()
wflow_build(view = FALSE, project = site_dir)
html_mtime_pre <- file.mtime(html)
Sys.sleep(2)
expect_message(actual <- wflow_build(view = FALSE, republish = TRUE,
project = site_dir),
rmd[1])
expect_true(actual$republish)
expect_identical(actual$built, rmd)
html_mtime_post <- file.mtime(html)
expect_true(all(html_mtime_post > html_mtime_pre))
})
# The default is to build a file in its own separate R session to avoid
# conflicts in the variable names and loaded packages between files. However, it
# may be useful for debugging to build the file directly in the R console. To
# test the difference, the file `local.Rmd` has an undefined variable, and it
# should only be able to access it from the global environment when built
# locally.
test_that("Only locally built files can access variables in the global environment", {
skip_on_cran()
fs::file_copy("files/test-wflow_build/global-variable.Rmd", s$analysis)
rmd_local <- file.path(s$analysis, "global-variable.Rmd")
html_local <- workflowr:::to_html(rmd_local, outdir = s$docs)
on.exit(fs::file_delete(c(rmd_local, html_local)))
# Create a variable in the global environment
# https://stackoverflow.com/a/25096276/2483477
env <- globalenv()
env$global_variable <- 1
stopifnot(exists("global_variable", envir = env))
expect_error(utils::capture.output(wflow_build(rmd_local, view = FALSE,
project = site_dir)),
"object 'global_variable' not found")
expect_false(fs::file_exists(html_local))
utils::capture.output(wflow_build(rmd_local, local = TRUE, view = FALSE,
project = site_dir))
expect_true(fs::file_exists(html_local))
# Remove the global variable
rm("global_variable", envir = env)
stopifnot(!exists("global_variable", envir = env))
})
# The test file local.Rmd loads the package "tools" and defines the variable
# `local_variable`.
test_that("Only locally built files add packages/variables to global environment", {
skip_on_cran()
fs::file_copy("files/test-wflow_build/local.Rmd", s$analysis)
rmd_local <- file.path(s$analysis, "local.Rmd")
html_local <- workflowr:::to_html(rmd_local, outdir = s$docs)
on.exit(fs::file_delete(c(rmd_local, html_local)))
on.exit(detach("package:tools"), add = TRUE)
# Build file externally
utils::capture.output(wflow_build(rmd_local, view = FALSE,
project = site_dir))
expect_false("package:tools" %in% search())
expect_false(exists("local_variable", envir = .GlobalEnv))
# Build file locally
utils::capture.output(wflow_build(rmd_local, local = TRUE, view = FALSE,
project = site_dir))
expect_true("package:tools" %in% search())
expect_true(exists("local_variable", envir = .GlobalEnv))
# Remove `local_variable`
rm("local_variable", envir = .GlobalEnv)
stopifnot(!exists("global_variable", envir = .GlobalEnv))
})
test_that("wflow_build only builds files starting with _ when specified", {
skip_on_cran()
rmd_ignore <- file.path(s$analysis, "_ignore.Rmd")
fs::file_copy("files/example.Rmd", rmd_ignore)
html_ignore <- workflowr:::to_html(rmd_ignore, outdir = s$docs)
# Ignored by default "make"-mode
expect_silent(actual <- wflow_build(view = FALSE, project = site_dir))
expect_false(fs::file_exists(html_ignore))
expect_equal(length(actual$built), 0)
# Built when directly specified
expect_message(actual <- wflow_build(rmd_ignore, view = FALSE,
project = site_dir),
rmd_ignore)
expect_true(fs::file_exists(html_ignore))
expect_identical(actual$built, rmd_ignore)
})
test_that("wflow_build uses tempdir() to save log files by default", {
skip_on_cran()
expected <- workflowr:::absolute(file.path(tempdir(), "workflowr"))
actual <- wflow_build(rmd[1], view = FALSE, project = site_dir)
expect_identical(expected, actual$log_dir)
})
test_that("wflow_build accepts custom directory to save log files", {
skip_on_cran()
expected <- workflowr:::absolute(file.path(site_dir, "log"))
actual <- wflow_build(rmd[1], view = FALSE, log_dir = expected,
project = site_dir)
expect_true(fs::dir_exists(expected))
expect_identical(expected, actual$log_dir)
})
test_that("wflow_build removes unused figure files if clean_fig_files = TRUE", {
skip_on_cran()
# Build a file that has 2 plots from 2 unnamed chunks
file_w_figs <- file.path(s$analysis, "fig.Rmd")
fs::file_copy("files/test-wflow_build/figure-v01.Rmd", file_w_figs)
build_v01 <- wflow_build(file_w_figs, view = FALSE, clean_fig_files = TRUE,
project = site_dir)
figs_analysis_v01 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v01))) # moved by wflow_site()
figs_docs_v01 <- file.path(s$docs, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_true(all(fs::file_exists(figs_docs_v01)))
# Update the file such that the previous 2 chunks are now named, plus add a
# 3rd plot chunk
fs::file_copy("files/test-wflow_build/figure-v02.Rmd", file_w_figs, overwrite = TRUE)
build_v02 <- wflow_build(file_w_figs, view = FALSE, clean_fig_files = TRUE,
project = site_dir)
expect_false(all(fs::file_exists(figs_analysis_v01)))
expect_false(all(fs::file_exists(figs_docs_v01)))
figs_analysis_v02 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v02))) # moved by wflow_site()
figs_docs_v02 <- file.path(s$docs, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_true(all(fs::file_exists(figs_docs_v02)))
# Cleanup
wflow_remove(file_w_figs, project = site_dir)
})
test_that("wflow_build does not remove unused figure files if clean_fig_files = FALSE", {
skip_on_cran()
# Build a file that has 2 plots from 2 unnamed chunks
file_w_figs <- file.path(s$analysis, "fig.Rmd")
fs::file_copy("files/test-wflow_build/figure-v01.Rmd", file_w_figs)
build_v01 <- wflow_build(file_w_figs, view = FALSE, project = site_dir)
figs_analysis_v01 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v01))) # moved by wflow_site()
figs_docs_v01 <- file.path(s$docs, "figure", basename(file_w_figs),
c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
expect_true(all(fs::file_exists(figs_docs_v01)))
# Update the file such that the previous 2 chunks are now named, plus add a
# 3rd plot chunk
fs::file_copy("files/test-wflow_build/figure-v02.Rmd", file_w_figs, overwrite = TRUE)
build_v02 <- wflow_build(file_w_figs, view = FALSE, project = site_dir)
expect_false(all(fs::file_exists(figs_analysis_v01)))
# This line is the critical difference from the previous test. The outdated
# figure files are still there since clean_fig_files = FALSE by default.
expect_true(all(fs::file_exists(figs_docs_v01)))
figs_analysis_v02 <- file.path(s$analysis, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_false(all(fs::file_exists(figs_analysis_v02))) # moved by wflow_site()
figs_docs_v02 <- file.path(s$docs, "figure", basename(file_w_figs),
c("named1-1.png", "named2-1.png", "named3-1.png"))
expect_true(all(fs::file_exists(figs_docs_v02)))
# Cleanup
wflow_remove(file_w_figs, project = site_dir)
})
test_that("wflow_build deletes cache when delete_cache = TRUE", {
skip_on_cran()
skip_on_os("windows") # Avoid errors due to long filenames
# Build a file that has cached chunks
file_w_cache <- file.path(s$analysis, "cache.Rmd")
fs::file_copy("files/test-wflow_html/cache-all-chunks.Rmd", file_w_cache)
build_v01 <- wflow_build(file_w_cache, view = FALSE, project = site_dir)
dir_cache <- fs::path_ext_remove(file_w_cache)
dir_cache <- glue::glue("{dir_cache}_cache")
expect_true(fs::dir_exists(dir_cache))
# By default, cache directory is not affected
dir_cache_mod_pre <- fs::file_info(dir_cache)$modification_time
expect_message(
build_v02 <- wflow_build(file_w_cache, view = FALSE, project = site_dir),
" - Note: This file has a cache directory"
)
expect_false(build_v02$delete_cache)
expect_true(fs::dir_exists(dir_cache))
dir_cache_mod_post <- fs::file_info(dir_cache)$modification_time
expect_equal(dir_cache_mod_post, dir_cache_mod_pre)
# delete_cache deletes cache directory prior to building (it gets re-created)
dir_cache_mod_pre <- fs::file_info(dir_cache)$modification_time
expect_message(
build_v03 <- wflow_build(file_w_cache, view = FALSE, delete_cache = TRUE,
project = site_dir),
" - Note: Deleted the cache directory before building"
)
expect_true(build_v03$delete_cache)
expect_true(fs::dir_exists(dir_cache))
dir_cache_mod_post <- fs::file_info(dir_cache)$modification_time
expect_true(dir_cache_mod_post > dir_cache_mod_pre)
# Cleanup
wflow_remove(file_w_cache, project = site_dir)
})
test_that("wflow_build can display build log directly in R console with verbose", {
skip_on_cran()
x <- utils::capture.output(
build <- wflow_build(rmd[2], view = FALSE, verbose = TRUE, project = site_dir))
expect_true(build$verbose)
expect_true(length(x) > 0)
})
test_that("wflow_build reports working and knit directories", {
skip_on_cran()
expect_message(
wflow_build(rmd[2], view = FALSE, project = site_dir),
getwd())
expect_message(
wflow_build(rmd[2], view = FALSE, project = site_dir),
sprintf("Building %s in %s", rmd[2], site_dir))
# Should not output knit directory if it's the same as working directory
cwd <- getwd()
on.exit(setwd(cwd))
setwd(site_dir)
site_dir_new <- getwd()
rmd_new <- "analysis/index.Rmd"
expect_message(
wflow_build(rmd_new, view = FALSE, project = site_dir),
site_dir_new)
expect_message(
wflow_build(rmd_new, view = FALSE, project = site_dir),
sprintf("Building %s", rmd_new))
})
test_that("wflow_build can combine files to build using the intersection of the provided args", {
skip_on_cran()
# Reset modification of first rmd file. It is important to wait a couple
# seconds so that the modification times are different.
Sys.sleep(1)
system2("touch", args = rmd[1])
# With combine == "or" (default), make-mode should be build rmd[1] in addition
# to rmd[2]
actual <- wflow_build(files = rmd[2], make = TRUE, combine = "or",
dry_run = TRUE, project = site_dir)
# rmd[2] is first since it was specified in the argument `files`
expect_identical(actual$built, rmd[2:1])
# With combine == "and" (default), make-mode should only consider rmd[2], and
# thus build no files
actual <- wflow_build(files = rmd[2], make = TRUE, combine = "and",
dry_run = TRUE, project = site_dir)
expect_identical(actual$built, character(0))
})
# Test error handling ----------------------------------------------------------
test_that("wflow_build fails early for bad files", {
expect_error(wflow_build(character(), project = site_dir),
"vector with length 0")
expect_error(wflow_build(s$analysis, project = site_dir),
"files cannot include a path to a directory")
expect_error(wflow_build("", project = site_dir),
"Not all files exist. Check the paths to the files")
fs::file_create(file.path(s$analysis, "invalid.R"))
expect_error(wflow_build(file.path(s$analysis, "invalid.R"), project = site_dir),
"Only files with extension Rmd or rmd")
})
test_that("wflow_build throws error if given directory input", {
d <- file.path(site_dir, "toplevel")
fs::dir_create(d)
on.exit(unlink(d, recursive = TRUE, force = TRUE))
expect_error(wflow_build(d, project = site_dir),
"files cannot include a path to a directory")
})
test_that("wflow_build fails if file outside of analysis/", {
# If pandoc is *not* installed, the error message will be about this.
if(!rmarkdown::pandoc_available())
skip("skipped because pandoc is *not* installed")
rmd_outside <- file.path(s$root, "outside.Rmd")
fs::file_create(rmd_outside)
# When passing one invalid file
expect_error(wflow_build(rmd_outside, project = site_dir),
"Only files in the analysis directory can be built with wflow_build.")
# When passing one invalid file with other valid files
expect_error(wflow_build(c(rmd, rmd_outside), project = site_dir),
"Only files in the analysis directory can be built with wflow_build.")
})
test_that("wflow_build throws error if pandoc is not installed", {
if(rmarkdown::pandoc_available())
skip("skipped because pandoc is installed")
expect_error(wflow_build(project = site_dir),
'Pandoc is not installed.')
})
test_that("wflow_build throws error if combine=and but no files specified", {
# If pandoc isn't available, that error is thrown first
if(!rmarkdown::pandoc_available())
skip("skipped because pandoc is not installed")
expect_error(wflow_build(combine = "and", dry_run = TRUE, project = site_dir),
"can only be used when explicitly specifying Rmd files")
expect_error(wflow_build(republish = TRUE, combine = "and", dry_run = TRUE,
project = site_dir),
"can only be used when explicitly specifying Rmd files")
})
|
#' Collapse a baranek.
#'
#' @param pudelko pudelko. A box (pudelko) containing a baranek.
#'
#' @return baranek
#'
#' @examples
#' baranek <- collapse(pudelko)
#' @export
collapse<-function(pudelko) {
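# Scan every object in the global environment for a pudelko whose id matches the
# one supplied; when it holds a baranek, randomly set whether the baranek is
# alive (czyZyje) and return it. Returns NULL if no matching pudelko is found.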
ret <- NULL
for(name in ls(envir=globalenv())) {
o <- get(name, globalenv())
if((class(o) == "pudelko") && (o$id == pudelko$id) && (!is.null(o$zawartosc)) && (class(o$zawartosc)=="baranek")) {
o$zawartosc$czyZyje <- sample(c(TRUE, FALSE), 1)
ret <- o$zawartosc
break
}
}
ret
}
| /R/collapse.R | no_license | wernerolaf/agneau | R | false | false | 486 | r | #' Collapse baranka.
#'
#' @param pudelko pudelko. A box (pudelko) containing a baranek.
#'
#' @return baranek
#'
#' @examples
#' baranek <- collapse(pudelko)
#' @export
collapse<-function(pudelko) {
ret <- NULL
for(name in ls(envir=globalenv())) {
o <- get(name, globalenv())
if((class(o) == "pudelko") && (o$id == pudelko$id) && (!is.null(o$zawartosc)) && (class(o$zawartosc)=="baranek")) {
o$zawartosc$czyZyje <- sample(c(TRUE, FALSE), 1)
ret <- o$zawartosc
break
}
}
ret
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/birk.R
\docType{package}
\encoding{UTF-8}
\name{birk}
\alias{birk}
\alias{birk-package}
\title{MA Birk's Functions}
\description{
Collection of tools to make R more convenient. Includes tools to summarize data using statistics not available with base R and manipulate objects for analyses.
}
\author{
Matthew A. Birk, \email{matthewabirk@gmail.com}
}
| /birk_2.1.2/birk.Rcheck/00_pkg_src/birk/man/birk.Rd | no_license | matthewabirk/birk | R | false | true | 430 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/birk.R
\docType{package}
\encoding{UTF-8}
\name{birk}
\alias{birk}
\alias{birk-package}
\title{MA Birk's Functions}
\description{
Collection of tools to make R more convenient. Includes tools to summarize data using statistics not available with base R and manipulate objects for analyses.
}
\author{
Matthew A. Birk, \email{matthewabirk@gmail.com}
}
|
\name{camel.tiger}
\alias{camel.tiger}
\title{
Tuning Insensitive Graph Estimation and Regression
}
\description{
The function "camel.cmr" implements TIGER and Calibrated CLIME using L1 norm regularization}
\usage{
camel.tiger(data, lambda = NULL, nlambda = NULL, lambda.min.ratio = NULL,
method = "slasso", sym = "or", shrink=NULL, prec = 1e-4, mu = 0.01,
max.ite = 1e4, standardize = FALSE, correlation = FALSE,
perturb = TRUE, verbose = TRUE)
}
\arguments{
\item{data}{There are 2 options for \code{"clime"}: (1) \code{data} is an \code{n} by \code{d} data matrix (2) a \code{d} by \code{d} sample covariance matrix. The program automatically identifies the input matrix by checking the symmetry. (\code{n} is the sample size and \code{d} is the dimension). For \code{"slasso"}, covariance input is not supported.}
\item{lambda}{A sequence of decreasing positive numbers to control the regularization. Typical usage is to leave the input \code{lambda = NULL} and have the program compute its own \code{lambda} sequence based on \code{nlambda} and \code{lambda.min.ratio}. Users can also specify a sequence to override this. Default value is from \eqn{lambda.max} to \code{lambda.min.ratio*lambda.max}. For TIGER, the default value of \eqn{lambda.max} is \eqn{\pi\sqrt{\log(d)/n}}. For CLIME, the default value of \eqn{lambda.max} is the minimum regularization parameter, which yields all-zero off-diagonal estimates.}
\item{nlambda}{The number of values used in \code{lambda}. Default value is 10.}
\item{lambda.min.ratio}{The smallest value for \code{lambda}, as a fraction of the upper bound (\code{MAX}) of the regularization parameter. The program can automatically generate \code{lambda} as a sequence of length = \code{nlambda} starting from \code{MAX} to \code{lambda.min.ratio*MAX} in log scale. The default value is \code{0.25} for TIGER and \code{0.5} for CLIME.}
\item{method}{TIGER is applied if \code{method = "slasso"}, CLIME is applied if \code{method="clime"}. Default value is \code{"slasso"}.}
\item{sym}{Symmetrization of output graphs. If \code{sym = "and"}, the edge between node \code{i} and node \code{j} is selected ONLY when both node \code{i} and node \code{j} are selected as neighbors for each other. If \code{sym = "or"}, the edge is selected when either node \code{i} or node \code{j} is selected as the neighbor for each other. The default value is \code{"or"}.}
\item{shrink}{Shrinkage of regularization parameter based on precision of estimation. The default value is 1.5 if \code{method = "clime"} and the default value is 0 if \code{method="slasso"} or \code{method = "aclime"}.}
\item{prec}{Stopping criterion. The default value is 1e-4.}
\item{mu}{The smoothing parameter. The default value is 0.01.}
\item{max.ite}{The iteration limit. The default value is 1e4.}
\item{standardize}{All variables are standardized to have mean zero and standard deviation one if \code{standardize = TRUE}. The default value is \code{FALSE}.}
\item{correlation}{Correlation matrix is used as the input of \code{Sigma} for \code{method = "clime"} if \code{correlation = TRUE}. The default value is \code{FALSE}.}
\item{perturb}{A positive value is added to the diagonal of \code{Sigma} to guarantee that \code{Sigma} is positive definite if \code{perturb = TRUE}. The user can also specify a numeric value for \code{perturb}. The default value is TRUE.}
\item{verbose}{Tracing information is disabled if \code{verbose = FALSE}. The default value is \code{TRUE}.}
}
\details{
TIGER and Calibrated CLIME adjust the regularization with respect to each column of the sparse precision matrix. Thus they achieve both improved finite-sample performance and tuning insensitivity.
}
\value{
An object with S3 class \code{"tiger"} is returned:
\item{data}{
The \code{n} by \code{d} data matrix or \code{d} by \code{d} sample covariance matrix from the input.
}
\item{cov.input}{
An indicator of the sample covariance.
}
\item{lambda}{
The sequence of regularization parameters \code{lambda} used in the program.
}
\item{nlambda}{
The number of values used in \code{lambda}.
}
\item{icov}{
A list of \code{d} by \code{d} precision matrices corresponding to regularization parameters.
}
\item{sym}{
The \code{sym} from the input.
}
\item{method}{
The \code{method} from the input.
}
\item{path}{
A list of \code{d} by \code{d} adjacency matrices of estimated graphs as a graph path corresponding to \code{lambda}.
}
\item{sparsity}{
The sparsity levels of the graph path.
}
\item{ite}{
If \code{method = "clime"}, it is a list of two matrices where ite[[1]] is the number of external iterations and ite[[2]] is the number of internal iterations with the entry of (i,j) as the number of iteration of i-th column and j-th lambda. If \code{method="slasso"}, it is a matrix of iteration with the entry of (i,j) as the number of iteration of i-th column and j-th lambda.
}
\item{df}{
It is a \code{d} by \code{nlambda} matrix. Each row contains the number of nonzero coefficients along the lasso solution path.
}
\item{standardize}{The \code{standardize} from the input.}
\item{correlation}{The \code{correlation} from the input.}
\item{perturb}{The \code{perturb} from the input.}
\item{verbose}{The \code{verbose} from the input.}
}
\author{
Xingguo Li, Tuo Zhao, and Han Liu \cr
Maintainer: Xingguo Li <xingguo.leo@gmail.com>
}
\references{
1. T. Cai, W. Liu and X. Luo. A constrained L1 minimization approach to sparse precision matrix estimation. \emph{Journal of the American Statistical Association}, 2011. \cr
2. H. Liu and L. Wang. TIGER: A tuning-insensitive approach for optimally estimating large undirected graphs. \emph{Technical Report}, 2012. \cr
}
\seealso{
\code{\link{camel-package}}, \code{\link{camel.tiger.generator}}, \code{\link{camel.tiger.select}}, \code{\link{camel.plot}}, \code{\link{camel.tiger.roc}}, \code{\link{plot.tiger}}, \code{\link{plot.select}}, \code{\link{plot.roc}}, \code{\link{plot.sim}}, \code{\link{print.tiger}}, \code{\link{print.select}}, \code{\link{print.roc}} and \code{\link{print.sim}}.
}
\examples{
## generating data
n = 100
d = 100
D = camel.tiger.generator(n=n,d=d,graph="hub",g=10)
plot(D)
## sparse precision matrix estimation with method "clime"
out1 = camel.tiger(D$data, method = "clime")
plot(out1)
camel.plot(out1$path[[7]])
## sparse precision matrix estimation with method "slasso"
out2 = camel.tiger(D$data, method = "slasso")
plot(out2)
camel.plot(out2$path[[4]])
}
| /man/camel.tiger.Rd | no_license | cran/camel | R | false | false | 6,577 | rd |
tar_test("command$update_string()", {
command <- command_init(quote(a <- b + c))
expect_equal(command$string, "expression(a <- b + c)")
})
tar_test("command$hash", {
command <- command_init(quote(a <- b + c))
out <- command$hash
expect_true(is.character(out))
expect_equal(length(out), 1L)
expect_equal(nchar(out), 16L)
})
tar_test("command_produce_build()", {
command <- command_init(expr = quote(a <- b + c))
b <- 1L
c <- 2L
envir <- environment()
build <- command_produce_build(command, envir)
expect_silent(build_validate(build))
expect_equal(build$object, 3L)
expect_true(is.numeric(build$metrics$seconds))
})
tar_test("command$produce_build() uses seed", {
x <- command_init(expr = quote(sample.int(1e9, 1L)))
x$seed <- 0L
sample_with_seed <- function(seed) {
# Borrowed from https://github.com/r-lib/withr/blob/main/R/seed.R
# under the MIT license. See the NOTICE file
# in the targets package source.
old_seed <- .GlobalEnv[[".Random.seed"]]
set.seed(seed)
on.exit(restore_seed(old_seed))
sample.int(1e9, 1L)
}
exp0 <- sample_with_seed(0L)
for (i in seq_len(2)) {
out <- command_produce_build(x, environment())$object
expect_equal(out, exp0)
}
x$seed <- 1L
exp1 <- sample_with_seed(1L)
for (i in seq_len(2)) {
out <- command_produce_build(x, environment())$object
expect_equal(out, exp1)
}
expect_false(exp0 == exp1)
})
tar_test("command_init(deps)", {
command <- command_init(quote(a <- b + c), deps = "custom")
expect_equal(command$deps, "custom")
})
tar_test("command_init() with automatic deps", {
command <- command_init(quote(a <- b + c))
expect_true(all(c("b", "c") %in% command$deps))
expect_false("a" %in% command$deps)
})
tar_test("command_init() inspects formulas", {
command <- command_init(quote(map_dfr(data, ~do_row(.x, dataset))))
expect_true(all(c("dataset", "do_row") %in% command$deps))
expect_false("~" %in% command$deps)
})
tar_test("command_init(string)", {
command <- command_init(quote(a <- b + c), string = "custom")
expect_equal(command$string, "custom")
})
tar_test("command_validate() on a good expr", {
command <- command_init(quote(a <- b + c))
expect_silent(command_validate(command))
})
tar_test("command_validate() with an extra field", {
command <- command_init(quote(a <- b + c))
command$nope <- 123
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command_validate() with empty expr field", {
command <- command_init()
command$expr <- NULL
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command_validate() with bad packages field", {
command <- command_init(expr = quote(a <- b + c), packages = 123)
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command validation with packages (test 2)", {
command_good <- command_init(quote(a <- b + c))
expect_silent(command_validate(command_good))
command_bad <- command_init(quote(a <- b + c), packages = 123)
expect_error(command_validate(command_bad), class = "tar_condition_validate")
expect_error(command_validate_packages(command_bad))
})
tar_test("command_validate() with bad library field", {
command <- command_init(expr = quote(a <- b + c), library = 123)
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command_validate() with bad deps field", {
command <- command_init(expr = quote(a <- b + c))
command$deps <- 123L
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command_validate() with bad string field", {
command <- command_new(
expr = quote(a <- b + c),
packages = character(0),
deps = character(0),
seed = 0L
)
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command_validate() with bad hash field", {
command <- command_new(
expr = quote(a <- b + c),
packages = character(0),
deps = character(0),
string = "abcde",
seed = 0L
)
expect_error(command_validate(command), class = "tar_condition_validate")
})
tar_test("command_validate() with a bad seed", {
x <- command_init(expr = quote(a <- b + c))
x$seed <- "123"
expect_error(command_validate(x), class = "tar_condition_validate")
x$seed <- integer(0)
expect_error(command_validate(x), class = "tar_condition_validate")
})
| /tests/testthat/test-class_command.R | permissive | ropensci/targets | R | false | false | 4,444 | r |
require(spatialEco)
require(sp)
require(usedist)
require(rgeos)
require(raster)
require(spatstat)
require(igraph)
require(sf)
require(rgdal)
require(gdistance)
require(otuSummary)
require(gdata)
require(maptools)
require(tidyverse)
require(reshape2)
require(data.table)
#setwd("E:/LCP sensitivity test/ConnectivityHPC_inputs")
#Create master map of all habitat created by a conservation strategy
i=80
u=5000
model="cluster"
species="g"
replicate=4
XO<- list.files(paste0(model,"/"), pattern =paste0(species,".tif$",sep=""))
ManyRunsStack<-raster::stack(paste0(model,"/",XO))
SumStack<-sum(ManyRunsStack)
rm(ManyRunsStack)
#Bring in ecoregion map to use for crs and extent template
Ecoregion <- raster(paste0(model,"/","Ecoregion100f.tif"))
#Read in roads file
roads <- raster(paste0(model,"/","road.tif",sep=""))
#Create empty vectors for connectivity indices
nodes <- vector()
links <- vector()
avgnode <- vector()
totnode <- vector()
avgLCP <- vector()
avgENN <- vector()
density <- vector()
transitivity <- vector()
#Time steps
TimestepList <- as.character(seq(from=0, to=80, by=10))
#Connectivity analysis
Longleaf<-"PinuPalu"
Loblolly<-"PinuTaed"
Pine<- c("PinuEchi","PinuTaed","PinuVirg")
Hardwood<-c("QuerAlba","AcerRubr","LiriTuli","LiquStyr","OxydArbo","CornFlor")
Year0<-list.files(paste0(model,"/",model,replicate),pattern=(".img$"))
#paste0("inputs/", model, "/",model,replicate,"/")
Longleaf_Stack<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Longleaf,"-",i,".img")]))
Loblolly_Stack<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Loblolly,"-",i,".img")]))
Pine_Stack<-raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Pine,"-",i,".img")]))
Hardwood_Stack<-raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Hardwood,"-",i,".img")]))
Total<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-TotalBiomass-", i, ".img")]))
###Reclassification of biomass into community types
###Rule 1
Longleaf_Stack[Longleaf_Stack> 0.25*(Total),]<-1
Longleaf_Stack[!Longleaf_Stack==1]<-999
### Rule 2
Loblolly_Stack[Loblolly_Stack> 0.9*(Total),]<-2
Loblolly_Stack[!Loblolly_Stack==2]<-999
### Rule 3
Pine_Stack[Pine_Stack> 0.65*(Total),]<-3
Pine_Stack[!Pine_Stack==3]<-999
### Rule 4
Hardwood_Stack[Hardwood_Stack>0.5*(Total),]<-4
Hardwood_Stack[!Hardwood_Stack==4]<-999
### Rule 5
Total[Total >0,]<-5
bigstack<-stack(Longleaf_Stack, Loblolly_Stack, Pine_Stack, Hardwood_Stack, Total)
test_stack<-min(bigstack)
crs(test_stack) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_stack)<-raster::extent(Ecoregion)
median0 <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("AGE-MED-",i,".img")]))
crs(median0) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(median0)<-raster::extent(Ecoregion)
#use to incorporate land use change
#LU0 <- raster(paste("C:/Users/tgmozele/Desktop/LCP sensitivity test/geo2noLUC/land-use-", i, ".tif",sep=""))
#use to not incorporate land use change, but establish BAU land use types
LU0 <- raster(paste0(model,"/","NLCD100.tif"))
#Create a raster that will become resistance raster
test_raster <- test_stack
#Assign projection and reformat to ecoregion extent for the resistance raster
crs(test_raster) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_raster)<-raster::extent(Ecoregion)
#Assign values to resistance raster
#longleaf community comp
#test_raster[test_stack == 1 & median0 %in% c(0:1),] <- (1/100)
#test_raster[test_stack == 1 & median0 %in% c(2:5),] <- (1/95)
#test_raster[test_stack == 1 & median0 %in% c(6:7),] <- (1/85)
#test_raster[test_stack == 1 & median0 %in% c(8:9),] <- (1/50)
#test_raster[test_stack == 1 & median0 %in% c(10:20),] <- (1/10)
#test_raster[test_stack == 1 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 1 & median0 >= 35,] <- 1
test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/60)
test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/20)
test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/10)
test_raster[test_stack == 3 & median0 >= 35,] <- 1
#pine plantation community type (was pine mix)
test_raster[test_stack == 2 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 2 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 2 & median0 %in% c(11:20),] <- (1/60)
test_raster[test_stack == 2 & median0 %in% c(21:30),] <- (1/50)
test_raster[test_stack == 2 & median0 >= 31,] <- (1/40)
#pine mix community type (was lob_)
#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/40)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/30)
#test_raster[test_stack == 3 & median0 >= 35,] <- (1/20)
test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/30)
test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/10)
test_raster[test_stack == 3 & median0 >= 35,] <- 1
#hardwood community type (was mix)
test_raster[test_stack == 4 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 4 & median0 %in% c(11:20),] <- (1/80)
test_raster[test_stack == 4 & median0 %in% c(21:30),] <- (1/70)
test_raster[test_stack == 4 & median0 >= 31,] <- (1/60)
#mixed forest community type (was hardwood)
#test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/100)
#test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/95)
#test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/90)
#test_raster[test_stack == 5 & median0 >= 31,] <- (1/80)
test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/70)
test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/60)
test_raster[test_stack == 5 & median0 >= 31,] <- (1/50)
test_raster2 <- test_raster
test_raster2[test_raster ==0] <- NA
#land use types
test_raster2[LU0 == 82] <- (1/90) #cropland
test_raster2[LU0 == 81] <- (1/90) #hay/pasture
test_raster2[LU0 == 11] <- (1/100) #water
test_raster2[LU0 == 24] <- (1/100) #developed, high intensity
test_raster2[LU0 == 23] <- (1/90) #developed, med intensity
test_raster2[LU0 == 22] <- (1/80) #developed, low intensity
test_raster2[LU0 == 31] <- (1/90) #barren land
#test_raster2[LU0 == 6] <- (1/100) #mining
test_raster2[test_raster2 ==0] <- (1/90)
#roads
test_raster2[roads %in% c(1:2)] <- (1/100)
test_raster2[roads %in% c(3:4)] <- (1/100)
test_raster2[roads %in% c(5:89)] <- (1/90)
test_raster3 <- test_raster2
test_raster3[test_raster3 >0.1] <- 1
test_raster3[test_raster3 < 1] <- 0
habitat_raster <- overlay(test_raster3, SumStack, fun=function(x,y){(x*y)} )
#Cluster habitat cells into habitat nodes using quintiles of occurrence
LikelyHabitat8<-habitat_raster
LikelyHabitat8[LikelyHabitat8%in%c(0:36),]<-NA
pol8 <- rasterToPolygons(LikelyHabitat8)
proj4string(pol8) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol8$ID<-seq(1,length(pol8[1]))
polbuf <- gBuffer(pol8, byid=TRUE, id=pol8$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
a<-raster::disaggregate(polbufdis)
LikelyHabitat5<-habitat_raster
LikelyHabitat5[LikelyHabitat5%in%c(0:27,37:45),]<-NA
pol5 <- rasterToPolygons(LikelyHabitat5)
proj4string(pol5) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol5$ID<-seq(1,length(pol5[1]))
polbuf <- gBuffer(pol5, byid=TRUE, id=pol5$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
b<-raster::disaggregate(polbufdis)
LikelyHabitat3<-habitat_raster
LikelyHabitat3[LikelyHabitat3%in%c(0:18,28:45),]<-NA
pol3 <- rasterToPolygons(LikelyHabitat3)
proj4string(pol3) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol3$ID<-seq(1,length(pol3[1]))
polbuf <- gBuffer(pol3, byid=TRUE, id=pol3$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
c<-raster::disaggregate(polbufdis)
LikelyHabitat1<-habitat_raster
LikelyHabitat1[LikelyHabitat1%in%c(0:9,19:45),]<-NA
pol1 <- rasterToPolygons(LikelyHabitat1)
proj4string(pol1) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol1$ID<-seq(1,length(pol1[1]))
polbuf <- gBuffer(pol1, byid=TRUE, id=pol1$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
d<-raster::disaggregate(polbufdis)
LikelyHabitat<-habitat_raster
LikelyHabitat[LikelyHabitat%in%c(0,10:45),]<-NA
pol <- rasterToPolygons(LikelyHabitat)
proj4string(pol) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol$ID<-seq(1,length(pol[1]))
polbuf <- gBuffer(pol, byid=TRUE, id=pol$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
e<-raster::disaggregate(polbufdis)
#Bring quintile-based habitat nodes together into one SpatialPolygonsDataFrame, find area of nodes, and assign numbers
polys <- bind(a,b,c,d,e)
data<-data.frame(ID=seq(1,length(polys)))
pol1_dis<-SpatialPolygonsDataFrame(polys,data)
pol1_dis$area_ha <- raster::area(pol1_dis)/10000
pol1_dis$num1 <- seq(from = 1, to= length(pol1_dis), by=1)
pol1_dis$num2 <- seq(from = 1, to= length(pol1_dis), by=1)
#Assign weight to habitat by type and area to be used in Conefor
pol1_dis$weight <- NA
pol1_dis$weight <- pol1_dis$area_ha
#Restrict habitat patches to those 2 hectares and larger, reassign ID's
pol1_dis <- pol1_dis[pol1_dis$area_ha >= 2,]
pol1_dis$ID<-seq(from = 1, to= length(pol1_dis), by=1)
#Make habitat nodes file to be used for Conefor
maketext <- cbind(pol1_dis$ID, pol1_dis$weight)
write.table(maketext, file=paste0(model,replicate,"/","Output_",model,replicate,species,u,"/nodes_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = FALSE, col.names = FALSE)
#use to find #nodes, avg node size, and total habitat area (to be used for ECA:Area)
nodes[length(nodes)+1] <- length(pol1_dis$ID)
avgnode[length(avgnode)+1] <- mean(pol1_dis$area_ha)
totnode[length(totnode)+1] <- sum(pol1_dis$area_ha)
###create transition matrix from resistance raster, which is required by gdistance package to calculate resistance
###distance and least cost path
test_tr <- transition(test_raster2, transitionFunction=mean, directions=8)
#find polgyon centroid
trueCentroids <- gCentroid(pol1_dis, byid=TRUE, id = pol1_dis$ID)
#clear memory
rm(Longleaf_Stack)
rm(Loblolly_Stack)
rm(Pine_Stack)
rm(Hardwood_Stack)
rm(Total)
rm(bigstack)
rm(pol8)
rm(pol5)
rm(pol3)
rm(pol1)
rm(pol)
rm(a)
rm(b)
rm(c)
rm(d)
rm(e)
rm(polbuf)
rm(polys)
rm(LikelyHabitat8)
rm(LikelyHabitat5)
rm(LikelyHabitat3)
rm(LikelyHabitat1)
rm(LikelyHabitat)
#get coordinates from trueCentroids
cent_coords <- geom(trueCentroids)
#find euclidean distance nearest neighbor
EUnn <- nndist(cent_coords)
avgENN[length(avgENN)+1] <- mean(EUnn)
#Euclidean distance between points- if euclidean distance is greater than 2000 meters, remove that pair- STILL NEED TO DO!!
#1500 meters for small songbird (Minor and Urban 2008, Sutherland et al. 2000)
#timber rattlesnake (generalist) ~1200 meters (USFS FEIS)
#~500 (449) for eastern spadefoot toad (Baumberger et al. 2019- Movement and habitat selection of western spadefoot)
#10,000 is the biggest median dispersal distance for birds found by Sutherland et al.
#create matrix of euclidean distance between polygon centroids
EUpts <- spDists(x= trueCentroids, y = trueCentroids, longlat = FALSE, segments = FALSE, diagonal = FALSE)
#condense matrix into table and remove duplicate pairs
EUnew <- subset(reshape2::melt(EUpts), value!=0)
EU5000<-EUnew[!(EUnew$value > u),]
EU5000_nodups <- EU5000[!duplicated(data.frame(list(do.call(pmin,EU5000),do.call(pmax,EU5000)))),]
rm(EUpts)
#merge
colnames(EU5000_nodups) <- c("num1", "num2", "EUD")
lookup <- cbind(pol1_dis$ID, pol1_dis$num1, pol1_dis$num2)
colnames(lookup) <- c("ID", "num1", "num2")
EU_test <- merge(x = EU5000_nodups, y = lookup, by = "num1", all.x = TRUE)
colnames(EU_test) <- c("num1", "num2", "EUD", "ID", "num2.y")
EU_test2 <- merge(x = EU_test, y = lookup, by = "num2", all.x = TRUE)
EU_fin <- cbind(EU_test2$ID.x, EU_test2$ID.y)
EU_fin_df <- data.frame(EU_fin)
#clear more memory
rm(EU_test)
rm(EU_test2)
rm(EU_fin)
#
print("#####################################Entering Cost Distance#############################")
#calculate least cost path
test_trC <- geoCorrection(test_tr, type="c") #geocorrection for least cost path
rm(test_tr)
costDist <- costDistance(test_trC, trueCentroids) #LCP
rm(trueCentroids)
costmatrix <- matrixConvert(costDist, colname = c("X1", "X2", "resistance"))
colnames(costmatrix) <- c("X1", "X2", "resistance")
EU_fin_df$costdis <- NULL
costdist5000 <- merge(EU_fin_df, costmatrix, by.x= c("X2", "X1"), by.y = c("X1", "X2"))
costdist5000df <- data.frame(costdist5000)
costcomplete <- costdist5000df[!is.infinite(rowSums(costdist5000df)),]
write.table(costcomplete, file=paste0(model,replicate,"/","Output_",model,replicate,species,u,"/distance_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = FALSE, col.names = FALSE)
#write.csv(costcomplete, file=paste0("Outputs/distance_",u,model,"yr",i,"Rep_",replicate,".csv"), row.names=F)
print("#####################################Finished Cost distance#############################")
links[length(links)+1] <- nrow(costcomplete)
avgLCP[length(avgLCP)+1] <- mean(costcomplete$resistance)
#get adjacency matrix to build igraph
cost_col<- cbind(costcomplete$X2, costcomplete$X1)
adj <- get.adjacency(graph.edgelist(as.matrix(cost_col), directed=FALSE))
network <- graph_from_adjacency_matrix(adj)
gdensity <- edge_density(network, loops = FALSE)
density[length(density)+1] <- gdensity
trans <- transitivity(network, type="global")
transitivity[length(transitivity)+1] <- trans
results <- data.frame(nodes, links, avgnode, totnode, avgLCP, avgENN, density, transitivity)
write.table(results, file=paste0(model,replicate,"/","Output_",model,replicate,species,u,"/Metrics_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = TRUE, col.names = TRUE)
| /Code/HPC_5000clusterspgyr80Rep4.R | no_license | ZacharyRobbins/TransferHPC | R | false | false | 14,786 | r |
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(readr)
library(ggplot2)
library(stringr)
setwd("/cloud/project/billboardLyrics/LyricTermer")
lyricCorpus <- read_csv("lyric_corpus.csv")
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$timePlot <- renderPlot({
# generate bins based on input$bins from ui.R
lyricCorpus %>%
filter(word %in% str_to_lower(input$word)) %>%
group_by(word, year) %>%
dplyr::summarize(freq = n()) %>%
ggplot(aes(x = year, y = freq, fill = word, color = word)) + geom_path() +
geom_point() +
expand_limits(y = 0) +
theme_minimal() +
coord_cartesian(xlim = c(2000,2020)) +
scale_y_continuous(breaks = 10)
})
output$artistList <- renderTable({
lyricCorpus %>%
filter(word %in% str_to_lower(input$word)) %>%
group_by(as.character(year), artist, word) %>%
dplyr::summarize(freq = n()) %>%
arrange(desc(freq)) %>%
head()
})
})
| /billboardLyrics/LyricTermer/server.R | permissive | dpmeltz/shiny_server | R | false | false | 1,287 | r |
context("Checking race")
test_that("race ...",{
})
| /tests/testthat/test-race.R | no_license | couthcommander/wakefield | R | false | false | 55 | r |
library(Ecdat)
### Name: bankingCrises
### Title: Countries in Banking Crises
### Aliases: bankingCrises
### Keywords: datasets
### ** Examples
data(bankingCrises)
numberOfCrises <- rowSums(bankingCrises[-1], na.rm=TRUE)
plot(bankingCrises$year, numberOfCrises, type='b')
# Write to a file for Wikimedia Commons
svg('bankingCrises.svg')
plot(bankingCrises$year, numberOfCrises, type='b', cex.axis=2,
las=1, xlab='', ylab='', bty='n', cex=0.5)
abline(v=c(1945, 1971), lty='dashed', col='blue')
text(1958, 14, 'Bretton Woods', srt=90, cex=2, col='blue')
dev.off()
| /data/genthat_extracted_code/Ecdat/examples/bankingCrises.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 575 | r |
predict_lambda <- function(df_row, model) {
df <- tibble(
"spi" = c(df_row$spi1, df_row$spi2),
"opp_spi" = c(df_row$spi2, df_row$spi1),
"home" = c(1,-1))
lambdas <- exp(predict(model, newdata = df))
lambdas <- tibble("lambda1" = lambdas[1], "lambda2" = lambdas[2])
return(lambdas)
}
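# Illustrative usage sketch (not part of the original file): `model` is assumed here to be a
# fitted Poisson-family GLM of goals on `spi`, `opp_spi` and `home` (hence the exp() of the
# linear predictor above); the example row and its SPI values are made up.
# predict_lambda(tibble("spi1" = 75, "spi2" = 60), model)
# #> a 1-row tibble with `lambda1` and `lambda2`, the expected goals for each side.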
match_probs <- function(lambda_1, lambda_2, dif) {
max_goals <- 10
score_matrix <- dpois(0:max_goals, lambda_1) %o% dpois(0:max_goals, lambda_2)
diag(score_matrix) <- dif * diag(score_matrix)
score_matrix <- score_matrix/sum(score_matrix)
tie_prob <- sum(diag(score_matrix))
win_prob <- sum(score_matrix[lower.tri(score_matrix)])
loss_prob <- sum(score_matrix[upper.tri(score_matrix)])
return(tibble("win_prob" = win_prob, "tie_prob" = tie_prob, "loss_prob" = loss_prob))
}
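# Illustrative usage sketch (lambda values are made up, not taken from any fitted model):
# match_probs(lambda_1 = 1.4, lambda_2 = 1.1, dif = 1.0)
# #> a 1-row tibble whose win/tie/loss probabilities sum to 1; setting dif > 1 inflates the
# #> diagonal of the Poisson score matrix, i.e. shifts probability mass toward draws.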
get_predictions <- function(df, hfa_reduction = 0, model, dif) {
model$coefficients['home'] <- (1 - hfa_reduction) * model$coefficients['home']
df <-
df %>%
bind_cols(future_map_dfr(1:nrow(df), ~predict_lambda(df[.x,], model))) %>%
bind_cols(future_map2_dfr(.$lambda1, .$lambda2, ~match_probs(.x, .y, dif)))
df$hfa_reduction <- hfa_reduction
return(df)
} | /prediction_helpers.R | no_license | lbenz730/soccer_hfa | R | false | false | 1,170 | r |
# Stop-signal context independent parameterization n-choice Wald racing with an EXG,
# External parameters types: "v","B","A","mu","sigma","tau","t0","tf","gf","ts" CHECK THIS
# Internal parameters types: "v","B","A","mu","sigma","tau","t0","tf","gf","ts"
#
# NB1: st0 is not available
# NB2: ts is slope of TRIALS covariate on B (NB: Threshold = B+A)
#
my.integrate <- function(...,big=10)
# Avoids a bug in integrate with upper=Inf that uses only 1 subdivision
# Use of big=10 is arbitrary ...
{
out <- try(integrate(...,upper=Inf),silent=TRUE)
if (class(out)=="try-error") 0 else
{
if (out$subdivisions==1)
{
out <- try(integrate(...,upper=big),silent=TRUE)
if (class(out)=="try-error") 0 else
{
if (out$subdivisions==1) 0 else out$value
}
} else out$value
}
}
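# Illustrative usage sketch (example only, not part of the original model code):
# my.integrate(f = dnorm, lower = 0)
# # Calls integrate(f, lower, upper = Inf); if integrate() errors or reports only one
# # subdivision, the call is retried with upper = big, and 0 is returned when that retry
# # also collapses to a single subdivision.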
transform.dmc <- function(par.df)
{
  # Context independence: separate go and stop accumulator parameterization.
par.df["NR",c("v","B","A")] <- par.df["NR",c("mu","sigma","tau")]
par.df[,c("v","B","A","mu","sigma","tau","t0","tf","gf","ts")]
}
random.dmc<- function(n,p.df,model,SSD=Inf,staircase=NULL,TRIALS=NULL)
{
rWaldss(n,v=p.df$v,B=p.df$B,A=p.df$A,t0=p.df$t0[1],
tf=p.df$tf[1],gf=p.df$gf[1],ts=p.df$ts[1],TRIALS=TRIALS,
SSD=SSD,staircase=staircase)
}
likelihood.dmc <- function(p.vector,data,min.like=1e-10)
# Returns vector of likelihoods for each RT in data (in same order)
{
likelihood <- numeric(dim(data)[1])
for ( i in row.names(attr(data,"model")) ) if ( !attr(data,"cell.empty")[i] )
{
if ( is.null(data$TRIALS[attr(data,"cell.index")[[i]]]) )
TRIALS <- NA else TRIALS <- data$TRIALS[attr(data,"cell.index")[[i]]]
p.df <- p.df.dmc(p.vector,i,attributes(data)$model,n1order=TRUE)
likelihood[ attr(data,"cell.index")[[i]] ] <-
n1PDF.Waldss(rt=data$RT[attr(data,"cell.index")[[i]]],
v=p.df$v,
B=p.df$B,
A=p.df$A,
t0=p.df$t0[1],
tf=p.df$tf[1],
gf=p.df$gf[1],
ts=p.df$ts[1],
# Stop-signal delays
SSD=data$SSD[attr(data,"cell.index")[[i]]],
# TRIAL regression
TRIALS=TRIALS,# In case no TRIALS
# Index of stop signal accumulator
Si=c(1:dim(p.df)[1])[row.names(p.df)=="NR"]
)
}
pmax(likelihood,min.like)
}
| /dmc/models/WALD-SSEXG/waldSSexg.R | no_license | lukestrickland/ARI | R | false | false | 2,340 | r |
# Copyright 2021 Observational Health Data Sciences and Informatics
#
# This file is part of SkeletonCohortDiagnosticsStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
verifyDependencies <- function() {
expected <- jsonlite::fromJSON("renv.lock")
expected <- dplyr::bind_rows(expected[[2]])
basePackages <- rownames(installed.packages(priority = "base"))
expected <- expected[!expected$Package %in% basePackages, ]
observedVersions <- sapply(sapply(expected$Package, packageVersion), paste, collapse = ".")
expectedVersions <- sapply(sapply(expected$Version, numeric_version), paste, collapse = ".")
mismatchIdx <- which(observedVersions != expectedVersions)
if (length(mismatchIdx) > 0) {
lines <- sapply(mismatchIdx, function(idx) sprintf("- Package %s version %s should be %s",
expected$Package[idx],
observedVersions[idx],
expectedVersions[idx]))
message <- paste(c("Mismatch between required and installed package versions. Did you forget to run renv::restore()?",
lines),
collapse = "\n")
stop(message)
}
}
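# Illustrative call (added): run from a project root containing renv.lock; it
# stops with a message listing any package whose installed version differs from
# the locked one.
# verifyDependencies()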
 | /R/VerifyDependencies.R | no_license | jmsyang/PhenotypeTesting | R | false | false | 1,760 | r |
library(tidycensus)
library(blscrapeR)
library(tidyverse)
library(usmap)
v <- load_variables(2018, "acs1")
County_profile_Census_Pull <- function(county_fips, Estimates_year, ACS_year, dataset){
Census_Pull <- get_estimates(
geography = "county",
product = "population",
state = "PA",
year = Estimates_year) %>%
mutate(year = as.character(Estimates_year)) %>%
rename(Total_Population = value) %>%
filter(variable == "POP" & GEOID %in% c(county_fips)) %>%
select(NAME, GEOID, year, Total_Population)
PoP_over_25_vars <- function() {
PoP_over_25_endings <- c()
Male_var_endings <- 11:25
for (i in 1:length(Male_var_endings)) {
new <- as.character(paste0("B01001_0", Male_var_endings[i]))
PoP_over_25_endings <- c(PoP_over_25_endings, new)
}
female_var_endings <- 35:49
for (i in 1:length(female_var_endings)) {
new <- as.character(paste0("B01001_0", female_var_endings[i]))
PoP_over_25_endings <- c(PoP_over_25_endings, new)
}
return(PoP_over_25_endings)
}
PoP_over_25_vars <- PoP_over_25_vars()
PoP_over_25 <- lapply(seq_along(PoP_over_25_vars),
function(i) get_acs(
geography = "county",
state = "PA",
variables = PoP_over_25_vars[i],
year = ACS_year,
survey = dataset,
geometry = FALSE)) %>%
bind_rows() %>%
filter(GEOID %in% c(county_fips)) %>%
group_by(GEOID) %>%
summarise(Total_Pop_over_25 = sum(estimate)) %>%
select(Total_Pop_over_25)
Median_Household_income <- get_acs(geography = "county",
state = "PA",
variables = "B19013_001",
year = ACS_year,
survey = dataset,
geometry = FALSE) %>%
filter(GEOID %in% c(county_fips)) %>%
rename(Median_Household_income = estimate) %>%
select(Median_Household_income)
Persons_below_poverty_line <- get_acs(geography = "county",
state = "PA",
variables = "B17001_002",
year = ACS_year,
survey = dataset,
geometry = FALSE) %>%
filter(GEOID %in% c(county_fips)) %>%
rename(Persons_below_poverty_line = estimate) %>%
select(Persons_below_poverty_line)
HS_Education_vars_over_25 <- c( "B15001_014", "B15001_015", "B15001_016", "B15001_017", "B15001_018", "B15001_022", "B15001_023", "B15001_024",
"B15001_025", "B15001_026", "B15001_030", "B15001_031", "B15001_032", "B15001_033", "B15001_034", "B15001_038",
"B15001_039", "B15001_040", "B15001_041", "B15001_042", "B15001_055", "B15001_056", "B15001_057", "B15001_058",
"B15001_059", "B15001_063", "B15001_064", "B15001_065", "B15001_066", "B15001_067", "B15001_071", "B15001_072",
"B15001_073", "B15001_074", "B15001_075", "B15001_079", "B15001_080", "B15001_081", "B15001_082", "B15001_083")
HS_over_25 <- lapply(seq_along(HS_Education_vars_over_25),
function(i) get_acs(
geography = "county",
state = "PA",
variables = HS_Education_vars_over_25[i],
year = ACS_year,
survey = dataset,
geometry = FALSE)) %>%
bind_rows() %>%
filter(GEOID %in% c(county_fips)) %>%
group_by(GEOID) %>%
summarise(HS_over_25 = sum(estimate)) %>%
select(HS_over_25)
Bachelors_over_25_vars <- c( "B15001_017", "B15001_018", "B15001_025", "B15001_026", "B15001_033", "B15001_034", "B15001_041", "B15001_042",
"B15001_058", "B15001_059", "B15001_066", "B15001_067", "B15001_074", "B15001_075", "B15001_082", "B15001_083")
Bachelors_over_25 <- lapply(seq_along(Bachelors_over_25_vars),
function(i) get_acs(
geography = "county",
state = "PA",
variables = Bachelors_over_25_vars[i],
year = ACS_year,
survey = dataset,
geometry = FALSE)) %>%
bind_rows() %>%
filter(GEOID %in% c(county_fips)) %>%
group_by(GEOID) %>%
summarise(Bachelors_over_25 = sum(estimate)) %>%
select(Bachelors_over_25)
Census_Pull <- cbind(Census_Pull, PoP_over_25, HS_over_25, Bachelors_over_25)
Census_Pull <- Census_Pull %>%
mutate(HS_percentage = HS_over_25 / Total_Pop_over_25, Bach_percentage = Bachelors_over_25/Total_Pop_over_25)
return(Census_Pull <- cbind(Census_Pull, Median_Household_income, Persons_below_poverty_line))
}
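# Illustrative call (added): requires a Census API key registered via
# tidycensus::census_api_key(); "42003" (Allegheny County, PA) is only an
# example FIPS code.
# allegheny <- County_profile_Census_Pull(county_fips = "42003",
#                                         Estimates_year = 2018,
#                                         ACS_year = 2018,
#                                         dataset = "acs1")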
City_Census_Pull <- function(city_code, Estimates_year, ACS_year, dataset){
Census_Pull <- get_estimates(
geography = "place",
product = "population",
state = "PA",
year = Estimates_year) %>%
mutate(year = as.character(Estimates_year)) %>%
rename(Total_Population = value) %>%
filter(variable == "POP" & GEOID %in% c(city_code)) %>%
select(NAME, GEOID, year, Total_Population)
PoP_over_25_vars <- function() {
PoP_over_25_endings <- c()
Male_var_endings <- 11:25
for (i in 1:length(Male_var_endings)) {
new <- as.character(paste0("B01001_0", Male_var_endings[i]))
PoP_over_25_endings <- c(PoP_over_25_endings, new)
}
female_var_endings <- 35:49
for (i in 1:length(female_var_endings)) {
new <- as.character(paste0("B01001_0", female_var_endings[i]))
PoP_over_25_endings <- c(PoP_over_25_endings, new)
}
return(PoP_over_25_endings)
}
PoP_over_25_vars <- PoP_over_25_vars()
PoP_over_25 <- lapply(seq_along(PoP_over_25_vars),
function(i) get_acs(
geography = "place",
state = "PA",
variables = PoP_over_25_vars[i],
year = ACS_year,
survey = dataset,
geometry = FALSE)) %>%
bind_rows() %>%
filter(GEOID %in% c(city_code)) %>%
group_by(GEOID) %>%
summarise(Total_Pop_over_25 = sum(estimate)) %>%
select(Total_Pop_over_25)
Median_Household_income <- get_acs(geography = "place",
state = "PA",
variables = "B19013_001",
year = ACS_year,
survey = dataset,
geometry = FALSE) %>%
filter(GEOID %in% c(city_code)) %>%
rename(Median_Household_income = estimate) %>%
select(Median_Household_income)
Persons_below_poverty_line <- get_acs(geography = "place",
state = "PA",
variables = "B17001_002",
year = ACS_year,
survey = dataset,
geometry = FALSE) %>%
filter(GEOID %in% c(city_code)) %>%
rename(Persons_below_poverty_line = estimate) %>%
select(Persons_below_poverty_line)
HS_Education_vars_over_25 <- c( "B15001_014", "B15001_015", "B15001_016", "B15001_017", "B15001_018", "B15001_022", "B15001_023", "B15001_024",
"B15001_025", "B15001_026", "B15001_030", "B15001_031", "B15001_032", "B15001_033", "B15001_034", "B15001_038",
"B15001_039", "B15001_040", "B15001_041", "B15001_042", "B15001_055", "B15001_056", "B15001_057", "B15001_058",
"B15001_059", "B15001_063", "B15001_064", "B15001_065", "B15001_066", "B15001_067", "B15001_071", "B15001_072",
"B15001_073", "B15001_074", "B15001_075", "B15001_079", "B15001_080", "B15001_081", "B15001_082", "B15001_083")
HS_over_25 <- lapply(seq_along(HS_Education_vars_over_25),
function(i) get_acs(
geography = "place",
state = "PA",
variables = HS_Education_vars_over_25[i],
year = ACS_year,
survey = dataset,
geometry = FALSE)) %>%
bind_rows() %>%
filter(GEOID %in% c(city_code)) %>%
group_by(GEOID) %>%
summarise(HS_over_25 = sum(estimate)) %>%
select(HS_over_25)
Bachelors_over_25_vars <- c( "B15001_017", "B15001_018", "B15001_025", "B15001_026", "B15001_033", "B15001_034", "B15001_041", "B15001_042",
"B15001_058", "B15001_059", "B15001_066", "B15001_067", "B15001_074", "B15001_075", "B15001_082", "B15001_083")
Bachelors_over_25 <- lapply(seq_along(Bachelors_over_25_vars),
function(i) get_acs(
geography = "place",
state = "PA",
variables = Bachelors_over_25_vars[i],
year = ACS_year,
survey = dataset,
geometry = FALSE)) %>%
bind_rows() %>%
filter(GEOID %in% c(city_code)) %>%
group_by(GEOID) %>%
summarise(Bachelors_over_25 = sum(estimate)) %>%
select(Bachelors_over_25)
Census_Pull <- cbind(Census_Pull, PoP_over_25, HS_over_25, Bachelors_over_25)
Census_Pull <- Census_Pull %>%
mutate(HS_percentage = HS_over_25 / Total_Pop_over_25, Bach_percentage = Bachelors_over_25/Total_Pop_over_25)
return(Census_Pull <- cbind(Census_Pull, Median_Household_income, Persons_below_poverty_line))
}
#https://pitt.libguides.com/uscensus/pghcensustracts
SQH <- get_acs(geography = "tract",
variables = "B01001_001",
state = "PA",
county = "Allegheny",
year = 2017,
survey = "acs5")
SQH <- separate(SQH, NAME, into = c("Tract", "County"), sep = ",")
SQH$Tract <- substr(SQH$Tract, 13, 18)
SQH$Tract <- as.integer(SQH$Tract)
SQH_var <- c(1401, 1402, 1403, 1408, 1413, 1414, 9803, 9805)
SQH <- SQH %>%
filter(Tract %in% SQH_var)
sum(SQH$estimate) | /Census Functions.R | no_license | jvanloon93/ACCD-Tools-Scripts | R | false | false | 11,067 | r |
# Final exam study notes
# Statistical analysis: tests of differences
# Package notes
# propagate -- fitDistr() helps find a well-fitting distribution
library(ggplot2)
# Children drinking 4 liters of water or less per day
x=seq(0,16,length=100)
y=dnorm(x,mean=7.5, sd=1.5)
plot(x,y,type="l",
xlab="Liters per day",
ylab="Density",
main="Water drunken by school children < 12 years old")
pnorm(4, mean = 7.5, sd=1.5, lower.tail = T)
# Children drinking 8 liters of water or more per day
lower = 8
upper = 15
i = x>=lower & x<upper
polygon(c(lower, x[i], upper), c(0,y[i],0),col = "red")
abline(h=0, col="red")
pb=round(pnorm(8, mean=7.5, sd=1.5, lower.tail = F),2)
pb
pb.results = paste("cumulative probability of a child drinking > 8L/day",
pb, sep=":")
title(pb.results)
# Basic descriptive statistics
mean(1:5)
var(1:5)
sd(1:5)
summary(1:11)
x=factor(c("a","b","c","c","c","c","d","d"))
x
table(x)
which.max(table(x))
names(table(x))[
which.max(table(x))]
# Salary negotiation data analysis
data = read.csv("employees_ex.csv")
data
str(data)
summary(data)
hist(data$incentive, breaks = 50)
summary(data$incentive)
# Distribution of the incentive data by condition
hist(data$incentive[data$year==2007], breaks=50)
hist(data$incentive[data$year==2008], breaks=50)
hist(data$incentive[data$gender=="F"], breaks=50)
hist(data$incentive[data$gender=="M"], breaks=50)
# Whether the employee negotiated affects the incentive
hist(data$incentive[data$negotiated==F], breaks=50)
hist(data$incentive[data$negotiated==T], breaks=50)
library(propagate)
set.seed(275)
observations = rnorm(10000,5)
distTested = fitDistr(observations)
distTested
incentive_dist = fitDistr(data$incentive)
incentive_dist
# Cross-tabulation analysis
table(c("a","b","a","d","e","a","c","a","b","d"))
CTable = data.frame(x=c("3","7","9","10"),
y=c("A1","B2","A1","B2"),
num=c(4,6,2,9))
CTable
xtabs(num~x, data=CTable)
xtabs(num~y, data=CTable)
temp = xtabs(num~x+y, data=CTable)
addmargins(temp,margin=1)
addmargins(temp,margin=2)
addmargins(temp)
library(MASS)   # Cars93 (and survey, used below) come from the MASS package
str(Cars93)
Car_table_3 = with(Cars93, table(Type, Cylinders))
Car_table_3
Car_table_4 = xtabs(~Type + Cylinders, data=Cars93)
Car_table_4
addmargins(Car_table_4)
library(vcd)
mosaic(Car_table_4,
gp=gpar(fill=c("Red","blue")),
direction = "v",
main="Mosaic plot of CarType vs. Type+Cylinders")
data("UCBAdmissions")
str(UCBAdmissions)
UCBAdmissions.df = as.data.frame(UCBAdmissions)
str(UCBAdmissions.df)
ucb_table1 = xtabs(Freq~Gender+Admit, data=UCBAdmissions.df)
ucb_table1
mosaic(ucb_table1,
gp=gpar(fill=c("Red","Blue")),
direction = "v",
main = "Mosaic plot of UCB Admission vs. Gender")
options("digit"=3)
ucb_table2 = prop.table(ucb_table1)
ucb_table2
chisq.test(ucb_table1)
#-----------------------------------------------------------------------------
# Tests of differences
#-----------------------------------------------------------------------------
# Chi-squared test (does students' exercise level differ by sex?)
# H0: sex and exercise are independent; H1: sex and exercise are not independent.
data("survey")
str(survey)
survey
SexExer = xtabs(~Sex+Exer, data = survey)
SexExer
# Degrees of freedom: sex has 2 levels and exercise has 3, so df = (2-1)(3-1) = 2
# Since the p-value is greater than 0.05, we fail to reject H0 (sex and exercise are independent)
chisq.test(SexExer)
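# Added for illustration: expected counts under independence; the chi-squared
# approximation is considered reasonable when these are all at least about 5.
chisq.test(SexExer)$expected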
# Chi-squared test (proportions)
child1 = c(5,11,1)
child2 = c(4,7,3)
Toy = cbind(child1, child2)
rownames(Toy) = c("car", "truck", "doll")
Toy
chisq.test(Toy)
fisher.test(Toy)
# Kolmogorov-Smirnov test (do two samples come from different distributions?)
x = rnorm(50)
x
y = runif(30)
y
ks.test(x,y)
# Shapiro-Wilk test (does the data follow a normal distribution?)
shapiro.test(rnorm(100,mean = 5,sd=3))
library(UsingR)
data(cfb)
str(cfb)
shapiro.test(cfb$INCOME)
# Visualization (histogram)
hist(cfb$INCOME, breaks = 100)
#Kernel Density Plot, y:probability(freq=False)
hist(cfb$INCOME, freq=F, breaks = 100,
main="Kernel Density Plot of cfb$INCOME")
# Not a normal distribution; closer to an F-type shape (right-skewed)
#Q-QPlot: qqnorm(), qqline()
qqnorm(cfb$INCOME)
qqline(cfb$INCOME)
# T-test: a parametric test comparing the mean(s) of one or two groups
# Parametric methods: rely on the parametric assumption of normality
# Non-parametric methods: use rank scales (when parametric assumptions cannot be used)
# Rank-sum test: order the data by size, assign ranks, then compare groups via the sums of ranks
# t.test() argument reference (kept for study; not meant to be evaluated as-is):
# t.test(x,                  # one-sample test: numeric vector x only; two-sample: supply x and y
#        y = NULL,
#        alternative = c("two.sided", "less", "greater"),  # alternative hypothesis
#        mu = 0,             # hypothesized population mean
#        paired = FALSE,     # TRUE for a paired t-test
#        var.equal = FALSE,  # whether the two groups' variances are assumed equal (two-sample)
#        conf.level = 0.95)
# T-test
# Company A claims its batteries last 1000 hours; the lifetimes of 10 randomly
# sampled batteries are as follows
a=c(980,1008,968,1032,1012,996,1021,1002,996,1017)
# The normality test shows the data are consistent with a normal distribution
shapiro.test(a)
t.test(a, mu=1000, alternative = "two.sided")
# Class 3-1's average math score was 55
# After introducing an extra early-morning class, the test scores are as follows
a=c(58,49,39,99,32,88,62,30,55,65,44,55,57,53,88,42,39)
shapiro.test(a)
t.test(a, mu=55, alternative = "greater")
# Two-sample t-test
pre = c(13.2,8.2,10.9,14.3, 10.7, 6.6, 9.5, 10.8, 8.8, 13.3)
post = c(14.0, 8.8, 11.2, 14.2, 11.8, 6.4, 9.8, 11.3, 9.3, 13.6)
shapiro.test(pre)
shapiro.test(post)
# Test whether the two samples have equal variances
var.test(pre,post)
t.test(pre,post, paired=F, var.equal=T)
# Conclusion: no meaningful difference
# Another two-sample case
A = c(rep(5,8), rep(4,11), rep(3,9), rep(2,2), rep(1,3))
B = c(rep(5,4), rep(4,6), rep(3,10), rep(2,8), rep(1,4))
A
# A is not normally distributed
shapiro.test(A)
B
# B is not normally distributed
shapiro.test(B)
# When normality does not hold, use a non-parametric test
wilcox.test(A,B, exact = F, correct = F)
# Analysis of the effect of a sleep drug
str(sleep)
# extra: increase in hours of sleep
# group: group ID
# ID: patient ID
sleep
# Without considering individual differences: ignore pairing and simply compare group 1 with group 2
sleep2 = sleep[,-3]
sleep2
tapply(sleep2$extra, sleep2$group, mean)
var.test(extra~group, sleep2)
t.test(extra~group, data=sleep2, paired=F,var.equal=T)
# Considering individuals: patient 1 is compared with patient 1, and so on -- paired t-test
with(sleep, t.test(extra[group==1], extra[group==2],paired=T))
# Proportion tests
a=c(3,5,4,5.5,3.5,2.5,3,5,4.5,3,3.5)
above4.hrs = ifelse(a >4, "yes","no")
above4.hrs
above4hr.table = table(above4.hrs)
above4hr.table
binom.test(4, n=11, p=0.5)
prop.test(42,100, 0.5)
# Two-sample proportion test
prop.test(c(45,55), c(100,90))
prop.test(c(16,63),c(430,1053))
#------------------------------------------------------------------------
#ANOVA
#------------------------------------------------------------------------
# Assumption of normality
# Assumption of homogeneity of variance
# Assumption of independence of observations
xx = c(1,2,3,4,5,6,7,8,9)
yy = c(1.09, 2.12, 2.92, 4.06, 4.90, 6.08, 7.01, 7.92, 8.94)
zz = c(1.10, 1.96, 2.98, 4.09, 4.92, 6.10, 6.88, 7.97, 9.01)
mydata = c(xx,yy,zz)
mydata
group = c(rep(1,9), rep(2,9), rep(3,9))
group
# Since the p-value is greater than 0.05, there is no difference between the groups
oneway.test(mydata~group, var=T)
my_data = PlantGrowth
my_data
my_data$group = ordered(my_data$group, levels=c("ctrl","trt1","trt2"))
#compute summary
library(dplyr)
group_by(my_data,group) %>%
summarise(
count=n(),
mean=mean(weight, na.rm=T),
sd=sd(weight,na.rm=T)
)
#visualize the three groups
#boxplot
boxplot(weight~group, data=my_data,
xlab="Treatment", ylab="Weight",
frame=F, col=c("blue", "yellow", "red"))
#Compute the analysis of variance
res.aov = aov(weight~group, data=my_data)
summary(res.aov)
#TukeyHSD
TukeyHSD(res.aov)
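# Added for illustration: plotting the TukeyHSD object draws the pairwise
# confidence intervals; intervals crossing zero indicate non-significant pairs.
plot(TukeyHSD(res.aov))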
tyre = read.csv("tyre.csv")
str(tyre)
tyre %>% group_by(Brands) %>%
summarise(N=n(),
Mean=mean(Mileage),
Median=median(Mileage),
Min=min(Mileage),
Max=max(Mileage),
SD=sd(Mileage)
)
boxplot(tyre$Mileage~tyre$Brands,
main="Boxplot comparing Mileage of Four Brands",
col=rainbow(4),
horizontal = T)
tyres.aov = aov(Mileage~Brands, tyre)
summary(tyres.aov)
TukeyHSD(tyres.aov)
#
drug = read.csv("drug.csv")
drug
str(drug)
drug$fatigue = ordered(drug$fatigue, levels=c("low","med","high"))
boxplot(dose~fatigue, data=drug,
xlab="fatigue",ylab="dose",
col=c("blue","red","yellow"))
drug.aov = aov(dose~fatigue, data=drug)
summary(drug.aov)
plot(drug.aov)
drug_aov2 = update(drug.aov, data = subset(drug, patientID != 20))
summary(drug_aov2)
drug_aov3 = aov(dose~fatigue+gender, data=drug)
summary(drug_aov3)
# Compare the two ANOVA models
# Since the p-value is greater than 0.05, the models do not differ, so choose the simpler one
anova(drug.aov, drug_aov3)
#--------------------------------------------------------------------------
# Correlation analysis
#--------------------------------------------------------------------------
iris
cor(iris$Sepal.Width, iris$Sepal.Length)
cor(iris[,1:4])
# Visualizing correlations
symnum(cor(iris[,1:4]))
library(corrgram)
corrgram(iris)
corrgram(iris,upper.panel = panel.conf)
cor.test(c(1,2,3,4,5),c(1,0,3,4,5), method = "pearson")
cor.test(c(1,2,3,4,5),c(1,0,3,4,5), method = "spearman")
cor.test(c(1,2,3,4,5),c(1,0,3,4,5), method = "kendall")
#
economics = as.data.frame(ggplot2::economics)
cor.test(economics$unemploy, economics$pce)
#
head(mtcars)
car_cor = cor(mtcars)
round(car_cor,2)
library(corrplot)
corrplot(car_cor)
corrplot(car_cor, method="color",#circle, square,ellipse,number,shade,color,pie
type="lower",
order="FPC",#AOE, hclust, FPC, alphabet
addCoef.col = "black",
tl.col = "black", tl.srt = 45, diag=F)
#-----------------------------------------------------------------------------
# Regression analysis
#-----------------------------------------------------------------------------
data(cars)
cor.test(cars$speed, cars$dist, method = "pearson")
attach(cars)
plot(speed,dist)
cor.test(speed,dist)
# Fit a linear regression model
m = lm(dist~speed, cars)
#dist = -17.579 + 3.932*speed + e
m
summary(m)
plot(speed, dist)
abline(coef(m))
# Linear regression: prediction
predict(m, newdata=data.frame(speed=3), interval = "confidence")
# -5.781869 = -17.579095 + 3.932409*3
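# Added for illustration: interval = "prediction" gives the (wider) interval for
# a single new observation rather than for the mean response.
predict(m, newdata = data.frame(speed = 3), interval = "prediction")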
# Prediction and validation
set.seed(100)
#row indices for training data
trainingRowIndex = sample(1:nrow(cars), 0.8*nrow(cars))
#model training data
trainingData = cars[trainingRowIndex,]
trainingData
testData = cars[-trainingRowIndex,]
testData
lmMod = lm(dist~speed, data=trainingData)
lmMod
summary(lmMod)
distPred = predict(lmMod, testData)
distPred
# Validation
actuals_preds = data.frame(cbind(actuals = testData$dist,
predict=distPred))
head(actuals_preds)
correlation_accuracy = cor(actuals_preds)
correlation_accuracy
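# Added for illustration: two simple error summaries on the held-out data
# (the actual test distances are all positive, so MAPE is well defined here).
min_max_accuracy <- mean(apply(actuals_preds, 1, min) / apply(actuals_preds, 1, max))
mape <- mean(abs(actuals_preds$predict - actuals_preds$actuals) / actuals_preds$actuals)
c(min_max_accuracy = min_max_accuracy, mape = mape)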
# Height and weight (predict height from weight)
reg = read.csv("regression.csv")
head(reg)
tail(reg)
cor(reg$height, reg$weight)
r=lm(reg$height~reg$weight)
plot(reg$weight, reg$height)
abline(coef(r))
summary(r)
#height = 70.9481 + 1.5218*weight + e
plot(r)
# Multiple linear regression (two or more predictors)
m=lm(Sepal.Length~Sepal.Width+Petal.Length+Petal.Width, data=iris)
summary(m)
iris$Species
m=lm(Sepal.Length~., data=iris)
summary(m)
model.matrix(m)[c(1,51,101),]
anova(m)
with(iris, plot(Sepal.Width, Sepal.Length, cex=.7, pch=as.numeric(Species)))
(m=lm(Sepal.Length~Sepal.Width + Species, data=iris))
coef(m)
abline(2.25,0.80, lty=1)
abline(2.25+1.45, 0.80, lty=2)
abline(2.25+1.94, 0.80, lty=3)
# Multiple linear regression with interactions
with(iris, plot(Species, Petal.Length, xlab="Species", ylab="Petal.Length"))
m2 = lm(Petal.Length~Petal.Width*Species, data=iris)
anova(m2)
summary(m2)
# setosa:     Petal.Length = 1.32 + 0.54*Petal.Width
# versicolor: Petal.Length = 1.32 + 0.45 + (0.54 + 1.32)*Petal.Width
# virginica:  Petal.Length = 1.32 + 2.91 + (0.54 + 0.10)*Petal.Width
library(interactions)
interact_plot(m2,pred = "Petal.Width", modx="Species",plot.points = T)
# 1. To include A, B, C and all of their interactions:
#    A + B + C + A:B + A:C + B:C + A:B:C  is equivalent to  A*B*C
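# Quick check (added): the crossing operator expands to all main effects and
# interactions, so the two formulas above name the same terms.
attr(terms(~ A * B * C), "term.labels")
# "A" "B" "C" "A:B" "A:C" "B:C" "A:B:C"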
x=1:1000
y=x^2+3*x+5+rnorm(1000)
y
lm(y~I(x^2)+x)
x=101:200
y=exp(3*x+rnorm(100))
lm(log(y)~x)
x=1:1000
y=log(x)+rnorm(1000)
lm(y~log(x))
# Linear regression with data transformation
time=c(1,5,15,30,60,120,240,480,720,1440,2880,5760,10080)
prop=c(0.84,0.71,0.61,0.56,0.54,0.47,0.45,0.38,0.36,0.26,0.20,0.16,0.08)
data=as.data.frame(cbind(time,prop))
data
m=lm(prop~time, data=data)
summary(m)
plot(data$time,data$prop)
abline(coef(m))
plot(m)
# When the relationship looks curved, try a log transform
m2 = lm(prop~log(time), data=data)
summary(m2)
plot(m2)
plot(log(data$time),prop)
abline(coef(m2))
# Relationship between body weight and brain weight
library(MASS)
data(mammals)
head(mammals)
plot(mammals$body,mammals$brain)
m1 = lm(brain~body, data = mammals)
summary(m1)
plot(m1)
plot(mammals$body, mammals$brain, log="xy")
m2 = lm(log(brain)~log(body), data = mammals)
summary(m2)
par(mfrow=c(2,2), mar=c(2,3,1.5,0.5))
plot(m2)
# Compare the residuals of the two models
plot(density(m1$resid),main="m1")
plot(density(m2$resid),main="m2")
#2.Stepwise Algorithm
data("attitude")
m = lm(rating~., data=attitude)
summary(m)
# Backward elimination
library(mlbench)
m2 = step(m, direction="backward")
m2
summary(m2)
# Stepwise selection
data("BostonHousing")
m=lm(medv~., data=BostonHousing)
m2=step(m, direction="both")
summary(m)
summary(m2)
library(leaps)
m=regsubsets(medv~., data=BostonHousing)
summary(m)
plot(m, scale="adjr2")
plot(m, scale="bic")
(bestpic = summary(m)$bic)
(min.bic = which.min(bestpic))
coef(m,min.bic)
#par(mfrow=c(1,1), mar=c(2,3,1.5,0.5))
#3.Dealing with Outliers
best_jump = c(5.30, 5.55, 5.47, 5.45, 5.07, 5.32, 6.15, 4.70, 5.22,
5.77, 5.12, 5.77, 6.22, 5.82, 5.15, 4.92, 5.20, 5.42)
avg_takeoff = c(.09, .17, .19, .24, .16, .22, .09, .12, .09, .09,
.13, .16, .03, .50, .13, .04, .07, .04)
plot(avg_takeoff, best_jump)
jump_model = lm(best_jump~avg_takeoff)
abline(reg = jump_model, col="red")
summary(jump_model)
plot(jump_model)
# Remove the outlier and refit
best_jump2 = best_jump[-14]
avg_takeoff2 = avg_takeoff[-14]
jump_model2 = lm(best_jump2~avg_takeoff2)
plot(best_jump2~avg_takeoff2)
abline(jump_model2, col="red")
summary(jump_model2)
plot(jump_model2)
# Orange data: outliers
data("Orange")
Orange
plot(Orange$circumference ~ Orange$age)
# Inject outliers
Orange = rbind(Orange,
data.frame(Tree=as.factor(c(6,6,6)),
age=c(118,484,664),
circumference=c(177,50,30)))
with(Orange,
plot(Tree, circumference, xlab="tree",
ylab="circumference"))
with(Orange, interaction.plot(age, Tree, circumference))
m = lm(circumference~age+I(age^2), data=Orange)
summary(m)
plot(m)
library(car)
#Bonferroni p value < 0.05
outlierTest(m)
data("airquality")
str(airquality)
head(airquality)
col1 = mapply(anyNA, airquality)
col1
for(i in 1:nrow(airquality)){
if(is.na(airquality[i,"Ozone"])){
airquality[i,"Ozone"] = mean(airquality[which(airquality[,"Month"]
==airquality[i,"Month"]),
"Ozone"],na.rm=T)
}
}
for(i in 1:nrow(airquality)){
if(is.na(airquality[i,"Solar.R"])){
airquality[i,"Solar.R"] = mean(airquality[which(airquality[,"Month"]
==airquality[i,"Month"]),
"Solar.R"],na.rm=T)
}
}
head(airquality)
normalize = function(x){
return((x-min(x))/(max(x)-min(x)))
}
airquality = normalize(airquality)
str(airquality)
head(airquality)
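# Note (added): as written, normalize() rescales by the single min/max of the
# whole data frame; a per-column rescaling would look like
# airquality[] <- lapply(airquality, normalize)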
library(corrplot)
airquality_cor = cor(airquality)
corrplot(airquality_cor, method = "color",
type="lower",addCoef.col = "black")
Y = airquality[,"Ozone"]
X = airquality[,"Solar.R"]
model1 = lm(Y~X)
model1
summary(model1)
plot(Y~X)
abline(model1, col="blue", lwd=2)
model2 = lm(Y~airquality$Wind)
summary(model2)
plot(Y~airquality$Wind)
abline(model2, col="blue", lwd=3)
airquality$forecast = predict(model1)
ggplot(data=airquality, aes(x=Ozone, y=forecast))+
geom_point()+
geom_smooth(method = lm)+
labs(title="airquality linear regression model")
model3 = lm(Ozone ~ . - forecast, data = airquality)  # exclude the fitted-values column added above
summary(model3)
m3=step(model3, direction="both")
summary(m3)
#-------------------------------------------------------------------------
# Classification
#-------------------------------------------------------------------------
# Decision trees
library(rpart)
library(rpart.plot)
m=rpart(Species~., data=iris)
m
prp(m, type=5, extra=2, digits=3)
table(iris$Species, predict(m, newdata = iris, type="class"))
# Titanic data
titanic = read.csv("titanic_clean.csv")
str(titanic)
#data cleansing
titanic$pclass = as.factor(titanic$pclass)
titanic$survived = factor(titanic$survived, levels=c(0,1))
str(titanic)
#understanding data: by plot
ggplot(titanic, aes(x=factor(pclass), fill=factor(sex)))+
geom_bar(position="dodge")
ggplot(titanic, aes(x=factor(pclass), fill=factor(sex)))+
geom_bar(position="dodge")+
facet_grid(".~survived")
posn.j=position_jitter(0.3,0)
ggplot(titanic, aes(x=factor(pclass), y=age, col=factor(sex)))+
geom_jitter(size=3, alpha=0.5, position=posn.j)+
facet_grid(".~survived")
mosaicplot(survived~pclass+sex, main="pclass and sex",data = titanic,
color=T)
library(Hmisc)
summary(survived~pclass + sex+ age + sibsp+parch+fare+embarked,
data = titanic, method = "reverse")
library(rpart)
library(rpart.plot)
set.seed(100)
trainingRowIndex = sample(1:nrow(titanic), 0.8*nrow(titanic))
trainingData=titanic[trainingRowIndex,]
testData = titanic[-trainingRowIndex,]
dt1 = rpart(survived~pclass+sex+age+sibsp+parch+fare+embarked,
data=trainingData)
prp(dt1, type=0, extra=2, digits=3)
#prediction
prediction = predict(dt1, testData, type="class")
#check accuracy
table(ifelse(testData$survived==prediction, "yes","no"))
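# Added for illustration: overall hold-out accuracy as a single number
# (rows with a missing survived label are dropped).
mean(testData$survived == prediction, na.rm = TRUE)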
# Week 9 homework: test of independence
library(MASS)
str(survey)
# Writing hand and which hand is on top when clapping are both nominal
# variables, so a chi-squared test is used to judge whether they are independent.
# Frequency table of writing hand by clapping hand
WHnd_Clap = xtabs(~W.Hnd+Clap, data=survey)
WHnd_Clap
# Since the sample size is small, re-test with Fisher's exact test rather than the chi-squared test.
chisq.test(WHnd_Clap)
# Since the p-value is below 0.05, the two variables are not independent (they are related).
fisher.test(WHnd_Clap)
ExerSmoke = xtabs(~Exer+Smoke, data=survey)
ExerSmoke
chisq.test(ExerSmoke)
fisher.test(ExerSmoke)
# Week 10 homework: test of independence
dat = data(iris)
dat = iris
dat$size = ifelse(dat$Sepal.Length<median(dat$Sepal.Length),"small","big")
sizeSpecies = xtabs(~size+Species, data=dat)
sizeSpecies
# Since the p-value is below 0.05, size and Species are not independent
chisq.test(sizeSpecies)
library(dplyr)
group = c(rep("Woman",9),rep("Man",9))
weight=c(38.9,61.2,73.3,21.8,63.4,64.6, 48.4, 48.8, 48.5,
67.8, 60.0, 63.4, 76.0, 89.4, 73.3, 67.3, 61.3, 62.4)
data = as.data.frame(cbind(group,weight))
data$weight = as.double(data$weight)
womanWeight = data %>% filter(group=="Woman")
manWeight = data %>% filter(group=="Man")
# Weight is normally distributed for both sexes
shapiro.test(womanWeight$weight)
shapiro.test(manWeight$weight)
# Use a t-test to check whether the means can be said to differ
t.test(manWeight$weight, mu=mean(womanWeight$weight), alternative = "two.sided")
# Since the p-value is below 0.05, the means can be said to differ. | /finalexam_study.R | no_license | znehraks/2021-1-Statistical-Analysis-With-R | R | false | false | 19,600 | r |
make.glm.formula <- function(formula, data,
name.runningtime = ".t",
Min_T=Min_T,Max_T=Max_T, model=model, # used for formulasplit
...){
# make formula for glm() on splitdata
# replace timevar by workingtime variable name
special <- c("NPH", "NPHNLL", "WCEI")
Terms <- if (missing(data)){
terms(formula, special, keep.order = TRUE)
} else {
terms(formula, special, data = data, keep.order = TRUE)
}
NamesNPHVars<- all_specials_vars( Terms,
specials=c("NPH"),
unique = TRUE,
order="formula")
NamesNPHNLLVars<- all_specials_vars( Terms,
specials="NPHNLL",
unique = TRUE,
order="formula")
NamesWCEIVars<- all_specials_vars( Terms,
specials=c("WCEI"),
unique = TRUE,
order="formula")
modified <- 0
newtermlabels <- labels(Terms)
# change timevar in NPH() call
if(length(NamesNPHVars) >0){
for (i in attr(Terms, "specials")[["NPH"]]){
for (k in 1:length(i)){
thecall <- match.call(NPH, attr(Terms,"variables")[[i[k]+1]])
modified <- modified + 1
thecall[["timevar"]] <- as.name(name.runningtime)
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm[k]]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
}
}
}
# change timevar in NPHNLL() call
if(length(NamesNPHNLLVars) >0){
for (i in attr(Terms, "specials")[["NPHNLL"]]){
for (k in 1:length(i)){
thecall <- match.call(NPHNLL, attr(Terms,"variables")[[i[k]+1]])
modified <- modified + 1
thecall[["timevar"]] <- as.name(name.runningtime)
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm[k]]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
}
}
}
# change timevar in WCEI() call
if(length(NamesWCEIVars) >0){
for (i in attr(Terms, "specials")[["WCEI"]]){
for (k in 1:length(i)){
thecall <- match.call(WCEI, attr(Terms,"variables")[[i[k]+1]])
modified <- modified + 1
thecall[["timevar"]] <- as.name(name.runningtime)
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm[k]]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
}
}
}
if(modified > 0){
formula <- reformulate(newtermlabels,
response = if (attr(Terms, "response")){
Terms[[2L]]
}
else NULL,
intercept = attr(Terms, "intercept"))
}
if(FALSE){
# Add missing default arguments
#----------------------------------------------------------------------------------------
fbis <- attr(terms(formula, keep.order = TRUE),"term.labels")
if (model=="multiplicative" & (length(attr(Terms, "specials")$NPHNLL)!=0)) {
for (i in 1:(length(fbis))) {
if ((substr(fbis[i],1,4)=="NPH(") | (substr(fbis[i],1,4)=="NLL(")) {
fbis[i] <- paste((substring(fbis[i],first=1,last=(nchar(fbis[i]))-1)),
")", sep="")
}
if ((substr(fbis[i],1,7)=="NPHNLL(")) {
fbis[i] <- paste((substring(fbis[i],first=1,last=(nchar(fbis[i]))-1)),
")",sep="")
if (length(strsplit(fbis[i],"Boundary.knots")[[1]])==length(strsplit(fbis[i],"Boundary.knots.t")[[1]]) &
length(strsplit(fbis[i],"Boundary.knots.t")[[1]])==1) {
fbis[i] <- paste(substring(fbis[i],first=1,last=(nchar(fbis[i]))-1),
",Boundary.knots.t=range(c(",Min_T,",",Max_T,")))",sep="")
}
if (length(strsplit(fbis[i],"Boundary.knots.t")[[1]])==1) {
fbis[i] <- paste(substring(fbis[i],first=1,last=(nchar(fbis[i]))-1),
",Boundary.knots.t=range(c(",Min_T,",",Max_T,")))",sep="")
}
if (length(strsplit(fbis[i],"Boundary.knots")[[1]])==2) {
fbis[i] <- paste(substring(fbis[i],first=1,last=(nchar(fbis[i]))-1),",Boundary.knots=range(",
strsplit(strsplit(as.character(fbis[i]),"(",fixed=TRUE)[[1]][2],",")[[1]][1],
"))",sep="")
}
}
}
}
}
return(formula)
}
| /R/make.glm.formula.R | no_license | cran/flexrsurv | R | false | false | 4,224 | r |
#' Source all .R files in a directory
#'
#' @param directory The directory to source
#'
#' @examples
#' \dontrun{
#' source.dir('R')
#' }
#'
#' @export
source.dir <- function(directory) {
all.files <- list.files(directory)
nc <- nchar(all.files)
source.file <- all.files[substr(all.files, start=nc-1, stop=nc)==".R"]
if (length(source.file)!=0) {
for (i in 1:length(source.file)) {
source(paste0(directory, "/", source.file[i]))
}
}
}
| /R/source.dir.r | no_license | pbreheny/breheny | R | false | false | 462 | r |
test_that("Writing SF objects", {
skip_if_not_installed("sf")
compare_to_geojson_obj <- function(sf_obj){
sf_as_dataframe <- fromJSON(toJSON(sf_obj, digits = 8))
sf_as_features <- fromJSON(toJSON(sf_obj, sf = 'features', digits = 8))
geojson_string <- toJSON(sf_obj, sf = 'geojson', digits = 8)
sf_as_geojson <- parse_json(geojson_string)
tmp <- file.path(tempdir(), 'sfdata.geojson')
on.exit(unlink(tmp))
sf::write_sf(sf_obj, tmp, driver = 'GeoJSON')
geojson <- fromJSON(tmp)
geojson_exact <- read_json(tmp)
geojson_exact$crs = NULL # We don't add CRS because it was deprecated in geojson spec
expect_equal(sf_as_dataframe[[attr(sf_obj, 'sf_column')]], geojson$features$geometry)
expect_equal(sf_as_features, geojson$features)
expect_equal(sf_as_geojson, geojson_exact)
}
compare_to_geojson_file <- function(file){
sf_obj <- sf::st_read(file, quiet = TRUE)
compare_to_geojson_obj(sf_obj)
compare_to_geojson_obj(sf_obj[1,])
compare_to_geojson_obj(sf_obj[0,])
}
# Test with some standard data
compare_to_geojson_file(system.file("shape/storms_xyz.shp", package = "sf"))
compare_to_geojson_file(system.file("shape/nc.shp", package = "sf"))
#compare_to_geojson_file(system.file("examples", "us_states.topojson", package = "geojsonio"))
# Test special types, from ?st examples
outer = matrix(c(0,0,10,0,10,10,0,10,0,0.0), ncol=2, byrow=TRUE)
hole1 = matrix(c(1,1,1,2,2,2,2,1,1,1.0), ncol=2, byrow=TRUE)
hole2 = matrix(c(5,5,5,6,6,6,6,5,5,5.0), ncol=2, byrow=TRUE)
pts = list(outer, hole1, hole2)
ml1 = sf::st_multilinestring(pts)
pl1 = sf::st_polygon(pts)
pol1 = list(outer, hole1, hole2)
pol2 = list(outer + 12, hole1 + 12)
pol3 = list(outer + 24)
mpl1 = sf::st_multipolygon(list(pol1,pol2,pol3))
p1 = sf::st_point(as.double(1:2))
mp1 = sf::st_multipoint(matrix(as.double(1:10), ncol = 2))
ls1 = sf::st_linestring(matrix(as.double(1:10), ncol = 2))
gcol = sf::st_geometrycollection(list(p1, ls1, pl1, mp1))
geometry = sf::st_sfc(
p1,
mp1,
ls1,
ml1,
pl1,
mpl1,
gcol
)
sf_obj <- sf::st_sf(geoms = geometry)
compare_to_geojson_obj(sf_obj)
# One very strict test
geojson_string <- toJSON(sf_obj, sf = 'geojson', always_decimal = TRUE, digits = 8)
tmp <- file.path(tempdir(), 'sfdata.geojson')
on.exit(unlink(tmp))
sf::write_sf(sf_obj, tmp, driver = 'GeoJSON')
expect_equal(minify(geojson_string), minify(readLines(tmp)))
})
| /tests/testthat/test-toJSON-sf.R | permissive | jeroen/jsonlite | R | false | false | 2,485 | r |
library(liftOver)
#system("wget http://hgdownload.cse.ucsc.edu/goldenpath/hg19/liftOver/hg19ToHg38.over.chain.gz")
#system("gunzip hg19ToHg38.over.chain.gz")
dmr <- read.csv("mmc3_DMR.csv")
write.table(dmr,"mmc3_DMR.bed",sep="\t",quote=F,row.names=F,col.names=F)
peaksori = dmr[,1:3]
peaks <- GRanges(peaksori[,1],IRanges(peaksori[,2], peaksori[,3]))
ch <- import.chain("hg19ToHg38.over.chain")
cur19 <- rtracklayer::liftOver(peaks, ch)
lc19= lapply( cur19, length )
dmr_filtered <- dmr[unlist(lc19)==1,]
peaksori_filtered = dmr_filtered[,1:3]
peaks <- GRanges(peaksori_filtered[,1],IRanges(peaksori_filtered[,2], peaksori_filtered[,3]))
cur38 <- rtracklayer::liftOver(peaks, ch)
# lc38= lapply( cur38, length )
cur38 <- unlist(cur38)
cur38_df <- data.frame(cur38)
dmr_filtered[,1] <- cur38_df[,1]
dmr_filtered[,2] <- cur38_df[,2]
dmr_filtered[,3] <- cur38_df[,3]
dmr1<-dmr_filtered[dmr_filtered$diffDMRClusterSmall==1,]
dmr3<-dmr_filtered[dmr_filtered$diffDMRClusterSmall==3,]
mydiff <- readRDS("~/methCA/bismark_methylation/dumps/D6_vs_D0_CpA_mydiff_fisher_pooled.rds")
mydiff <- getData(mydiff)
ca_united <- readRDS("~/methCA/bismark_methylation/dumps/CAmeth_d0d6_normalized_united_pooled.rds")
ca_united <- getData(ca_united)
library(data.table)
library(rtracklayer)
ca_united.gr <- ca_united[,1:3]
ca_united.gr[,3] <- ca_united[,3]+1
colnames(ca_united.gr) <- c("chr","start","end")
ca_united.gr <- makeGRangesFromDataFrame(ca_united.gr)
dmr1.gr <- dmr1[,1:3]
colnames(dmr1.gr) <- c("chr","start","end")
dmr1.gr <- makeGRangesFromDataFrame(dmr1.gr)
dmr3.gr <- dmr3[,1:3]
colnames(dmr3.gr) <- c("chr","start","end")
dmr3.gr <- makeGRangesFromDataFrame(dmr3.gr)
hits1 <- findOverlaps(ca_united.gr, dmr1.gr)
hits1.df <- as.data.frame(hits1)
hits3 <- findOverlaps(ca_united.gr, dmr3.gr)
hits3.df <- as.data.frame(hits3)
ca_united_dmr1 <- ca_united[unique(hits1.df[,1]),]
dm1_beta_d0<-ca_united_dmr1$numCs1/ca_united_dmr1$coverage1
dm1_beta_d6<-ca_united_dmr1$numCs2/ca_united_dmr1$coverage2
pdf("dmr1_betaScore_density_mCpA_d0_d6.pdf")
plot(density(dm1_beta_d0),ylim=c(0,180),col="black",lwd=2)
lines(density(dm1_beta_d6),col="red",lwd=2)
legend("topright",legend=c("D0","D6"),fill=c("black","red"),bty = "n")
dev.off()
ca_united_dmr3 <- ca_united[unique(hits3.df[,1]),]
dm3_beta_d0<-ca_united_dmr3$numCs1/ca_united_dmr3$coverage1
dm3_beta_d6<-ca_united_dmr3$numCs2/ca_united_dmr3$coverage2
pdf("dmr3_betaScore_density_mCpA_d0_d6.pdf")
plot(density(dm3_beta_d0),ylim=c(0,180),col="black",lwd=2)
lines(density(dm3_beta_d6),col="red",lwd=2)
legend("topright",legend=c("D0","D6"),fill=c("black","red"),bty = "n")
dev.off()
####
table(mydiff$qvalue<=0.05 & mydiff$meth.diff>20)
dmr1.gr <- dmr1[,1:3]
colnames(dmr1.gr) <- c("chr","start","end")
dmr1.gr <- makeGRangesFromDataFrame(dmr1.gr)
dmr3.gr <- dmr3[,1:3]
colnames(dmr3.gr) <- c("chr","start","end")
dmr3.gr <- makeGRangesFromDataFrame(dmr3.gr)
ca_united.gr <- mydiff[,1:3]
ca_united.gr[,3] <- ca_united.gr[,3]+1
colnames(ca_united.gr) <- c("chr","start","end")
ca_united.gr <- makeGRangesFromDataFrame(ca_united.gr)
hits1 <- findOverlaps(ca_united.gr, dmr1.gr)
hits1.df <- as.data.frame(hits1)
hits3 <- findOverlaps(ca_united.gr, dmr3.gr)
hits3.df <- as.data.frame(hits3)
ca_united_dmr1 <- mydiff[unique(hits1.df[,1]),]
dm1_beta_d0<-ca_united_dmr1$meth.diff
dm1_beta_d6<-ca_united_dmr1$meth.diff
pdf("dmr1_methdiff_density_mCpA_d0_d6.pdf")
plot(density(dm1_beta_d0),col="black",lwd=2)
lines(density(dm1_beta_d6),col="red",lwd=2)
legend("topright",legend=c("D0","D6"),fill=c("black","red"),bty = "n")
dev.off()
ca_united_dmr3 <- mydiff[unique(hits3.df[,1]),]
dm3_beta_d0<-ca_united_dmr3$meth.diff
dm3_beta_d6<-ca_united_dmr3$meth.diff
pdf("dmr_methdiff_density_mCpA_d0_d6.pdf")
plot(density(dm1_beta_d0),col="black",lwd=2)
lines(density(dm3_beta_d6),col="red",lwd=2)
legend("topright",legend=c("DMR1","DMR2"),fill=c("black","red"),bty = "n")
dev.off()
| /2_mCA_in_CpGchanges.R | no_license | rtmag/meth_ca | R | false | false | 4,057 | r |
# -------------------------------------------------------------------------------------------
# Purpose: This script contains the constants that are used by functions or are commonly used
# by the exec scripts assoicated with this package.
#
# Created by: Dorheim, Kalyn
# Created on: December 1 2017
# Modified: xxx
#
# Notes:
#
# Graphing Constants -----------------------------------------------------------------------
# Define my fav figure settings, often used inside graphing functions and in exec code
MY_SETTINGS <- ggplot2::theme(text = ggplot2::element_text(size = 18)) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1))
# Month Name -------------------------------------------------------------------------------
# A dataframe containing the month number and month name, often used to add the month_name
# column to a data frame.
MONTH_NAME <- data.frame(month = 1:12, month_name = c("Jan", "Feb", "Mar", "Apr", "May",
"Jun", "Jul", "Aug", "Sep", "Oct",
"Nov", "Dec"),
month_ch = c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10",
"11", "12"))
| /oceanpH/R/constants.R | no_license | JGCRI/OA_variability | R | false | false | 1,272 | r |
#' Soft-threshold
#' Soft-threshold elements of a vector
#'
#' \code{softthreshold} softthresholds the elements of a vector
#'
#' @param x vector to shrink
#' @param lambda regularization parameter
#' @export
softthreshold <- function(x, lambda) {
}
#' Lasso (Coordinate Descent)
#'
#' \code{lasso_cd} solves the lasso problem using coordinate descent.
#'
#' @param y Response variable
#' @param X design matrix
#' @param beta0 initial guess of regression parameter
#' @param lambda regularization parameter
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
#' @export
lasso_cd <- function(y, X, beta0, lambda, max_iter=1e2, tol=1e-3) {
}
#' Lasso (Proximal Gradient Descent)
#'
#' \code{lasso_pgd} solves the lasso problem using proximal gradient descent with a fixed step size
#'
#' @param y Response variable
#' @param X design matrix
#' @param beta0 initial guess of regression parameter
#' @param lambda regularization parameter
#' @param t step-size
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
#' @export
lasso_pgd <- function(y, X, beta0, lambda, t, max_iter=1e2, tol=1e-3) {
}
#' Lasso (ADMM)
#'
#' \code{lasso_admm} solves the lasso problem using ADMM
#'
#' @param y Response variable
#' @param X design matrix
#' @param beta0 initial guess of regression parameter
#' @param lambda regularization parameter
#' @param rho parameter
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
#' @export
lasso_admm <- function(y, X, beta0, lambda, rho=1, max_iter=1e2, tol=1e-3) {
}
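# ------------------------------------------------------------------------------
# Reference sketches (not part of the original lab file). The stubs above are
# intentionally left blank for the exercise; the functions below are one possible
# way to fill them in, assuming the objective 0.5*||y - X b||^2 + lambda*||b||_1
# and non-degenerate (non-zero) columns of X. The *_ref names are placeholders so
# the exercise stubs stay untouched.

softthreshold_ref <- function(x, lambda) {
  # elementwise soft-thresholding (proximal operator of the l1 norm)
  sign(x) * pmax(abs(x) - lambda, 0)
}

lasso_pgd_ref <- function(y, X, beta0, lambda, t, max_iter = 1e2, tol = 1e-3) {
  beta <- as.numeric(beta0)
  for (iter in seq_len(max_iter)) {
    grad <- as.numeric(-crossprod(X, y - X %*% beta))           # gradient of the smooth part
    beta_new <- softthreshold_ref(beta - t * grad, t * lambda)  # proximal gradient step
    if (max(abs(beta_new - beta)) < tol) { beta <- beta_new; break }
    beta <- beta_new
  }
  beta
}

lasso_cd_ref <- function(y, X, beta0, lambda, max_iter = 1e2, tol = 1e-3) {
  beta <- as.numeric(beta0)
  xtx  <- colSums(X^2)
  for (iter in seq_len(max_iter)) {
    beta_old <- beta
    for (j in seq_along(beta)) {
      r_j <- y - X[, -j, drop = FALSE] %*% beta[-j]             # partial residual
      beta[j] <- softthreshold_ref(sum(X[, j] * r_j), lambda) / xtx[j]
    }
    if (max(abs(beta - beta_old)) < tol) break
  }
  beta
}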
| /Labs/myopt/R/lab2.R | no_license | echi/samsi_opt | R | false | false | 1,598 | r |
#' Filter Odyssey Data Based on Water Level Data
#'
#' Filters odyssey data to select only observations taken at the same times as a water level logger reports being inundated
#' Outputs optional data frame and optionally saves file
#' If input csv is used, output save file retains base name. Otherwise, output file is named by timestamp
#' @param ody_csv path to csv file of cleaned odyssey data
#' @param waterlevel_csv path to csv file of water-level-above-sensor data
#' @param height.offset height difference between sensors or threshold water level at which to accept odyssey data
#' @param output.dir optional directory in which to save cleaned data
#' @param ody_df data frame of cleaned odyssey data if ody_csv not used
#' @param waterlevel_df data frame of cleaned water level data if waterlevel_csv not used
#' @param return.data boolean, whether or not to return data as data frame
#' @export
#' @examples filter_ody(ody_csv = 'MadOdy_2437.csv', waterlevel_csv = 'Mad01-A_BaroDensCompensated.csv', height.offset = 0.07, output.dir = 'Mad01-A')
filter_ody <- function(ody_csv, waterlevel_csv, height.offset, output.dir,
ody_df, waterlevel_df, return.data = FALSE) {
# make sure there is a height offset
if(missing(height.offset)) stop('Odyssey height offset required to filter water levels!')
# warn about duplicate data inputs
if(!missing(ody_csv) && !missing(ody_df)){
warning('Odyssey data must be either csv or data frame, not both',
call. = FALSE, immediate. = TRUE)
}
if(!missing(waterlevel_csv) && !missing(waterlevel_df)){
warning('Water Level Above Sensor data must be either csv or data frame, not both',
call. = FALSE, immediate. = TRUE)
}
# read in Odyssey data
if(!missing(ody_csv)){
ody.data <- read.csv(ody_csv) %>%
mutate(date.time = as.POSIXct(date.time, tz = "Etc/GMT+8"))
} else ody.data <- ody_df
  if(!('salinity' %in% names(ody.data)) &&
     !('salinity.calc' %in% names(ody.data))){
    stop('Odyssey data must contain the field "salinity" or "salinity.calc"')
  }
  if('salinity.calc' %in% names(ody.data)){
    ody.data <- rename(ody.data, salinity = salinity.calc)}
# read in water level data
if(!missing(waterlevel_csv)){
wll.data <- read.csv(waterlevel_csv) %>%
mutate(time = as.POSIXct(time, tz = "Etc/GMT+8"))
} else wll.data <- waterlevel_df
if(!('water.level.above.sensor' %in% names(wll.data))){
stop('Water Level data must contain the field "water.level.above.sensor"')
}
# warn if odyssey data is not contained within wll data
if(min(wll.data$time) > min(ody.data$date.time) ||
max(wll.data$time) < max(ody.data$date.time)){
    warning(sprintf(paste0('URGENT: Time range for odyssey data (%s - %s)',
                           ' is not fully contained within time range for water level data (%s - %s).',
                           ' Data will be trimmed. Use level data from another time period to recover',
' trimmed section.'),
format(min(ody.data$date.time), '%Y-%m-%d %H:%M'),
format(max(ody.data$date.time), '%Y-%m-%d %H:%M'),
format(min(wll.data$time), '%Y-%m-%d %H:%M'),
format(max(wll.data$time), '%Y-%m-%d %H:%M'), collapse =''),
call. = FALSE, immediate. = TRUE)}
# check height offset
if(height.offset < 0.05){
warning('Minimum threshold of 5cm will be used to filter water levels.',
call. = FALSE, immediate. = TRUE)
}
# filter water data using the provided threshold or 5 cm, whichever is bigger
filtered.water <- wll.data %>%
filter(water.level.above.sensor > max(c(0.05, height.offset))) %>%
transmute(date.time = time,
water.level.above.wll = water.level.above.sensor)
# join data frames to keep matching ody data
filtered.ody <- inner_join(ody.data, filtered.water, by = 'date.time')
print(sprintf('Filtering complete. %s observations removed from odyssey data',
nrow(ody.data) - nrow(filtered.ody)))
# save in output directory
if(!missing(output.dir)){
record.metadata(output.dir)
if(!missing(ody_csv)){
ody.title <- basename(tools::file_path_sans_ext(ody_csv))
} else {
ody.title <- strftime(Sys.time(), format="%Y-%m-%d-%H%M%S")
}
write.csv(filtered.ody,
file.path(output.dir, sprintf('%s_%sm_filtered.csv', ody.title,
max(c(0.05, height.offset)))))
}
if(return.data) return(filtered.ody)
}
| /R/FilterOdy.Data.R | no_license | acgoodman/wlTools | R | false | false | 4,566 | r |
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destfile <- paste(getwd(),"/HHpowerconsumption.zip", sep="")
download.file(fileUrl, destfile)
unzip(destfile)
# read a window of rows around the two target days (header=FALSE, so columns are named V1..V9)
data <- read.table(paste(getwd(),"/household_power_consumption.txt", sep=""), nrows=3000, header=FALSE, skip=66600, sep=";", stringsAsFactors=FALSE)
# keep the 2,880 one-minute observations that cover the two full days of interest
data <- data[38:2917,]
data[["Date"]] <- strptime(data$V1, format = "%d/%m/%Y")
data[["Time"]] <- strptime(paste(data$V1,data$V2), format = "%d/%m/%Y %H:%M:%S")
par(mar=c(5,5,4,2))
plot(data$Time, data[,7], type="l", xlab="", ylab="Energy sub metering")
lines(data$Time, data[,8], type="l", col="red")
lines(data$Time, data[,9], type="l", col="blue")
legend("topright", lty=1, lwd=1, col = c("black", "red", "blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex=0.6, x.intersp=1.2, y.intersp=0.8, adj=0.1)
dev.copy(png,'plot3.png')
dev.off()
| /plot3.R | no_license | KK0h/ExData_Plotting1 | R | false | false | 942 | r |
library(kernlab)
library(caret)
# read input file for test and train dataset.
train_file <- read.csv("mnist_train.csv")
test_file <- read.csv("mnist_test.csv")
# identify and name the classifier column as "digit"; this is the output variable
colnames(train_file)[1] <- "digit"
colnames(test_file)[1] <- "digit"
# The output variable "digit" is converted to a factor right after the train and
# test samples are combined below (the `comb` data frame does not exist yet here).
# rename rest of the columns in format P_<number>
for(i in 2:ncol(train_file)){
colnames(train_file)[i] <- paste("P",i,sep="_")
colnames(test_file)[i] <- paste("P",i,sep="_")
}
# reduced sample for test and train dataset
set.seed(100)
# train dataset sampling
indices_train= sample(1:nrow(train_file), 0.09*nrow(train_file))
train = train_file[indices_train,]
# test dataset sampling
indices_test= sample(1:nrow(test_file), 0.05*nrow(test_file))
test = test_file[indices_test,]
# combine train and test dataset
comb <- rbind(train,test)
# Changing output variable "digit" to factor type
comb$digit <- as.factor(comb$digit)
# Checking missing value
missing <- sapply(comb, function(x) sum(is.na(x)))
sum(which(missing ==1)) # No missing values
# split train and test data
train <- comb[1:nrow(train),]
test <- comb[-(1:nrow(train)),]
# remove columns with all zero values
train_nonZero <- train[, colSums(train != 0) > 0]
# remove output column 'digit' from train dataset
train_fil <- train_nonZero[,-1]
# Dimension Reduction
# run PCA on train dataset
prin_comp <- prcomp(train_fil, scale. = T)
names(prin_comp)
# determine PC
prin_comp$rotation
# compute standard dev and variance
stdev <- prin_comp$sdev
var <- stdev^2
var[1:10]
# understand proportion covered by PC
pr_var <- var/sum(var)
pr_var[1:20]
#draw scree plot to determin number of PC
plot(pr_var, xlab = "Principal Component", ylab = "Proportion of Variance Explained")
# First 10 PC cover 98.5% variance
pr_var[10]
#transform training set with principal components
train.data <- data.frame(digit = train$digit, prin_comp$x)
# First 10 PCs filtering
train.data <- train.data[,1:11]
train.data$digit <- as.factor(train.data$digit)
# transforming test data with PC
test.data <- predict(prin_comp, newdata = test)
test.data <- as.data.frame(test.data)
# filtering first 10 PCAs
test.data <- test.data[,1:10]
test.data<- cbind(digit=test$digit,test.data)
model_rbf1 <- ksvm(digit ~ ., data =train.data,scale=FALSE, kernel = "rbfdot")
# Predicting the model results
Eval_RBF1<- predict(model_rbf1, test.data)
# ksvm() was fit with a factor response, so the predictions are already class labels
round_pred <- Eval_RBF1
# combining levels in model and test data
u = union(levels(as.factor(round_pred)), levels(test.data$digit))
t = table(factor(round_pred, u), factor(test.data$digit, u))
# compute model accuracy
confusionMatrix(t)
metric1 <- "Accuracy"
# Hyperparameter tuning and Cross Validation - Non Linear - SVM
trainControl1 <- trainControl(method="cv", number=5)
####################### Linear Model #####################################
# making a grid of C values.
grid_linear <- expand.grid(C=seq(1, 5, by=1))
# Performing 5-fold cross validation
fit.svm_linear <- train(digit~., data=train.data, method="svmLinear", metric=metric1,
tuneGrid=grid_linear, trControl=trainControl1)
# Printing cross validation result
print(fit.svm_linear)
# Best tune at C=1,
# Accuracy - 0.835
# Plotting "fit.svm_linear" results
plot(fit.svm_linear)
# Accuracy : 0.8297
# 95% CI : (0.7937, 0.8616)
# No Information Rate : 0.1162
# P-Value [Acc > NIR] : < 2.2e-16
# Kappa : 0.8108
evaluate_linear<- predict(fit.svm_linear, test.data)
confusionMatrix(evaluate_linear, test.data$digit)
####################### Non Linear Evaluation #############################
# Making grid of "sigma" and C values.
grid_nonlinear1 <- expand.grid(.sigma=seq(0.01, 0.05, by=0.01), .C=seq(1, 5, by=1))
# Performing 5-fold cross validation
fit.svm_radial1 <- train(digit~., data=train.data, method="svmRadial", metric=metric1,
tuneGrid=grid_nonlinear1, trControl=trainControl1)
# Printing cross validation result
print(fit.svm_radial1)
# Accuracy was used to select the optimal model using the largest value.
# The final values used for the model were sigma = 0.05 and C = 5. Accuracy = .89
# Plotting model results
plot(fit.svm_radial1)
# Validating the model results on test data
# Accuracy : 0.8778
# 95% CI : (0.8458, 0.9052)
# No Information Rate : 0.1162
# P-Value [Acc > NIR] : < 2.2e-16
# Kappa : 0.8641
evaluate_non_linear<- predict(fit.svm_radial1, test.data)
confusionMatrix(evaluate_non_linear, test.data$digit)
################################################################################3
# Non-linear model gives accuracy of .8778
# The final values used for the model were sigma = 0.05 and C = 5. Accuracy = .89
| /SVM - Image Classification/DDA1710363_main.R | no_license | nitinsriv/R | R | false | false | 4,798 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiscFuns.R
\name{AIC.fixest}
\alias{AIC.fixest}
\title{Akaike's an information criterion}
\usage{
\method{AIC}{fixest}(object, ..., k = 2)
}
\arguments{
\item{object}{A \code{fixest} object. Obtained using the functions \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}.}
\item{...}{Optionally, more fitted objects.}
\item{k}{A numeric, the penalty per parameter to be used; the default k = 2 is the classical AIC (i.e. \code{AIC=-2*LL+k*nparams}).}
}
\value{
It returns a numeric vector with the same length as the number of objects passed as arguments.
}
\description{
This function computes the AIC (Akaike's, an information criterion) from a \code{fixest} estimation.
}
\details{
The AIC is computed as:
\deqn{AIC = -2\times LogLikelihood + k\times nbParams}
with k the penalty parameter.
You can have more information on this criterion on \code{\link[stats]{AIC}}.
}
\examples{
# two fitted models with different expl. variables:
res1 = femlm(Sepal.Length ~ Sepal.Width + Petal.Length +
Petal.Width | Species, iris)
res2 = femlm(Sepal.Length ~ Petal.Width | Species, iris)
AIC(res1, res2)
BIC(res1, res2)
}
\seealso{
See also the main estimation functions \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}. Other statistics methods: \code{\link[fixest]{BIC.fixest}}, \code{\link[fixest]{logLik.fixest}}, \code{\link[fixest]{nobs.fixest}}.
}
\author{
Laurent Berge
}
| /fixest/man/AIC.fixest.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,590 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsunlight-package.R
\docType{package}
\name{rsunlight-package}
\alias{rsunlight-package}
\alias{rsunlight}
\title{Methods for retrieving data from APIs that were
previously under Sunlight Labs}
\description{
The various APIs included here are now managed by ProPublica and
OpenStates organizations.
}
\details{
You need API keys for Propublica APIs. Get them at
\url{https://www.propublica.org/datastore/apis}
You need an API key for the OpenStates API. Get it at
\url{https://openstates.org/api/register/}
We set up the functions so that you can use either env vars, or R options.
For env vars, put an entry in your \code{.Renviron} file with the names
\code{PROPUBLICA_API_KEY} and \code{OPEN_STATES_KEY}.
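For example (illustrative placeholder values, not real keys), the
\code{.Renviron} entries would look like:
\preformatted{
PROPUBLICA_API_KEY=your-propublica-key
OPEN_STATES_KEY=your-openstates-key
}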
Currently we have functions to interface with the following ProPublica API
and OpenStates API:
\itemize{
\item ProPublica Congress API (\code{cg})
\item Open States API (\code{os})
}
NOTES:
\itemize{
\item Capitol Words API is now defunct
\item We will support ProPublica campaign finance API in the future
}
}
\author{
Scott Chamberlain \email{myrmecocystus@gmail.com}
Thomas J. Leeper \email{thosjleeper@gmail.com}
}
\keyword{package}
| /man/rsunlight-package.Rd | permissive | rOpenGov/rsunlight | R | false | true | 1,227 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_umap.R
\name{run_umap}
\alias{run_umap}
\title{Run Umap dimension reduction}
\usage{
run_umap(
popseg_long,
ploidy_VAL = NULL,
umap_dist = "manhattan",
umap_min_dist = 0,
umap_spread = 1,
umap_n_neighbors = 40,
mc.cores = 10,
seed = 55,
round = FALSE
)
}
\arguments{
\item{popseg_long}{The resulting data from segmentation}
\item{ploidy_VAL}{Optional ploidy value used to scale the segment-ratio values}
\item{umap_dist}{Distance metric used by umap. Defaults to manhattan}
\item{umap_min_dist}{UMAP min dist parameter}
\item{umap_spread}{UMAP spread parameter}
\item{umap_n_neighbors}{UMAP n_neighbors parameter}
\item{mc.cores}{Number of threads to be used}
\item{seed}{Seed, defaults to 55}
\item{round}{Optional; if the data is ploidy-scaled, values are rounded to the nearest integer}
}
\value{
A data frame with the resulting embedding from umap and a column containing the cell names.
}
\description{
Run Umap dimension reduction
}
\examples{
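# A sketch only: `popseg_long` stands for the long-format segment-ratio data
# produced by the segmentation step of this package; it is not created here.
\dontrun{
umap_df <- run_umap(popseg_long,
                    umap_dist = "manhattan",
                    umap_min_dist = 0,
                    umap_n_neighbors = 40,
                    seed = 55)
head(umap_df)
}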
}
| /man/run_umap.Rd | no_license | gladelephant/Nature_2021_Breast-Tumors-Maintain-a-Reservoir-of-Subclonal-Diversity-During-Expansion | R | false | true | 1,070 | rd |
# ################################################################################################################################################
# # The following script is based on Andrew's work                                                                                              #
# ################################################################################################################################################
# #install all packages:
# packages = c("ggplot2", "VGAM", "igraph", "plyr", "combinat", "fastICA", "irlba", "matrixStats", "reshape2", "R.utils", "snow",
# "stringr", "modeest", "Hmisc", "boot", "doMC", "data.table", "fitdistrplus", "ggdendro", "gplots", "princurve", "sp",
# "lmtest", "MASS", "mixsmsn", "pheatmap", "plyr", "pscl", "RColorBrewer", "VennDiagram", "zoo", "raster", "colorRamps", "grid")
# install.packages(packages, repo = 'http://cran.fhcrc.org/')
# bio_packages = c("Biobase", "BiocGenerics", "limma", "edgeR", "DESeq", "DESeq2", "piano")
# source("http://bioconductor.org/biocLite.R")
# biocLite(bio_packages)
# go to https://github.com/settings/tokens and generate personal tokens for install the private monocle / devtree package:
# install_github("cole-trapnell-lab/monocle-dev", auth_token = "2b5f9747e17c8512f1ecd2bf76f5df4730be21e2")
# install_github("cole-trapnell-lab/branch-diff", auth_token = "2b5f9747e17c8512f1ecd2bf76f5df4730be21e2")
# install.packages('./xacHelper_0.0.0.9000.tar.gz', dependencies = TRUE)
# install.packages('./monocle_1.99.0.tar.gz', dependencies = TRUE)
library(monocle)
library(stringr)
library(plyr)
library(xacHelper)
library(igraph)
# #load all the go/reactome/kegg datasets for the analysis:
root_directory <- "./Quake_data"
human_go_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Human/symbol/Human_GO_AllPathways_with_GO_iea_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(human_go_gsc$gsc) <- str_split_fixed(names(human_go_gsc$gsc), "%", 2)[,1]
human_reactome_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Human/symbol/Pathways/Human_Reactome_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(human_reactome_gsc$gsc) <- str_split_fixed(names(human_reactome_gsc$gsc), "%", 2)[,1]
mouse_go_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/GO/MOUSE_GO_bp_with_GO_iea_symbol.gmt", sep=""), encoding="latin1")
names(mouse_go_gsc$gsc) <- str_split_fixed(names(mouse_go_gsc$gsc), "%", 2)[,1]
mouse_reactome_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Reactome_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_reactome_gsc$gsc) <- str_split_fixed(names(mouse_reactome_gsc$gsc), "%", 2)[,1]
mouse_kegg_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Human_KEGG_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_kegg_gsc$gsc) <- str_split_fixed(names(mouse_kegg_gsc$gsc), "%", 2)[,1]
mouse_go_gsc_cc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/GO/MOUSE_GO_cc_with_GO_iea_symbol.gmt", sep=""), encoding="latin1")
names(mouse_go_gsc_cc$gsc) <- str_split_fixed(names(mouse_go_gsc_cc$gsc), "%", 2)[,1]
mouse_go_gsc_mf <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/GO/MOUSE_GO_mf_with_GO_iea_symbol.gmt", sep=""), encoding="latin1")
names(mouse_go_gsc_mf$gsc) <- str_split_fixed(names(mouse_go_gsc_mf$gsc), "%", 2)[,1]
mouse_reactome_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Reactome_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_reactome_gsc$gsc) <- str_split_fixed(names(mouse_reactome_gsc$gsc), "%", 2)[,1]
mouse_kegg_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Human_KEGG_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_kegg_gsc$gsc) <- str_split_fixed(names(mouse_kegg_gsc$gsc), "%", 2)[,1]
#set the directory:
prog_cell_state = "#979797"
AT1_cell_state = "#F05662"
AT2_cell_state = "#7990C8"
AT1_Lineage = "#BD1C7C"
AT2_Lineage = "#337DB9"
# #load the data:
# # load('xiaojie_test_data.gz') #make the dataset
source('./monocle_helper_functions.R')
Shalek_valid_genes <- read.table('./Aviv_data/valid_genes_for_analyis.txt', header = T)
Shalek_exprs_mat <- read.table("./Aviv_data/cuffnorm_output_files/genes.fpkm_table", row.names = 1, header = T)
Shalek_fd <- read.table("./Aviv_data/cuffnorm_output_files/genes.attr_table", row.names = 1, header = T)
Shalek_pd <- read.table("./Aviv_data/sample_metadata_table.txt", sep = '\t', row.names = 1, header = T)
rownames(Shalek_pd) <- paste(rownames(Shalek_pd), "_0", sep = "")
Shalek_exprs_mat <- Shalek_exprs_mat[row.names(Shalek_fd), row.names(Shalek_pd)]
Shalek_std <- newCellDataSet(Shalek_exprs_mat,
phenoData = new("AnnotatedDataFrame", data = Shalek_pd),
featureData = new("AnnotatedDataFrame", data = Shalek_fd),
expressionFamily=tobit(),
lowerDetectionLimit=1)
Shalek_std <- Shalek_std[, which(pData(Shalek_std)$used_in_study == T)]
Shalek_std <- Shalek_std[row.names(Shalek_std) %in% Shalek_valid_genes$gene_id, ] #27386 * 1787 cells
#check the consistency with the current Shalek_abs data:
Shalek_isoform_fpkm_matrix <- read.table("./Aviv_data/cuffnorm_output_files/isoforms.fpkm_table", row.names = 1, header = T)
# colnames(Shalek_isoform_fpkm_matrix) <- str_replace(colnames(Shalek_isoform_fpkm_matrix), "_0$", "")
#colnames(isoform_fpkm_matrix) <- str_replace(colnames(isoform_fpkm_matrix), "GolgiPlugh", "GolgiPlug")
# row.names(Shalek_isoform_fpkm_matrix) <- Shalek_isoform_fpkm_matrix$tracking_id
Shalek_isoform_fpkm_matrix <- Shalek_isoform_fpkm_matrix[, colnames(Shalek_std)]
# save(Shalek_exprs_mat, Shalek_pd, Shalek_fd, Shalek_isoform_fpkm_matrix, Shalek_valid_genes, file = 'AvivDC_cell.RData') #data for making the help package
# save(Shalek_valid_genes, file = 'Shalek_valid_genes')
# Convert expression measurements from FPKM to absolute transcript counts, using the isoforms object to estimate the t parameter
Shalek_abs= relative2abs(Shalek_std, estimate_t(Shalek_isoform_fpkm_matrix), modelFormulaStr = "~1", cores=detectCores())
pd <- new("AnnotatedDataFrame", data = pData(Shalek_std))
fd <- new("AnnotatedDataFrame", data = fData(Shalek_std))
Shalek_abs <- newCellDataSet(Shalek_abs,
phenoData = pd,
featureData = fd,
expressionFamily=negbinomial(),
lowerDetectionLimit=1)
pData(Shalek_abs)$Total_mRNAs <- colSums(exprs(Shalek_abs))
#Calculate size factors and dispersions
Shalek_abs = estimateSizeFactors(Shalek_abs)
Shalek_abs = estimateDispersions(Shalek_abs)
# Filter to only expressed genes (feel free to change this as needed, I have tried several methods)
Shalek_abs = detectGenes(Shalek_abs, min_expr = 1)
###################################################################################################################################
### performing the DEG tests to obtain the genes used for ordering the cells #####
# LPS:
Shalek_LPS <- Shalek_abs[, pData(Shalek_abs)$experiment_name %in% c('LPS', 'Unstimulated_Replicate')]
pData(Shalek_LPS)[, 'stim_time'] <- as.character(pData(Shalek_LPS)$time)
pData(Shalek_LPS)$stim_time[pData(Shalek_LPS)$stim_time == ''] <- 0
Shalek_LPS <- detectGenes(Shalek_LPS, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(Shalek_LPS), num_cells_expressed > 50))
genes_in_range <- selectGenesInExpressionRange(Shalek_LPS[expressed_genes,], 2, Inf, 0.1, stat_fun=function(x) { median(round(x)) })
Shalek_LPS_subset_DEG_res <- differentialGeneTest(Shalek_LPS[genes_in_range, ], fullModelFormulaStr = '~stim_time', cores = detectCores())
#ko: include all LPS cells
Shalek_abs_subset_ko_LPS <- Shalek_abs[, pData(Shalek_abs)$experiment_name %in% c('Ifnar1_KO_LPS', 'Stat1_KO_LPS', "LPS", "Unstimulated_Replicate")]
pData(Shalek_abs_subset_ko_LPS)[, 'stim_time'] <- as.character(pData(Shalek_abs_subset_ko_LPS)$time)
pData(Shalek_abs_subset_ko_LPS)$stim_time[pData(Shalek_abs_subset_ko_LPS)$stim_time == ''] <- 0
pData(Shalek_abs_subset_ko_LPS)$stim_time <- as.integer(revalue(pData(Shalek_abs_subset_ko_LPS)$stim_time, c("1h" = 1, "2h" = 2, "4h" = 4, "6h" = 6)))
Shalek_abs_subset_ko_LPS <- detectGenes(Shalek_abs_subset_ko_LPS, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(Shalek_abs_subset_ko_LPS), num_cells_expressed > 50))
genes_in_range <- selectGenesInExpressionRange(Shalek_abs_subset_ko_LPS[expressed_genes,], 2, Inf, 0.1, stat_fun=function(x) { median(round(x)) })
Shalek_abs_subset_ko_LPS_subset_DEG_res <- differentialGeneTest(Shalek_abs_subset_ko_LPS[genes_in_range, ], fullModelFormulaStr = '~experiment_name + stim_time', cores = detectCores())
#make spanning trees (select subset of the CDS for downstream analysis):
pData(Shalek_abs_subset_ko_LPS)$Total_mRNAs <- colSums(exprs(Shalek_abs_subset_ko_LPS))
Shalek_abs_subset_ko_LPS <- Shalek_abs_subset_ko_LPS[, pData(Shalek_abs_subset_ko_LPS)$Total_mRNAs < 75000]
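# cells are ordered on genes passing a stringent differential-expression cutoff (qval < 1e-40), selected below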
order_genes <- c(row.names(subset(Shalek_abs_subset_ko_LPS_subset_DEG_res, qval < 1e-40)))
Shalek_abs_subset_ko_LPS <- setOrderingFilter(Shalek_abs_subset_ko_LPS, order_genes)
Shalek_abs_subset_ko_LPS <- reduceDimension(Shalek_abs_subset_ko_LPS, use_vst = T, use_irlba=F, pseudo_expr = 0, covariates = as.vector(pData(Shalek_abs_subset_ko_LPS)$num_genes_expressed) )
Shalek_abs_subset_ko_LPS <- orderCells(Shalek_abs_subset_ko_LPS, num_path = 2)
# Figure 5C -- Heatmap
# Detect branching genes and calculate ABCs and ILRs
full_model_string = '~sm.ns(Pseudotime, df = 3)*Lineage'
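# the spline-by-Lineage interaction term lets each branch fit its own expression trend over pseudotime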
ko_branching_genes = branchTest(Shalek_abs_subset_ko_LPS, fullModelFormulaStr = full_model_string, cores = detectCores(), relative_expr = T, weighted = T)
Shalek_abs_subset_ko_LPS_abcs = calABCs(Shalek_abs_subset_ko_LPS, fullModelFormulaStr=full_model_string, cores=1)
# Generate a plot
# regev_cat <- read.table(file = './Aviv_data/cuffnorm_output_files/study_gene_categories.txt', header = T, sep = '\t') #pass this to the plot_genes_branched_heatmap function
# Figure 5B annotations -- Enrichment analysis on clusters
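# NOTE: the enrichment results object passed to save_hyper_df() below is assumed to come from a
# hypergeometric enrichment step (run against the gene set collections loaded above) that is not shown here.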
save_hyper_df(Shalek_abs_subset_ko_LPS_tree_hyper_geometric_results_reactome, 'ko_hyper_df.xls')
# make venn diagram for the genes for figure 5/6:
# figure 5:
# pseudotime test for the WT cells
##two group tests:
#comparing with time 4h:
ko_Ifnar1_wt4 <- differentialGeneTest(Shalek_abs_subset_ko_LPS[, c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h') | c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('Ifnar1_KO_LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h')
], fullModelFormulaStr="~experiment_name", reducedModelFormulaStr="~1", cores=detectCores())
ko_stat1_wt4 <- differentialGeneTest(Shalek_abs_subset_ko_LPS[, c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h') | c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('Stat1_KO_LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h')
], fullModelFormulaStr="~experiment_name", reducedModelFormulaStr="~1", cores=detectCores())
#####################golgi: with all LPS cells: ######################
Shalek_golgi_update <- Shalek_abs[,pData(Shalek_abs)$experiment_name %in% c("LPS_GolgiPlug", "LPS", "Unstimulated_Replicate")]
#add both the golgi time and stim time:
split_cols <- str_split_fixed(pData(Shalek_golgi_update)$time, '_', 2)
pData(Shalek_golgi_update)[, 'stim_time'] <- split_cols[, 1]
pData(Shalek_golgi_update)$stim_time[pData(Shalek_golgi_update)$stim_time == ''] <- 0
pData(Shalek_golgi_update)$stim_time <- as.numeric(revalue(pData(Shalek_golgi_update)$stim_time, c("1h" = 1, "2h" = 2, "4h" = 4, "6h" = 6)))
#the predictor cannot be Inf
pData(Shalek_golgi_update)[, 'golgi_time'] <- split_cols[, 2]
pData(Shalek_golgi_update)$golgi_time[pData(Shalek_golgi_update)$golgi_time == ''] <- 'NEVER'
Shalek_golgi_update <- detectGenes(Shalek_golgi_update, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(Shalek_golgi_update), num_cells_expressed > 50))
genes_in_range <- selectGenesInExpressionRange(Shalek_golgi_update[expressed_genes,], 2, Inf, 0.1, stat_fun=function(x) { median(round(x)) })
Shalek_golgi_update_subset_DEG_res <- differentialGeneTest(Shalek_golgi_update[genes_in_range, ], fullModelFormulaStr = '~stim_time + golgi_time', cores = detectCores())
#make spanning trees for golgi-plug:
pData(Shalek_golgi_update)$Total_mRNAs <- colSums(exprs(Shalek_golgi_update))
Shalek_golgi_update <- Shalek_golgi_update[, pData(Shalek_golgi_update)$Total_mRNAs < 75000]
#select genes for ordering cells:
golgi_order_genes <- c(row.names(subset(Shalek_golgi_update_subset_DEG_res, qval < 1e-40)))
Shalek_golgi_update <- setOrderingFilter(Shalek_golgi_update, golgi_order_genes)
Shalek_golgi_update <- reduceDimension(Shalek_golgi_update, use_vst = T, use_irlba=F, pseudo_expr = 0, covariates = as.vector(pData(Shalek_golgi_update)$num_genes_expressed) )
Shalek_golgi_update <- orderCells(Shalek_golgi_update, num_path = 2)
# Figure 6C -- Heatmap of trajectory from 6A
## Perform branch test and calculate ABCs
full_model_string = '~sm.ns(Pseudotime, df = 3)*Lineage'
golgi_branching_genes = branchTest(Shalek_golgi_update, fullModelFormulaStr = full_model_string, cores= detectCores(), relative_expr = T, weighted = T)
# ABCs_golgi = calABCs(Shalek_golgi_update, fullModelFormulaStr=full_model_string, cores=1) # change to calABCs once fixed
#figure 6:
#pseudotime test for the WT cells
golgi_wt_0to4_pseudo <- differentialGeneTest(Shalek_golgi_update[, pData(Shalek_golgi_update)$experiment_name %in% c('LPS', 'Unstimulated_Replicate') & pData(Shalek_golgi_update)$time %in% c('', '1h', '2h', '4h')], fullModelFormulaStr="~sm.ns(Pseudotime, df = 3)", reducedModelFormulaStr="~1", cores=detectCores() / 1)
golgi_wt_0to4_pseudo_gene_ids = row.names(subset(golgi_wt_0to4_pseudo, qval < 1e-2))
golgi_wt_0to6_pseudo <- differentialGeneTest(Shalek_golgi_update[, pData(Shalek_golgi_update)$experiment_name %in% c('LPS', 'Unstimulated_Replicate')], fullModelFormulaStr="~sm.ns(Pseudotime, df = 3)", reducedModelFormulaStr="~1", cores=detectCores() )
golgi_wt_0to6_pseudo_gene_ids = row.names(subset(golgi_wt_0to6_pseudo, qval < 1e-2))
##two group tests:
#test all Golgi plug cells at once:
all_golgi_plug0_wt4 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS') &
pData(Shalek_golgi_update)$time %in% '4h') | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug'))
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
all_golgi_plug0_wt4_gene_ids = row.names(subset(all_golgi_plug0_wt4, qval < 1e-2))
all_golgi_plug0_wt0 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('Unstimulated_Replicate')) | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug'))
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
all_golgi_plug0_wt0_gene_ids = row.names(subset(all_golgi_plug0_wt0, qval < 1e-2))
#different time comparing to WT 4h
golgi_plug0_wt4 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS') &
pData(Shalek_golgi_update)$time %in% '4h') | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug') &
pData(Shalek_golgi_update)$time %in% '4h_0h')
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
golgi_plug0_wt4_gene_ids = row.names(subset(golgi_plug0_wt4, qval < 1e-2))
# #different time comparing to WT 0h
golgi_plug0_wt0 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('Unstimulated_Replicate')) | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug') &
pData(Shalek_golgi_update)$time %in% '4h_0h')
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
golgi_plug0_wt0_gene_ids = row.names(subset(golgi_plug0_wt0, qval < 1e-2))
save.image('shalek_data_analysis.RData')
| /analysis_shalek_data.R | no_license | Xiaojieqiu/Census_BEAM | R | false | false | 17,076 | r | # ################################################################################################################################################
# # The following script is based on Andrew's work #
# ################################################################################################################################################
# #install all packages:
# packages = c("ggplot2", "VGAM", "igraph", "pRlyr", "combinat", "fastICA", "irlba", "matrixStats", "reshape2", "R.utils", "snow",
# "stringr", "modeest", "Hmisc", "boot", "doMC", "data.table", "fitdistrplus", "ggdendro", "gplots", "princurve", "sp",
# "lmtest", "MASS", "mixsmsn", "pheatmap", "plyr", "pscl", "RColorBrewer", "VennDiagram", "zoo", "raster", "colorRamps", "grid")
# install.packages(packages, repo = 'http://cran.fhcrc.org/')
# bio_packages = c("Biobase", "BiocGenerics", "limma", "edgeR", "DESeq", "DESeq2", "piano")
# source("http://bioconductor.org/biocLite.R")
# biocLite(bio_packages)
# go to https://github.com/settings/tokens and generate personal tokens for install the private monocle / devtree package:
# install_github("cole-trapnell-lab/monocle-dev", auth_token = "2b5f9747e17c8512f1ecd2bf76f5df4730be21e2")
# install_github("cole-trapnell-lab/branch-diff", auth_token = "2b5f9747e17c8512f1ecd2bf76f5df4730be21e2")
# install.packages('./xacHelper_0.0.0.9000.tar.gz', dependencies = TRUE)
# install.packages('./monocle_1.99.0.tar.gz', dependencies = TRUE)
library(monocle)
library(stringr)
library(plyr)
library(xacHelper)
library(igraph)
# #load all the go/reactome/kegg datasets for the analysis:
root_directory <- "./Quake_data"
human_go_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Human/symbol/Human_GO_AllPathways_with_GO_iea_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(human_go_gsc$gsc) <- str_split_fixed(names(human_go_gsc$gsc), "%", 2)[,1]
human_reactome_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Human/symbol/Pathways/Human_Reactome_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(human_reactome_gsc$gsc) <- str_split_fixed(names(human_reactome_gsc$gsc), "%", 2)[,1]
mouse_go_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/GO/MOUSE_GO_bp_with_GO_iea_symbol.gmt", sep=""), encoding="latin1")
names(mouse_go_gsc$gsc) <- str_split_fixed(names(mouse_go_gsc$gsc), "%", 2)[,1]
mouse_reactome_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Reactome_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_reactome_gsc$gsc) <- str_split_fixed(names(mouse_reactome_gsc$gsc), "%", 2)[,1]
mouse_kegg_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Human_KEGG_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_kegg_gsc$gsc) <- str_split_fixed(names(mouse_kegg_gsc$gsc), "%", 2)[,1]
mouse_go_gsc_cc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/GO/MOUSE_GO_cc_with_GO_iea_symbol.gmt", sep=""), encoding="latin1")
names(mouse_go_gsc_cc$gsc) <- str_split_fixed(names(mouse_go_gsc_cc$gsc), "%", 2)[,1]
mouse_go_gsc_mf <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/GO/MOUSE_GO_mf_with_GO_iea_symbol.gmt", sep=""), encoding="latin1")
names(mouse_go_gsc_mf$gsc) <- str_split_fixed(names(mouse_go_gsc_mf$gsc), "%", 2)[,1]
mouse_reactome_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Reactome_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_reactome_gsc$gsc) <- str_split_fixed(names(mouse_reactome_gsc$gsc), "%", 2)[,1]
mouse_kegg_gsc <- loadGSCSafe(paste(root_directory,"/GMT/EM_pathways/Mouse/by_symbol/Pathways/Mouse_Human_KEGG_June_20_2014_symbol.gmt", sep=""), encoding="latin1")
names(mouse_kegg_gsc$gsc) <- str_split_fixed(names(mouse_kegg_gsc$gsc), "%", 2)[,1]
#set the directory:
prog_cell_state = "#979797"
AT1_cell_state = "#F05662"
AT2_cell_state = "#7990C8"
AT1_Lineage = "#BD1C7C"
AT2_Lineage = "#337DB9"
# #load the data:
# # load('xiaojie_test_data.gz') #make the dataset
source('./monocle_helper_functions.R')
Shalek_valid_genes <- read.table('./Aviv_data/valid_genes_for_analyis.txt', header = T)
Shalek_exprs_mat <- read.table("./Aviv_data/cuffnorm_output_files/genes.fpkm_table", row.names = 1, header = T)
Shalek_fd <- read.table("./Aviv_data/cuffnorm_output_files/genes.attr_table", row.names = 1, header = T)
Shalek_pd <- read.table("./Aviv_data/sample_metadata_table.txt", sep = '\t', row.names = 1, header = T)
rownames(Shalek_pd) <- paste(rownames(Shalek_pd), "_0", sep = "")
Shalek_exprs_mat <- Shalek_exprs_mat[row.names(Shalek_fd), row.names(Shalek_pd)]
Shalek_std <- newCellDataSet(Shalek_exprs_mat,
phenoData = new("AnnotatedDataFrame", data = Shalek_pd),
featureData = new("AnnotatedDataFrame", data = Shalek_fd),
expressionFamily=tobit(),
lowerDetectionLimit=1)
Shalek_std <- Shalek_std[, which(pData(Shalek_std)$used_in_study == T)]
Shalek_std <- Shalek_std[row.names(Shalek_std) %in% Shalek_valid_genes$gene_id, ] #27386 * 1787 cells
#check the consistency with the current Shalek_abs data:
Shalek_isoform_fpkm_matrix <- read.table("./Aviv_data/cuffnorm_output_files/isoforms.fpkm_table", row.names = 1, header = T)
# colnames(Shalek_isoform_fpkm_matrix) <- str_replace(colnames(Shalek_isoform_fpkm_matrix), "_0$", "")
#colnames(isoform_fpkm_matrix) <- str_replace(colnames(isoform_fpkm_matrix), "GolgiPlugh", "GolgiPlug")
# row.names(Shalek_isoform_fpkm_matrix) <- Shalek_isoform_fpkm_matrix$tracking_id
Shalek_isoform_fpkm_matrix <- Shalek_isoform_fpkm_matrix[, colnames(Shalek_std)]
# save(Shalek_exprs_mat, Shalek_pd, Shalek_fd, Shalek_isoform_fpkm_matrix, Shalek_valid_genes, file = 'AvivDC_cell.RData') #data for making the help package
# save(Shalek_valid_genes, file = 'Shalek_valid_genes')
# Convert expression measurements from FPKM to absolute transcript counts, using the isoforms object to estimate the t parameter
Shalek_abs= relative2abs(Shalek_std, estimate_t(Shalek_isoform_fpkm_matrix), modelFormulaStr = "~1", cores=detectCores())
pd <- new("AnnotatedDataFrame", data = pData(Shalek_std))
fd <- new("AnnotatedDataFrame", data = fData(Shalek_std))
Shalek_abs <- newCellDataSet(Shalek_abs,
phenoData = pd,
featureData = fd,
expressionFamily=negbinomial(),
lowerDetectionLimit=1)
pData(Shalek_abs)$Total_mRNAs <- colSums(exprs(Shalek_abs))
#Calculate size factors and dispersions
Shalek_abs = estimateSizeFactors(Shalek_abs)
Shalek_abs = estimateDispersions(Shalek_abs)
# Filter to only expressed genes (feel free to change this as needed, I have tried several methods)
Shalek_abs = detectGenes(Shalek_abs, min_expr = 1)
###################################################################################################################################
### performing the DEG tests to obtain the genes used for ordering the cells #####
# LPS:
Shalek_LPS <- Shalek_abs[, pData(Shalek_abs)$experiment_name %in% c('LPS', 'Unstimulated_Replicate')]
pData(Shalek_LPS)[, 'stim_time'] <- as.character(pData(Shalek_LPS)$time)
pData(Shalek_LPS)$stim_time[pData(Shalek_LPS)$stim_time == ''] <- 0
Shalek_LPS <- detectGenes(Shalek_LPS, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(Shalek_LPS), num_cells_expressed > 50))
genes_in_range <- selectGenesInExpressionRange(Shalek_LPS[expressed_genes,], 2, Inf, 0.1, stat_fun=function(x) { median(round(x)) })
Shalek_LPS_subset_DEG_res <- differentialGeneTest(Shalek_LPS[genes_in_range, ], fullModelFormulaStr = '~stim_time', cores = detectCores())
#ko: include all LPS cells
Shalek_abs_subset_ko_LPS <- Shalek_abs[, pData(Shalek_abs)$experiment_name %in% c('Ifnar1_KO_LPS', 'Stat1_KO_LPS', "LPS", "Unstimulated_Replicate")]
pData(Shalek_abs_subset_ko_LPS)[, 'stim_time'] <- as.character(pData(Shalek_abs_subset_ko_LPS)$time)
pData(Shalek_abs_subset_ko_LPS)$stim_time[pData(Shalek_abs_subset_ko_LPS)$stim_time == ''] <- 0
pData(Shalek_abs_subset_ko_LPS)$stim_time <- as.integer(revalue(pData(Shalek_abs_subset_ko_LPS)$stim_time, c("1h" = 1, "2h" = 2, "4h" = 4, "6h" = 6)))
Shalek_abs_subset_ko_LPS <- detectGenes(Shalek_abs_subset_ko_LPS, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(Shalek_abs_subset_ko_LPS), num_cells_expressed > 50))
genes_in_range <- selectGenesInExpressionRange(Shalek_abs_subset_ko_LPS[expressed_genes,], 2, Inf, 0.1, stat_fun=function(x) { median(round(x)) })
Shalek_abs_subset_ko_LPS_subset_DEG_res <- differentialGeneTest(Shalek_abs_subset_ko_LPS[genes_in_range, ], fullModelFormulaStr = '~experiment_name + stim_time', cores = detectCores())
#make spanning trees (select subset of the CDS for downstream analysis):
pData(Shalek_abs_subset_ko_LPS)$Total_mRNAs <- colSums(exprs(Shalek_abs_subset_ko_LPS))
Shalek_abs_subset_ko_LPS <- Shalek_abs_subset_ko_LPS[, pData(Shalek_abs_subset_ko_LPS)$Total_mRNAs < 75000]
order_genes <- c(row.names(subset(Shalek_abs_subset_ko_LPS_subset_DEG_res, qval < 1e-40)))
Shalek_abs_subset_ko_LPS <- setOrderingFilter(Shalek_abs_subset_ko_LPS, order_genes)
Shalek_abs_subset_ko_LPS <- reduceDimension(Shalek_abs_subset_ko_LPS, use_vst = T, use_irlba=F, pseudo_expr = 0, covariates = as.vector(pData(Shalek_abs_subset_ko_LPS)$num_genes_expressed) )
Shalek_abs_subset_ko_LPS <- orderCells(Shalek_abs_subset_ko_LPS, num_path = 2)
# Figure 5C -- Heatmap
# Detect branching genes and calculate ABCs and ILRs
full_model_string = '~sm.ns(Pseudotime, df = 3)*Lineage'
ko_branching_genes = branchTest(Shalek_abs_subset_ko_LPS, fullModelFormulaStr = full_model_string, cores = detectCores(), relative_expr = T, weighted = T)
Shalek_abs_subset_ko_LPS_abcs = calABCs(Shalek_abs_subset_ko_LPS, fullModelFormulaStr=full_model_string, cores=1)
# Generate a plot
# regev_cat <- read.table(file = './Aviv_data/cuffnorm_output_files/study_gene_categories.txt', header = T, sep = '\t') #pass this to the plot_genes_branched_heatmap function
# Figure 5B annotations -- Enrichment analysis on clusters
save_hyper_df(Shalek_abs_subset_ko_LPS_tree_hyper_geometric_results_reactome, 'ko_hyper_df.xls')
# make venn diagram for the genes for figure 5/6:
# figure 5:
# pseudotime test for the WT cells
##two group tests:
#comparing with time 4h:
ko_Ifnar1_wt4 <- differentialGeneTest(Shalek_abs_subset_ko_LPS[, c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h') | c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('Ifnar1_KO_LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h')
], fullModelFormulaStr="~experiment_name", reducedModelFormulaStr="~1", cores=detectCores())
ko_stat1_wt4 <- differentialGeneTest(Shalek_abs_subset_ko_LPS[, c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h') | c(pData(Shalek_abs_subset_ko_LPS)$experiment_name %in% c('Stat1_KO_LPS') &
pData(Shalek_abs_subset_ko_LPS)$time %in% '4h')
], fullModelFormulaStr="~experiment_name", reducedModelFormulaStr="~1", cores=detectCores())
#####################golgi: with all LPS cells: ######################
Shalek_golgi_update <- Shalek_abs[,pData(Shalek_abs)$experiment_name %in% c("LPS_GolgiPlug", "LPS", "Unstimulated_Replicate")]
#add both the golgi time and stim time:
split_cols <- str_split_fixed(pData(Shalek_golgi_update)$time, '_', 2)
pData(Shalek_golgi_update)[, 'stim_time'] <- split_cols[, 1]
pData(Shalek_golgi_update)$stim_time[pData(Shalek_golgi_update)$stim_time == ''] <- 0
pData(Shalek_golgi_update)$stim_time <- as.numeric(revalue(pData(Shalek_golgi_update)$stim_time, c("1h" = 1, "2h" = 2, "4h" = 4, "6h" = 6)))
#the predictor cannot be Inf
pData(Shalek_golgi_update)[, 'golgi_time'] <- split_cols[, 2]
pData(Shalek_golgi_update)$golgi_time[pData(Shalek_golgi_update)$golgi_time == ''] <- 'NEVER'
Shalek_golgi_update <- detectGenes(Shalek_golgi_update, min_expr = 0.1)
expressed_genes <- row.names(subset(fData(Shalek_golgi_update), num_cells_expressed > 50))
genes_in_range <- selectGenesInExpressionRange(Shalek_golgi_update[expressed_genes,], 2, Inf, 0.1, stat_fun=function(x) { median(round(x)) })
Shalek_golgi_update_subset_DEG_res <- differentialGeneTest(Shalek_golgi_update[genes_in_range, ], fullModelFormulaStr = '~stim_time + golgi_time', cores = detectCores())
#make spanning trees for golgi-plug:
pData(Shalek_golgi_update)$Total_mRNAs <- colSums(exprs(Shalek_golgi_update))
Shalek_golgi_update <- Shalek_golgi_update[, pData(Shalek_golgi_update)$Total_mRNAs < 75000]
#select genes for ordering cells:
golgi_order_genes <- c(row.names(subset(Shalek_golgi_update_subset_DEG_res, qval < 1e-40)))
Shalek_golgi_update <- setOrderingFilter(Shalek_golgi_update, golgi_order_genes)
Shalek_golgi_update <- reduceDimension(Shalek_golgi_update, use_vst = T, use_irlba=F, pseudo_expr = 0, covariates = as.vector(pData(Shalek_golgi_update)$num_genes_expressed) )
Shalek_golgi_update <- orderCells(Shalek_golgi_update, num_path = 2)
# Figure 6C -- Heatmap of trajectory from 6A
## Perform branch test and calculate ABCs
full_model_string = '~sm.ns(Pseudotime, df = 3)*Lineage'
golgi_branching_genes = branchTest(Shalek_golgi_update, fullModelFormulaStr = full_model_string, cores= detectCores(), relative_expr = T, weighted = T)
# ABCs_golgi = calABCs(Shalek_golgi_update, fullModelFormulaStr=full_model_string, cores=1) # change to calABCs once fixed
#figure 6:
#pseudotime test for the WT cells
golgi_wt_0to4_pseudo <- differentialGeneTest(Shalek_golgi_update[, pData(Shalek_golgi_update)$experiment_name %in% c('LPS', 'Unstimulated_Replicate') & pData(Shalek_golgi_update)$time %in% c('', '1h', '2h', '4h')], fullModelFormulaStr="~sm.ns(Pseudotime, df = 3)", reducedModelFormulaStr="~1", cores=detectCores() / 1)
golgi_wt_0to4_pseudo_gene_ids = row.names(subset(golgi_wt_0to4_pseudo, qval < 1e-2))
golgi_wt_0to6_pseudo <- differentialGeneTest(Shalek_golgi_update[, pData(Shalek_golgi_update)$experiment_name %in% c('LPS', 'Unstimulated_Replicate')], fullModelFormulaStr="~sm.ns(Pseudotime, df = 3)", reducedModelFormulaStr="~1", cores=detectCores() )
golgi_wt_0to6_pseudo_gene_ids = row.names(subset(golgi_wt_0to6_pseudo, qval < 1e-2))
##two group tests:
#test all Golgi plug cells at once:
all_golgi_plug0_wt4 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS') &
pData(Shalek_golgi_update)$time %in% '4h') | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug'))
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
all_golgi_plug0_wt4_gene_ids = row.names(subset(all_golgi_plug0_wt4, qval < 1e-2))
all_golgi_plug0_wt0 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('Unstimulated_Replicate')) | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug'))
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
all_golgi_plug0_wt0_gene_ids = row.names(subset(all_golgi_plug0_wt0, qval < 1e-2))
#different time comparing to WT 4h
golgi_plug0_wt4 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS') &
pData(Shalek_golgi_update)$time %in% '4h') | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug') &
pData(Shalek_golgi_update)$time %in% '4h_0h')
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
golgi_plug0_wt4_gene_ids = row.names(subset(golgi_plug0_wt4, qval < 1e-2))
# #different time comparing to WT 0h
golgi_plug0_wt0 <- differentialGeneTest(Shalek_golgi_update[, c(pData(Shalek_golgi_update)$experiment_name %in% c('Unstimulated_Replicate')) | c(pData(Shalek_golgi_update)$experiment_name %in% c('LPS_GolgiPlug') &
pData(Shalek_golgi_update)$time %in% '4h_0h')
], fullModelFormulaStr="~time", reducedModelFormulaStr="~1", cores=detectCores())
golgi_plug0_wt0_gene_ids = row.names(subset(golgi_plug0_wt0, qval < 1e-2))
save.image('shalek_data_analysis.RData')
|
library(shiny)
# renderChart() used below is provided by the rCharts package rather than base shiny;
# it is assumed to be installed, and may also be attached inside data_processing.R.
library(rCharts)
setwd("~/GitHub/mhardi")
# Load data processing file
source("data_processing.R")
themes <- sort(unique(data$theme))
# Shiny server
shinyServer(
function(input, output) {
# output$text1 <- renderText({input$text1})
# output$text2 <- renderText({input$text2})
# output$text3 <- renderText({
# input$goButton
# isolate(paste(input$text1, input$text2))
# })
output$setid <- renderText({input$setid})
output$address <- renderText({
input$goButtonAdd
isolate(paste("http://brickset.com/sets/",
input$setid, sep=""))
})
# getPage<-function(url) {
# return(tags$iframe(src = url,
# style="width:100%;",
# frameborder="0", id="iframe",
# height = "500px"))
# }
openPage <- function(url) {
return(tags$a(href=url, "Click here!", target="_blank"))
}
output$inc <- renderUI({
input$goButtonDirect
isolate(openPage(paste("http://brickset.com/sets/",
input$setid, sep="")))
})
# Initialize reactive values
values <- reactiveValues()
values$themes <- themes
# Create event type checkbox
output$themesControl <- renderUI({
checkboxGroupInput('themes', 'LEGO Themes:',
themes, selected = values$themes)
})
# Add observer on select-all button
observe({
if(input$selectAll == 0) return()
values$themes <- themes
})
# Add observer on clear-all button
observe({
if(input$clearAll == 0) return()
values$themes <- c() # empty list
})
# Prepare dataset
dataTable <- reactive({
groupByTheme(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByYear <- reactive({
groupByYearAgg(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByPiece <- reactive({
groupByYearPiece(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByPieceAvg <- reactive({
groupByPieceAvg(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByPieceThemeAvg <- reactive({
groupByPieceThemeAvg(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
# Render data table
output$dTable <- renderDataTable({
dataTable()
} #, options = list(bFilter = FALSE, iDisplayLength = 50)
)
output$themesByYear <- renderChart({
plotThemesCountByYear(dataTableByYear())
})
output$piecesByYear <- renderChart({
plotPiecesByYear(dataTableByPiece())
})
output$piecesByYearAvg <- renderChart({
plotPiecesByYearAvg(dataTableByPieceAvg())
})
output$piecesByThemeAvg <- renderChart({
plotPiecesByThemeAvg(dataTableByPieceThemeAvg())
})
} # end of function(input, output)
)
| /server.R | no_license | mohamed-hardi/mhardi | R | false | false | 3,454 | r | library(shiny)
setwd("~/GitHub/mhardi")
# Load data processing file
source("data_processing.R")
themes <- sort(unique(data$theme))
# Shiny server
shinyServer(
function(input, output) {
# output$text1 <- renderText({input$text1})
# output$text2 <- renderText({input$text2})
# output$text3 <- renderText({
# input$goButton
# isolate(paste(input$text1, input$text2))
# })
output$setid <- renderText({input$setid})
output$address <- renderText({
input$goButtonAdd
isolate(paste("http://brickset.com/sets/",
input$setid, sep=""))
})
# getPage<-function(url) {
# return(tags$iframe(src = url,
# style="width:100%;",
# frameborder="0", id="iframe",
# height = "500px"))
# }
openPage <- function(url) {
return(tags$a(href=url, "Click here!", target="_blank"))
}
output$inc <- renderUI({
input$goButtonDirect
isolate(openPage(paste("http://brickset.com/sets/",
input$setid, sep="")))
})
# Initialize reactive values
values <- reactiveValues()
values$themes <- themes
# Create event type checkbox
output$themesControl <- renderUI({
checkboxGroupInput('themes', 'LEGO Themes:',
themes, selected = values$themes)
})
# Add observer on select-all button
observe({
if(input$selectAll == 0) return()
values$themes <- themes
})
# Add observer on clear-all button
observe({
if(input$clearAll == 0) return()
values$themes <- c() # empty list
})
# Prepare dataset
dataTable <- reactive({
groupByTheme(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByYear <- reactive({
groupByYearAgg(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByPiece <- reactive({
groupByYearPiece(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByPieceAvg <- reactive({
groupByPieceAvg(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
dataTableByPieceThemeAvg <- reactive({
groupByPieceThemeAvg(data, input$timeline[1],
input$timeline[2], input$pieces[1],
input$pieces[2], input$themes)
})
# Render data table
output$dTable <- renderDataTable({
dataTable()
} #, options = list(bFilter = FALSE, iDisplayLength = 50)
)
output$themesByYear <- renderChart({
plotThemesCountByYear(dataTableByYear())
})
output$piecesByYear <- renderChart({
plotPiecesByYear(dataTableByPiece())
})
output$piecesByYearAvg <- renderChart({
plotPiecesByYearAvg(dataTableByPieceAvg())
})
output$piecesByThemeAvg <- renderChart({
plotPiecesByThemeAvg(dataTableByPieceThemeAvg())
})
} # end of function(input, output)
)
|
context("board rsc")
test_rsconnect_boards <- function(key, server) {
board_register("rsconnect",
key = key,
server = server,
cache = tempfile())
board_test("rsconnect", destination = server)
board_deregister("rsconnect")
}
if (test_board_is_registered("rsconnect")) {
board_test("rsconnect")
} else {
test_rsconnect_server <- Sys.getenv("TEST_RSCONNECT_SERVERS", "")
if (nchar(test_rsconnect_server) > 0) {
rsc_servers <- strsplit(test_rsconnect_server, ",", fixed = TRUE)[[1]]
rsc_apis <- strsplit(Sys.getenv("TEST_RSCONNECT_APIS"), ",", fixed = TRUE)[[1]]
    if (length(rsc_servers) != length(rsc_apis)) stop("TEST_RSCONNECT_SERVERS and TEST_RSCONNECT_APIS must have the same number of comma-separated entries.")
for (rsc_index in seq_along(rsc_servers)) {
server <- gsub("/$", "", rsc_servers[[rsc_index]])
test_rsconnect_boards(rsc_apis[[rsc_index]], server)
# also test with trailing slash
test_rsconnect_boards(rsc_apis[[rsc_index]], paste0(server, "/"))
}
} else {
test_that("can't register rsconnect board", {
skip("failed to register rsconnect board")
})
}
}
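# The environment variables above are read as comma-separated, equal-length lists,
# e.g. (hypothetical values) set in .Renviron:
#   TEST_RSCONNECT_SERVERS=https://connect.example.com,https://connect2.example.com
#   TEST_RSCONNECT_APIS=first-api-key,second-api-key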
| /tests/testthat/test-board-rsconnect.R | permissive | jonkeane/pins | R | false | false | 1,160 | r | context("board rsc")
test_rsconnect_boards <- function(key, server) {
board_register("rsconnect",
key = key,
server = server,
cache = tempfile())
board_test("rsconnect", destination = server)
board_deregister("rsconnect")
}
if (test_board_is_registered("rsconnect")) {
board_test("rsconnect")
} else {
test_rsconnect_server <- Sys.getenv("TEST_RSCONNECT_SERVERS", "")
if (nchar(test_rsconnect_server) > 0) {
rsc_servers <- strsplit(test_rsconnect_server, ",", fixed = TRUE)[[1]]
rsc_apis <- strsplit(Sys.getenv("TEST_RSCONNECT_APIS"), ",", fixed = TRUE)[[1]]
    if (length(rsc_servers) != length(rsc_apis)) stop("TEST_RSCONNECT_SERVERS and TEST_RSCONNECT_APIS must have the same number of comma-separated entries.")
for (rsc_index in seq_along(rsc_servers)) {
server <- gsub("/$", "", rsc_servers[[rsc_index]])
test_rsconnect_boards(rsc_apis[[rsc_index]], server)
# also test with trailing slash
test_rsconnect_boards(rsc_apis[[rsc_index]], paste0(server, "/"))
}
} else {
test_that("can't register rsconnect board", {
skip("failed to register rsconnect board")
})
}
}
|
library(XLConnect)  # readWorksheetFromFile() below comes from XLConnect
macdatawd <- "/Volumes/GoogleDrive/Team Drives/Data/CISDM Files/"
windowsdatawd <- "C:/Users/USER/Google Drive/Data Files"
if(file.exists(macdatawd)){
setwd(file.path(macdatawd))
} else {
if(file.exists(windowsdatawd)){
setwd(file.path(windowsdatawd))
}
}
site_coordination <-readWorksheetFromFile('site_coordination.xlsx', sheet=1, header = T, startRow = 1)
#site_coordination <- data.frame(apply(site_coordination, 2, function(x) gsub("^$|^ $", NA, x)))
#site_coordination <- site_coordination[,colSums(is.na(site_coordination))<nrow(site_coordination)]
site_coordination <- site_coordination[!is.na(site_coordination$School), ]
#colnames(site_coordination)[15:18] <- c("students_served", "parents_served", "other_served", "volunteers")
mac_save_wd <- "/Volumes/GoogleDrive/Team Drives/Data/Generated Files/"
windows_save_wd <- "C:/Users/USER/Google Drive/Data Files"
if(file.exists(mac_save_wd)){
setwd(file.path(mac_save_wd))
} else {
if(file.exists(windows_save_wd)){
setwd(file.path(windows_save_wd))
}
}
write.csv(site_coordination, "site_coordination.csv")
| /site_coordination.R | no_license | jbakerr/CIS2017-18 | R | false | false | 1,096 | r | macdatawd <- "/Volumes/GoogleDrive/Team Drives/Data/CISDM Files/"
windowsdatawd <- "C:/Users/USER/Google Drive/Data Files"
if(file.exists(macdatawd)){
setwd(file.path(macdatawd))
} else {
if(file.exists(windowsdatawd)){
setwd(file.path(windowsdatawd))
}
}
site_coordination <-readWorksheetFromFile('site_coordination.xlsx', sheet=1, header = T, startRow = 1)
#site_coordination <- data.frame(apply(site_coordination, 2, function(x) gsub("^$|^ $", NA, x)))
#site_coordination <- site_coordination[,colSums(is.na(site_coordination))<nrow(site_coordination)]
site_coordination <- site_coordination[!is.na(site_coordination$School), ]
#colnames(site_coordination)[15:18] <- c("students_served", "parents_served", "other_served", "volunteers")
mac_save_wd <- "/Volumes/GoogleDrive/Team Drives/Data/Generated Files/"
windows_save_wd <- "C:/Users/USER/Google Drive/Data Files"
if(file.exists(mac_save_wd)){
setwd(file.path(mac_save_wd))
} else {
if(file.exists(windows_save_wd)){
setwd(file.path(windows_save_wd))
}
}
write.csv(site_coordination, "site_coordination.csv")
|
#' Query search traffic keyword data
#'
#' @description Download your Google SEO data.
#'
#' @param siteURL The URL of the website you have auth access to.
#' @param startDate Start date of requested range, in YYYY-MM-DD.
#' @param endDate End date of the requested date range, in YYYY-MM-DD.
#' @param dimensions Zero or more dimensions to group results by:
#' \code{"date", "country", "device", "page" , "query" or "searchAppearance"}
#' @param searchType Search type filter, default 'web'.
#' @param dimensionFilterExp A character vector of expressions to filter.
#' e.g. \code{("device==TABLET", "country~~GBR")}
#' @param aggregationType How data is aggregated.
#' @param rowLimit How many rows to fetch. Ignored if \code{walk_data} is "byDate"
#' @param prettyNames If TRUE, converts ISO 3166-1 alpha-3 country codes to full names and
#' creates new column called countryName.
#' @param walk_data Make multiple API calls. One of \code{("byBatch","byDate","none")}
#'
#' @return A dataframe with columns in order of dimensions plus metrics, with attribute "aggregationType"
#'
#' @seealso Guide to Search Analytics: \url{https://support.google.com/webmasters/answer/6155685}
#' API docs: \url{https://developers.google.com/webmaster-tools/v3/searchanalytics/query}
#' @export
#'
#' @details
#' \strong{startDate}: Start date of the requested date range, in YYYY-MM-DD format,
#' in PST time (UTC - 8:00). Must be less than or equal to the end date.
#' This value is included in the range.
#'
#' \strong{endDate}: End date of the requested date range, in YYYY-MM-DD format,
#' in PST time (UTC - 8:00). Must be greater than or equal to the start date.
#' This value is included in the range.
#'
#' \strong{dimensions}: [Optional] Zero or more dimensions to group results by.
#' \itemize{
#' \item 'date'
#' \item 'country'
#' \item 'device'
#' \item 'page'
#' \item 'query'
#' \item 'searchAppearance' (can only appear on its own)
#' }
#' The grouping dimension values are combined to create a unique key
#' for each result row. If no dimensions are specified,
#' all values will be combined into a single row.
#' There is no limit to the number of dimensions that you can group by, except that \code{searchAppearance} can only be grouped on its own.
#' You cannot group by the same dimension twice.
#'
#' Example: \code{c('country', 'device')}
#'
#'
#' \strong{dimensionFilterExp}:
#' Results are grouped in the order that you supply these dimensions.
#' dimensionFilterExp expects a character vector of expressions in the form:
#' ("device==TABLET", "country~~GBR", "dimension operator expression")
#' \itemize{
#' \item dimension
#' \itemize{
#' \item 'country'
#' \item 'device'
#' \item 'page'
#' \item 'query'
#' \item 'searchAppearance'
#' }
#' \item operator
#' \itemize{
#' \item '~~' meaning 'contains'
#' \item '==' meaning 'equals'
#' \item '!~' meaning 'notContains'
#'    \item '!=' meaning 'notEquals'
#' }
#'
#' \item expression
#' \itemize{
#' \item country: an ISO 3166-1 alpha-3 country code.
#' \item device: 'DESKTOP','MOBILE','TABLET'.
#' \item page: not checked, a string in page URLs without hostname.
#' \item query: not checked, a string in keywords.
#' \item searchAppearance: 'AMP_BLUE_LINK', 'RICHCARD'
#'
#' }
#' }
#'
#'
#' \strong{searchType}: [Optional] The search type to filter for. Acceptable values are:
#' \itemize{
#' \item "web": [Default] Web search results
#' \item "image": Image search results
#' \item "video": Video search results
#' }
#'
#' \strong{aggregationType}: [Optional] How data is aggregated.
#' \itemize{
#' \item If aggregated by property, all data for the same property is aggregated;
#' \item If aggregated by page, all data is aggregated by canonical URI.
#' \item If you filter or group by page, choose auto; otherwise you can aggregate either by property or by page, depending on how you want your data calculated;
#' }
#' See the API documentation to learn how data is calculated differently by site versus by page.
#' Note: If you group or filter by page, you cannot aggregate by property.
#' If you specify any value other than auto, the aggregation type in the result will match the requested type, or if you request an invalid type, you will get an error.
#' The API will never change your aggregation type if the requested type is invalid.
#' Acceptable values are:
#' \itemize{
#' \item "auto": [Default] Let the service decide the appropriate aggregation type.
#' \item "byPage": Aggregate values by URI.
#' \item "byProperty": Aggregate values by property.
#' }
#'
#' \strong{walk_data}: [Optional] Batching data into multiple API calls
#' \itemize{
#'  \item byBatch Use API batching to fetch large result sets in 25,000-row pages
#'  \item byDate Runs a call over each day in the date range.
#' \item none No batching
#' }
#'
#' @examples
#'
#' \dontrun{
#'
#' library(searchConsoleR)
#' scr_auth()
#' sc_websites <- list_websites()
#'
#' default_fetch <- search_analytics("http://www.example.com")
#'
#' gbr_desktop_queries <-
#' search_analytics("http://www.example.com",
#' start = "2016-01-01", end = "2016-03-01",
#' dimensions = c("query", "page"),
#' dimensionFilterExp = c("device==DESKTOP", "country==GBR"),
#' searchType = "web", rowLimit = 100)
#'
#' batching <-
#' search_analytics("http://www.example.com",
#' start = "2016-01-01", end = "2016-03-01",
#' dimensions = c("query", "page", "date"),
#' searchType = "web", rowLimit = 100000,
#' walk_data = "byBatch")
#'
#' }
#' @importFrom googleAuthR gar_api_generator gar_batch_walk gar_api_page
search_analytics <- function(siteURL,
startDate = Sys.Date() - 93,
endDate = Sys.Date() - 3,
dimensions = NULL,
searchType = c("web","video","image"),
dimensionFilterExp = NULL,
aggregationType = c("auto","byPage","byProperty"),
rowLimit = 1000,
prettyNames = TRUE,
walk_data = c("byBatch","byDate","none")){
if(!googleAuthR::gar_has_token()){
stop("Not authenticated. Run scr_auth()", call. = FALSE)
}
searchType <- match.arg(searchType)
aggregationType <- match.arg(aggregationType)
walk_data <- match.arg(walk_data)
startDate <- as.character(startDate)
endDate <- as.character(endDate)
message("Fetching search analytics for ",
paste("url:", siteURL,
"dates:", startDate, endDate,
"dimensions:", paste(dimensions, collapse = " ", sep=";"),
"dimensionFilterExp:", paste(dimensionFilterExp, collapse = " ", sep=";"),
"searchType:", searchType,
"aggregationType:", aggregationType))
siteURL <- check.Url(siteURL, reserved=T)
if(any(is.na(as.Date(startDate, "%Y-%m-%d")), is.na(as.Date(endDate, "%Y-%m-%d")))){
stop("dates not in correct %Y-%m-%d format. Got these:", startDate, " - ", endDate)
}
if(any(as.Date(startDate, "%Y-%m-%d") > Sys.Date()-3, as.Date(endDate, "%Y-%m-%d") > Sys.Date()-3)){
warning("Search Analytics usually not available within 3 days (96 hrs) of today(",Sys.Date(),"). Got:", startDate, " - ", endDate)
}
  if(!is.null(dimensions) && !all(dimensions %in% c('date','country', 'device', 'page', 'query','searchAppearance'))){
    stop("dimensions must be NULL or one or more of 'date', 'country', 'device', 'page', 'query', 'searchAppearance'.
         Got this: ", paste(dimensions, collapse = ", "))
}
if(!searchType %in% c("web","image","video")){
stop('searchType not one of "web","image","video". Got this: ', searchType)
}
if(!aggregationType %in% c("auto","byPage","byProperty")){
stop('aggregationType not one of "auto","byPage","byProperty". Got this: ', aggregationType)
}
if(aggregationType %in% c("byProperty") && 'page' %in% dimensions ){
stop("Can't aggregate byProperty and include page in dimensions.")
}
# if batching by day, row limits make no sense so we get 5000 per day.
if(walk_data == "byDate"){
message("Batching data via method: ", walk_data)
message("Will fetch up to 25000 rows per day")
rowLimit <- 25000
} else if(walk_data == "byBatch"){
# if batching byBatch, we set to 25000 per API call, repeating API calls
# up to the limit you have set
if(rowLimit > 25000){
message("Batching data via method: ", walk_data)
message("With rowLimit set to ", rowLimit ," will need up to [", (rowLimit %/% 25000) + 1, "] API calls")
rowLimit0 <- rowLimit
rowLimit <- 25000
} else {
# its batched, but we can get all rows in one API call
walk_data <- "none"
}
}
## a list of filter expressions
## expects dimensionFilterExp like c("device==TABLET", "country~~GBR")
parsedDimFilterGroup <- lapply(dimensionFilterExp, parseDimFilterGroup)
body <- list(
startDate = startDate,
endDate = endDate,
dimensions = as.list(dimensions),
searchType = searchType,
dimensionFilterGroups = list(
list( ## you don't want more than one of these until different groupType available
groupType = "and", ##only one available for now
filters = parsedDimFilterGroup
)
),
aggregationType = aggregationType,
rowLimit = rowLimit
)
search_analytics_g <- gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"POST",
path_args = list(sites = "siteURL",
searchAnalytics = "query"),
data_parse_function = parse_search_analytics)
# set this here as it may get reset if other googleAuthR packages there
options(googleAuthR.batch_endpoint = 'https://www.googleapis.com/batch/webmasters/v3')
if(walk_data == "byDate"){
if(!'date' %in% dimensions){
warning("To walk data per date requires 'date' to be one of the dimensions. Adding it")
dimensions <- c("date", dimensions)
}
walk_vector <- seq(as.Date(startDate), as.Date(endDate), 1)
out <- gar_batch_walk(search_analytics_g,
walk_vector = walk_vector,
gar_paths = list(sites = siteURL),
body_walk = c("startDate", "endDate"),
the_body = body,
batch_size = 1,
dim = dimensions)
} else if(walk_data == "byBatch") {
## byBatch uses API batching, but this pulls out less data
## 0 impression keywords not included.
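    ## Pages through the report by advancing startRow in 25,000-row steps,
    ## stopping as soon as the API returns an empty page.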
walk_vector <- seq(0, rowLimit0, 25000)
do_it <- TRUE
i <- 1
pages <- list()
while(do_it){
message("Page [",i,"] of max [", length(walk_vector),"] API calls")
this_body <- utils::modifyList(body, list(startRow = walk_vector[i]))
this_page <- search_analytics_g(the_body = this_body,
list(sites = siteURL),
dim = dimensions)
if(all(is.na(this_page[[1]]))){
do_it <- FALSE
} else {
message("Downloaded ", nrow(this_page), " rows")
pages <- rbind(pages, this_page)
}
i <- i + 1
if(i>length(walk_vector)){
do_it <- FALSE
}
}
out <- pages
} else {
out <- search_analytics_g(the_body=body,
path_arguments=list(sites = siteURL),
dim = dimensions)
}
out
}
#' Retrieves dataframe of websites user has in Search Console
#'
#' @return a dataframe of siteUrl and permissionLevel
#'
#' @export
#' @family search console website functions
list_websites <- function() {
l <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/sites",
"GET",
data_parse_function = function(x) x$siteEntry)
l()
}
#' Adds website to Search Console
#'
#' @param siteURL The URL of the website to add.
#'
#' @return TRUE if successful, raises an error if not.
#' @family search console website functions
#'
#' @export
add_website <- function(siteURL) {
siteURL <- check.Url(siteURL, reserved = TRUE)
aw <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"PUT",
path_args = list(sites = "siteURL"))
aw(path_arguments = list(sites = siteURL))
TRUE
}
#' Deletes website in Search Console
#'
#' @param siteURL The URL of the website to delete.
#'
#' @return TRUE if successful, raises an error if not.
#' @family data fetching functions
#'
#' @export
#' @family search console website functions
delete_website <- function(siteURL) {
siteURL <- check.Url(siteURL, reserved = TRUE)
dw <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"DELETE",
path_args = list(sites = "siteURL"))
dw(path_arguments = list(sites = siteURL))
TRUE
}
#' Gets sitemap information for the URL supplied.
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/sitemaps
#'
#' @param siteURL The URL of the website to get sitemap information from. Must include protocol (http://).
#'
#' @return A list of two dataframes: $sitemap with general info and $contents with sitemap info.
#' @family data fetching functions
#'
#' @export
#' @family sitemap admin functions
list_sitemaps <- function(siteURL) {
siteURL <- check.Url(siteURL, reserved = TRUE)
ls <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"GET",
path_args = list(sites = "siteURL",
sitemaps = ""),
data_parse_function = parse_sitemaps)
ls(path_arguments = list(sites = siteURL))
}
#' Submit a sitemap.
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/sitemaps/submit
#'
#' @param siteURL The URL of the website to delete. Must include protocol (http://).
#' @param feedpath The URL of the sitemap to submit. Must include protocol (http://).
#'
#' @return TRUE if successful, raises an error if not.
#'
#' @export
#' @family sitemap admin functions
add_sitemap <- function(siteURL, feedpath) {
siteURL <- check.Url(siteURL, reserved = TRUE)
feedpath <- check.Url(feedpath, reserved = TRUE)
as <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"PUT",
path_args = list(sites = "siteURL",
sitemaps = "feedpath"))
as(path_arguments = list(sites = siteURL,
sitemaps = feedpath))
TRUE
}
#' Delete a sitemap.
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/sitemaps/delete
#'
#' @param siteURL The URL of the website you are deleting the sitemap from. Must include protocol (http://).
#' @param feedpath The URL of the sitemap to delete. Must include protocol (http://).
#'
#' @return TRUE if successful, raises an error if not.
#'
#' @export
#' @family sitemap admin functions
delete_sitemap <- function(siteURL, feedpath) {
siteURL <- check.Url(siteURL, reserved = TRUE)
feedpath <- check.Url(feedpath, reserved = TRUE)
ds <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"DELETE",
path_args = list(sites = "siteURL",
sitemaps = "feedpath"))
ds(path_arguments = list(sites = siteURL,
sitemaps = feedpath))
TRUE
}
#' Fetch a time-series of Googlebot crawl errors.
#'
#' @description
#' Get a list of errors detected by Googlebot over time.
#' See here for details: https://developers.google.com/webmaster-tools/v3/urlcrawlerrorscounts/query
#'
#' @param siteURL The URL of the website to delete. Must include protocol (http://).
#' @param category Crawl error category. Defaults to 'all'
#' @param platform The user agent type. 'all', 'mobile', 'smartphoneOnly' or 'web'.
#' @param latestCountsOnly Default FALSE. Only the latest crawl error counts returned if TRUE.
#'
#' @return dataframe of errors with $platform $category $count and $timecount.
#'
#' @details The timestamp is converted to a date as they are only available daily.
#'
#' Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @export
#' @family working with search console errors
crawl_errors <- function(siteURL,
category="all",
platform=c("all","mobile","smartphoneOnly","web"),
latestCountsOnly = FALSE) {
stop("Crawl errors are no longer available in the API")
}
#' Lists a site's sample URLs for crawl errors.
#'
#' @description Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @param siteURL The URL of the website to delete. Must include protocol (http://).
#' @param category Crawl error category. Default 'notFound'.
#' @param platform User agent type. Default 'web'.
#'
#' @details
#' See here for details: \url{https://developers.google.com/webmaster-tools/v3/urlcrawlerrorssamples}
#'
#' @return A dataframe of $pageUrl, $last_crawled, $first_detected, $response
#'
#' @export
#' @family working with search console errors
list_crawl_error_samples <- function(siteURL,
category="notFound",
platform="web") {
siteURL <- check.Url(siteURL, reserved=T)
## require pre-existing token, to avoid recursion
if(is.valid.category.platform(category, platform)) {
params <- list('category' = category,
'platform' = platform)
lces <-
googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"GET",
path_args = list(sites = "siteURL",
urlCrawlErrorsSamples = ""),
pars_args = params,
data_parse_function = parse_crawlerror_sample)
lces(path_arguments = list(sites = siteURL), pars_arguments = params)
}
}
#' Shows details of errors for individual sample URLs
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/urlcrawlerrorssamples/get
#'
#' @param siteURL The URL of the website to delete. Must include protocol (http://).
#' @param pageURL A PageUrl taken from list_crawl_error_samples.
#' @param category Crawl error category. Default 'notFound'.
#' @param platform User agent type. Default 'web'.
#'
#' @return Dataframe of $linkedFrom, with the calling URLs $last_crawled, $first_detected and a $exampleURL
#' @family working with search console errors
#' @description
#' pageURL is the relative path (without the site) of the sample URL.
#' It must be one of the URLs returned by list_crawl_error_samples.
#' For example, for the URL https://www.example.com/pagename on the site https://www.example.com/,
#' the url value is pagename (string)
#'
#' Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @export
error_sample_url <- function(siteURL,
pageURL,
category="notFound",
platform="web") {
siteURL <- check.Url(siteURL, reserved = TRUE)
pageURL <- check.Url(pageURL, checkProtocol = FALSE, reserved = TRUE, repeated = TRUE)
## require pre-existing token, to avoid recursion
if(is.valid.category.platform(category, platform)){
params <- list('category' = category,
'platform' = platform)
esu <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"GET",
path_args = list(sites = "siteURL",
urlCrawlErrorsSamples = "pageURL"),
pars_args = params,
data_parse_function = parse_errorsample_url)
esu(path_arguments = list(sites = siteURL,
urlCrawlErrorsSamples = pageURL),
pars_arguments = params)
}
}
#' Mark As Fixed the individual sample URLs
#'
#' See here for details:
#' https://developers.google.com/webmaster-tools/v3/urlcrawlerrorssamples/markAsFixed
#'
#' @param siteURL The URL of the website to delete. Must include protocol (http://).
#' @param pageURL A PageUrl taken from list_crawl_error_samples.
#' @param category Crawl error category. Default 'notFound'.
#' @param platform User agent type. Default 'web'.
#'
#' @return TRUE if successful, raises an error if not.
#' @family working with search console errors
#'
#' @description
#' pageURL is the relative path (without the site) of the sample URL.
#' It must be one of the URLs returned by list_crawl_error_samples.
#' For example, for the URL https://www.example.com/pagename on the site https://www.example.com/,
#' the url value is pagename (string)
#'
#' Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @export
fix_sample_url <- function(siteURL,
pageURL,
category = "notFound",
platform = "web") {
siteURL <- check.Url(siteURL, reserved = TRUE)
pageURL <- check.Url(pageURL, checkProtocol = FALSE, reserved = TRUE)
if(is.valid.category.platform(category, platform)){
params <- list('category' = category,
'platform' = platform)
fsu <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"DELETE",
path_args = list(sites = "siteURL",
urlCrawlErrorsSamples = "pageURL"),
pars_args = params)
fsu(path_arguments = list(sites = siteURL,
urlCrawlErrorsSamples = pageURL),
pars_arguments = params)
return(TRUE)
}
return(FALSE)
}
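# Illustrative (not run) workflow combining the error-sample helpers above; the property URL
# is a placeholder, not a real verified site:
#   samples <- list_crawl_error_samples("http://www.example.com", category = "notFound", platform = "web")
#   detail  <- error_sample_url("http://www.example.com", samples$pageUrl[1], category = "notFound", platform = "web")
#   fix_sample_url("http://www.example.com", samples$pageUrl[1], category = "notFound", platform = "web")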
| /R/getData.R | no_license | cran/searchConsoleR | R | false | false | 23,610 | r | #' Query search traffic keyword data
#'
#' @description Download your Google SEO data.
#'
#' @param siteURL The URL of the website you have auth access to.
#' @param startDate Start date of requested range, in YYYY-MM-DD.
#' @param endDate End date of the requested date range, in YYYY-MM-DD.
#' @param dimensions Zero or more dimensions to group results by:
#' \code{"date", "country", "device", "page" , "query" or "searchAppearance"}
#' @param searchType Search type filter, default 'web'.
#' @param dimensionFilterExp A character vector of expressions to filter.
#' e.g. \code{("device==TABLET", "country~~GBR")}
#' @param aggregationType How data is aggregated.
#' @param rowLimit How many rows to fetch. Ignored if \code{walk_data} is "byDate"
#' @param prettyNames If TRUE, converts ISO 3166-1 alpha-3 country codes to full names and
#' creates new column called countryName.
#' @param walk_data Make multiple API calls. One of \code{("byBatch","byDate","none")}
#'
#' @return A dataframe with columns in order of dimensions plus metrics, with attribute "aggregationType"
#'
#' @seealso Guide to Search Analytics: \url{https://support.google.com/webmasters/answer/6155685}
#' API docs: \url{https://developers.google.com/webmaster-tools/v3/searchanalytics/query}
#' @export
#'
#' @details
#' \strong{startDate}: Start date of the requested date range, in YYYY-MM-DD format,
#' in PST time (UTC - 8:00). Must be less than or equal to the end date.
#' This value is included in the range.
#'
#' \strong{endDate}: End date of the requested date range, in YYYY-MM-DD format,
#' in PST time (UTC - 8:00). Must be greater than or equal to the start date.
#' This value is included in the range.
#'
#' \strong{dimensions}: [Optional] Zero or more dimensions to group results by.
#' \itemize{
#' \item 'date'
#' \item 'country'
#' \item 'device'
#' \item 'page'
#' \item 'query'
#' \item 'searchAppearance' (can only appear on its own)
#' }
#' The grouping dimension values are combined to create a unique key
#' for each result row. If no dimensions are specified,
#' all values will be combined into a single row.
#' There is no limit to the number of dimensions that you can group by, except that \code{searchAppearance} can only be grouped on its own.
#' You cannot group by the same dimension twice.
#'
#' Example: \code{c('country', 'device')}
#'
#'
#' \strong{dimensionFilterExp}:
#' Results are grouped in the order that you supply these dimensions.
#' dimensionFilterExp expects a character vector of expressions in the form:
#' ("device==TABLET", "country~~GBR", "dimension operator expression")
#' \itemize{
#' \item dimension
#' \itemize{
#' \item 'country'
#' \item 'device'
#' \item 'page'
#' \item 'query'
#' \item 'searchAppearance'
#' }
#' \item operator
#' \itemize{
#' \item '~~' meaning 'contains'
#' \item '==' meaning 'equals'
#' \item '!~' meaning 'notContains'
#'   \item '!=' meaning 'notEquals'
#' }
#'
#' \item expression
#' \itemize{
#' \item country: an ISO 3166-1 alpha-3 country code.
#' \item device: 'DESKTOP','MOBILE','TABLET'.
#' \item page: not checked, a string in page URLs without hostname.
#' \item query: not checked, a string in keywords.
#' \item searchAppearance: 'AMP_BLUE_LINK', 'RICHCARD'
#'
#' }
#' }
#'
#'
#' \strong{searchType}: [Optional] The search type to filter for. Acceptable values are:
#' \itemize{
#' \item "web": [Default] Web search results
#' \item "image": Image search results
#' \item "video": Video search results
#' }
#'
#' \strong{aggregationType}: [Optional] How data is aggregated.
#' \itemize{
#' \item If aggregated by property, all data for the same property is aggregated;
#' \item If aggregated by page, all data is aggregated by canonical URI.
#' \item If you filter or group by page, choose auto; otherwise you can aggregate either by property or by page, depending on how you want your data calculated;
#' }
#' See the API documentation to learn how data is calculated differently by site versus by page.
#' Note: If you group or filter by page, you cannot aggregate by property.
#' If you specify any value other than auto, the aggregation type in the result will match the requested type, or if you request an invalid type, you will get an error.
#' The API will never change your aggregation type if the requested type is invalid.
#' Acceptable values are:
#' \itemize{
#' \item "auto": [Default] Let the service decide the appropriate aggregation type.
#' \item "byPage": Aggregate values by URI.
#' \item "byProperty": Aggregate values by property.
#' }
#'
#' \strong{walk_data}: [Optional] Batching data into multiple API calls
#' \itemize{
#'   \item byBatch Splits the request into multiple API calls of up to 25000 rows each
#'   \item byDate Runs a call over each day in the date range.
#' \item none No batching
#' }
#'
#' @examples
#'
#' \dontrun{
#'
#' library(searchConsoleR)
#' scr_auth()
#' sc_websites <- list_websites()
#'
#' default_fetch <- search_analytics("http://www.example.com")
#'
#' gbr_desktop_queries <-
#' search_analytics("http://www.example.com",
#' start = "2016-01-01", end = "2016-03-01",
#' dimensions = c("query", "page"),
#' dimensionFilterExp = c("device==DESKTOP", "country==GBR"),
#' searchType = "web", rowLimit = 100)
#'
#' batching <-
#' search_analytics("http://www.example.com",
#' start = "2016-01-01", end = "2016-03-01",
#' dimensions = c("query", "page", "date"),
#' searchType = "web", rowLimit = 100000,
#' walk_data = "byBatch")
#'
#' }
#' @importFrom googleAuthR gar_api_generator gar_batch_walk gar_api_page
search_analytics <- function(siteURL,
startDate = Sys.Date() - 93,
endDate = Sys.Date() - 3,
dimensions = NULL,
searchType = c("web","video","image"),
dimensionFilterExp = NULL,
aggregationType = c("auto","byPage","byProperty"),
rowLimit = 1000,
prettyNames = TRUE,
walk_data = c("byBatch","byDate","none")){
if(!googleAuthR::gar_has_token()){
stop("Not authenticated. Run scr_auth()", call. = FALSE)
}
searchType <- match.arg(searchType)
aggregationType <- match.arg(aggregationType)
walk_data <- match.arg(walk_data)
startDate <- as.character(startDate)
endDate <- as.character(endDate)
message("Fetching search analytics for ",
paste("url:", siteURL,
"dates:", startDate, endDate,
"dimensions:", paste(dimensions, collapse = " ", sep=";"),
"dimensionFilterExp:", paste(dimensionFilterExp, collapse = " ", sep=";"),
"searchType:", searchType,
"aggregationType:", aggregationType))
siteURL <- check.Url(siteURL, reserved=T)
if(any(is.na(as.Date(startDate, "%Y-%m-%d")), is.na(as.Date(endDate, "%Y-%m-%d")))){
stop("dates not in correct %Y-%m-%d format. Got these:", startDate, " - ", endDate)
}
if(any(as.Date(startDate, "%Y-%m-%d") > Sys.Date()-3, as.Date(endDate, "%Y-%m-%d") > Sys.Date()-3)){
warning("Search Analytics usually not available within 3 days (96 hrs) of today(",Sys.Date(),"). Got:", startDate, " - ", endDate)
}
  if(!is.null(dimensions) && !all(dimensions %in% c('date','country', 'device', 'page', 'query','searchAppearance'))){
stop("dimension must be NULL or one or more of 'date','country', 'device', 'page', 'query', 'searchAppearance'.
Got this: ", paste(dimensions, sep=", "))
}
if(!searchType %in% c("web","image","video")){
stop('searchType not one of "web","image","video". Got this: ', searchType)
}
if(!aggregationType %in% c("auto","byPage","byProperty")){
stop('aggregationType not one of "auto","byPage","byProperty". Got this: ', aggregationType)
}
if(aggregationType %in% c("byProperty") && 'page' %in% dimensions ){
stop("Can't aggregate byProperty and include page in dimensions.")
}
  # if batching by day, row limits make no sense so we fetch up to 25000 rows per day.
if(walk_data == "byDate"){
message("Batching data via method: ", walk_data)
message("Will fetch up to 25000 rows per day")
rowLimit <- 25000
} else if(walk_data == "byBatch"){
# if batching byBatch, we set to 25000 per API call, repeating API calls
# up to the limit you have set
if(rowLimit > 25000){
message("Batching data via method: ", walk_data)
message("With rowLimit set to ", rowLimit ," will need up to [", (rowLimit %/% 25000) + 1, "] API calls")
rowLimit0 <- rowLimit
rowLimit <- 25000
} else {
# its batched, but we can get all rows in one API call
walk_data <- "none"
}
}
## a list of filter expressions
## expects dimensionFilterExp like c("device==TABLET", "country~~GBR")
parsedDimFilterGroup <- lapply(dimensionFilterExp, parseDimFilterGroup)
body <- list(
startDate = startDate,
endDate = endDate,
dimensions = as.list(dimensions),
searchType = searchType,
dimensionFilterGroups = list(
list( ## you don't want more than one of these until different groupType available
groupType = "and", ##only one available for now
filters = parsedDimFilterGroup
)
),
aggregationType = aggregationType,
rowLimit = rowLimit
)
search_analytics_g <- gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"POST",
path_args = list(sites = "siteURL",
searchAnalytics = "query"),
data_parse_function = parse_search_analytics)
# set this here as it may get reset if other googleAuthR packages there
options(googleAuthR.batch_endpoint = 'https://www.googleapis.com/batch/webmasters/v3')
if(walk_data == "byDate"){
if(!'date' %in% dimensions){
warning("To walk data per date requires 'date' to be one of the dimensions. Adding it")
dimensions <- c("date", dimensions)
}
walk_vector <- seq(as.Date(startDate), as.Date(endDate), 1)
out <- gar_batch_walk(search_analytics_g,
walk_vector = walk_vector,
gar_paths = list(sites = siteURL),
body_walk = c("startDate", "endDate"),
the_body = body,
batch_size = 1,
dim = dimensions)
} else if(walk_data == "byBatch") {
## byBatch uses API batching, but this pulls out less data
## 0 impression keywords not included.
walk_vector <- seq(0, rowLimit0, 25000)
do_it <- TRUE
i <- 1
pages <- list()
while(do_it){
message("Page [",i,"] of max [", length(walk_vector),"] API calls")
this_body <- utils::modifyList(body, list(startRow = walk_vector[i]))
this_page <- search_analytics_g(the_body = this_body,
list(sites = siteURL),
dim = dimensions)
if(all(is.na(this_page[[1]]))){
do_it <- FALSE
} else {
message("Downloaded ", nrow(this_page), " rows")
pages <- rbind(pages, this_page)
}
i <- i + 1
if(i>length(walk_vector)){
do_it <- FALSE
}
}
out <- pages
} else {
out <- search_analytics_g(the_body=body,
path_arguments=list(sites = siteURL),
dim = dimensions)
}
out
}
#' Retrieves dataframe of websites user has in Search Console
#'
#' @return a dataframe of siteUrl and permissionLevel
#'
#' @export
#' @family search console website functions
list_websites <- function() {
l <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/sites",
"GET",
data_parse_function = function(x) x$siteEntry)
l()
}
#' Adds website to Search Console
#'
#' @param siteURL The URL of the website to add.
#'
#' @return TRUE if successful, raises an error if not.
#' @family search console website functions
#'
#' @export
add_website <- function(siteURL) {
siteURL <- check.Url(siteURL, reserved = TRUE)
aw <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"PUT",
path_args = list(sites = "siteURL"))
aw(path_arguments = list(sites = siteURL))
TRUE
}
#' Deletes website in Search Console
#'
#' @param siteURL The URL of the website to delete.
#'
#' @return TRUE if successful, raises an error if not.
#' @family data fetching functions
#'
#' @export
#' @family search console website functions
delete_website <- function(siteURL) {
siteURL <- check.Url(siteURL, reserved = TRUE)
dw <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"DELETE",
path_args = list(sites = "siteURL"))
dw(path_arguments = list(sites = siteURL))
TRUE
}
#' Gets sitemap information for the URL supplied.
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/sitemaps
#'
#' @param siteURL The URL of the website to get sitemap information from. Must include protocol (http://).
#'
#' @return A list of two dataframes: $sitemap with general info and $contents with sitemap info.
#' @family data fetching functions
#'
#' @export
#' @family sitemap admin functions
list_sitemaps <- function(siteURL) {
siteURL <- check.Url(siteURL, reserved = TRUE)
ls <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"GET",
path_args = list(sites = "siteURL",
sitemaps = ""),
data_parse_function = parse_sitemaps)
ls(path_arguments = list(sites = siteURL))
}
#' Submit a sitemap.
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/sitemaps/submit
#'
#' @param siteURL The URL of the website to add the sitemap to. Must include protocol (http://).
#' @param feedpath The URL of the sitemap to submit. Must include protocol (http://).
#'
#' @return TRUE if successful, raises an error if not.
#'
#' @export
#' @family sitemap admin functions
add_sitemap <- function(siteURL, feedpath) {
siteURL <- check.Url(siteURL, reserved = TRUE)
feedpath <- check.Url(feedpath, reserved = TRUE)
as <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"PUT",
path_args = list(sites = "siteURL",
sitemaps = "feedpath"))
as(path_arguments = list(sites = siteURL,
sitemaps = feedpath))
TRUE
}
#' Delete a sitemap.
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/sitemaps/delete
#'
#' @param siteURL The URL of the website you are deleting the sitemap from. Must include protocol (http://).
#' @param feedpath The URL of the sitemap to delete. Must include protocol (http://).
#'
#' @return TRUE if successful, raises an error if not.
#'
#' @export
#' @family sitemap admin functions
delete_sitemap <- function(siteURL, feedpath) {
siteURL <- check.Url(siteURL, reserved = TRUE)
feedpath <- check.Url(feedpath, reserved = TRUE)
ds <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"DELETE",
path_args = list(sites = "siteURL",
sitemaps = "feedpath"))
ds(path_arguments = list(sites = siteURL,
sitemaps = feedpath))
TRUE
}
#' Fetch a time-series of Googlebot crawl errors.
#'
#' @description
#' Get a list of errors detected by Googlebot over time.
#' See here for details: https://developers.google.com/webmaster-tools/v3/urlcrawlerrorscounts/query
#'
#' @param siteURL The URL of the website to query. Must include protocol (http://).
#' @param category Crawl error category. Defaults to 'all'
#' @param platform The user agent type. 'all', 'mobile', 'smartphoneOnly' or 'web'.
#' @param latestCountsOnly Default FALSE. Only the latest crawl error counts returned if TRUE.
#'
#' @return dataframe of errors with $platform $category $count and $timecount.
#'
#' @details The timestamp is converted to a date as they are only available daily.
#'
#' Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @export
#' @family working with search console errors
crawl_errors <- function(siteURL,
category="all",
platform=c("all","mobile","smartphoneOnly","web"),
latestCountsOnly = FALSE) {
stop("Crawl errors are no longer available in the API")
}
#' Lists a site's sample URLs for crawl errors.
#'
#' @description Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @param siteURL The URL of the website to query. Must include protocol (http://).
#' @param category Crawl error category. Default 'notFound'.
#' @param platform User agent type. Default 'web'.
#'
#' @details
#' See here for details: \url{https://developers.google.com/webmaster-tools/v3/urlcrawlerrorssamples}
#'
#' @return A dataframe of $pageUrl, $last_crawled, $first_detected, $response
#'
#' @export
#' @family working with search console errors
list_crawl_error_samples <- function(siteURL,
category="notFound",
platform="web") {
siteURL <- check.Url(siteURL, reserved=T)
## require pre-existing token, to avoid recursion
if(is.valid.category.platform(category, platform)) {
params <- list('category' = category,
'platform' = platform)
lces <-
googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"GET",
path_args = list(sites = "siteURL",
urlCrawlErrorsSamples = ""),
pars_args = params,
data_parse_function = parse_crawlerror_sample)
lces(path_arguments = list(sites = siteURL), pars_arguments = params)
}
}
#' Shows details of errors for individual sample URLs
#'
#' See here for details: https://developers.google.com/webmaster-tools/v3/urlcrawlerrorssamples/get
#'
#' @param siteURL The URL of the website to query. Must include protocol (http://).
#' @param pageURL A PageUrl taken from list_crawl_error_samples.
#' @param category Crawl error category. Default 'notFound'.
#' @param platform User agent type. Default 'web'.
#'
#' @return Dataframe of $linkedFrom, with the calling URLs $last_crawled, $first_detected and a $exampleURL
#' @family working with search console errors
#' @description
#' pageURL is the relative path (without the site) of the sample URL.
#' It must be one of the URLs returned by list_crawl_error_samples.
#' For example, for the URL https://www.example.com/pagename on the site https://www.example.com/,
#' the pageURL value is pagename (a string).
#'
#' Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @export
error_sample_url <- function(siteURL,
pageURL,
category="notFound",
platform="web") {
siteURL <- check.Url(siteURL, reserved = TRUE)
pageURL <- check.Url(pageURL, checkProtocol = FALSE, reserved = TRUE, repeated = TRUE)
## require pre-existing token, to avoid recursion
if(is.valid.category.platform(category, platform)){
params <- list('category' = category,
'platform' = platform)
esu <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"GET",
path_args = list(sites = "siteURL",
urlCrawlErrorsSamples = "pageURL"),
pars_args = params,
data_parse_function = parse_errorsample_url)
esu(path_arguments = list(sites = siteURL,
urlCrawlErrorsSamples = pageURL),
pars_arguments = params)
}
}
#' Mark As Fixed the individual sample URLs
#'
#' See here for details:
#' https://developers.google.com/webmaster-tools/v3/urlcrawlerrorssamples/markAsFixed
#'
#' @param siteURL The URL of the property in Search Console. Must include protocol (http://).
#' @param pageURL A PageUrl taken from list_crawl_error_samples.
#' @param category Crawl error category. Default 'notFound'.
#' @param platform User agent type. Default 'web'.
#'
#' @return TRUE if successful, raises an error if not.
#' @family working with search console errors
#'
#' @description
#' pageURL is the relative path (without the site) of the sample URL.
#' It must be one of the URLs returned by list_crawl_error_samples.
#' For example, for the URL https://www.example.com/pagename on the site https://www.example.com/,
#' the pageURL value is pagename (a string).
#'
#' Category is one of: authPermissions, manyToOneRedirect, notFollowed, notFound,
#' other, roboted, serverError, soft404.
#'
#' Platform is one of: mobile, smartphoneOnly or web.
#'
#' @export
fix_sample_url <- function(siteURL,
pageURL,
category = "notFound",
platform = "web") {
siteURL <- check.Url(siteURL, reserved = TRUE)
pageURL <- check.Url(pageURL, checkProtocol = FALSE, reserved = TRUE)
if(is.valid.category.platform(category, platform)){
params <- list('category' = category,
'platform' = platform)
fsu <- googleAuthR::gar_api_generator("https://www.googleapis.com/webmasters/v3/",
"DELETE",
path_args = list(sites = "siteURL",
urlCrawlErrorsSamples = "pageURL"),
pars_args = params)
fsu(path_arguments = list(sites = siteURL,
urlCrawlErrorsSamples = pageURL),
pars_arguments = params)
return(TRUE)
}
return(FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variables.R
\name{Constant}
\alias{Constant}
\title{Constant}
\usage{
Constant(value = NULL, shape = NULL, dtype = "float32", device = NULL,
name = "")
}
\arguments{
\item{value}{- value of the Constant; a scalar, vector, matrix, or tensor of floating point numbers}
\item{shape}{- list of ints representing tensor shape}
\item{dtype}{- data type to be used ("float32", "float64", or "auto")}
\item{device}{- instance of DeviceDescriptor}
\item{name}{- optional name for the Constant instance}
}
\description{
A constant value. It can be a scalar, vector, matrix, or tensor of floating
point numbers that cannot be modified. A Constant is a Variable and
therefore inherits all its methods.
}
\details{
****** Properties: ******
value
}
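% Hand-added illustrative example (not generated from the package's roxygen source);
% it simply exercises the signature documented above and assumes a working CNTK
% installation, hence the \dontrun wrapper.
\examples{
\dontrun{
# A 2 x 3 constant tensor filled with the value 1.5
c_const <- Constant(value = 1.5, shape = c(2L, 3L), dtype = "float32",
                    name = "example_constant")
}
}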
| /man/Constant.Rd | permissive | Bhaskers-Blu-Org2/CNTK-R | R | false | true | 679 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variables.R
\name{Constant}
\alias{Constant}
\title{Constant}
\usage{
Constant(value = NULL, shape = NULL, dtype = "float32", device = NULL,
name = "")
}
\arguments{
\item{value}{- value of the Constant; a scalar, vector, matrix, or tensor of floating point numbers}
\item{shape}{- list of ints representing tensor shape}
\item{dtype}{- data type to be used ("float32", "float64", or "auto")}
\item{device}{- instance of DeviceDescriptor}
\item{name}{- optional name for the Constant instance}
}
\description{
A constant value. It can be a scalar, vector, matrix, or tensor of floating
point numbers that cannot be modified. A Constant is a Variable and
therefore inherits all its methods.
}
\details{
****** Properties: ******
value
}
|
remove(list=ls())
cat("\014")
setwd("C:\\Users\\bchen\\Desktop\\JHU-R-Dashboard\\")
# remotes::install_github("joachim-gassen/tidycovid19")
library(tidycovid19)
library(dplyr)
coronavirus<-download_jhu_csse_covid19_data(cached = TRUE)
coronavirus<-as.data.frame(coronavirus)
head(coronavirus, 1)
coronavirus_us <- coronavirus %>% filter(country == "US") %>% select(c("date", "confirmed"))
head(coronavirus_us,5)
class(coronavirus_us)
class(coronavirus$date)
library(tidyverse)
library(fpp2)
library(zoo)
library(xts)
library(fabletools)
daily_confirmed_us<-xts(coronavirus_us[,-1], order.by = coronavirus_us[,1])
# names(daily_confirmed_us) <- "confirmed_cases"
head(daily_confirmed_us,5)
autoplot(daily_confirmed_us)
str(daily_confirmed_us)
# index(daily_confirmed_us)
# us_confirmed_ses<-ses(daily_confirmed_us, alpha = 0.99, h = 7)
# us_confirmed_ses$model$par
us_confirmed_arima<-auto.arima(daily_confirmed_us)
us_confirmed_arima$fitted
summary(us_confirmed_arima)
# autoplot(us_confirmed_ses) + autolayer(us_confirmed_ses$fitted, series="SES Model") +
# ylab("Confirmed Cases") + xlab("Days Since 2020-01-22")
arima_forecast<-forecast(us_confirmed_arima,7)
require(scales)
autoplot(arima_forecast, main="Confirmed Cases Forecast") + autolayer(us_confirmed_arima$fitted, series="ARIMA Model") +
ylab("Confirmed Cases") + xlab("Days Since 2020-01-22") + scale_y_continuous(labels = comma)
map_covid19(coronavirus, type = "confirmed", region = "North America") | /chengb_dashboard.R | no_license | bcheng004/JHU-R-Dashboard | R | false | false | 1,478 | r | remove(list=ls())
cat("\014")
setwd("C:\\Users\\bchen\\Desktop\\JHU-R-Dashboard\\")
# remotes::install_github("joachim-gassen/tidycovid19")
library(tidycovid19)
library(dplyr)
coronavirus<-download_jhu_csse_covid19_data(cached = TRUE)
coronavirus<-as.data.frame(coronavirus)
head(coronavirus, 1)
coronavirus_us <- coronavirus %>% filter(country == "US") %>% select(c("date", "confirmed"))
head(coronavirus_us,5)
class(coronavirus_us)
class(coronavirus$date)
library(tidyverse)
library(fpp2)
library(zoo)
library(xts)
library(fabletools)
daily_confirmed_us<-xts(coronavirus_us[,-1], order.by = coronavirus_us[,1])
# names(daily_confirmed_us) <- "confirmed_cases"
head(daily_confirmed_us,5)
autoplot(daily_confirmed_us)
str(daily_confirmed_us)
# index(daily_confirmed_us)
# us_confirmed_ses<-ses(daily_confirmed_us, alpha = 0.99, h = 7)
# us_confirmed_ses$model$par
us_confirmed_arima<-auto.arima(daily_confirmed_us)
us_confirmed_arima$fitted
summary(us_confirmed_arima)
# autoplot(us_confirmed_ses) + autolayer(us_confirmed_ses$fitted, series="SES Model") +
# ylab("Confirmed Cases") + xlab("Days Since 2020-01-22")
arima_forecast<-forecast(us_confirmed_arima,7)
require(scales)
autoplot(arima_forecast, main="Confirmed Cases Forecast") + autolayer(us_confirmed_arima$fitted, series="ARIMA Model") +
ylab("Confirmed Cases") + xlab("Days Since 2020-01-22") + scale_y_continuous(labels = comma)
map_covid19(coronavirus, type = "confirmed", region = "North America") |
ecoplot <- function(){
library(ggplot2)
theme_set(theme_minimal())
# For data preparation
library("tidyverse")
df <- economics%>%select(date, psavert, uempmed)%>%gather(key = "variables", value = "value", -date)
# Data Visualization
p<-ggplot(df, aes(x = date, y = value)) + geom_line(aes(color = variables)) + scale_color_manual(values = c("yellow", "green")) + ggtitle("Yearly plot showing personal savings rate and median duration of unemployment")
p
} | /Medium_Test_2/economicplot/R/code.R | no_license | sarahab23/RSQLiteAdmin_Visualisation_Tools | R | false | false | 458 | r | ecoplot <- function(){
library(ggplot2)
theme_set(theme_minimal())
# For data preparation
library("tidyverse")
df <- economics%>%select(date, psavert, uempmed)%>%gather(key = "variables", value = "value", -date)
# Data Visualization
p<-ggplot(df, aes(x = date, y = value)) + geom_line(aes(color = variables)) + scale_color_manual(values = c("yellow", "green")) + ggtitle("Yearly plot showing personal savings rate and median duration of unemployment")
p
} |
suppressMessages(library("DESeq2"))
suppressMessages(library("edgeR"))
suppressMessages(library(data.table))
# ---------------
# FeatureCounts
# ---------------
fCounts <- read.delim(file=smkin$counts,
header=TRUE,
check.names = FALSE)
fCountsData <- fCounts[
,
-which(
tolower(names(fCounts))
%in%
tolower(smkp$fCountsDataCols) )]
geneidColname <- 'Geneid'
geneidIdx <- which(tolower(smkp$fCountsDataCols) %in% tolower(geneidColname))
rownames(fCountsData) <- fCounts[[geneidIdx]]
# Reordering counts matrix to have samples ordered as in metadata
metadata = read.delim(smkin$metadata, header=TRUE)
fCountsData <- fCountsData[,match(metadata[,1], colnames(fCountsData))] # assuming that the first column in metadata is sample name
dds <- DESeqDataSetFromMatrix(
countData= fCountsData,
colData = metadata,
design = as.formula(smkp$dge$design$string))
filter <- rowSums(cpm(counts(dds)) >= smkp$dge$minCounts) >= smkp$dge$minSamples
ddsFiltered <- dds[filter,]
ddsFiltered[[smkp$dge$design$refFactor]] <- relevel(
ddsFiltered[[smkp$dge$design$refFactor]],
ref = smkp$dge$design$refLevel)
dga <- DESeq(
object = ddsFiltered,
test = "Wald",
fitType = "parametric",
betaPrior = FALSE,
minReplicatesForReplace = Inf)
contrasts = resultsNames(dga)[- which(resultsNames(dga) %in% 'Intercept')]
dgeResults <- list()
for (contrast in contrasts) {
dgeResults[[contrast]] <- results(
dga,
name = contrast,
cooksCutoff = Inf,
independentFiltering = TRUE,
alpha = smkp$dge$alpha,
pAdjustMethod = "BH")
# sorting gene list according to significance
dgeResults[[contrast]] <- dgeResults[[contrast]][order(dgeResults[[contrast]]$pvalue, decreasing = F),]
}
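# Illustrative export step (hand-added sketch): the surrounding pipeline presumably
# writes its own outputs, so this is disabled by default and uses a placeholder
# directory name rather than anything defined by the pipeline.
export_dge_tables <- FALSE
if (export_dge_tables) {
  dir.create("dge_results", showWarnings = FALSE)
  for (contrast in names(dgeResults)) {
    write.csv(as.data.frame(dgeResults[[contrast]]),
              file = file.path("dge_results", paste0(contrast, ".csv")))
  }
}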
| /modules/dge/deseq2.R | no_license | alexandre-nadin/pypette-github | R | false | false | 1,859 | r | suppressMessages(library("DESeq2"))
suppressMessages(library("edgeR"))
suppressMessages(library(data.table))
# ---------------
# FeatureCounts
# ---------------
fCounts <- read.delim(file=smkin$counts,
header=TRUE,
check.names = FALSE)
fCountsData <- fCounts[
,
-which(
tolower(names(fCounts))
%in%
tolower(smkp$fCountsDataCols) )]
geneidColname <- 'Geneid'
geneidIdx <- which(tolower(smkp$fCountsDataCols) %in% tolower(geneidColname))
rownames(fCountsData) <- fCounts[[geneidIdx]]
# Reordering counts matrix to have samples ordered as in metadata
metadata = read.delim(smkin$metadata, header=TRUE)
fCountsData <- fCountsData[,match(metadata[,1], colnames(fCountsData))] # assuming that the first column in metadata is sample name
dds <- DESeqDataSetFromMatrix(
countData= fCountsData,
colData = metadata,
design = as.formula(smkp$dge$design$string))
filter <- rowSums(cpm(counts(dds)) >= smkp$dge$minCounts) >= smkp$dge$minSamples
ddsFiltered <- dds[filter,]
ddsFiltered[[smkp$dge$design$refFactor]] <- relevel(
ddsFiltered[[smkp$dge$design$refFactor]],
ref = smkp$dge$design$refLevel)
dga <- DESeq(
object = ddsFiltered,
test = "Wald",
fitType = "parametric",
betaPrior = FALSE,
minReplicatesForReplace = Inf)
contrasts = resultsNames(dga)[- which(resultsNames(dga) %in% 'Intercept')]
dgeResults <- list()
for (contrast in contrasts) {
dgeResults[[contrast]] <- results(
dga,
name = contrast,
cooksCutoff = Inf,
independentFiltering = TRUE,
alpha = smkp$dge$alpha,
pAdjustMethod = "BH")
# sorting gene list according to significance
dgeResults[[contrast]] <- dgeResults[[contrast]][order(dgeResults[[contrast]]$pvalue, decreasing = F),]
}
|
library(csaw)
library(edgeR)
library(rtracklayer)
library(statmod)
library(xscss)
library(microbenchmark)
bpvec = 1000 # Window size
mark = 'EZH2'
method = 'csaw'
cell = 'Encode_twocells'
chip1 = c(
'wgEncodeBroadHistoneHelas3Ezh239875AlnRep1.markdup.q10.sorted.bam',
'wgEncodeBroadHistoneHelas3Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
chip2 = c(
'wgEncodeBroadHistoneHepg2Ezh239875AlnRep1.markdup.q10.sorted.bam',
'wgEncodeBroadHistoneHepg2Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
cell.type = c('Helas3', 'Helas3', 'Hepg2', 'Hepg2') # Cell type
contrast = c(0, 1) # Contrast for test differential
for (bp in bpvec) {
cptime = list()
tmpdir = paste0('temporary_data', bp)
outdir = paste0('Output', bp)
system(paste('mkdir', tmpdir))
system(paste('mkdir', paste0(tmpdir, '/chip'), paste0(tmpdir, '/control')))
system(paste('mkdir', outdir))
### Loading data
cmd = paste('cp',
paste(
list.files(
file.path('../../../../Data/Encode_helas3/', mark),
'wg*.*.bam',
full.names = TRUE
),
collapse = ' '
),
paste0(tmpdir, '/chip'))
cat('Command: ', cmd, '\n')
system(cmd)
cmd = paste('cp',
paste(
list.files(
file.path('../../../../Data/Encode_hepg2/', mark),
'wg*.*.bam',
full.names = TRUE
),
collapse = ' '
),
paste0(tmpdir, '/chip'))
cat('Command: ', cmd, '\n')
system(cmd)
cptime[[paste(method, mark, cell, 'Output', bp, sep = '_')]] = microbenchmark({
widebp = 10 * bp #Size of large bins to compute normalization
max.delay = 250 #Max. shift to attempt in cross-correlation analysis
common.length = 200 #Rescale fragment length to this value
fold.change = log2(2) #Fold change for filter
filterct = 20 #An integer scalar for the minimum count sum across libraries for each window.
tol = 100 #Parameters for mergeWindows
max.width = 5000 #Parameters for mergeWindows
### csaw starts now
# List of bam files
bam.files = list.files(
path = paste0(tmpdir, '/chip'),
pattern = '*.bam$',
full.names = T
)
bam.files
# Design matrix
design <- model.matrix(~ factor(cell.type))
colnames(design) <- c("intercept", "cell.type")
design
# Parameters (PCR duplicates already removed and quality score filtered)
param <- readParam(dedup = F)
param
# Estimating the average fragment length (rescaling all to 200bp)
x = lapply(bam.files,
correlateReads,
param = param,
max.dist = max.delay)
multi.frag.lens = list(unlist(lapply(x, maximizeCcf)), common.length)
multi.frag.lens
# Counting reads
data <-
windowCounts(
bam.files,
width = bp,
ext = multi.frag.lens,
param = param,
filter = filterct
)
data
# Filtering data
data.large <-
windowCounts(bam.files,
width = widebp,
bin = T,
param = param)
bin.ab <-
scaledAverage(data.large, scale = median(getWidths(data.large)) / median(getWidths(data)))
threshold <- median(bin.ab) + fold.change
keep.global <- aveLogCPM(asDGEList(data)) > threshold
sum(keep.global)
# Creating filtered data
filtered.data <- data[keep.global,]
# Testing for DB
y <-
DGEList(assay(filtered.data), lib.size = filtered.data$totals)
y$samples$norm.factors <- 1
y$offset <- NULL
y <- estimateDisp(y, design)
fit <- glmQLFit(y, design, robust = TRUE)
out <- glmQLFTest(fit, contrast = contrast)
tabres <- topTags(out, nrow(out))$table
tabres <- tabres[order(as.integer(rownames(tabres))),]
merged <-
mergeWindows(rowRanges(filtered.data),
tol = tol,
max.width = max.width)
tabneg <- combineTests(merged$id, tabres)
# Organizing output
output = merged$region
output$PValue = tabneg$PValue
output$FDR = tabneg$FDR
# Saving file in tsv format with all details preserved
ofile <-
gzfile(paste0(
outdir,
"/",
paste(method, mark, cell, paste0(bp, 'bp'), sep = '_'),
".tsv.gz"
), open = "w")
write.table(
as.data.frame(output),
file = ofile,
row.names = FALSE,
quote = FALSE,
sep = "\t"
)
close(ofile)
# Saving file in bed format for Genome Browser visualization
test <- output
names(test) <- paste0("region", 1:length(output))
export(test, paste0(outdir, "/temp.bed"))
system(paste(
"echo '",
paste0(
'track name="csaw (',
bp,
'bp)" description="',
mark,
'" color=150,150,0'
),
"' | cat - ",
paste0(outdir, "/temp.bed"),
" > ",
paste0(
outdir,
"/",
paste(method, mark, cell, paste0(bp, 'bp'), sep = '_'),
".bed"
)
))
system(paste0('rm ', paste0(outdir, "/temp.bed")))
### Removing files
system(paste('rm -r', tmpdir))
}, times = 1)
### Saving computing time
  save(cptime, file = paste0(outdir, "/", paste(
method, mark, cell, 'Time', paste0(bp, 'bp.RData'), sep = '_'
)))
} | /Public/csaw/EZH2/Encode_twocells/csaw_EZH2_Encode_twocells_1000bp.R | permissive | plbaldoni/epigraHMMPaper | R | false | false | 5,498 | r | library(csaw)
library(edgeR)
library(rtracklayer)
library(statmod)
library(xscss)
library(microbenchmark)
bpvec = 1000 # Window size
mark = 'EZH2'
method = 'csaw'
cell = 'Encode_twocells'
chip1 = c(
'wgEncodeBroadHistoneHelas3Ezh239875AlnRep1.markdup.q10.sorted.bam',
'wgEncodeBroadHistoneHelas3Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
chip2 = c(
'wgEncodeBroadHistoneHepg2Ezh239875AlnRep1.markdup.q10.sorted.bam',
'wgEncodeBroadHistoneHepg2Ezh239875AlnRep2.markdup.q10.sorted.bam'
)
cell.type = c('Helas3', 'Helas3', 'Hepg2', 'Hepg2') # Cell type
contrast = c(0, 1) # Contrast for test differential
for (bp in bpvec) {
cptime = list()
tmpdir = paste0('temporary_data', bp)
outdir = paste0('Output', bp)
system(paste('mkdir', tmpdir))
system(paste('mkdir', paste0(tmpdir, '/chip'), paste0(tmpdir, '/control')))
system(paste('mkdir', outdir))
### Loading data
cmd = paste('cp',
paste(
list.files(
file.path('../../../../Data/Encode_helas3/', mark),
'wg*.*.bam',
full.names = TRUE
),
collapse = ' '
),
paste0(tmpdir, '/chip'))
cat('Command: ', cmd, '\n')
system(cmd)
cmd = paste('cp',
paste(
list.files(
file.path('../../../../Data/Encode_hepg2/', mark),
'wg*.*.bam',
full.names = TRUE
),
collapse = ' '
),
paste0(tmpdir, '/chip'))
cat('Command: ', cmd, '\n')
system(cmd)
cptime[[paste(method, mark, cell, 'Output', bp, sep = '_')]] = microbenchmark({
widebp = 10 * bp #Size of large bins to compute normalization
max.delay = 250 #Max. shift to attempt in cross-correlation analysis
common.length = 200 #Rescale fragment length to this value
fold.change = log2(2) #Fold change for filter
filterct = 20 #An integer scalar for the minimum count sum across libraries for each window.
tol = 100 #Parameters for mergeWindows
max.width = 5000 #Parameters for mergeWindows
### csaw starts now
# List of bam files
bam.files = list.files(
path = paste0(tmpdir, '/chip'),
pattern = '*.bam$',
full.names = T
)
bam.files
# Design matrix
design <- model.matrix(~ factor(cell.type))
colnames(design) <- c("intercept", "cell.type")
design
# Parameters (PCR duplicates already removed and quality score filtered)
param <- readParam(dedup = F)
param
# Estimating the average fragment length (rescaling all to 200bp)
x = lapply(bam.files,
correlateReads,
param = param,
max.dist = max.delay)
multi.frag.lens = list(unlist(lapply(x, maximizeCcf)), common.length)
multi.frag.lens
# Counting reads
data <-
windowCounts(
bam.files,
width = bp,
ext = multi.frag.lens,
param = param,
filter = filterct
)
data
# Filtering data
data.large <-
windowCounts(bam.files,
width = widebp,
bin = T,
param = param)
bin.ab <-
scaledAverage(data.large, scale = median(getWidths(data.large)) / median(getWidths(data)))
threshold <- median(bin.ab) + fold.change
keep.global <- aveLogCPM(asDGEList(data)) > threshold
sum(keep.global)
# Creating filtered data
filtered.data <- data[keep.global,]
# Testing for DB
y <-
DGEList(assay(filtered.data), lib.size = filtered.data$totals)
y$samples$norm.factors <- 1
y$offset <- NULL
y <- estimateDisp(y, design)
fit <- glmQLFit(y, design, robust = TRUE)
out <- glmQLFTest(fit, contrast = contrast)
tabres <- topTags(out, nrow(out))$table
tabres <- tabres[order(as.integer(rownames(tabres))),]
merged <-
mergeWindows(rowRanges(filtered.data),
tol = tol,
max.width = max.width)
tabneg <- combineTests(merged$id, tabres)
# Organizing output
output = merged$region
output$PValue = tabneg$PValue
output$FDR = tabneg$FDR
# Saving file in tsv format with all details preserved
ofile <-
gzfile(paste0(
outdir,
"/",
paste(method, mark, cell, paste0(bp, 'bp'), sep = '_'),
".tsv.gz"
), open = "w")
write.table(
as.data.frame(output),
file = ofile,
row.names = FALSE,
quote = FALSE,
sep = "\t"
)
close(ofile)
# Saving file in bed format for Genome Browser visualization
test <- output
names(test) <- paste0("region", 1:length(output))
export(test, paste0(outdir, "/temp.bed"))
system(paste(
"echo '",
paste0(
'track name="csaw (',
bp,
'bp)" description="',
mark,
'" color=150,150,0'
),
"' | cat - ",
paste0(outdir, "/temp.bed"),
" > ",
paste0(
outdir,
"/",
paste(method, mark, cell, paste0(bp, 'bp'), sep = '_'),
".bed"
)
))
system(paste0('rm ', paste0(outdir, "/temp.bed")))
### Removing files
system(paste('rm -r', tmpdir))
}, times = 1)
### Saving computing time
  save(cptime, file = paste0(outdir, "/", paste(
method, mark, cell, 'Time', paste0(bp, 'bp.RData'), sep = '_'
)))
} |
## This function is for caching the inverse of a matrix
## rather than compute it repeatedly
## This function creates a special "matrix" object that
## can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the
## cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
        m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
| /cachematrix.R | no_license | fishboon/ProgrammingAssignment2 | R | false | false | 1,107 | r | ## This function is for caching the inverse of a matrix
## rather than compute it repeatedly
## This function creates a special "matrix" object that
## can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the
## cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
        m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, 2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615831374-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,047 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, 2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
##getting full datatable
datatable <- read.table("household_power_consumption.txt", header = TRUE, sep = ';',na.strings = "?", nrows = 2075259,check.names=FALSE, stringsAsFactors = FALSE, comment.char = "",quote = '\"')
##subsetting the table between two days
date1 <-"1/2/2007"
date2 <-"2/2/2007"
dataused <- datatable[datatable$Date %in% c(date1,date2) ,]
##generating plot1 and export PNG
png("plot1.png", width=480, height=480)
hist(dataused$Global_active_power, col = "red", main = paste("Global Active Power"), xlab = "Global Active Power (kilowatts)")
dev.off() | /exploredata/plot1.R | no_license | waynezx/ExData_Plotting1 | R | false | false | 601 | r | ##getting full datatable
datatable <- read.table("household_power_consumption.txt", header = TRUE, sep = ';',na.strings = "?", nrows = 2075259,check.names=FALSE, stringsAsFactors = FALSE, comment.char = "",quote = '\"')
##subsetting the table between two days
date1 <-"1/2/2007"
date2 <-"2/2/2007"
dataused <- datatable[datatable$Date %in% c(date1,date2) ,]
##generating plot1 and export PNG
png("plot1.png", width=480, height=480)
hist(dataused$Global_active_power, col = "red", main = paste("Global Active Power"), xlab = "Global Active Power (kilowatts)")
dev.off() |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# The function makeCacheMatrix creates a special "Matrix" object that can cache its inverse
# 'makeCacheMatrix' function creates a special "Matrxi", which is really a list containing a functions to
# 1. set the value of the matrix
# 2. get the value of the vector
# 3. set the value of the inverse
# 4. get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
# The function 'cacheSolve' calculates the inverse of the special "Matrix" created with the above function.
# It first checks to see if the inverse has already been calculated
# If so, it 'get's the inverse from the cache and skips the computation. Otherwise, it calculates the inverse of the data and sets the value of the inverse in the cache via the 'setInverse' function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)) {
message("Getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data,...)
x$setInverse(inv)
inv
}
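## Quick illustration (hand-added; not part of the assignment template): the second
## cacheSolve() call returns the cached inverse instead of recomputing it.
example_matrix <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
cacheSolve(example_matrix)   # computes the inverse, caches it, and returns it
cacheSolve(example_matrix)   # prints "Getting cached data" and reuses the cached value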
| /cachematrix.R | no_license | Faye2014/ProgrammingAssignment2 | R | false | false | 1,589 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# The function makeCacheMatrix creates a special "Matrix" object that can cache its inverse
# 'makeCacheMatrix' function creates a special "Matrxi", which is really a list containing a functions to
# 1. set the value of the matrix
# 2. get the value of the vector
# 3. set the value of the inverse
# 4. get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
# The function 'cacheSolve' calculates the inverse of the special "Matrix" created with the above function.
# It first checks to see if the inverse has already been calculated
# If so, it 'get's the inverse from the cache and skips the computation. Otherwise, it calculates the inverse of the data and sets the value of the inverse in the cache via the 'setInverse' function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)) {
message("Getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data,...)
x$setInverse(inv)
inv
}
|
#===============================================================================
# 2021-06-18 -- MPIDR dataviz course
# Final Assignment: Maps challenge
# Author: Luca Badolato
#
# Significant credit goes to Ilya Kashnitsky, from whom I took (and learned from) several lines of code
#===============================================================================
# clear the working environment
rm(list=ls())
# load and (in case) install required packages
listofpackages = c("tidyverse",
"janitor",
"sf",
"ggthemes",
"eurostat",
"rmapshaper")
for (j in listofpackages){
if(sum(installed.packages()[, 1] == j) == 0) {
install.packages(j)
}
library(j, character.only = T)
}
# download the data (dataset name found at http://ec.europa.eu/eurostat/web/regions/data/database)
df <- get_eurostat("demo_r_pjanaggr3")
# explore available years and geographical locations
time <- df %>%
select(time) %>%
unique()
geo <- df %>% filter(
geo %>% str_sub(1,2) == "IT", # only Italy
geo %>% paste %>% nchar == 5, # only NUTS3 level
) %>%
select(geo) %>% unique()
# filter data of interest
df_it <- df %>%
mutate(
time = time %>% lubridate::year()
) %>%
filter(
sex == "T" , # total population
geo %>% str_sub(1,2) == "IT", # only Italy
geo %>% paste %>% nchar == 5, # only NUTS3 level
time %in% c("2020", "2001"),
age == "TOTAL"
) %>%
transmute(
id = geo,
year = time,
pop = values
)
# reshape to wide format
df_it <- df_it %>%
pivot_wider(names_from = year, values_from = pop)
# define variable of interest: population change
df_it <- df_it %>%
mutate(
pop_change = (`2020` - `2001`)/`2001`
)
# the built-in dataset of EU boundaries
gd <- eurostat_geodata_60_2016 %>%
clean_names()
# transform the projection for the one suitable for Europe
gdtr <- gd %>%
st_transform(crs = 3035)
# filter Italy
gd_it <- gdtr %>%
filter(cntr_code == "IT", levl_code == 3)
# join Eurostat and spatial datasets
dj_it <- left_join(gd_it, df_it, "id")
# get the NUTS2 (regions) bords to be plotted in the map
bord <- gdtr %>%
filter(cntr_code == "IT", levl_code == 2) %>%
ms_innerlines()
# visualization
dj_it %>%
ggplot()+
geom_sf()+
coord_sf(datum = NA)+
geom_sf(aes(fill = pop_change), color=NA)+
geom_sf(data = bord, color = "black", size=0.1)+
theme_map()+
labs(fill = "",
title = "POPULATION GROWTH RATE (2001 - 2020)",
subtitle = "Population growth rate from 2001 to 2020 in Italy at NUTS3 level",
caption = paste0("Source: Eurostat\n",
"@BadolatoLuca")) +
scale_fill_distiller(palette = "RdBu",
direction=1,
limits = c(-0.177985, 0.177985),
breaks = c(-0.1, 0, 0.1),
labels = c("-10%", "0", "+10%")
) +
theme(legend.position = c(.0, .06),
legend.direction = "horizontal",
plot.title = element_text(hjust = 0.5,
vjust = 0.1,
size = 16,
face = "bold"),
plot.subtitle = element_text(hjust = 0.5,
vjust = 0.1,
size = 10),
plot.caption = element_text(size = 7,
hjust = 1))
ggsave("BADOLATO-challenge.png", width = 6, height = 7,)
# end
| /code.R | no_license | LucaBadolato/IMPRS-2021-Data-Visualization | R | false | false | 3,575 | r | #===============================================================================
# 2021-06-18 -- MPIDR dataviz course
# Final Assignment: Maps challenge
# Author: Luca Badolato
#
# Significant credit goes to Ilya Kashnitsky, from whom I took (and learned from) several lines of code
#===============================================================================
# clear the working environment
rm(list=ls())
# load and (in case) install required packages
listofpackages = c("tidyverse",
"janitor",
"sf",
"ggthemes",
"eurostat",
"rmapshaper")
for (j in listofpackages){
if(sum(installed.packages()[, 1] == j) == 0) {
install.packages(j)
}
library(j, character.only = T)
}
# download the data (dataset name found at http://ec.europa.eu/eurostat/web/regions/data/database)
df <- get_eurostat("demo_r_pjanaggr3")
# explore available years and geographical locations
time <- df %>%
select(time) %>%
unique()
geo <- df %>% filter(
geo %>% str_sub(1,2) == "IT", # only Italy
geo %>% paste %>% nchar == 5, # only NUTS3 level
) %>%
select(geo) %>% unique()
# filter data of interest
df_it <- df %>%
mutate(
time = time %>% lubridate::year()
) %>%
filter(
sex == "T" , # total population
geo %>% str_sub(1,2) == "IT", # only Italy
geo %>% paste %>% nchar == 5, # only NUTS3 level
time %in% c("2020", "2001"),
age == "TOTAL"
) %>%
transmute(
id = geo,
year = time,
pop = values
)
# reshape to wide format
df_it <- df_it %>%
pivot_wider(names_from = year, values_from = pop)
# define variable of interest: population change
df_it <- df_it %>%
mutate(
pop_change = (`2020` - `2001`)/`2001`
)
# the built-in dataset of EU boundaries
gd <- eurostat_geodata_60_2016 %>%
clean_names()
# transform the projection for the one suitable for Europe
gdtr <- gd %>%
st_transform(crs = 3035)
# filter Italy
gd_it <- gdtr %>%
filter(cntr_code == "IT", levl_code == 3)
# join Eurostat and spatial datasets
dj_it <- left_join(gd_it, df_it, "id")
# get the NUTS2 (regions) bords to be plotted in the map
bord <- gdtr %>%
filter(cntr_code == "IT", levl_code == 2) %>%
ms_innerlines()
# visualization
dj_it %>%
ggplot()+
geom_sf()+
coord_sf(datum = NA)+
geom_sf(aes(fill = pop_change), color=NA)+
geom_sf(data = bord, color = "black", size=0.1)+
theme_map()+
labs(fill = "",
title = "POPULATION GROWTH RATE (2001 - 2020)",
subtitle = "Population growth rate from 2001 to 2020 in Italy at NUTS3 level",
caption = paste0("Source: Eurostat\n",
"@BadolatoLuca")) +
scale_fill_distiller(palette = "RdBu",
direction=1,
limits = c(-0.177985, 0.177985),
breaks = c(-0.1, 0, 0.1),
labels = c("-10%", "0", "+10%")
) +
theme(legend.position = c(.0, .06),
legend.direction = "horizontal",
plot.title = element_text(hjust = 0.5,
vjust = 0.1,
size = 16,
face = "bold"),
plot.subtitle = element_text(hjust = 0.5,
vjust = 0.1,
size = 10),
plot.caption = element_text(size = 7,
hjust = 1))
ggsave("BADOLATO-challenge.png", width = 6, height = 7,)
# end
|
page_one_sidepanel <- sidebarPanel(
selectInput("planet_choose",
label = "Choose a Body to see travel to from Earth:",
choices = c("Mercury", "Venus", "Mars", "Jupiter", "Saturn",
"Uranus", "Neptune", "Pluto", "Moon"),
selected = "Mercury"
),
textOutput(
outputId = "planet_animation",
)
) | /R/ui.R | permissive | Chicow-Moovers/chicow-moovers.github.io | R | false | false | 374 | r |
page_one_sidepanel <- sidebarPanel(
selectInput("planet_choose",
label = "Choose a Body to see travel to from Earth:",
choices = c("Mercury", "Venus", "Mars", "Jupiter", "Saturn",
"Uranus", "Neptune", "Pluto", "Moon"),
selected = "Mercury"
),
textOutput(
outputId = "planet_animation",
)
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activeC.R
\name{plot.const_C}
\alias{plot.const_C}
\title{Plot const_C object}
\usage{
\method{plot}{const_C}(x, output = c("all", "matrix", "logvals", "projfn"), ...)
}
\arguments{
\item{x}{A const_C object, the result of a call to C_GP}
\item{output}{one of \code{"image"} (image of the C matrix), \code{"logvals"} (log-eigen values),
\code{"projfn"} projected function on first eigen vector or all plots at once (default).}
\item{...}{Additional parameters. Not used.}
}
\description{
Plot const_C object
}
| /man/plot.const_C.Rd | no_license | cran/activegp | R | false | true | 593 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activeC.R
\name{plot.const_C}
\alias{plot.const_C}
\title{Plot const_C object}
\usage{
\method{plot}{const_C}(x, output = c("all", "matrix", "logvals", "projfn"), ...)
}
\arguments{
\item{x}{A const_C object, the result of a call to C_GP}
\item{output}{one of \code{"image"} (image of the C matrix), \code{"logvals"} (log-eigen values),
\code{"projfn"} projected function on first eigen vector or all plots at once (default).}
\item{...}{Additional parameters. Not used.}
}
\description{
Plot const_C object
}
|
head(iris,n=3) # first three fows
| /inst/snippet/iris-head.R | no_license | cran/fastR | R | false | false | 45 | r | head(iris,n=3) # first three fows
|
# Objective: create a function to calculate atmospheric air pressure for a given site elevation
# Author: Grant Coble-Neal
AtmosphericPressure <- function( Elevation, Temperature, parameters = c(P_0 = 1013.25, a = 0.0065, b = 273.15, c = 5.257))
{
# P_0: atmospheric pressure at sea-level in hectopascals (hPA); hPA is equivalent to millibars
# Elevation: site elevation above sea level in metres
# Temperature: ambient temperature in Celsius
P_0 = parameters[['P_0']]
a = parameters[['a']]
b = parameters[['b']]
c = parameters[['c']]
return( P_0 * ( 1 - (a * Elevation)/(Temperature + a * Elevation + b))^c )
# This is the hypsometric formula; If the altitude is more than 11km high above sea level,
# the hypsometric formula cannot be applied because the temperature lapse rate varies considerably with altitude.
# reference: https://keisan.casio.com/exec/system/1224579725
}
AP <- AtmosphericPressure( Elevation = 22, Temperature = 15)
AP # correct answer: 1,010.61 hPa
| /R/Atmospheric Pressure function.R | no_license | cobleg/solarPV | R | false | false | 1,006 | r |
# Objective: create a function to calculate atmospheric air pressure for a given site elevation
# Author: Grant Coble-Neal
AtmosphericPressure <- function( Elevation, Temperature, parameters = c(P_0 = 1013.25, a = 0.0065, b = 273.15, c = 5.257))
{
# P_0: atmospheric pressure at sea-level in hectopascals (hPA); hPA is equivalent to millibars
# Elevation: site elevation above sea level in metres
# Temperature: ambient temperature in Celsius
P_0 = parameters[['P_0']]
a = parameters[['a']]
b = parameters[['b']]
c = parameters[['c']]
return( P_0 * ( 1 - (a * Elevation)/(Temperature + a * Elevation + b))^c )
# This is the hypsometric formula; If the altitude is more than 11km high above sea level,
# the hypsometric formula cannot be applied because the temperature lapse rate varies considerably with altitude.
# reference: https://keisan.casio.com/exec/system/1224579725
}
AP <- AtmosphericPressure( Elevation = 22, Temperature = 15)
AP # correct answer: 1,010.61 hPa
|
RML$Train$TrainModel <- function(model, dataSet, column = NULL) {
removeDuplicateColumns <- function(dataSet, column) {
if (is.null(column)) return(dataSet)
for (columnName in names(dataSet)) {
if (identical(dataSet[, columnName], column)) {
dataSet[, columnName] <- NULL
}
}
return(dataSet)
}
trainLinearModel <- function(model, trainData, column) {
fit <- lm(column ~., data = trainData, weights = model$Weights)
}
trainDescisionTree <- function(model, trainData, column) {
    if (is.null(model$Method)) {
fit <- rpart::rpart(column ~., data = trainData, control = model$Control)
} else {
fit <- rpart::rpart(column ~., data = trainData, control = model$Control, method = model$Method)
}
return(fit)
}
trainKMeans <- function (model, trainData) {
    fit <- kmeans(trainData, centers = model$Centroids, nstart = model$NumberOfStarts, iter.max = model$Iterations);
}
trainData <- removeDuplicateColumns(dataSet, column)
if (identical(model$Model, lm)) {
result <- trainLinearModel(model, trainData, column)
} else if (identical(model$Model, rpart)) {
result <- trainDescisionTree(model, trainData, column)
} else if (identical(model$Model, kmeans)) {
result <- trainKMeans(model, trainData)
} else {
stop('Model not supported. Please ensure that this is a valid RML model.')
}
return(result)
} | /Train/TrainModel.R | permissive | ryannel/RML | R | false | false | 1,554 | r | RML$Train$TrainModel <- function(model, dataSet, column = NULL) {
removeDuplicateColumns <- function(dataSet, column) {
if (is.null(column)) return(dataSet)
for (columnName in names(dataSet)) {
if (identical(dataSet[, columnName], column)) {
dataSet[, columnName] <- NULL
}
}
return(dataSet)
}
trainLinearModel <- function(model, trainData, column) {
fit <- lm(column ~., data = trainData, weights = model$Weights)
}
trainDescisionTree <- function(model, trainData, column) {
    if (is.null(model$Method)) {
fit <- rpart::rpart(column ~., data = trainData, control = model$Control)
} else {
fit <- rpart::rpart(column ~., data = trainData, control = model$Control, method = model$Method)
}
return(fit)
}
trainKMeans <- function (model, trainData) {
    fit <- kmeans(trainData, centers = model$Centroids, nstart = model$NumberOfStarts, iter.max = model$Iterations);
}
trainData <- removeDuplicateColumns(dataSet, column)
if (identical(model$Model, lm)) {
result <- trainLinearModel(model, trainData, column)
} else if (identical(model$Model, rpart)) {
result <- trainDescisionTree(model, trainData, column)
} else if (identical(model$Model, kmeans)) {
result <- trainKMeans(model, trainData)
} else {
stop('Model not supported. Please ensure that this is a valid RML model.')
}
return(result)
} |
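# Editorial usage sketch (not part of the original file). The field names
# (Model, Control, Method, Weights, Centroids, ...) follow the checks in the
# function above; the data set and target column here are illustrative only.
# model <- list(Model = rpart, Control = rpart::rpart.control(minsplit = 10), Method = "class")
# fit <- RML$Train$TrainModel(model, dataSet = iris, column = iris$Species)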
############## Lauren chosen words Ebony #####################
tab_ebony <- get(load("../output/table_word_frequencies_ebony.rda"))
all_words_ebony <- get(load("../output/all_words_ebony.rda"))
tab_ebony_normalized <- t(apply(tab_ebony, 1, function(x) return(x/sum(x))))
rownames(tab_ebony_normalized) <- paste0("Ebony_", 1961:1976)
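# Editorial note (not part of the original script): the row-normalisation above is
# equivalent to prop.table(tab_ebony, margin = 1), which avoids the explicit t(apply(...)):
# tab_ebony_normalized <- prop.table(tab_ebony, 1)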
plot(tab_ebony_normalized[, "rights"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "rights")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "vietnam"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "vietnam")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "nonviolence"] + tab_ebony_normalized[, "nonviolent"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "nonviolence")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "violence"] + tab_ebony_normalized[, "violent"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "violence")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "beatles"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "vietnam")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "drugs"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "vietnam")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "hippie"] + tab_ebony_normalized[, "hippies"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "hippie")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "politics"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "politics")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "time"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "time")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "sick"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "sick")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "ill"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "ill")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "illness"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "illness")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "culture"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "culture")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "afroamerican"] + tab_ebony_normalized[, "africanamerican"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "afroamerican")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "africanamerican"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "africanamerican")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
tmp_rights <- list()
tmp_politics <- list()
tmp_time <- list()
tmp_sick <- list()
tmp_ill <- list()
tmp_illness <- list()
tmp_culture <- list()
tmp_afroamerican <- list()
library(wordVectors)
dirs <- c("../Ebony/1961/", "../Ebony/1962/", "../Ebony/1963/",
"../Ebony/1964/", "../Ebony/1965/", "../Ebony/1966/",
"../Ebony/1967/", "../Ebony/1968/", "../Ebony/1969/",
"../Ebony/1970/", "../Ebony/1971/", "../Ebony/1972/",
"../Ebony/1973/", "../Ebony/1974/", "../Ebony/1975/",
"../Ebony/1976/")
model_list <- list()
for(m in 1:length(dirs)){
model_list[[m]] = read.vectors(paste0(dirs[m], "pooled_word2vec.bin"))
}
for(m in 1:length(dirs)){
tmp_rights[[m]] <- model_list[[m]] %>% closest_to(c("rights"), n = 30)
tmp_politics[[m]] <- model_list[[m]] %>% closest_to(c("politics"), n = 30)
tmp_time[[m]] <- model_list[[m]] %>% closest_to(c("time"), n = 30)
tmp_sick[[m]] <- model_list[[m]] %>% closest_to(c("sick"), n = 30)
tmp_ill[[m]] <- model_list[[m]] %>% closest_to(c("ill"), n = 30)
tmp_illness[[m]] <- model_list[[m]] %>% closest_to(c("illness"), n = 30)
tmp_culture[[m]] <- model_list[[m]] %>% closest_to(c("culture"), n = 30)
tmp_afroamerican[[m]] <- model_list[[m]] %>% closest_to(c("afroamerican", "africanamerican"), n = 30)
}
sink("../output/Ebony_list_words/rights.txt")
names(tmp_rights) <- paste0("Ebony_", 1961:1976)
print(tmp_rights)
sink()
sink("../output/Ebony_list_words/politics.txt")
names(tmp_politics) <- paste0("Ebony_", 1961:1976)
print(tmp_politics)
sink()
sink("../output/Ebony_list_words/time.txt")
names(tmp_time) <- paste0("Ebony_", 1961:1976)
print(tmp_time)
sink()
sink("../output/Ebony_list_words/sick.txt")
names(tmp_sick) <- paste0("Ebony_", 1961:1976)
print(tmp_sick)
sink()
sink("../output/Ebony_list_words/ill.txt")
names(tmp_ill) <- paste0("Ebony_", 1961:1976)
print(tmp_ill)
sink()
sink("../output/Ebony_list_words/illness.txt")
names(tmp_illness) <- paste0("Ebony_", 1961:1976)
print(tmp_illness)
sink()
sink("../output/Ebony_list_words/culture.txt")
names(tmp_culture) <- paste0("Ebony_", 1961:1976)
print(tmp_culture)
sink()
sink("../output/Ebony_list_words/afroamerican.txt")
names(tmp_afroamerican) <- paste0("Ebony_", 1961:1976)
print(tmp_afroamerican)
sink()
| /codes/lauren_chosen_words_ebony.R | no_license | kkdey/Black_magazines | R | false | false | 6,710 | r |
############## Lauren chosen words Ebony #####################
tab_ebony <- get(load("../output/table_word_frequencies_ebony.rda"))
all_words_ebony <- get(load("../output/all_words_ebony.rda"))
tab_ebony_normalized <- t(apply(tab_ebony, 1, function(x) return(x/sum(x))))
rownames(tab_ebony_normalized) <- paste0("Ebony_", 1961:1976)
plot(tab_ebony_normalized[, "rights"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "rights")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "vietnam"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "vietnam")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "nonviolence"] + tab_ebony_normalized[, "nonviolent"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "nonviolence")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "violence"] + tab_ebony_normalized[, "violent"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "violence")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "beatles"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "vietnam")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "drugs"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "vietnam")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "hippie"] + tab_ebony_normalized[, "hippies"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "hippie")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "politics"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "politics")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "time"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "time")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "sick"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "sick")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "ill"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "ill")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "illness"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "illness")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "culture"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "culture")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "afroamerican"] + tab_ebony_normalized[, "africanamerican"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "afroamerican")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
plot(tab_ebony_normalized[, "africanamerican"],
col = "red", pch = 20, cex = 1.5, xaxt = "n",
ylab = "occurrence prop.", xlab = "", main = "africanamerican")
axis(1, at=1:dim(tab_ebony_normalized)[1],
rownames(tab_ebony_normalized), las = 2, cex.axis = 0.7)
tmp_rights <- list()
tmp_politics <- list()
tmp_time <- list()
tmp_sick <- list()
tmp_ill <- list()
tmp_illness <- list()
tmp_culture <- list()
tmp_afroamerican <- list()
library(wordVectors)
dirs <- c("../Ebony/1961/", "../Ebony/1962/", "../Ebony/1963/",
"../Ebony/1964/", "../Ebony/1965/", "../Ebony/1966/",
"../Ebony/1967/", "../Ebony/1968/", "../Ebony/1969/",
"../Ebony/1970/", "../Ebony/1971/", "../Ebony/1972/",
"../Ebony/1973/", "../Ebony/1974/", "../Ebony/1975/",
"../Ebony/1976/")
model_list <- list()
for(m in 1:length(dirs)){
model_list[[m]] = read.vectors(paste0(dirs[m], "pooled_word2vec.bin"))
}
for(m in 1:length(dirs)){
tmp_rights[[m]] <- model_list[[m]] %>% closest_to(c("rights"), n = 30)
tmp_politics[[m]] <- model_list[[m]] %>% closest_to(c("politics"), n = 30)
tmp_time[[m]] <- model_list[[m]] %>% closest_to(c("time"), n = 30)
tmp_sick[[m]] <- model_list[[m]] %>% closest_to(c("sick"), n = 30)
tmp_ill[[m]] <- model_list[[m]] %>% closest_to(c("ill"), n = 30)
tmp_illness[[m]] <- model_list[[m]] %>% closest_to(c("illness"), n = 30)
tmp_culture[[m]] <- model_list[[m]] %>% closest_to(c("culture"), n = 30)
tmp_afroamerican[[m]] <- model_list[[m]] %>% closest_to(c("afroamerican", "africanamerican"), n = 30)
}
sink("../output/Ebony_list_words/rights.txt")
names(tmp_rights) <- paste0("Ebony_", 1961:1976)
print(tmp_rights)
sink()
sink("../output/Ebony_list_words/politics.txt")
names(tmp_politics) <- paste0("Ebony_", 1961:1976)
print(tmp_politics)
sink()
sink("../output/Ebony_list_words/time.txt")
names(tmp_time) <- paste0("Ebony_", 1961:1976)
print(tmp_time)
sink()
sink("../output/Ebony_list_words/sick.txt")
names(tmp_sick) <- paste0("Ebony_", 1961:1976)
print(tmp_sick)
sink()
sink("../output/Ebony_list_words/ill.txt")
names(tmp_ill) <- paste0("Ebony_", 1961:1976)
print(tmp_ill)
sink()
sink("../output/Ebony_list_words/illness.txt")
names(tmp_illness) <- paste0("Ebony_", 1961:1976)
print(tmp_illness)
sink()
sink("../output/Ebony_list_words/culture.txt")
names(tmp_culture) <- paste0("Ebony_", 1961:1976)
print(tmp_culture)
sink()
sink("../output/Ebony_list_words/afroamerican.txt")
names(tmp_afroamerican) <- paste0("Ebony_", 1961:1976)
print(tmp_afroamerican)
sink()
|
#------------------------------
#Name: Kamlesh Patel
#Date: Feb-25, 2018
#Project: Chapter 1
#------------------------------
#Learning to create vectors and their operations
x <- 1:5
y <- 5
y
print (y)
#add numeric constant 2 to vector x
x+2
x + y
#add numeric vector to vector x
x+ 10:14
#Multiply numeric vector to vector x
x * 10:14
#Character String
h<-"Hello"
#y1<-readline("What is your name? ")
print(paste(h, y1))
#List and Remove Objects
ls()
rm (x)
| /Chapter1_R (1).R | permissive | KamleshSAS/Base_R- | R | false | false | 500 | r | #------------------------------
#Name: Kamlesh Patel
#Date: Feb-25, 2018
#Project: Chapter 1
#------------------------------
#Learning to create vectors and their operations
x <- 1:5
y <- 5
y
print (y)
#add numeric constant 2 to vector x
x+2
x + y
#add numeric vector to vector x
x+ 10:14
#Multiply numeric vector to vector x
x * 10:14
#Character String
h<-"Hello"
#y1<-readline("What is your name? ")
print(paste(h, y1))
#List and Remove Objects
ls()
rm (x)
|
#Jash Mohinish Mehta
#Homework for Class 3 RowCols-Hw
#TASK 1
mtcars
#Copying raw data into a New Data Set
mymtcars <- mtcars
mymtcars
#First of all assigning disp/cyl to newvar
#Then assigning new variable to mymtcars
mymtcars$newvar <- mymtcars$disp/mymtcars$cyl
mymtcars
#Displaying the summary for the new column
summary(mymtcars$newvar)
#Task 2
#Creating number of Pets
pets <- c(2,2,3,4,1)
pets
#Creating the birthorder
birthorder <- c(1,2,3,4,5)
birthorder
#Creating number of siblings
siblings <- c(2,2,2,2,2)
siblings
userId <- c("Shrey","Shrutik","Jash","Ayesha","Charlotte")
userId
#Creating the data frame
myFriends <- data.frame(userId,pets,birthorder,siblings)
myFriends
#This will give 5 obs. of 4 variables
str(myFriends)
#this will give Min, Mean, Max, Median
summary(myFriends)
| /week 2 Row Columns/Jash Mehta, HW2, JM.R | no_license | jashmehta89/IST-687-Applied-Data-Science-LabWork | R | false | false | 846 | r | #Jash Mohinish Mehta
#Homework for Class 3 RowCols-Hw
#TASK 1
mtcars
#Copying raw data into a New Data Set
mymtcars <- mtcars
mymtcars
#First of all assigning disp/cyl to newvar
#Then assigning new variable to mymtcars
mymtcars$newvar <- mymtcars$disp/mymtcars$cyl
mymtcars
#Displaying the summary for the new column
summary(mymtcars$newvar)
#Task 2
#Creating number of Pets
pets <- c(2,2,3,4,1)
pets
#Creating the birthorder
birthorder <- c(1,2,3,4,5)
birthorder
#Creating number of siblings
siblings <- c(2,2,2,2,2)
siblings
userId <- c("Shrey","Shrutik","Jash","Ayesha","Charlotte")
userId
#Creating the data frame
myFriends <- data.frame(userId,pets,birthorder,siblings)
myFriends
#This will give 5 obs. of 4 variables
str(myFriends)
#this will give Min, Mean, Max, Median
summary(myFriends)
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2017 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# "Normalizes" a data.frame with respect to column types.
# This is necessary for comparing data.frames written to Excel and then
# read back in from Excel.
#
# Note that Excel does not know e.g. factor types. Factor variables
# in fact are written to Excel as ordinary strings. Therefore, when read
# back in to R, they are treated as character variables.
#
# 'normalizeDataframe' is used for RUnit tests to compare data.frame's
# written to and read from Excel
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
normalizeDataframe <- function(df, replaceInf = FALSE) {
att = attr(df, "row.names")
res <- lapply(df,
function(col) {
if(is(col, "numeric")) {
if(replaceInf) {
col[is.infinite(col)] = NA # there are no infinites in Excel
col
} else {
col
}
} else if(is(col, "logical") || is(col, "character")) {
col
} else if(is(col, "Date") || is(col, "POSIXt")) {
ms = round(as.numeric(as.POSIXct(col)), 3) # only consider up to milliseconds
d = as.POSIXct("1970-01-01", tz = "UTC") + ms
attr(d, "tzone") = ""
d
} else
as.character(col)
}
)
res = data.frame(res, stringsAsFactors = F)
attr(res, "row.names") = att
res
}
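# Editorial usage sketch (not part of the original source): the round-trip comparison
# described in the header. writeWorksheetToFile()/readWorksheetFromFile() are existing
# XLConnect functions; the file and sheet names are made up for illustration.
# df <- data.frame(f = factor(c("a", "b")), d = Sys.Date() + 0:1, stringsAsFactors = TRUE)
# writeWorksheetToFile("roundtrip.xlsx", df, sheet = "test")
# back <- readWorksheetFromFile("roundtrip.xlsx", sheet = "test")
# all.equal(normalizeDataframe(df), normalizeDataframe(back, replaceInf = TRUE))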
| /R/normalizeDataframe.R | no_license | GSuvorov/xlconnect | R | false | false | 2,260 | r | #############################################################################
#
# XLConnect
# Copyright (C) 2010-2017 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# "Normalizes" a data.frame with respect to column types.
# This is necessary for comparing data.frames written to Excel and then
# read back in from Excel.
#
# Note that Excel does not know e.g. factor types. Factor variables
# in fact are written to Excel as ordinary strings. Therefore, when read
# back in to R, they are treated as character variables.
#
# 'normalizeDataframe' is used for RUnit tests to compare data.frame's
# written to and read from Excel
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
normalizeDataframe <- function(df, replaceInf = FALSE) {
att = attr(df, "row.names")
res <- lapply(df,
function(col) {
if(is(col, "numeric")) {
if(replaceInf) {
col[is.infinite(col)] = NA # there are no infinites in Excel
col
} else {
col
}
} else if(is(col, "logical") || is(col, "character")) {
col
} else if(is(col, "Date") || is(col, "POSIXt")) {
ms = round(as.numeric(as.POSIXct(col)), 3) # only consider up to milliseconds
d = as.POSIXct("1970-01-01", tz = "UTC") + ms
attr(d, "tzone") = ""
d
} else
as.character(col)
}
)
res = data.frame(res, stringsAsFactors = F)
attr(res, "row.names") = att
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Factor_Order.R
\name{Factor_Order}
\alias{Factor_Order}
\title{Compute levels of factor}
\usage{
Factor_Order(x)
}
\arguments{
\item{x}{is a factor}
}
\value{
Returns the levels in the order in which they appear in the data.
An error is raised if the input is not a factor
}
\description{
This function computes the levels of a given factor by returning the unique ordering found in the dataset.
}
\examples{
Factor_Order(c("high", "med", "low"))
}
| /man/Factor_Order.Rd | no_license | STAT545-UBC-hw-2018-19/hw07-ecool50 | R | false | true | 522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Factor_Order.R
\name{Factor_Order}
\alias{Factor_Order}
\title{Compute levels of factor}
\usage{
Factor_Order(x)
}
\arguments{
\item{x}{is a factor}
}
\value{
Returns the levels in the order in which they appear in the data.
An error is raised if the input is not a factor
}
\description{
This function computes the levels of a given factor by returning the unique ordering found in the dataset.
}
\examples{
Factor_Order(c("high", "med", "low"))
}
|
##' plot the remap object
##'
##' plot.remap is a function to create the HTML file
##' and open it in a browser
##'
##'
##' @param object a remap object
##' @param path the path for saving REmap object
##' @return Create a html file according to the id of the object
##' @author Chiffon <\url{http://lchiffon.github.io}>
##' @examples
##' set.seed(125)
##' out = remap(demoC,title = "REmap: Demo DATA",
##' subtitle = "theme:Dark")
##' plot(out)
plot.remap = function(object,path = ""){
if(.Platform$OS.type == "windows"){
locate = Sys.getlocale("LC_CTYPE")
Sys.setlocale("LC_CTYPE",
"chs")
}
## Web js, don't need to rewrite JS
if( getOption('remap.js.web')){
file_name = paste0(object@id,".html")
writeLines(object@content,
paste0(path,file_name),
useBytes = T)
}else{
## else, write file to local temp files
path = tempdir()
file_name = paste0(path,"/",object@id,".html")
local.plot(object,file_name)
}
if(.Platform$OS.type == "windows"){
Sys.setlocale("LC_CTYPE",locate)
}
cat("Save img as:",file_name)
browseURL(file_name)
}
local.plot = function(object,file_name){
## SVG rewrite JS path
if(object@maptype == 'SVG'){
content = sub("http://echarts.baidu.com/build/dist/echarts.js",
"./js/echarts.js",
object@content)
content = sub("http://echarts.baidu.com/build/dist/echarts-all.js",
"./js/echarts-all.js",
content)
}
if(object@maptype == 'SVGH'){
content = sub("http://echarts.baidu.com/build/dist/echarts.js",
"./js/echarts.js",
object@content)
content = sub("http://echarts.baidu.com/build/dist/echarts-all.js",
"./js/echarts-all.js",
content)
}
## Bmap rewrite JS path
if(object@maptype == "Bmap"){
content = sub("http://echarts.baidu.com/build/dist/echarts.js",
"./js/echarts.js",
object@content)
content = sub("http://echarts.baidu.com/doc/asset/js/jquery.min.js",
"./js/jquery.min.js",
content)
content = sub("http://lchiffon.github.io/reveal_slidify/echarts/require",
"./js",
content)
}
writeLines(content,
file_name,
useBytes = T)
}
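## Editorial sketch (not part of the original source): the rewrite above relies on
## sub(), which replaces only the first match of the CDN URL in the HTML content, e.g.
# sub("http://echarts.baidu.com/build/dist/echarts.js", "./js/echarts.js",
#     "<script src=\"http://echarts.baidu.com/build/dist/echarts.js\"></script>")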
| /REmap/R/plot.remap.R | permissive | moisiet/R-Packge | R | false | false | 2,397 | r | ##' plot the remap object
##'
##' plot.remap is a function to create the HTML file
##' and open it in a browser
##'
##'
##' @param object a remap object
##' @param path the path for saving REmap object
##' @return Create a html file according to the id of the object
##' @author Chiffon <\url{http://lchiffon.github.io}>
##' @examples
##' set.seed(125)
##' out = remap(demoC,title = "REmap: Demo DATA",
##' subtitle = "theme:Dark")
##' plot(out)
plot.remap = function(object,path = ""){
if(.Platform$OS.type == "windows"){
locate = Sys.getlocale("LC_CTYPE")
Sys.setlocale("LC_CTYPE",
"chs")
}
## Web js, don't need to rewrite JS
if( getOption('remap.js.web')){
file_name = paste0(object@id,".html")
writeLines(object@content,
paste0(path,file_name),
useBytes = T)
}else{
## else, write file to local temp files
path = tempdir()
file_name = paste0(path,"/",object@id,".html")
local.plot(object,file_name)
}
if(.Platform$OS.type == "windows"){
Sys.setlocale("LC_CTYPE",locate)
}
cat("Save img as:",file_name)
browseURL(file_name)
}
local.plot = function(object,file_name){
## SVG rewrite JS path
if(object@maptype == 'SVG'){
content = sub("http://echarts.baidu.com/build/dist/echarts.js",
"./js/echarts.js",
object@content)
content = sub("http://echarts.baidu.com/build/dist/echarts-all.js",
"./js/echarts-all.js",
content)
}
if(object@maptype == 'SVGH'){
content = sub("http://echarts.baidu.com/build/dist/echarts.js",
"./js/echarts.js",
object@content)
content = sub("http://echarts.baidu.com/build/dist/echarts-all.js",
"./js/echarts-all.js",
content)
}
## Bmap rewrite JS path
if(object@maptype == "Bmap"){
content = sub("http://echarts.baidu.com/build/dist/echarts.js",
"./js/echarts.js",
object@content)
content = sub("http://echarts.baidu.com/doc/asset/js/jquery.min.js",
"./js/jquery.min.js",
content)
content = sub("http://lchiffon.github.io/reveal_slidify/echarts/require",
"./js",
content)
}
writeLines(content,
file_name,
useBytes = T)
}
|
library(GenomicDataCommons)
library(magrittr)
context('legacy endpoint')
## IDs here were selected interactively, just for testing.
## If GDC removes these IDs, expect tests to fail.
files_legacy_ids = files(legacy = TRUE) %>% results(size = 10) %>% ids()
cases_legacy_ids = cases(legacy = TRUE) %>% results(size = 10) %>% ids()
###########################
##
## FILES
##
###########################
## ID functionality
test_that("legacy file ids in regular archive, also", {
fquery = files(legacy = FALSE) %>% filter( ~ file_id %in% files_legacy_ids)
fres = fquery %>% ids()
expect_length(fres,10)
})
test_that("legacy file ids found", {
fquery = files()
fquery$legacy = TRUE
fres = fquery %>% filter( ~ file_id %in% files_legacy_ids) %>% ids()
expect_length(fres,length(files_legacy_ids))
rm(fquery,fres)
})
## Manifest functionality
test_that("legacy manifest matches legacy ids", {
fquery = files()
fquery$legacy = TRUE
fres = fquery %>% filter( ~ file_id %in% files_legacy_ids) %>% manifest()
expect_equal(nrow(fres),length(files_legacy_ids))
expect_true(all(fres$id %in% files_legacy_ids))
})
###########################
##
## Cases
##
###########################
## ID functionality
test_that("legacy case ids found", {
cquery = cases()
cquery$legacy = TRUE
cres = cquery %>% filter( ~ case_id %in% cases_legacy_ids) %>% ids()
expect_equal(length(cres),length(cases_legacy_ids))
})
# Note that case ids may be in both legacy and default archives
test_that("legacy case ids in default archive, also", {
cquery = cases()
cquery$legacy = FALSE
cres = cquery %>% filter( ~ case_id %in% cases_legacy_ids) %>% ids()
expect_equal(length(cres),10)
})
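## Editorial sketch (not part of the test file): the same query idiom outside testthat,
## reusing the ids fetched above; manifest() returns one row per matching file.
# mani <- files() %>% filter(~ file_id %in% files_legacy_ids) %>% manifest()
# nrow(mani)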
| /tests/testthat/test_legacy.R | no_license | economistgame/GenomicDataCommons | R | false | false | 1,751 | r | library(GenomicDataCommons)
library(magrittr)
context('legacy endpoint')
## IDs here were selected interactively, just for testing.
## If GDC removes these IDs, expect tests to fail.
files_legacy_ids = files(legacy = TRUE) %>% results(size = 10) %>% ids()
cases_legacy_ids = cases(legacy = TRUE) %>% results(size = 10) %>% ids()
###########################
##
## FILES
##
###########################
## ID functionality
test_that("legacy file ids in regular archive, also", {
fquery = files(legacy = FALSE) %>% filter( ~ file_id %in% files_legacy_ids)
fres = fquery %>% ids()
expect_length(fres,10)
})
test_that("legacy file ids found", {
fquery = files()
fquery$legacy = TRUE
fres = fquery %>% filter( ~ file_id %in% files_legacy_ids) %>% ids()
expect_length(fres,length(files_legacy_ids))
rm(fquery,fres)
})
## Manifest functionality
test_that("legacy manifest matches legacy ids", {
fquery = files()
fquery$legacy = TRUE
fres = fquery %>% filter( ~ file_id %in% files_legacy_ids) %>% manifest()
expect_equal(nrow(fres),length(files_legacy_ids))
expect_true(all(fres$id %in% files_legacy_ids))
})
###########################
##
## Cases
##
###########################
## ID functionality
test_that("legacy case ids found", {
cquery = cases()
cquery$legacy = TRUE
cres = cquery %>% filter( ~ case_id %in% cases_legacy_ids) %>% ids()
expect_equal(length(cres),length(cases_legacy_ids))
})
# Note that case ids may be in both legacy and default archives
test_that("legacy case ids in default archive, also", {
cquery = cases()
cquery$legacy = FALSE
cres = cquery %>% filter( ~ case_id %in% cases_legacy_ids) %>% ids()
expect_equal(length(cres),10)
})
|
women
plot(women)
str(cars)
# Select the two lines and press the 'Run' button at the top to run them together
a<-2
b<-a+a
# Set the working directory
getwd()
setwd('/Workspace/R')
getwd()
library(dplyr)
library(ggplot2)
search()
str(iris)
head(iris) # shows the first 6 rows by default
head(iris,10)
tail(iris) # shows the last 6 rows
plot(iris)
# relationship between two attributes
plot(iris$Petal.Length,iris$Petal.Width,col=iris$Species,pch=18)
#download
tips=read.csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv')
head(tips)
str(tips)
# summary statistics
summary(tips)
# plotting with ggplot2
tips %>% ggplot(aes(size))+geom_histogram() # histogram
tips %>% ggplot(aes(total_bill,tip))+geom_point() # scatter plot
tips %>% ggplot(aes(total_bill,tip))+geom_point(aes(col=day))
tips %>% ggplot(aes(total_bill,tip))+geom_point(aes(col=day,pch=sex),size=3)
tips %>% ggplot(aes(total_bill,tip))+geom_point(aes(col=day,pch=time),size=3)
| /ch02.R | no_license | Dahee99/R-Lecture-2021 | R | false | false | 977 | r | women
plot(women)
str(cars)
# 두 줄을 선택한 후 상단에 있는 'Run' 버튼을 누르면 한꺼번에 실행됨
a<-2
b<-a+a
# 작업 디렉토리 지정
getwd()
setwd('/Workspace/R')
getwd()
library(dplyr)
library(ggplot2)
search()
str(iris)
head(iris) #Default는 6개 기본으로 보여줌
head(iris,10)
tail(iris) #끝에서 6개 보여줌
plot(iris)
#두 속성의 상관관계
plot(iris$Petal.Length,iris$Petal.Width,col=iris$Species,pch=18)
#download
tips=read.csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv')
head(tips)
str(tips)
#요약통계
summary(tips)
#ggplot2 그림그리기
tips %>% ggplot(aes(size))+geom_histogram() #히스토그램
tips %>% ggplot(aes(total_bill,tip))+geom_point() #산점도
tips %>% ggplot(aes(total_bill,tip))+geom_point(aes(col=day))
tips %>% ggplot(aes(total_bill,tip))+geom_point(aes(col=day,pch=sex),size=3)
tips %>% ggplot(aes(total_bill,tip))+geom_point(aes(col=day,pch=time),size=3)
|
#######################################
#
# Decision Making on Promotion Strategy for Maru Batting Center
# based on Customer Lifetime Value
# A first small smart attempt!
#
#######################################
rm(list=ls())
#### set up all the parameters
# give names for different customer segments
seg.names = c(
"Little Leaguers", "Summer Sluggers",
"Elite Ballplayers (Print Ad)", "Elite Ballplayers (Party)",
"Entertainment Seekers"
# Input contact cost and response rate on ads of each segment
contact.cost = c(1000, 1500, 300, 12500, 50)
response.rate = c(0.10, 0.15, 0.005, 0.25, 0.025)
# calculate the acquisition cost using the formula contact.cost / response.rate
acquisition.cost = contact.cost/response.rate
# input labor cost for marketing
workers.needed = c(2, 1, 1, 1, 2)
worker.labor.cost = rep(1500, 5)
instructors.needed = c(1, 0, 1, 1, 0)
instructor.hourly.labor.cost = c(3000, 0, 4500, 4500, 0)
# calculate the total cost per hour
total.cost.per.hour = workers.needed * worker.labor.cost + instructors.needed * instructor.hourly.labor.cost
# input the revenue received per hour
hourly.price.charged = c(6500, 3000, 7500, 7500, 4000)
# calculate the hourly margins
hourly.margin = hourly.price.charged - total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
# input the annual hours in service for each segment
annual.hours = c(10, 4, 20, 20, 1.5)
# calculate total annual margin for each segment
annual.margin = annual.hours * hourly.margin
# input retention rate
retention.rate = c(0.75, 0.50, 0.60, 0.60, 0.35)
# number of years to calculate (assume they can be alive for total 25 years)(long enough haha)
num.year = 25
#### Start to consider different marketing proposals!
### Calculate breakeven point for each segment (ignore the discount rate)
discount.rate = 0
## Set up an awesome function to output the CLV of a customer in one segment.
calc.CLV = function(seg.index) {
# survival rate for each year
survival.rate = rep(0, num.year)
# annual profit for each year
annual.profit = rep(0, num.year)
# NPV of annual profit
npv.annual.profit = rep(0, num.year)
# cumulative profit
cum.profit = rep(0, num.year)
# NPV of cumulative profit
npv.cum.profit = rep(0, num.year)
for (t in 1:num.year) {
# At period 1, a customer is just acquired
# so his survival rate is 100%, has no profit, and need full acquisition cost
if (t==1) {
survival.rate[t] = 1
annual.profit[t] = annual.profit[t] - acquisition.cost[seg.index]
}
else {
# At other periods, survival rate = survival rate in previous period * retention rate
survival.rate[t] = survival.rate[t-1] * retention.rate[seg.index]
}
# calculate the annual profit for this period
annual.profit[t] = annual.profit[t] + survival.rate[t] * annual.margin[seg.index]
# calculate the NPV of the annual profit for this year
# purchase happens at the beginning of each year so there is no discounting for the 1st period
npv.annual.profit[t] = annual.profit[t] / (1+discount.rate)^(t-1)
if (t==1) {
# in the 1st period, cumulative profit is simply the annual profit
cum.profit[t] = annual.profit[t]
npv.cum.profit[t] = cum.profit[t]
}
else {
# in other periods, cumulative profit is the one in previous period plus the annual profit for this period
cum.profit[t] = cum.profit[t-1] + annual.profit[t]
npv.cum.profit[t] = cum.profit[t] / (1+discount.rate)^(t-1)
}
}
# wrap all results!
ret = list(
survival.rate=survival.rate,
annual.profit=annual.profit,
npv.annual.profit=npv.annual.profit,
cum.profit=cum.profit,
npv.cum.profit=npv.cum.profit)
return(ret)
}
# use the powerful lapply to apply this function to all segments
results = lapply(1:5, calc.CLV)
# obtain the break-even year for all the segments
# we assume that when npv.annual.profit<100, a customer is an attrition (he dies).
# managed to write a clever and gorgeous loop
for (i in 1:5) {
positive.npv.cum.profit = which(results[[i]]$npv.cum.profit>0)
print(positive.npv.cum.profit[1])
}
npv.annual.profit.large.enough = which(results[[1]]$npv.annual.profit>=100)
last.index = length(npv.annual.profit.large.enough)
results[[1]]$npv.cum.profit[npv.annual.profit.large.enough[last.index]]
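# Editorial cross-check (not part of the original script): with a constant annual margin,
# retention rate r and discount rate d, the infinite-horizon CLV of the loop above equals
# -acquisition.cost + annual.margin * (1 + d) / (1 + d - r); handy for sanity-checking
# the 25-year truncation.
clv.closed.form = function(seg.index) {
  -acquisition.cost[seg.index] + annual.margin[seg.index] *
    (1 + discount.rate) / (1 + discount.rate - retention.rate[seg.index])
}
sapply(1:5, clv.closed.form)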
### consider there is a discount rate of 10% and calculate CLV
discount.rate = 0.10
results = lapply(1:5, calc.CLV)
# calculate CLV for all the segments
All.clv = rep(0, 5)
for(i in 1:5) {
npv.annual.profit.large.enough = which(results[[i]]$npv.annual.profit>=100)
last.index = length(npv.annual.profit.large.enough)
All.clv[i] = results[[i]]$npv.cum.profit[npv.annual.profit.large.enough[last.index]]
}
All.clv
### identify which segment is most attractive (choose CLV)
best.segement = which.max(All.clv)
best.segement
### Consider an alternative place, Chiyoda ward, which is the alternative for original Minato ward
# managed to solve it by adding parameters to previous variables
contact.cost = c(contact.cost, 600)
response.rate = c(response.rate, 0.08)
# calculate the acquisition cost
acquisition.cost = contact.cost/response.rate
acquisition.cost
workers.needed = c(workers.needed, 2)
worker.labor.cost = c(worker.labor.cost, 1500)
instructors.needed = c(instructors.needed, 1)
instructor.hourly.labor.cost = c(instructor.hourly.labor.cost, 3000)
# calculate the total cost per hour
total.cost.per.hour = workers.needed * worker.labor.cost + instructors.needed * instructor.hourly.labor.cost
hourly.price.charged = c(hourly.price.charged, 6500)
hourly.margin = hourly.price.charged - total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
annual.hours = c(annual.hours, 10)
annual.margin = hourly.margin * annual.hours
retention.rate = c(retention.rate, 0.65)
# calculate the results for Chiyoda
chiyoda.results <- calc.CLV(6)
# determine if Chiyoda should be targeted
npv.annual.profit.large.enough.Chiyoda = which(chiyoda.results$npv.annual.profit>=100)
last.index.Chiyoda = length(npv.annual.profit.large.enough.Chiyoda)
chiyoda.results$npv.cum.profit[npv.annual.profit.large.enough.Chiyoda[last.index.Chiyoda]]
### consider a 2nd proposal
## targeting Elite Ballplayers segment by offering a ¥500 discount
## on all future purchases to Elite Ballplayers who purchase at least 20 batting cage hours in Year 1.
# this means that they offer 500 discount since year 2
# to still make use of the function
# first assume the discount is offered since year 1,
# after calling the function to do the calculation,
# add the missing 500*20=10000 profit to annual.profit[1], npv.annual.profit[1],
# and to all the elements in cum.profit and npv.cum.profit
# change the parameters.
# 1. the hourly price is reduced to 7000
# 2. the retention rate is increased to 0.75
hourly.price.charged[4] = 7000
hourly.margin = hourly.price.charged-total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
annual.margin = hourly.margin * annual.hours
retention.rate[4] = 0.75
elite.ballplayers.discount = calc.CLV(4)
# Add the 500*20 to the annual profit in the first period
elite.ballplayers.discount$annual.profit[1] = elite.ballplayers.discount$annual.profit[1] + 500*20
# also the NPV of the annual profit in the first period - Why?
elite.ballplayers.discount$npv.annual.profit[1] = elite.ballplayers.discount$npv.annual.profit[1] + 500*20
# Add the 500*20 to the cumulative profit in all the periods
elite.ballplayers.discount$cum.profit = elite.ballplayers.discount$cum.profit + 500*20
# also to the NPV of the cumulative profit in all the periods
elite.ballplayers.discount$npv.cum.profit = elite.ballplayers.discount$npv.cum.profit + {(500*20) / (1+discount.rate)^(c(1:num.year)-1)}
npv.annual.profit.large.enough.EBpromo = which(elite.ballplayers.discount$npv.annual.profit>=100)
last.index.EBpromo = length(npv.annual.profit.large.enough.EBpromo)
elite.ballplayers.discount$npv.cum.profit[npv.annual.profit.large.enough.EBpromo[last.index.EBpromo]]
### consider a 3rd proposal:
### offering converted Elite Ball-players a free bat
# change the parameters
# 1. the response rate is increased to 0.29
# 2. for each "ACQUIARED" customer, a free bat is to be given, which increases the acquisition cost
response.rate[4] = 0.29
acquisition.cost = contact.cost / response.rate
# add the bat cost to the acquisition cost (only for this segment)
acquisition.cost[4] = acquisition.cost[4] + 10000
# set the parameters used in aforementioned proposal back to the original value
hourly.price.charged[4] = 7500
hourly.margin = hourly.price.charged-total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
annual.margin = hourly.margin * annual.hours
retention.rate[4] = 0.60
elite.ballplayers.bat = calc.CLV(4)
round(elite.ballplayers.bat$npv.annual.profit,3)
# make decision!
npv.annual.profit.large.enough.EBbat = which(elite.ballplayers.bat$npv.annual.profit>=100)
last.index.EBbat = length(npv.annual.profit.large.enough.EBbat)
elite.ballplayers.bat$npv.cum.profit[npv.annual.profit.large.enough.EBbat[last.index.EBbat]]
| /Maru Batting Center.R | no_license | smartdumpling/Marketing-Analytics | R | false | false | 9,154 | r | #######################################
#
# Decision Making on Promotion Strategy for Maru Batting Center
# based on Customer Lifetime Value
# A first small smart attempt!
#
#######################################
rm(list=ls())
#### set up all the parameters
# give names for different customer segments
seg.names = c(
"Little Leaguers", "Summer Sluggers",
"Elite Ballplayers (Print Ad)", "Elite Ballplayers (Party)",
"Entertainment Seekers"
# Input contact cost and response rate on ads of each segment
contact.cost = c(1000, 1500, 300, 12500, 50)
response.rate = c(0.10, 0.15, 0.005, 0.25, 0.025)
# calculate the acquisition cost using the formula contact.cost / response.rate
acquisition.cost = contact.cost/response.rate
# input labor cost for marketing
workers.needed = c(2, 1, 1, 1, 2)
worker.labor.cost = rep(1500, 5)
instructors.needed = c(1, 0, 1, 1, 0)
instructor.hourly.labor.cost = c(3000, 0, 4500, 4500, 0)
# calculate the total cost per hour
total.cost.per.hour = workers.needed * worker.labor.cost + instructors.needed * instructor.hourly.labor.cost
# input the revenue received per hour
hourly.price.charged = c(6500, 3000, 7500, 7500, 4000)
# calculate the hourly margins
hourly.margin = hourly.price.charged - total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
# input the annual hours in service for each segment
annual.hours = c(10, 4, 20, 20, 1.5)
# calculate total annual margin for each segment
annual.margin = annual.hours * hourly.margin
# input retention rate
retention.rate = c(0.75, 0.50, 0.60, 0.60, 0.35)
# number of years to calculate (assume they can be alive for total 25 years)(long enough haha)
num.year = 25
#### Start to consider different marketing proposals!
### Calculate breakeven point for each segment (ignore the discount rate)
discount.rate = 0
## Set up an awesome function to output the CLV of a customer in one segment.
calc.CLV = function(seg.index) {
# survival rate for each year
survival.rate = rep(0, num.year)
# annual profit for each year
annual.profit = rep(0, num.year)
# NPV of annual profit
npv.annual.profit = rep(0, num.year)
# cumulative profit
cum.profit = rep(0, num.year)
# NPV of cumulative profit
npv.cum.profit = rep(0, num.year)
for (t in 1:num.year) {
# At period 1, a customer is just acquired
# so his survival rate is 100%, has no profit, and need full acquisition cost
if (t==1) {
survival.rate[t] = 1
annual.profit[t] = annual.profit[t] - acquisition.cost[seg.index]
}
else {
# At other periods, survival rate = survival rate in previous period * retention rate
survival.rate[t] = survival.rate[t-1] * retention.rate[seg.index]
}
# calculate the annual profit for this period
annual.profit[t] = annual.profit[t] + survival.rate[t] * annual.margin[seg.index]
# calculate the NPV of the annual profit for this year
# purchase happens at the beginning of each year so there is no discounting for the 1st period
npv.annual.profit[t] = annual.profit[t] / (1+discount.rate)^(t-1)
if (t==1) {
# in the 1st period, cumulative profit is simply the annual profit
cum.profit[t] = annual.profit[t]
npv.cum.profit[t] = cum.profit[t]
}
else {
# in other periods, cumulative profit is the one in previous period plus the annual profit for this period
cum.profit[t] = cum.profit[t-1] + annual.profit[t]
npv.cum.profit[t] = cum.profit[t] / (1+discount.rate)^(t-1)
}
}
# wrap all results!
ret = list(
survival.rate=survival.rate,
annual.profit=annual.profit,
npv.annual.profit=npv.annual.profit,
cum.profit=cum.profit,
npv.cum.profit=npv.cum.profit)
return(ret)
}
# use the powerful lapply to apply this function to all segments
results = lapply(1:5, calc.CLV)
# obtain the break-even year for all the segments
# we assume that when npv.annual.profit<100, a customer is an attrition (he dies).
# managed to write a clever and gorgeous loop
for (i in 1:5) {
positive.npv.cum.profit = which(results[[i]]$npv.cum.profit>0)
print(positive.npv.cum.profit[1])
}
npv.annual.profit.large.enough = which(results[[1]]$npv.annual.profit>=100)
last.index = length(npv.annual.profit.large.enough)
results[[1]]$npv.cum.profit[npv.annual.profit.large.enough[last.index]]
### consider there is a discount rate of 10% and calculate CLV
discount.rate = 0.10
results = lapply(1:5, calc.CLV)
# calculate CLV for all the segments
All.clv = rep(0, 5)
for(i in 1:5) {
npv.annual.profit.large.enough = which(results[[i]]$npv.annual.profit>=100)
last.index = length(npv.annual.profit.large.enough)
All.clv[i] = results[[i]]$npv.cum.profit[npv.annual.profit.large.enough[last.index]]
}
All.clv
### identify which segment is most attractive (choose CLV)
best.segement = which.max(All.clv)
best.segement
### Consider an alternative place, Chiyoda ward, which is the alternative for original Minato ward
# managed to solve it by adding parameters to previous variables
contact.cost = c(contact.cost, 600)
response.rate = c(response.rate, 0.08)
# calculate the acquisition cost
acquisition.cost = contact.cost/response.rate
acquisition.cost
workers.needed = c(workers.needed, 2)
worker.labor.cost = c(worker.labor.cost, 1500)
instructors.needed = c(instructors.needed, 1)
instructor.hourly.labor.cost = c(instructor.hourly.labor.cost, 3000)
# calculate the total cost per hour
total.cost.per.hour = workers.needed * worker.labor.cost + instructors.needed * instructor.hourly.labor.cost
hourly.price.charged = c(hourly.price.charged, 6500)
hourly.margin = hourly.price.charged - total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
annual.hours = c(annual.hours, 10)
annual.margin = hourly.margin * annual.hours
retention.rate = c(retention.rate, 0.65)
# calculate the results for Chiyoda
chiyoda.results <- calc.CLV(6)
# determine if Chiyoda should be targeted
npv.annual.profit.large.enough.Chiyoda = which(chiyoda.results$npv.annual.profit>=100)
last.index.Chiyoda = length(npv.annual.profit.large.enough.Chiyoda)
chiyoda.results$npv.cum.profit[npv.annual.profit.large.enough.Chiyoda[last.index.Chiyoda]]
### consider a 2nd proposal
## targeting Elite Ballplayers segment by offering a ¥500 discount
## on all future purchases to Elite Ballplayers who purchase at least 20 batting cage hours in Year 1.
# this means that they offer 500 discount since year 2
# to still make use of the function
# first assume the discount is offered since year 1,
# after calling the function to do the calculation,
# add the missing 500*20=10000 profit to annual.profit[1], npv.annual.profit[1],
# and to all the elements in cum.profit and npv.cum.profit
# change the parameters.
# 1. the hourly price is reduced to 7000
# 2. the retention rate is increased to 0.75
hourly.price.charged[4] = 7000
hourly.margin = hourly.price.charged-total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
annual.margin = hourly.margin * annual.hours
retention.rate[4] = 0.75
elite.ballplayers.discount = calc.CLV(4)
# Add the 500*20 to the annual profit in the first period
elite.ballplayers.discount$annual.profit[1] = elite.ballplayers.discount$annual.profit[1] + 500*20
# also the NPV of the annual profit in the first period - Why?
elite.ballplayers.discount$npv.annual.profit[1] = elite.ballplayers.discount$npv.annual.profit[1] + 500*20
# Add the 500*20 to the cumulative profit in all the periods
elite.ballplayers.discount$cum.profit = elite.ballplayers.discount$cum.profit + 500*20
# also to the NPV of the cumulative profit in all the periods
elite.ballplayers.discount$npv.cum.profit = elite.ballplayers.discount$npv.cum.profit + {(500*20) / (1+discount.rate)^(c(1:num.year)-1)}
npv.annual.profit.large.enough.EBpromo = which(elite.ballplayers.discount$npv.annual.profit>=100)
last.index.EBpromo = length(npv.annual.profit.large.enough.EBpromo)
elite.ballplayers.discount$npv.cum.profit[npv.annual.profit.large.enough.EBpromo[last.index.EBpromo]]
### consider a 3rd proposal:
### offering converted Elite Ball-players a free bat
# change the parameters
# 1. the response rate is increased to 0.29
# 2. for each "ACQUIARED" customer, a free bat is to be given, which increases the acquisition cost
response.rate[4] = 0.29
acquisition.cost = contact.cost / response.rate
# add the bat cost to the acquisition cost (only for this segment)
acquisition.cost[4] = acquisition.cost[4] + 10000
# set the parameters used in aforementioned proposal back to the original value
hourly.price.charged[4] = 7500
hourly.margin = hourly.price.charged-total.cost.per.hour
hourly.margin.pct = hourly.margin / hourly.price.charged
annual.margin = hourly.margin * annual.hours
retention.rate[4] = 0.60
elite.ballplayers.bat = calc.CLV(4)
round(elite.ballplayers.bat$npv.annual.profit,3)
# make decision!
npv.annual.profit.large.enough.EBbat = which(elite.ballplayers.bat$npv.annual.profit>=100)
last.index.EBbat = length(npv.annual.profit.large.enough.EBbat)
elite.ballplayers.bat$npv.cum.profit[npv.annual.profit.large.enough.EBbat[last.index.EBbat]]
|
#' Get game_ids in specified years
#'
#' @import rvest
#' @import stringr
#' @importFrom dplyr %>%
#' @importFrom purrr map
#'
#' @return game_id vector
#' @export
#'
get_gids <- function(start_year, end_year){
if(start_year < 2010 || end_year > lubridate::year(Sys.Date()))
stop(paste0("'years' must be between 2010 and ", lubridate::year(Sys.Date())))
`%+%` <- function(x, y) paste0(x, y)
date_seq <- seq(as.Date(start_year %+% "-02-01"),
as.Date(end_year %+% "-10-31"), by = "day")
date <- data.frame(date = date_seq, stringsAsFactors = F) %>%
tidyr::separate(date, c("year", "month", "day"), sep = "-")
candidate <- "http://gd2.mlb.com/components/game/mlb/year_" %+% date$year %+%
"/month_" %+% date$month %+% "/day_" %+% date$day
res_list <- candidate %>% map(function(x) {
tmp <- try(read_html(x), silent = T)
if (class(tmp)[1] != "try-error") {
tmp %>%
html_nodes("li") %>%
html_text() %>%
str_subset("^ gid_") %>%
str_subset(".*mlb.*") %>%
str_replace_all(" |/", "")
}
})
return(do.call("c", res_list))
}
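# Editorial usage sketch (not part of the original file): needs network access and the
# imports declared above; the season below is arbitrary.
# gids_2015 <- get_gids(2015, 2015)
# head(gids_2015)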
| /R/get_gids.R | permissive | pontsuyu/pitchRx2 | R | false | false | 1,205 | r | #' Get game_ids in specified years
#'
#' @import rvest
#' @import stringr
#' @importFrom dplyr %>%
#' @importFrom purrr map
#'
#' @return game_id vector
#' @export
#'
get_gids <- function(start_year, end_year){
if(start_year < 2010 || end_year > lubridate::year(Sys.Date()))
stop(paste0("'years' must be between 2010 and ", lubridate::year(Sys.Date())))
`%+%` <- function(x, y) paste0(x, y)
date_seq <- seq(as.Date(start_year %+% "-02-01"),
as.Date(end_year %+% "-10-31"), by = "day")
date <- data.frame(date = date_seq, stringsAsFactors = F) %>%
tidyr::separate(date, c("year", "month", "day"), sep = "-")
candidate <- "http://gd2.mlb.com/components/game/mlb/year_" %+% date$year %+%
"/month_" %+% date$month %+% "/day_" %+% date$day
res_list <- candidate %>% map(function(x) {
tmp <- try(read_html(x), silent = T)
if (class(tmp)[1] != "try-error") {
tmp %>%
html_nodes("li") %>%
html_text() %>%
str_subset("^ gid_") %>%
str_subset(".*mlb.*") %>%
str_replace_all(" |/", "")
}
})
return(do.call("c", res_list))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotation.R
\docType{methods}
\name{keepEPloops}
\alias{keepEPloops}
\alias{keepEPloops,loops,GRanges,GRanges-method}
\title{Keep enhancer-promoter loops}
\usage{
keepEPloops(lto, enhancer, promoter)
\S4method{keepEPloops}{loops,GRanges,GRanges}(lto, enhancer, promoter)
}
\arguments{
\item{lto}{A loops object whose loops will be annotated}
\item{enhancer}{GRanges object corresponding to locations of enhancer peaks}
\item{promoter}{GRanges object corresponding to locations of promoter regions}
}
\value{
A loops object with an additional row 'loop.type' in the rowData slot
in addition to the gene.tss (which has the gene name) and the
anchor.tss which shows the anchor(s) near the promoter region for the gene.
}
\description{
\code{keepEPloops} adds a column to the rowData slot of a loops
object that shows the corresponding TSS of a gene name based on
the promoter GRanges. The loops object is then subsetted and returns
only loops that are enhancer-promoter.
}
\details{
This function works similar to the \code{annotateLoops} function but
returns only enhancer-promoter loops that are defined in this function.
Additionally, this function returns the gene name(s) of the nearby
transcription start sites in a comma-separated list if there are multiple.
These gene names are defined by the promoter GRanges mcol slot.
}
\examples{
rda<-paste(system.file('rda',package='diffloop'),'loops.small.rda',sep='/')
load(rda)
h3k27ac_j <- system.file('extdata','Jurkat_H3K27ac_chr1.narrowPeak',package='diffloop')
h3k27ac <- rmchr(padGRanges(bedToGRanges(h3k27ac_j), pad = 1000))
promoter <- padGRanges(getHumanTSS(c('1')), pad = 1000)
# small.ep <- keepEPloops(loops.small, h3k27ac, promoter)
}
| /man/keepEPloops.Rd | no_license | DoaneAS/diffloop | R | false | true | 1,780 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotation.R
\docType{methods}
\name{keepEPloops}
\alias{keepEPloops}
\alias{keepEPloops,loops,GRanges,GRanges-method}
\title{Keep enhancer-promoter loops}
\usage{
keepEPloops(lto, enhancer, promoter)
\S4method{keepEPloops}{loops,GRanges,GRanges}(lto, enhancer, promoter)
}
\arguments{
\item{lto}{A loops object whose loops will be annotated}
\item{enhancer}{GRanges object corresponding to locations of enhancer peaks}
\item{promoter}{GRanges object corresponding to locations of promoter regions}
}
\value{
A loops object with an additional row 'loop.type' in the rowData slot
in addition to the gene.tss (which has the gene name) and the
anchor.tss which shows the anchor(s) near the promoter region for the gene.
}
\description{
\code{keepEPloops} adds a column to the rowData slot of a loops
object that shows the corresponding TSS of a gene name based on
the promoter GRanges. The loops object is then subsetted and returns
only loops that are enhancer-promoter.
}
\details{
This function works similar to the \code{annotateLoops} function but
returns only enhancer-promoter loops that are defined in this function.
Additionally, this function returns the gene name(s) of the nearby
transcription start sites in a comma-separated list if there are multiple.
These gene names are defined by the promoter GRanges mcol slot.
}
\examples{
rda<-paste(system.file('rda',package='diffloop'),'loops.small.rda',sep='/')
load(rda)
h3k27ac_j <- system.file('extdata','Jurkat_H3K27ac_chr1.narrowPeak',package='diffloop')
h3k27ac <- rmchr(padGRanges(bedToGRanges(h3k27ac_j), pad = 1000))
promoter <- padGRanges(getHumanTSS(c('1')), pad = 1000)
# small.ep <- keepEPloops(loops.small, h3k27ac, promoter)
}
|
#set up variables
sqlConnString <- "Driver=SQL Server;Server=JrsSql2016e;Database=taxi;Uid=RServicesUser;Pwd=RServicesUser"
trainTable <- "v_pickups_train"
testTable <- "v_pickups_test"
#Column information
ccColInfo <- list(
target_pickup_hour = list(
type = "factor",
levels = as.character(0:23)),
target_pickup_dw = list(
type = "factor",
levels = as.character(1:7),
newLevels = c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"))
)
#Create the data set
trainDS <- RxSqlServerData(connectionString = sqlConnString,
table = trainTable, rowsPerRead = 5000, colInfo = ccColInfo)
testDS <- RxSqlServerData(connectionString = sqlConnString,
table = testTable, rowsPerRead = 5000, colInfo = ccColInfo)
#Get some basic information on the table
rxGetVarInfo(data = trainDS)
rxGetVarInfo(data = testDS)
#Need a local cache directory
sqlShareDir <- paste("c:\\AllShare\\", Sys.getenv("USERNAME"), sep="")
dir.create(sqlShareDir, recursive = TRUE)
#Set the compute context
sqlCompute <- RxInSqlServer(connectionString = sqlConnString, shareDir = sqlShareDir,
wait = TRUE, consoleOutput = TRUE)
# Set the compute context to compute in SQL Server
rxSetComputeContext(sqlCompute)
#rxSetComputeContext(computeContext = 'local')
#Display summary statistics
rxSummary(formula = ~target_pickup_hour + target_pickup_dw + target_pickups, data = trainDS)
#Perform some visualizations
rxHistogram(~target_pickups, data = trainDS)
rxHistogram(~target_pickups|target_pickup_dw, data = trainDS)
#build model
F <- "target_pickups ~ target_pickup_hour + target_pickup_dw + pickups_hour_lag_0 + pickups_hour_lag_1 +
pickups_hour_lag_2 + pickups_hour_lag_3 + pickups_hour_lag_4 + pickups_hour_lag_5 +
pickups_hour_lag_6 + pickups_hour_lag_7 + pickups_date_lag_1 + pickups_date_lag_2 +
pickups_date_lag_3 + pickups_date_lag_4 + pickups_date_lag_5 + pickups_date_lag_6 +
  pickups_date_lag_7"
lm <- rxLinMod(as.formula(F), data = trainDS)
summary(lm)
#Score the model
#Training data
scoreTrainTable <- "pickups_score_train"
scoreTrainDS <- RxSqlServerData(table = scoreTrainTable,
connectionString = sqlConnString, rowsPerRead = 5000 )
#Drop the destination table if exists
if (rxSqlServerTableExists(scoreTrainTable))
rxSqlServerDropTable(scoreTrainTable)
#Score the training data
rxPredict(modelObject = lm,
data = trainDS,
outData = scoreTrainDS,
predVarNames = "forecasted_pickups",
writeModelVars = FALSE,
extraVarsToWrite = c("id","target_pickups"),
overwrite = TRUE)
#Testing data
scoreTestTable <- "pickups_score_test"
scoreTestDS <- RxSqlServerData(table = scoreTestTable,
connectionString = sqlConnString, rowsPerRead = 5000 )
#Drop the destination table if exists
if (rxSqlServerTableExists(scoreTestTable))
rxSqlServerDropTable(scoreTestTable)
#Score the test data
rxPredict(modelObject = lm,
data = testDS,
outData = scoreTestDS,
predVarNames = "forecasted_pickups",
writeModelVars = FALSE,
extraVarsToWrite = c("id","target_pickups"),
overwrite = TRUE)
#Serialize the model and save it in SQL
lm.df <- data.frame(model=as.raw(serialize(lm, connection=NULL)))
| /pwTaxi.R | no_license | jayman54/pwTaxi | R | false | false | 3,414 | r |
#set up variables
sqlConnString <- "Driver=SQL Server;Server=JrsSql2016e;Database=taxi;Uid=RServicesUser;Pwd=RServicesUser"
trainTable <- "v_pickups_train"
testTable <- "v_pickups_test"
#Column information
ccColInfo <- list(
target_pickup_hour = list(
type = "factor",
levels = as.character(0:23)),
target_pickup_dw = list(
type = "factor",
levels = as.character(1:7),
newLevels = c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"))
)
#Create the data set
trainDS <- RxSqlServerData(connectionString = sqlConnString,
table = trainTable, rowsPerRead = 5000, colInfo = ccColInfo)
testDS <- RxSqlServerData(connectionString = sqlConnString,
table = testTable, rowsPerRead = 5000, colInfo = ccColInfo)
#Get some basic information on the table
rxGetVarInfo(data = trainDS)
rxGetVarInfo(data = testDS)
#Need a local cache directory
sqlShareDir <- paste("c:\\AllShare\\", Sys.getenv("USERNAME"), sep="")
dir.create(sqlShareDir, recursive = TRUE)
#Set the compute context
sqlCompute <- RxInSqlServer(connectionString = sqlConnString, shareDir = sqlShareDir,
wait = TRUE, consoleOutput = TRUE)
# Set the compute context to compute in SQL Server
rxSetComputeContext(sqlCompute)
#rxSetComputeContext(computeContext = 'local')
#Display summary statistics
rxSummary(formula = ~target_pickup_hour + target_pickup_dw + target_pickups, data = trainDS)
#Perform some visualizations
rxHistogram(~target_pickups, data = trainDS)
rxHistogram(~target_pickups|target_pickup_dw, data = trainDS)
#build model
F <- "target_pickups ~ target_pickup_hour + target_pickup_dw + pickups_hour_lag_0 + pickups_hour_lag_1 +
pickups_hour_lag_2 + pickups_hour_lag_3 + pickups_hour_lag_4 + pickups_hour_lag_5 +
pickups_hour_lag_6 + pickups_hour_lag_7 + pickups_date_lag_1 + pickups_date_lag_2 +
pickups_date_lag_3 + pickups_date_lag_4 + pickups_date_lag_5 + pickups_date_lag_6 +
  pickups_date_lag_7"
lm <- rxLinMod(as.formula(F), data = trainDS)
summary(lm)
#Score the model
#Training data
scoreTrainTable <- "pickups_score_train"
scoreTrainDS <- RxSqlServerData(table = scoreTrainTable,
connectionString = sqlConnString, rowsPerRead = 5000 )
#Drop the destination table if exists
if (rxSqlServerTableExists(scoreTrainTable))
rxSqlServerDropTable(scoreTrainTable)
#Score the training data
rxPredict(modelObject = lm,
data = trainDS,
outData = scoreTrainDS,
predVarNames = "forecasted_pickups",
writeModelVars = FALSE,
extraVarsToWrite = c("id","target_pickups"),
overwrite = TRUE)
#Testing data
scoreTestTable <- "pickups_score_test"
scoreTestDS <- RxSqlServerData(table = scoreTestTable,
connectionString = sqlConnString, rowsPerRead = 5000 )
#Drop the destination table if exists
if (rxSqlServerTableExists(scoreTestTable))
rxSqlServerDropTable(scoreTestTable)
#Score the test data
rxPredict(modelObject = lm,
data = testDS,
outData = scoreTestDS,
predVarNames = "forecasted_pickups",
writeModelVars = FALSE,
extraVarsToWrite = c("id","target_pickups"),
overwrite = TRUE)
#Serialize the model and save it in SQL
lm.df <- data.frame(model=as.raw(serialize(lm, connection=NULL)))
|
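To check how well the model above generalises, one option is to pull the scored test table back into a local data frame and compute error metrics. This is a hedged sketch that assumes the RevoScaleR session and the scoreTestDS data source created in the script; rxImport is used only to materialise the rows locally.

rxSetComputeContext("local")
scored_test <- rxImport(scoreTestDS)
rmse <- sqrt(mean((scored_test$target_pickups - scored_test$forecasted_pickups)^2, na.rm = TRUE))
mae  <- mean(abs(scored_test$target_pickups - scored_test$forecasted_pickups), na.rm = TRUE)
cat("Test RMSE:", rmse, " MAE:", mae, "\n")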
# Load Libraries ----------------------------------------------------------
library(marmap)
library(lattice)
# Map Buoys ---------------------------------------------------------------
# read in xyz data
socal <- read.bathy(xyz = "socal_2.xyz", header = FALSE, sep = " ")
# initial map of bathy data
plot(socal,image = TRUE, drawlabels = TRUE)
# create data frame with the locations of the two buoys to plot on the map
station <- paste("station", c(46086, 46221), sep = " ")
x <- c(-118.052, -118.634)
y <- c(32.499, 33.855)
z <- c("red", "green")
buoy_location <- data.frame(x, y, z, station)
# plot both buoys on the map
points(buoy_location$x, buoy_location$y, pch = 21, col = "black",
bg = as.character(buoy_location$z), cex = 2)
legend(x = "topright",
legend = c("San Clemente Basin (46086)", "Santa Monica Bay (46221)"),
pch = 19,
col = c("red", "green"),
cex = .60)
# plot depth profile from san clemente basin buoy to shore
trsect <- get.transect(socal, -118, 32, -117, 33, distance = TRUE)
marmap::plotProfile(trsect)
# 3d plot of socal region
wireframe(unclass(socal),
shade = TRUE,
aspect = c(1, 0.1),
zlab = "", xlab = "", ylab = "",
scales = list(draw = FALSE))
| /Buoy_Bathymetry_Maps.R | no_license | camiel1/Masters_Capstone | R | false | false | 1,299 | r |
# Load Libraries ----------------------------------------------------------
library(marmap)
library(lattice)
# Map Buoys ---------------------------------------------------------------
# read in xyz data
socal <- read.bathy(xyz = "socal_2.xyz", header = FALSE, sep = " ")
# initial map of bathy data
plot(socal,image = TRUE, drawlabels = TRUE)
# create data frame with the locations of the two buoys to plot on the map
station <- paste("station", c(46086, 46221), sep = " ")
x <- c(-118.052, -118.634)
y <- c(32.499, 33.855)
z <- c("red", "green")
buoy_location <- data.frame(x, y, z, station)
# plot both buoys on the map
points(buoy_location$x, buoy_location$y, pch = 21, col = "black",
bg = as.character(buoy_location$z), cex = 2)
legend(x = "topright",
legend = c("San Clemente Basin (46086)", "Santa Monica Bay (46221)"),
pch = 19,
col = c("red", "green"),
cex = .60)
# plot depth profile from san clemente basin buoy to shore
trsect <- get.transect(socal, -118, 32, -117, 33, distance = TRUE)
marmap::plotProfile(trsect)
# 3d plot of socal region
wireframe(unclass(socal),
shade = TRUE,
aspect = c(1, 0.1),
zlab = "", xlab = "", ylab = "",
scales = list(draw = FALSE))
|
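A small extension of the map above, assuming the marmap objects socal and buoy_location are still in the workspace: get.depth() can report the charted depth directly under each buoy without clicking on the map.

buoy_depths <- get.depth(socal, x = buoy_location$x, y = buoy_location$y, locator = FALSE)
buoy_depths   # lon, lat and depth (m) for stations 46086 and 46221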
#' Field type
#'
#' Test whether a variable is of the required 'field type'.
#'
#'
#' @param d When used in a validation rule, a bare (unquoted) name of a variable.
#' Otherwise a vector of class \code{character}. Coerced to character as
#' necessary.
#' @param ft \code{[character]} Field type.
#' @param exceptions \code{[character]} vector of acceptable values, beyond
#' \code{"Numeric"} or \code{"NumericWithDecimals"}.
#'
#' @section Details:
#' The sets of 'Alphabetic' and 'Alphanumeric' characters are determined by
#' the POSIX named ranges \code{"[:alpha:]"} respectively \code{"[:alnum:]"}.
#' The interpretation of these character ranges depends on the current \code{locale},
#' see \code{\link[base]{regex}}. Numeric values are those that can be coerced
#' to integer or are in the list of \code{exceptions}. Acceptable NumericWithDecimals
#' are numbers that have at least a single decimal after the decimal separator
#' '\code{.}' (it is not required to have a number before it).
#'
#'
#'
#' @family validation-functions
#' @references
#' \itemize{
#' \item{\href{../doc/20180202_maintypes.pdf}{Main types of validation rules for ESS data}: FDT}
#' \item{ \href{https://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap09.html}{
#' POSIX regular expressions}. The Open Group base specifications Issue 6.}
#' }
#'
#'
#' @examples
#' data(FDTdat)
#'
#' # Using FDT with 'validate'
#' library(validate)
#' rules <- validator(FDT(OBS_VALUE, ft="Numeric", exceptions="NA")==TRUE)
#' cf <- confront(FDTdat, rules)
#' summary(cf)
#' as.data.frame(cf)
#'
#' # Using FDT directly
#' FDT(FDTdat$OBS_VALUE, ft="Numeric", exceptions="NA")
#'
#'
#'
#' @export
FDT <- function(d
, ft = c("Alphabetic","Alphanumeric","Numeric","NumericWithDecimals")
, exceptions = "NA" ){
ft <- match.arg(ft)
d <- as.character(d)
switch(ft
, "Alphabetic" = grepl("^[[:alpha:]]*$", as.character(d)) | is.na(d)
, "Alphanumeric" = grepl("^[[:alnum:]]*$", as.character(d)) | is.na(d)
, "Numeric" = d %in% exceptions | !is.na(suppressWarnings(as.integer(d)))
, "NumericWithDecimals" = d %in% exceptions | grepl("^[0-9]*\\.[0-9]+$",d)
)
}
| /pkg/R/FDT.R | permissive | SNStatComp/GenericValidationRules | R | false | false | 2,195 | r | #' Field type
#'
#' Test whether a variable is of the required 'field type'.
#'
#'
#' @param d When used in a validation rule, a bare (unquoted) name of a variable.
#' Otherwise a vector of class \code{character}. Coerced to character as
#' necessary.
#' @param ft \code{[character]} Field type.
#' @param exceptions \code{[character]} vector of acceptable values, beyond
#' \code{"Numeric"} or \code{"NumericWithDecimals"}.
#'
#' @section Details:
#' The sets of 'Alphabetic' and 'Alphanumeric' characters are determined by
#' the POSIX named ranges \code{"[:alpha:]"} respectively \code{"[:alnum:]"}.
#' The interpretation of these character ranges depends on the current \code{locale},
#' see \code{\link[base]{regex}}. Numeric values are those that can be coerced
#' to integer or are in the list of \code{exceptions}. Acceptable NumericWithDecimals
#' are numbers that have at least a single decimal after the decimal separator
#' '\code{.}' (it is not required to have a number before it).
#'
#'
#'
#' @family validation-functions
#' @references
#' \itemize{
#' \item{\href{../doc/20180202_maintypes.pdf}{Main types of validation rules for ESS data}: FDT}
#' \item{ \href{https://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap09.html}{
#' POSIX regular expressions}. The Open Group base specifications Issue 6.}
#' }
#'
#'
#' @examples
#' data(FDTdat)
#'
#' # Using FDT with 'validate'
#' library(validate)
#' rules <- validator(FDT(OBS_VALUE, ft="Numeric", exceptions="NA")==TRUE)
#' cf <- confront(FDTdat, rules)
#' summary(cf)
#' as.data.frame(cf)
#'
#' # Using FDT directly
#' FDT(FDTdat$OBS_VALUE, ft="Numeric", exceptions="NA")
#'
#'
#'
#' @export
FDT <- function(d
, ft = c("Alphabetic","Alphanumeric","Numeric","NumericWithDecimals")
, exceptions = "NA" ){
ft <- match.arg(ft)
d <- as.character(d)
switch(ft
, "Alphabetic" = grepl("^[[:alpha:]]*$", as.character(d)) | is.na(d)
, "Alphanumeric" = grepl("^[[:alnum:]]*$", as.character(d)) | is.na(d)
, "Numeric" = d %in% exceptions | !is.na(suppressWarnings(as.integer(d)))
, "NumericWithDecimals" = d %in% exceptions | grepl("^[0-9]*\\.[0-9]+$",d)
)
}
|
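A quick illustration of the field-type checks implemented above, using a made-up character vector (no package data required):

x <- c("abc", "A1", "12", "3.5", "NA")
FDT(x, ft = "Alphabetic")                               # letters only; the literal "NA" string also passes
FDT(x, ft = "Numeric", exceptions = "NA")               # anything coercible to integer, plus listed exceptions
FDT(x, ft = "NumericWithDecimals", exceptions = "NA")   # requires at least one digit after the '.'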
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/substr.R
\name{substr}
\alias{substr}
\title{fun_name}
\usage{
substr(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
Another substituted function.
}
| /man/substr.Rd | no_license | granatb/RapeR | R | false | true | 239 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/substr.R
\name{substr}
\alias{substr}
\title{fun_name}
\usage{
substr(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
Another substituted function.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AssignTransitService.R
\name{estimateTransitModel}
\alias{estimateTransitModel}
\title{Estimate public transit model parameters.}
\usage{
estimateTransitModel()
}
\value{
A list containing the following elements:
BusEquivalents_df: factors to convert revenue miles by mode into bus
equivalents,
UZABusEqRevMile_df: data on bus equivalent revenue miles by urbanized area,
VehMiFactors_df: factors to convert revenue miles by mode into vehicle miles
by mode.
}
\description{
\code{estimateTransitModel} estimates transit model parameters.
}
\details{
This function estimates transit model parameters from 2015 National Transit
Database information on transit agencies and service levels. The function
calculates factors for converting annual revenue miles by transit mode to
total bus-equivalent revenue miles. It also calculates factors to convert
revenue miles by mode into vehicle miles by mode.
}
| /sources/modules/VETransportSupply/man/estimateTransitModel.Rd | permissive | rickdonnelly/VisionEval-Dev | R | false | true | 977 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AssignTransitService.R
\name{estimateTransitModel}
\alias{estimateTransitModel}
\title{Estimate public transit model parameters.}
\usage{
estimateTransitModel()
}
\value{
A list containing the following elements:
BusEquivalents_df: factors to convert revenue miles by mode into bus
equivalents,
UZABusEqRevMile_df: data on bus equivalent revenue miles by urbanized area,
VehMiFactors_df: factors to convert revenue miles by mode into vehicle miles
by mode.
}
\description{
\code{estimateTransitModel} estimates transit model parameters.
}
\details{
This function estimates transit model parameters from 2015 National Transit
Database information on transit agencies and service levels. The function
calculates factors for converting annual revenue miles by transit mode to
total bus-equivalent revenue miles. It also calculates factors to convert
revenue miles by mode into vehicle miles by mode.
}
|
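A minimal call sketch based on the return-value description above; it assumes the package's estimation data are available, and the element names are taken from the documentation.

transit_params <- estimateTransitModel()
str(transit_params$BusEquivalents_df)   # revenue-mile to bus-equivalent factors by mode
str(transit_params$VehMiFactors_df)     # revenue-mile to vehicle-mile factors by mode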
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotElicitedPrior.R
\name{PlotElicitedPrior}
\alias{PlotElicitedPrior}
\title{Plot elicited priors.}
\usage{
PlotElicitedPrior(elicited.prior, ylab = "Density", ...)
}
\arguments{
\item{elicited.prior}{output from the \code{\link{ElicitBeta}} function.}
\item{ylab}{a title for the y axis.}
\item{...}{further arguments passed to the \code{\link{curve}} function.}
}
\value{
Density plot.
}
\description{
Plots a Beta distribution parameterized with the \code{\link{ElicitBeta}} function.
}
\examples{
elicited.prior <- ElicitBeta(mode = 0.3, maximum = 0.5, confidence = 0.95)
PlotElicitedPrior(elicited.prior, col = 'red')
}
| /man/PlotElicitedPrior.Rd | no_license | leb-fmvz-usp/ptb | R | false | true | 705 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotElicitedPrior.R
\name{PlotElicitedPrior}
\alias{PlotElicitedPrior}
\title{Plot elicited priors.}
\usage{
PlotElicitedPrior(elicited.prior, ylab = "Density", ...)
}
\arguments{
\item{elicited.prior}{output from the \code{\link{ElicitBeta}} function.}
\item{ylab}{a title for the y axis.}
\item{...}{further arguments passed to the \code{\link{curve}} function.}
}
\value{
Density plot.
}
\description{
Plots a Beta distribution parameterized with the \code{\link{ElicitBeta}} function.
}
\examples{
elicited.prior <- ElicitBeta(mode = 0.3, maximum = 0.5, confidence = 0.95)
PlotElicitedPrior(elicited.prior, col = 'red')
}
|
library(checkmate)
library(testthat)
context("gs_voronoi")
test_that("output is valid geometry", {
coords <- data.frame(x = c(40, 70, 70, 50),
y = c(40, 40, 60, 70))
window <- data.frame(x = c(0, 80),
y = c(0, 80))
input <- gs_point(anchor = coords, window = window)
# create voronoi from data.frame
output <- gs_voronoi(anchor = coords, window = window)
expect_class(output, classes = "geom")
expect_true(output@type == "polygon")
expect_data_frame(output@feature$geometry, any.missing = FALSE, nrows = 4, ncols = 2)
# create voronoi from geom
output <- gs_voronoi(anchor = input)
expect_class(output, classes = "geom")
expect_true(output@type == "polygon")
expect_data_frame(output@feature$geometry, any.missing = FALSE, nrows = 4, ncols = 2)
})
test_that("Error if arguments have wrong value", {
coords <- data.frame(x = c(40, 70, 70, 50),
y = c(40, 40, 60, 70))
window <- data.frame(x = c(0, 80),
y = c(0, 80))
input <- gs_point(anchor = coords, window = window)
expect_error(gs_voronoi(anchor = "bla"))
expect_error(gs_voronoi(window = "bla"))
expect_error(gs_voronoi(sketch = "bla"))
})
| /geometr/tests/testthat/test_gs_voronoi.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 1,230 | r | library(checkmate)
library(testthat)
context("gs_voronoi")
test_that("output is valid geometry", {
coords <- data.frame(x = c(40, 70, 70, 50),
y = c(40, 40, 60, 70))
window <- data.frame(x = c(0, 80),
y = c(0, 80))
input <- gs_point(anchor = coords, window = window)
# create voronoi from data.frame
output <- gs_voronoi(anchor = coords, window = window)
expect_class(output, classes = "geom")
expect_true(output@type == "polygon")
expect_data_frame(output@feature$geometry, any.missing = FALSE, nrows = 4, ncols = 2)
# create voronoi from geom
output <- gs_voronoi(anchor = input)
expect_class(output, classes = "geom")
expect_true(output@type == "polygon")
expect_data_frame(output@feature$geometry, any.missing = FALSE, nrows = 4, ncols = 2)
})
test_that("Error if arguments have wrong value", {
coords <- data.frame(x = c(40, 70, 70, 50),
y = c(40, 40, 60, 70))
window <- data.frame(x = c(0, 80),
y = c(0, 80))
input <- gs_point(anchor = coords, window = window)
expect_error(gs_voronoi(anchor = "bla"))
expect_error(gs_voronoi(window = "bla"))
expect_error(gs_voronoi(sketch = "bla"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_fpkm_files_from_excel_data_functions.R
\name{get_updated_existing_annotations}
\alias{get_updated_existing_annotations}
\title{Get updated annotations objects (sample_info & ref_annot_to_expr_rds) of FungiExpreZ.}
\usage{
get_updated_existing_annotations(new_sample_info = sample_info)
}
\arguments{
\item{new_sample_info}{a tibble of SRA annotations associated with `new_data_matrix`. This object can be generated using the function [FungiExpresZ::prepare_annotations_for_fungiexpres_data_matrix()].}
}
\value{
a list of two -> updated_ref_annot_to_expr_rds, updated_sample_info.
}
\description{
Get updated annotations objects (sample_info & ref_annot_to_expr_rds) of FungiExpreZ.
}
\details{
This function returns a list of two objects -
1) updated_ref_annot_to_expr_rds: This is the updated version of existing FungiExpresZ object -> `ref_annot_to_expr_rds`.
2) updated_sample_info: This is the updated version of existing FungiExpresZ object -> `sample_info`.
Both these objects will be added R/sysdata.rda.
Internally it uses FungiExpresZ:::sample_info and FungiExpresZ:::ref_annot_to_expr_rds to get access to the existing annotations.
}
| /man/get_updated_existing_annotations.Rd | permissive | cparsania/FungiExpresZ | R | false | true | 1,232 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_fpkm_files_from_excel_data_functions.R
\name{get_updated_existing_annotations}
\alias{get_updated_existing_annotations}
\title{Get updated annotations objects (sample_info & ref_annot_to_expr_rds) of FungiExpreZ.}
\usage{
get_updated_existing_annotations(new_sample_info = sample_info)
}
\arguments{
\item{new_sample_info}{a tibble of SRA annotations associated with `new_data_matrix`. This object can be generated using the function [FungiExpresZ::prepare_annotations_for_fungiexpres_data_matrix()].}
}
\value{
a list of two -> updated_ref_annot_to_expr_rds, updated_sample_info.
}
\description{
Get updated annotations objects (sample_info & ref_annot_to_expr_rds) of FungiExpreZ.
}
\details{
This function returns a list of two objects -
1) updated_ref_annot_to_expr_rds: This is the updated version of existing FungiExpresZ object -> `ref_annot_to_expr_rds`.
2) updated_sample_info: This is the updated version of existing FungiExpresZ object -> `sample_info`.
Both these objects will be added R/sysdata.rda.
Internally it uses FungiExpresZ:::sample_info and FungiExpresZ:::ref_annot_to_expr_rds to get access to the existing annotations.
}
|
library(dplyr)
set.seed(11123)
instrumental = as.data.frame(rnorm(1000,mean = 1,sd = sqrt(2)))
colnames(instrumental) = "x_i"
instrumental = instrumental %>%
# mutate(w_star = (1/sqrt(2*pi*1)*exp(-(x_i-0)^2/(2*1)))/(1/sqrt(2*pi*2)*exp(-(x_i-1)^2/(2*2)))) %>%
mutate(w_star = dnorm(x_i,mean=0,sd = 1)/dnorm(x_i,mean=1,sd=sqrt(2)))%>%
mutate(w_star_weighted = w_star/sum(w_star)) %>%
mutate(h_x_weighted_standardized = x_i*w_star_weighted) %>%
mutate(h_x_weighted_unstandardized = x_i*w_star)
sum(instrumental$h_x_weighted_standardized)
mean(instrumental$h_x_weighted_unstandardized)
var(instrumental$h_x_weighted_unstandardized)/length(instrumental$h_x_weighted_unstandardized)
hist(instrumental$h_x_weighted_unstandardized)
hist(instrumental$h_x_weighted_standardized)
hist(rnorm(1000,0,1))
##Problem 4
mixture.dat = read.table("/Users/Yichen/Documents/JHU/Computational Statistics/Data/mixture.dat",header=TRUE)
y = mixture.dat$y
n = 10000
test = NULL
f = function(x){prod(x*dnorm(y,7,0.5) + (1-x)*dnorm(y,10,0.5))}
R = function(x_t,x_star){f(x_star)*g(x_t,x_star)/(f(x_t)*g(x_star,x_t))}
g = function(x,x_t){dnorm(x,x_t,.01)}
test[1] = 15
for(i in 1:n){
x_t = test[i]
x_star = rnorm(1,x_t,.01)
r = R(x_t,x_star)
if(r>=1){
test[i+1] = x_star
} else{
u = runif(1,0,1)
if(u<r){
test[i+1] = x_star
} else{
test[i+1] = x_t
}
}
}
plot(test[200:(n+1)], type = "l")
title("Sample path for N(x_t,.01^2) Proposal Dist.")
hist(test[200:(n+1)])
##Part 2
unif_values = NULL
n=10000
f = function(x){prod(x*dnorm(y,7,0.5) + (1-x)*dnorm(y,10,0.5))}
R_unif = function(x_t,x_star){f(x_star)*g_unif(x_t)/(f(x_t)*g_unif(x_star))}
g_unif = function(x){dunif(x,0,20)}
unif_values[1] = 7
for(i in 1:n){
x_t = unif_values[i]
x_star = runif(1,0,20)
r = R_unif(x_t,x_star)
if(r>=1){
unif_values[i+1] = x_star
} else{
u = runif(1,0,1)
if(u<r){
unif_values[i+1] = x_star
} else{
unif_values[i+1] = x_t
}
}
}
plot(unif_values[200:(n+1)], type="l", ylim = c(6,22))
hist(unif_values[200:(n+1)])
| /Module 5 HW/module 5.R | no_license | yichen-dong/computational_statistics | R | false | false | 2,138 | r | library(dplyr)
set.seed(11123)
instrumental = as.data.frame(rnorm(1000,mean = 1,sd = sqrt(2)))
colnames(instrumental) = "x_i"
instrumental = instrumental %>%
# mutate(w_star = (1/sqrt(2*pi*1)*exp(-(x_i-0)^2/(2*1)))/(1/sqrt(2*pi*2)*exp(-(x_i-1)^2/(2*2)))) %>%
mutate(w_star = dnorm(x_i,mean=0,sd = 1)/dnorm(x_i,mean=1,sd=sqrt(2)))%>%
mutate(w_star_weighted = w_star/sum(w_star)) %>%
mutate(h_x_weighted_standardized = x_i*w_star_weighted) %>%
mutate(h_x_weighted_unstandardized = x_i*w_star)
sum(instrumental$h_x_weighted_standardized)
mean(instrumental$h_x_weighted_unstandardized)
var(instrumental$h_x_weighted_unstandardized)/length(instrumental$h_x_weighted_unstandardized)
hist(instrumental$h_x_weighted_unstandardized)
hist(instrumental$h_x_weighted_standardized)
hist(rnorm(1000,0,1))
##Problem 4
mixture.dat = read.table("/Users/Yichen/Documents/JHU/Computational Statistics/Data/mixture.dat",header=TRUE)
y = mixture.dat$y
n = 10000
test = NULL
f = function(x){prod(x*dnorm(y,7,0.5) + (1-x)*dnorm(y,10,0.5))}
R = function(x_t,x_star){f(x_star)*g(x_t,x_t)/(f(x_t)*g(x_star,x_t))}
g = function(x,x_t){dnorm(x,x_t,.01)}
test[1] = 15
for(i in 1:n){
x_t = test[i]
x_star = rnorm(1,x_t,.01)
r = R(x_t,x_star)
if(r>=1){
test[i+1] = x_star
} else{
u = runif(1,0,1)
if(u<r){
test[i+1] = x_star
} else{
test[i+1] = x_t
}
}
}
plot(test[200:n+1], type = "l")
title("Sample path for N(x_t,.01^2) Proposal Dist.")
hist(test[200:n+1])
##Part 2
unif_values = NULL
n=10000
f = function(x){prod(x*dnorm(y,7,0.5) + (1-x)*dnorm(y,10,0.5))}
R_unif = function(x_t,x_star){f(x_star)*g_unif(x_t)/(f(x_t)*g_unif(x_star))}
g_unif = function(x){dunif(x,0,20)}
unif_values[1] = 7
for(i in 1:n){
x_t = unif_values[i]
x_star = runif(1,0,20)
r = R_unif(x_t,x_star)
if(r>=1){
unif_values[i+1] = x_star
} else{
u = runif(1,0,1)
if(u<r){
unif_values[i+1] = x_star
} else{
unif_values[i+1] = x_t
}
}
}
plot(unif_values[200:(n+1)], type="l", ylim = c(6,22))
hist(unif_values[200:(n+1)])
|
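A short diagnostic sketch for the Metropolis-Hastings runs above, assuming the chain vector test (and n) is still in the workspace; it reports the acceptance rate and a simple posterior summary for the mixing proportion after discarding burn-in.

burn  <- 200
chain <- test[(burn + 1):(n + 1)]
accept_rate <- mean(diff(test) != 0)            # share of proposals accepted
post_mean   <- mean(chain)
ci <- quantile(chain, c(0.025, 0.975))          # equal-tailed 95% credible interval
cat("acceptance rate:", round(accept_rate, 3), " posterior mean:", round(post_mean, 3), "\n")
ci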
#!/usr/bin/env Rscript
library('tidyverse')
library('argparser')
argv <- arg_parser("True and false positive / negative calculations on MonoVar.") %>%
add_argument("--gt",
help="file with ground truth genotypes (incl. path), contents: `CHROM\tPOS\tGT`") %>%
add_argument("--calls", help="file with calls for one single cell") %>%
add_argument("--out", help="output file") %>%
parse_args();
PED_GTs <- as_tibble( read_tsv( argv$gt, col_names = c("CHROM", "POS", "PED_GT") ) ) %>%
mutate( PED_GT = factor(PED_GT, levels = c("1/1", "0/1", "0/0", NA) ),
CHROM = factor(CHROM) )
annotated <- as_tibble(
read_tsv(
argv$calls,
skip = 1,
col_types = "ci__c",
col_names = c( "CHROM", "POS", "GT" ) ) ) %>%
mutate(
CHROM = factor(CHROM),
GT = factor(GT, levels = c("1/1", "0/1", "0/0", NA) ) ) %>%
right_join(PED_GTs, by = c("CHROM", "POS")) %>%
mutate(
monovar_alt_match = case_when(
is.na( PED_GT ) ~ "no_ground_truth",
is.na( GT ) & (PED_GT == "0/1" | PED_GT == "1/1") ~ "P_not_called",
is.na( GT ) & (PED_GT == "0/0") ~ "N_not_called",
(GT == "0/1" | GT == "1/1") & (PED_GT == "0/1" | PED_GT == "1/1") ~ "TP",
(GT == "0/1" | GT == "1/1") & (PED_GT == "0/0") ~ "FP",
GT == "0/0" & (PED_GT == "0/1" | PED_GT == "1/1") ~ "FN",
GT == "0/0" & (PED_GT == "0/0") ~ "TN"),
monovar_alt_match = factor( monovar_alt_match,
levels=c("TP", "FP", "FN", "TN", "P_not_called","N_not_called", "no_ground_truth") ) )
## ALT presence calculations
pn <- annotated %>%
count(monovar_alt_match) %>%
complete(monovar_alt_match) %>%
mutate(n=ifelse(is.na(n),0,n)) %>%
spread(monovar_alt_match, n )
write_tsv(pn, argv$out)
| /analysis_pipelines/scripts/monovar.P-N_stats.alt-calling.R | permissive | ProSolo/benchmarking_prosolo | R | false | false | 1,761 | r | #!/usr/bin/env Rscript
library('tidyverse')
library('argparser')
argv <- arg_parser("True and false positive / negative calculations on MonoVar.") %>%
add_argument("--gt",
help="file with ground truth genotypes (incl. path), contents: `CHROM\tPOS\tGT`") %>%
add_argument("--calls", help="file with calls for one single cell") %>%
add_argument("--out", help="output file") %>%
parse_args();
PED_GTs <- as_tibble( read_tsv( argv$gt, col_names = c("CHROM", "POS", "PED_GT") ) ) %>%
mutate( PED_GT = factor(PED_GT, levels = c("1/1", "0/1", "0/0", NA) ),
CHROM = factor(CHROM) )
annotated <- as_tibble(
read_tsv(
argv$calls,
skip = 1,
col_types = "ci__c",
col_names = c( "CHROM", "POS", "GT" ) ) ) %>%
mutate(
CHROM = factor(CHROM),
GT = factor(GT, levels = c("1/1", "0/1", "0/0", NA) ) ) %>%
right_join(PED_GTs, by = c("CHROM", "POS")) %>%
mutate(
monovar_alt_match = case_when(
is.na( PED_GT ) ~ "no_ground_truth",
is.na( GT ) & (PED_GT == "0/1" | PED_GT == "1/1") ~ "P_not_called",
is.na( GT ) & (PED_GT == "0/0") ~ "N_not_called",
(GT == "0/1" | GT == "1/1") & (PED_GT == "0/1" | PED_GT == "1/1") ~ "TP",
(GT == "0/1" | GT == "1/1") & (PED_GT == "0/0") ~ "FP",
GT == "0/0" & (PED_GT == "0/1" | PED_GT == "1/1") ~ "FN",
GT == "0/0" & (PED_GT == "0/0") ~ "TN"),
monovar_alt_match = factor( monovar_alt_match,
levels=c("TP", "FP", "FN", "TN", "P_not_called","N_not_called", "no_ground_truth") ) )
## ALT presence calculations
pn <- annotated %>%
count(monovar_alt_match) %>%
complete(monovar_alt_match) %>%
mutate(n=ifelse(is.na(n),0,n)) %>%
spread(monovar_alt_match, n )
write_tsv(pn, argv$out)
|
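The per-cell counts written out above can be turned into familiar summary rates; a sketch assuming the one-row pn data frame from the script (columns TP, FP, FN, TN):

with(pn, {
  sensitivity <- TP / (TP + FN)    # recall on ground-truth ALT sites
  specificity <- TN / (TN + FP)
  precision   <- TP / (TP + FP)
  data.frame(sensitivity, specificity, precision)
})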
Row <- read.table("household_power_consumption.txt", nrow = 1, header = TRUE, sep=";")
x <- as.POSIXct("2006-12-17 00:00:00")
y <- as.POSIXct("2007-02-01 00:00:00")
y - x
#46*24*60 = 66240
#66240+397 = 66637
Row <- read.table("household_power_consumption.txt", skip=66637, nrow = 1, header = TRUE, sep=";")
hpc <- read.table("household_power_consumption.txt", skip = 66637, nrow = 2880,
sep = ";", na.strings="?",
col.names = colnames(read.table("household_power_consumption.txt",
nrow = 1, header = TRUE, sep=";")))
for( i in 1:2) { hpc[, i] <- as.character(hpc[, i])}
for(i in 3:9){ hpc[, i] <- as.numeric(hpc[, i])}
hpc$Date_Time <- as.POSIXct(paste(hpc$Date,hpc$Time),format='%d/%m/%Y %H:%M:%S')
png(file="plot2.png", width = 480, height = 480)
with(hpc, plot(Date_Time, Global_active_power, type ="l", ylab="Global Active Power (kilowatts)", xlab =""))
dev.off() | /plot2.R | no_license | RashmShekar/ExData_Plotting1 | R | false | false | 936 | r | Row <- read.table("household_power_consumption.txt", nrow = 1, header = TRUE, sep=";")
x <- as.POSIXct("2006-12-17 00:00:00")
y <- as.POSIXct("2007-02-01 00:00:00")
y - x
#46*24*60 = 66240
#66240+397 = 66637
Row <- read.table("household_power_consumption.txt", skip=66637, nrow = 1, header = TRUE, sep=";")
hpc <- read.table("household_power_consumption.txt", skip = 66637, nrow = 2880,
sep = ";", na.strings="?",
col.names = colnames(read.table("household_power_consumption.txt",
nrow = 1, header = TRUE, sep=";")))
for( i in 1:2) { hpc[, i] <- as.character(hpc[, i])}
for(i in 3:9){ hpc[, i] <- as.numeric(hpc[, i])}
hpc$Date_Time <- as.POSIXct(paste(hpc$Date,hpc$Time),format='%d/%m/%Y %H:%M:%S')
png(file="plot2.png", width = 480, height = 480)
with(hpc, plot(Date_Time, Global_active_power, type ="l", ylab="Global Active Power (kilowatts)", xlab =""))
dev.off() |
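An alternative to the hand-computed skip/nrow arithmetic above is to read the whole file and filter on the Date column; a sketch assuming the same household_power_consumption.txt layout (dd/mm/yyyy dates, ';' separator, '?' for missing values).

hpc_all <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                      na.strings = "?", stringsAsFactors = FALSE)
hpc2 <- subset(hpc_all, Date %in% c("1/2/2007", "2/2/2007"))
hpc2$Date_Time <- as.POSIXct(paste(hpc2$Date, hpc2$Time), format = "%d/%m/%Y %H:%M:%S")
nrow(hpc2)   # should be 2880: two full days of minute-level readings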
# Data manipulation:
library(dplyr)
# 1R algorithm:
library(OneR)
# Visualizations:
library(ggplot2)
# Import the data
mushrooms <- read.csv("mushroom_data.csv")
# Check the structure of the data frame and observe the variables
str(mushrooms)
# As can be seen in the above command, the veil_type column only contains
# one value. This has no predictive power and can therefore be deleted.
mushrooms <- mushrooms %>% select(-veil_type)
# Transform and visualize class balance:
mushrooms %>%
group_by(poisonous) %>%
summarise(percentage_each = ((n() / nrow(mushrooms))*100)) %>%
ggplot(aes(x = poisonous, y = percentage_each, fill = poisonous)) +
geom_bar(stat = "identity", alpha = 0.85) +
theme_minimal() +
ggtitle("Current and Former Employee Reviews") +
theme(legend.position="none") +
ggtitle("Balance of classes in dataset")
# Inspect a variable:
table(mushrooms$stalk_root, mushrooms$poisonous)
# Split data into training and testing
train_index <- sample(nrow(mushrooms), nrow(mushrooms) * .8, replace = FALSE)
train <- mushrooms[train_index,]
test <- mushrooms[-train_index,]
test_labels <- test %>% select(poisonous)
test <- test %>% select(-poisonous)
# Using the OneR algorithm, predict the dependent variable 'poisonous' from
# all independent variables in the 'train' dataset and display model
# rules using verbose = T
model <- OneR(poisonous ~., data = train, verbose = T)
# View model summary
summary(model)
# predict 'test' dataset values using trained model
prediction <- predict(model, test)
# View confusion matrix to evaluate model accuracy and error types
eval_model(prediction, test_labels)
# Further learning: Use other classification algorithms from the 'caret'
# package to improve model accuracy / decrease number of Type II errors
#####################################################
# OPTIONAL ##########################################
# View all rows containing erroneous predictions ####
#####################################################
# prediction_vs_actual <- cbind(test_labels, prediction)
# colnames(prediction_vs_actual) <- c("actual", "predicted")
# prediction_vs_actual %>% filter(actual != predicted)
#####################################################
#####################################################
| /1R_tutorial.R | no_license | AaronStearns/OneR-tutorial | R | false | false | 2,304 | r | # Data manipulation:
library(dplyr)
# 1R algorithm:
library(OneR)
# Visualizations:
library(ggplot2)
# Import the data
mushrooms <- read.csv("mushroom_data.csv")
# Check the structure of the data frame and observe the variables
str(mushrooms)
# As can be seen in the above command, the veil_type column only contains
# one value. This has no predictive power and can therefore be deleted.
mushrooms <- mushrooms %>% select(-veil_type)
# Transform and visualize class balance:
mushrooms %>%
group_by(poisonous) %>%
summarise(percentage_each = ((n() / nrow(mushrooms))*100)) %>%
ggplot(aes(x = poisonous, y = percentage_each, fill = poisonous)) +
geom_bar(stat = "identity", alpha = 0.85) +
theme_minimal() +
ggtitle("Current and Former Employee Reviews") +
theme(legend.position="none") +
ggtitle("Balance of classes in dataset")
# Inspect a variable:
table(mushrooms$stalk_root, mushrooms$poisonous)
# Split data into training and testing
train_index <- sample(nrow(mushrooms), nrow(mushrooms) * .8, replace = FALSE)
train <- mushrooms[train_index,]
test <- mushrooms[-train_index,]
test_labels <- test %>% select(poisonous)
test <- test %>% select(-poisonous)
# Using the OneR algorithm, predict the dependent variable 'poisonous' from
# all independent variables in the 'train' dataset and display model
# rules using verbose = T
model <- OneR(poisonous ~., data = train, verbose = T)
# View model summary
summary(model)
# predict 'test' dataset values using trained model
prediction <- predict(model, test)
# View confusion matrix to evaluate model accuracy and error types
eval_model(prediction, test_labels)
# Further learning: Use other classification algorithms from the 'caret'
# package to improve model accuracy / decrease number of Type II errors
#####################################################
# OPTIONAL ##########################################
# View all rows containing erroneous predictions ####
#####################################################
# prediction_vs_actual <- cbind(test_labels, prediction)
# colnames(prediction_vs_actual) <- c("actual", "predicted")
# prediction_vs_actual %>% filter(actual != predicted)
#####################################################
#####################################################
|
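Following the 'further learning' note above, the same evaluation could be run with caret; a hedged sketch assuming the caret package is installed and the prediction and test_labels objects from the script exist.

library(caret)
cm <- confusionMatrix(data = factor(prediction), reference = factor(test_labels$poisonous))
cm$overall["Accuracy"]
cm$table   # rows = predicted class, columns = actual class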
library(ipdw)
### Name: ipdwInterp
### Title: Inverse Distance Weighting with custom distances
### Aliases: ipdwInterp
### ** Examples
spdf <- data.frame(rnorm(2))
xy <- data.frame(x = c(4, 2), y = c(8, 4))
coordinates(spdf) <- xy
m <- matrix(NA, 10, 10)
costras <- raster(m, xmn = 0, xmx = ncol(m), ymn = 0, ymx = nrow(m))
# introduce spatial gradient
costras[] <- runif(ncell(costras), min = 1, max = 10)
for(i in 1:nrow(costras)){
costras[i,] <- costras[i,] + i
costras[,i] <- costras[,i] + i
}
rstack <- pathdistGen(spdf, costras, 100, progressbar = FALSE)
final.raster <- ipdwInterp(spdf, rstack, paramlist = c("rnorm.2."), overlapped = TRUE)
plot(final.raster)
plot(spdf, add = TRUE)
| /data/genthat_extracted_code/ipdw/examples/ipdwInterp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 705 | r | library(ipdw)
### Name: ipdwInterp
### Title: Inverse Distance Weighting with custom distances
### Aliases: ipdwInterp
### ** Examples
spdf <- data.frame(rnorm(2))
xy <- data.frame(x = c(4, 2), y = c(8, 4))
coordinates(spdf) <- xy
m <- matrix(NA, 10, 10)
costras <- raster(m, xmn = 0, xmx = ncol(m), ymn = 0, ymx = nrow(m))
# introduce spatial gradient
costras[] <- runif(ncell(costras), min = 1, max = 10)
for(i in 1:nrow(costras)){
costras[i,] <- costras[i,] + i
costras[,i] <- costras[,i] + i
}
rstack <- pathdistGen(spdf, costras, 100, progressbar = FALSE)
final.raster <- ipdwInterp(spdf, rstack, paramlist = c("rnorm.2."), overlapped = TRUE)
plot(final.raster)
plot(spdf, add = TRUE)
|
layout <- function(n.plots) {
c <- r <- trunc(sqrt(n.plots))
if (c < 1)
r <- c <- 1
if (c * r < n.plots)
c <- c + 1
if (c * r < n.plots)
r <- r + 1
c(r, c)
}
| /R/utilities.R | permissive | ices-tools-dev/sfdMaps | R | false | false | 200 | r | layout <- function(n.plots) {
c <- r <- trunc(sqrt(n.plots))
if (c < 1)
r <- c <- 1
if (c * r < n.plots)
c <- c + 1
if (c * r < n.plots)
r <- r + 1
c(r, c)
}
|
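A usage sketch for the grid helper above: the returned c(rows, cols) pair is meant to feed par(mfrow = ...) when drawing several panels (note that the helper shadows graphics::layout()).

layout(1)    # 1 1
layout(5)    # 2 3 -- smallest grid that holds 5 plots
layout(10)   # 3 4
op <- par(mfrow = layout(5))
for (i in 1:5) plot(rnorm(50), main = paste("panel", i))
par(op)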
bayesDem.go <- function(wpp.year.tfr=wpp.year.default, wpp.year.e0=wpp.year.tfr,
wpp.year.pop=wpp.year.tfr) {
quit.bayesdem <- function(h, ...) {
dispose(main.win)
#detach(gtkdb)
}
options(guiToolkit=guiToolkit.default)
#
path = system.file("images",package="bayesDem")
wait.window <- gwindow('Bayesian Demographer Initialization', width=400, height=100,
parent=c(500, 300), visible=FALSE)
set.widget.bgcolor(wait.window, "white")
#glabel('Starting Bayesian Demographer ...', container=wait.window)
gimage(file.path(path, 'startpyramid.png'), , container=wait.window)
visible(wait.window) <- TRUE
# main window
main.win <<- gwindow(paste('Bayesian Demographer v.',
packageVersion("bayesDem")), visible=FALSE, parent=c(400,50))
main.g <- ggroup(horizontal=FALSE, container=main.win)
# notebook with tabs
main.notebook <- bDem.gnotebook(container=main.g, expand=TRUE)
# TFR prediction tab
tfr.pred <- ggroup(label="<span weight='bold' color='blue'>Projection of Total Fertility Rate</span>",
markup=TRUE, horizontal=FALSE, container=main.notebook)
tfrPredTab(tfr.pred, main.win, wpp.year=wpp.year.tfr)
# Life expectancy
e0w <- ggroup(label="<span weight='bold' color='blue'>Projection of Life Expectancy</span>",
markup=TRUE, horizontal=FALSE, container=main.notebook)
e0PredTab(e0w, main.win, wpp.year=wpp.year.e0)
# Population Prediction
pop.w <- ggroup(label="<span weight='bold' color='blue'>Population Projection</span>",
markup=TRUE, horizontal=FALSE, container=main.notebook)
popPredTab(pop.w, main.win, wpp.year=wpp.year.pop)
svalue(main.notebook)<- 1
# Quit Button
button.group <- ggroup(container=main.g, expand=TRUE)
gbutton('Quit', handler=quit.bayesdem, container=button.group)
addSpring(button.group)
label <- glabel('BayesPop group\nUniversity of Washington', container=button.group)
font(label) <- c(style='normal', family='serif', size='xx-small')
#addHandlerRightclick(main.win, handler=function(h, ...) focus(h$obj) <- TRUE)
addHandlerFocus(main.win, handler=function(h, ...) focus(h$obj) <- TRUE)
#addHandlerDragMotion(main.win, handler=function(h, ...) focus(h$obj) <- TRUE)
dispose(wait.window)
visible(main.win) <- TRUE
# This is necessary in order for bayesTFR etc to find RGtk2 functions
# gtkdb <- list(gtkEventsPending=RGtk2::gtkEventsPending, gtkMainIteration=RGtk2::gtkMainIteration)
# attach(gtkdb)
do.call('require', list('RGtk2'))
}
| /R/bayesDem.R | no_license | raquelrguima/bayesDem | R | false | false | 2,452 | r | bayesDem.go <- function(wpp.year.tfr=wpp.year.default, wpp.year.e0=wpp.year.tfr,
wpp.year.pop=wpp.year.tfr) {
quit.bayesdem <- function(h, ...) {
dispose(main.win)
#detach(gtkdb)
}
options(guiToolkit=guiToolkit.default)
#
path = system.file("images",package="bayesDem")
wait.window <- gwindow('Bayesian Demographer Initialization', width=400, height=100,
parent=c(500, 300), visible=FALSE)
set.widget.bgcolor(wait.window, "white")
#glabel('Starting Bayesian Demographer ...', container=wait.window)
gimage(file.path(path, 'startpyramid.png'), , container=wait.window)
visible(wait.window) <- TRUE
# main window
main.win <<- gwindow(paste('Bayesian Demographer v.',
packageVersion("bayesDem")), visible=FALSE, parent=c(400,50))
main.g <- ggroup(horizontal=FALSE, container=main.win)
# notebook with tabs
main.notebook <- bDem.gnotebook(container=main.g, expand=TRUE)
# TFR prediction tab
tfr.pred <- ggroup(label="<span weight='bold' color='blue'>Projection of Total Fertility Rate</span>",
markup=TRUE, horizontal=FALSE, container=main.notebook)
tfrPredTab(tfr.pred, main.win, wpp.year=wpp.year.tfr)
# Life expectancy
e0w <- ggroup(label="<span weight='bold' color='blue'>Projection of Life Expectancy</span>",
markup=TRUE, horizontal=FALSE, container=main.notebook)
e0PredTab(e0w, main.win, wpp.year=wpp.year.e0)
# Population Prediction
pop.w <- ggroup(label="<span weight='bold' color='blue'>Population Projection</span>",
markup=TRUE, horizontal=FALSE, container=main.notebook)
popPredTab(pop.w, main.win, wpp.year=wpp.year.pop)
svalue(main.notebook)<- 1
# Quit Button
button.group <- ggroup(container=main.g, expand=TRUE)
gbutton('Quit', handler=quit.bayesdem, container=button.group)
addSpring(button.group)
label <- glabel('BayesPop group\nUniversity of Washington', container=button.group)
font(label) <- c(style='normal', family='serif', size='xx-small')
#addHandlerRightclick(main.win, handler=function(h, ...) focus(h$obj) <- TRUE)
addHandlerFocus(main.win, handler=function(h, ...) focus(h$obj) <- TRUE)
#addHandlerDragMotion(main.win, handler=function(h, ...) focus(h$obj) <- TRUE)
dispose(wait.window)
visible(main.win) <- TRUE
# This is necessary in order for bayesTFR etc to find RGtk2 functions
# gtkdb <- list(gtkEventsPending=RGtk2::gtkEventsPending, gtkMainIteration=RGtk2::gtkMainIteration)
# attach(gtkdb)
do.call('require', list('RGtk2'))
}
|