content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BDFM.R \name{Cppbdfm} \alias{Cppbdfm} \title{MCMC Routine for Bayesian Dynamic Factor Models} \usage{ Cppbdfm(B, Bp, Jb, lam_B, q, nu_q, H, Hp, lam_H, R, nu_r, Y, freq, LD, Ystore = FALSE, store_idx = 0, reps = 1000, burn = 500, Loud = FALSE) } \arguments{ \item{B}{initial guess for B in transition equation} \item{Bp}{prior for B} \item{Jb}{Helper matrix for transition equation, identity matrix if uniform frequency} \item{lam_B}{prior tightness for B (additive)} \item{q}{initial guess for q in the transition equation} \item{nu_q}{prior "degrees of freedom" for inverse-Whishart prior for q (additive, prior scale is fixed so that increasing nu_q shrinks the variance towards zero)} \item{H}{initial guess for H in the trasition equation} \item{Hp}{prior for H} \item{lam_H}{prior tightness for H (additive)} \item{R}{initial guess for diagonal elements of R in the transition equation, entered as a vector} \item{nu_r}{prior deg. of freedom for elements of R, entered as a vector (additive, prior scale is fixed so that increasing nu_r[j] shrinks the variance of shocks to series j towards zero)} \item{Y}{Input data. Data must be scaled and centered prior to estimation if desired.} \item{freq}{vector, number of high frequency periods in an observation} \item{LD}{vector, 0 for level data and 1 for differenced data} \item{Ystore}{T/F, should the distribution of Y be stored} \item{store_idx, }{if Ystore is TRUE, index of which observed series to store. Note C++ uses zero indexing (i.e. subtract 1 from the R index value)} \item{reps}{number of repetitions for MCMC sampling} \item{burn}{number of iterations to burn in MCMC sampling} \item{Loud}{print status of function during evaluation.} } \description{ \code{Cppbdfm} is the core C++ function for estimating a linear-Gaussian Bayesain dynamic factor model by MCMC methods using Durbin and Koopman's disturbance smoother. 
This function may be called directly by advanced users. The only dependencies are the Armadillo (\url{http://arma.sourceforge.net/}) linear algebra library for C++ and the packages needed for interfacing with R (\code{\link{Rcpp}} and \code{\link{RcppArmadillo}}). }
/man/CppBDFM.Rd
permissive
palexbg/bdfm
R
false
true
2,254
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BDFM.R \name{Cppbdfm} \alias{Cppbdfm} \title{MCMC Routine for Bayesian Dynamic Factor Models} \usage{ Cppbdfm(B, Bp, Jb, lam_B, q, nu_q, H, Hp, lam_H, R, nu_r, Y, freq, LD, Ystore = FALSE, store_idx = 0, reps = 1000, burn = 500, Loud = FALSE) } \arguments{ \item{B}{initial guess for B in transition equation} \item{Bp}{prior for B} \item{Jb}{Helper matrix for transition equation, identity matrix if uniform frequency} \item{lam_B}{prior tightness for B (additive)} \item{q}{initial guess for q in the transition equation} \item{nu_q}{prior "degrees of freedom" for inverse-Whishart prior for q (additive, prior scale is fixed so that increasing nu_q shrinks the variance towards zero)} \item{H}{initial guess for H in the trasition equation} \item{Hp}{prior for H} \item{lam_H}{prior tightness for H (additive)} \item{R}{initial guess for diagonal elements of R in the transition equation, entered as a vector} \item{nu_r}{prior deg. of freedom for elements of R, entered as a vector (additive, prior scale is fixed so that increasing nu_r[j] shrinks the variance of shocks to series j towards zero)} \item{Y}{Input data. Data must be scaled and centered prior to estimation if desired.} \item{freq}{vector, number of high frequency periods in an observation} \item{LD}{vector, 0 for level data and 1 for differenced data} \item{Ystore}{T/F, should the distribution of Y be stored} \item{store_idx, }{if Ystore is TRUE, index of which observed series to store. Note C++ uses zero indexing (i.e. subtract 1 from the R index value)} \item{reps}{number of repetitions for MCMC sampling} \item{burn}{number of iterations to burn in MCMC sampling} \item{Loud}{print status of function during evaluation.} } \description{ \code{Cppbdfm} is the core C++ function for estimating a linear-Gaussian Bayesain dynamic factor model by MCMC methods using Durbin and Koopman's disturbance smoother. 
This function may be called directly by advanced users. The only dependencies are the Armadillo (\url{http://arma.sourceforge.net/}) linear algebra library for C++ and the packages needed for interfacing with R (\code{\link{Rcpp}} and \code{\link{RcppArmadillo}}). }
#' Download ED inputs #' #' Download and unzip common ED inputs from a public Open Science Framework #' (OSF) repository (https://osf.io/b6umf). Inputs include the Olson Global #' Ecosystems (OGE) database (`oge2OLD`) and the `chd` and `dgd` databases. #' #' The total download size around 28 MB. #' #' @param directory Target directory for unzipping files. Will be created if it #' doesn't exist. #' @return `TRUE`, invisibly #' @export download_edi <- function(directory) { download_link <- "https://files.osf.io/v1/resources/b6umf/providers/osfstorage/5a948ea691b689000fa2a588/?zip=" target_file <- paste0(directory, ".zip") PEcAn.utils::download_file(download_link, target_file) unzip(target_file, exdir = directory) invisible(TRUE) }
/models/ed/R/download_edi.R
permissive
infotroph/pecan
R
false
false
752
r
#' Download ED inputs #' #' Download and unzip common ED inputs from a public Open Science Framework #' (OSF) repository (https://osf.io/b6umf). Inputs include the Olson Global #' Ecosystems (OGE) database (`oge2OLD`) and the `chd` and `dgd` databases. #' #' The total download size around 28 MB. #' #' @param directory Target directory for unzipping files. Will be created if it #' doesn't exist. #' @return `TRUE`, invisibly #' @export download_edi <- function(directory) { download_link <- "https://files.osf.io/v1/resources/b6umf/providers/osfstorage/5a948ea691b689000fa2a588/?zip=" target_file <- paste0(directory, ".zip") PEcAn.utils::download_file(download_link, target_file) unzip(target_file, exdir = directory) invisible(TRUE) }
setwd('/exports/igmm/eddie/GenScotDepression/shen/ActiveProject/ImagingProject/ABCD_MDD_brain/') source('FUNs/PheWAS_style_p_plot.R') source('FUNs/wholeB_correction.R') # Settings load('result/i.Main_result/YouthDepree_WM_subcor_FS_noSocialCov.RData') result.mainmodel=filter(result.YouthDepre.region.covWholeB, factor=='KSADS.Depressive_symptoms_ever.p') load('result/x.Supplementary_materials/meanYouthDepree_WM_subcor_FS_noSocialCov.RData') ls.category=read.table('data/p_plot_dat/ls.category.txt',header=F,stringsAsFactors = F,sep = '\t') ls.category$V2=gsub('\\\\n','\\\n',ls.category$V2) ls.FS_label=read.table('data/result_table_data/ls.Freesurfer_region_replacement_labels.csv',header = T, sep='\t',stringsAsFactors = F) ls.DTI_label=read.table('data/result_table_data/ls.DTI_region_replacement_labels.csv',header = T, sep='\t',stringsAsFactors = F) ls.label=rbind(ls.FS_label,ls.DTI_label) # Preprocess results ------------------------------------------------------ # Re-correct p values whole-brain level - main model TargetResult = result.mainmodel ls.dep = c('vol.APARC','sa.APARC','sulc.APARC','thk.APARC','vol.ASEG', 'dtiFA.FiberAtlas','dtiMD.FiberAtlas') ls.factor = unique(TargetResult$factor) result.wholeBcorrected.mainmodel=wholeB_correction(TargetResult = TargetResult,ls.factor = ls.factor,ls.dep = ls.dep) rm(TargetResult) ls.dep.sig=result.wholeBcorrected.mainmodel$dependent[result.wholeBcorrected.mainmodel$p.corrected<0.05] # Re-correct p values whole-brain level - diffs model TargetResult = result.YouthDepre.region.covWholeB[result.YouthDepre.region.covWholeB$dependent %in% ls.dep.sig,] ls.dep = c('vol.APARC','sa.APARC','sulc.APARC','thk.APARC','vol.ASEG', 'dtiFA.FiberAtlas','dtiMD.FiberAtlas') ls.factor = unique(TargetResult$factor) result.wholeBcorrected.diffs=wholeB_correction(TargetResult = TargetResult,ls.factor = ls.factor,ls.dep = ls.dep) rm(TargetResult) targetdata = result.wholeBcorrected.diffs # Add category 
-------------------------------------------------------- tmp.result.table=targetdata tmp.result.table$category='' category.input=ls.category add_category <- function(ls.capture,category.name,targetMat,col.tocap){ loc.tocap=grep(ls.capture,targetMat[,col.tocap]) targetMat[loc.tocap,'category']=category.name return(targetMat) } for (i in 1:nrow(category.input)){ if (i==1){ tmp.result.table=add_category(category.input[i,1],category.input[i,2], targetMat = tmp.result.table, col.tocap = 'dependent') }else{ tmp.result.table=add_category(category.input[i,1],category.input[i,2], targetMat = tmp.result.table, col.tocap = 'dependent') } } # Add label --------------------------------------------------------------- if (sum(!is.na(ls.label))>0){ for(k in 1:nrow(ls.label)){ tmp.result.table$dependent[grep(ls.label[k,1],tmp.result.table$dependent)]=ls.label[k,2] } } tmp.result.table$dependent=gsub('\\\\n',' ',tmp.result.table$dependent) tmp.result.table$category=gsub('\\n',' ',tmp.result.table$category) tmp.result.table$dependent=paste0(tmp.result.table$category,' in ',tmp.result.table$dependent) # Reorder based on betas -------------------------------------------------- reord_graphdat=function(ls.cate,targetdata){ for (c in ls.cate){ temp.chunk=filter(targetdata,category==c) temp.chunk=temp.chunk[order(-temp.chunk$beta),] temp.length=nrow(temp.chunk) temp.chunk$l.category=rep(99999,nrow(temp.chunk)) temp.chunk$l.category[round(temp.length/2)]=c temp.chunk$l.category[temp.chunk$l.category==99999]=' ' if (c==ls.cate[1]){new.seq.targetdata=temp.chunk}else{ new.seq.targetdata=rbind(new.seq.targetdata,temp.chunk) } } new.seq.targetdata$ord=1:nrow(new.seq.targetdata) return(new.seq.targetdata) } tmp.result.table=reord_graphdat(ls.cate = unique(tmp.result.table$category),tmp.result.table) tmp.result.table$category= factor(tmp.result.table$category, levels = unique(tmp.result.table$category)) # Make the figure --------------------------------------------------------- 
fig.dat=tmp.result.table fig.dat$sig = 99999 fig.dat$sig[fig.dat$p.value<0.05]='*' fig.dat$sig[fig.dat$sig==99999]='' cl.theme=c('orangered1','slategray3','orange1', 'mediumpurple1','royalblue3','lightseagreen','maroon', 'salmon2','palevioletred2','olivedrab','darkslategray3') fig= ggplot(fig.dat, aes(x=reorder(dependent,-ord), y=beta,fill=category)) + geom_bar(stat="identity", width = 0.5, position=position_dodge())+ geom_errorbar(aes(x=reorder(dependent,-ord), ymin=Lower_95CI, ymax=Upper_95CI), width=0.2, colour="grey", alpha=0.9, size=0.4)+ scale_fill_manual(values = cl.theme)+ theme(axis.title.y=element_blank(), #axis.text.y=element_blank(), #axis.ticks.y=element_blank(), #legend.position = 'none', panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey"), axis.line.x = element_line(colour = 'grey',size=1), axis.line.y = element_blank(), plot.title=element_text(lineheight = 1,face='bold',hjust=0.5))+ geom_hline(yintercept=0 , color = "grey", size=0.5)+ geom_text(aes(label=sig), colour="black", hjust=15, size=3)+ xlab('Phenotype')+ ylab('Standardised effect size')+ guides(fill=guide_legend(title="Category"))+ coord_flip() # Save file --------------------------------------------------------------- png("Figs/SuppleInfo/meanSymptoms_beta_bargraph.png", width = 13, height = 4.5, units = 'in', res = 300) fig # Make plot dev.off()
/script/SupplementaryMaterials/Fig.meanSymptoms_beta_bargraph.R
permissive
yunyu118/ABCD_MDD_brain
R
false
false
5,944
r
setwd('/exports/igmm/eddie/GenScotDepression/shen/ActiveProject/ImagingProject/ABCD_MDD_brain/') source('FUNs/PheWAS_style_p_plot.R') source('FUNs/wholeB_correction.R') # Settings load('result/i.Main_result/YouthDepree_WM_subcor_FS_noSocialCov.RData') result.mainmodel=filter(result.YouthDepre.region.covWholeB, factor=='KSADS.Depressive_symptoms_ever.p') load('result/x.Supplementary_materials/meanYouthDepree_WM_subcor_FS_noSocialCov.RData') ls.category=read.table('data/p_plot_dat/ls.category.txt',header=F,stringsAsFactors = F,sep = '\t') ls.category$V2=gsub('\\\\n','\\\n',ls.category$V2) ls.FS_label=read.table('data/result_table_data/ls.Freesurfer_region_replacement_labels.csv',header = T, sep='\t',stringsAsFactors = F) ls.DTI_label=read.table('data/result_table_data/ls.DTI_region_replacement_labels.csv',header = T, sep='\t',stringsAsFactors = F) ls.label=rbind(ls.FS_label,ls.DTI_label) # Preprocess results ------------------------------------------------------ # Re-correct p values whole-brain level - main model TargetResult = result.mainmodel ls.dep = c('vol.APARC','sa.APARC','sulc.APARC','thk.APARC','vol.ASEG', 'dtiFA.FiberAtlas','dtiMD.FiberAtlas') ls.factor = unique(TargetResult$factor) result.wholeBcorrected.mainmodel=wholeB_correction(TargetResult = TargetResult,ls.factor = ls.factor,ls.dep = ls.dep) rm(TargetResult) ls.dep.sig=result.wholeBcorrected.mainmodel$dependent[result.wholeBcorrected.mainmodel$p.corrected<0.05] # Re-correct p values whole-brain level - diffs model TargetResult = result.YouthDepre.region.covWholeB[result.YouthDepre.region.covWholeB$dependent %in% ls.dep.sig,] ls.dep = c('vol.APARC','sa.APARC','sulc.APARC','thk.APARC','vol.ASEG', 'dtiFA.FiberAtlas','dtiMD.FiberAtlas') ls.factor = unique(TargetResult$factor) result.wholeBcorrected.diffs=wholeB_correction(TargetResult = TargetResult,ls.factor = ls.factor,ls.dep = ls.dep) rm(TargetResult) targetdata = result.wholeBcorrected.diffs # Add category 
-------------------------------------------------------- tmp.result.table=targetdata tmp.result.table$category='' category.input=ls.category add_category <- function(ls.capture,category.name,targetMat,col.tocap){ loc.tocap=grep(ls.capture,targetMat[,col.tocap]) targetMat[loc.tocap,'category']=category.name return(targetMat) } for (i in 1:nrow(category.input)){ if (i==1){ tmp.result.table=add_category(category.input[i,1],category.input[i,2], targetMat = tmp.result.table, col.tocap = 'dependent') }else{ tmp.result.table=add_category(category.input[i,1],category.input[i,2], targetMat = tmp.result.table, col.tocap = 'dependent') } } # Add label --------------------------------------------------------------- if (sum(!is.na(ls.label))>0){ for(k in 1:nrow(ls.label)){ tmp.result.table$dependent[grep(ls.label[k,1],tmp.result.table$dependent)]=ls.label[k,2] } } tmp.result.table$dependent=gsub('\\\\n',' ',tmp.result.table$dependent) tmp.result.table$category=gsub('\\n',' ',tmp.result.table$category) tmp.result.table$dependent=paste0(tmp.result.table$category,' in ',tmp.result.table$dependent) # Reorder based on betas -------------------------------------------------- reord_graphdat=function(ls.cate,targetdata){ for (c in ls.cate){ temp.chunk=filter(targetdata,category==c) temp.chunk=temp.chunk[order(-temp.chunk$beta),] temp.length=nrow(temp.chunk) temp.chunk$l.category=rep(99999,nrow(temp.chunk)) temp.chunk$l.category[round(temp.length/2)]=c temp.chunk$l.category[temp.chunk$l.category==99999]=' ' if (c==ls.cate[1]){new.seq.targetdata=temp.chunk}else{ new.seq.targetdata=rbind(new.seq.targetdata,temp.chunk) } } new.seq.targetdata$ord=1:nrow(new.seq.targetdata) return(new.seq.targetdata) } tmp.result.table=reord_graphdat(ls.cate = unique(tmp.result.table$category),tmp.result.table) tmp.result.table$category= factor(tmp.result.table$category, levels = unique(tmp.result.table$category)) # Make the figure --------------------------------------------------------- 
fig.dat=tmp.result.table fig.dat$sig = 99999 fig.dat$sig[fig.dat$p.value<0.05]='*' fig.dat$sig[fig.dat$sig==99999]='' cl.theme=c('orangered1','slategray3','orange1', 'mediumpurple1','royalblue3','lightseagreen','maroon', 'salmon2','palevioletred2','olivedrab','darkslategray3') fig= ggplot(fig.dat, aes(x=reorder(dependent,-ord), y=beta,fill=category)) + geom_bar(stat="identity", width = 0.5, position=position_dodge())+ geom_errorbar(aes(x=reorder(dependent,-ord), ymin=Lower_95CI, ymax=Upper_95CI), width=0.2, colour="grey", alpha=0.9, size=0.4)+ scale_fill_manual(values = cl.theme)+ theme(axis.title.y=element_blank(), #axis.text.y=element_blank(), #axis.ticks.y=element_blank(), #legend.position = 'none', panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "grey"), axis.line.x = element_line(colour = 'grey',size=1), axis.line.y = element_blank(), plot.title=element_text(lineheight = 1,face='bold',hjust=0.5))+ geom_hline(yintercept=0 , color = "grey", size=0.5)+ geom_text(aes(label=sig), colour="black", hjust=15, size=3)+ xlab('Phenotype')+ ylab('Standardised effect size')+ guides(fill=guide_legend(title="Category"))+ coord_flip() # Save file --------------------------------------------------------------- png("Figs/SuppleInfo/meanSymptoms_beta_bargraph.png", width = 13, height = 4.5, units = 'in', res = 300) fig # Make plot dev.off()
#' @export #' @title Structural Multiple Equilibria and Structural Transition Policy #' @aliases gemstStructuralMultipleEquilibria_2_2 #' @description Some examples of structural multiple equilibria and structural transition policy. #' In these examples it is assumed that the firm has a structural production function (see Li, Fu, 2020), e.g. #' #' structural_function(last.output, c(0.3, 0.4), 1, 2) * x1^0.35 * x2^0.65 #' #' wherein last.output is the output of the firm in the previous period. #' @param ... arguments to be passed to the function sdm2. #' @references Li Wu, Fu Caihui (2020) A Simulation Study on the Economic Structure Transition Policy. Journal of Shanghai University (Social Sciences). 37(2), pp: 33-45. (In Chinese) #' @examples #' \donttest{ #' dst.firm <- node_new("output", #' type = "CD", alpha = 1, #' beta = c(0.35, 0.65), #' "prod", "lab" #' ) #' #' dst.consumer <- node_new("utility", #' type = "CD", alpha = 1, #' beta = c(0.4, 0.6), #' "prod", "lab" #' ) #' #' policy.technology <- function(time, state, A) { #' # state$last.z[1] is the previous output. 
#' A[[1]]$alpha <- structural_function(state$last.z[1], c(0.3, 0.4), 1, 2) #' } #' #' policy.tax <- function(time, state) { #' if ((time >= 15) && state$last.z[1] < 0.4) { #' state$S[2, 2] <- 0.8 #' state$S[2, 1] <- 0.2 #' } else { #' state$S[2, 2] <- 1 #' state$S[2, 1] <- 0 #' } #' #' state #' } #' #' f <- function(z0 = c(0.1, 1), #' policy = list( #' policy.technology, #' policyMarketClearingPrice #' )) { #' ge <- sdm2( #' A = list(dst.firm, dst.consumer), #' B = matrix(c( #' 1, 0, #' 0, 0 #' ), 2, 2, TRUE), #' S0Exg = matrix(c( #' NA, NA, #' NA, 1 #' ), 2, 2, TRUE), #' names.commodity = c("prod", "lab"), #' names.agent = c("firm", "consumer"), #' numeraire = "lab", #' z0 = z0, #' p0 = c(1, 1), #' maxIteration = 1, #' numberOfPeriods = 30, #' policy = policy, #' ts = TRUE #' ) #' #' matplot(ge$ts.z, type = "o", pch = 20) #' invisible(ge) #' } #' #' geLow <- f() #' geLow$z #' #' geHigh <- f(z0 = c(0.5, 1)) #' geHigh$z #' #' f(policy = list( #' policy.technology, #' policy.tax, #' policyMarketClearingPrice #' )) #' #' #### structural transition: disequilibrium path and #' ## a market-clearing path (instantaneous equilibrium path) #' dst.firm <- node_new("output", #' type = "CD", alpha = 5, #' beta = c(0.5, 0.5), #' "prod", "lab" #' ) #' #' dst.consumer <- node_new("utility", #' type = "Leontief", a = 1, #' "prod" #' ) #' #' policy.technology <- function(time, state, A) { #' # state$last.z[1] is last output. 
#' A[[1]]$alpha <- structural_function(state$last.z[1], c(15, 20), 5, 15) #' return(NULL) #' } #' #' policy.tax <- function(time, state) { #' if ((time >= 100) && (time <= 109)) { #' state$S[2, 2] <- 0.6 #' state$S[2, 1] <- 0.4 #' } else { #' state$S[2, 2] <- 1 #' state$S[2, 1] <- 0 #' } #' #' state #' } #' #' f <- function(z0 = c(1, 1), #' p0 = c(1, 1), #' policy = policy.technology) { #' ge <- sdm2( #' A = list(dst.firm, dst.consumer), #' B = matrix(c( #' 1, 0, #' 0, 1 #' ), 2, 2, TRUE), #' S0Exg = matrix(c( #' NA, NA, #' NA, 1 #' ), 2, 2, TRUE), #' names.commodity = c("prod", "lab"), #' names.agent = c("firm", "consumer"), #' numeraire = "lab", #' z0 = z0, #' p0 = p0, #' maxIteration = 1, #' numberOfPeriods = 200, #' policy = policy, #' priceAdjustmentVelocity = 0.4, #' ts = TRUE #' ) #' #' matplot(ge$ts.z, type = "l", pch = 20) #' invisible(ge) #' } #' #' geLow <- f() #' geLow$z #' #' geHigh <- f(z0 = c(18, 1), p0 = c(1, 9)) #' geHigh$z #' #' ## structural transition: a disequilibrium path #' f(policy = list( #' policy.technology, #' policy.tax #' ))$z #' #' #' ## structural transition: a market-clearing path #' f(policy = list( #' policy.technology, #' policy.tax, #' policyMarketClearingPrice #' ))$z #' #' ## structural transition through foreign aid #' policy.foreign_aid <- function(time, state) { #' if ((time >= 100) && (time <= 109)) { #' state$S[2, 2] <- 3 #' } else { #' state$S[2, 2] <- 1 #' } #' #' state #' } #' #' f(policy = list( #' function(time, state, A) { # technology policy #' # state$last.z[1] is last output. #' A[[1]]$alpha <- structural_function(state$last.z[1], c(30, 35), 5, 15) #' }, #' policy.foreign_aid #' )) #' #' #### another example #' dst.firm <- node_new("prod", #' type = "CD", alpha = 2, #' beta = c(0.5, 0.5), #' "prod", "lab" #' ) #' #' dst.consumer <- node_new("util", #' type = "Leontief", a = 1, #' "prod" #' ) #' #' policy.technology <- function(time, state, A) { #' # state$last.z[1] is the previous output. 
#' A[[1]]$alpha <- structural_function(state$last.z[1], c(220, 250), 2, 4) #' } #' #' policy.tax <- function(time, state) { #' if ((time >= 15) && state$last.z[1] < 240) { #' state$S[2, 2] <- 80 #' state$S[2, 1] <- 20 #' } else { #' state$S[2, 2] <- 100 #' state$S[2, 1] <- 0 #' } #' #' state #' } #' #' ge <- sdm2( #' A = list(dst.firm, dst.consumer), #' B = matrix(c( #' 1, 0, #' 0, 0 #' ), 2, 2, TRUE), #' S0Exg = matrix(c( #' NA, NA, #' NA, 100 #' ), 2, 2, TRUE), #' names.commodity = c("prod", "lab"), #' names.agent = c("firm", "consumer"), #' numeraire = "lab", #' z0 = c(100, 100), #' maxIteration = 1, #' numberOfPeriods = 30, #' policy = list( #' policy.technology, #' policy.tax, #' policyMarketClearingPrice #' ), #' ts = TRUE #' ) #' #' matplot(ge$ts.z, type = "b", pch = 20) #' } gemstStructuralMultipleEquilibria_2_2 <- function(...) sdm2(...)
/R/gemstStructuralMultipleEquilibria_2_2.R
no_license
cran/GE
R
false
false
6,085
r
#' @export #' @title Structural Multiple Equilibria and Structural Transition Policy #' @aliases gemstStructuralMultipleEquilibria_2_2 #' @description Some examples of structural multiple equilibria and structural transition policy. #' In these examples it is assumed that the firm has a structural production function (see Li, Fu, 2020), e.g. #' #' structural_function(last.output, c(0.3, 0.4), 1, 2) * x1^0.35 * x2^0.65 #' #' wherein last.output is the output of the firm in the previous period. #' @param ... arguments to be passed to the function sdm2. #' @references Li Wu, Fu Caihui (2020) A Simulation Study on the Economic Structure Transition Policy. Journal of Shanghai University (Social Sciences). 37(2), pp: 33-45. (In Chinese) #' @examples #' \donttest{ #' dst.firm <- node_new("output", #' type = "CD", alpha = 1, #' beta = c(0.35, 0.65), #' "prod", "lab" #' ) #' #' dst.consumer <- node_new("utility", #' type = "CD", alpha = 1, #' beta = c(0.4, 0.6), #' "prod", "lab" #' ) #' #' policy.technology <- function(time, state, A) { #' # state$last.z[1] is the previous output. 
#' A[[1]]$alpha <- structural_function(state$last.z[1], c(0.3, 0.4), 1, 2) #' } #' #' policy.tax <- function(time, state) { #' if ((time >= 15) && state$last.z[1] < 0.4) { #' state$S[2, 2] <- 0.8 #' state$S[2, 1] <- 0.2 #' } else { #' state$S[2, 2] <- 1 #' state$S[2, 1] <- 0 #' } #' #' state #' } #' #' f <- function(z0 = c(0.1, 1), #' policy = list( #' policy.technology, #' policyMarketClearingPrice #' )) { #' ge <- sdm2( #' A = list(dst.firm, dst.consumer), #' B = matrix(c( #' 1, 0, #' 0, 0 #' ), 2, 2, TRUE), #' S0Exg = matrix(c( #' NA, NA, #' NA, 1 #' ), 2, 2, TRUE), #' names.commodity = c("prod", "lab"), #' names.agent = c("firm", "consumer"), #' numeraire = "lab", #' z0 = z0, #' p0 = c(1, 1), #' maxIteration = 1, #' numberOfPeriods = 30, #' policy = policy, #' ts = TRUE #' ) #' #' matplot(ge$ts.z, type = "o", pch = 20) #' invisible(ge) #' } #' #' geLow <- f() #' geLow$z #' #' geHigh <- f(z0 = c(0.5, 1)) #' geHigh$z #' #' f(policy = list( #' policy.technology, #' policy.tax, #' policyMarketClearingPrice #' )) #' #' #### structural transition: disequilibrium path and #' ## a market-clearing path (instantaneous equilibrium path) #' dst.firm <- node_new("output", #' type = "CD", alpha = 5, #' beta = c(0.5, 0.5), #' "prod", "lab" #' ) #' #' dst.consumer <- node_new("utility", #' type = "Leontief", a = 1, #' "prod" #' ) #' #' policy.technology <- function(time, state, A) { #' # state$last.z[1] is last output. 
#' A[[1]]$alpha <- structural_function(state$last.z[1], c(15, 20), 5, 15) #' return(NULL) #' } #' #' policy.tax <- function(time, state) { #' if ((time >= 100) && (time <= 109)) { #' state$S[2, 2] <- 0.6 #' state$S[2, 1] <- 0.4 #' } else { #' state$S[2, 2] <- 1 #' state$S[2, 1] <- 0 #' } #' #' state #' } #' #' f <- function(z0 = c(1, 1), #' p0 = c(1, 1), #' policy = policy.technology) { #' ge <- sdm2( #' A = list(dst.firm, dst.consumer), #' B = matrix(c( #' 1, 0, #' 0, 1 #' ), 2, 2, TRUE), #' S0Exg = matrix(c( #' NA, NA, #' NA, 1 #' ), 2, 2, TRUE), #' names.commodity = c("prod", "lab"), #' names.agent = c("firm", "consumer"), #' numeraire = "lab", #' z0 = z0, #' p0 = p0, #' maxIteration = 1, #' numberOfPeriods = 200, #' policy = policy, #' priceAdjustmentVelocity = 0.4, #' ts = TRUE #' ) #' #' matplot(ge$ts.z, type = "l", pch = 20) #' invisible(ge) #' } #' #' geLow <- f() #' geLow$z #' #' geHigh <- f(z0 = c(18, 1), p0 = c(1, 9)) #' geHigh$z #' #' ## structural transition: a disequilibrium path #' f(policy = list( #' policy.technology, #' policy.tax #' ))$z #' #' #' ## structural transition: a market-clearing path #' f(policy = list( #' policy.technology, #' policy.tax, #' policyMarketClearingPrice #' ))$z #' #' ## structural transition through foreign aid #' policy.foreign_aid <- function(time, state) { #' if ((time >= 100) && (time <= 109)) { #' state$S[2, 2] <- 3 #' } else { #' state$S[2, 2] <- 1 #' } #' #' state #' } #' #' f(policy = list( #' function(time, state, A) { # technology policy #' # state$last.z[1] is last output. #' A[[1]]$alpha <- structural_function(state$last.z[1], c(30, 35), 5, 15) #' }, #' policy.foreign_aid #' )) #' #' #### another example #' dst.firm <- node_new("prod", #' type = "CD", alpha = 2, #' beta = c(0.5, 0.5), #' "prod", "lab" #' ) #' #' dst.consumer <- node_new("util", #' type = "Leontief", a = 1, #' "prod" #' ) #' #' policy.technology <- function(time, state, A) { #' # state$last.z[1] is the previous output. 
#' A[[1]]$alpha <- structural_function(state$last.z[1], c(220, 250), 2, 4) #' } #' #' policy.tax <- function(time, state) { #' if ((time >= 15) && state$last.z[1] < 240) { #' state$S[2, 2] <- 80 #' state$S[2, 1] <- 20 #' } else { #' state$S[2, 2] <- 100 #' state$S[2, 1] <- 0 #' } #' #' state #' } #' #' ge <- sdm2( #' A = list(dst.firm, dst.consumer), #' B = matrix(c( #' 1, 0, #' 0, 0 #' ), 2, 2, TRUE), #' S0Exg = matrix(c( #' NA, NA, #' NA, 100 #' ), 2, 2, TRUE), #' names.commodity = c("prod", "lab"), #' names.agent = c("firm", "consumer"), #' numeraire = "lab", #' z0 = c(100, 100), #' maxIteration = 1, #' numberOfPeriods = 30, #' policy = list( #' policy.technology, #' policy.tax, #' policyMarketClearingPrice #' ), #' ts = TRUE #' ) #' #' matplot(ge$ts.z, type = "b", pch = 20) #' } gemstStructuralMultipleEquilibria_2_2 <- function(...) sdm2(...)
# connector # #' Takes two differences and judges which connecting phrase should be used to compare the differences. # #' @param difference1 The first difference in the comparison #' @param difference2 The second difference in the comparison #' @examples #' connector(1, 2) #retunrs ", this is consistent with a" (as both values are positive) #' @export #' connector <- function(difference1, difference2){ tryCatch({ # Check that only one value is passed to format_perc() at a time and raise # an error otherwise. if (length(difference1) > 1 | length(difference2) > 1) { stop( "Input to connector should be two arguments, each a single value. ", call. = FALSE ) } else if (is.null(difference1) | is.null(difference2)) { # Check that input is not null, and raise an error if it is stop("Input to connector is NULL", call. = FALSE) } else if (is.na(difference1) | is.na(difference2)) { # Check that input is not null, and raise and error if it is stop("Input to connector is NA", call. = FALSE) } else if (!is.numeric(difference1) | !is.numeric(difference1)) { stop("Input to connector is not a character", call. = FALSE) } else { # If checks of function pass, then run the main body of the function, and # return and output. # BODY -------------------------------------------------------------------- if (difference1 * difference2 < 0) { print(", however there has been") } else { print(", this is consistent with") } } }, warning = function(war){ warning(war) }, error = function(err){ err$message <- paste("While producing the correct connector phrase", err, sep = " ") stop(err) }) }
/R/connector.R
no_license
uk-gov-mirror/moj-analytical-services.mojrap
R
false
false
1,754
r
# connector # #' Takes two differences and judges which connecting phrase should be used to compare the differences. # #' @param difference1 The first difference in the comparison #' @param difference2 The second difference in the comparison #' @examples #' connector(1, 2) #retunrs ", this is consistent with a" (as both values are positive) #' @export #' connector <- function(difference1, difference2){ tryCatch({ # Check that only one value is passed to format_perc() at a time and raise # an error otherwise. if (length(difference1) > 1 | length(difference2) > 1) { stop( "Input to connector should be two arguments, each a single value. ", call. = FALSE ) } else if (is.null(difference1) | is.null(difference2)) { # Check that input is not null, and raise an error if it is stop("Input to connector is NULL", call. = FALSE) } else if (is.na(difference1) | is.na(difference2)) { # Check that input is not null, and raise and error if it is stop("Input to connector is NA", call. = FALSE) } else if (!is.numeric(difference1) | !is.numeric(difference1)) { stop("Input to connector is not a character", call. = FALSE) } else { # If checks of function pass, then run the main body of the function, and # return and output. # BODY -------------------------------------------------------------------- if (difference1 * difference2 < 0) { print(", however there has been") } else { print(", this is consistent with") } } }, warning = function(war){ warning(war) }, error = function(err){ err$message <- paste("While producing the correct connector phrase", err, sep = " ") stop(err) }) }
## 3D heart with a Chinese five-star flag
## Author: Yixuan Qiu <yixuan.qiu@cos.name>
## original code in a Chinese forum: https://d.cosx.org/d/16743
## this code was written to celebrate China's 60 anniversary
if (!require("rgl")) stop("You need the rgl package to generate the 3D heart!")

## Implicit heart surface with z substituted by x * tan(theta):
## (x^2 + z^2 + 2y^2 + 0.1*cos(w*tt) - 0.9)^3 - (x^2 + y^2/9) * z^3 = 0.
## `w` and `tt` add an optional pulsing term; both default to 0 so the
## static surface is drawn here. Roots of this function in x (for fixed
## theta, y) give points on the heart.
xtheta = function(x, theta, y, w = 0, tt = 0) {
    (x^2 + (x * tan(theta))^2 + 2 * y^2 + 0.1 * cos(w * tt) - 0.9)^3 -
        (x^2 + y^2/9) * (x * tan(theta))^3
}
## The same implicit surface as a function of z (for fixed x, y); used to
## find the top/bottom z extremes of the heart on the x = 0 plane.
fz = function(z, x, y, w = 0, tt = 0) {
    (x^2 + 2 * y^2 + z^2 + 0.1 * cos(w * tt) - 0.9)^3 - (x^2 + y^2/9) * z^3
}

n = 100                         # grid resolution (angles and slices)
y = seq(-2, 2, length.out = n)  # candidate y slices through the heart

## For each y slice, sweep the angle theta and solve for the x root,
## accumulating surface coordinates column by column.
y0 = xx = zz = NULL
for (i in 1:length(y)) {
    theta = seq(-pi/2, 1.5 * pi, length.out = n)
    ## Root of xtheta(.) = 0 for one angle. The quadrant of theta picks the
    ## bracketing interval for uniroot; at the vertical angles x is exactly 0.
    solvex = function(theta, y) {
        if (theta == -pi/2 | theta == pi/2 | theta == 1.5 * pi) {
            return(0)
        } else if (theta > -pi/2 & theta < pi/2) {
            interval = c(0, 2)
        } else {
            interval = c(-2, 0)
        }
        x.root = uniroot(xtheta, interval, theta, y)$root
        return(x.root)
    }
    ## Skip slices where the function does not change sign on [0, 2]:
    ## no root there means this y lies outside the heart.
    if (xtheta(0, pi/4, y[i]) * xtheta(2, pi/4, y[i]) > 0) next
    y0 = c(y0, y[i])
    x = sapply(theta, solvex, y[i])
    ## z extremes on the x = 0 plane for this slice.
    zplus = uniroot(fz, c(0, 2), 0, y[i])$root
    zminus = uniroot(fz, c(-2, 0), 0, y[i])$root
    z = numeric(n)
    z[x != 0] = x[x != 0] * tan(theta[x != 0])
    ## Where x == 0 the tan() relation is degenerate; use the precomputed
    ## upper/lower z root depending on which vertical angle we are at.
    z[x == 0] = (theta[x == 0] == pi/2) * zplus + (theta[x == 0] != pi/2) * zminus
    xx = cbind(xx, x)
    zz = cbind(zz, z)
}
## Replicate the kept y values so yy matches the dimensions of xx / zz.
yy = matrix(rep(y0, n), n, length(y0), byrow = TRUE)
library(rgl)
## Draw the red heart body.
persp3d(zz, xx, yy, col = "red", xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2),
    zlim = c(-1, 1), axes = FALSE, box = FALSE, xlab = "", ylab = "", zlab = "")

## Same implicit surface with the fixed parameters packed into one vector,
## in the form uniroot() needs inside gety() below.
fy = function(y, pars) {
    z = pars[1]
    x = pars[2]
    w = pars[3]
    tt = pars[4]
    (x^2 + 2 * y^2 + z^2 + 0.1 * cos(w * tt) - 0.9)^3 - (x^2 + y^2/9) * z^3
}
## Solve for y over a (z, x) grid; returns NA where the interval does not
## bracket a root (points off the heart). Vectorized over z and x so it can
## be passed to outer().
gety = function(z, x, interval = c(0.01, 1), w = 0, tt = 0) {
    mpars = cbind(z, x, w, tt)
    solvey = function(pars) {
        if (fy(interval[1], pars) * fy(interval[2], pars) > 0) {
            return(NA)
        } else {
            y = uniroot(fy, interval, pars)$root
        }
    }
    y = apply(mpars, 1, solvey)
    return(y)
}
x0 = z0 = seq(-1, 1, length.out = n)
y0 = outer(z0, x0, gety)
## Front face textured with the flag image shipped in the `fun` package;
## mirrored back face drawn in plain red.
persp3d(x = z0, y = x0, z = y0, zlim = c(-1, 1), col = "white",
    texture = system.file("img", "flag.png", package = "fun"), add = TRUE)
persp3d(x = z0, y = x0, z = -y0, zlim = c(-1, 1), col = "red", add = TRUE)
/demo/ChinaHeart3D.R
no_license
yihui/fun
R
false
false
2,488
r
## 3D heart with a Chinese five-star flag ## Author: Yixuan Qiu <yixuan.qiu@cos.name> ## original code in a Chinese forum: https://d.cosx.org/d/16743 ## this code was written to celebrate China's 60 anniversary if (!require("rgl")) stop("You need the rgl package to generate the 3D heart!") xtheta = function(x, theta, y, w = 0, tt = 0) { (x^2 + (x * tan(theta))^2 + 2 * y^2 + 0.1 * cos(w * tt) - 0.9)^3 - (x^2 + y^2/9) * (x * tan(theta))^3 } fz = function(z, x, y, w = 0, tt = 0) { (x^2 + 2 * y^2 + z^2 + 0.1 * cos(w * tt) - 0.9)^3 - (x^2 + y^2/9) * z^3 } n = 100 y = seq(-2, 2, length.out = n) y0 = xx = zz = NULL for (i in 1:length(y)) { theta = seq(-pi/2, 1.5 * pi, length.out = n) solvex = function(theta, y) { if (theta == -pi/2 | theta == pi/2 | theta == 1.5 * pi) { return(0) } else if (theta > -pi/2 & theta < pi/2) { interval = c(0, 2) } else { interval = c(-2, 0) } x.root = uniroot(xtheta, interval, theta, y)$root return(x.root) } if (xtheta(0, pi/4, y[i]) * xtheta(2, pi/4, y[i]) > 0) next y0 = c(y0, y[i]) x = sapply(theta, solvex, y[i]) zplus = uniroot(fz, c(0, 2), 0, y[i])$root zminus = uniroot(fz, c(-2, 0), 0, y[i])$root z = numeric(n) z[x != 0] = x[x != 0] * tan(theta[x != 0]) z[x == 0] = (theta[x == 0] == pi/2) * zplus + (theta[x == 0] != pi/2) * zminus xx = cbind(xx, x) zz = cbind(zz, z) } yy = matrix(rep(y0, n), n, length(y0), byrow = TRUE) library(rgl) persp3d(zz, xx, yy, col = "red", xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2), zlim = c(-1, 1), axes = FALSE, box = FALSE, xlab = "", ylab = "", zlab = "") fy = function(y, pars) { z = pars[1] x = pars[2] w = pars[3] tt = pars[4] (x^2 + 2 * y^2 + z^2 + 0.1 * cos(w * tt) - 0.9)^3 - (x^2 + y^2/9) * z^3 } gety = function(z, x, interval = c(0.01, 1), w = 0, tt = 0) { mpars = cbind(z, x, w, tt) solvey = function(pars) { if (fy(interval[1], pars) * fy(interval[2], pars) > 0) { return(NA) } else { y = uniroot(fy, interval, pars)$root } } y = apply(mpars, 1, solvey) return(y) } x0 = z0 = seq(-1, 1, length.out = n) y0 = 
outer(z0, x0, gety) persp3d(x = z0, y = x0, z = y0, zlim = c(-1, 1), col = "white", texture = system.file("img", "flag.png", package = "fun"), add = TRUE) persp3d(x = z0, y = x0, z = -y0, zlim = c(-1, 1), col = "red", add = TRUE)
## Reflection helpers for the embedded Perl interpreter (RSPerl).
## All low-level access goes through .Call() into the package's C layer;
## behavior below is documented from the visible R code only.

perlModuleLoaded =
#
#  Currently, this has problems the second time around that we look at Seen
#
#
# With no arguments, returns the names (in Foo::Bar form) of the Perl modules
# recorded as loaded by B::Stash's "Seen" hash. With `pkgs` supplied, returns
# a logical vector indicating which of those package names are loaded.
function(pkgs = character(0))
{
  # B::Stash's Seen hash keys are module file names such as "Foo/Bar.pm".
  b = .PerlPackage("B::Stash")
  els = names(b$stash[["Seen"]])
  # Strip the .pm extension from the keys.
  base = gsub("\\.pm$", "", els)

  if(FALSE) {
    # if we were working with the actual file names rather than the keys.
    inc = .PerlGet("INC", TRUE)
    # Discard the . May need to be careful about other .'s in our strings
    inc = inc[inc != "."]
    base = gsub(paste("(", paste(inc, collapse = "|"), ")", .Platform$file.sep, sep = ""), "", els)
  }

  # Translate path separators into Perl's :: namespace separator.
  base = gsub(.Platform$file.sep, "::", base)

  if(length(pkgs))
    pkgs %in% base
  else
    base
}

# Fetch the stash (symbol table hash) for a Perl package.
# `pkg` may be a package name, a PerlPackage object (its $name is used), or a
# PerlReference (its first class gives the package). When .load is TRUE and
# the module is not yet loaded, it is loaded on demand. With .convert = FALSE
# the result is left as an opaque reference and classed "PerlStashReference".
.PerlStashInfo <-
function(pkg = "main", .convert = TRUE, .perl = NULL, .load = TRUE)
{
  if(inherits(pkg, "PerlPackage"))
    pkg = pkg$name
  else if(inherits(pkg, "PerlReference"))
    pkg = class(pkg)[1]
  else if(.load && pkg != "main" && !perlModuleLoaded(pkg))
    # Force the loading
    .PerlPackage(pkg)
    # could get the stash from this, but need to obey .convert here.

  # Converting the main stash can recurse into itself, so force it off.
  if(pkg == "main" && .convert) {
    warning("cannot convert elements in the main stash because of potential self-references. Using .convert = FALSE")
    .convert = FALSE
  }

  ans = .Call(RS_getStash, as.character(pkg), .convert, .perl)
  if(is.null(ans))
    stop("can't get stash for module ", pkg, ". Is it loaded in the Perl interpreter")

  if(!.convert)
    class(ans) = c("PerlStashReference", class(ans))

  ans
}

# Names of the objects in a package's stash. For "main" the package is not
# force-loaded (it always exists); other packages are loaded on demand.
.PerlObjects <-
function(pkg = "main", .perl= NULL)
{
  names(.PerlStashInfo(pkg, FALSE, .perl, .load = ifelse(pkg == "main", FALSE, TRUE)))
}

# Integer codes for Perl value types, matching the C-level enumeration:
# CV = code, AV = array, HV = hash, SV = scalar.
.PerlTypes <- c("CV" = 0, "AV" = 1, "HV" = 2, "SV" = 3)

# Test whether a Perl symbol `name` of the given type exists (optionally
# within package `pkg`). `type` may be given as a (partially matched) name
# from .PerlTypes or directly as its integer code.
.PerlExists <-
function(name, pkg=NULL, type = "SV")
{
  if(is.character(type)) {
    # pmatch() gives a 1-based position; subtract 1 for the C-level code.
    type <- pmatch(type, names(.PerlTypes)) -1
    if(is.na(type))
      stop("Incorrect type specification")
  }

  .Call(RS_PerlExists, as.character(name), pkg, as.integer(type))
}

# Query the Perl type of a reference (see .PerlTypes for the meaning of the
# value returned by the C layer).
.PerlType <-
function(ref, pkg=NULL, perl=NULL)
{
  .Call(RS_getPerlType, ref, as.character(pkg), perl)
}
/R/PerlReflect.R
no_license
sboehringer/RSPerl
R
false
false
2,025
r
perlModuleLoaded = # # Currently, this has problems the second time around that we look at Seen # # function(pkgs = character(0)) { b = .PerlPackage("B::Stash") els = names(b$stash[["Seen"]]) base = gsub("\\.pm$", "", els) if(FALSE) { # if we were working with the actual file names rather than the keys. inc = .PerlGet("INC", TRUE) # Discard the . May need to be careful about other .'s in our strings inc = inc[inc != "."] base = gsub(paste("(", paste(inc, collapse = "|"), ")", .Platform$file.sep, sep = ""), "", els) } base = gsub(.Platform$file.sep, "::", base) if(length(pkgs)) pkgs %in% base else base } .PerlStashInfo <- function(pkg = "main", .convert = TRUE, .perl = NULL, .load = TRUE) { if(inherits(pkg, "PerlPackage")) pkg = pkg$name else if(inherits(pkg, "PerlReference")) pkg = class(pkg)[1] else if(.load && pkg != "main" && !perlModuleLoaded(pkg)) # Force the loading .PerlPackage(pkg) # could get the stash from this, but need to obey .convert here. if(pkg == "main" && .convert) { warning("cannot convert elements in the main stash because of potential self-references. Using .convert = FALSE") .convert = FALSE } ans = .Call(RS_getStash, as.character(pkg), .convert, .perl) if(is.null(ans)) stop("can't get stash for module ", pkg, ". Is it loaded in the Perl interpreter") if(!.convert) class(ans) = c("PerlStashReference", class(ans)) ans } .PerlObjects <- function(pkg = "main", .perl= NULL) { names(.PerlStashInfo(pkg, FALSE, .perl, .load = ifelse(pkg == "main", FALSE, TRUE))) } .PerlTypes <- c("CV" = 0, "AV" = 1, "HV" = 2, "SV" = 3) .PerlExists <- function(name, pkg=NULL, type = "SV") { if(is.character(type)) { type <- pmatch(type, names(.PerlTypes)) -1 if(is.na(type)) stop("Incorrect type specification") } .Call(RS_PerlExists, as.character(name), pkg, as.integer(type)) } .PerlType <- function(ref, pkg=NULL, perl=NULL) { .Call(RS_getPerlType, ref, as.character(pkg), perl) }
## Surface Relief Ratio (SRR) for a DEM, computed through GRASS GIS 7
## from R via rgrass7, then visualized with tmap/raster.
library(rgrass7)
library(tmap)
library(raster)

# Load GRASS ecosystem
# Prepend the OSGeo4W binaries to PATH so the GRASS modules can be found
# (Windows-specific paths).
WPATH <- Sys.getenv("PATH")
WPATH1 <- paste("C:\\OSGeo4W64\\bin", WPATH, sep=";")
Sys.setenv(PATH=WPATH1)
initGRASS("C:/OSGeo4W64/apps/grass/grass-7.2.1", tempdir(), override=TRUE)

# Set on-disk raster variable
rname <- paste0(getwd(), "/GRASS/CODGiK/M-33-6-C-b-1-3.asc")
# rname <- paste0(getwd(), "/GRASS/GMTED2010N10E060_300/10n060e_20101117_gmted_mea300.tif")

# Set GRASS environment and database location
# loc <- initGRASS("C:/OSGeo4W64/apps/grass/grass-7.2.1", home=tempdir(), gisDbase="GRASS_TEMP", override=TRUE )

# Import raster to GRASS and set the computational region to match the DEM
execGRASS("r.in.gdal", flags="overwrite", parameters=list(input=rname, output="DEM"))
execGRASS("g.region", parameters=list(raster="DEM") )

# Print raster
r1 <- readRAST("DEM")
qtm(r1)

# Calculate surface relief ratio and export to img format.
# SRR = (mean - min) / (max - min), each statistic taken over a 33x33-cell
# moving window via r.neighbors.
execGRASS("r.neighbors", flags="overwrite", parameters=list(input="DEM", output="mean", method="average", size=as.integer(33)) )
execGRASS("r.neighbors", flags="overwrite", parameters=list(input="DEM", output="min", method="minimum", size=as.integer(33)) )
execGRASS("r.neighbors", flags="overwrite", parameters=list(input="DEM", output="max", method="maximum", size=as.integer(33)) )
execGRASS("r.mapcalc", flags="overwrite", parameters=list(expression="srr=(mean - min)/(max - min)") )
execGRASS("r.out.gdal", flags="overwrite", parameters=list(input="srr", format="HFA", type="Float64", output="srr3.img", nodata=-9999) )
# View the result both from the GRASS mapset and from the exported file.
r2 <- readRAST("srr")
qtm(r2)
r3 <- raster("srr3.img")
qtm(r3)

# Clean up temp rasters
# NOTE(review): GRASS 7's g.remove takes type=/name= parameters; the rast=
# form here is the GRASS 6 convention — confirm against the installed
# GRASS version that this call actually removes the rasters.
execGRASS("g.remove", parameters=list(rast=c("mean", "min", "max", "srr")) )

# Clean up GRASS workspace
# unlink(paste(getwd(), "GRASS_TEMP", sep="/"), recursive=TRUE)
# file.remove(paste(getwd(), ".grassrc6", sep="/"))
/GRASS.R
no_license
wkedziora/Doktorat
R
false
false
2,093
r
library(rgrass7) library(tmap) library(raster) # Load GRASS ecosystem WPATH <- Sys.getenv("PATH") WPATH1 <- paste("C:\\OSGeo4W64\\bin", WPATH, sep=";") Sys.setenv(PATH=WPATH1) initGRASS("C:/OSGeo4W64/apps/grass/grass-7.2.1", tempdir(), override=TRUE) # Set on-disk raster variable rname <- paste0(getwd(), "/GRASS/CODGiK/M-33-6-C-b-1-3.asc") # rname <- paste0(getwd(), "/GRASS/GMTED2010N10E060_300/10n060e_20101117_gmted_mea300.tif") # Set GRASS environment and database location # loc <- initGRASS("C:/OSGeo4W64/apps/grass/grass-7.2.1", home=tempdir(), gisDbase="GRASS_TEMP", override=TRUE ) # Import raster to GRASS and set region execGRASS("r.in.gdal", flags="overwrite", parameters=list(input=rname, output="DEM")) execGRASS("g.region", parameters=list(raster="DEM") ) # Print raster r1 <- readRAST("DEM") qtm(r1) # Calculate surface relief ratio and export to img format execGRASS("r.neighbors", flags="overwrite", parameters=list(input="DEM", output="mean", method="average", size=as.integer(33)) ) execGRASS("r.neighbors", flags="overwrite", parameters=list(input="DEM", output="min", method="minimum", size=as.integer(33)) ) execGRASS("r.neighbors", flags="overwrite", parameters=list(input="DEM", output="max", method="maximum", size=as.integer(33)) ) execGRASS("r.mapcalc", flags="overwrite", parameters=list(expression="srr=(mean - min)/(max - min)") ) execGRASS("r.out.gdal", flags="overwrite", parameters=list(input="srr", format="HFA", type="Float64", output="srr3.img", nodata=-9999) ) r2 <- readRAST("srr") qtm(r2) r3 <- raster("srr3.img") qtm(r3) # Clean up temp rasters execGRASS("g.remove", parameters=list(rast=c("mean", "min", "max", "srr")) ) # Clean up GRASS workspace # unlink(paste(getwd(), "GRASS_TEMP", sep="/"), recursive=TRUE) # file.remove(paste(getwd(), ".grassrc6", sep="/"))
library(tm)
library(tmcn)
library(Rwordseg)
library(vegan)

#-----------------------------------------
# Build the word-segmentation dictionary
#-----------------------------------------
#-----------------------------------------
# Import the Sogou dictionary
words1 <- toTrad(readLines("http://wubi.sogou.com/dict/download_txt.php?id=9182"))
# PTT dictionary
words2 <- toTrad(readLines("http://wubi.sogou.com/dict/download_txt.php?id=9912"))
# combined traditional-Chinese dictionary
words <- c(words1,words2)
insertWords(words)

# Custom word list (Rwordseg requires simplified characters); each term is
# registered as a noun ("n") with a high frequency so the segmenter keeps
# these domain terms intact.
strwords <- c("服贸", "马英九", "江宜桦", "立法院", "国会", "行政院", "魏扬", "林飞帆",
              "陈为廷", "台湾", "警察", "暴力", "镇暴警察", "学运", "黑色岛国", "清大",
              "台大", "镇压", "后退", "张庆忠", "王金平",
              "苹果", "陪审团", "粉丝团", "苹论", "阵线", "最新", "评论", "独立",
              "媒体", "每日", "总览", "有话", "要说" ,"即时", "论坛", "反服贸", "反反服贸")
insertWords(strwords, strtype=rep("n", length(strwords)),
            numfreq=rep(1000, length(strwords)))

# Define stop words: standard Chinese stop words plus media boilerplate and
# corpus-specific noise terms.
myStopWords <- c(toTrad(stopwordsCN()),
                 "編輯", "時間", "標題", "發信", "實業", "作者", "要聞", "即時新聞",
                 "聯合新聞網", "全文網址", "全文", "網址",
                 "大家", "今天", "知道", "非常", "很多", "現在", "希望", "不要", "已經",
                 "看到", "謝謝", "其實", "事情",
                 "蘋果", "陪審團", "粉絲團", "蘋論", "陣線", "最新", "評論", "獨立",
                 "媒體", "每日", "總覽", "有話", "要說" ,"即時", "論壇", "投稿", "報導",
                 "新聞", "表示", "粉絲", "沒有",
                 "青島", "院內", "濟南", "現場", "主持人", "場內", "一起", "出來", "一下",
                 "裡面", "可能", "需要", "應該", "覺得", "繼續", "告訴", "不能", "剛剛",
                 "接下來", "下去", "廣播", "訊息", "可能","問題", "文章", "社會", "政治",
                 "朋友", "心得", "代表", "方式", "事件", "地方", "內容", "產業", "行為",
                 "運動", "電視", "意見", "高調", "部分", "感覺", "重點", "小時", "狀況",
                 "聲音", "言論", "原則", "意義", "理由", "意思", "物資", "口號", "後退",
                 "掌聲", "台大", "情況", "通道", "小姐", "部分", "身體", "廁所", "內容",
                 "位置", "媽媽", "手機", "圖文", "影音", "記者" )

# main function
#
# Build a term-document matrix from Chinese documents.
#
# docs     : character vector of documents (method = "vec") or a directory
#            path containing the documents (method = "dir").
# method   : "vec" or "dir" input mode.
# encoding : input encoding; documents are re-encoded with iconv() only when
#            this is not already "utf8".
# clean    : if TRUE and there is more than one document, use hierarchical
#            clustering to keep only the dominant topical cluster.
#
# Returns an object of class "news": a list with `fre` (term frequencies)
# and `tdm` (the filtered term-document matrix).
myDocTerMat <- function(docs, method="vec", encoding="utf8", clean=TRUE){
  if(method=="vec"){
    corpus <- Corpus(VectorSource(docs))    # build the corpus
    # BUG FIX: the comparison string was misspelled "uft8", so the iconv()
    # re-encoding ran even for the default "utf8" input. Skip it only when
    # the input is already UTF-8.
    if(encoding != "utf8"){
      corpus <- tm_map(corpus, function(word) iconv(word, from=encoding))
    }
    corpus <- tm_map(corpus, removePunctuation)                           # remove punctuation
    corpus <- tm_map(corpus, function(word) {gsub("[A-Za-z0-9]", "", word)})  # remove alphanumerics
    # Segment each document and keep only tokens tagged as nouns ("n").
    corpus <- lapply(corpus, segmentCN, nature = TRUE)
    corpus <- lapply(corpus, function(sent) sent[names(sent)=="n"])
  }else if(method=="dir"){
    corpus <- Corpus(DirSource(docs))       # build the corpus
    # Same misspelling fix as above ("uft8" -> "utf8").
    if(encoding != "utf8"){
      corpus <- tm_map(corpus, function(word) iconv(word, from=encoding))
    }
    corpus <- tm_map(corpus, removePunctuation)                           # remove punctuation
    corpus <- tm_map(corpus, function(word) {gsub("[A-Za-z0-9]", "", word)})  # remove alphanumerics
    # Directory documents are multi-line: segment line by line, then keep
    # only noun tokens across all lines.
    corpus <- lapply(corpus, function(sent) sapply(sent, segmentCN, nature=TRUE))
    corpus <- lapply(corpus, function(sent) {
      unlist(lapply(sent, function(w) w[names(w) == "n"]))
    })
  }
  corpus <- Corpus(VectorSource(corpus))
  corpus <- tm_map(corpus, removeWords, myStopWords)
  # Keep only terms of length >= 2 characters.
  tdm <- TermDocumentMatrix(corpus, control = list(wordLengths = c(2, Inf)))
  mat <- as.matrix(tdm)   # export the term-document matrix

  if(ncol(mat)>1){
    # Keep only the most frequent terms (top 20% by row sums) and drop
    # documents left with no terms.
    mat <- mat[rowSums(mat) >= quantile(rowSums(mat), 0.80), ]
    mat <- mat[, colSums(mat)>0]
    if(clean==TRUE){
      # Use hierarchical clustering to drop articles unrelated to the
      # dominant topic: keep only the largest of k clusters.
      dist_dtm <- vegdist(t(mat), method = 'horn')
      hc <- hclust(dist_dtm, method = 'ave')
      k <- ceiling(0.05*length(hc$labels))
      groups <- cutree(hc, k=k)   # cut tree into k clusters
      mat <- mat[, which(groups == which.max(table(groups)))]
    }
    mat <- mat[rowSums(mat) > 0, colSums(mat) > 0]
    f <- rowSums(mat)
  }else{
    # With only one (or zero) documents there is nothing to cluster.
    mat <- data.frame(mat[rowSums(mat) >= quantile(rowSums(mat), 0.80), ])
    colnames(mat) <- 1
    f <- rowSums(mat)
  }
  out <- list(fre=f, tdm=mat)
  class(out) <- "news"
  out
}
/src/tm.R
no_license
dspim/study-area-statR
R
false
false
4,697
r
library(tm) library(tmcn) library(Rwordseg) library(vegan) #----------------------------------------- # 處理字詞庫 #----------------------------------------- #----------------------------------------- # 匯入sogou字庫 words1 <- toTrad(readLines("http://wubi.sogou.com/dict/download_txt.php?id=9182")) # ptt字庫 words2 <- toTrad(readLines("http://wubi.sogou.com/dict/download_txt.php?id=9912")) # 繁體字庫 words <- c(words1,words2) insertWords(words) # 自建字庫 (需要簡體字) strwords <- c("服贸", "马英九", "江宜桦", "立法院", "国会", "行政院", "魏扬", "林飞帆", "陈为廷", "台湾", "警察", "暴力", "镇暴警察", "学运", "黑色岛国", "清大", "台大", "镇压", "后退", "张庆忠", "王金平", "苹果", "陪审团", "粉丝团", "苹论", "阵线", "最新", "评论", "独立", "媒体", "每日", "总览", "有话", "要说" ,"即时", "论坛", "反服贸", "反反服贸") insertWords(strwords, strtype=rep("n", length(strwords)), numfreq=rep(1000, length(strwords))) # 定義停詞 myStopWords <- c(toTrad(stopwordsCN()), "編輯", "時間", "標題", "發信", "實業", "作者", "要聞", "即時新聞", "聯合新聞網", "全文網址", "全文", "網址", "大家", "今天", "知道", "非常", "很多", "現在", "希望", "不要", "已經", "看到", "謝謝", "其實", "事情", "蘋果", "陪審團", "粉絲團", "蘋論", "陣線", "最新", "評論", "獨立", "媒體", "每日", "總覽", "有話", "要說" ,"即時", "論壇", "投稿", "報導", "新聞", "表示", "粉絲", "沒有", "青島", "院內", "濟南", "現場", "主持人", "場內", "一起", "出來", "一下", "裡面", "可能", "需要", "應該", "覺得", "繼續", "告訴", "不能", "剛剛", "接下來", "下去", "廣播", "訊息", "可能","問題", "文章", "社會", "政治", "朋友", "心得", "代表", "方式", "事件", "地方", "內容", "產業", "行為", "運動", "電視", "意見", "高調", "部分", "感覺", "重點", "小時", "狀況", "聲音", "言論", "原則", "意義", "理由", "意思", "物資", "口號", "後退", "掌聲", "台大", "情況", "通道", "小姐", "部分", "身體", "廁所", "內容", "位置", "媽媽", "手機", "圖文", "影音", "記者" ) # main function myDocTerMat <- function(docs, method="vec", encoding="utf8", clean=TRUE){ if(method=="vec"){ corpus <- Corpus(VectorSource(docs)) # 建立語料庫 if(encoding!="uft8"){ corpus <- tm_map(corpus, function(word) iconv(word, from=encoding)) } corpus <- tm_map(corpus, removePunctuation) #清除標點符號 corpus <- tm_map(corpus, function(word) {gsub("[A-Za-z0-9]", "", word)}) #清除英數符號 corpus <- lapply(corpus, segmentCN, nature = TRUE) corpus <- 
lapply(corpus, function(sent) sent[names(sent)=="n"]) }else if(method=="dir"){ corpus <- Corpus(DirSource(docs)) # 建立語料庫 if(encoding!="uft8"){ corpus <- tm_map(corpus, function(word) iconv(word, from=encoding)) } corpus <- tm_map(corpus, removePunctuation) #清除標點符號 corpus <- tm_map(corpus, function(word) {gsub("[A-Za-z0-9]", "", word)}) #清除英數符號 corpus <- lapply(corpus, function(sent) sapply(sent, segmentCN, nature=TRUE)) corpus <- lapply(corpus, function(sent) { unlist(lapply(sent, function(w) w[names(w) == "n"])) }) } corpus <- Corpus(VectorSource(corpus)) corpus <- tm_map(corpus, removeWords, myStopWords) tdm <- TermDocumentMatrix(corpus, control = list(wordLengths = c(2, Inf))) mat <- as.matrix(tdm) # 輸出Term-Document Matrix if(ncol(mat)>1){ mat <- mat[rowSums(mat) >= quantile(rowSums(mat), 0.80), ] mat <- mat[, colSums(mat)>0] if(clean==TRUE){ # 利用Hierarchical Clustering 清理不屬於學運議題的報導 dist_dtm <- vegdist(t(mat), method = 'horn') hc <- hclust(dist_dtm, method = 'ave') k <- ceiling(0.05*length(hc$labels)) groups <- cutree(hc, k=k) # cut tree into k clusters mat <- mat[, which(groups == which.max(table(groups)))] } mat <- mat[rowSums(mat) > 0, colSums(mat) > 0] f <- rowSums(mat) }else{ # 只有1/0份文件不用分群 mat <- data.frame(mat[rowSums(mat) >= quantile(rowSums(mat), 0.80), ]) colnames(mat) <- 1 f <- rowSums(mat) } out <- list(fre=f, tdm=mat) class(out) <- "news" out }
## Fit a cross-validated elastic-net regression (alpha = 0.03, MAE loss) to
## the lymphoid training set and append the fitted coefficient path to the
## model log file.
library(glmnet)

# Training data: response in column 1, predictors from column 4 onward.
# `header = TRUE` spelled out (the original relied on partial matching of
# `head = T`, and `T` is reassignable).
mydata = read.table("./TrainingSet/AvgRank/lymphoid.csv", header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

set.seed(123)  # reproducible fold assignment for cross-validation
# Renamed from `glm`: that name masked stats::glm() for the rest of the
# session, which is a latent source of confusion.
cv_fit = cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.03,
                   family = "gaussian", standardize = TRUE)

# Append the glmnet coefficient path summary to the model's log file.
sink('./Model/EN/AvgRank/lymphoid/lymphoid_015.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
/Model/EN/AvgRank/lymphoid/lymphoid_015.R
no_license
leon1003/QSMART
R
false
false
358
r
library(glmnet) mydata = read.table("./TrainingSet/AvgRank/lymphoid.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=TRUE) sink('./Model/EN/AvgRank/lymphoid/lymphoid_015.txt',append=TRUE) print(glm$glmnet.fit) sink()
## Reproducible model audit: PLS-DA (caret backend via mlr) on the OpenML
## "steel-plates-fault" dataset, evaluated with 5-fold cross-validation.
## The `#:#` marker lines are machine-read section tags — left untouched.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
set.seed(1)  # fixes CV fold assignment for reproducibility

#:# data
# Downloads the dataset from OpenML (network access required).
dataset <- getOMLDataSet(data.name = "steel-plates-fault")
head(dataset$data)

#:# preprocessing
# No preprocessing is applied; the raw data is used as-is.
head(dataset$data)

#:# model
# Classification task on target "Class"; PLS-DA learner with softmax
# probability estimates and the SIMPLS algorithm, predicting probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.plsdaCaret", par.vals = list(probMethod = "softmax", method = "simpls"), predict.type = "prob")

#:# hash
#:# 301806f0774e5ef1662275f6bc73ac87
# The digest of (task, learner) uniquely identifies this model configuration;
# the tag above records the expected value.
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold CV with accuracy, AUC, and confusion-matrix-derived measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# Record the package versions used for this run.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
/models/openml_steel-plates-fault/classification_Class/301806f0774e5ef1662275f6bc73ac87/code.R
no_license
pysiakk/CaseStudies2019S
R
false
false
737
r
#:# libraries library(digest) library(mlr) library(OpenML) library(farff) #:# config set.seed(1) #:# data dataset <- getOMLDataSet(data.name = "steel-plates-fault") head(dataset$data) #:# preprocessing head(dataset$data) #:# model task = makeClassifTask(id = "task", data = dataset$data, target = "Class") lrn = makeLearner("classif.plsdaCaret", par.vals = list(probMethod = "softmax", method = "simpls"), predict.type = "prob") #:# hash #:# 301806f0774e5ef1662275f6bc73ac87 hash <- digest(list(task, lrn)) hash #:# audit cv <- makeResampleDesc("CV", iters = 5) r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1)) ACC <- r$aggr ACC #:# session info sink(paste0("sessionInfo.txt")) sessionInfo() sink()
# Auto-generated regression/fuzz input for meteor:::ET0_Makkink (produced by
# an automated fuzzing pipeline; the extreme and denormal values are
# intentional). Note the argument lengths are deliberately inconsistent
# (Rs is empty, atmp has 36 elements, temp has 21) to exercise the C++
# entry point's handling of mismatched inputs under valgrind.
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, -8.7867694798737e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Invoke the internal (unexported) function with the fuzzed argument list
# and print the structure of whatever it returns.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615846065-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
736
r
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, -8.7867694798737e+139, 2.3317908961407e-93, 2.16562581831091e+161)) result <- do.call(meteor:::ET0_Makkink,testlist) str(result)
# Final Project ----------------------------------------------------------- # Hugo Belisario # STAT 301-2 Data Science # 17 March 2020 # Catcher Defense during the Statcast Era # Data Scope and Identification ------------------------------------------- # The datasets used for this Final Project came from Baseball Prospectus (BP), an organization that runs a website devoted to sabermetric analysis of baseball, and BaseballSavant, a site dedicated to providing player matchups, Statcast metrics, and advanced statistics in a simple and easy-to-view way. # Baseball Prospectus recently built a new Leaderboards platform that is currently in Beta and includes Hitting, Pitching, and Catcher Defense Leaderboards. # BP has their own way of calculating an overall defensive measure for catchers called Catcher Defensive Adjustment or Fielding Runs Above Average Adjustment. # However, they do not possess Statcast metrics for other aspects of the position besides traditional fielding such as pitch framing, pop time, and exchange. # BaseballSavant maintains Statcast Leaderboards for Catcher Pop Time and Catcher Framing. # The question of interest is the following: Can we predict a catcher's overall defensive value using Statcast data to replace metrics used by Baseball Prospectus? # The notion behind my curiosity is that since Statcast is the state-of-the-art tracking technology implemented by the MLB then it must serve as a more accurate way of predicting a catcher's defensive performance than third-party organizations. # Statcast and Baseball Savant do not provide an overall defensive measure for catchers, but BP does. # Hence, I believe that although there are similarities to statistics provided by both entities using Statcast data can give us more insight as to how these catcher's have actually performed utilizing BP's formula for Catcher Defense Adjustment. # This project is regression-based since I am working with continuous variables. 
# Background ------------------------------------------------------------ # Catcher's influence on defense is more subtle than the other positions in baseball. # You cannot judge a catcher's defensive performance based on their number of steals allowed or their caught stealing rate because the pitcher and runner play important parts in determining the probability of a steal attempt, let alone a successful one. # Just like your typical fielders, catchers need to put tags on runners trying to score, field bunts, and chase pop flies. # However, baseball is a game of inches. # Any small advantage or edge that teams can improve upon or add to their organization, they will try to maximize it. # Framing and Pop Time/Exchange are those tools that could win a team ballgames, postseason berths, and titles. # A significant component of a catcher's skillset is the ability to prevent runners from advancing and scoring on wild pitches and passed balls by blocking. # The best way to prevent a steal is to prevent the runner from attempting it. # Pop Time measures the time from the moment the pitch hits the catcher’s mitt to the moment the ball reaches the fielder’s projected receiving point at the center of the base. # Pop Time is a combination of exchange time (how quickly the catcher releases the ball, measured in seconds) and arm strength (velocity of throw, in MPH). # Pitch framing for catchers involves receiving the ball and catching it in a manner that favors the pitcher with a strike located outside of standard zones, but in the specific umpire's zone. # Catcher framing is the art of a catcher receiving a pitch in a way that makes it more likely for an umpire to call it a strike. # Baseball Savant breaks down the catcher’s view into eight zones around the strike zone and shows the called strike percentage of all non-swings in that zone. # Framing stats measure how many runs catchers save based on how many extra strikes they provide for their pitchers. 
# Main driver of overall defense, following metrics are additions to the contributions of a catcher, nonetheless they are still valuable. # FRAA, Fielding Runs Above Average, traditional fielding ability (i.e. bunts, pop flies) # CSAA, Called Strikes Above Average, is how BP measures a catcher’s ability to frame pitches. # CSAA Runs provides a run value for called strikes due to framing. # EPAA, Errant Pitches Above Average, measures a catcher’s ability to block wild pitches and prevent passed balls. # SRAA, Swipe Rate Above Average, calculates a catcher’s ability to throw runners out # TRAA, Takeoff Rate Above Average, stolen base attempts above league average. # PORAA, Pick Off Runs Above Average, places run value on pick offs. # Baseball Prospectus calculates their overall defensive measure by adding Framing Runs (CSAA Runs), Blocking Runs (EPAA Runs), and Throwing Runs (SRAA Runs + TRAA Runs + PORAA Runs) to Fielding Runs Above Average (FRAA). # However, as stated above I want to replace CSAA with Runs From Extra Strikes while also observing the relationships and impact other Statcast metrics such as Strike Rates, Pop Times, and Exchange times have on CDA. # Load Packages ----------------------------------------------------------- library(tidyverse) library(janitor) library(skimr) library(corrplot) library(broom) library(modelr) library(ggfortify) library(knitr) library(GGally) library(rsample) library(leaps) # best subset selection library(glmnet) # ridge & lasso library(glmnetUtils) # improves working with glmnet library(pls) # set seed set.seed(3) # Baseball Prospectus Catcher Defense ----------------------------------------------------- # Statcast tracking technology was set up in all 30 stadiums starting in 2015. # For that reason, I am only using seasonal data since 2015 for this project. # After reading in the Baseball Prospectus .csv files from 2015-2019, I proceeded to remove some unneccesary variables and create a 'year' variable for the dataset. 
# The 'year' variable was later used to join datasets. # The same process is completed for all of the seasons. # 2019 bp_catchers_2019 <- read_csv("data/bpcatchers2019.csv") %>% clean_names() %>% # remove playerid, version_date, team, lg since they are not important variables in my analysis select(-playerid, -version_date, team, lg) %>% mutate(year = 2019) # separate name column into two columns, first_name and last_name. bp_catchers_2019 <- extract(bp_catchers_2019, name, c("first_name", "last_name"), "([^ ]+) (.*)") bp_catchers_2019 <- bp_catchers_2019 %>% # ordering the data by making names and year the first two columns makes it easier for the final dataset select(first_name, last_name, year, everything()) # 2018 bp_catchers_2018 <- read_csv("data/bpcatchers2018.csv") %>% clean_names() %>% select(-playerid, -version_date, team, lg) %>% mutate(year = 2018) bp_catchers_2018 <- extract(bp_catchers_2018, name, c("first_name", "last_name"), "([^ ]+) (.*)") bp_catchers_2018 <- bp_catchers_2018 %>% select(first_name, last_name, year, everything()) # 2017 bp_catchers_2017<- read_csv("data/bpcatchers2017.csv") %>% clean_names() %>% select(-playerid, -version_date, team, lg) %>% mutate(year = 2017) bp_catchers_2017 <- extract(bp_catchers_2017, name, c("first_name", "last_name"), "([^ ]+) (.*)") bp_catchers_2017 <- bp_catchers_2017 %>% select(first_name, last_name, year, everything()) # 2016 bp_catchers_2016 <- read_csv("data/bpcatchers2016.csv") %>% clean_names() %>% select(-playerid, -version_date, team, lg) %>% mutate(year = 2016) bp_catchers_2016 <- extract(bp_catchers_2016, name, c("first_name", "last_name"), "([^ ]+) (.*)") bp_catchers_2016 <- bp_catchers_2016 %>% select(first_name, last_name, year, everything()) # 2015 bp_catchers_2015 <- read_csv("data/bpcatchers2015.csv") %>% clean_names() %>% select(-playerid, -version_date, team, lg) %>% mutate(year = 2015) bp_catchers_2015 <- extract(bp_catchers_2015, name, c("first_name", "last_name"), "([^ ]+) (.*)") 
bp_catchers_2015 <- bp_catchers_2015 %>% select(first_name, last_name, year, everything()) # add rows from each dataframe together and formed bp_catchers using rbinds bp_catchers <- rbind(bp_catchers_2015, bp_catchers_2016, bp_catchers_2017, bp_catchers_2018, bp_catchers_2019) # arrange the names and age so that players with multiple appearances in the featured years have their rows in order of age bp_catchers <- bp_catchers %>% arrange(first_name, last_name, age) # Baseball Savant Statcast Catcher Framing ------------------------------------------------ # Similar to the previous set of data, I removed unnecessary variables that could have potentially caused trouble when analyzing the data. # The Catcher Framing dataset included an MLB average of all the metrics as the first row, this was consequently removed. # Later on, I added the rows together and arranged them by name of the player. # 2019 catcher_framing_2019 <- read_csv("data/catcher-framing2019.csv") %>% clean_names() %>% # remove fielder_2 similar to playerid in Baseball Prospectus dataset select(-fielder_2) %>% # order by name select(first_name, last_name, everything()) # remove first row catcher_framing_2019 <- catcher_framing_2019[-1, ] # 2018 catcher_framing_2018 <- read_csv("data/catcher-framing2018.csv") %>% clean_names() %>% select(-fielder_2) %>% select(first_name, last_name, everything()) # remove first row catcher_framing_2018 <- catcher_framing_2018[-1, ] # 2017 catcher_framing_2017 <- read_csv("data/catcher-framing2017.csv") %>% clean_names() %>% select(-fielder_2) %>% select(first_name, last_name, everything()) # remove first row catcher_framing_2017 <- catcher_framing_2017[-1, ] # 2016 catcher_framing_2016 <- read_csv("data/catcher-framing2016.csv") %>% clean_names() %>% select(-fielder_2) %>% select(first_name, last_name, everything()) # remove first row catcher_framing_2016 <- catcher_framing_2016[-1, ] # 2015 catcher_framing_2015 <- read_csv("data/catcher-framing2015.csv") %>% 
clean_names() %>% select(-fielder_2) %>% select(first_name, last_name, everything()) # remove first row catcher_framing_2015 <- catcher_framing_2015[-1, ] # combine all five datasets to form one, using rbinds to add the rows catcher_framing <- rbind(catcher_framing_2015, catcher_framing_2016, catcher_framing_2017, catcher_framing_2018, catcher_framing_2019) # arrange by name catcher_framing <- catcher_framing %>% arrange(first_name, last_name) # Statcast Catcher Pop Time and Exchange ---------------------------------- # The dataset for Pop Time and Exchange was the most complicated to configure into one that could smoothly join the rest. # Below each command, I explain it's purpose as it pertains to forming the final dataset. # 2019 catcher_poptime_2019 <- read_csv("data/poptime2019.csv") %>% clean_names() %>% # column for player names is called catcher rename(name = catcher) %>% # player_id and team_id are useless for this analysis select(-player_id, -team_id) %>% # add year to join poptime with framing data mutate(year = 2019) %>% # arrange columns to show age and year select(name, age, year, everything()) # Separate name column into two columns, first_name and last_name. catcher_poptime_2019 <- extract(catcher_poptime_2019, name, c("first_name", "last_name"), "([^ ]+) (.*)") # Use inner_join to unite both Baseball Savant Catching Leaderboards into one dataframe using keys name and year. 
catchers_statcast_2019 <- catcher_poptime_2019 %>%
  inner_join(catcher_framing_2019, by = c("first_name", "last_name", "year"))

# 2018
catcher_poptime_2018 <- read_csv("data/poptime2018.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2018) %>%
  select(name, age, year, everything())
catcher_poptime_2018 <- extract(
  catcher_poptime_2018, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
catchers_statcast_2018 <- catcher_poptime_2018 %>%
  inner_join(catcher_framing_2018, by = c("first_name", "last_name", "year"))

# 2017
catcher_poptime_2017 <- read_csv("data/poptime2017.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2017) %>%
  select(name, age, year, everything())
catcher_poptime_2017 <- extract(
  catcher_poptime_2017, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
catchers_statcast_2017 <- catcher_poptime_2017 %>%
  inner_join(catcher_framing_2017, by = c("first_name", "last_name", "year"))

# 2016
catcher_poptime_2016 <- read_csv("data/poptime2016.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2016) %>%
  select(name, age, year, everything())
catcher_poptime_2016 <- extract(
  catcher_poptime_2016, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
catchers_statcast_2016 <- catcher_poptime_2016 %>%
  inner_join(catcher_framing_2016, by = c("first_name", "last_name", "year"))

# 2015
catcher_poptime_2015 <- read_csv("data/poptime2015.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2015) %>%
  select(name, age, year, everything())
catcher_poptime_2015 <- extract(
  catcher_poptime_2015, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
catchers_statcast_2015 <- catcher_poptime_2015 %>%
  inner_join(catcher_framing_2015, by = c("first_name", "last_name", "year"))

# Stack the five joined season tables into one table of Statcast metrics.
catchers_statcast <- rbind(
  catchers_statcast_2015, catchers_statcast_2016, catchers_statcast_2017,
  catchers_statcast_2018, catchers_statcast_2019
)
catchers_statcast <- catchers_statcast %>%
  arrange(first_name, last_name, age)

# Join the Statcast and Baseball Prospectus datasets on name and year.
catchers <- catchers_statcast %>%
  inner_join(bp_catchers, by = c("first_name", "last_name", "year"))

# Clean Final Dataset ----------------------------------------------------
# lg (league) is not an important variable here, and age.y duplicates the
# age column produced by the join.
catchers <- catchers %>%
  select(-lg, -age.y) %>%
  select(first_name, last_name, age.x, year, team, everything())
# rename age.x to just "age"
catchers <- rename(catchers, age = age.x)

# Drop name/team/age/year to avoid problems during model building; drop the
# pop-time-to-second-base count metrics that produce question marks when
# running corrplot() and models; drop the BP throwing/framing run metrics
# that are being replaced by their Statcast counterparts.
catchers <- catchers %>%
  select(-c(
    first_name, last_name, team, age, year, pop_3b_sba_count, po_runs,
    epaa_chances, epaa, n_called_pitches, pop_2b_cs, pop_2b_sb,
    pop_2b_sba_count, pop_3b_sba, pop_3b_cs, pop_3b_sb, csaa_runs, csaa,
    csaa_chances, csaa_sd, csaa_runs_sd, sraa, sraa_chances, traa,
    traa_runs, po_chances
  ))

# After this cleaning, the dataset contains 505 observations and 16 variables.
catchers <- catchers %>%
  na.omit()
# Omitting NA values (mostly missing pop times to second base) leaves 481
# observations, i.e. 24 rows contained missing values.
# Codebook ----------------------------------------------------------------
# maxeff_arm_2b_3b_sba: arm strength measured on "max effort" throws, or the average above a player's 90th percentile performance, in miles per hour (MPH)
# exchange_2b_3b_sba: exchange time measured in seconds
# pop_2b_sba: pop time to second base in seconds
# runs_extra_strikes: strikes converted to runs saved on a .125 run/strike basis, including park and pitcher adjustments
# strike_rate: cumulative total across all shadow zones; the percentage of called strikes from pitches that were not swung at in zones 11-19
# strike_rate_11: percentage of called strikes from all called pitches in zone 11
# strike_rate_12: percentage of called strikes from all called pitches in zone 12
# strike_rate_13: percentage of called strikes from all called pitches in zone 13
# strike_rate_14: percentage of called strikes from all called pitches in zone 14
# strike_rate_16: percentage of called strikes from all called pitches in zone 16
# strike_rate_17: percentage of called strikes from all called pitches in zone 17
# strike_rate_18: percentage of called strikes from all called pitches in zone 18
# strike_rate_19: percentage of called strikes from all called pitches in zone 19
# fraa_adj: catcher defensive adjustment, i.e. fielding runs above average with the catching adjustments applied
# fraa: fielding runs above average
# epaa_runs: errant pitches above average, also known as blocking runs

# The First Random Data Split -----------------------------------------------
# The data is not very large, which led me to use two data splits.
# My first split provided 48 observations to run some EDA methods.
# Split the data for EDA and modeling: 10% for EDA, 90% for modeling.
catchers_eda_set <- catchers %>%
  sample_frac(0.1)
catchers_modeling_data <- catchers %>%
  setdiff(catchers_eda_set)

# Exploratory Data Analysis -----------------------------------------------
# First pass: skim through the variables.
catchers_eda_set %>%
  skim_without_charts()
# Although it may not be the MLB average velocity for catchers throwing to
# 2nd base, it was surprising to see that the average velocity of this EDA
# set was 81.5 MPH. Catchers' arms range from the mid-70s to the high 80s,
# which explains part of the trade-offs teams have to deal with: some
# catchers may have weaker arms but can frame pitches better or hit the
# ball better. This particular group of catchers had high percentages when
# it came to getting the call for pitches in zones 14 and 16, and also
# seemed able to prevent runs from scoring due to wild pitches and passed
# balls. Definitely a superb framing group, since the average was 2.67 for
# Runs from Extra Strikes. Pop Time and Exchange times were average by MLB
# standards.

catchers_eda_set %>%
  cor() %>%
  corrplot::corrplot()
# Strike Rate has a strong positive relationship with the bottom of the
# Shadow Zone (16-19), which could signal that framing low pitches is a
# major factor in determining overall strike rate. Strike Rate, as stated
# previously, is a cumulative total percentage based on the Shadow Zone
# percentages. Although some zones correlate slightly positively with the
# overall metric, these are weak signs of multicollinearity, meaning no
# single zone can predict the overall strike rate.
# As expected, max effort velocity to 2nd or 3rd base shares a negative
# relationship with Pop Time: the faster you throw, the less time it takes
# for the ball to reach the receiver. Max effort velocity also has a slight
# positive relationship with the overall defensive measures fraa_adj and
# fraa — it is easier to get people out on bunts and plays to every base
# with a strong arm. Exchange shares a solid positive relationship with max
# effort velocity, implying that catchers with higher velocities tend to
# have longer exchange times: players with strong arms probably take longer
# transferring the ball from glove to a comfortable throwing position,
# while catchers with slower velocities may release the ball quicker to
# have a chance at catching the runner.
# Runs from Extra Strikes has a robust relationship with both fraa_adj and
# fraa, which is encouraging since this Statcast metric is replacing Called
# Strikes Above Average from Baseball Prospectus. Blocking Runs has few
# notable relationships with the other variables, but the correlation plot
# shows a strong positive relationship with fraa_adj and fraa — preventing
# passed balls and wild pitches plausibly raises overall defensive value.

# maxeff_arm_2b_3b_sba has a negative relationship with pop time to second
# base; the regression line veers off in the middle of the cluster. I
# assume that although these catchers have around the same velocity to the
# bases, they differ in exchange time.
ggplot(data = catchers_eda_set,
       mapping = aes(x = maxeff_arm_2b_3b_sba, y = pop_2b_sba)) +
  geom_point() +
  geom_smooth(se = FALSE)

# Since Blocking Runs was the other component of the Catcher Defensive
# Adjustment formula, isolate the metric and regress it on fraa_adj.
# Most points cluster between -1.5 and 2; removing the outliers on both
# ends could show that preventing runs by blocking wild pitches and
# avoiding passed balls adds somewhat significant value to overall defense.
ggplot(data = catchers_eda_set,
       mapping = aes(x = epaa_runs, y = fraa_adj)) +
  geom_point() +
  geom_smooth(se = FALSE)

# These two clearly share a strong positive relationship, and it could be
# stronger after removing a couple of points far from the majority. The
# more runs saved from framing pitches — giving pitchers more opportunities
# for outs — the more overall defensive value increases.
ggplot(data = catchers_eda_set,
       mapping = aes(x = runs_extra_strikes, y = fraa_adj)) +
  geom_point() +
  geom_smooth(se = FALSE)

# The Second Random Data Split (Splitting Analysis Dataset) ---------------
# Since the dataset is not large, a second random data split is performed
# to attain more accurate predictive results: 60%-40% for comparing
# candidate models and for model building, respectively.
# Test set for comparing candidate models and selecting the final model.
catchers_mod_comp_dat <- catchers_modeling_data %>%
  sample_frac(0.60)
# Train set for candidate model building.
catchers_mod_bldg_dat <- catchers_modeling_data %>%
  setdiff(catchers_mod_comp_dat)

# Simple Linear Regression ------------------------------------------------
# Set up one formula per predictor for the simple linear regressions.
predictor_var <- catchers_mod_bldg_dat %>%
  names() %>%
  setdiff("fraa_adj")
fmla <- paste("fraa_adj ~", predictor_var)
# Add the full model (all predictors).
predictor_var <- c(predictor_var, "all_vars")
fmla <- c(fmla, "fraa_adj ~ .")

# Fit and store the models, one row per formula.
catchers_models <- tibble(
  data = list(catchers_mod_bldg_dat),
  predictor_var,
  fmla
) %>%
  mutate(
    model_fit = map2(fmla, data, lm),
    model_type = if_else(predictor_var == "all_vars", "full", "slr")
  )

# Attach fit summaries: glance/tidy/augment plus confidence intervals.
catchers_models <- catchers_models %>%
  mutate(
    mod_glance = map(model_fit, glance),
    mod_tidy = map(model_fit, tidy),
    add_tidy = map(model_fit, confint_tidy),
    mod_tidy = map2(mod_tidy, add_tidy, bind_cols),
    mod_augment = map2(model_fit, data, augment)
  ) %>%
  select(-add_tidy)

# SLR models with a statistically significant association between the
# predictor and the response.
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(model_type != "full", term != "(Intercept)") %>%
  select(term, estimate, p.value) %>%
  arrange(p.value) %>%
  filter(p.value < 0.05)
# Fielding Runs Above Average, Runs From Extra Strikes, Strike Rate, Strike
# Rate in Zone 18, Blocking Runs, Strike Rate in Zone 19, Strike Rate in
# Zone 17, and Strike Rate in Zone 16 had statistically significant
# estimates.

# Plot each predictor's linear relationship with fraa_adj (CDA).
catchers_mod_bldg_dat %>%
  pivot_longer(cols = -fraa_adj, names_to = "predictor", values_to = "value") %>%
  ggplot(aes(x = value, y = fraa_adj)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +
  coord_cartesian(ylim = c(0, 25)) +
  facet_wrap(. ~ predictor, scales = "free_x")
# A faceted display of each variable against Catcher Defensive Adjustment,
# as scatterplots with linear regression lines.

# Investigate the full model.
catchers_models %>%
  filter(model_type == "full") %>%
  unnest(mod_tidy) %>%
  select(-predictor_var, -fmla, -model_type)

# Identify significant slope parameters in the full model.
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(model_type == "full", term != "(Intercept)") %>%
  select(term, estimate, p.value) %>%
  arrange(p.value) %>%
  filter(p.value < 0.05)
# In the full model, Fielding Runs Above Average, Runs From Extra Strikes,
# Blocking Runs, and Strike Rate in Zone 14 have p-values below 0.05,
# making them statistically significant. FRAA being on this list is no
# surprise, since most of CDA is attributed to Fielding Runs Above Average.

# Scatterplot comparing SLR estimates against full-model estimates.
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(term != "(Intercept)") %>%
  select(model_type, term, estimate) %>%
  pivot_wider(names_from = model_type, values_from = estimate) %>%
  ggplot(aes(full, slr)) +
  geom_point() +
  geom_abline(color = "blue", linetype = "dashed")
# Most full-model coefficients are on a similar scale to the SLR
# coefficients, with the exception of Exchange, Pop Time, and Fielding Runs
# Above Average.

# Alternative view: paired confidence intervals for each predictor.
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(term != "(Intercept)") %>%
  ggplot(aes(model_type, estimate)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high)) +
  geom_hline(yintercept = 0, color = "red", linetype = "dashed") +
  facet_wrap(. ~ term, scales = "free_x") +
  coord_flip()
# Major SLR-vs-full differences appear for Strike Rates in Zones 17, 18,
# and 19, and likewise for Blocking Runs (EPAA Runs), Strike Rate, and Runs
# From Extra Strikes. Strike Rate even differed in sign: positive in the
# SLR model, negative in the full model.

# Look for evidence of non-linear associations using cubic regression.
# Set up one cubic formula per predictor.
predictor_var <- catchers_mod_bldg_dat %>%
  names() %>%
  setdiff(c("fraa_adj"))
fmla <- paste0("fraa_adj ~ poly(", predictor_var, ", 3)")

# Fit and store the cubic models.
cubic_models <- tibble(
  data = list(catchers_mod_bldg_dat),
  predictor_var,
  fmla
) %>%
  mutate(cubic_fit = map2(fmla, data, lm))

catchers_models %>%
  # drop the full model from the model database
  filter(model_type != "full") %>%
  select(predictor_var, model_fit) %>%
  # join the SLR fits with the new cubic fits
  left_join(cubic_models, by = "predictor_var") %>%
  # ANOVA comparison, linear vs cubic; H0: the submodel does just as well
  mutate(anova_test = map2(model_fit, cubic_fit, anova)) %>%
  unnest(anova_test) %>%
  # the p-value is the probability of an F value larger than observed
  drop_na() %>%
  rename(term = predictor_var, p_value = `Pr(>F)`) %>%
  select(term, p_value) %>%
  filter(p_value < 0.05) %>%
  arrange(p_value)
# For five features (Pop Time, Strike Rate, Strike Rate in Zone 18, Strike
# Rate in Zone 19, and Runs From Extra Strikes), the ANOVA test showed the
# cubic fits significantly improved over the linear fits. The small
# p-values give little support to the null hypothesis that the SLR models
# fit equally well, so there is evidence of non-linear association between
# Catcher Defensive Adjustment (fraa_adj) and these predictors.
# (The original comment referred to "crime rate" — a leftover from the
# Boston-housing template this analysis was adapted from.)
# Same split, repackaged as a one-row tibble of list columns for this part
# of the linear-regression analysis.
# NOTE(review): the original re-ran sample_frac() inside tibble(), which
# (a) drew a NEW random split, contradicting the "same split" comment, and
# (b) made the second expression's `catchers_mod_comp_dat` resolve to the
# just-created LIST column, so setdiff() received a list rather than a
# data frame. Reusing the existing split objects fixes both problems.
catchers_split <- tibble(
  catchers_mod_comp_dat = list(catchers_mod_comp_dat),
  catchers_mod_bldg_dat = list(catchers_mod_bldg_dat)
)

# Predictor names for the full-model formula.
pred_variables <- names(catchers_mod_bldg_dat) %>%
  setdiff(c("fraa_adj"))

# Two candidate linear models: all predictors, and Runs From Extra Strikes
# alone.
lm_models <- tibble(
  fmla = c(
    str_c("fraa_adj ~ ", str_c(pred_variables, collapse = " + ")),
    "fraa_adj ~ runs_extra_strikes"
  ),
  model_name = c("all_vars", "runs_extra_strikes")
) %>%
  mutate(fmla = map(fmla, as.formula))

# Variance of the outcome on the held-out set, used to report the
# proportion of variance explained.
test_catchers_var <- catchers_split %>%
  pluck("catchers_mod_bldg_dat", 1, "fraa_adj") %>%
  var()

# Fit each candidate on the comparison set, evaluate on the building set.
catchers_lm_fits <- catchers_split %>%
  crossing(lm_models) %>%
  mutate(
    model_fit = map2(fmla, catchers_mod_comp_dat, lm),
    test_mse = map2_dbl(model_fit, catchers_mod_bldg_dat, modelr::mse),
    prop_var_explained = 1 - (test_mse / test_catchers_var)
  )

catchers_lm_fits %>%
  select(model_name, test_mse, prop_var_explained) %>%
  arrange(test_mse)
# A linear model with all predictors explains about 93.5% of the variance
# in Catcher Defensive Adjustment with a small test MSE; the model with
# only Runs From Extra Strikes explains about 64.4% with a large MSE.

# Linear Model Selection and Regularization -------------------------------

# Ridge Regression ---------------------------------------------------------
# Outcome variance on the held-out set; useful later.
test_fraa_adj_var <- catchers_split %>%
  pluck("catchers_mod_bldg_dat", 1, "fraa_adj") %>%
  var()

# Lambda grid to search for ridge regression (200 values).
lambda_grid <- 10^seq(-2, 10, length = 200)

# Ridge regression with 10-fold CV (glmnetUtils formula interface).
ridge_cv <- catchers_mod_bldg_dat %>%
  cv.glmnet(
    formula = fraa_adj ~ .,
    data = .,
    alpha = 0,
    nfolds = 10,
    lambda = lambda_grid
  )
# Check the CV-error curve.
plot(ridge_cv)

# Ridge's best lambdas.
ridge_lambda_min <- ridge_cv$lambda.min
ridge_lambda_1se <- ridge_cv$lambda.1se

# Lasso --------------------------------------------------------------------
# Lasso with 10-fold CV (default lambda path).
lasso_cv <- catchers_mod_bldg_dat %>%
  cv.glmnet(
    formula = fraa_adj ~ .,
    data = .,
    alpha = 1,
    nfolds = 10
  )
# Check the CV-error curve.
plot(lasso_cv)

# Lasso's best lambdas.
lasso_lambda_1se <- lasso_cv$lambda.1se
lasso_lambda_min <- lasso_cv$lambda.min

# Refit ridge/lasso at the selected lambdas on the comparison set, then
# reshape to one row per method.
catchers_glmnet <- catchers_split %>%
  mutate(
    ridge_min = map(catchers_mod_comp_dat,
                    ~ glmnet(fraa_adj ~ ., data = .x, alpha = 0,
                             lambda = ridge_lambda_min)),
    ridge_1se = map(catchers_mod_comp_dat,
                    ~ glmnet(fraa_adj ~ ., data = .x, alpha = 0,
                             lambda = ridge_lambda_1se)),
    lasso_min = map(catchers_mod_comp_dat,
                    ~ glmnet(fraa_adj ~ ., data = .x, alpha = 1,
                             lambda = lasso_lambda_min)),
    lasso_1se = map(catchers_mod_comp_dat,
                    ~ glmnet(fraa_adj ~ ., data = .x, alpha = 1,
                             lambda = lasso_lambda_1se))
  ) %>%
  pivot_longer(
    cols = c(-catchers_mod_bldg_dat, -catchers_mod_comp_dat),
    names_to = "method",
    values_to = "fit"
  )

# Test error and R-squared for the ridge and lasso fits.
catchers_glmnet_error <- catchers_glmnet %>%
  mutate(
    pred = map2(fit, catchers_mod_bldg_dat, predict),
    test_mse = map2_dbl(catchers_mod_bldg_dat, pred,
                        ~ mean((.x$fraa_adj - .y)^2))
  ) %>%
  unnest(test_mse) %>%
  select(method, test_mse) %>%
  mutate(prop_explained = 1 - test_mse / test_fraa_adj_var) %>%
  arrange(test_mse)
catchers_glmnet_error
# lasso_min explained about 94% of the variance in Catcher Defensive
# Adjustment using all the variables, with the lowest test MSE.
# ridge_min, ridge_1se, and lasso_1se followed in that order, with similar
# numbers ranging from 92.6% to 93.7% of the variance explained and 3.16 to
# 3.73 in test MSE.

# Inspect/compare model coefficients across the four regularized fits.
catchers_glmnet %>%
  pluck("fit") %>%
  map(~ coef(.x) %>%
        as.matrix() %>%
        as.data.frame() %>%
        rownames_to_column("name")) %>%
  reduce(full_join, by = "name") %>%
  # blank out coefficients shrunk exactly to zero
  mutate_if(is.double, ~ if_else(. == 0, NA_real_, .)) %>%
  rename(ridge_min = s0.x, ridge_1se = s0.y,
         lasso_min = s0.x.x, lasso_1se = s0.y.y) %>%
  knitr::kable(digits = 3)
# Both lasso models retained Runs From Extra Strikes, which is encouraging
# for the notion that it could replace CSAA. As expected, FRAA remained in
# all four models, being a key component of the CDA formula.

# Best Subset Selection ----------------------------------------------------

# Helper functions.

# Predict from a regsubsets fit: rebuild the model matrix for `new_data`
# from `fmla`, keep the columns of the chosen sub-model `model_id`, and
# multiply by its coefficients.
predict_regsubset <- function(object, fmla, new_data, model_id) {
  if (!is.data.frame(new_data)) {
    new_data <- as_tibble(new_data)
  }
  obj_formula <- as.formula(fmla)
  coef_vector <- coef(object, model_id)
  x_vars <- names(coef_vector)
  mod_mat_new <- model.matrix(obj_formula, new_data)[, x_vars]
  pred <- as.numeric(mod_mat_new %*% coef_vector)
  return(pred)
}

# Test MSE for every sub-model size of a regsubsets fit on `test_data`.
# NOTE(review): the original file defined this function twice, verbatim;
# the duplicate definition has been removed.
test_mse_regsubset <- function(object, fmla, test_data) {
  num_models <- object %>%
    summary() %>%
    pluck("which") %>%
    dim() %>%
    .[1]
  test_mse <- rep(NA, num_models)
  # pull the response column named on the LHS of the formula
  obs_target <- test_data %>%
    as_tibble() %>%
    pull(!!as.formula(fmla)[[2]])
  for (i in 1:num_models) {
    pred <- predict_regsubset(object, fmla, test_data, model_id = i)
    test_mse[i] <- mean((obs_target - pred)^2)
  }
  tibble(model_index = 1:num_models, test_mse = test_mse)
}

# 10-fold CV over best-subset model sizes.
catchers_bestsubset_cv <- catchers_mod_bldg_dat %>%
  crossv_kfold(10, id = "folds") %>%
  mutate(
    fmla = "fraa_adj ~ .",
    model_fits = map2(fmla, train,
                      ~ regsubsets(as.formula(.x), data = .y, nvmax = 16)),
    model_fold_mse = pmap(list(model_fits, fmla, test), test_mse_regsubset)
  )

catchers_bestsubset_cv %>%
  unnest(model_fold_mse) %>%
  group_by(model_index) %>%
  summarise(test_mse = mean(test_mse)) %>%
  arrange(test_mse)
# The best subset selection model included three variables — definitely
# including Fielding Runs Above Average — with a test MSE lower than the
# linear model's and higher than the lasso/ridge models'.

# Principal Components Regression and Partial Least Squares -----------------
pcr_cv_catchers <- catchers_mod_bldg_dat %>%
  pcr(fraa_adj ~ ., data = ., scale = TRUE, validation = "CV")
# Root mean squared error of prediction vs number of components.
validationplot(pcr_cv_catchers)
# Mark the candidate at 13 components.
abline(v = 13)
# Mean squared error view; candidate at 7 components.
validationplot(pcr_cv_catchers, val.type = "MSEP")
abline(v = 7)
pcr_cv_catchers %>%
  summary()

pls_cv_catchers <- catchers_mod_bldg_dat %>%
  plsr(fraa_adj ~ ., data = ., scale = TRUE, validation = "CV")
validationplot(pls_cv_catchers)
abline(v = 7)
validationplot(pls_cv_catchers, val.type = "MSEP")
abline(v = 5)

# Refit the selected component counts on the comparison set.
# NOTE(review): the original built pls_5m with pcr(); the name, the PLS
# validation plots above, and the written conclusions all refer to a PLS
# model, so plsr() is used here.
catchers_dim_reduct <- catchers_split %>%
  mutate(
    pcr_13m = map(catchers_mod_comp_dat,
                  ~ pcr(fraa_adj ~ ., data = .x, ncomp = 13)),
    pcr_7m = map(catchers_mod_comp_dat,
                 ~ pcr(fraa_adj ~ ., data = .x, ncomp = 7)),
    pls_5m = map(catchers_mod_comp_dat,
                 ~ plsr(fraa_adj ~ ., data = .x, ncomp = 5))
  ) %>%
  pivot_longer(
    cols = c(-catchers_mod_bldg_dat, -catchers_mod_comp_dat),
    names_to = "method",
    values_to = "fit"
  )

# Held-out test MSE for each dimension-reduction fit, rendered as a table.
catchers_dim_error <- catchers_dim_reduct %>%
  mutate(
    pred = pmap(list(fit, catchers_mod_bldg_dat, c(13, 7, 5)), predict),
    test_mse = map2_dbl(catchers_mod_bldg_dat, pred,
                        ~ mean((.x$fraa_adj - .y)^2))
  ) %>%
  unnest(test_mse) %>%
  select(method, test_mse) %>%
  arrange(test_mse) %>%
  knitr::kable(digits = 3)
catchers_dim_error
# The PCR model with 13 components performed best of the two model types in
# this section, with a test MSE similar to the earlier models and the same
# as the linear regression model. The PLS model with 5 components performed
# poorly by comparison, with a test MSE of about 11.

# Conclusion ----------------------------------------------------------------
# The candidate models were chosen from those used throughout the course
# with the Boston dataset; this project's final dataset ended up very
# similar in size and number of variables, so similar methods seemed
# appropriate. No variable gave a binary response, so classification models
# were not used. The full linear regression model served as a simple,
# reliable reference for the other candidates. The final model was selected
# on lowest test MSE and highest proportion of variance in CDA explained:
# lasso_min performed best, with a test MSE of 3.16 and 93.7% of the
# variance in CDA explained. That model retained Runs From Extra Strikes —
# possible evidence that this Statcast metric can replace the Baseball
# Prospectus metric for a more accurate prediction of a catcher's overall
# defense — and also kept Strike Rate, Blocking Runs, and Strike Zones 14
# and 16. Framing is an integral part of evaluating the quality of a
# catcher's defense and of comparing catchers.
# Statcast providing state-of-the-art technology is a plus that allows baseball analysts to further their studies when it comes to defense, a difficult avenue to quantify in the game of baseball.
# What we can do to improve these metrics is to keep adding data for this season (hopefully there is a season) and the ones to come, as Statcast is simultaneously improving, as are the statistics I covered in this project.
# Other entities such as FanGraphs and Baseball have new, specific metrics for catchers that could be potential variables for the dataset.
# Lastly, another model I could have used was Polynomial Regression, and I could have used Cross-Validation and Validation
/R/Belisario_Hugo_Final_Project.R
no_license
HugoBelisario/CatcherDefense
R
false
false
38,750
r
# Final Project -----------------------------------------------------------
# Hugo Belisario
# STAT 301-2 Data Science
# 17 March 2020
# Catcher Defense during the Statcast Era

# Data Scope and Identification -------------------------------------------
# The datasets used for this Final Project came from Baseball Prospectus (BP), an organization that runs a website devoted to sabermetric analysis of baseball, and BaseballSavant, a site dedicated to providing player matchups, Statcast metrics, and advanced statistics in a simple and easy-to-view way.
# Baseball Prospectus recently built a new Leaderboards platform that is currently in Beta and includes Hitting, Pitching, and Catcher Defense Leaderboards.
# BP has its own way of calculating an overall defensive measure for catchers called Catcher Defensive Adjustment or Fielding Runs Above Average Adjustment.
# However, they do not possess Statcast metrics for other aspects of the position besides traditional fielding, such as pitch framing, pop time, and exchange.
# BaseballSavant maintains Statcast Leaderboards for Catcher Pop Time and Catcher Framing.
# The question of interest is the following: Can we predict a catcher's overall defensive value using Statcast data to replace metrics used by Baseball Prospectus?
# The notion behind my curiosity is that since Statcast is the state-of-the-art tracking technology implemented by the MLB, it should serve as a more accurate way of predicting a catcher's defensive performance than third-party measurements.
# Statcast and Baseball Savant do not provide an overall defensive measure for catchers, but BP does.
# Hence, I believe that although there are similarities between the statistics provided by both entities, using Statcast data can give us more insight into how these catchers have actually performed, utilizing BP's formula for Catcher Defensive Adjustment.
# This project is regression-based since I am working with continuous variables. 
# Background ------------------------------------------------------------ # Catcher's influence on defense is more subtle than the other positions in baseball. # You cannot judge a catcher's defensive performance based on their number of steals allowed or their caught stealing rate because the pitcher and runner play important parts in determining the probability of a steal attempt, let alone a successful one. # Just like your typical fielders, catchers need to put tags on runners trying to score, field bunts, and chase pop flies. # However, baseball is a game of inches. # Any small advantage or edge that teams can improve upon or add to their organization, they will try to maximize it. # Framing and Pop Time/Exchange are those tools that could win a team ballgames, postseason berths, and titles. # A significant component of a catcher's skillset is the ability to prevent runners from advancing and scoring on wild pitches and passed balls by blocking. # The best way to prevent a steal is to prevent the runner from attempting it. # Pop Time measures the time from the moment the pitch hits the catcher’s mitt to the moment the ball reaches the fielder’s projected receiving point at the center of the base. # Pop Time is a combination of exchange time (how quickly the catcher releases the ball, measured in seconds) and arm strength (velocity of throw, in MPH). # Pitch framing for catchers involves receiving the ball and catching it in a manner that favors the pitcher with a strike located outside of standard zones, but in the specific umpire's zone. # Catcher framing is the art of a catcher receiving a pitch in a way that makes it more likely for an umpire to call it a strike. # Baseball Savant breaks down the catcher’s view into eight zones around the strike zone and shows the called strike percentage of all non-swings in that zone. # Framing stats measure how many runs catchers save based on how many extra strikes they provide for their pitchers. 
# FRAA is the main driver of overall defense; the following metrics are
# additions to a catcher's contributions, but still valuable:
# FRAA, Fielding Runs Above Average — traditional fielding ability
#   (i.e. bunts, pop flies).
# CSAA, Called Strikes Above Average — how BP measures a catcher's ability
#   to frame pitches; CSAA Runs puts a run value on framed called strikes.
# EPAA, Errant Pitches Above Average — a catcher's ability to block wild
#   pitches and prevent passed balls.
# SRAA, Swipe Rate Above Average — a catcher's ability to throw runners out.
# TRAA, Takeoff Rate Above Average — stolen base attempts above league
#   average.
# PORAA, Pick Off Runs Above Average — run value placed on pick-offs.
# Baseball Prospectus calculates its overall defensive measure by adding
# Framing Runs (CSAA Runs), Blocking Runs (EPAA Runs), and Throwing Runs
# (SRAA Runs + TRAA Runs + PORAA Runs) to Fielding Runs Above Average
# (FRAA). As stated above, this project replaces CSAA with Runs From Extra
# Strikes while also observing the impact other Statcast metrics — Strike
# Rates, Pop Times, and Exchange times — have on CDA.

# Load Packages -------------------------------------------------------------
library(tidyverse)
library(janitor)
library(skimr)
library(corrplot)
library(broom)
library(modelr)
library(ggfortify)
library(knitr)
library(GGally)
library(rsample)
library(leaps)       # best subset selection
library(glmnet)      # ridge & lasso
library(glmnetUtils) # improves working with glmnet
library(pls)

# Fix the RNG so the data splits are reproducible.
set.seed(3)

# Baseball Prospectus Catcher Defense ----------------------------------------
# Statcast tracking technology was set up in all 30 stadiums starting in
# 2015, so only seasonal data from 2015 onward is used. After reading in the
# Baseball Prospectus .csv files for 2015-2019, some unnecessary variables
# are removed and a 'year' variable is created for each dataset.
# The 'year' variable is later used to join datasets. The same process is
# repeated for every season.

# NOTE(review): the original selection was
#   select(-playerid, -version_date, team, lg)
# with the comment claiming team and lg were removed. Mixing negative and
# bare positive selections does NOT drop team/lg — they remain in the
# result — and the later cleaning step relies on them being present (it
# drops lg, then team, by name). The equivalent explicit form is used here,
# with the comment corrected: only playerid and version_date are dropped.

# 2019
bp_catchers_2019 <- read_csv("data/bpcatchers2019.csv") %>%
  clean_names() %>%
  # drop id/bookkeeping columns; team and lg are kept (removed later)
  select(-playerid, -version_date) %>%
  mutate(year = 2019)
# Split the name column into first_name / last_name on the first space.
bp_catchers_2019 <- extract(
  bp_catchers_2019, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
bp_catchers_2019 <- bp_catchers_2019 %>%
  # leading with name and year makes assembling the final dataset easier
  select(first_name, last_name, year, everything())

# 2018
bp_catchers_2018 <- read_csv("data/bpcatchers2018.csv") %>%
  clean_names() %>%
  select(-playerid, -version_date) %>%
  mutate(year = 2018)
bp_catchers_2018 <- extract(
  bp_catchers_2018, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
bp_catchers_2018 <- bp_catchers_2018 %>%
  select(first_name, last_name, year, everything())

# 2017
bp_catchers_2017 <- read_csv("data/bpcatchers2017.csv") %>%
  clean_names() %>%
  select(-playerid, -version_date) %>%
  mutate(year = 2017)
bp_catchers_2017 <- extract(
  bp_catchers_2017, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
bp_catchers_2017 <- bp_catchers_2017 %>%
  select(first_name, last_name, year, everything())

# 2016
bp_catchers_2016 <- read_csv("data/bpcatchers2016.csv") %>%
  clean_names() %>%
  select(-playerid, -version_date) %>%
  mutate(year = 2016)
bp_catchers_2016 <- extract(
  bp_catchers_2016, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
bp_catchers_2016 <- bp_catchers_2016 %>%
  select(first_name, last_name, year, everything())

# 2015
bp_catchers_2015 <- read_csv("data/bpcatchers2015.csv") %>%
  clean_names() %>%
  select(-playerid, -version_date) %>%
  mutate(year = 2015)
bp_catchers_2015 <- extract(
  bp_catchers_2015, name,
  c("first_name", "last_name"), "([^ ]+) (.*)"
)
bp_catchers_2015 <- bp_catchers_2015 %>%
  select(first_name, last_name, year, everything())

# add rows from each dataframe together and formed bp_catchers using rbinds
bp_catchers <- rbind(bp_catchers_2015, bp_catchers_2016, bp_catchers_2017, bp_catchers_2018, bp_catchers_2019)
# arrange the names and age so that players with multiple appearances in the featured years have their rows in order of age
bp_catchers <- bp_catchers %>%
  arrange(first_name, last_name, age)

# Baseball Savant Statcast Catcher Framing ------------------------------------------------
# Similar to the previous set of data, I removed unnecessary variables that could have potentially caused trouble when analyzing the data.
# The Catcher Framing dataset included an MLB average of all the metrics as the first row, this was consequently removed.
# Later on, I added the rows together and arranged them by name of the player.
#
# NOTE(review): unlike the BP frames, no `year` column is created here, yet
# the later season joins use `by = c("first_name", "last_name", "year")` —
# presumably the framing CSVs already carry a year column; verify.

# 2019
catcher_framing_2019 <- read_csv("data/catcher-framing2019.csv") %>%
  clean_names() %>%
  # remove fielder_2 similar to playerid in Baseball Prospectus dataset
  select(-fielder_2) %>%
  # order by name
  select(first_name, last_name, everything())
# remove first row (the league-average summary row)
catcher_framing_2019 <- catcher_framing_2019[-1, ]

# 2018
catcher_framing_2018 <- read_csv("data/catcher-framing2018.csv") %>%
  clean_names() %>%
  select(-fielder_2) %>%
  select(first_name, last_name, everything())
# remove first row
catcher_framing_2018 <- catcher_framing_2018[-1, ]

# 2017
catcher_framing_2017 <- read_csv("data/catcher-framing2017.csv") %>%
  clean_names() %>%
  select(-fielder_2) %>%
  select(first_name, last_name, everything())
# remove first row
catcher_framing_2017 <- catcher_framing_2017[-1, ]

# 2016
catcher_framing_2016 <- read_csv("data/catcher-framing2016.csv") %>%
  clean_names() %>%
  select(-fielder_2) %>%
  select(first_name, last_name, everything())
# remove first row
catcher_framing_2016 <- catcher_framing_2016[-1, ]

# 2015
catcher_framing_2015 <- read_csv("data/catcher-framing2015.csv") %>%
  clean_names() %>%
  select(-fielder_2) %>%
  select(first_name, last_name, everything())
# remove first row
catcher_framing_2015 <- catcher_framing_2015[-1, ]

# combine all five datasets to form one, using rbinds to add the rows
catcher_framing <- rbind(catcher_framing_2015, catcher_framing_2016, catcher_framing_2017, catcher_framing_2018, catcher_framing_2019)
# arrange by name
catcher_framing <- catcher_framing %>%
  arrange(first_name, last_name)

# Statcast Catcher Pop Time and Exchange ----------------------------------
# The dataset for Pop Time and Exchange was the most complicated to configure into one that could smoothly join the rest.
# Below each command, I explain it's purpose as it pertains to forming the final dataset.

# 2019
catcher_poptime_2019 <- read_csv("data/poptime2019.csv") %>%
  clean_names() %>%
  # column for player names is called catcher
  rename(name = catcher) %>%
  # player_id and team_id are useless for this analysis
  select(-player_id, -team_id) %>%
  # add year to join poptime with framing data
  mutate(year = 2019) %>%
  # arrange columns to show age and year
  select(name, age, year, everything())
# Separate name column into two columns, first_name and last_name.
catcher_poptime_2019 <- extract(catcher_poptime_2019, name, c("first_name", "last_name"), "([^ ]+) (.*)")
# Use inner_join to unite both Baseball Savant Catching Leaderboards into one dataframe using keys name and year.
# Join pop-time + framing per season, then stack seasons, then join with BP.
catchers_statcast_2019 <- catcher_poptime_2019 %>%
  inner_join(catcher_framing_2019, by = c("first_name", "last_name", "year"))

# 2018
catcher_poptime_2018 <- read_csv("data/poptime2018.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2018) %>%
  select(name, age, year, everything())
catcher_poptime_2018 <- extract(catcher_poptime_2018, name, c("first_name", "last_name"), "([^ ]+) (.*)")
catchers_statcast_2018 <- catcher_poptime_2018 %>%
  inner_join(catcher_framing_2018, by = c("first_name", "last_name", "year"))

# 2017
catcher_poptime_2017 <- read_csv("data/poptime2017.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2017) %>%
  select(name, age, year, everything())
catcher_poptime_2017 <- extract(catcher_poptime_2017, name, c("first_name", "last_name"), "([^ ]+) (.*)")
catchers_statcast_2017 <- catcher_poptime_2017 %>%
  inner_join(catcher_framing_2017, by = c("first_name", "last_name", "year"))

# 2016
catcher_poptime_2016 <- read_csv("data/poptime2016.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2016) %>%
  select(name, age, year, everything())
catcher_poptime_2016 <- extract(catcher_poptime_2016, name, c("first_name", "last_name"), "([^ ]+) (.*)")
catchers_statcast_2016 <- catcher_poptime_2016 %>%
  inner_join(catcher_framing_2016, by = c("first_name", "last_name", "year"))

# 2015
catcher_poptime_2015 <- read_csv("data/poptime2015.csv") %>%
  clean_names() %>%
  rename(name = catcher) %>%
  select(-player_id, -team_id) %>%
  mutate(year = 2015) %>%
  select(name, age, year, everything())
catcher_poptime_2015 <- extract(catcher_poptime_2015, name, c("first_name", "last_name"), "([^ ]+) (.*)")
catchers_statcast_2015 <- catcher_poptime_2015 %>%
  inner_join(catcher_framing_2015, by = c("first_name", "last_name", "year"))

# combine all five datasets to form one that represents the metrics provided by Statcast
catchers_statcast <- rbind(catchers_statcast_2015, catchers_statcast_2016, catchers_statcast_2017, catchers_statcast_2018, catchers_statcast_2019)
catchers_statcast <- catchers_statcast %>%
  arrange(first_name, last_name, age)

# join the Statcast and Baseball Prospectus datasets
catchers <- catchers_statcast %>%
  inner_join(bp_catchers, by = c("first_name", "last_name", "year"))

# Clean Final Dataset -----------------------------------------------------
# remove lg since league is not an important variable in the dataset and age.y is a repeat of the age column
# (age.x = Statcast pop-time age, age.y = BP age; lg survived the earlier
# BP select — see the NOTE at the BP read-in step)
catchers <- catchers %>%
  select(-lg, -age.y) %>%
  select(first_name, last_name, age.x, year, team, everything())
# rename age.x to just "age"
catchers <- rename(catchers, age = age.x)

# remove name, team to avoid any problems with model building
# remove pop time to second base metrics since they produce question marks in the results when running corrplots() and models.
# remove throwing runs, framing runs since we are replacing those with Statcast data for those.
catchers <- catchers %>%
  select(-c(first_name, last_name, team, age, year, pop_3b_sba_count, po_runs, epaa_chances, epaa, n_called_pitches,
            pop_2b_cs, pop_2b_sb, pop_2b_sba_count, pop_3b_sba, pop_3b_cs, pop_3b_sb, csaa_runs, csaa, csaa_chances,
            csaa_sd, csaa_runs_sd, sraa, sraa_chances, traa, traa_runs, po_chances))
# By the end of this cleaning, the dataset contains 505 observations and 16 variables.
catchers <- catchers %>%
  na.omit
# When omitting NA values from the dataset (mostly from missing numbers for pop time to second base), the dataset is left with 481 observations.
# This means that there were 24 missing values.
# Codebook ---------------------------------------------------------------- # maxeff_arm_2b_3b_sba: arm strength measured on "max effort" throws, or the average above a player's 90th percentile performance, in miles per hour (MPH) # exchange_2b_3b_sba: exchange time measured in seconds # pop_2b_sba: pop time to second base in seconds # runs_extra_strikes: strikes to runs saved on a .125 run/strike basis, and includes park and pitcher adjustments # strike_rat: cumulative total of all zones, percentages of called strikes from pitches that were not swung at in zones 11-17. # strike_rate_11: percentages of called strikes from all called pitches in zone 11 # strike_rate_12: percentages of called strikes from all called pitches in zone 12 # strike_rate_13: percentages of called strikes from all called pitches in zone 13 # strike_rate_14: percentages of called strikes from all called pitches in zone 14 # strike_rate_16: percentages of called strikes from all called pitches in zone 16 # strike_rate_17: percentages of called strikes from all called pitches in zone 17 # strike_rate_18: percentages of called strikes from all called pitches in zone 18 # strike_rate_19: percentages of called strikes from all called pitches in zone 19 # fraa_adj: catcher defensive adjustment or fielding runs above average adjustment # fraa: fielding runs above average # epaa_runs: errant pitches above average also known as blocking runs # The First Random Data Split ----------------------------------------------- # The data is not very large which led me to use two data splits. # My first split provided 48 observations to run some EDA methods. 
# split data for EDA and modeling sets, 10% for EDA and 90% for modeling
# (set.seed at the top of the script makes this draw reproducible)
catchers_eda_set <- catchers %>% sample_frac(0.1)
catchers_modeling_data <- catchers %>% setdiff(catchers_eda_set)

# Exploratory Data Analysis -----------------------------------------------
# My initial step to explore the dataset was to skim through the variables using skim_without_charts.
catchers_eda_set %>% skim_without_charts()
# Although it may not be the MLB average velocity for catchers throwing to 2nd base, it was surprising to see that the average velocity of this EDA set was 81.5 MPH.
# Catcher's arms range from the mid-70s to the high 80s which explains part of the trade-offs teams have to deal with.
# Some catchers may have weaker arms but can frame pitches better or hit the ball better.
# This particular group of catchers seemed to have high percentages when it came to getting the call for pitches in zones 14 and 16.
# It also seemed to be able to prevent runs from scoring due to wild pitches and passed balls.
# Definitely a superb framing group since the average was 2.67 for Runs from Extra Strikes.
# Pop Time and Exchange times were average by MLB standards.

# Pairwise correlations of all (numeric) columns in the EDA set.
catchers_eda_set %>%
  cor() %>%
  corrplot::corrplot()
# Strike Rate seems to have a strong positive relationship with the bottom of the Shadow Zone (16-19).
# This could signal that framing low pitches is a major factor in determining your overall strike rate.
# Strike Rate, as stated previously is a cumulative total percentage based on the Shadow Zone percentages.
# Although some zones tend to correlate slightly positive with the overall metric, they are weak signs of multicollinearity meaning a particular zone cannot predict the overall strike rate.
# As expected, max effort velocity to 2nd or 3rd base shares a negative relationship with Pop Time because the faster you throw the less time it takes for the baseball to reach the receiver.
# Max effort velocity also seems to have a slight positive relationship with our overall defensive measure, fraa_adj and fielding runs above average.
# It is easier to get people out on bunts and plays to every base if you have a strong arm.
# Exchange shares a solid positive relationship with max efforts velocity implying that catchers with higher velocities tend to have longer exchange times in seconds.
# One idea that came about when observing this is that players with strong arms that can reach higher velocities probably take a longer time transfering the baseball from their glove to a more comfortable throwing position to produce a stronger throw.
# In the meanwhile, it could be the case that catchers with slower velocities tend to release the ball quicker in order to have a chance at catching the runner.
# Runs from Extra Strikes has a robust relationship with both fraa_adj and fraa.
# This is encouraging to see since the Statcast metric is replacing Called Strikes Above Average from Baseball Prospectus.
# The higher this metric is, the higher the value as a catcher in overall defense.
# Blocking Runs seems to have very few notable relationships with the other variables.
# However, the correlation plot shows a strong positive relationship with fraa_adj and fraa.
# The more passed balls and wild pitches you prevent, the higher your value on defense is a possible conclusion to that finding.

# Arm strength vs. pop time to 2B (loess smoother).
# According to the correlation plot, maxeff_arm_2b_3b_sba has a negative relationship with pop time to second base.
# The regression line seems to veer off in the middle of the cluster.
# I assume that although these catchers have around the same velocity to the bases, they differ in exchange time.
ggplot(data = catchers_eda_set, mapping = aes(x = maxeff_arm_2b_3b_sba, y = pop_2b_sba)) +
  geom_point() +
  geom_smooth(se = FALSE)

# Since Blocking Runs was the other component from the Catcher Defensive Adjustment formula, I wanted to isolate the metric and regress it on fraa_adj.
# Most of the points seem to be clustered between -1.5 and 2.
# The outliers on both ends being removed could prove that the more runs a catcher prevents through blocking wild pitches and avoiding passed balls adds somewhat significant value to overall defense.
ggplot(data = catchers_eda_set, mapping = aes(x = epaa_runs, y = fraa_adj)) +
  geom_point() +
  geom_smooth(se = FALSE)

# These two clearly share a strong positive relationship and seems that it could be stronger when removing a couple of points far away from the majority.
# The more runs you save from framing pitches and giving your pitchers more opportunities for outs, the more your overall defensive value increases.
ggplot(data = catchers_eda_set, mapping = aes(x = runs_extra_strikes, y = fraa_adj)) +
  geom_point() +
  geom_smooth(se = FALSE)

# The Second Random Data Split (Splitting Analysis Dataset) ---------------
# Since the dataset was not large, I chose to perform a second random data split in order to attain more accurate predictive results.
# Chose to split the data 60%-40% for comparing candidate models and model building, respectively.
# Test set for comparing candidate models, selecting final model
catchers_mod_comp_dat <- catchers_modeling_data %>% sample_frac(0.60)
# Train set for candidate model building
catchers_mod_bldg_dat <- catchers_modeling_data %>% setdiff(catchers_mod_comp_dat)

# Simple Linear Regression ------------------------------------------------
# Setup formulas for simple linear regressions: one "fraa_adj ~ <predictor>"
# formula per candidate predictor, plus the full model "fraa_adj ~ .".
predictor_var <- catchers_mod_bldg_dat %>% names() %>% setdiff("fraa_adj")
fmla <- paste("fraa_adj ~", predictor_var)
# adding full model
predictor_var <- c(predictor_var, "all_vars")
fmla <- c(fmla, "fraa_adj ~ .")

# Fit and store the models (one row per formula; model_fit is a list column)
catchers_models <- tibble(
  data = list(catchers_mod_bldg_dat),
  predictor_var,
  fmla
) %>%
  mutate(model_fit = map2(fmla, data, lm),
         model_type = if_else(predictor_var == "all_vars", "full", "slr"))

# Model fit summaries/information (broom::glance/tidy/augment + confint_tidy
# bound onto the tidy output)
catchers_models <- catchers_models %>%
  mutate(mod_glance = map(model_fit, glance),
         mod_tidy = map(model_fit, tidy),
         add_tidy = map(model_fit, confint_tidy),
         mod_tidy = map2(mod_tidy, add_tidy, bind_cols),
         mod_augment = map2(model_fit, data, augment)) %>%
  select(-add_tidy)

# Models in which there is a statistically significant association between the predictor and the response.
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(model_type != "full", term != "(Intercept)") %>%
  select(term, estimate, p.value) %>%
  arrange(p.value) %>%
  filter(p.value < 0.05)
# Fielding Runs Above Average, Runs From Extra Strikes, Strike Rate, Strike Rate in Zone 18, Blocking Runs, Strike Rate in Zone 19, Strike Rate in Zone 17, and Strike Rate in Zone 16 had statistically significant estimates.

# Plotting investigating linear relationship with FRAA_ADJ/CDA.
catchers_mod_bldg_dat %>%
  pivot_longer(cols = -fraa_adj, names_to = "predictor", values_to = "value") %>%
  ggplot(aes(x = value, y = fraa_adj)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +
  coord_cartesian(ylim = c(0, 25)) +
  facet_wrap(. ~ predictor, scales = "free_x")
# A table that details the relationship between all of the variables and Catcher Defensive Adjustment in scatterplots with linear regression lines.

# Investigating full model
catchers_models %>%
  filter(model_type == "full") %>%
  unnest(mod_tidy) %>%
  select(-predictor_var, -fmla, -model_type)

# Identify significant slope/linear parameters in full model
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(model_type == "full", term != "(Intercept)") %>%
  select(term, estimate, p.value) %>%
  arrange(p.value) %>%
  filter(p.value < 0.05)
# In the full model, Fielding Runs Above Average, Runs From Extra Strikes, Blocking Runs, and Strike Rate in Zone 14 has a p-value below 0.05 making it a statistically significant.
# FRAA being in this list does not come as a surprise since most of the CDA is attributed to Fielding Runs Above Average.

# Scatterplot to compare SLR to Full estimates
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(term != "(Intercept)") %>%
  select(model_type, term, estimate) %>%
  pivot_wider(names_from = model_type, values_from = estimate) %>%
  ggplot(aes(full, slr)) +
  geom_point() +
  geom_abline(color = "blue", linetype = "dashed")
# Most of the coefficients in the full model are on a similar scale as the simple linear regression coefficients with the exception of Exchange, Pop Time, and Fielding Runs Above Average.

# Alternative to scatterplot, a plot of paired confidence intervals for each of the predictors.
catchers_models %>%
  unnest(mod_tidy) %>%
  filter(term != "(Intercept)") %>%
  ggplot(aes(model_type, estimate)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high)) +
  geom_hline(yintercept = 0, color = "red", linetype = "dashed") +
  facet_wrap(. ~ term, scales = "free_x") +
  coord_flip()
# There were some major differences between the SLR and Full model for Strike Rates in Zones 17, 18, and 19.
# In addition, the same thing occurred with Blocking Runs (EPAA Runs), Strike Rate, and Runs From Extra Strikes.
# Strike Rate actually differed in signs as Strike Rate was positive in SLR and negative in the full.

# Looking for evidence of non-linear associations using cubic regression model.
# Setup formulas for cubic models: one "fraa_adj ~ poly(<predictor>, 3)" per predictor.
predictor_var <- catchers_mod_bldg_dat %>% names() %>% setdiff(c("fraa_adj"))
fmla <- paste0("fraa_adj ~ poly(", predictor_var, ", 3)")

# Fit and store the cubic models
cubic_models <- tibble(
  data = list(catchers_mod_bldg_dat),
  predictor_var,
  fmla
) %>%
  mutate(cubic_fit = map2(fmla, data, lm))

# ANOVA comparison of each SLR fit against its cubic counterpart.
catchers_models %>%
  # drop full model from model database
  filter(model_type != "full") %>%
  select(predictor_var, model_fit) %>%
  # join catchers model database with new cubic models
  left_join(cubic_models, by ="predictor_var") %>%
  # add comparison column linear vs cubic fits
  mutate(anova_test = map2(model_fit, cubic_fit, anova)) %>%
  # unwrap anova results - h_0: submodel does just as well as larger
  unnest(anova_test) %>%
  # p-value is really prob F RV bigger than observed F
  drop_na() %>%
  # p-value is really prob F RV bigger than observed F stat
  rename(term = predictor_var, p_value = `Pr(>F)`) %>%
  select(term, p_value) %>%
  filter(p_value < 0.05) %>%
  arrange(p_value)
# For five features (Pop Time, Strike Rate, Strike Rate in Zone 18, Strike Rate in Zone 19, and Runs From Extra Strikes), an ANOVA test showed that the fits for these features’ cubic model fits were significantly improved over the linear models.
# The small p-values indicate that there is little evidence to support the null hypothesis that the SLR models fit equally as well as the cubic models.
# There is evidence of non-linear association between Catcher Defensive Adjustment and these predictors.
# Same split, just in different format for this part of the Linear Regression model analysis
# NOTE(review): this is NOT literally the same split — sample_frac() draws a
# fresh random 60% here. Also, inside tibble(), the second column's
# `catchers_mod_comp_dat` resolves (via tibble's sequential data masking) to
# the list column created on the previous line, not the data frame created
# earlier in the script — confirm setdiff() behaves as intended here.
catchers_split <- tibble(
  catchers_mod_comp_dat = catchers_modeling_data %>% sample_frac(0.60) %>% list(),
  catchers_mod_bldg_dat = catchers_modeling_data %>% setdiff(catchers_mod_comp_dat) %>% list())

# Formulas for the two linear candidates: full model and Runs From Extra Strikes only.
pred_variables <- names(catchers_mod_bldg_dat) %>% setdiff(c("fraa_adj"))
lm_models <- tibble(fmla = c(str_c("fraa_adj ~ ", str_c(pred_variables, collapse = " + ")), "fraa_adj ~ runs_extra_strikes"),
                    model_name = c("all_vars", "runs_extra_strikes")) %>%
  mutate(fmla = map(fmla, as.formula))

# Variance of the outcome in the held-out portion (denominator for proportion of variance explained).
test_catchers_var <- catchers_split %>% pluck("catchers_mod_bldg_dat", 1, "fraa_adj") %>% var()

# NOTE(review): models are FIT on *_comp_dat and EVALUATED on *_bldg_dat,
# which inverts the roles implied by the earlier comments ("comp" = compare/
# test, "bldg" = build/train). Used consistently below, but confirm intent.
catchers_lm_fits <- catchers_split %>%
  crossing(lm_models) %>%
  mutate(
    model_fit = map2(fmla, catchers_mod_comp_dat, lm),
    test_mse = map2_dbl(model_fit, catchers_mod_bldg_dat, modelr::mse),
    prop_var_explained = 1 - (test_mse/test_catchers_var)
  )
catchers_lm_fits %>%
  select(model_name, test_mse, prop_var_explained) %>%
  arrange(test_mse)
# A linear model including all predictors explains about 93.5% of the variance in Catcher Defensive Adjustment with a small test MSE
# A linear model including only Runs From Extra Strikes explains about 64.4% of the variance in Catcher Defensive Adjustment with a large MSE.

# Linear Model Selection and Regularization -------------------------------

# Ridge Regression --------------------------------------------------------
# Figure out variance of outcome variable; will be useful later.
test_fraa_adj_var <- catchers_split %>% pluck("catchers_mod_bldg_dat", 1, "fraa_adj") %>% var()

# lambda grid to search -- use for ridge regression (200 values)
lambda_grid <- 10^seq(-2, 10, length = 200)

# ridge regression: 10-fold cv (glmnetUtils formula interface)
ridge_cv <- catchers_mod_bldg_dat %>%
  cv.glmnet(
    formula = fraa_adj ~ .,
    data = .,
    alpha = 0,
    nfolds = 10,
    lambda = lambda_grid
  )
# Check plot of cv error
plot(ridge_cv)
# ridge's best lambdas
ridge_lambda_min <- ridge_cv$lambda.min
ridge_lambda_1se <- ridge_cv$lambda.1se

# Lasso ------------------------------------------------------------------
# lasso: 10-fold cv (alpha = 1; default lambda path)
lasso_cv <- catchers_mod_bldg_dat %>%
  cv.glmnet(
    formula = fraa_adj ~ .,
    data = .,
    alpha = 1,
    nfolds = 10
  )
# Check plot of cv error
plot(lasso_cv)
# lasso's best lambdas
lasso_lambda_1se <- lasso_cv$lambda.1se
lasso_lambda_min <- lasso_cv$lambda.min

# Refit each method at its CV-chosen lambda, then stack into long format
# (one row per method, fitted model in the `fit` list column).
catchers_glmnet <- catchers_split %>%
  mutate(
    ridge_min = map(catchers_mod_comp_dat, ~ glmnet(fraa_adj ~ ., data = .x, alpha = 0, lambda = ridge_lambda_min)),
    ridge_1se = map(catchers_mod_comp_dat, ~ glmnet(fraa_adj ~ ., data = .x, alpha = 0, lambda = ridge_lambda_1se)),
    lasso_min = map(catchers_mod_comp_dat, ~ glmnet(fraa_adj ~ ., data = .x, alpha = 1, lambda = lasso_lambda_min)),
    lasso_1se = map(catchers_mod_comp_dat, ~ glmnet(fraa_adj ~ ., data = .x, alpha = 1, lambda = lasso_lambda_1se))
  ) %>%
  pivot_longer(cols = c(-catchers_mod_bldg_dat, -catchers_mod_comp_dat), names_to = "method", values_to = "fit")

# Test error and R squared for ridge and lasso fits
# NOTE(review): test_mse is already an atomic double column (map2_dbl), so
# the unnest(test_mse) call is redundant and errors under recent tidyr.
catchers_glmnet_error <- catchers_glmnet %>%
  mutate(pred = map2(fit, catchers_mod_bldg_dat, predict),
         test_mse = map2_dbl(catchers_mod_bldg_dat, pred, ~ mean((.x$fraa_adj - .y)^2))) %>%
  unnest(test_mse) %>%
  select(method, test_mse) %>%
  mutate(prop_explained = 1 - test_mse/test_fraa_adj_var) %>%
  arrange(test_mse)
catchers_glmnet_error
# lasso_min seemed to explain 94% of the variance in Catcher Defensive Adjustment using all the variables and had the lowest test MSE.
# ridge_min, ridge_1se, and lasso_1se in that order with similar numbers ranging from 92.6% to 93.7% of the variance explained and 3.16 to 3.73 in test MSE.

# Inspect/compare model coefficients across the four glmnet fits
# (zero coefficients shown as NA so dropped terms are easy to spot).
catchers_glmnet %>%
  pluck("fit") %>%
  map( ~ coef(.x) %>%
         as.matrix() %>%
         as.data.frame() %>%
         rownames_to_column("name")) %>%
  reduce(full_join, by = "name") %>%
  mutate(across(where(is.double), ~ if_else(.x == 0, NA_real_, .x))) %>%
  rename(ridge_min = s0.x, ridge_1se = s0.y, lasso_min = s0.x.x, lasso_1se = s0.y.y) %>%
  knitr::kable(digits = 3)
# Both lasso models retained Runs From Extra Strikes which is encouraging for the notion that it could replace CSAA.
# As expected, FRAA remained in all four models as it is a key component of the Catcher Defensive Adjustment formula.

# Best Subset Selection ---------------------------------------------------

# Helper Functions

# Predict from a fitted leaps::regsubsets object.
#   object   : regsubsets fit
#   fmla     : the formula (string or formula) used to fit `object`
#   new_data : data to predict on (coerced to tibble if needed)
#   model_id : which submodel size to use
# Returns a numeric vector of predictions.
predict_regsubset <- function(object, fmla, new_data, model_id) {
  if (!is.data.frame(new_data)) {
    new_data <- as_tibble(new_data)
  }
  obj_formula <- as.formula(fmla)
  coef_vector <- coef(object, model_id)
  x_vars <- names(coef_vector)
  mod_mat_new <- model.matrix(obj_formula, new_data)[, x_vars]
  pred <- as.numeric(mod_mat_new %*% coef_vector)
  return(pred)
}

# Test MSE for every submodel size in a regsubsets fit; returns a tibble
# with one row per model size (model_index, test_mse).
# NOTE: the original file defined this function twice, verbatim; the
# redundant duplicate definition has been removed.
test_mse_regsubset <- function(object, fmla, test_data) {
  num_models <- object %>%
    summary() %>%
    pluck("which") %>%
    dim() %>%
    .[1]
  test_mse <- rep(NA, num_models)
  # LHS of the formula names the observed target column
  obs_target <- test_data %>%
    as_tibble() %>%
    pull(!!as.formula(fmla)[[2]])
  for (i in seq_len(num_models)) {
    pred <- predict_regsubset(object, fmla, test_data, model_id = i)
    test_mse[i] <- mean((obs_target - pred)^2)
  }
  tibble(model_index = 1:num_models, test_mse = test_mse)
}

# 10-fold CV over every subset size (up to all 16 predictors).
catchers_bestsubset_cv <- catchers_mod_bldg_dat %>%
  crossv_kfold(10, id = "folds") %>%
  mutate(
    fmla = "fraa_adj ~ .",
    model_fits = map2(fmla, train, ~ regsubsets(as.formula(.x), data = .y, nvmax = 16)),
    model_fold_mse = pmap(list(model_fits, fmla, test), test_mse_regsubset)
  )
# Average fold MSE per subset size, best first.
catchers_bestsubset_cv %>%
  unnest(model_fold_mse) %>%
  group_by(model_index) %>%
  summarise(test_mse = mean(test_mse)) %>%
  arrange(test_mse)
# The best subset selection model included three variables which definitely includes Fielding Runs Above Average with a test MSE lower than the linear model and higher than lasso/ridge models.

# Principal Components Regression and Partial Least Squares -----------------------------------------
pcr_cv_catchers <- catchers_mod_bldg_dat %>%
  pcr(fraa_adj ~ ., data = ., scale = TRUE, validation = "CV")
# Root Mean Squared Error
validationplot(pcr_cv_catchers)
# Add vertical line at 13
abline(v = 13)
# Mean Squared Error
validationplot(pcr_cv_catchers, val.type = "MSEP")
abline(v = 7)
pcr_cv_catchers %>% summary()

pls_cv_catchers <- catchers_mod_bldg_dat %>%
  plsr(fraa_adj ~ ., data = ., scale = TRUE, validation = "CV")
validationplot(pls_cv_catchers)
abline(v = 7)
validationplot(pls_cv_catchers, val.type = "MSEP")
abline(v = 5)

# Refit the chosen component counts on the comparison set.
catchers_dim_reduct <- catchers_split %>%
  mutate(
    pcr_13m = map(catchers_mod_comp_dat, ~ pcr(fraa_adj ~ ., data = .x, ncomp = 13)),
    pcr_7m = map(catchers_mod_comp_dat, ~ pcr(fraa_adj ~ ., data = .x, ncomp = 7)),
    # BUG FIX: the PLS candidate was originally fit with pcr(); use plsr()
    # so the model named pls_5m is actually partial least squares.
    pls_5m = map(catchers_mod_comp_dat, ~ plsr(fraa_adj ~ ., data = .x, ncomp = 5))
  ) %>%
  pivot_longer(cols = c(-catchers_mod_bldg_dat, -catchers_mod_comp_dat), names_to = "method", values_to = "fit")

# Held-out MSE per dimension-reduction fit (ncomp passed positionally to predict.mvr).
catchers_dim_error <- catchers_dim_reduct %>%
  mutate(pred = pmap(list(fit, catchers_mod_bldg_dat, c(13, 7, 5)), predict),
         test_mse = map2_dbl(catchers_mod_bldg_dat, pred, ~ mean((.x$fraa_adj - .y)^2))) %>%
  # test_mse is already an atomic double column, so the original
  # unnest(test_mse) was redundant (and errors under recent tidyr); dropped.
  select(method, test_mse) %>%
  arrange(test_mse) %>%
  knitr::kable(digits = 3)
catchers_dim_error
# The PCR model with 13 components seemed to perform the best out of the two types of models in this section.
# It had a test MSE similar to the previous models in the report and same as the linear regression model.
# The PLS model with 5 components did not perform well compared to the rest at about a test MSE of about 11.
# (NOTE: that figure was produced by the pre-fix pcr() call; re-run after the plsr() fix above.)

# Conclusion --------------------------------------------------------------
# The candidate models were chosen from those models we used throughout the course with the Boston dataset.
# This final dataset for my project ended up being very similar in number of observations and number of variables so it seemed appropriate and beneficial to use similar methods.
# There were no variables that rendered a binary response and that is why I did not choose to use models that worked well with classification.
# The linear regression model using all the variables served as a reference for the other candidate models to compare with since it is a reliable and simple model.
# The final model was selected when accounting for the lowest test mean squared error and the highest proportion of the variance in CDA explained.
# The lasso_min model performed the best out of the models chosen resulting in a test mean squared error of 3.16 and a proportion of 93.7% of the variance in CDA explained.
# The model also retained Runs From Extra Strikes which could possibly prove that this metric from Statcast can replace the metric from Baseball Prospectus for a more accurate version when predicting catcher's overall defense.
# In addition, it also kept Strike Rate, Blocking Runs, and Strike Zones 14 and 16.
# Framing is an integral part of evaluating the quality of a catcher's defense and to have a better understanding when comparing catchers.
# Statcast providing state-of-the-art technology is a plus that allows baseball analysts to further their studies when it comes to defense, a difficult avenue to quantify in the game of baseball.
# What we can do to improve these metrics is to keep adding data for this season (hopefully there is a season) and the ones to come as Statcast is simultaneously improving as well as the statistics I covered in this project.
# Other entities such as FanGraphs and Baseball Prospectus have new, specific metrics for catchers that could be potential variables for the dataset.
# Lastly, another model I could have used was Polynomial Regression and could have used Cross-Validation and Validation
# Scatterplot Smoothers (J. Fox and S. Weisberg)
# Sept 17, 2012 moved from scatterplot.R to scatterplotSmoothers.R
# June 18, 2014 Fixed bug in gamLine so the smoother.arg link="linkname" works; thanks to Hani Christoph
# 2014-08-19: Make sure that Matrix and MatrixModels packages are available to quantregLine().
#             Can't substitute requireNamespace() for require() for gam and quantreg packages. John
# 2014-11-21: Added 'offset' argument with default 0: offset= sigmaHat(model) for use with
#             marginal model plots. Fixed spread smooths as well
# 2015-01-27: gam() and s() now imported from mgcv rqss(), qss(), and fitted.rqss() from quantreg. John
# 2016-11-19: Added argument in smoother.args called 'evaluation'. The smoother will be evaluated
#             at evaluation equally spaced points in the range of the horizontal axis, with a default of 50.

# Look up `arg` in a list of smoother arguments, falling back to `default`
# when the entry is absent (NULL).
default.arg <- function(args.list, arg, default){
  if (is.null(args.list[[arg]])) default else args.list[[arg]]
}

# Loess mean smooth (optionally with upper/lower spread smooths) drawn on the
# current plot, or returned as a list of evaluated coordinates when draw=FALSE.
#   x, y          : data coordinates
#   col           : line color
#   log.x, log.y  : smooth on log scale, back-transform for plotting
#   spread        : also smooth squared residuals above/below the fit
#   smoother.args : list consulted via default.arg (lty, lwd, span, family,
#                   degree, iterations, evaluation, ...)
#   draw          : draw lines (TRUE) or return coordinates (FALSE)
#   offset        : added in quadrature to the spread smooths
loessLine <- function(x, y, col, log.x, log.y, spread=FALSE, smoother.args, draw=TRUE, offset=0) {
  lty <- default.arg(smoother.args, "lty", 1)
  lwd <- default.arg(smoother.args, "lwd", 2)
  lty.spread <- default.arg(smoother.args, "lty.spread", 2)
  lwd.spread <- default.arg(smoother.args, "lwd.spread", 1)
  span <- default.arg(smoother.args, "span", 2/3)
  family <- default.arg(smoother.args, "family", "symmetric")
  degree <- default.arg(smoother.args, "degree", 1)
  iterations <- default.arg(smoother.args, "iterations", 4)
  evaluation <- default.arg(smoother.args, "evaluation", 50)
  if (log.x){
    x <- log(x)
  }
  if (log.y){
    y <- log(y)
  }
  # drop incomplete pairs, then sort by x for line drawing
  valid <- complete.cases(x, y)
  x <- x[valid]
  y <- y[valid]
  ord <- order(x)
  x <- x[ord]
  y <- y[ord]
  x.eval <- seq(min(x), max(x), length=evaluation)
  # suppress warnings during fitting; on.exit restores the saved option
  warn <- options(warn=-1)
  on.exit(options(warn))
  # mean smooth
  # NOTE(review): class(fit)[1] != "try-error" would be more idiomatic as
  # !inherits(fit, "try-error") (here and below).
  fit <- try(loess(y ~ x, span=span, family=family, degree=degree,
                   control=loess.control(iterations=iterations)), silent=TRUE)
  if (class(fit)[1] != "try-error"){
    y.eval <- predict(fit, newdata=data.frame(x=x.eval))
    y.eval <- if(log.y) exp(y.eval) else y.eval
    if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty)
    else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval)
  }
  else{
    options(warn)
    warning("could not fit smooth")
    return()}
  # spread smooth, if requested: smooth squared residuals separately for
  # positive and negative residuals, then offset the mean curve by the
  # square roots.
  if(spread) {
    res <- residuals(fit)
    pos <- res > 0
    pos.fit <- try(loess(I(res^2) ~ x, span=span, degree=0, family=family, subset=pos,
                         control=loess.control(iterations=1)), silent=TRUE)
    neg.fit <- try(loess(I(res^2) ~ x, span=span, degree=0, family=family, subset=!pos,
                         control=loess.control(iterations=1)), silent=TRUE)
    if(class(pos.fit)[1] != "try-error"){
      y.pos <- y.eval + sqrt(offset^2 + predict(pos.fit, newdata=data.frame(x=x.eval)))
      y.pos <- if (log.y) exp(y.pos) else y.pos
      if(draw) {lines(if(log.x) exp(x.eval) else x.eval, y.pos, lwd=lwd.spread, lty=lty.spread, col=col)}
      else {out$x.pos <- if(log.x) exp(x.eval) else x.eval
            out$y.pos <- y.pos}
    }
    else{
      options(warn)
      warning("could not fit positive part of the spread")
    }
    if(class(neg.fit)[1] != "try-error"){
      y.neg <- y.eval - sqrt(offset^2 + predict(neg.fit, newdata=data.frame(x=x.eval)))
      y.neg <- if (log.y) exp(y.neg) else y.neg
      # NOTE(review): unlike the positive branch, this draws at x.eval
      # without the `if(log.x) exp(x.eval)` back-transform — looks like an
      # inconsistency when log.x=TRUE; confirm before changing.
      if(draw) lines(x.eval, y.neg, lwd=lwd.spread, lty=lty.spread, col=col)
      else {out$x.neg <- if(log.x) exp(x.eval) else x.eval
            out$y.neg <- y.neg}
    }
    else {options(warn)
      warning("could not fit negative part of the spread")
    }
  }
  if(!draw) return(out)
}

# GAM mean smooth via mgcv::gam (optionally with spread smooths); same
# argument conventions as loessLine. smoother.args additionally supports
# family/link (character or function), k, bs, and weights.
gamLine <- function(x, y, col, log.x, log.y, spread=FALSE, smoother.args, draw=TRUE, offset=0) {
  # if (!require("mgcv")) stop("mgcv package missing")
  lty <- default.arg(smoother.args, "lty", 1)
  lwd <- default.arg(smoother.args, "lwd", 2)
  lty.spread <- default.arg(smoother.args, "lty.spread", 2)
  lwd.spread <- default.arg(smoother.args, "lwd.spread", 1)
  fam <- default.arg(smoother.args, "family", gaussian)
  link <- default.arg(smoother.args, "link", NULL)
  evaluation <- default.arg(smoother.args, "evaluation", 50)
  # June 18, 2014
  fam <- if(is.character(fam)) eval(parse(text=fam)) else fam
  link <- if(is.character(link)) make.link(link) else link
  # end
  k <- default.arg(smoother.args, "k", -1)
  bs <- default.arg(smoother.args, "bs", "tp")
  # NOTE(review): `family` here is stats::family (the local variable is
  # `fam`), so is.character(family) is always FALSE and this line is a
  # no-op — likely a leftover from before the `fam` rename; confirm.
  if (is.character(family)) family <- eval(parse(text=family))
  weights <- default.arg(smoother.args, "weights", NULL)
  # spread smooths only make sense for an identity-link gaussian fit
  spread <- spread && identical(fam, gaussian) && is.null(link)
  if (log.x) x <- log(x)
  if (log.y) y <- log(y)
  valid <- complete.cases(x, y)
  x <- x[valid]
  y <- y[valid]
  ord <- order(x)
  x <- x[ord]
  y <- y[ord]
  x.eval <- seq(min(x), max(x), length=evaluation)
  # subset/reorder user weights to match the filtered, sorted data
  w <-if (is.null(weights)) rep(1, length(y)) else weights[valid][ord]
  warn <- options(warn=-1)
  on.exit(options(warn))
  # new June 18, 2014
  fam1 <- if(is.null(link)) fam else fam(link)
  fit <- try(mgcv::gam(y ~ mgcv::s(x, k=k, bs=bs), weights=w, family=fam1))
  # end bug fix.
  if (class(fit)[1] != "try-error"){
    y.eval <- predict(fit, newdata=data.frame(x=x.eval))
    y.eval <- if(log.y) exp(y.eval) else y.eval
    if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty)
    else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval)
  }
  else{
    options(warn)
    warning("could not fit smooth")
    return()}
  if(spread) {
    res <- residuals(fit)
    pos <- res > 0
    pos.fit <- try(mgcv::gam(I(res^2) ~ mgcv::s(x, k=k, bs=bs), subset=pos), silent=TRUE)
    neg.fit <- try(mgcv::gam(I(res^2) ~ mgcv::s(x, k=k, bs=bs), subset=!pos), silent=TRUE)
    if(class(pos.fit)[1] != "try-error"){
      y.pos <- y.eval + sqrt(offset^2 + predict(pos.fit, newdata=data.frame(x=x.eval)))
      y.pos <- if (log.y) exp(y.pos) else y.pos
      if(draw) {lines(if(log.x) exp(x.eval) else x.eval, y.pos, lwd=lwd.spread, lty=lty.spread, col=col)}
      else {out$x.pos <- if(log.x) exp(x.eval) else x.eval
            out$y.pos <- y.pos}
    }
    else{
      options(warn)
      warning("could not fit positive part of the spread")
    }
    if(class(neg.fit)[1] != "try-error"){
      y.neg <- y.eval - sqrt(offset^2 + predict(neg.fit, newdata=data.frame(x=x.eval)))
      y.neg <- if (log.y) exp(y.neg) else y.neg
      # NOTE(review): same log.x inconsistency as in loessLine's negative
      # spread branch (x.eval is not back-transformed).
      if(draw)
        lines(x.eval, y.neg, lwd=lwd.spread, lty=lty.spread, col=col)
      else {out$x.neg <- if(log.x) exp(x.eval) else x.eval
            out$y.neg <- y.neg}
    }
    else {options(warn)
      warning("could not fit negative part of the spread")
    }
  }
  if(!draw) return(out)
}

# Nonparametric quantile-regression smooth via quantreg::rqss; when
# spread=TRUE also fits the 0.25 and 0.75 quantile curves.
# (Definition continues beyond this chunk of the file.)
quantregLine <- function(x, y, col, log.x, log.y, spread=FALSE, smoother.args, draw=TRUE, offset=0) {
  # if (!require("quantreg")) stop("quantreg package missing")
  if (!package.installed("Matrix")) stop("the Matrix package is missing")
  if (!package.installed("MatrixModels")) stop("the MatrixModels package is missing")
  if (!package.installed("SparseM")) stop("the SparseM package is missing")
  lty <- default.arg(smoother.args, "lty", 1)
  lwd <- default.arg(smoother.args, "lwd", 2)
  lty.spread <- default.arg(smoother.args, "lty.spread", 2)
  lwd.spread <- default.arg(smoother.args, "lwd.spread", 1)
  evaluation <- default.arg(smoother.args, "evaluation", 50)
  if (log.x) x <- log(x)
  if (log.y) y <- log(y)
  # default roughness penalty scales with the spread of x
  lambda <- default.arg(smoother.args, "lambda", IQR(x))
  valid <- complete.cases(x, y)
  x <- x[valid]
  y <- y[valid]
  ord <- order(x)
  x <- x[ord]
  y <- y[ord]
  x.eval <- seq(min(x), max(x), length=evaluation)
  if (!spread){
    fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda))
    y.eval <- predict(fit, newdata=data.frame(x=x.eval))
    y.eval <- if(log.y) exp(y.eval) else y.eval
    if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty)
    else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval)
  }
  else{
    # median plus first/third quartile curves
    fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda))
    q1fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda), tau=0.25)
    q3fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda), tau=0.75)
    y.eval <- predict(fit, newdata=data.frame(x=x.eval))
    y.eval.q1 <- predict(q1fit, newdata=data.frame(x=x.eval))
    y.eval.q3 <- predict(q3fit, newdata=data.frame(x=x.eval))
    y.eval <- if(log.y) exp(y.eval) else y.eval
    y.eval.q1 <- if(log.y) exp(y.eval.q1) else y.eval.q1
    y.eval.q3 <- if(log.y) exp(y.eval.q3) else y.eval.q3
    # 11/22/14:
adjust for offset y.eval.q1 <- y.eval - sqrt( (y.eval-y.eval.q1)^2 + offset^2) y.eval.q3 <- y.eval + sqrt( (y.eval-y.eval.q3)^2 + offset^2) if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty) else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval) if(draw) lines(if(log.x) exp(x.eval) else x.eval, y.eval.q1, lwd=lwd.spread, lty=lty.spread, col=col) else {out$x.neg <- if(log.x) exp(x.eval) else x.eval out$y.neg <- y.eval.q1} if(draw) lines(if(log.x) exp(x.eval) else x.eval, y.eval.q3, lwd=lwd.spread, lty=lty.spread, col=col) else {out$x.neg <- x.eval out$y.neg <- y.eval.q3} } if(!draw) return(out) }
/R/scatterplotSmoothers.R
no_license
jonathon-love/car
R
false
false
10,240
r
# Scatterplot Smoothers (J. Fox and S. Weisberg) # Sept 17, 2012 moved from scatterplot.R to scatterplotSmoothers.R # June 18, 2014 Fixed bug in gamLine so the smoother.arg link="linkname" works; thanks to Hani Christoph # 2014-08-19: Make sure that Matrix and MatrixModels packages are available to quantregLine(). # Can't substitute requireNamespace() for require() for gam and quantreg packages. John # 2014-11-21: Added 'offset' argument with default 0: offset= sigmaHat(model) for use with # marginal model plots. Fixed spread smooths as well # 2015-01-27: gam() and s() now imported from mgcv rqss(), qss(), and fitted.rqss() from quantreg. John # 2016-11-19: Added argument in smoother.args called 'evaluation'. The smoother will be evaluated # at evaluation equally spaced points in the range of the horizontal axis, with a default of 50. default.arg <- function(args.list, arg, default){ if (is.null(args.list[[arg]])) default else args.list[[arg]] } loessLine <- function(x, y, col, log.x, log.y, spread=FALSE, smoother.args, draw=TRUE, offset=0) { lty <- default.arg(smoother.args, "lty", 1) lwd <- default.arg(smoother.args, "lwd", 2) lty.spread <- default.arg(smoother.args, "lty.spread", 2) lwd.spread <- default.arg(smoother.args, "lwd.spread", 1) span <- default.arg(smoother.args, "span", 2/3) family <- default.arg(smoother.args, "family", "symmetric") degree <- default.arg(smoother.args, "degree", 1) iterations <- default.arg(smoother.args, "iterations", 4) evaluation <- default.arg(smoother.args, "evaluation", 50) if (log.x){ x <- log(x) } if (log.y){ y <- log(y) } valid <- complete.cases(x, y) x <- x[valid] y <- y[valid] ord <- order(x) x <- x[ord] y <- y[ord] x.eval <- seq(min(x), max(x), length=evaluation) warn <- options(warn=-1) on.exit(options(warn)) # mean smooth fit <- try(loess(y ~ x, span=span, family=family, degree=degree, control=loess.control(iterations=iterations)), silent=TRUE) if (class(fit)[1] != "try-error"){ y.eval <- predict(fit, 
newdata=data.frame(x=x.eval)) y.eval <- if(log.y) exp(y.eval) else y.eval if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty) else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval) } else{ options(warn) warning("could not fit smooth") return()} # spread smooth, if requested if(spread) { res <- residuals(fit) pos <- res > 0 pos.fit <- try(loess(I(res^2) ~ x, span=span, degree=0, family=family, subset=pos, control=loess.control(iterations=1)), silent=TRUE) neg.fit <- try(loess(I(res^2) ~ x, span=span, degree=0, family=family, subset=!pos, control=loess.control(iterations=1)), silent=TRUE) if(class(pos.fit)[1] != "try-error"){ y.pos <- y.eval + sqrt(offset^2 + predict(pos.fit, newdata=data.frame(x=x.eval))) y.pos <- if (log.y) exp(y.pos) else y.pos if(draw) {lines(if(log.x) exp(x.eval) else x.eval, y.pos, lwd=lwd.spread, lty=lty.spread, col=col)} else {out$x.pos <- if(log.x) exp(x.eval) else x.eval out$y.pos <- y.pos} } else{ options(warn) warning("could not fit positive part of the spread") } if(class(neg.fit)[1] != "try-error"){ y.neg <- y.eval - sqrt(offset^2 + predict(neg.fit, newdata=data.frame(x=x.eval))) y.neg <- if (log.y) exp(y.neg) else y.neg if(draw) lines(x.eval, y.neg, lwd=lwd.spread, lty=lty.spread, col=col) else {out$x.neg <- if(log.x) exp(x.eval) else x.eval out$y.neg <- y.neg} } else {options(warn) warning("could not fit negative part of the spread") } } if(!draw) return(out) } gamLine <- function(x, y, col, log.x, log.y, spread=FALSE, smoother.args, draw=TRUE, offset=0) { # if (!require("mgcv")) stop("mgcv package missing") lty <- default.arg(smoother.args, "lty", 1) lwd <- default.arg(smoother.args, "lwd", 2) lty.spread <- default.arg(smoother.args, "lty.spread", 2) lwd.spread <- default.arg(smoother.args, "lwd.spread", 1) fam <- default.arg(smoother.args, "family", gaussian) link <- default.arg(smoother.args, "link", NULL) evaluation <- default.arg(smoother.args, "evaluation", 50) # June 18, 2014 fam <- 
if(is.character(fam)) eval(parse(text=fam)) else fam link <- if(is.character(link)) make.link(link) else link # end k <- default.arg(smoother.args, "k", -1) bs <- default.arg(smoother.args, "bs", "tp") if (is.character(family)) family <- eval(parse(text=family)) weights <- default.arg(smoother.args, "weights", NULL) spread <- spread && identical(fam, gaussian) && is.null(link) if (log.x) x <- log(x) if (log.y) y <- log(y) valid <- complete.cases(x, y) x <- x[valid] y <- y[valid] ord <- order(x) x <- x[ord] y <- y[ord] x.eval <- seq(min(x), max(x), length=evaluation) w <-if (is.null(weights)) rep(1, length(y)) else weights[valid][ord] warn <- options(warn=-1) on.exit(options(warn)) # new June 18, 2014 fam1 <- if(is.null(link)) fam else fam(link) fit <- try(mgcv::gam(y ~ mgcv::s(x, k=k, bs=bs), weights=w, family=fam1)) # end bug fix. if (class(fit)[1] != "try-error"){ y.eval <- predict(fit, newdata=data.frame(x=x.eval)) y.eval <- if(log.y) exp(y.eval) else y.eval if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty) else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval) } else{ options(warn) warning("could not fit smooth") return()} if(spread) { res <- residuals(fit) pos <- res > 0 pos.fit <- try(mgcv::gam(I(res^2) ~ mgcv::s(x, k=k, bs=bs), subset=pos), silent=TRUE) neg.fit <- try(mgcv::gam(I(res^2) ~ mgcv::s(x, k=k, bs=bs), subset=!pos), silent=TRUE) if(class(pos.fit)[1] != "try-error"){ y.pos <- y.eval + sqrt(offset^2 + predict(pos.fit, newdata=data.frame(x=x.eval))) y.pos <- if (log.y) exp(y.pos) else y.pos if(draw) {lines(if(log.x) exp(x.eval) else x.eval, y.pos, lwd=lwd.spread, lty=lty.spread, col=col)} else {out$x.pos <- if(log.x) exp(x.eval) else x.eval out$y.pos <- y.pos} } else{ options(warn) warning("could not fit positive part of the spread") } if(class(neg.fit)[1] != "try-error"){ y.neg <- y.eval - sqrt(offset^2 + predict(neg.fit, newdata=data.frame(x=x.eval))) y.neg <- if (log.y) exp(y.neg) else y.neg if(draw) 
lines(x.eval, y.neg, lwd=lwd.spread, lty=lty.spread, col=col) else {out$x.neg <- if(log.x) exp(x.eval) else x.eval out$y.neg <- y.neg} } else {options(warn) warning("could not fit negative part of the spread") } } if(!draw) return(out) } quantregLine <- function(x, y, col, log.x, log.y, spread=FALSE, smoother.args, draw=TRUE, offset=0) { # if (!require("quantreg")) stop("quantreg package missing") if (!package.installed("Matrix")) stop("the Matrix package is missing") if (!package.installed("MatrixModels")) stop("the MatrixModels package is missing") if (!package.installed("SparseM")) stop("the SparseM package is missing") lty <- default.arg(smoother.args, "lty", 1) lwd <- default.arg(smoother.args, "lwd", 2) lty.spread <- default.arg(smoother.args, "lty.spread", 2) lwd.spread <- default.arg(smoother.args, "lwd.spread", 1) evaluation <- default.arg(smoother.args, "evaluation", 50) if (log.x) x <- log(x) if (log.y) y <- log(y) lambda <- default.arg(smoother.args, "lambda", IQR(x)) valid <- complete.cases(x, y) x <- x[valid] y <- y[valid] ord <- order(x) x <- x[ord] y <- y[ord] x.eval <- seq(min(x), max(x), length=evaluation) if (!spread){ fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda)) y.eval <- predict(fit, newdata=data.frame(x=x.eval)) y.eval <- if(log.y) exp(y.eval) else y.eval if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty) else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval) } else{ fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda)) q1fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda), tau=0.25) q3fit <- quantreg::rqss(y ~ quantreg::qss(x, lambda=lambda), tau=0.75) y.eval <- predict(fit, newdata=data.frame(x=x.eval)) y.eval.q1 <- predict(q1fit, newdata=data.frame(x=x.eval)) y.eval.q3 <- predict(q3fit, newdata=data.frame(x=x.eval)) y.eval <- if(log.y) exp(y.eval) else y.eval y.eval.q1 <- if(log.y) exp(y.eval.q1) else y.eval.q1 y.eval.q3 <- if(log.y) exp(y.eval.q3) else y.eval.q3 # 11/22/14: 
adjust for offset y.eval.q1 <- y.eval - sqrt( (y.eval-y.eval.q1)^2 + offset^2) y.eval.q3 <- y.eval + sqrt( (y.eval-y.eval.q3)^2 + offset^2) if(draw)lines(if(log.x) exp(x.eval) else x.eval, y.eval, lwd=lwd, col=col, lty=lty) else out <- list(x=if(log.x) exp(x.eval) else x.eval, y=y.eval) if(draw) lines(if(log.x) exp(x.eval) else x.eval, y.eval.q1, lwd=lwd.spread, lty=lty.spread, col=col) else {out$x.neg <- if(log.x) exp(x.eval) else x.eval out$y.neg <- y.eval.q1} if(draw) lines(if(log.x) exp(x.eval) else x.eval, y.eval.q3, lwd=lwd.spread, lty=lty.spread, col=col) else {out$x.neg <- x.eval out$y.neg <- y.eval.q3} } if(!draw) return(out) }
#### 3.COAD.Significant_MSigDB_analysis.R ### 1.Exame distribution of ssGSEA score hist(COAD.c2.all.v7.0) ### 2.Using limma to find significant pathway in each groups library(Biobase) ## 1) Build an ExpressionSet COAD.h.all.Set <- ExpressionSet(assayData=COAD.h.all.v7.0,phenoData=Cluster.df.sub) COAD.c1.all.Set <- ExpressionSet(assayData=COAD.c1.all.v7.0,phenoData=Cluster.df.sub) COAD.c2.all.Set <- ExpressionSet(assayData=COAD.c2.all.v7.0,phenoData=Cluster.df.sub) COAD.c3.all.Set <- ExpressionSet(assayData=COAD.c3.all.v7.0,phenoData=Cluster.df.sub) COAD.c4.all.Set <- ExpressionSet(assayData=COAD.c4.all.v7.0,phenoData=Cluster.df.sub) COAD.c5.all.Set <- ExpressionSet(assayData=COAD.c5.all.v7.0,phenoData=Cluster.df.sub) COAD.c6.all.Set <- ExpressionSet(assayData=COAD.c6.all.v7.0,phenoData=Cluster.df.sub) COAD.c5.all.Set <- ExpressionSet(assayData=COAD.c5.all.v7.0,phenoData=Cluster.df.sub) ## Several Groups experiment design COAD.c2.all.Set@phenoData@data f <- factor(Cluster.df.sub@data$dynamicColors, levels=c("blue","brown","turquoise","yellow")) design <- model.matrix(~0+f) colnames(design) <- c("blue","brown","turquoise","yellow") ## 2) Significant pathway analysis ###################### COAD.h.all.Set ################# ## Cut off library(limma) fit.h <- lmFit(COAD.h.all.Set, design) contrast.matrix <- makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.h.2 <- contrasts.fit(fit.h, contrast.matrix) fit.h.2 <- eBayes(fit.h.2) plotMD(fit.h.2, column = 1) plotMD(fit.h.2, column = 2) plotMD(fit.h.2, column = 3) plotMD(fit.h.2, column = 4) plotMD(fit.h.2, column = 5) plotMD(fit.h.2, column = 6) plotMD(fit.h.2, column = 7) plotMD(fit.h.2, column = 8) plotMD(fit.h.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.h.2, p.value = adjPvalueCutoff ) summary(dt) 
DEgeneSets.blue.h <- topTable(fit.h.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.h <- topTable(fit.h.2, coef="brown - (blue + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.h <- topTable(fit.h.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.h <- topTable(fit.h.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.h <- topTable(fit.h.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.h <- topTable(fit.h.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.h.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "h.all_all",show_colnames = F) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.blue.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Blue VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.brown.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Brown VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.turquoise.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.BlvBr.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.BlvTu.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.BrvTu.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_brown - turquoise",annotation_colors = 
ann_colors) ###################### COAD.c2.all.Set ################# ## Cut off library(limma) fit.c2 <- lmFit(COAD.c2.all.Set, design) contrast.matrix <- makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.c2.2 <- contrasts.fit(fit.c2, contrast.matrix) fit.c2.2 <- eBayes(fit.c2.2) plotMD(fit.c2.2, column = 1) plotMD(fit.c2.2, column = 2) plotMD(fit.c2.2, column = 3) plotMD(fit.c2.2, column = 4) plotMD(fit.c2.2, column = 5) plotMD(fit.c2.2, column = 6) plotMD(fit.c2.2, column = 7) plotMD(fit.c2.2, column = 8) plotMD(fit.c2.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.c2.2, p.value = adjPvalueCutoff ) summary(dt) DEgeneSets.blue.c2 <- topTable(fit.c2.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.c2 <- topTable(fit.c2.2, coef="brown - (blue + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.c2 <- topTable(fit.c2.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.c2 <- topTable(fit.c2.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.c2 <- topTable(fit.c2.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.c2 <- topTable(fit.c2.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.c2.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "c2.all_all",show_colnames = F) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.blue.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Blue VS 
orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.brown.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Brown VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.turquoise.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.BlvBr.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.BlvTu.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.BrvTu.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_brown - turquoise",annotation_colors = ann_colors) ###################### COAD.c5.all.Set ################# ## Cut off library(limma) fit.c5 <- lmFit(COAD.c5.all.Set, design) contrast.matrix <- makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.c5.2 <- contrasts.fit(fit.c5, contrast.matrix) fit.c5.2 <- eBayes(fit.c5.2) plotMD(fit.c5.2, column = 1) plotMD(fit.c5.2, column = 2) plotMD(fit.c5.2, column = 3) plotMD(fit.c5.2, column = 4) plotMD(fit.c5.2, column = 5) plotMD(fit.c5.2, column = 6) plotMD(fit.c5.2, column = 7) plotMD(fit.c5.2, column = 8) plotMD(fit.c5.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.c5.2, p.value = adjPvalueCutoff ) summary(dt) DEgeneSets.blue.c5 <- topTable(fit.c5.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.c5 <- topTable(fit.c5.2, coef="brown - (blue + turquoise)/2", number=number, 
p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.c5 <- topTable(fit.c5.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.c5 <- topTable(fit.c5.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.c5 <- topTable(fit.c5.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.c5 <- topTable(fit.c5.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.c5.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "c5.all_all",show_colnames = F) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.blue.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Blue VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.brown.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Brown VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.turquoise.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.BlvBr.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.BlvTu.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.BrvTu.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_brown - turquoise",annotation_colors = ann_colors) ###################### COAD.c6.all.Set ################# ## Cut off library(limma) fit.c6 <- lmFit(COAD.c6.all.Set, design) contrast.matrix <- 
makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.c6.2 <- contrasts.fit(fit.c6, contrast.matrix) fit.c6.2 <- eBayes(fit.c6.2) plotMD(fit.c6.2, column = 1) plotMD(fit.c6.2, column = 2) plotMD(fit.c6.2, column = 3) plotMD(fit.c6.2, column = 4) plotMD(fit.c6.2, column = 5) plotMD(fit.c6.2, column = 6) plotMD(fit.c6.2, column = 7) plotMD(fit.c6.2, column = 8) plotMD(fit.c6.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.c6.2, p.value = adjPvalueCutoff ) summary(dt) DEgeneSets.blue.c6 <- topTable(fit.c6.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.c6 <- topTable(fit.c6.2, coef="brown - (blue + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.c6 <- topTable(fit.c6.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.c6 <- topTable(fit.c6.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.c6 <- topTable(fit.c6.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.c6 <- topTable(fit.c6.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.c6.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "c6.all_all",show_colnames = F) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.blue.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Blue VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.brown.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Brown VS 
orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.turquoise.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.BlvBr.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.BlvTu.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.BrvTu.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_brown - turquoise",annotation_colors = ann_colors)
/COAD_NormalCancer_Project/2.Molecular_dataset/2.Gene_set_analysis/3.COAD.Significant_MSigDB_analysis.R
no_license
haojiang9999/HCA_script
R
false
false
15,311
r
#### 3.COAD.Significant_MSigDB_analysis.R ### 1.Exame distribution of ssGSEA score hist(COAD.c2.all.v7.0) ### 2.Using limma to find significant pathway in each groups library(Biobase) ## 1) Build an ExpressionSet COAD.h.all.Set <- ExpressionSet(assayData=COAD.h.all.v7.0,phenoData=Cluster.df.sub) COAD.c1.all.Set <- ExpressionSet(assayData=COAD.c1.all.v7.0,phenoData=Cluster.df.sub) COAD.c2.all.Set <- ExpressionSet(assayData=COAD.c2.all.v7.0,phenoData=Cluster.df.sub) COAD.c3.all.Set <- ExpressionSet(assayData=COAD.c3.all.v7.0,phenoData=Cluster.df.sub) COAD.c4.all.Set <- ExpressionSet(assayData=COAD.c4.all.v7.0,phenoData=Cluster.df.sub) COAD.c5.all.Set <- ExpressionSet(assayData=COAD.c5.all.v7.0,phenoData=Cluster.df.sub) COAD.c6.all.Set <- ExpressionSet(assayData=COAD.c6.all.v7.0,phenoData=Cluster.df.sub) COAD.c5.all.Set <- ExpressionSet(assayData=COAD.c5.all.v7.0,phenoData=Cluster.df.sub) ## Several Groups experiment design COAD.c2.all.Set@phenoData@data f <- factor(Cluster.df.sub@data$dynamicColors, levels=c("blue","brown","turquoise","yellow")) design <- model.matrix(~0+f) colnames(design) <- c("blue","brown","turquoise","yellow") ## 2) Significant pathway analysis ###################### COAD.h.all.Set ################# ## Cut off library(limma) fit.h <- lmFit(COAD.h.all.Set, design) contrast.matrix <- makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.h.2 <- contrasts.fit(fit.h, contrast.matrix) fit.h.2 <- eBayes(fit.h.2) plotMD(fit.h.2, column = 1) plotMD(fit.h.2, column = 2) plotMD(fit.h.2, column = 3) plotMD(fit.h.2, column = 4) plotMD(fit.h.2, column = 5) plotMD(fit.h.2, column = 6) plotMD(fit.h.2, column = 7) plotMD(fit.h.2, column = 8) plotMD(fit.h.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.h.2, p.value = adjPvalueCutoff ) summary(dt) 
DEgeneSets.blue.h <- topTable(fit.h.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.h <- topTable(fit.h.2, coef="brown - (blue + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.h <- topTable(fit.h.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.h <- topTable(fit.h.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.h <- topTable(fit.h.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.h <- topTable(fit.h.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.h.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "h.all_all",show_colnames = F) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.blue.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Blue VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.brown.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Brown VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.turquoise.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.BlvBr.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.BlvTu.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.h.all.Set[rownames(DEgeneSets.BrvTu.h),],annotation_col = pData(COAD.h.all.Set), main = "h.all_brown - turquoise",annotation_colors = 
ann_colors) ###################### COAD.c2.all.Set ################# ## Cut off library(limma) fit.c2 <- lmFit(COAD.c2.all.Set, design) contrast.matrix <- makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.c2.2 <- contrasts.fit(fit.c2, contrast.matrix) fit.c2.2 <- eBayes(fit.c2.2) plotMD(fit.c2.2, column = 1) plotMD(fit.c2.2, column = 2) plotMD(fit.c2.2, column = 3) plotMD(fit.c2.2, column = 4) plotMD(fit.c2.2, column = 5) plotMD(fit.c2.2, column = 6) plotMD(fit.c2.2, column = 7) plotMD(fit.c2.2, column = 8) plotMD(fit.c2.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.c2.2, p.value = adjPvalueCutoff ) summary(dt) DEgeneSets.blue.c2 <- topTable(fit.c2.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.c2 <- topTable(fit.c2.2, coef="brown - (blue + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.c2 <- topTable(fit.c2.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.c2 <- topTable(fit.c2.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.c2 <- topTable(fit.c2.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.c2 <- topTable(fit.c2.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.c2.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "c2.all_all",show_colnames = F) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.blue.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Blue VS 
orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.brown.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Brown VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.turquoise.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.BlvBr.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.BlvTu.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c2.all.Set[rownames(DEgeneSets.BrvTu.c2),],annotation_col = pData(COAD.c2.all.Set), main = "c2.all_brown - turquoise",annotation_colors = ann_colors) ###################### COAD.c5.all.Set ################# ## Cut off library(limma) fit.c5 <- lmFit(COAD.c5.all.Set, design) contrast.matrix <- makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.c5.2 <- contrasts.fit(fit.c5, contrast.matrix) fit.c5.2 <- eBayes(fit.c5.2) plotMD(fit.c5.2, column = 1) plotMD(fit.c5.2, column = 2) plotMD(fit.c5.2, column = 3) plotMD(fit.c5.2, column = 4) plotMD(fit.c5.2, column = 5) plotMD(fit.c5.2, column = 6) plotMD(fit.c5.2, column = 7) plotMD(fit.c5.2, column = 8) plotMD(fit.c5.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.c5.2, p.value = adjPvalueCutoff ) summary(dt) DEgeneSets.blue.c5 <- topTable(fit.c5.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.c5 <- topTable(fit.c5.2, coef="brown - (blue + turquoise)/2", number=number, 
p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.c5 <- topTable(fit.c5.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.c5 <- topTable(fit.c5.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.c5 <- topTable(fit.c5.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.c5 <- topTable(fit.c5.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.c5.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "c5.all_all",show_colnames = F) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.blue.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Blue VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.brown.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Brown VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.turquoise.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.BlvBr.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.BlvTu.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c5.all.Set[rownames(DEgeneSets.BrvTu.c5),],annotation_col = pData(COAD.c5.all.Set), main = "c5.all_brown - turquoise",annotation_colors = ann_colors) ###################### COAD.c6.all.Set ################# ## Cut off library(limma) fit.c6 <- lmFit(COAD.c6.all.Set, design) contrast.matrix <- 
makeContrasts(blue-(brown+turquoise)/2, brown-(blue+turquoise)/2, turquoise-(brown+blue)/2, blue-brown, blue-turquoise, brown-blue, brown-turquoise, turquoise-blue, turquoise-brown, levels=design) fit.c6.2 <- contrasts.fit(fit.c6, contrast.matrix) fit.c6.2 <- eBayes(fit.c6.2) plotMD(fit.c6.2, column = 1) plotMD(fit.c6.2, column = 2) plotMD(fit.c6.2, column = 3) plotMD(fit.c6.2, column = 4) plotMD(fit.c6.2, column = 5) plotMD(fit.c6.2, column = 6) plotMD(fit.c6.2, column = 7) plotMD(fit.c6.2, column = 8) plotMD(fit.c6.2, column = 9) ## Find Top significant genesets ## Cut off adjPvalueCutoff <- 0.01 number = 50 ## dt <- decideTests(fit.c6.2, p.value = adjPvalueCutoff ) summary(dt) DEgeneSets.blue.c6 <- topTable(fit.c6.2, coef="blue - (brown + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.brown.c6 <- topTable(fit.c6.2, coef="brown - (blue + turquoise)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.turquoise.c6 <- topTable(fit.c6.2, coef="turquoise - (brown + blue)/2", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvBr.c6 <- topTable(fit.c6.2, coef="blue - brown", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BlvTu.c6 <- topTable(fit.c6.2, coef="blue - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") DEgeneSets.BrvTu.c6 <- topTable(fit.c6.2, coef="brown - turquoise", number=number, p.value=adjPvalueCutoff,adjust="BH") ## Ploting library(pheatmap) ann_colors = list(dynamicColors = c(blue = "blue",turquoise = "turquoise", brown = "brown", yellow = "yellow")) #pheatmap::pheatmap(COAD.c6.all.Set,annotation_col = pData(COAD.h.all.Set), # main = "c6.all_all",show_colnames = F) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.blue.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Blue VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.brown.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Brown VS 
orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.turquoise.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_turquoise VS orthers",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.BlvBr.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Blue - brown",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.BlvTu.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_Blue - turquoise",annotation_colors = ann_colors) pheatmap::pheatmap(COAD.c6.all.Set[rownames(DEgeneSets.BrvTu.c6),],annotation_col = pData(COAD.c6.all.Set), main = "c6.all_brown - turquoise",annotation_colors = ann_colors)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bi_open.R \name{bi_open} \alias{bi_open} \title{Bi open} \usage{ bi_open(read) } \arguments{ \item{read}{either a path to a NetCDF file, or a NetCDF connection created using \code{nc_open}, or a \code{\link{libbi}} object from which to read the output} } \value{ open NetCDF connection } \description{ This function opens an NetCDF file The file can be specified as a string to the filepath, in which case a NetCDF connection is opened, or directly as a NetCDF connection. }
/man/bi_open.Rd
no_license
tyler-abbot/RBi
R
false
true
554
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bi_open.R \name{bi_open} \alias{bi_open} \title{Bi open} \usage{ bi_open(read) } \arguments{ \item{read}{either a path to a NetCDF file, or a NetCDF connection created using \code{nc_open}, or a \code{\link{libbi}} object from which to read the output} } \value{ open NetCDF connection } \description{ This function opens an NetCDF file The file can be specified as a string to the filepath, in which case a NetCDF connection is opened, or directly as a NetCDF connection. }
setwd("C:/Users/SKudr/Documents/GitHub/hse21_H3K4me1_ZDNA_mouse/src") source('lib.R') ### #https://bioconductor.org/packages/release/bioc/vignettes/ChIPpeakAnno/inst/doc/quickStart.html BiocManager::install("ChIPpeakAnno") BiocManager::install("org.Hs.eg.db") BiocManager::install("org.Mm.eg.db") library(ChIPpeakAnno) #library(TxDb.Hsapiens.UCSC.hg19.knownGene) #library(org.Hs.eg.db) library(TxDb.Mmusculus.UCSC.mm10.knownGene) library(org.Mm.eg.db) ### peaks <- toGRanges(paste0(DATA_DIR, 'H3K4me1_ZDNA.intersect_with_DEEPZ.bed'), format="BED") peaks[1:2] annoData <- toGRanges(TxDb.Mmusculus.UCSC.mm10.knownGene) annoData[1:2] anno <- annotatePeakInBatch(peaks, AnnotationData=annoData, output="overlapping", FeatureLocForDistance="TSS", bindingRegion=c(-2000, 300)) data.frame(anno) %>% head() anno$symbol <- xget(anno$feature, org.Mm.egSYMBOL) data.frame(anno) %>% head() anno_df <- data.frame(anno) write.table(anno_df, file=paste0(DATA_DIR, 'H3K4me1_ZDNA.intersect_with_DEEPZ.genes.txt'), col.names = TRUE, row.names = FALSE, sep = '\t', quote = FALSE) uniq_genes_df <- unique(anno_df['symbol']) write.table(uniq_genes_df, file=paste0(DATA_DIR, 'H3K4me1_ZDNA.intersect_with_DEEPZ.genes_uniq.txt'), col.names = FALSE, row.names = FALSE, sep = '\t', quote = FALSE)
/src/ChIPpeakAnno.r
no_license
semkud/hse21_H3K4me1_ZDNA_mouse
R
false
false
1,392
r
setwd("C:/Users/SKudr/Documents/GitHub/hse21_H3K4me1_ZDNA_mouse/src") source('lib.R') ### #https://bioconductor.org/packages/release/bioc/vignettes/ChIPpeakAnno/inst/doc/quickStart.html BiocManager::install("ChIPpeakAnno") BiocManager::install("org.Hs.eg.db") BiocManager::install("org.Mm.eg.db") library(ChIPpeakAnno) #library(TxDb.Hsapiens.UCSC.hg19.knownGene) #library(org.Hs.eg.db) library(TxDb.Mmusculus.UCSC.mm10.knownGene) library(org.Mm.eg.db) ### peaks <- toGRanges(paste0(DATA_DIR, 'H3K4me1_ZDNA.intersect_with_DEEPZ.bed'), format="BED") peaks[1:2] annoData <- toGRanges(TxDb.Mmusculus.UCSC.mm10.knownGene) annoData[1:2] anno <- annotatePeakInBatch(peaks, AnnotationData=annoData, output="overlapping", FeatureLocForDistance="TSS", bindingRegion=c(-2000, 300)) data.frame(anno) %>% head() anno$symbol <- xget(anno$feature, org.Mm.egSYMBOL) data.frame(anno) %>% head() anno_df <- data.frame(anno) write.table(anno_df, file=paste0(DATA_DIR, 'H3K4me1_ZDNA.intersect_with_DEEPZ.genes.txt'), col.names = TRUE, row.names = FALSE, sep = '\t', quote = FALSE) uniq_genes_df <- unique(anno_df['symbol']) write.table(uniq_genes_df, file=paste0(DATA_DIR, 'H3K4me1_ZDNA.intersect_with_DEEPZ.genes_uniq.txt'), col.names = FALSE, row.names = FALSE, sep = '\t', quote = FALSE)
source('./dupFinal.r') DupCheck()
/R/RetypeDupChk.r
no_license
amjiuzi/chemical01
R
false
false
38
r
source('./dupFinal.r') DupCheck()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/jamba-string.r \name{fillBlanks} \alias{fillBlanks} \title{Fill blank entries in a vector} \usage{ fillBlanks(x, blankGrep = c("[ \\t]*"), first = "", ...) } \arguments{ \item{x}{character vector} \item{blankGrep}{vector of grep patterns, or \code{NA}, indicating the type of entry to be considered blank. Each \code{blankGrep} pattern is searched using \code{jamba::proigrep()}, which by default uses case-insensitive regular expression pattern matching.} \item{first}{options character string intended when the first entry of \code{x} is blank. By default \code{""} is used.} \item{...}{additional parameters are ignored.} } \value{ Character vector where blank entries are filled with the most recent non-blank value. } \description{ Fill blank entries in a vector } \details{ This function takes a character vector and fills any blank (missing) entries with the last non-blank entry in the vector. It is intended for situations like imported Excel data, where there may be one header value representing a series of cells. The method used does not loop through the data, and should scale fairly well with good efficiency even for extremely large vectors. 
} \examples{ x <- c("A", "", "", "", "B", "C", "", "", NA, "D", "", "", "E", "F", "G", "", ""); data.frame(x, fillBlanks(x)); } \seealso{ Other jam string functions: \code{\link{asSize}()}, \code{\link{breaksByVector}()}, \code{\link{cPasteSU}()}, \code{\link{cPasteS}()}, \code{\link{cPasteUnique}()}, \code{\link{cPasteU}()}, \code{\link{cPaste}()}, \code{\link{formatInt}()}, \code{\link{gsubOrdered}()}, \code{\link{gsubs}()}, \code{\link{makeNames}()}, \code{\link{mixedOrder}()}, \code{\link{mixedSortDF}()}, \code{\link{mixedSorts}()}, \code{\link{mixedSort}()}, \code{\link{mmixedOrder}()}, \code{\link{nameVectorN}()}, \code{\link{nameVector}()}, \code{\link{padInteger}()}, \code{\link{padString}()}, \code{\link{pasteByRowOrdered}()}, \code{\link{pasteByRow}()}, \code{\link{sizeAsNum}()}, \code{\link{tcount}()}, \code{\link{ucfirst}()}, \code{\link{uniques}()} } \concept{jam string functions}
/man/fillBlanks.Rd
permissive
jmw86069/jamba
R
false
true
2,152
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/jamba-string.r \name{fillBlanks} \alias{fillBlanks} \title{Fill blank entries in a vector} \usage{ fillBlanks(x, blankGrep = c("[ \\t]*"), first = "", ...) } \arguments{ \item{x}{character vector} \item{blankGrep}{vector of grep patterns, or \code{NA}, indicating the type of entry to be considered blank. Each \code{blankGrep} pattern is searched using \code{jamba::proigrep()}, which by default uses case-insensitive regular expression pattern matching.} \item{first}{options character string intended when the first entry of \code{x} is blank. By default \code{""} is used.} \item{...}{additional parameters are ignored.} } \value{ Character vector where blank entries are filled with the most recent non-blank value. } \description{ Fill blank entries in a vector } \details{ This function takes a character vector and fills any blank (missing) entries with the last non-blank entry in the vector. It is intended for situations like imported Excel data, where there may be one header value representing a series of cells. The method used does not loop through the data, and should scale fairly well with good efficiency even for extremely large vectors. 
} \examples{ x <- c("A", "", "", "", "B", "C", "", "", NA, "D", "", "", "E", "F", "G", "", ""); data.frame(x, fillBlanks(x)); } \seealso{ Other jam string functions: \code{\link{asSize}()}, \code{\link{breaksByVector}()}, \code{\link{cPasteSU}()}, \code{\link{cPasteS}()}, \code{\link{cPasteUnique}()}, \code{\link{cPasteU}()}, \code{\link{cPaste}()}, \code{\link{formatInt}()}, \code{\link{gsubOrdered}()}, \code{\link{gsubs}()}, \code{\link{makeNames}()}, \code{\link{mixedOrder}()}, \code{\link{mixedSortDF}()}, \code{\link{mixedSorts}()}, \code{\link{mixedSort}()}, \code{\link{mmixedOrder}()}, \code{\link{nameVectorN}()}, \code{\link{nameVector}()}, \code{\link{padInteger}()}, \code{\link{padString}()}, \code{\link{pasteByRowOrdered}()}, \code{\link{pasteByRow}()}, \code{\link{sizeAsNum}()}, \code{\link{tcount}()}, \code{\link{ucfirst}()}, \code{\link{uniques}()} } \concept{jam string functions}
library(arules) install.packages("arulesViz") library("arulesViz") TransFood <- read.csv("http://homepages.uc.edu/~maifg/DataMining/data/food_4_association.csv") TransFood <- TransFood[, -1] TransFood <- as(as.matrix(TransFood), "transactions") str(TransFood) summary(TransFood) itemFrequencyPlot(TransFood, support = 0.05, cex.names = 0.8) # sup = 0.001 conf = 0.4 # sup = 0.001 conf = 0.6 # sup = 0.001 conf = 0.8 # sup = 0.001 conf = 1 basket_rules <- apriori(TransFood, parameter = list(sup = 0.001, conf = 0.9, target = "rules")) inspect(head(basket_rules)) plot(basket_rules) plot(basket_rules, method = "grouped") plot(head(sort(basket_rules, by = "lift"),15), method = "graph") ## clustering ClusterFood <- read.csv("http://homepages.uc.edu/~maifg/DataMining/data/qry_Food_by_Month.csv") summary(ClusterFood) ClusterFood <- scale(x=ClusterFood[,-1]) library(fpc) fit2 <- kmeans(ClusterFood, 2) table(fit2$cluster) plotcluster(ClusterFood, fit2$cluster) fit3 <- kmeans(ClusterFood, 3) table(fit3$cluster) plotcluster(ClusterFood, fit3$cluster) fit4 <- kmeans(ClusterFood, 4) table(fit4$cluster) plotcluster(ClusterFood, fit4$cluster) fit5 <- kmeans(ClusterFood, 5) table(fit5$cluster) plotcluster(ClusterFood, fit5$cluster) fit5$centers #optimal clusters library(clValid) intern_iris <- clValid(ClusterFood, 2:5 ,clMethods=c("hierarchical","kmeans"),validation="internal") summary(intern_iris) aggregate(ClusterFood, by = list(fit2$cluster), FUN = mean) # Determine number of clusters wss <- (nrow(ClusterFood) - 1) * sum(apply(ClusterFood, 2, var)) for (i in 2:12) wss[i] <- sum(kmeans(ClusterFood[,-1], centers = i)$withinss) plot(1:12, wss, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares") prediction.strength(ClusterFood, Gmin = 2, Gmax = 15, M = 10, cutoff = 0.8) d = dist(ClusterFood, method = "euclidean") result = matrix(nrow = 14, ncol = 3) for (i in 2:15) { cluster_result = kmeans(ClusterFood, i) clusterstat = cluster.stats(d, 
cluster_result$cluster) result[i - 1, 1] = i result[i - 1, 2] = clusterstat$avg.silwidth result[i - 1, 3] = clusterstat$dunn } plot(result[, c(1, 2)], type = "l", ylab = "silhouette width", xlab = "number of clusters") plot(result[, c(1, 3)], type = "l", ylab = "dunn index", xlab = "number of clusters") #prediction strength prediction.strength(ClusterFood, Gmin = 2, Gmax = 15, M = 10, cutoff = 0.8) # hierarchical hc_result = hclust(dist(ClusterFood)) plot(hc_result) # Cut Dendrogram into 3 Clusters rect.hclust(hc_result, k = 2) rect.hclust(hc_result, k = 3) rect.hclust(hc_result, k = 4) rect.hclust(hc_result, k = 5) #EDA plot(density(ClusterFood$Oct..10)) plot(density(ClusterFood$Mar..11)) hist(ClusterFood$Oct)
/codeq2.R
no_license
snsubbu6981/BANA
R
false
false
2,819
r
library(arules) install.packages("arulesViz") library("arulesViz") TransFood <- read.csv("http://homepages.uc.edu/~maifg/DataMining/data/food_4_association.csv") TransFood <- TransFood[, -1] TransFood <- as(as.matrix(TransFood), "transactions") str(TransFood) summary(TransFood) itemFrequencyPlot(TransFood, support = 0.05, cex.names = 0.8) # sup = 0.001 conf = 0.4 # sup = 0.001 conf = 0.6 # sup = 0.001 conf = 0.8 # sup = 0.001 conf = 1 basket_rules <- apriori(TransFood, parameter = list(sup = 0.001, conf = 0.9, target = "rules")) inspect(head(basket_rules)) plot(basket_rules) plot(basket_rules, method = "grouped") plot(head(sort(basket_rules, by = "lift"),15), method = "graph") ## clustering ClusterFood <- read.csv("http://homepages.uc.edu/~maifg/DataMining/data/qry_Food_by_Month.csv") summary(ClusterFood) ClusterFood <- scale(x=ClusterFood[,-1]) library(fpc) fit2 <- kmeans(ClusterFood, 2) table(fit2$cluster) plotcluster(ClusterFood, fit2$cluster) fit3 <- kmeans(ClusterFood, 3) table(fit3$cluster) plotcluster(ClusterFood, fit3$cluster) fit4 <- kmeans(ClusterFood, 4) table(fit4$cluster) plotcluster(ClusterFood, fit4$cluster) fit5 <- kmeans(ClusterFood, 5) table(fit5$cluster) plotcluster(ClusterFood, fit5$cluster) fit5$centers #optimal clusters library(clValid) intern_iris <- clValid(ClusterFood, 2:5 ,clMethods=c("hierarchical","kmeans"),validation="internal") summary(intern_iris) aggregate(ClusterFood, by = list(fit2$cluster), FUN = mean) # Determine number of clusters wss <- (nrow(ClusterFood) - 1) * sum(apply(ClusterFood, 2, var)) for (i in 2:12) wss[i] <- sum(kmeans(ClusterFood[,-1], centers = i)$withinss) plot(1:12, wss, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares") prediction.strength(ClusterFood, Gmin = 2, Gmax = 15, M = 10, cutoff = 0.8) d = dist(ClusterFood, method = "euclidean") result = matrix(nrow = 14, ncol = 3) for (i in 2:15) { cluster_result = kmeans(ClusterFood, i) clusterstat = cluster.stats(d, 
cluster_result$cluster) result[i - 1, 1] = i result[i - 1, 2] = clusterstat$avg.silwidth result[i - 1, 3] = clusterstat$dunn } plot(result[, c(1, 2)], type = "l", ylab = "silhouette width", xlab = "number of clusters") plot(result[, c(1, 3)], type = "l", ylab = "dunn index", xlab = "number of clusters") #prediction strength prediction.strength(ClusterFood, Gmin = 2, Gmax = 15, M = 10, cutoff = 0.8) # hierarchical hc_result = hclust(dist(ClusterFood)) plot(hc_result) # Cut Dendrogram into 3 Clusters rect.hclust(hc_result, k = 2) rect.hclust(hc_result, k = 3) rect.hclust(hc_result, k = 4) rect.hclust(hc_result, k = 5) #EDA plot(density(ClusterFood$Oct..10)) plot(density(ClusterFood$Mar..11)) hist(ClusterFood$Oct)
# Logistic Regression # Data Preprocessing # Importing the Dataset dataset = read.csv('Social_Network_Ads.csv') dataset = dataset[,3:5] dataset install.packages('caTools') # Splitting the Datset into the Training set and Test set # install.packages('caTools') library(caTools) set.seed(123) # Ramdom_state split = sample.split(dataset$Purchased, SplitRatio = 0.75) training_set = subset(dataset,split == TRUE) test_set = subset(dataset,split == FALSE) # Feature Scaling training_set[,1:2] = scale(training_set[,2:3]) test_set[,2:3] = scale(test_set[,2:3]) # Fitting Logistic Regression to the Training set classifier = glm(formula = Purchased ~ ., family = binomial, data = training_set) # Predicting the test set results prob_pred = predict(classifier ,type ='response',newdata = test_set[-3]) y_pred = ifelse(prob_pred >0.5,1,0) # Making The Confusion Matrix cm = table(test_set[,3],y_pred) # Visualising the Training set Results install.packages('ElemStatLearn') library(ElemStatLearn) set = training_set X1 = seq(min(set[,1]) -1,max(set[,1]) +1,by=0.01) X2 = seq(min(set[,2]) -1,max(set[,2]) +1,by=0.01) grid_set = expand.grid(X1,X2) colnames(grid_set) = c('Age', 'EstimatedSalary') prob_set = predict(classifier ,type = 'response',newdata = grid_set) y_grid = ifelse(prob_set >0.5,1,0) plot(set[,-3], main = 'Logistic Regression (Training set)', xlab = 'Age',ylab = 'Estimated Salary', xlim = range(X1),ylim = range(X2)) contour(X1,X2,matrix(as.numeric(y_grid),length(X1),length(X2)),add = TRUE) points(grid_set,pch='.',col =ifelse(y_grid ==1,'springgreen3','tomato')) points(set ,pch =21,bg=ifelse(set[,3]==1,'green4','red3'))
/R/07-Logistic Regression/Logistic Regression.R
no_license
MyHackInfo/Machine-Learning-A-Z-In-Python-R
R
false
false
1,691
r
# Logistic Regression # Data Preprocessing # Importing the Dataset dataset = read.csv('Social_Network_Ads.csv') dataset = dataset[,3:5] dataset install.packages('caTools') # Splitting the Datset into the Training set and Test set # install.packages('caTools') library(caTools) set.seed(123) # Ramdom_state split = sample.split(dataset$Purchased, SplitRatio = 0.75) training_set = subset(dataset,split == TRUE) test_set = subset(dataset,split == FALSE) # Feature Scaling training_set[,1:2] = scale(training_set[,2:3]) test_set[,2:3] = scale(test_set[,2:3]) # Fitting Logistic Regression to the Training set classifier = glm(formula = Purchased ~ ., family = binomial, data = training_set) # Predicting the test set results prob_pred = predict(classifier ,type ='response',newdata = test_set[-3]) y_pred = ifelse(prob_pred >0.5,1,0) # Making The Confusion Matrix cm = table(test_set[,3],y_pred) # Visualising the Training set Results install.packages('ElemStatLearn') library(ElemStatLearn) set = training_set X1 = seq(min(set[,1]) -1,max(set[,1]) +1,by=0.01) X2 = seq(min(set[,2]) -1,max(set[,2]) +1,by=0.01) grid_set = expand.grid(X1,X2) colnames(grid_set) = c('Age', 'EstimatedSalary') prob_set = predict(classifier ,type = 'response',newdata = grid_set) y_grid = ifelse(prob_set >0.5,1,0) plot(set[,-3], main = 'Logistic Regression (Training set)', xlab = 'Age',ylab = 'Estimated Salary', xlim = range(X1),ylim = range(X2)) contour(X1,X2,matrix(as.numeric(y_grid),length(X1),length(X2)),add = TRUE) points(grid_set,pch='.',col =ifelse(y_grid ==1,'springgreen3','tomato')) points(set ,pch =21,bg=ifelse(set[,3]==1,'green4','red3'))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lines.R \name{segments.to.lines} \alias{segments.to.lines} \title{Convert segments to lines} \usage{ segments.to.lines(segments) } \arguments{ \item{segments}{a data frame of segments.} } \value{ A data frame of lines. } \description{ Convert segments to lines }
/man/segments.to.lines.Rd
permissive
Sithara26/strafica
R
false
true
341
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lines.R \name{segments.to.lines} \alias{segments.to.lines} \title{Convert segments to lines} \usage{ segments.to.lines(segments) } \arguments{ \item{segments}{a data frame of segments.} } \value{ A data frame of lines. } \description{ Convert segments to lines }
context("fbr") test_that("dec_rec", { x <- matrix(c(AirPassengers, AirPassengers), ncol = length(AirPassengers), nrow = 2, byrow = T) method <- mgr_init("tsfresh") ts_start <- as.POSIXct("2010-01-01 01:23:54", tz = "UTC") ts_end <- as.POSIXct("2010-05-01 01:23:54", tz = "UTC") method$ti <- ts_start + seq(0, 143) * 3600 # 1 Row dec <- mgr_dec(method, x[1, , drop = F]) expect_equal(x[1, , drop = F], dec) repr <- mgr_red(method, dec) expect_equal(nrow(repr), 1) # 2 Rows dec <- mgr_dec(method, x) expect_equal(x, dec) repr <- mgr_red(method, dec) expect_equal(nrow(repr), 2) # 2 Rows dec <- mgr_dec(method, x, 2) expect_equal(x, dec) repr <- mgr_red(method, dec, 2) expect_equal(nrow(repr), 2) }) test_that("select_features", { method <- mgr_init("tsfresh") y <- sample(2, 100, replace = T) X <- matrix(c(y, sample(2, 100, replace = T)), ncol = 2, nrow = 100, dimnames = list(NULL, c("F1", "F2"))) feat <- mgr_select_features(method, X, y, NA, 1) expect_equal(feat, "F1") }) test_that("is_vectorized", { method <- mgr_init("tsfresh") expect_true(mgr_is_vectorized(method)) })
/tests/testthat/test_tsfresh.R
no_license
lkegel/classrepr
R
false
false
1,167
r
context("fbr") test_that("dec_rec", { x <- matrix(c(AirPassengers, AirPassengers), ncol = length(AirPassengers), nrow = 2, byrow = T) method <- mgr_init("tsfresh") ts_start <- as.POSIXct("2010-01-01 01:23:54", tz = "UTC") ts_end <- as.POSIXct("2010-05-01 01:23:54", tz = "UTC") method$ti <- ts_start + seq(0, 143) * 3600 # 1 Row dec <- mgr_dec(method, x[1, , drop = F]) expect_equal(x[1, , drop = F], dec) repr <- mgr_red(method, dec) expect_equal(nrow(repr), 1) # 2 Rows dec <- mgr_dec(method, x) expect_equal(x, dec) repr <- mgr_red(method, dec) expect_equal(nrow(repr), 2) # 2 Rows dec <- mgr_dec(method, x, 2) expect_equal(x, dec) repr <- mgr_red(method, dec, 2) expect_equal(nrow(repr), 2) }) test_that("select_features", { method <- mgr_init("tsfresh") y <- sample(2, 100, replace = T) X <- matrix(c(y, sample(2, 100, replace = T)), ncol = 2, nrow = 100, dimnames = list(NULL, c("F1", "F2"))) feat <- mgr_select_features(method, X, y, NA, 1) expect_equal(feat, "F1") }) test_that("is_vectorized", { method <- mgr_init("tsfresh") expect_true(mgr_is_vectorized(method)) })
Carseats <- read.csv("../data/carseats.csv"); require(tree) set.seed(42) train = sample(1:nrow(Carseats),nrow(Carseats)/2) Carseats.train = Carseats[train,] Carseats.test = Carseats[-train,] tree = tree(Sales~.,data=Carseats.train) summary(tree) plot(tree) text(tree,pretty=0)
/R/carseets_dc.R
no_license
alexandraback/dcrf-workshop
R
false
false
281
r
Carseats <- read.csv("../data/carseats.csv"); require(tree) set.seed(42) train = sample(1:nrow(Carseats),nrow(Carseats)/2) Carseats.train = Carseats[train,] Carseats.test = Carseats[-train,] tree = tree(Sales~.,data=Carseats.train) summary(tree) plot(tree) text(tree,pretty=0)
# libraries --------------------------------------------------------------- if (require(pacman) == FALSE) { install.packages("pacman") } pacman::p_load(ggplot2, dplyr, caret) # data creation ----------------------------------------------------------- # Let's create 50k points on a 3x2 grid x <- runif(50000, min = 0, max = 3) y <- runif(50000, min = 0, max = 2) # Flag colour palette japanPalette <- c("red", "white") # Flag dataframe japan_flag <- as.data.frame(x = x) japan_flag$y <- y # Now we add the colour japan_flag <- mutate( japan_flag, flag_colour = ifelse( (x - 1.5)^2 + (y-1)^2 > 3/10, "white", "red") ) # data visualization ------------------------------------------------------ ggplot(data = japan_flag) + geom_point(aes(x = x, y = y, color = flag_colour), size = 0.05) + coord_fixed(ratio = 1) + scale_color_manual(values = japanPalette) # train and test ---------------------------------------------------------- # We are going to choose the left half of the flag to predict the other half train_id <- createDataPartition(y = japan_flag$flag_colour, p = 0.2, list = F) trainSet <- japan_flag[train_id,] testSet <- japan_flag[-train_id,] # TrainSet plot. 
We see that it contains the left part of the flag ggplot(trainSet) + geom_point(aes(x = x, y = y, color = flag_colour), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette) # logistic regression model ----------------------------------------------- # creating the glm model mod_gbm_partition <- train( flag_colour ~., data = trainSet, method = "glm" ) # apply the model and get the results testSet$predColour_glm <- predict(object = mod_gbm_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_glm), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = c("white","red")) # knn model --------------------------------------------------------------- # creating the glm model mod_knn_partition <- train( flag_colour ~., data = trainSet, method = "knn" ) mod_knn_partition <- readRDS(file = "models/model_knn_smllFlag.rds") # apply the model and get the results testSet$predColour_knn <- predict(object = mod_knn_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_knn), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette) # decision tree ------------------------------------------------------------ # creating the glm model # mod_dt_partition <- train( # flag_colour ~., # data = trainSet, # method = "rpart" # ) mod_dt_partition <- readRDS(file = "models/model_dt_smllFlag.rds") # apply the model and get the results testSet$predColour_dt <- predict(object = mod_dt_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_dt), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette) # support vector machine -------------------------------------------------- # creating the svm model # mod_svm_partition <- train( # flag_colour 
~., # data = trainSet, # method = "svmRadial" # ) mod_svm_partition <- readRDS(file = "models/model_svm_smllFlag.rds") # apply the model and get the results testSet$predColour_svm <- predict(object = mod_svm_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_svm), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette)
/.Rproj.user/C0F26720/sources/s-16679FC8/4245BAA9-contents
no_license
JoanClaverol/ubiqum_fun_with_flags
R
false
false
3,851
# libraries --------------------------------------------------------------- if (require(pacman) == FALSE) { install.packages("pacman") } pacman::p_load(ggplot2, dplyr, caret) # data creation ----------------------------------------------------------- # Let's create 50k points on a 3x2 grid x <- runif(50000, min = 0, max = 3) y <- runif(50000, min = 0, max = 2) # Flag colour palette japanPalette <- c("red", "white") # Flag dataframe japan_flag <- as.data.frame(x = x) japan_flag$y <- y # Now we add the colour japan_flag <- mutate( japan_flag, flag_colour = ifelse( (x - 1.5)^2 + (y-1)^2 > 3/10, "white", "red") ) # data visualization ------------------------------------------------------ ggplot(data = japan_flag) + geom_point(aes(x = x, y = y, color = flag_colour), size = 0.05) + coord_fixed(ratio = 1) + scale_color_manual(values = japanPalette) # train and test ---------------------------------------------------------- # We are going to choose the left half of the flag to predict the other half train_id <- createDataPartition(y = japan_flag$flag_colour, p = 0.2, list = F) trainSet <- japan_flag[train_id,] testSet <- japan_flag[-train_id,] # TrainSet plot. 
We see that it contains the left part of the flag ggplot(trainSet) + geom_point(aes(x = x, y = y, color = flag_colour), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette) # logistic regression model ----------------------------------------------- # creating the glm model mod_gbm_partition <- train( flag_colour ~., data = trainSet, method = "glm" ) # apply the model and get the results testSet$predColour_glm <- predict(object = mod_gbm_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_glm), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = c("white","red")) # knn model --------------------------------------------------------------- # creating the glm model mod_knn_partition <- train( flag_colour ~., data = trainSet, method = "knn" ) mod_knn_partition <- readRDS(file = "models/model_knn_smllFlag.rds") # apply the model and get the results testSet$predColour_knn <- predict(object = mod_knn_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_knn), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette) # decision tree ------------------------------------------------------------ # creating the glm model # mod_dt_partition <- train( # flag_colour ~., # data = trainSet, # method = "rpart" # ) mod_dt_partition <- readRDS(file = "models/model_dt_smllFlag.rds") # apply the model and get the results testSet$predColour_dt <- predict(object = mod_dt_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_dt), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette) # support vector machine -------------------------------------------------- # creating the svm model # mod_svm_partition <- train( # flag_colour 
~., # data = trainSet, # method = "svmRadial" # ) mod_svm_partition <- readRDS(file = "models/model_svm_smllFlag.rds") # apply the model and get the results testSet$predColour_svm <- predict(object = mod_svm_partition, newdata = testSet) # visualize the errors ggplot() + geom_point(data = testSet, aes(x = x, y = y, colour = predColour_svm), size = 0.1) + coord_fixed(ratio = 1) + xlim(0,3) + scale_colour_manual(values = japanPalette)
file <- commandArgs(trailingOnly = TRUE) library(rCASC) gtfUrl = array[1] fastaUrl = array[2] cellrangerIndexing(group="docker", scratch.folder=getwd(), gtf.url=gtfUrl,fasta.url=fastaUrl, genomeFolder = getwd(), bio.type="protein_coding", nThreads = 8)
/cellrangerIndexing/example_Indexing.R
no_license
Gepiro/rCASCWorkflow
R
false
false
266
r
file <- commandArgs(trailingOnly = TRUE) library(rCASC) gtfUrl = array[1] fastaUrl = array[2] cellrangerIndexing(group="docker", scratch.folder=getwd(), gtf.url=gtfUrl,fasta.url=fastaUrl, genomeFolder = getwd(), bio.type="protein_coding", nThreads = 8)
## ----setup, include = FALSE---------------------------------------------- library(kableExtra) knitr::opts_chunk$set( collapse = TRUE, comment = "#>", error = TRUE ) ## ------------------------------------------------------------------------ library(SingleCaseES) ## ------------------------------------------------------------------------ args(NAP) ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B) ## ------------------------------------------------------------------------ phase <- c(rep("A", 6), rep("B", 7)) phase outcome_dat <- c(A, B) outcome_dat NAP(condition = phase, outcome = outcome_dat) ## ------------------------------------------------------------------------ phase2 <- c(rep("A", 5), rep("B", 5), rep("C",3)) NAP(condition = phase2, outcome = outcome_dat) ## ------------------------------------------------------------------------ phase_rev <- c(rep("B", 7), rep("A", 6)) outcome_rev <- c(B, A) NAP(condition = phase_rev, outcome = outcome_rev, baseline_phase = "A") ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B, improvement = "decrease") ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B, SE = "unbiased") NAP(A_data = A, B_data = B, SE = "Hanley") NAP(A_data = A, B_data = B, SE = "null") NAP(A_data = A, B_data = B, SE = "none") ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B) NAP(A_data = A, B_data = B, confidence = .99) NAP(A_data = A, B_data = B, confidence = .90) ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B, confidence = NULL) ## ------------------------------------------------------------------------ Tau(A_data = A, B_data = B) PND(A_data = A, 
B_data = B) PEM(A_data = A, B_data = B) PAND(A_data = A, B_data = B) IRD(A_data = A, B_data = B) Tau_U(A_data = A, B_data = B) ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) SMD(A_data = A, B_data = B, improvement = "increase") SMD(A_data = A, B_data = B, improvement = "decrease") ## ------------------------------------------------------------------------ SMD(A_data = A, B_data = B, std_dev = "baseline") SMD(A_data = A, B_data = B, std_dev = "pool") ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) LRRi(A_data = A, B_data = B, scale = "percentage") LRRi(A_data = A, B_data = B, improvement = "decrease", scale = "percentage") ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) LRRi(A_data = A, B_data = B, scale = "count") LRRi(A_data = A, B_data = B, scale = "count", improvement = "decrease") ## ------------------------------------------------------------------------ A <- c(0, 0, 0, 0) B <- c(28, 25, 24, 27, 30, 30, 29) LRRd(A_data = A, B_data = B, scale = "rate") LRRd(A_data = A, B_data = B, scale = "rate", observation_length = 30) ## ------------------------------------------------------------------------ LRRd(A_data = A, B_data = B, scale = "percentage") LRRd(A_data = A, B_data = B, scale = "percentage", intervals = 180) ## ------------------------------------------------------------------------ A_pct <- c(20, 20, 25, 25, 20, 25) B_pct <- c(30, 25, 25, 25, 35, 30, 25) LOR(A_data = A_pct, B_data = B_pct, scale = "percentage") LOR(A_data = A_pct/100, B_data = B_pct/100, scale = "proportion") LOR(A_data = A_pct, B_data = B_pct, scale = "count") LOR(A_data = A_pct, B_data = B_pct, scale = "proportion") ## ------------------------------------------------------------------------ LOR(A_data = A_pct, 
B_data = B_pct, scale = "percentage", improvement = "increase") LOR(A_data = A_pct, B_data = B_pct, scale = "percentage", improvement = "decrease") ## ------------------------------------------------------------------------ LOR(A_data = c(0,0,0), B_data = B_pct, scale = "percentage") LOR(A_data = c(0,0,0), B_data = B_pct, scale = "percentage", intervals = 20) ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) calc_ES(A_data = A, B_data = B, ES = c("NAP","PND","Tau-U")) ## ------------------------------------------------------------------------ phase <- c(rep("A", length(A)), rep("B", length(B))) outcome <- c(A, B) calc_ES(condition = phase, outcome = outcome, baseline_phase = "A", ES = c("NAP","PND","Tau-U")) ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "all") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "NOM") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "parametric") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "NOM", improvement = "decrease") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "NOM", improvement = "decrease", confidence = NULL) ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "parametric", scale = "count") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = c("NAP","PND","SMD")) calc_ES(A_data = A, B_data = B, ES = c("NAP","PND","SMD"), format = "wide") ## ------------------------------------------------------------------------ data(McKissick) ## ---- echo = 
FALSE------------------------------------------------------- knitr::kable(head(McKissick, n = 10)) ## ------------------------------------------------------------------------ data(Schmidt2007) ## ---- echo = F----------------------------------------------------------- knitr::kable(head(Schmidt2007[,c("Case_pseudonym", "Behavior_type", "Session_number", "Outcome", "Condition", "Phase_num", "Metric", "Session_length", "direction", "n_Intervals")], n = 10), longtable = TRUE) %>% kable_styling() %>% scroll_box(width = "100%") ## ------------------------------------------------------------------------ args(batch_calc_ES) ## ------------------------------------------------------------------------ mckissick_ES <- batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", ES = c("NAP", "PND")) ## ---- echo = F----------------------------------------------------------- kable(mckissick_ES) ## ------------------------------------------------------------------------ schmidt_ES <- batch_calc_ES(dat = Schmidt2007, grouping = c(Case_pseudonym, Behavior_type, Phase_num), condition = Condition, outcome = Outcome, improvement = direction, ES = c("NAP", "PND")) ## ---- echo = F----------------------------------------------------------- kable(schmidt_ES) %>% kable_styling() %>% scroll_box(width = "100%", height = "800px") ## ------------------------------------------------------------------------ mckissick_ES <- batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", scale = "count", observation_length = 20, ES = "parametric") ## ---- echo = F----------------------------------------------------------- kable(mckissick_ES) ## ------------------------------------------------------------------------ schmidt_ES <- batch_calc_ES(dat = Schmidt2007, grouping = c(Case_pseudonym, Behavior_type, Phase_num), condition = Condition, outcome = Outcome, improvement 
= direction, scale = Metric, observation_length = Session_length, intervals = n_Intervals, ES = c("parametric")) ## ------------------------------------------------------------------------ kable(schmidt_ES) %>% kable_styling() %>% scroll_box(width = "100%", height = "800px") ## ------------------------------------------------------------------------ mckissick_wide_ES <- batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", ES = c("NAP", "PND"), format = "wide") ## ---- echo = F----------------------------------------------------------- kable(mckissick_wide_ES) ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", scale = "count", observation_length = 20, ES = c("LRRi","LOR"), warn = FALSE) ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "SMD", improvement = "decrease") batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "SMD", improvement = "decrease", std_dev = "pool") ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "parametric", improvement = "decrease", scale = Procedure, observation_length = Session_length, bias_correct = FALSE, warn = FALSE) ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "parametric", improvement = "decrease", scale = Procedure, observation_length = Session_length, confidence = NULL, warn = FALSE)
/data/genthat_extracted_code/SingleCaseES/vignettes/Using-SingleCaseES.R
no_license
surayaaramli/typeRrh
R
false
false
11,267
r
## ----setup, include = FALSE---------------------------------------------- library(kableExtra) knitr::opts_chunk$set( collapse = TRUE, comment = "#>", error = TRUE ) ## ------------------------------------------------------------------------ library(SingleCaseES) ## ------------------------------------------------------------------------ args(NAP) ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B) ## ------------------------------------------------------------------------ phase <- c(rep("A", 6), rep("B", 7)) phase outcome_dat <- c(A, B) outcome_dat NAP(condition = phase, outcome = outcome_dat) ## ------------------------------------------------------------------------ phase2 <- c(rep("A", 5), rep("B", 5), rep("C",3)) NAP(condition = phase2, outcome = outcome_dat) ## ------------------------------------------------------------------------ phase_rev <- c(rep("B", 7), rep("A", 6)) outcome_rev <- c(B, A) NAP(condition = phase_rev, outcome = outcome_rev, baseline_phase = "A") ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B, improvement = "decrease") ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B, SE = "unbiased") NAP(A_data = A, B_data = B, SE = "Hanley") NAP(A_data = A, B_data = B, SE = "null") NAP(A_data = A, B_data = B, SE = "none") ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B) NAP(A_data = A, B_data = B, confidence = .99) NAP(A_data = A, B_data = B, confidence = .90) ## ------------------------------------------------------------------------ NAP(A_data = A, B_data = B, confidence = NULL) ## ------------------------------------------------------------------------ Tau(A_data = A, B_data = B) PND(A_data = A, 
B_data = B) PEM(A_data = A, B_data = B) PAND(A_data = A, B_data = B) IRD(A_data = A, B_data = B) Tau_U(A_data = A, B_data = B) ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) SMD(A_data = A, B_data = B, improvement = "increase") SMD(A_data = A, B_data = B, improvement = "decrease") ## ------------------------------------------------------------------------ SMD(A_data = A, B_data = B, std_dev = "baseline") SMD(A_data = A, B_data = B, std_dev = "pool") ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) LRRi(A_data = A, B_data = B, scale = "percentage") LRRi(A_data = A, B_data = B, improvement = "decrease", scale = "percentage") ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) LRRi(A_data = A, B_data = B, scale = "count") LRRi(A_data = A, B_data = B, scale = "count", improvement = "decrease") ## ------------------------------------------------------------------------ A <- c(0, 0, 0, 0) B <- c(28, 25, 24, 27, 30, 30, 29) LRRd(A_data = A, B_data = B, scale = "rate") LRRd(A_data = A, B_data = B, scale = "rate", observation_length = 30) ## ------------------------------------------------------------------------ LRRd(A_data = A, B_data = B, scale = "percentage") LRRd(A_data = A, B_data = B, scale = "percentage", intervals = 180) ## ------------------------------------------------------------------------ A_pct <- c(20, 20, 25, 25, 20, 25) B_pct <- c(30, 25, 25, 25, 35, 30, 25) LOR(A_data = A_pct, B_data = B_pct, scale = "percentage") LOR(A_data = A_pct/100, B_data = B_pct/100, scale = "proportion") LOR(A_data = A_pct, B_data = B_pct, scale = "count") LOR(A_data = A_pct, B_data = B_pct, scale = "proportion") ## ------------------------------------------------------------------------ LOR(A_data = A_pct, 
B_data = B_pct, scale = "percentage", improvement = "increase") LOR(A_data = A_pct, B_data = B_pct, scale = "percentage", improvement = "decrease") ## ------------------------------------------------------------------------ LOR(A_data = c(0,0,0), B_data = B_pct, scale = "percentage") LOR(A_data = c(0,0,0), B_data = B_pct, scale = "percentage", intervals = 20) ## ------------------------------------------------------------------------ A <- c(20, 20, 26, 25, 22, 23) B <- c(28, 25, 24, 27, 30, 30, 29) calc_ES(A_data = A, B_data = B, ES = c("NAP","PND","Tau-U")) ## ------------------------------------------------------------------------ phase <- c(rep("A", length(A)), rep("B", length(B))) outcome <- c(A, B) calc_ES(condition = phase, outcome = outcome, baseline_phase = "A", ES = c("NAP","PND","Tau-U")) ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "all") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "NOM") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "parametric") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "NOM", improvement = "decrease") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "NOM", improvement = "decrease", confidence = NULL) ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = "parametric", scale = "count") ## ------------------------------------------------------------------------ calc_ES(A_data = A, B_data = B, ES = c("NAP","PND","SMD")) calc_ES(A_data = A, B_data = B, ES = c("NAP","PND","SMD"), format = "wide") ## ------------------------------------------------------------------------ data(McKissick) ## ---- echo = 
FALSE------------------------------------------------------- knitr::kable(head(McKissick, n = 10)) ## ------------------------------------------------------------------------ data(Schmidt2007) ## ---- echo = F----------------------------------------------------------- knitr::kable(head(Schmidt2007[,c("Case_pseudonym", "Behavior_type", "Session_number", "Outcome", "Condition", "Phase_num", "Metric", "Session_length", "direction", "n_Intervals")], n = 10), longtable = TRUE) %>% kable_styling() %>% scroll_box(width = "100%") ## ------------------------------------------------------------------------ args(batch_calc_ES) ## ------------------------------------------------------------------------ mckissick_ES <- batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", ES = c("NAP", "PND")) ## ---- echo = F----------------------------------------------------------- kable(mckissick_ES) ## ------------------------------------------------------------------------ schmidt_ES <- batch_calc_ES(dat = Schmidt2007, grouping = c(Case_pseudonym, Behavior_type, Phase_num), condition = Condition, outcome = Outcome, improvement = direction, ES = c("NAP", "PND")) ## ---- echo = F----------------------------------------------------------- kable(schmidt_ES) %>% kable_styling() %>% scroll_box(width = "100%", height = "800px") ## ------------------------------------------------------------------------ mckissick_ES <- batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", scale = "count", observation_length = 20, ES = "parametric") ## ---- echo = F----------------------------------------------------------- kable(mckissick_ES) ## ------------------------------------------------------------------------ schmidt_ES <- batch_calc_ES(dat = Schmidt2007, grouping = c(Case_pseudonym, Behavior_type, Phase_num), condition = Condition, outcome = Outcome, improvement 
= direction, scale = Metric, observation_length = Session_length, intervals = n_Intervals, ES = c("parametric")) ## ------------------------------------------------------------------------ kable(schmidt_ES) %>% kable_styling() %>% scroll_box(width = "100%", height = "800px") ## ------------------------------------------------------------------------ mckissick_wide_ES <- batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", ES = c("NAP", "PND"), format = "wide") ## ---- echo = F----------------------------------------------------------- kable(mckissick_wide_ES) ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, improvement = "decrease", scale = "count", observation_length = 20, ES = c("LRRi","LOR"), warn = FALSE) ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "SMD", improvement = "decrease") batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "SMD", improvement = "decrease", std_dev = "pool") ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "parametric", improvement = "decrease", scale = Procedure, observation_length = Session_length, bias_correct = FALSE, warn = FALSE) ## ------------------------------------------------------------------------ batch_calc_ES(dat = McKissick, grouping = Case_pseudonym, condition = Condition, outcome = Outcome, ES = "parametric", improvement = "decrease", scale = Procedure, observation_length = Session_length, confidence = NULL, warn = FALSE)
## The function calculates the inverse of the special "matrix" created with the makeCacheMatrix function, ## And cache the result into that special "matrix". cacheSolve <- function(x, ...) { ## pass makeCacheMatrix object as argument s <- x$getinverse() if(!is.null(s)) { ## check if the inverse has already been calculated message("getting cached data") return(s) } data <- x$get() s <- solve(data, ...) x$setinverse(s) ## cache the result via the setinverse function s ## Return a matrix that is the inverse of 'x' }
/cacheSolve.R
no_license
CathyHJD/ProgrammingAssignment2
R
false
false
609
r
## The function calculates the inverse of the special "matrix" created with the makeCacheMatrix function, ## And cache the result into that special "matrix". cacheSolve <- function(x, ...) { ## pass makeCacheMatrix object as argument s <- x$getinverse() if(!is.null(s)) { ## check if the inverse has already been calculated message("getting cached data") return(s) } data <- x$get() s <- solve(data, ...) x$setinverse(s) ## cache the result via the setinverse function s ## Return a matrix that is the inverse of 'x' }
#project - server library(shiny) # function to turn input into output hoursNeeded<-function(hours,days) (56-hours)/(7-days) # what to print shinyServer( function(input,output){ output$inputValue1<-renderPrint({input$hours}) output$inputValue2<-renderPrint({input$days}) output$hoursToSleep<-renderPrint({hoursNeeded(input$hours,input$days)}) } )
/server.R
no_license
engpeng/DevelopingDataProduct
R
false
false
365
r
#project - server library(shiny) # function to turn input into output hoursNeeded<-function(hours,days) (56-hours)/(7-days) # what to print shinyServer( function(input,output){ output$inputValue1<-renderPrint({input$hours}) output$inputValue2<-renderPrint({input$days}) output$hoursToSleep<-renderPrint({hoursNeeded(input$hours,input$days)}) } )
fscores.l <- function (betas, X, method) { logf.z <- function (z, y, betas) { DF <- data.frame(rbind(z)) names(DF) <- if (length(z) == 1) c("z1") else c("z1", "z2") Z <- model.matrix(form, DF) Z <- Z[, match(colnames(betas), colnames(Z)) , drop = FALSE] pr <- probs(c(Z %*% t(betas))) if (prior) -sum(dbinom(y, 1, pr, log = TRUE), na.rm = TRUE) - sum(dnorm(z, log = TRUE)) else -sum(dbinom(y, 1, pr, log = TRUE), na.rm = TRUE) } fscore <- function (logf.z, y, betas) { opt <- optim(rep(0, factors), fn = logf.z, method = "BFGS", hessian = TRUE, y = y, betas = betas) hc <- if (factors == 1) c(1/opt$hes) else solve(opt$hessian) list(mu = opt$par, hes = hc) } if (method == "EB") { scores.ML <- matrix(0, nx, factors) hes.ML <- array(data = 0, dim = c(factors, factors, nx)) for (i in 1:nx) { out <- fscore(logf.z = logf.z, y = X[i, ], betas = betas) scores.ML[i, ] <- out$mu hes.ML[, , i] <- out$hes } se.ML <- t(apply(hes.ML, 3, function(x) sqrt(diag(x)))) res$z1 <- if (factors == 2) scores.ML[, 1] else c(scores.ML) res$se.z1 <- if (factors == 2) se.ML[, 1] else c(se.ML) if (factors == 2) { res$z2 <- scores.ML[, 2] res$se.z2 <- se.ML[, 2] } } if (method == "EAP") { Z <- object$GH$Z GHw <- object$GH$GHw pr <- probs(Z %*% t(betas)) mX <- 1 - X if (any(na.ind <- is.na(X))) X[na.ind] <- mX[na.ind] <- 0 p.xz <- exp(X %*% t(log(pr)) + mX %*% t(log(1 - pr))) p.x <- c(p.xz %*% GHw) p.zx <- p.xz / p.x res$z1 <- c(p.zx %*% (Z[, 2] * GHw)) res$se.z1 <- sqrt(c(p.zx %*% (Z[, 2] * Z[, 2] * GHw)) - res$z1^2) if (object$ltst$factors > 1) { res$z2 <- c(p.zx %*% (Z[, 3] * GHw)) res$se.z2 <- sqrt(c(p.zx %*% (Z[, 3] * Z[, 3] * GHw)) - res$z2^2) } } if (method == "MI") { constraint <- object$constraint cnams.betas <- colnames(betas) if (!is.null(constraint)) betas <- betas[-((constraint[, 2] - 1) * p + constraint[, 1])] var.b <- vcov(object, robust.se) scores.B <- lapply(1:B, array, data = 0, dim = c(nx, factors)) hes.B <- lapply(1:B, array, data = 0, dim = c(factors, factors, nx)) for (b in 1:B) { 
betas. <- mvrnorm(1, c(betas), var.b) betas. <- betas.ltm(betas., constraint, p, q.) colnames(betas.) <- cnams.betas for (i in 1:nx) { out <- fscore(logf.z = logf.z, y = X[i, ], betas = betas.) scores.B[[b]][i, ] <- out$mu hes.B[[b]][, , i] <- out$hes } } scores.av <- matMeans(scores.B) hes.av <- matArrays(hes.B) SV <- lapply(1:B, array, data = 0, dim = c(factors, factors, nx)) for (b in 1:B) { for (i in 1:nx) { sc.dif <- scores.B[[b]][i, ] - scores.av[i, ] SV[[b]][, , i] <- outer(sc.dif, sc.dif) } } SV <- (B * matArrays(SV))/(B - 1) hes.av <- hes.av + (1 + 1/B) * SV se.av <- t(apply(hes.av, 3, function(x) sqrt(diag(x)))) if (factors == 1) { res$z1 <- c(scores.av) res$se.z1 <- c(se.av) } if (factors == 2) { res$z1 <- scores.av[, 1] res$se.z1 <- se.av[, 1] res$z2 <- scores.av[, 2] res$se.z2 <- se.av[, 2] } attr(res, "zvalues.MI") <- scores.B attr(res, "var.zvalues.MI") <- hes.B } res }
/ltm/R/fscores.l.R
no_license
ingted/R-Examples
R
false
false
3,838
r
fscores.l <- function (betas, X, method) { logf.z <- function (z, y, betas) { DF <- data.frame(rbind(z)) names(DF) <- if (length(z) == 1) c("z1") else c("z1", "z2") Z <- model.matrix(form, DF) Z <- Z[, match(colnames(betas), colnames(Z)) , drop = FALSE] pr <- probs(c(Z %*% t(betas))) if (prior) -sum(dbinom(y, 1, pr, log = TRUE), na.rm = TRUE) - sum(dnorm(z, log = TRUE)) else -sum(dbinom(y, 1, pr, log = TRUE), na.rm = TRUE) } fscore <- function (logf.z, y, betas) { opt <- optim(rep(0, factors), fn = logf.z, method = "BFGS", hessian = TRUE, y = y, betas = betas) hc <- if (factors == 1) c(1/opt$hes) else solve(opt$hessian) list(mu = opt$par, hes = hc) } if (method == "EB") { scores.ML <- matrix(0, nx, factors) hes.ML <- array(data = 0, dim = c(factors, factors, nx)) for (i in 1:nx) { out <- fscore(logf.z = logf.z, y = X[i, ], betas = betas) scores.ML[i, ] <- out$mu hes.ML[, , i] <- out$hes } se.ML <- t(apply(hes.ML, 3, function(x) sqrt(diag(x)))) res$z1 <- if (factors == 2) scores.ML[, 1] else c(scores.ML) res$se.z1 <- if (factors == 2) se.ML[, 1] else c(se.ML) if (factors == 2) { res$z2 <- scores.ML[, 2] res$se.z2 <- se.ML[, 2] } } if (method == "EAP") { Z <- object$GH$Z GHw <- object$GH$GHw pr <- probs(Z %*% t(betas)) mX <- 1 - X if (any(na.ind <- is.na(X))) X[na.ind] <- mX[na.ind] <- 0 p.xz <- exp(X %*% t(log(pr)) + mX %*% t(log(1 - pr))) p.x <- c(p.xz %*% GHw) p.zx <- p.xz / p.x res$z1 <- c(p.zx %*% (Z[, 2] * GHw)) res$se.z1 <- sqrt(c(p.zx %*% (Z[, 2] * Z[, 2] * GHw)) - res$z1^2) if (object$ltst$factors > 1) { res$z2 <- c(p.zx %*% (Z[, 3] * GHw)) res$se.z2 <- sqrt(c(p.zx %*% (Z[, 3] * Z[, 3] * GHw)) - res$z2^2) } } if (method == "MI") { constraint <- object$constraint cnams.betas <- colnames(betas) if (!is.null(constraint)) betas <- betas[-((constraint[, 2] - 1) * p + constraint[, 1])] var.b <- vcov(object, robust.se) scores.B <- lapply(1:B, array, data = 0, dim = c(nx, factors)) hes.B <- lapply(1:B, array, data = 0, dim = c(factors, factors, nx)) for (b in 1:B) { 
betas. <- mvrnorm(1, c(betas), var.b) betas. <- betas.ltm(betas., constraint, p, q.) colnames(betas.) <- cnams.betas for (i in 1:nx) { out <- fscore(logf.z = logf.z, y = X[i, ], betas = betas.) scores.B[[b]][i, ] <- out$mu hes.B[[b]][, , i] <- out$hes } } scores.av <- matMeans(scores.B) hes.av <- matArrays(hes.B) SV <- lapply(1:B, array, data = 0, dim = c(factors, factors, nx)) for (b in 1:B) { for (i in 1:nx) { sc.dif <- scores.B[[b]][i, ] - scores.av[i, ] SV[[b]][, , i] <- outer(sc.dif, sc.dif) } } SV <- (B * matArrays(SV))/(B - 1) hes.av <- hes.av + (1 + 1/B) * SV se.av <- t(apply(hes.av, 3, function(x) sqrt(diag(x)))) if (factors == 1) { res$z1 <- c(scores.av) res$se.z1 <- c(se.av) } if (factors == 2) { res$z1 <- scores.av[, 1] res$se.z1 <- se.av[, 1] res$z2 <- scores.av[, 2] res$se.z2 <- se.av[, 2] } attr(res, "zvalues.MI") <- scores.B attr(res, "var.zvalues.MI") <- hes.B } res }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sts.R \name{sts_semi_local_linear_trend_state_space_model} \alias{sts_semi_local_linear_trend_state_space_model} \title{State space model for a semi-local linear trend.} \usage{ sts_semi_local_linear_trend_state_space_model(num_timesteps, level_scale, slope_mean, slope_scale, autoregressive_coef, initial_state_prior, observation_noise_scale = 0, initial_step = 0, validate_args = FALSE, allow_nan_stats = TRUE, name = NULL) } \arguments{ \item{num_timesteps}{Scalar \code{integer} \code{tensor} number of timesteps to model with this distribution.} \item{level_scale}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the standard deviation of the level transitions.} \item{slope_mean}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the expected long-term mean of the latent slope.} \item{slope_scale}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the standard deviation of the slope transitions.} \item{autoregressive_coef}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} defining the AR1 process on the latent slope.} \item{initial_state_prior}{instance of \code{tfd_multivariate_normal} representing the prior distribution on latent states. Must have event shape \code{[1]} (as \code{tfd_linear_gaussian_state_space_model} requires a rank-1 event shape).} \item{observation_noise_scale}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the standard deviation of the observation noise.} \item{initial_step}{Optional scalar \code{integer} \code{tensor} specifying the starting timestep. Default value: 0.} \item{validate_args}{\code{logical}. Whether to validate input with asserts. 
If \code{validate_args} is \code{FALSE}, and the inputs are invalid, correct behavior is not guaranteed. Default value: \code{FALSE}.} \item{allow_nan_stats}{\code{logical}. If \code{FALSE}, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If \code{TRUE}, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. Default value: \code{TRUE}.} \item{name}{\code{string} prefixed to ops created by this class. Default value: "SemiLocalLinearTrendStateSpaceModel".} } \description{ A state space model (SSM) posits a set of latent (unobserved) variables that evolve over time with dynamics specified by a probabilistic transition model \code{p(z[t+1] | z[t])}. At each timestep, we observe a value sampled from an observation model conditioned on the current state, \code{p(x[t] | z[t])}. The special case where both the transition and observation models are Gaussians with mean specified as a linear function of the inputs, is known as a linear Gaussian state space model and supports tractable exact probabilistic calculations; see \code{tfd_linear_gaussian_state_space_model} for details. } \details{ The semi-local linear trend model is a special case of a linear Gaussian SSM, in which the latent state posits a \code{level} and \code{slope}. The \code{level} evolves via a Gaussian random walk centered at the current \code{slope}, while the \code{slope} follows a first-order autoregressive (AR1) process with mean \code{slope_mean}:\preformatted{level[t] = level[t-1] + slope[t-1] + Normal(0, level_scale) slope[t] = (slope_mean + autoregressive_coef * (slope[t-1] - slope_mean) + Normal(0., slope_scale)) } The latent state is the two-dimensional tuple \code{[level, slope]}. The \code{level} is observed at each timestep. The parameters \code{level_scale}, \code{slope_mean}, \code{slope_scale}, \code{autoregressive_coef}, and \code{observation_noise_scale} are each (a batch of) scalars. 
The batch shape of this \code{Distribution} is the broadcast batch shape of these parameters and of the \code{initial_state_prior}. Mathematical Details The semi-local linear trend model implements a \code{tfp.distributions.LinearGaussianStateSpaceModel} with \code{latent_size = 2} and \code{observation_size = 1}, following the transition model:\preformatted{transition_matrix = [[1., 1.] [0., autoregressive_coef]] transition_noise ~ N(loc=slope_mean - autoregressive_coef * slope_mean, scale=diag([level_scale, slope_scale])) } which implements the evolution of \code{[level, slope]} described above, and the observation model:\preformatted{observation_matrix = [[1., 0.]] observation_noise ~ N(loc=0, scale=observation_noise_scale) } which picks out the first latent component, i.e., the \code{level}, as the observation at each timestep. } \seealso{ Other sts: \code{\link{sts_additive_state_space_model}}, \code{\link{sts_autoregressive_state_space_model}}, \code{\link{sts_autoregressive}}, \code{\link{sts_dynamic_linear_regression_state_space_model}}, \code{\link{sts_dynamic_linear_regression}}, \code{\link{sts_linear_regression}}, \code{\link{sts_local_level_state_space_model}}, \code{\link{sts_local_level}}, \code{\link{sts_local_linear_trend_state_space_model}}, \code{\link{sts_local_linear_trend}}, \code{\link{sts_seasonal_state_space_model}}, \code{\link{sts_seasonal}}, \code{\link{sts_semi_local_linear_trend}}, \code{\link{sts_sum}} } \concept{sts}
/man/sts_semi_local_linear_trend_state_space_model.Rd
permissive
dfalbel/tfprobability
R
false
true
5,493
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sts.R \name{sts_semi_local_linear_trend_state_space_model} \alias{sts_semi_local_linear_trend_state_space_model} \title{State space model for a semi-local linear trend.} \usage{ sts_semi_local_linear_trend_state_space_model(num_timesteps, level_scale, slope_mean, slope_scale, autoregressive_coef, initial_state_prior, observation_noise_scale = 0, initial_step = 0, validate_args = FALSE, allow_nan_stats = TRUE, name = NULL) } \arguments{ \item{num_timesteps}{Scalar \code{integer} \code{tensor} number of timesteps to model with this distribution.} \item{level_scale}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the standard deviation of the level transitions.} \item{slope_mean}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the expected long-term mean of the latent slope.} \item{slope_scale}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the standard deviation of the slope transitions.} \item{autoregressive_coef}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} defining the AR1 process on the latent slope.} \item{initial_state_prior}{instance of \code{tfd_multivariate_normal} representing the prior distribution on latent states. Must have event shape \code{[1]} (as \code{tfd_linear_gaussian_state_space_model} requires a rank-1 event shape).} \item{observation_noise_scale}{Scalar (any additional dimensions are treated as batch dimensions) \code{float} \code{tensor} indicating the standard deviation of the observation noise.} \item{initial_step}{Optional scalar \code{integer} \code{tensor} specifying the starting timestep. Default value: 0.} \item{validate_args}{\code{logical}. Whether to validate input with asserts. 
If \code{validate_args} is \code{FALSE}, and the inputs are invalid, correct behavior is not guaranteed. Default value: \code{FALSE}.} \item{allow_nan_stats}{\code{logical}. If \code{FALSE}, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If \code{TRUE}, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. Default value: \code{TRUE}.} \item{name}{\code{string} prefixed to ops created by this class. Default value: "SemiLocalLinearTrendStateSpaceModel".} } \description{ A state space model (SSM) posits a set of latent (unobserved) variables that evolve over time with dynamics specified by a probabilistic transition model \code{p(z[t+1] | z[t])}. At each timestep, we observe a value sampled from an observation model conditioned on the current state, \code{p(x[t] | z[t])}. The special case where both the transition and observation models are Gaussians with mean specified as a linear function of the inputs, is known as a linear Gaussian state space model and supports tractable exact probabilistic calculations; see \code{tfd_linear_gaussian_state_space_model} for details. } \details{ The semi-local linear trend model is a special case of a linear Gaussian SSM, in which the latent state posits a \code{level} and \code{slope}. The \code{level} evolves via a Gaussian random walk centered at the current \code{slope}, while the \code{slope} follows a first-order autoregressive (AR1) process with mean \code{slope_mean}:\preformatted{level[t] = level[t-1] + slope[t-1] + Normal(0, level_scale) slope[t] = (slope_mean + autoregressive_coef * (slope[t-1] - slope_mean) + Normal(0., slope_scale)) } The latent state is the two-dimensional tuple \code{[level, slope]}. The \code{level} is observed at each timestep. The parameters \code{level_scale}, \code{slope_mean}, \code{slope_scale}, \code{autoregressive_coef}, and \code{observation_noise_scale} are each (a batch of) scalars. 
The batch shape of this \code{Distribution} is the broadcast batch shape of these parameters and of the \code{initial_state_prior}. Mathematical Details The semi-local linear trend model implements a \code{tfp.distributions.LinearGaussianStateSpaceModel} with \code{latent_size = 2} and \code{observation_size = 1}, following the transition model:\preformatted{transition_matrix = [[1., 1.] [0., autoregressive_coef]] transition_noise ~ N(loc=slope_mean - autoregressive_coef * slope_mean, scale=diag([level_scale, slope_scale])) } which implements the evolution of \code{[level, slope]} described above, and the observation model:\preformatted{observation_matrix = [[1., 0.]] observation_noise ~ N(loc=0, scale=observation_noise_scale) } which picks out the first latent component, i.e., the \code{level}, as the observation at each timestep. } \seealso{ Other sts: \code{\link{sts_additive_state_space_model}}, \code{\link{sts_autoregressive_state_space_model}}, \code{\link{sts_autoregressive}}, \code{\link{sts_dynamic_linear_regression_state_space_model}}, \code{\link{sts_dynamic_linear_regression}}, \code{\link{sts_linear_regression}}, \code{\link{sts_local_level_state_space_model}}, \code{\link{sts_local_level}}, \code{\link{sts_local_linear_trend_state_space_model}}, \code{\link{sts_local_linear_trend}}, \code{\link{sts_seasonal_state_space_model}}, \code{\link{sts_seasonal}}, \code{\link{sts_semi_local_linear_trend}}, \code{\link{sts_sum}} } \concept{sts}
# Working script: fit species distribution models (SDMs) for Astrocaryum and
# related palm genera, stack the per-species rasters into one environmental
# suitability matrix, and prune a dated phylogeny to the taxa with usable
# range data.
# NOTE(review): every path is machine-specific; this is a personal working file.

taxa <- c('Acrocomia_aculeata', 'Acrocomia_crispa', 'Aiphanes_horrida', 'Aiphanes_minima', 'Allagoptera_caudescens', 'Astrocaryum_acaule', 'Astrocaryum_aculeatum', 'Hexopetion_alatum', 'Astrocaryum_campestre', 'Astrocaryum_carnosum', 'Astrocaryum_chambira', 'Astrocaryum_chonta', 'Astrocaryum_faranae', 'Astrocaryum_farinosum', 'Astrocaryum_ferrugineum', 'Astrocaryum_gratum', 'Astrocaryum_gynacanthum', 'Astrocaryum_huaimi', 'Astrocaryum_huicungo', 'Astrocaryum_jauari', 'Astrocaryum_javarense', 'Astrocaryum_macrocalyx', 'Astrocaryum_malybo', 'Hexopetion_mexicanum', 'Astrocaryum_minus', 'Astrocaryum_murumuru', 'Astrocaryum_paramaca', 'Astrocaryum_perangustatum', 'Astrocaryum_rodriguesii', 'Astrocaryum_sciophilum', 'Astrocaryum_scopatum', 'Astrocaryum_sociale', 'Astrocaryum_standleyanum', 'Astrocaryum_ulei', 'Astrocaryum_urostachys', 'Astrocaryum_vulgare', 'Attalea_phalerata', 'Bactris_bifida', 'Bactris_gasipaes', 'Barcella_odora', 'Beccariophoenix_madagascariensis', 'Butia_eriospatha', 'Cocos_nucifera', 'Desmoncus_orthacanthos', 'Desmoncus_polyacanthos', 'Elaeis_oleifera', 'Jubaea_chilensis', 'Jubaeopsis_caffra', 'Lytocaryum_weddellianum', 'Parajubaea_cocoides', 'Reinhardtia_simplex', 'Voanioala_gerardii')

# Create one output directory per taxon.
# (Original built the full path vector and then indexed it with [i];
# paste0(..., taxa[i]) builds only the one path that is needed.)
for (i in seq_along(taxa)) {
  dir.create(paste0("/Users/pmckenz1/Desktop/projects/matrixranger/astrocaryum_proj/", taxa[i]))
}

# Fit an SDM for every taxon from the 8th onwards (the first 7 were already run).
for (i in taxa[8:length(taxa)]) {
  sdm_function(directory.path = paste0("/Users/pmckenz1/Desktop/projects/matrixranger/astrocaryum_proj/", i),
               coords = c(-130, -25, -45, 35),      # lon-min, lon-max, lat-min, lat-max
               species.name = gsub("_", " ", i),    # binomial with a space, as used by the occurrence query
               climate.map.path = "/Users/pmckenz1/Desktop/projects/example_species_dist/climate_map/wc2-5",
               threshold = "10pct",
               raster.location = "/Users/pmckenz1/Desktop/projects/matrixranger/astrocaryum_proj/0environ_rasters",
               do.plot = FALSE)
  print(i)  # progress indicator
}

# Quick sanity map of occurrence points over the Americas.
library(rworldmap)
newmap <- getMap(resolution = "low")
plot(newmap, xlim = c(-130, -25), ylim = c(-45, 35), asp = 1)
points(acro_occur$lon, acro_occur$lat, pch = 18, col = "red")

# Spot-check: thresholded range for the first taxon.
test <- conv_sdm_rasters_threshold(
  paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[1], "/outputs/", taxa[1], ".asc"),
  paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[1], "/outputs/maxentResults.csv"),
  threshold = "10pct")

# Stack every available SDM raster (skip taxa whose MaxEnt run produced no .asc).
astro_rasters <- raster(paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[1], "/outputs/", taxa[1], ".asc"))
for (i in 2:length(taxa)) {
  rastername <- paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[i], "/outputs/", taxa[i], ".asc")
  if (file.exists(rastername)) {
    result <- raster(rastername)
    astro_rasters <- stack(astro_rasters, result)
    print(i)
  }
}

# Sum the stack and rescale the non-missing cells.
# NOTE(review): divides by max() of the original values, not (max - min);
# equivalent to min-max scaling only when min == 0 -- confirm intent.
enviro_matrix <- sum(astro_rasters)
enviro_matrix <- as.matrix(enviro_matrix)
enviro_matrix[!is.na(enviro_matrix)] <- (enviro_matrix[!is.na(enviro_matrix)] - min(enviro_matrix[!is.na(enviro_matrix)])) / max(enviro_matrix[!is.na(enviro_matrix)])
image(enviro_matrix)
write.csv(file = "/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/0enviro_matrix.csv", enviro_matrix, row.names = FALSE)

# Write the thresholded starting range for each taxon that has MaxEnt output.
for (i in seq_along(taxa)) {
  rastername <- paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[i], "/outputs/", taxa[i], ".asc")
  if (file.exists(rastername)) {
    startingrange <- conv_sdm_rasters_threshold(rastername,
                                                paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[i], "/outputs/maxentResults.csv"),
                                                threshold = "10pct")
    startingrange <- as.matrix(startingrange)
    write.csv(file = paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/0startingranges/", taxa[i], ".csv"), startingrange, row.names = FALSE)
  }
  print(i)
}

# Prune the dated BEAST tree to the taxa that have starting ranges on disk.
library(ape)
dated.tree <- read.nexus("/Users/pmckenz1/Desktop/projects/matrixranger/data/Astrocaryum_dated_tree_in_BEAST.nex")
species_names <- list.files("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/0startingranges")
# Anchored, literal-dot pattern: the original gsub(".csv", "", x) regex treats
# "." as "any character", so it would also strip e.g. "Xcsv" anywhere in a name.
species_names <- sub("\\.csv$", "", species_names)
pruned.dated.tree <- drop.tip(dated.tree, dated.tree$tip.label[!(dated.tree$tip.label %in% species_names)])

# NOTE(review): the next four lines read an edge length into `temp` and write
# it straight back -- a no-op as written. Presumably a manual edit of edge
# lengths 1 and 77 was intended here; confirm before relying on this.
temp <- pruned.dated.tree$edge.length[1]
pruned.dated.tree$edge.length[1] <- temp
temp <- pruned.dated.tree$edge.length[77]
pruned.dated.tree$edge.length[77] <- temp

plot(pruned.dated.tree)
pruned.dated.tree$tip.label
/R/ignore/astrocaryum_working_file.R
no_license
pmckenz1/matrixranger
R
false
false
4,433
r
# Working script: fit species distribution models (SDMs) for Astrocaryum and
# related palm genera, stack the per-species rasters into one environmental
# suitability matrix, and prune a dated phylogeny to the taxa with usable
# range data.
# NOTE(review): every path is machine-specific; this is a personal working file.

taxa <- c('Acrocomia_aculeata', 'Acrocomia_crispa', 'Aiphanes_horrida', 'Aiphanes_minima', 'Allagoptera_caudescens', 'Astrocaryum_acaule', 'Astrocaryum_aculeatum', 'Hexopetion_alatum', 'Astrocaryum_campestre', 'Astrocaryum_carnosum', 'Astrocaryum_chambira', 'Astrocaryum_chonta', 'Astrocaryum_faranae', 'Astrocaryum_farinosum', 'Astrocaryum_ferrugineum', 'Astrocaryum_gratum', 'Astrocaryum_gynacanthum', 'Astrocaryum_huaimi', 'Astrocaryum_huicungo', 'Astrocaryum_jauari', 'Astrocaryum_javarense', 'Astrocaryum_macrocalyx', 'Astrocaryum_malybo', 'Hexopetion_mexicanum', 'Astrocaryum_minus', 'Astrocaryum_murumuru', 'Astrocaryum_paramaca', 'Astrocaryum_perangustatum', 'Astrocaryum_rodriguesii', 'Astrocaryum_sciophilum', 'Astrocaryum_scopatum', 'Astrocaryum_sociale', 'Astrocaryum_standleyanum', 'Astrocaryum_ulei', 'Astrocaryum_urostachys', 'Astrocaryum_vulgare', 'Attalea_phalerata', 'Bactris_bifida', 'Bactris_gasipaes', 'Barcella_odora', 'Beccariophoenix_madagascariensis', 'Butia_eriospatha', 'Cocos_nucifera', 'Desmoncus_orthacanthos', 'Desmoncus_polyacanthos', 'Elaeis_oleifera', 'Jubaea_chilensis', 'Jubaeopsis_caffra', 'Lytocaryum_weddellianum', 'Parajubaea_cocoides', 'Reinhardtia_simplex', 'Voanioala_gerardii')

# Create one output directory per taxon.
# (Original built the full path vector and then indexed it with [i];
# paste0(..., taxa[i]) builds only the one path that is needed.)
for (i in seq_along(taxa)) {
  dir.create(paste0("/Users/pmckenz1/Desktop/projects/matrixranger/astrocaryum_proj/", taxa[i]))
}

# Fit an SDM for every taxon from the 8th onwards (the first 7 were already run).
for (i in taxa[8:length(taxa)]) {
  sdm_function(directory.path = paste0("/Users/pmckenz1/Desktop/projects/matrixranger/astrocaryum_proj/", i),
               coords = c(-130, -25, -45, 35),      # lon-min, lon-max, lat-min, lat-max
               species.name = gsub("_", " ", i),    # binomial with a space, as used by the occurrence query
               climate.map.path = "/Users/pmckenz1/Desktop/projects/example_species_dist/climate_map/wc2-5",
               threshold = "10pct",
               raster.location = "/Users/pmckenz1/Desktop/projects/matrixranger/astrocaryum_proj/0environ_rasters",
               do.plot = FALSE)
  print(i)  # progress indicator
}

# Quick sanity map of occurrence points over the Americas.
library(rworldmap)
newmap <- getMap(resolution = "low")
plot(newmap, xlim = c(-130, -25), ylim = c(-45, 35), asp = 1)
points(acro_occur$lon, acro_occur$lat, pch = 18, col = "red")

# Spot-check: thresholded range for the first taxon.
test <- conv_sdm_rasters_threshold(
  paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[1], "/outputs/", taxa[1], ".asc"),
  paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[1], "/outputs/maxentResults.csv"),
  threshold = "10pct")

# Stack every available SDM raster (skip taxa whose MaxEnt run produced no .asc).
astro_rasters <- raster(paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[1], "/outputs/", taxa[1], ".asc"))
for (i in 2:length(taxa)) {
  rastername <- paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[i], "/outputs/", taxa[i], ".asc")
  if (file.exists(rastername)) {
    result <- raster(rastername)
    astro_rasters <- stack(astro_rasters, result)
    print(i)
  }
}

# Sum the stack and rescale the non-missing cells.
# NOTE(review): divides by max() of the original values, not (max - min);
# equivalent to min-max scaling only when min == 0 -- confirm intent.
enviro_matrix <- sum(astro_rasters)
enviro_matrix <- as.matrix(enviro_matrix)
enviro_matrix[!is.na(enviro_matrix)] <- (enviro_matrix[!is.na(enviro_matrix)] - min(enviro_matrix[!is.na(enviro_matrix)])) / max(enviro_matrix[!is.na(enviro_matrix)])
image(enviro_matrix)
write.csv(file = "/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/0enviro_matrix.csv", enviro_matrix, row.names = FALSE)

# Write the thresholded starting range for each taxon that has MaxEnt output.
for (i in seq_along(taxa)) {
  rastername <- paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[i], "/outputs/", taxa[i], ".asc")
  if (file.exists(rastername)) {
    startingrange <- conv_sdm_rasters_threshold(rastername,
                                                paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/", taxa[i], "/outputs/maxentResults.csv"),
                                                threshold = "10pct")
    startingrange <- as.matrix(startingrange)
    write.csv(file = paste0("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/0startingranges/", taxa[i], ".csv"), startingrange, row.names = FALSE)
  }
  print(i)
}

# Prune the dated BEAST tree to the taxa that have starting ranges on disk.
library(ape)
dated.tree <- read.nexus("/Users/pmckenz1/Desktop/projects/matrixranger/data/Astrocaryum_dated_tree_in_BEAST.nex")
species_names <- list.files("/Users/pmckenz1/Desktop/projects/astrocaryum_proj_data/0startingranges")
# Anchored, literal-dot pattern: the original gsub(".csv", "", x) regex treats
# "." as "any character", so it would also strip e.g. "Xcsv" anywhere in a name.
species_names <- sub("\\.csv$", "", species_names)
pruned.dated.tree <- drop.tip(dated.tree, dated.tree$tip.label[!(dated.tree$tip.label %in% species_names)])

# NOTE(review): the next four lines read an edge length into `temp` and write
# it straight back -- a no-op as written. Presumably a manual edit of edge
# lengths 1 and 77 was intended here; confirm before relying on this.
temp <- pruned.dated.tree$edge.length[1]
pruned.dated.tree$edge.length[1] <- temp
temp <- pruned.dated.tree$edge.length[77]
pruned.dated.tree$edge.length[77] <- temp

plot(pruned.dated.tree)
pruned.dated.tree$tip.label
#' Estimate two-terminal network reliability by residual resampling.
#'
#' Dispatches to the compiled routine that matches the class of `graph`
#' (igraph, graphNEL or graphAM), times the call, and wraps the result,
#' together with call metadata, in a `residualResamplingResult` S4 object.
#'
#' @param graph An undirected graph of class "igraph", "graphNEL" or "graphAM".
#' @param probability Edge operational probability.
#' @param n Number of resampling replicates (coerced to integer in the result).
#' @param seed RNG seed forwarded to the compiled routine.
#' @param interestVertices Indices of the vertices whose connectivity is of interest.
#' @return An object of S4 class `residualResamplingResult`.
residualResampling <- function(graph, probability, n, seed, interestVertices)
{
  # Use inherits() rather than class(graph) == "...": class() may return a
  # vector of length > 1 (e.g. for subclassed objects), which both misses
  # valid inputs and makes the if() condition an error in recent R.
  if (inherits(graph, "igraph"))
  {
    if (igraph::is.directed(graph))
    {
      stop("Input `graph' must be undirected")
    }
    entryPoint <- "residualResampling_igraph"
  }
  else if (inherits(graph, "graphNEL"))
  {
    entryPoint <- "residualResampling_graphNEL"
  }
  else if (inherits(graph, "graphAM"))
  {
    entryPoint <- "residualResampling_graphAM"
  }
  else
  {
    stop("Input graph must have class \"igraph\", \"graphAM\" or \"graphNEL\"")
  }
  # Single timed .Call instead of three copy-pasted timing blocks.
  start <- Sys.time()
  result <- .Call(entryPoint, graph, probability, n, seed, interestVertices, PACKAGE="networkReliability")
  end <- Sys.time()
  return(new("residualResamplingResult", firstMoment = mpfr(result), call = match.call(), start = start, end = end, n = as.integer(n), interestVertices = as.integer(interestVertices), seed = as.integer(seed), graph = graph, probability = probability))
}
/RPackage/R/residualResampling.R
no_license
rohan-shah/networkReliability
R
false
false
1,130
r
#' Estimate two-terminal network reliability by residual resampling.
#'
#' Dispatches to the compiled routine that matches the class of `graph`
#' (igraph, graphNEL or graphAM), times the call, and wraps the result,
#' together with call metadata, in a `residualResamplingResult` S4 object.
#'
#' @param graph An undirected graph of class "igraph", "graphNEL" or "graphAM".
#' @param probability Edge operational probability.
#' @param n Number of resampling replicates (coerced to integer in the result).
#' @param seed RNG seed forwarded to the compiled routine.
#' @param interestVertices Indices of the vertices whose connectivity is of interest.
#' @return An object of S4 class `residualResamplingResult`.
residualResampling <- function(graph, probability, n, seed, interestVertices)
{
  # Use inherits() rather than class(graph) == "...": class() may return a
  # vector of length > 1 (e.g. for subclassed objects), which both misses
  # valid inputs and makes the if() condition an error in recent R.
  if (inherits(graph, "igraph"))
  {
    if (igraph::is.directed(graph))
    {
      stop("Input `graph' must be undirected")
    }
    entryPoint <- "residualResampling_igraph"
  }
  else if (inherits(graph, "graphNEL"))
  {
    entryPoint <- "residualResampling_graphNEL"
  }
  else if (inherits(graph, "graphAM"))
  {
    entryPoint <- "residualResampling_graphAM"
  }
  else
  {
    stop("Input graph must have class \"igraph\", \"graphAM\" or \"graphNEL\"")
  }
  # Single timed .Call instead of three copy-pasted timing blocks.
  start <- Sys.time()
  result <- .Call(entryPoint, graph, probability, n, seed, interestVertices, PACKAGE="networkReliability")
  end <- Sys.time()
  return(new("residualResamplingResult", firstMoment = mpfr(result), call = match.call(), start = start, end = end, n = as.integer(n), interestVertices = as.integer(interestVertices), seed = as.integer(seed), graph = graph, probability = probability))
}
# Objective: classify presence of heart disease with a support vector machine
# (e1071::svm), with the misclassification cost tuned by cross-validation.
library(caret)
library(e1071)

# Interactively pick the heart-disease CSV from disk.
heart_df<-read.csv(file.choose())
str(heart_df)

# Recode categorical columns as factors and measurement columns as numeric
# so that svm() treats each variable with the right type.
heart_df$target<-as.factor(heart_df$target)
heart_df$Sex<-as.factor(heart_df$Sex)
heart_df$CP<-as.factor(heart_df$CP)
heart_df$Fbs<-as.factor(heart_df$Fbs)
heart_df$ECG<-as.factor(heart_df$ECG)
heart_df$exang<-as.factor(heart_df$exang)
heart_df$thal<-as.factor(heart_df$thal)
heart_df$Age<-as.numeric(heart_df$Age)
heart_df$Trestbps<-as.numeric(heart_df$Trestbps)
heart_df$Chol<-as.numeric(heart_df$Chol)
heart_df$Thalach<-as.numeric(heart_df$Thalach)
heart_df$slope<-as.numeric(heart_df$slope)
heart_df$ca<-as.numeric(heart_df$ca)
str(heart_df)
View(heart_df)

# Creating test and train datasets: 70/30 stratified split on the target.
set.seed(15) # fixes the random data partition so the split is reproducible
intrain<-createDataPartition(heart_df$target,p=.7,list = FALSE)
training<-heart_df[intrain,]
testing<-heart_df[-intrain,]

# Summary and missing-value check (count of NAs per column).
summary(training)
sapply(training,function(x) sum(is.na(x)))

# Build an initial SVM with a small cost (soft margin); unscaled features.
svm_model1<-svm(target~.,data=training,cost=0.01,scale=FALSE)
summary(svm_model1)
# gamma widens/narrows the margin between the hyperplane and support vectors

# Accuracy of the model on training data (in-sample fit).
confusionMatrix(svm_model1$fitted,training$target)

# Prediction on the held-out test set.
testing_pred<-predict(svm_model1,testing)
# Accuracy of the model on test data (out-of-sample).
confusionMatrix(testing_pred,testing$target)

# To improve accuracy: vary cost (e.g. 0.1 or 100) and the kernel -- the
# kernel choice directly affects model accuracy. tune() grid-searches cost
# with cross-validation.
tune.out<-tune(svm,target~.,data=training, kernel="linear", ranges = list(cost=c(0.001,0.01,0.1,1,5,10,100)))
summary(tune.out)

# Best model selected by the cross-validated tuning run.
bestmod=tune.out$best.model
summary(bestmod)

# Accuracy of the best model on training data.
confusionMatrix(bestmod$fitted,training$target)

# Prediction on testing data with the tuned model.
ypred=predict(bestmod,testing)
table(predict=ypred,truth=testing$target)
# Accuracy of the tuned model on test data.
confusionMatrix(ypred,testing$target)
/SVM/svm on heart data.R
no_license
manojnahak02/R-Project-codes
R
false
false
2,058
r
# Objective: classify presence of heart disease with a support vector machine
# (e1071::svm), with the misclassification cost tuned by cross-validation.
library(caret)
library(e1071)

# Interactively pick the heart-disease CSV from disk.
heart_df<-read.csv(file.choose())
str(heart_df)

# Recode categorical columns as factors and measurement columns as numeric
# so that svm() treats each variable with the right type.
heart_df$target<-as.factor(heart_df$target)
heart_df$Sex<-as.factor(heart_df$Sex)
heart_df$CP<-as.factor(heart_df$CP)
heart_df$Fbs<-as.factor(heart_df$Fbs)
heart_df$ECG<-as.factor(heart_df$ECG)
heart_df$exang<-as.factor(heart_df$exang)
heart_df$thal<-as.factor(heart_df$thal)
heart_df$Age<-as.numeric(heart_df$Age)
heart_df$Trestbps<-as.numeric(heart_df$Trestbps)
heart_df$Chol<-as.numeric(heart_df$Chol)
heart_df$Thalach<-as.numeric(heart_df$Thalach)
heart_df$slope<-as.numeric(heart_df$slope)
heart_df$ca<-as.numeric(heart_df$ca)
str(heart_df)
View(heart_df)

# Creating test and train datasets: 70/30 stratified split on the target.
set.seed(15) # fixes the random data partition so the split is reproducible
intrain<-createDataPartition(heart_df$target,p=.7,list = FALSE)
training<-heart_df[intrain,]
testing<-heart_df[-intrain,]

# Summary and missing-value check (count of NAs per column).
summary(training)
sapply(training,function(x) sum(is.na(x)))

# Build an initial SVM with a small cost (soft margin); unscaled features.
svm_model1<-svm(target~.,data=training,cost=0.01,scale=FALSE)
summary(svm_model1)
# gamma widens/narrows the margin between the hyperplane and support vectors

# Accuracy of the model on training data (in-sample fit).
confusionMatrix(svm_model1$fitted,training$target)

# Prediction on the held-out test set.
testing_pred<-predict(svm_model1,testing)
# Accuracy of the model on test data (out-of-sample).
confusionMatrix(testing_pred,testing$target)

# To improve accuracy: vary cost (e.g. 0.1 or 100) and the kernel -- the
# kernel choice directly affects model accuracy. tune() grid-searches cost
# with cross-validation.
tune.out<-tune(svm,target~.,data=training, kernel="linear", ranges = list(cost=c(0.001,0.01,0.1,1,5,10,100)))
summary(tune.out)

# Best model selected by the cross-validated tuning run.
bestmod=tune.out$best.model
summary(bestmod)

# Accuracy of the best model on training data.
confusionMatrix(bestmod$fitted,training$target)

# Prediction on testing data with the tuned model.
ypred=predict(bestmod,testing)
table(predict=ypred,truth=testing$target)
# Accuracy of the tuned model on test data.
confusionMatrix(ypred,testing$target)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plantGlycoMSFunctions.R \name{Read.GlycoMod.maxis} \alias{Read.GlycoMod.maxis} \title{A function to import GlycoMod data from maxis results} \usage{ Read.GlycoMod.maxis(input, ChainSaw, spectrum.table = spectrum.table, dir = "MS2Data") } \arguments{ \item{input}{glycopeptide identifications from GlycoMod} \item{ChainSaw}{an in silico digest} \item{spectrum.table}{a summary table of MS data} \item{dir}{a directory with MS2 binary data} } \description{ This function imports GlycoMod glycopeptide identification data from maxis results. } \examples{ Read.GlycoMod.maxis() } \keyword{import}
/man/Read.GlycoMod.maxis.Rd
no_license
cpanse/plantGlycoMS
R
false
true
637
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plantGlycoMSFunctions.R \name{Read.GlycoMod.maxis} \alias{Read.GlycoMod.maxis} \title{A function to import GlycoMod data from maxis results} \usage{ Read.GlycoMod.maxis(input, ChainSaw, spectrum.table = spectrum.table, dir = "MS2Data") } \arguments{ \item{input}{glycopeptide identifications from GlycoMod} \item{ChainSaw}{an in silico digest} \item{spectrum.table}{a summary table of MS data} \item{dir}{a directory with MS2 binary data} } \description{ This function imports GlycoMod glycopeptide identification data from maxis results. } \examples{ Read.GlycoMod.maxis() } \keyword{import}
# Libraries -------------------------------------------------------------------
library(tidyverse)
library(ggthemes)

# Data ------------------------------------------------------------------------
# MMSI 2016 survey microdata (INEGI).
data <- readr::read_csv("./Data/MMSI_2016.csv")

## Preprocessing: keep only the three identity variables plus the survey
## expansion factor, with raw codes recoded to readable factor labels.
data <- transmute(
  data,
  indigenous_language = recode_factor(P10_1, `1` = "Yes", `2` = "No"),
  skin_tone = recode_factor(
    P10_2,
    "01" = "Dark", "02" = "Dark", "03" = "Dark",
    "04" = "Dark", "05" = "Dark", "06" = "Dark",
    "07" = "Brown",
    "08" = "Light Brown",
    "09" = "White", "10" = "White", "11" = "White"
  ),
  ethnic_identity = recode_factor(
    P10_3,
    `1` = "Afro-descendant", `2` = "Indigenous", `3` = "Mestizo",
    `4` = "White", `5` = "Others", `9` = "Not Specified"
  ),
  factor = Factor_Per
)

# Plots ------------------------------------------------------------------------
## Identity & Skin Tone: weighted proportion of each skin tone within every
## ethnic-identity group, as a filled bar chart with skin-tone hex colours.
ggplot(data, aes(x = ethnic_identity, fill = skin_tone, weights = factor)) +
  geom_bar(position = "fill") +
  theme_fivethirtyeight() +
  scale_fill_manual(values = c("#8D5524", "#C68642", "#F1C27D", "#FFDBAC")) +
  labs(
    title = "Skin Tone by Ethnic Identity",
    subtitle = "Proportion of individuals",
    fill = "Skin Tone",
    caption = "Source: Own based on data from MSMSI 2016 (INEGI)"
  )
/19-08-2021 (Ethnic Identity & Skin Tone)/code.R
no_license
PedroToL/TidyData
R
false
false
2,112
r
# Libraries -------------------------------------------------------------------
library(tidyverse)
library(ggthemes)

# Data ------------------------------------------------------------------------
# MMSI 2016 survey microdata (INEGI).
data <- readr::read_csv("./Data/MMSI_2016.csv")

## Preprocessing: keep only the three identity variables plus the survey
## expansion factor, with raw codes recoded to readable factor labels.
data <- transmute(
  data,
  indigenous_language = recode_factor(P10_1, `1` = "Yes", `2` = "No"),
  skin_tone = recode_factor(
    P10_2,
    "01" = "Dark", "02" = "Dark", "03" = "Dark",
    "04" = "Dark", "05" = "Dark", "06" = "Dark",
    "07" = "Brown",
    "08" = "Light Brown",
    "09" = "White", "10" = "White", "11" = "White"
  ),
  ethnic_identity = recode_factor(
    P10_3,
    `1` = "Afro-descendant", `2` = "Indigenous", `3` = "Mestizo",
    `4` = "White", `5` = "Others", `9` = "Not Specified"
  ),
  factor = Factor_Per
)

# Plots ------------------------------------------------------------------------
## Identity & Skin Tone: weighted proportion of each skin tone within every
## ethnic-identity group, as a filled bar chart with skin-tone hex colours.
ggplot(data, aes(x = ethnic_identity, fill = skin_tone, weights = factor)) +
  geom_bar(position = "fill") +
  theme_fivethirtyeight() +
  scale_fill_manual(values = c("#8D5524", "#C68642", "#F1C27D", "#FFDBAC")) +
  labs(
    title = "Skin Tone by Ethnic Identity",
    subtitle = "Proportion of individuals",
    fill = "Skin Tone",
    caption = "Source: Own based on data from MSMSI 2016 (INEGI)"
  )
##%######################################################%## # # #### Script to interrogate the #### #### hospital databases to extract #### #### IDs # of individuals #### #### who had prevalent comorbidities. This #### #### will be matched against # IDs of people #### #### discontinuing dialysis to see #### #### how the race pattern appears. #### # # ##%######################################################%## # This script is a modification of the hospitalization.R script targeted to the # JASN revision, where they are asking for the effect of ESRD cause, the effect of # censoring at transplant time, etc. See hospitalizatonRevision_notes.Rmd for details # setup --------------------------------------------------------------------------------------- ProjTemplate::reload() # dbdir = verifyPaths(); dir.exists(dbdir) dropdir <- file.path(ProjTemplate::find_dropbox(), 'NIAMS','Ward','USRDS2015','data') no_cores <- detectCores()-1 condition_code <- c('stroke_primary' = 'Primary Stroke', 'stroke_compl' = 'Complicated Stroke', 'LuCa' = 'Lung Cancer', 'MetsCa' = 'Metastatic Cancer', 'dement' = 'Dementia', 'thrive' = 'Failure to thrive' ) ##%######################################################%## # # #### Extraction from database #### # # ##%######################################################%## sql_conn = dbConnect(SQLite(), file.path(dbdir,'USRDS.sqlite3')) till2009 <- tbl(sql_conn, 'till2009') from2010 <- tbl(sql_conn, 'from2010') studyids <- tbl(sql_conn, 'StudyIDs') # These are people in the final analytic dataset, N= 1,291,001 dbs <- list(till2009, from2010) # Stroke ------------------------------------------------------------------ stroke <- dbs %>% lapply(., function(db){ db %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU)%>% mutate(PRIM = substr(HSDIAG1,1,3)) %>% filter(PRIM == '430' | PRIM=='431' | PRIM == '432' | PRIM == '433' | PRIM=='434') %>% inner_join(studyids) %>% # Keep only individuals in earlier study collect(n = Inf) }) 
stroke_primary <- stroke %>% lapply(., function(db) db %>% select(USRDS_ID, CLM_FROM, CLM_THRU)) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM, CLM_THRU)) head(stroke_primary) # Stroke with complications ----------------------------------------------- stroke_compl <- stroke %>% lapply(., function(db){ db %>% gather(diag, code, -USRDS_ID, -HSDIAG1, -CLM_THRU, -CLM_FROM) %>% mutate(code1 = substr(code,1,3)) %>% filter(code1 %in% c('438','342','344')) %>% select(USRDS_ID, CLM_FROM, CLM_THRU) %>% distinct() }) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM,CLM_THRU)) head(stroke_compl) # Lung cancer ------------------------------------------------------------- LuCa <- dbs %>% lapply(., function(db){ db %>% mutate(PRIM = substr(HSDIAG1,1,3)) %>% filter(PRIM=='162') %>% inner_join(studyids) %>% # Keep only individuals in earlier study select(USRDS_ID, CLM_FROM, CLM_THRU) %>% collect(n=Inf) }) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM, CLM_THRU)) head(LuCa) # Metastatic cancer ------------------------------------------------------- MetsCa <- dbs %>% lapply(., function(db){ db %>% mutate(PRIM=substr(HSDIAG1,1,3)) %>% filter(PRIM== '196' | PRIM == '197' | PRIM == '198' | PRIM == '199') %>% inner_join(studyids) %>% select(USRDS_ID, CLM_FROM, CLM_THRU) %>% collect(n = Inf)}) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM, CLM_THRU)) head(MetsCa) hospitalization <- list('stroke_primary' = stroke_primary, 'stroke_compl' = stroke_compl, 'LuCa' = LuCa, 'MetsCa' = MetsCa) # 'dement' = dement, # 'thrive' = thrive saveRDS(hospitalization, file = 'data/hospitalization_ids.rds', compress = T) # Dementia ---------------------------------------------------------------- ## I'm moving this to Python since it's much faster at processing the database ## row-wise. However, I need to get some SQL calls generated here. 
# till2009 %>% select(USRDS_ID, starts_with('HSDIAG')) %>% show_query() # from2010 %>% select(USRDS_ID, starts_with("HSDIAG")) %>% show_query() # reticulate::source_python('dementia.py') # dementia <- read_csv('data/Dementia.csv') # names(dementia) <- 'USRDS_ID' # head(dementia) StudyIDS <- studyids %>% collect(n=Inf) sql1 <- paste(capture.output(till2009 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sql2 <- paste(capture.output(from2010 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sqlist <- list(sql1,sql2) dement <- list() i=0 for (sql in sqlist){ print(paste('Running ',sql)) rs <- dbSendQuery(sql_conn, sql) while(!dbHasCompleted(rs)){ d <- dbFetch(rs, n = 100000) i=i+1 print(i) dement[[i]] <-d %>% gather(hsdiag, code, -USRDS_ID, -CLM_FROM, -CLM_THRU) %>% filter(str_detect(code, '^290|^2941|^331[012]')) %>% select(USRDS_ID, CLM_FROM, CLM_THRU) %>% # distinct() %>% inner_join(StudyIDS) %>% as.data.frame() } dbClearResult(rs) } dement1 <- bind_rows(dement) dement1 <- dement1 %>% semi_join(StudyIDS) # Keep people from analytic dataset hospitalization[['dement']] <- dement1 saveRDS(hospitalization, file = 'data/hospitalization_ids.rds', compress = T) # Failure to thrive ------------------- ## This can appear in any of the diagnoses sql1 <- paste(capture.output(till2009 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sql2 <- paste(capture.output(from2010 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sqlist <- list(sql1,sql2) thrive = list() i = 0 for (sql in sqlist) { rs <- dbSendQuery(sql_conn, sql) while (!dbHasCompleted(rs)) { d <- dbFetch(rs, n = 100000) i = i+1 print(i) thrive[[i]] <-d %>% gather(hsdiag, code, starts_with("HSDIAG")) %>% filter(str_detect(code, '783[237]')) %>% 
select(USRDS_ID, CLM_FROM, CLM_THRU) %>% as.data.frame() } dbClearResult(rs) } thrive1 <- bind_rows(thrive) %>% semi_join(StudyIDS) # Keep people from analytic dataset hospitalization[['thrive']] <- thrive1 saveRDS(hospitalization, file = 'data/hospitalization_ids.rds') saveRDS(hospitalization, file = file.path(dropdir, 'hospitalization_ids.rds')) dbDisconnect(sql_conn); gc() # End of database extraction ---------------------------------------------- ##%######################################################%## # # #### Creating intermediate datasets #### # # ##%######################################################%## abhiR::reload() hospitalization <- readRDS(path(dropdir, 'hospitalization_ids.rds')) # Dat <- readRDS(path(dropdir,'Analytic.rds')) Dat <- read_fst(path(dropdir,'Analytic.fst')) # Fixing the REGION definition -------------------------------------------- library(stringr) a1 <- str_pad(as.character(c(23, 50, 33, 25, 09, 36, 42, 44, 34)), 2, pad='0') # Northeast a2 <- str_pad(as.character(c(48, 40, 05, 22, 28, 01, 47, 21, 12, 13, 45, 37, 51, 11, 24, 10, 54,78, 72 )), 2, pad='0') # South a3 <- str_pad(as.character(c(20, 31, 46, 38, 29, 19, 27, 17, 55, 18, 26, 39)), 2, pad = '0') # Midwest a4 <- str_pad(as.character(c(02, 15, 06, 41, 53, 32, 04, 49, 16, 35, 08, 56, 30, 66, 69, 60,64)), 2, pad='0') # West Dat <- Dat %>% mutate(REGION = case_when( Dat$STATE %in% a1 ~ "Northeast", Dat$STATE %in% a2 ~ 'South', Dat$STATE %in% a3 ~ "Midwest", Dat$STATE %in% a4 ~ "West" )) # Computing survival times ------------------------------------------------ # The following code computes survival date as the minimum of loss-to-followup, # discontinuation time, death and transplant date Dat <- Dat %>% mutate(surv_date = pmin(cens_time, withdraw_time, DIED, TX1DATE, na.rm=T)) %>% mutate(surv_date2 = pmin(cens_time, withdraw_time, DIED, na.rm=T)) %>% # Removes transplant time as a potential censoring time. 
The clock no longer stops at transplant times mutate(RACE2 = forcats::fct_relevel(RACE2, 'White')) # Adding DISGRPC codes to analytic data -------------------------------------------- disgrpc_code <- readRDS(path(dropdir, 'disgrpc_code.rds')) # See cause_esrd.R disgrpc <- fst(path(dropdir, 'raw_data.fst'))[,c('USRDS_ID','DISGRPC')] %>% distinct() %>% left_join(disgrpc_code, by=c('DISGRPC'='Format')) %>% mutate(Description = ifelse(Description %in% c('8','**OTHER**'), NA, Description)) %>% mutate(Description = fct_other(Description,keep=c('Diabetes','Hypertension','Glomeruloneph.'))) %>% rename(ESRD_Cause = Description) Dat <- Dat %>% left_join(disgrpc) # Adding reason for dialysis fst::write_fst(Dat, path(dropdir,'revision_JASN','Analytic_DISGRPC.fst')) # Filter index conditions by events after start of dialysis ----------------------------------- # IDEA: We could also look at individuals who had index condition soon followed by dialysis, # where dialysis is the precipitating index condition hosp_post_dx <- map(hospitalization, ~ .x %>% mutate(CLM_FROM = as.Date(CLM_FROM)) %>% left_join(Dat %>% select(USRDS_ID, FIRST_SE, surv_date, surv_date2, cens_type, RACE2, DISGRPC, ESRD_Cause)) %>% # Add cause of dialysis filter(CLM_FROM >= FIRST_SE, CLM_FROM <= surv_date) %>% group_by(USRDS_ID) %>% top_n(-1, CLM_FROM) %>% # Selects last hospital stay for each person top_n(-1, CLM_THRU) %>% # select(-FIRST_SE, -surv_date) %>% distinct() %>% ungroup()) saveRDS(hosp_post_dx, file.path(dropdir, 'revision_JASN','final_hosp_data.rds'), compress = T) # Adding the age at the index condition ----------------------------------- #' what's the chance of discontinuation, by race hosp_post_dx <- readRDS(file.path(dropdir,'revision_JASN','final_hosp_data.rds')) Dat <- fst::read_fst(path(dropdir, 'revision_JASN','Analytic_DISGRPC.fst')) # Dat <- fst::read_fst(path(dropdir, 'Analytic.fst')) # Dat <- Dat %>% mutate(surv_date = pmin(cens_time, withdraw_time, DIED, TX1DATE, na.rm=T)) %>% # 
mutate(RACE2 = forcats::fct_relevel(RACE2, 'White')) hosp_postdx_age <- map( hosp_post_dx, ~.x %>% left_join(Dat %>% select(USRDS_ID, INC_AGE)) %>% mutate(se_to_event_time = CLM_FROM - FIRST_SE, age_at_event = floor(INC_AGE + se_to_event_time/365.25)) %>% mutate(agegrp_at_event = cut_width(age_at_event, width=10, boundary=10, closed='left')) %>% mutate(agegrp_at_event = forcats::fct_collapse(agegrp_at_event, '<40' = intersect(levels(agegrp_at_event),c('[10,20)','[20,30)','[30,40)')), '80+' = intersect(levels(agegrp_at_event), c('[80,90)','[90,100)','[90,100]','[100,110]'))))) saveRDS(hosp_postdx_age, file.path(dropdir,'revision_JASN','hosp_postdx_age.rds'), compress = T) # Cox regressions: Data munging ----------------------------------------------------------------------------- # Data munging and generation: Matching comorbidity score with time of index condition ------------------------------------- index_condn_comorbs <- readRDS(file.path(dropdir, 'index_condn_comorbs.rds')) hosp_post_dx <- readRDS(file.path(dropdir, 'revision_JASN', 'final_hosp_data.rds')) hosp_postdx_age <- readRDS(file.path(dropdir, 'revision_JASN','hosp_postdx_age.rds')) hosp_cox_data <- map(hosp_postdx_age, ~.x %>% left_join(select(Dat, SEX, zscore, USRDS_ID, REGION)) %>% mutate(time_from_event = as.numeric(surv_date-CLM_FROM)) %>% mutate(time_from_event2 = as.numeric(surv_date2 - CLM_FROM)) %>% mutate(time_on_dialysis = se_to_event_time) %>% rename('Race' = "RACE2") %>% filter(Race != 'Other') %>% mutate(Race = droplevels(Race))) out <- list() for (n in names(hosp_post_dx)){ print(paste('Working on ', n)) d <- index_condn_comorbs[[n]] %>% select(USRDS_ID:CLM_THRU, comorb_indx) # Comorbidities per indiv per visit hosp_post_dx[[n]] %>% mutate(CLM_FROM = as.character(CLM_FROM), CLM_THRU = as.character(CLM_THRU)) %>% left_join(d) %>% group_by(USRDS_ID) %>% filter(comorb_indx == max(comorb_indx)) %>% # Take worst comorbidity index ungroup() %>% distinct() -> out[[n]] } 
assertthat::are_equal(map_int(hosp_post_dx, nrow), map_int(out, nrow)) # hosp_post_dx <- out modeling_data <- hosp_cox_data for(n in names(modeling_data)){ modeling_data[[n]] <- modeling_data[[n]] %>% left_join(out[[n]] %>% select(USRDS_ID, comorb_indx)) } ## Data munging to add simulated withdrawal times Dat <- fst::read_fst(path(dropdir, 'revision_JASN','Analytic_DISGRPC.fst')) modeling_data2 <- map(modeling_data, ~left_join(., select(Dat, USRDS_ID, toc:tow), by ='USRDS_ID') %>% mutate_at(vars(toc:tow), funs(.*365.25)) %>% # Change to days mutate(time_on_dialysis = as.numeric(time_on_dialysis), REGION = as.factor(REGION), agegrp_at_event = fct_collapse(agegrp_at_event, '<50' = c('<40','[40,50)'))) %>% split(.,.$Race)) # convert times to days save(modeling_data, modeling_data2, file = file.path(dropdir,'revision_JASN','modeling_data.rda'), compress = T) # TODO: Figure out which analysis makes the most sense ##%######################################################%## # # #### Analyses #### # # ##%######################################################%## # What is the chance of discontinuation, by age and race -------------------------------------- hosp_postdx_age <- readRDS(file.path(dropdir,'revision_JASN','hosp_postdx_age.rds')) out1 <- map(hosp_postdx_age, ~.x %>% group_by(agegrp_at_event, RACE2) %>% summarize(prop_withdrew = round(mean(cens_type==3), 3), N = n()) %>% ungroup()) %>% bind_rows(.id = 'index_event') %>% filter(!is.na(RACE2)) %>% unite(out, c('prop_withdrew','N'), sep=' / ') %>% spread(agegrp_at_event, out) %>% mutate(index_event = transform_indx(index_event)) %>% rename(`Index event`=index_event, Race=RACE2) hosp_post_dx <- readRDS(file.path(dropdir, 'revision_JASN', 'final_hosp_data.rds')) out2 <- map(hosp_post_dx, ~.x %>% group_by(RACE2) %>% summarise(prop_withdrew = mean(cens_type==3), N = n())) %>% bind_rows(.id='index_condition') %>% filter(!is.na(RACE2)) %>% mutate(index_condition = transform_indx(index_condition)) %>% mutate(prop_withdrew = 
round(prop_withdrew,3)) %>% unite(Overall, c('prop_withdrew', 'N'), sep = ' / ') out <- left_join(out1, out2, by=c("Index event" = 'index_condition','Race'='RACE2')) openxlsx::write.xlsx(out, file='results/revision_JASN/Withdrawal_age_raceCause.xlsx') # Median time after index condition to discontinuation ---------------------------------------- hosp_postdx_age <- readRDS(file.path(dropdir, 'revision_JASN', 'hosp_postdx_age.rds')) map(hosp_postdx_age, ~.x %>% mutate(time_to_wd = as.numeric(surv_date - CLM_FROM)) %>% # time between dialysis and withdrawal group_by(agegrp_at_event, RACE2) %>% summarise(median_time = median(time_to_wd, na.rm=T))) %>% bind_rows(.id = 'index_condition') %>% filter(!is.na(RACE2)) %>% spread(agegrp_at_event, median_time) %>% mutate(index_condition = transform_indx(index_condition)) %>% rename('Index event' = 'index_condition', 'Race' = 'RACE2') %>% openxlsx::write.xlsx(file = 'results/revision_JASN/Time_to_withdrawalCause.xlsx') # Survival analysis on discontinuation -------------------------------------------------------- load(file.path(dropdir, 'revision_JASN', 'modeling_data.rda')) ## Kaplan Meier curves fit_list = map(modeling_data, ~survfit(Surv(time_from_event, cens_type==3)~ Race, data = .)) cph1_list <- map(modeling_data, ~ coxph(Surv(time_from_event, cens_type==3)~ Race, data = .)) logrank_list <- map(cph1_list, ~format.pval(anova(.)[2,4], eps=1e-6)) plt_list <- vector('list',6) for(i in 1:6){ plt_list[[i]] <- survminer::ggsurvplot(fit_list[[i]], data = modeling_data[[i]], conf.int = F, pval = F, censor = F, risk.table = F, xlab = 'Time (days)', ylab = 'Percent who discontinued dialysis', legend = 'bottom', title = names(fit_list)[i])$plot + scale_y_continuous(labels = scales::percent) + annotate('text', x = 50, y = 0.1, label = paste0('p-value : ', logrank_list[[i]]), hjust = 0) + theme( legend.justification = c(0.5, 0.5)) } pdf('graphs/revision_JASN/KaplanMeierPlotsCause.pdf') for(i in 1:6) print(plt_list[[i]]) dev.off() #' Cox 
regressions adjusting for age at event, gender, race, z-score, region, comorbidities and time on dialysis at time of event hosp_coxph <- map(modeling_data, ~ coxph(Surv(time_from_event+0.1, cens_type==3)~Race + agegrp_at_event + SEX + zscore + REGION + comorb_indx + time_on_dialysis + ESRD_Cause, data = .) %>% broom::tidy() %>% filter(str_detect(term, 'Race')) %>% select(term, estimate, p.value:conf.high) %>% mutate(term = str_remove(term, 'Race')) %>% mutate_at(vars(estimate, conf.low:conf.high), exp)) bind_rows(hosp_coxph, .id = 'Index event') %>% mutate(`Index event` = case_when(`Index event` == 'stroke_primary' ~ 'Primary stroke', `Index event` == 'stroke_compl' ~ 'Stroke with complications', `Index event` == 'LuCa' ~ 'Lung cancer', `Index event` == 'MetsCa' ~ 'Metastatic cancer', `Index event` == 'dement' ~ 'Dementia', `Index event` == 'thrive' ~ 'Failure to thrive')) %>% ggplot(aes(x = term, y = estimate, ymin = conf.low, ymax = conf.high))+ geom_pointrange() + geom_hline(yintercept = 1, linetype =3)+ facet_wrap(~`Index event`, nrow=2) + theme(axis.text.x = element_text(angle = 45, hjust=1)) + scale_y_continuous('HR for discontinuation, compared to Whites', breaks = seq(0.4,1.4, by = 0.2))+ labs(x = '') + ggsave('graphs/revision_JASN/ForestPlotCause.pdf') bind_rows(hosp_coxph, .id = 'Index event') %>% rename(Race = term, HR = estimate, `P-value` = p.value, `95% LCB` = conf.low, `95% UCB` = conf.high) %>% mutate(`Index event` = case_when(`Index event` == 'stroke_primary' ~ 'Primary stroke', `Index event` == 'stroke_compl' ~ 'Stroke with complications', `Index event` == 'LuCa' ~ 'Lung cancer', `Index event` == 'MetsCa' ~ 'Metastatic cancer', `Index event` == 'dement' ~ 'Dementia', `Index event` == 'thrive' ~ 'Failure to thrive')) %>% clean_cols(`Index event`) %>% openxlsx::write.xlsx('results/revision_JASN/CoxPHCause.xlsx', colWidths = 'auto', headerStyle = openxlsx::createStyle(textDecoration = 'BOLD'), overwrite = TRUE) # Evaluating how long from 
discontinuation to death ------------------------------------------- ProjTemplate::reload() hospitalization <- readRDS(path(dropdir, 'hospitalization_ids.rds')) Dat <- read_fst(path(dropdir, 'revision_JASN','Analytic_DISGRPC.fst')) Dat <- Dat %>% mutate(surv_date = pmin(cens_time, withdraw_time, DIED, TX1DATE, na.rm=T)) %>% mutate(RACE2 = forcats::fct_relevel(RACE2, 'White')) Dat <- Dat %>% mutate(withdraw_to_death = ifelse(cens_type==3 & !is.na(BEGIN_withdraw), tod - tow, NA)) Dat %>% filter(RACE2 != 'Other') %>% ggplot(aes(x = RACE2, y = withdraw_to_death * 365.25))+geom_boxplot() Dat %>% filter(RACE2 != 'Other', cens_type==3) %>% kruskal.test(withdraw_to_death~RACE2, data=.) %>% broom::tidy() Dat %>% filter(RACE2 != 'Other', cens_type == 3) %>% ggplot(aes(x = withdraw_to_death*365.25))+ geom_density(aes(group = RACE2, color = RACE2)) + xlim(0,100) Dat %>% filter(RACE2 != 'Other', cens_type == 3) %>% group_by(RACE2) %>% summarise(med_surv = median(365*withdraw_to_death, na.rm=T)) # TODO: Sensitivity of all results to imputation of withdrawal time # This means, we assumed, if withdrawal data is missing, that we used 7 days before death. # We should see how our results hold up if we impute the withdrawal date as # (a) the date of death # (b) randomly drawn from race-specific distribution # Assessing comorbidities from hospitalization ------------------------------------------------ ## This is done in evaluate_comorbidities.R # Simulation study ---------------------------------------------------------------------------- load(file.path(dropdir, 'revision_JASN', 'modeling_data.rda')) cl <- makeCluster(no_cores) registerDoParallel(cl) cox_models <- sim_fn_cause(modeling_data2) saveRDS(cox_models, file.path(dropdir, 'revision_JASN', 'cox_models.rds'), compress = T) bl <- modify_depth(cox_models, 2, ~select(., term, estimate) %>% mutate(estimate = exp(estimate))) %>% map(~bind_rows(.) 
) bl <- map(bl, ~mutate(., term = str_remove(term, 'Race'))) pdf('graphs/revision_JASN/SimulationResultsCause.pdf') for(n in names(bl)){ print(bl[[n]] %>% ggplot(aes(estimate))+geom_histogram(bins=20) + facet_wrap(~term, scales = 'free', nrow = 2)+ labs(x = 'Hazard ratio against Whites', y = '') + ggtitle(n)) } dev.off() # Simulation study stratified by group -------------------------------------------------------- load(file.path(dropdir, 'revision_JASN','modeling_data.rda')) modeling_data2_young <- modify_depth(modeling_data2, 2, ~filter(., age_at_event < 70)) modeling_data2_old <- modify_depth(modeling_data2, 2, ~filter(., age_at_event >= 70)) ## Some summaries N_young <- modify_depth(modeling_data2_young, 1, ~map_df(., nrow)) %>% bind_rows(.id = 'Condition') N_old <- modify_depth(modeling_data2_old, 1, ~map_df(., nrow)) %>% bind_rows(.id = 'Condition') cl <- makeCluster(no_cores) registerDoParallel(cl) cox_models_young <- sim_fn_cause(modeling_data2_young) cox_models_old <- sim_fn_cause(modeling_data2_old) stopCluster(cl) bl <- modify_depth(cox_models_old, 2, ~select(., term, estimate) %>% mutate(estimate = exp(estimate))) %>% map(~bind_rows(.) ) bl <- map(bl, ~mutate(., term = str_remove(term, 'Race'))) pdf('graphs/revision_JASN/SimulationResults_old.pdf') for(n in names(bl)){ print(bl[[n]] %>% ggplot(aes(estimate))+geom_histogram(bins=20) + facet_wrap(~term, scales = 'free', nrow = 2)+ labs(x = 'Hazard ratio against Whites', y = '') + ggtitle(paste('Age 70+:', n))) } dev.off() bl <- modify_depth(cox_models_young, 2, ~select(., term, estimate) %>% mutate(estimate = exp(estimate))) %>% map(~bind_rows(.) 
) bl <- map(bl, ~mutate(., term = str_remove(term, 'Race'))) pdf('graphs/revision_JASN/SimulationResults_young.pdf') for(n in names(bl)){ print(bl[[n]] %>% ggplot(aes(estimate))+geom_histogram(bins=20) + facet_wrap(~term, scales = 'free', nrow = 2)+ labs(x = 'Hazard ratio against Whites', y = '') + ggtitle(paste('Age 69-:', n))) } dev.off() save(cox_models_young, cox_models_old, file = file.path(dropdir, 'revision_JASN', 'cox_models_sim_strat.rda'), compress = T) # # YLL and observed time computations ---------------------------------------------------------------------------- # # load(file.path(dropdir, 'modeling_data.rda')) # cl <- makeCluster(no_cores) # registerDoParallel(cl) # sim_results <- sim_fn_yll(modeling_data2) # stopCluster(cl) # # # # obstimes <- out_obstimes_fn(simres, modeling_data2) # # nominal_obstimes <- map(modeling_data, ~mutate(., time_from_event = ifelse(cens_type ==3, time_from_event + 7, time_from_event)) %>% # # group_by(Race) %>% # # summarize(nominal_obstime = sum(time_from_event, na.rm=T)) %>% # # ungroup() %>% # # mutate(Race = as.character(Race))) # # yll <- out_yll_fn(sim_results) # final_tbl <- out_obstimes_fn(sim_results, modeling_data2) # # ## Repeat for stratified analyses # cl <- makeCluster(no_cores) # registerDoParallel(cl) # sim_results_young <- sim_fn_yll(modeling_data2_young) # sim_results_old <- sim_fn_yll(modeling_data2_old) # stopCluster(cl) # # # yll_young <- out_yll_fn(sim_results_young) # yll_old <- out_yll_fn(sim_results_old) # # final_tbl_young = out_obstimes_fn(sim_results_young, modeling_data2_young) # final_tbl_old = out_obstimes_fn(sim_results_old, modeling_data2_old) # # openxlsx::write.xlsx(list('Overall' = final_tbl, # 'Young' = final_tbl_young, # 'Old' = final_tbl_old, # 'Overall-YLL' = yll, # 'Young-YLL' = yll_young, # 'Old-YLL' = yll_old), # file='ObsTime.xlsx', # headerStyle = openxlsx::createStyle(textDecoration = 'BOLD')) # # Summaries of Weibull models 
----------------------------------------------------------------- load(file.path(dropdir,'revision_JASN', 'modeling_data.rda')) weib_models <- list() for (cnd in names(modeling_data2)){ D = modeling_data2[[cnd]] weib_models[[cnd]] <- survreg(Surv(time_from_event+0.1, cens_type==3)~ # Added 0.1 since weibull is > 0 agegrp_at_event + SEX + time_on_dialysis + REGION+ zscore + comorb_indx + ESRD_Cause, data = D$White, dist = 'weibull') } weib_res <- map(weib_models, broom::tidy) ## Cox-Snell graphs pdf('graphs/revision_JASN/CoxSnell.pdf') for(n in names(weib_models)){ cs <- cox_snell(weib_models[[n]], modeling_data2[[n]]$White) title(main = n) } dev.off() weib_res2 <- map(weib_res, aft_to_hr) format_terms <- function(x, mod){ nms <- names(attr(mod$terms, 'dataClasses')) baseline <- map(mod$xlevels, 1) %>% bind_rows() %>% gather(Variables, term) %>% cbind(out = '') x$Variables = '' for (n in nms){ x$Variables[str_detect(x$term, n)] <- n x$term = str_remove(x$term, n) } x <- x %>% select(Variables, term, out) %>% bind_rows(baseline) %>% arrange(Variables, term) x2 <- split(x, x$Variables) xlvl = mod$xlevels for (n in names(xlvl)) { x2[[n]] <- x2[[n]][match(xlvl[[n]],x2[[n]][,'term']),] } x <- bind_rows(x2) x <- x %>% rename(value = term, HR = out) %>% clean_cols(Variables) return(x) } out <- map2(weib_res2, weib_models, format_terms) out <- map(out, ~ mutate(., Variables = case_when(Variables == 'agegrp_at_event' ~ 'Age group', Variables == 'comorb_indx' ~ 'Comorbidity index', Variables == 'REGION' ~ 'Region', Variables == 'SEX' ~ 'Sex', Variables == 'time_on_dialysis' ~ "Time on dialysis (days)", Variables == 'zscore' ~ 'SES Score', TRUE ~ '')) %>% rename(Group = value)) openxlsx::write.xlsx(out, file = 'results/revision_JASN/WhiteModelsCause.xlsx', headerStyle = openxlsx::createStyle(textDecoration = 'BOLD') )
/R/JASN/hospitalization_cause.R
no_license
webbedfeet/USRDS2015
R
false
false
29,640
r
##%######################################################%## # # #### Script to interrogate the #### #### hospital databases to extract #### #### IDs # of individuals #### #### who had prevalent comorbidities. This #### #### will be matched against # IDs of people #### #### discontinuing dialysis to see #### #### how the race pattern appears. #### # # ##%######################################################%## # This script is a modification of the hospitalization.R script targeted to the # JASN revision, where they are asking for the effect of ESRD cause, the effect of # censoring at transplant time, etc. See hospitalizatonRevision_notes.Rmd for details # setup --------------------------------------------------------------------------------------- ProjTemplate::reload() # dbdir = verifyPaths(); dir.exists(dbdir) dropdir <- file.path(ProjTemplate::find_dropbox(), 'NIAMS','Ward','USRDS2015','data') no_cores <- detectCores()-1 condition_code <- c('stroke_primary' = 'Primary Stroke', 'stroke_compl' = 'Complicated Stroke', 'LuCa' = 'Lung Cancer', 'MetsCa' = 'Metastatic Cancer', 'dement' = 'Dementia', 'thrive' = 'Failure to thrive' ) ##%######################################################%## # # #### Extraction from database #### # # ##%######################################################%## sql_conn = dbConnect(SQLite(), file.path(dbdir,'USRDS.sqlite3')) till2009 <- tbl(sql_conn, 'till2009') from2010 <- tbl(sql_conn, 'from2010') studyids <- tbl(sql_conn, 'StudyIDs') # These are people in the final analytic dataset, N= 1,291,001 dbs <- list(till2009, from2010) # Stroke ------------------------------------------------------------------ stroke <- dbs %>% lapply(., function(db){ db %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU)%>% mutate(PRIM = substr(HSDIAG1,1,3)) %>% filter(PRIM == '430' | PRIM=='431' | PRIM == '432' | PRIM == '433' | PRIM=='434') %>% inner_join(studyids) %>% # Keep only individuals in earlier study collect(n = Inf) }) 
stroke_primary <- stroke %>% lapply(., function(db) db %>% select(USRDS_ID, CLM_FROM, CLM_THRU)) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM, CLM_THRU)) head(stroke_primary) # Stroke with complications ----------------------------------------------- stroke_compl <- stroke %>% lapply(., function(db){ db %>% gather(diag, code, -USRDS_ID, -HSDIAG1, -CLM_THRU, -CLM_FROM) %>% mutate(code1 = substr(code,1,3)) %>% filter(code1 %in% c('438','342','344')) %>% select(USRDS_ID, CLM_FROM, CLM_THRU) %>% distinct() }) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM,CLM_THRU)) head(stroke_compl) # Lung cancer ------------------------------------------------------------- LuCa <- dbs %>% lapply(., function(db){ db %>% mutate(PRIM = substr(HSDIAG1,1,3)) %>% filter(PRIM=='162') %>% inner_join(studyids) %>% # Keep only individuals in earlier study select(USRDS_ID, CLM_FROM, CLM_THRU) %>% collect(n=Inf) }) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM, CLM_THRU)) head(LuCa) # Metastatic cancer ------------------------------------------------------- MetsCa <- dbs %>% lapply(., function(db){ db %>% mutate(PRIM=substr(HSDIAG1,1,3)) %>% filter(PRIM== '196' | PRIM == '197' | PRIM == '198' | PRIM == '199') %>% inner_join(studyids) %>% select(USRDS_ID, CLM_FROM, CLM_THRU) %>% collect(n = Inf)}) %>% bind_rows() %>% distinct() %>% mutate(dt = date_midpt(CLM_FROM, CLM_THRU)) head(MetsCa) hospitalization <- list('stroke_primary' = stroke_primary, 'stroke_compl' = stroke_compl, 'LuCa' = LuCa, 'MetsCa' = MetsCa) # 'dement' = dement, # 'thrive' = thrive saveRDS(hospitalization, file = 'data/hospitalization_ids.rds', compress = T) # Dementia ---------------------------------------------------------------- ## I'm moving this to Python since it's much faster at processing the database ## row-wise. However, I need to get some SQL calls generated here. 
# till2009 %>% select(USRDS_ID, starts_with('HSDIAG')) %>% show_query() # from2010 %>% select(USRDS_ID, starts_with("HSDIAG")) %>% show_query() # reticulate::source_python('dementia.py') # dementia <- read_csv('data/Dementia.csv') # names(dementia) <- 'USRDS_ID' # head(dementia) StudyIDS <- studyids %>% collect(n=Inf) sql1 <- paste(capture.output(till2009 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sql2 <- paste(capture.output(from2010 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sqlist <- list(sql1,sql2) dement <- list() i=0 for (sql in sqlist){ print(paste('Running ',sql)) rs <- dbSendQuery(sql_conn, sql) while(!dbHasCompleted(rs)){ d <- dbFetch(rs, n = 100000) i=i+1 print(i) dement[[i]] <-d %>% gather(hsdiag, code, -USRDS_ID, -CLM_FROM, -CLM_THRU) %>% filter(str_detect(code, '^290|^2941|^331[012]')) %>% select(USRDS_ID, CLM_FROM, CLM_THRU) %>% # distinct() %>% inner_join(StudyIDS) %>% as.data.frame() } dbClearResult(rs) } dement1 <- bind_rows(dement) dement1 <- dement1 %>% semi_join(StudyIDS) # Keep people from analytic dataset hospitalization[['dement']] <- dement1 saveRDS(hospitalization, file = 'data/hospitalization_ids.rds', compress = T) # Failure to thrive ------------------- ## This can appear in any of the diagnoses sql1 <- paste(capture.output(till2009 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sql2 <- paste(capture.output(from2010 %>% select(USRDS_ID, starts_with('HSDIAG'), CLM_FROM, CLM_THRU) %>% show_query(), type='message')[-1], collapse=' ') sqlist <- list(sql1,sql2) thrive = list() i = 0 for (sql in sqlist) { rs <- dbSendQuery(sql_conn, sql) while (!dbHasCompleted(rs)) { d <- dbFetch(rs, n = 100000) i = i+1 print(i) thrive[[i]] <-d %>% gather(hsdiag, code, starts_with("HSDIAG")) %>% filter(str_detect(code, '783[237]')) %>% 
select(USRDS_ID, CLM_FROM, CLM_THRU) %>% as.data.frame() } dbClearResult(rs) } thrive1 <- bind_rows(thrive) %>% semi_join(StudyIDS) # Keep people from analytic dataset hospitalization[['thrive']] <- thrive1 saveRDS(hospitalization, file = 'data/hospitalization_ids.rds') saveRDS(hospitalization, file = file.path(dropdir, 'hospitalization_ids.rds')) dbDisconnect(sql_conn); gc() # End of database extraction ---------------------------------------------- ##%######################################################%## # # #### Creating intermediate datasets #### # # ##%######################################################%## abhiR::reload() hospitalization <- readRDS(path(dropdir, 'hospitalization_ids.rds')) # Dat <- readRDS(path(dropdir,'Analytic.rds')) Dat <- read_fst(path(dropdir,'Analytic.fst')) # Fixing the REGION definition -------------------------------------------- library(stringr) a1 <- str_pad(as.character(c(23, 50, 33, 25, 09, 36, 42, 44, 34)), 2, pad='0') # Northeast a2 <- str_pad(as.character(c(48, 40, 05, 22, 28, 01, 47, 21, 12, 13, 45, 37, 51, 11, 24, 10, 54,78, 72 )), 2, pad='0') # South a3 <- str_pad(as.character(c(20, 31, 46, 38, 29, 19, 27, 17, 55, 18, 26, 39)), 2, pad = '0') # Midwest a4 <- str_pad(as.character(c(02, 15, 06, 41, 53, 32, 04, 49, 16, 35, 08, 56, 30, 66, 69, 60,64)), 2, pad='0') # West Dat <- Dat %>% mutate(REGION = case_when( Dat$STATE %in% a1 ~ "Northeast", Dat$STATE %in% a2 ~ 'South', Dat$STATE %in% a3 ~ "Midwest", Dat$STATE %in% a4 ~ "West" )) # Computing survival times ------------------------------------------------ # The following code computes survival date as the minimum of loss-to-followup, # discontinuation time, death and transplant date Dat <- Dat %>% mutate(surv_date = pmin(cens_time, withdraw_time, DIED, TX1DATE, na.rm=T)) %>% mutate(surv_date2 = pmin(cens_time, withdraw_time, DIED, na.rm=T)) %>% # Removes transplant time as a potential censoring time. 
The clock no longer stops at transplant times mutate(RACE2 = forcats::fct_relevel(RACE2, 'White')) # Adding DISGRPC codes to analytic data -------------------------------------------- disgrpc_code <- readRDS(path(dropdir, 'disgrpc_code.rds')) # See cause_esrd.R disgrpc <- fst(path(dropdir, 'raw_data.fst'))[,c('USRDS_ID','DISGRPC')] %>% distinct() %>% left_join(disgrpc_code, by=c('DISGRPC'='Format')) %>% mutate(Description = ifelse(Description %in% c('8','**OTHER**'), NA, Description)) %>% mutate(Description = fct_other(Description,keep=c('Diabetes','Hypertension','Glomeruloneph.'))) %>% rename(ESRD_Cause = Description) Dat <- Dat %>% left_join(disgrpc) # Adding reason for dialysis fst::write_fst(Dat, path(dropdir,'revision_JASN','Analytic_DISGRPC.fst')) # Filter index conditions by events after start of dialysis ----------------------------------- # IDEA: We could also look at individuals who had index condition soon followed by dialysis, # where dialysis is the precipitating index condition hosp_post_dx <- map(hospitalization, ~ .x %>% mutate(CLM_FROM = as.Date(CLM_FROM)) %>% left_join(Dat %>% select(USRDS_ID, FIRST_SE, surv_date, surv_date2, cens_type, RACE2, DISGRPC, ESRD_Cause)) %>% # Add cause of dialysis filter(CLM_FROM >= FIRST_SE, CLM_FROM <= surv_date) %>% group_by(USRDS_ID) %>% top_n(-1, CLM_FROM) %>% # Selects last hospital stay for each person top_n(-1, CLM_THRU) %>% # select(-FIRST_SE, -surv_date) %>% distinct() %>% ungroup()) saveRDS(hosp_post_dx, file.path(dropdir, 'revision_JASN','final_hosp_data.rds'), compress = T) # Adding the age at the index condition ----------------------------------- #' what's the chance of discontinuation, by race hosp_post_dx <- readRDS(file.path(dropdir,'revision_JASN','final_hosp_data.rds')) Dat <- fst::read_fst(path(dropdir, 'revision_JASN','Analytic_DISGRPC.fst')) # Dat <- fst::read_fst(path(dropdir, 'Analytic.fst')) # Dat <- Dat %>% mutate(surv_date = pmin(cens_time, withdraw_time, DIED, TX1DATE, na.rm=T)) %>% # 
mutate(RACE2 = forcats::fct_relevel(RACE2, 'White')) hosp_postdx_age <- map( hosp_post_dx, ~.x %>% left_join(Dat %>% select(USRDS_ID, INC_AGE)) %>% mutate(se_to_event_time = CLM_FROM - FIRST_SE, age_at_event = floor(INC_AGE + se_to_event_time/365.25)) %>% mutate(agegrp_at_event = cut_width(age_at_event, width=10, boundary=10, closed='left')) %>% mutate(agegrp_at_event = forcats::fct_collapse(agegrp_at_event, '<40' = intersect(levels(agegrp_at_event),c('[10,20)','[20,30)','[30,40)')), '80+' = intersect(levels(agegrp_at_event), c('[80,90)','[90,100)','[90,100]','[100,110]'))))) saveRDS(hosp_postdx_age, file.path(dropdir,'revision_JASN','hosp_postdx_age.rds'), compress = T) # Cox regressions: Data munging ----------------------------------------------------------------------------- # Data munging and generation: Matching comorbidity score with time of index condition ------------------------------------- index_condn_comorbs <- readRDS(file.path(dropdir, 'index_condn_comorbs.rds')) hosp_post_dx <- readRDS(file.path(dropdir, 'revision_JASN', 'final_hosp_data.rds')) hosp_postdx_age <- readRDS(file.path(dropdir, 'revision_JASN','hosp_postdx_age.rds')) hosp_cox_data <- map(hosp_postdx_age, ~.x %>% left_join(select(Dat, SEX, zscore, USRDS_ID, REGION)) %>% mutate(time_from_event = as.numeric(surv_date-CLM_FROM)) %>% mutate(time_from_event2 = as.numeric(surv_date2 - CLM_FROM)) %>% mutate(time_on_dialysis = se_to_event_time) %>% rename('Race' = "RACE2") %>% filter(Race != 'Other') %>% mutate(Race = droplevels(Race))) out <- list() for (n in names(hosp_post_dx)){ print(paste('Working on ', n)) d <- index_condn_comorbs[[n]] %>% select(USRDS_ID:CLM_THRU, comorb_indx) # Comorbidities per indiv per visit hosp_post_dx[[n]] %>% mutate(CLM_FROM = as.character(CLM_FROM), CLM_THRU = as.character(CLM_THRU)) %>% left_join(d) %>% group_by(USRDS_ID) %>% filter(comorb_indx == max(comorb_indx)) %>% # Take worst comorbidity index ungroup() %>% distinct() -> out[[n]] } 
assertthat::are_equal(map_int(hosp_post_dx, nrow), map_int(out, nrow)) # hosp_post_dx <- out modeling_data <- hosp_cox_data for(n in names(modeling_data)){ modeling_data[[n]] <- modeling_data[[n]] %>% left_join(out[[n]] %>% select(USRDS_ID, comorb_indx)) } ## Data munging to add simulated withdrawal times Dat <- fst::read_fst(path(dropdir, 'revision_JASN','Analytic_DISGRPC.fst')) modeling_data2 <- map(modeling_data, ~left_join(., select(Dat, USRDS_ID, toc:tow), by ='USRDS_ID') %>% mutate_at(vars(toc:tow), funs(.*365.25)) %>% # Change to days mutate(time_on_dialysis = as.numeric(time_on_dialysis), REGION = as.factor(REGION), agegrp_at_event = fct_collapse(agegrp_at_event, '<50' = c('<40','[40,50)'))) %>% split(.,.$Race)) # convert times to days save(modeling_data, modeling_data2, file = file.path(dropdir,'revision_JASN','modeling_data.rda'), compress = T) # TODO: Figure out which analysis makes the most sense ##%######################################################%## # # #### Analyses #### # # ##%######################################################%## # What is the chance of discontinuation, by age and race -------------------------------------- hosp_postdx_age <- readRDS(file.path(dropdir,'revision_JASN','hosp_postdx_age.rds')) out1 <- map(hosp_postdx_age, ~.x %>% group_by(agegrp_at_event, RACE2) %>% summarize(prop_withdrew = round(mean(cens_type==3), 3), N = n()) %>% ungroup()) %>% bind_rows(.id = 'index_event') %>% filter(!is.na(RACE2)) %>% unite(out, c('prop_withdrew','N'), sep=' / ') %>% spread(agegrp_at_event, out) %>% mutate(index_event = transform_indx(index_event)) %>% rename(`Index event`=index_event, Race=RACE2) hosp_post_dx <- readRDS(file.path(dropdir, 'revision_JASN', 'final_hosp_data.rds')) out2 <- map(hosp_post_dx, ~.x %>% group_by(RACE2) %>% summarise(prop_withdrew = mean(cens_type==3), N = n())) %>% bind_rows(.id='index_condition') %>% filter(!is.na(RACE2)) %>% mutate(index_condition = transform_indx(index_condition)) %>% mutate(prop_withdrew = 
round(prop_withdrew,3)) %>% unite(Overall, c('prop_withdrew', 'N'), sep = ' / ') out <- left_join(out1, out2, by=c("Index event" = 'index_condition','Race'='RACE2')) openxlsx::write.xlsx(out, file='results/revision_JASN/Withdrawal_age_raceCause.xlsx') # Median time after index condition to discontinuation ---------------------------------------- hosp_postdx_age <- readRDS(file.path(dropdir, 'revision_JASN', 'hosp_postdx_age.rds')) map(hosp_postdx_age, ~.x %>% mutate(time_to_wd = as.numeric(surv_date - CLM_FROM)) %>% # time between dialysis and withdrawal group_by(agegrp_at_event, RACE2) %>% summarise(median_time = median(time_to_wd, na.rm=T))) %>% bind_rows(.id = 'index_condition') %>% filter(!is.na(RACE2)) %>% spread(agegrp_at_event, median_time) %>% mutate(index_condition = transform_indx(index_condition)) %>% rename('Index event' = 'index_condition', 'Race' = 'RACE2') %>% openxlsx::write.xlsx(file = 'results/revision_JASN/Time_to_withdrawalCause.xlsx') # Survival analysis on discontinuation -------------------------------------------------------- load(file.path(dropdir, 'revision_JASN', 'modeling_data.rda')) ## Kaplan Meier curves fit_list = map(modeling_data, ~survfit(Surv(time_from_event, cens_type==3)~ Race, data = .)) cph1_list <- map(modeling_data, ~ coxph(Surv(time_from_event, cens_type==3)~ Race, data = .)) logrank_list <- map(cph1_list, ~format.pval(anova(.)[2,4], eps=1e-6)) plt_list <- vector('list',6) for(i in 1:6){ plt_list[[i]] <- survminer::ggsurvplot(fit_list[[i]], data = modeling_data[[i]], conf.int = F, pval = F, censor = F, risk.table = F, xlab = 'Time (days)', ylab = 'Percent who discontinued dialysis', legend = 'bottom', title = names(fit_list)[i])$plot + scale_y_continuous(labels = scales::percent) + annotate('text', x = 50, y = 0.1, label = paste0('p-value : ', logrank_list[[i]]), hjust = 0) + theme( legend.justification = c(0.5, 0.5)) } pdf('graphs/revision_JASN/KaplanMeierPlotsCause.pdf') for(i in 1:6) print(plt_list[[i]]) dev.off() #' Cox 
regressions adjusting for age at event, gender, race, z-score, region, comorbidities and time on dialysis at time of event hosp_coxph <- map(modeling_data, ~ coxph(Surv(time_from_event+0.1, cens_type==3)~Race + agegrp_at_event + SEX + zscore + REGION + comorb_indx + time_on_dialysis + ESRD_Cause, data = .) %>% broom::tidy() %>% filter(str_detect(term, 'Race')) %>% select(term, estimate, p.value:conf.high) %>% mutate(term = str_remove(term, 'Race')) %>% mutate_at(vars(estimate, conf.low:conf.high), exp)) bind_rows(hosp_coxph, .id = 'Index event') %>% mutate(`Index event` = case_when(`Index event` == 'stroke_primary' ~ 'Primary stroke', `Index event` == 'stroke_compl' ~ 'Stroke with complications', `Index event` == 'LuCa' ~ 'Lung cancer', `Index event` == 'MetsCa' ~ 'Metastatic cancer', `Index event` == 'dement' ~ 'Dementia', `Index event` == 'thrive' ~ 'Failure to thrive')) %>% ggplot(aes(x = term, y = estimate, ymin = conf.low, ymax = conf.high))+ geom_pointrange() + geom_hline(yintercept = 1, linetype =3)+ facet_wrap(~`Index event`, nrow=2) + theme(axis.text.x = element_text(angle = 45, hjust=1)) + scale_y_continuous('HR for discontinuation, compared to Whites', breaks = seq(0.4,1.4, by = 0.2))+ labs(x = '') + ggsave('graphs/revision_JASN/ForestPlotCause.pdf') bind_rows(hosp_coxph, .id = 'Index event') %>% rename(Race = term, HR = estimate, `P-value` = p.value, `95% LCB` = conf.low, `95% UCB` = conf.high) %>% mutate(`Index event` = case_when(`Index event` == 'stroke_primary' ~ 'Primary stroke', `Index event` == 'stroke_compl' ~ 'Stroke with complications', `Index event` == 'LuCa' ~ 'Lung cancer', `Index event` == 'MetsCa' ~ 'Metastatic cancer', `Index event` == 'dement' ~ 'Dementia', `Index event` == 'thrive' ~ 'Failure to thrive')) %>% clean_cols(`Index event`) %>% openxlsx::write.xlsx('results/revision_JASN/CoxPHCause.xlsx', colWidths = 'auto', headerStyle = openxlsx::createStyle(textDecoration = 'BOLD'), overwrite = TRUE) # Evaluating how long from 
discontinuation to death ------------------------------------------- ProjTemplate::reload() hospitalization <- readRDS(path(dropdir, 'hospitalization_ids.rds')) Dat <- read_fst(path(dropdir, 'revision_JASN','Analytic_DISGRPC.fst')) Dat <- Dat %>% mutate(surv_date = pmin(cens_time, withdraw_time, DIED, TX1DATE, na.rm=T)) %>% mutate(RACE2 = forcats::fct_relevel(RACE2, 'White')) Dat <- Dat %>% mutate(withdraw_to_death = ifelse(cens_type==3 & !is.na(BEGIN_withdraw), tod - tow, NA)) Dat %>% filter(RACE2 != 'Other') %>% ggplot(aes(x = RACE2, y = withdraw_to_death * 365.25))+geom_boxplot() Dat %>% filter(RACE2 != 'Other', cens_type==3) %>% kruskal.test(withdraw_to_death~RACE2, data=.) %>% broom::tidy() Dat %>% filter(RACE2 != 'Other', cens_type == 3) %>% ggplot(aes(x = withdraw_to_death*365.25))+ geom_density(aes(group = RACE2, color = RACE2)) + xlim(0,100) Dat %>% filter(RACE2 != 'Other', cens_type == 3) %>% group_by(RACE2) %>% summarise(med_surv = median(365*withdraw_to_death, na.rm=T)) # TODO: Sensitivity of all results to imputation of withdrawal time # This means, we assumed, if withdrawal data is missing, that we used 7 days before death. # We should see how our results hold up if we impute the withdrawal date as # (a) the date of death # (b) randomly drawn from race-specific distribution # Assessing comorbidities from hospitalization ------------------------------------------------ ## This is done in evaluate_comorbidities.R # Simulation study ---------------------------------------------------------------------------- load(file.path(dropdir, 'revision_JASN', 'modeling_data.rda')) cl <- makeCluster(no_cores) registerDoParallel(cl) cox_models <- sim_fn_cause(modeling_data2) saveRDS(cox_models, file.path(dropdir, 'revision_JASN', 'cox_models.rds'), compress = T) bl <- modify_depth(cox_models, 2, ~select(., term, estimate) %>% mutate(estimate = exp(estimate))) %>% map(~bind_rows(.) 
) bl <- map(bl, ~mutate(., term = str_remove(term, 'Race'))) pdf('graphs/revision_JASN/SimulationResultsCause.pdf') for(n in names(bl)){ print(bl[[n]] %>% ggplot(aes(estimate))+geom_histogram(bins=20) + facet_wrap(~term, scales = 'free', nrow = 2)+ labs(x = 'Hazard ratio against Whites', y = '') + ggtitle(n)) } dev.off() # Simulation study stratified by group -------------------------------------------------------- load(file.path(dropdir, 'revision_JASN','modeling_data.rda')) modeling_data2_young <- modify_depth(modeling_data2, 2, ~filter(., age_at_event < 70)) modeling_data2_old <- modify_depth(modeling_data2, 2, ~filter(., age_at_event >= 70)) ## Some summaries N_young <- modify_depth(modeling_data2_young, 1, ~map_df(., nrow)) %>% bind_rows(.id = 'Condition') N_old <- modify_depth(modeling_data2_old, 1, ~map_df(., nrow)) %>% bind_rows(.id = 'Condition') cl <- makeCluster(no_cores) registerDoParallel(cl) cox_models_young <- sim_fn_cause(modeling_data2_young) cox_models_old <- sim_fn_cause(modeling_data2_old) stopCluster(cl) bl <- modify_depth(cox_models_old, 2, ~select(., term, estimate) %>% mutate(estimate = exp(estimate))) %>% map(~bind_rows(.) ) bl <- map(bl, ~mutate(., term = str_remove(term, 'Race'))) pdf('graphs/revision_JASN/SimulationResults_old.pdf') for(n in names(bl)){ print(bl[[n]] %>% ggplot(aes(estimate))+geom_histogram(bins=20) + facet_wrap(~term, scales = 'free', nrow = 2)+ labs(x = 'Hazard ratio against Whites', y = '') + ggtitle(paste('Age 70+:', n))) } dev.off() bl <- modify_depth(cox_models_young, 2, ~select(., term, estimate) %>% mutate(estimate = exp(estimate))) %>% map(~bind_rows(.) 
) bl <- map(bl, ~mutate(., term = str_remove(term, 'Race'))) pdf('graphs/revision_JASN/SimulationResults_young.pdf') for(n in names(bl)){ print(bl[[n]] %>% ggplot(aes(estimate))+geom_histogram(bins=20) + facet_wrap(~term, scales = 'free', nrow = 2)+ labs(x = 'Hazard ratio against Whites', y = '') + ggtitle(paste('Age 69-:', n))) } dev.off() save(cox_models_young, cox_models_old, file = file.path(dropdir, 'revision_JASN', 'cox_models_sim_strat.rda'), compress = T) # # YLL and observed time computations ---------------------------------------------------------------------------- # # load(file.path(dropdir, 'modeling_data.rda')) # cl <- makeCluster(no_cores) # registerDoParallel(cl) # sim_results <- sim_fn_yll(modeling_data2) # stopCluster(cl) # # # # obstimes <- out_obstimes_fn(simres, modeling_data2) # # nominal_obstimes <- map(modeling_data, ~mutate(., time_from_event = ifelse(cens_type ==3, time_from_event + 7, time_from_event)) %>% # # group_by(Race) %>% # # summarize(nominal_obstime = sum(time_from_event, na.rm=T)) %>% # # ungroup() %>% # # mutate(Race = as.character(Race))) # # yll <- out_yll_fn(sim_results) # final_tbl <- out_obstimes_fn(sim_results, modeling_data2) # # ## Repeat for stratified analyses # cl <- makeCluster(no_cores) # registerDoParallel(cl) # sim_results_young <- sim_fn_yll(modeling_data2_young) # sim_results_old <- sim_fn_yll(modeling_data2_old) # stopCluster(cl) # # # yll_young <- out_yll_fn(sim_results_young) # yll_old <- out_yll_fn(sim_results_old) # # final_tbl_young = out_obstimes_fn(sim_results_young, modeling_data2_young) # final_tbl_old = out_obstimes_fn(sim_results_old, modeling_data2_old) # # openxlsx::write.xlsx(list('Overall' = final_tbl, # 'Young' = final_tbl_young, # 'Old' = final_tbl_old, # 'Overall-YLL' = yll, # 'Young-YLL' = yll_young, # 'Old-YLL' = yll_old), # file='ObsTime.xlsx', # headerStyle = openxlsx::createStyle(textDecoration = 'BOLD')) # # Summaries of Weibull models 
----------------------------------------------------------------- load(file.path(dropdir,'revision_JASN', 'modeling_data.rda')) weib_models <- list() for (cnd in names(modeling_data2)){ D = modeling_data2[[cnd]] weib_models[[cnd]] <- survreg(Surv(time_from_event+0.1, cens_type==3)~ # Added 0.1 since weibull is > 0 agegrp_at_event + SEX + time_on_dialysis + REGION+ zscore + comorb_indx + ESRD_Cause, data = D$White, dist = 'weibull') } weib_res <- map(weib_models, broom::tidy) ## Cox-Snell graphs pdf('graphs/revision_JASN/CoxSnell.pdf') for(n in names(weib_models)){ cs <- cox_snell(weib_models[[n]], modeling_data2[[n]]$White) title(main = n) } dev.off() weib_res2 <- map(weib_res, aft_to_hr) format_terms <- function(x, mod){ nms <- names(attr(mod$terms, 'dataClasses')) baseline <- map(mod$xlevels, 1) %>% bind_rows() %>% gather(Variables, term) %>% cbind(out = '') x$Variables = '' for (n in nms){ x$Variables[str_detect(x$term, n)] <- n x$term = str_remove(x$term, n) } x <- x %>% select(Variables, term, out) %>% bind_rows(baseline) %>% arrange(Variables, term) x2 <- split(x, x$Variables) xlvl = mod$xlevels for (n in names(xlvl)) { x2[[n]] <- x2[[n]][match(xlvl[[n]],x2[[n]][,'term']),] } x <- bind_rows(x2) x <- x %>% rename(value = term, HR = out) %>% clean_cols(Variables) return(x) } out <- map2(weib_res2, weib_models, format_terms) out <- map(out, ~ mutate(., Variables = case_when(Variables == 'agegrp_at_event' ~ 'Age group', Variables == 'comorb_indx' ~ 'Comorbidity index', Variables == 'REGION' ~ 'Region', Variables == 'SEX' ~ 'Sex', Variables == 'time_on_dialysis' ~ "Time on dialysis (days)", Variables == 'zscore' ~ 'SES Score', TRUE ~ '')) %>% rename(Group = value)) openxlsx::write.xlsx(out, file = 'results/revision_JASN/WhiteModelsCause.xlsx', headerStyle = openxlsx::createStyle(textDecoration = 'BOLD') )
library(MASS) library(ggplot2) library(gridExtra) library(mgcv) library(reshape) library(mvtnorm) library(MCMCpack) library(splines) library(fGarch) library(xtable) library(quadprog) source('C:\\Users\\dcries\\github\\ebmodel\\base_fcn.R') Rcpp::sourceCpp('C:\\Users\\dcries\\github\\ebmodel\\ppred_analysis.cpp') Rcpp::sourceCpp('C:\\Users\\dcries\\github\\ebmodel\\bivar_fullmcmc.cpp') params <- c(100,50,300,14,-7,-200,8,-5) #params <- c(100,50,300,14,-7,-200,8,-5) simdata <- generate_data4(params,dist=1,nrep=2) #simdata2 <- generate_data2(params,dist=1) yee <- simdata$yee yes <- simdata$yes yeeb <- rowMeans(yee) yesb <- rowMeans(yes) xee <- simdata$xee xes <- simdata$xes wee <- simdata$wee wes <- simdata$wes n <- length(yeeb) nr <- ncol(yee) zg <- simdata$zg #<- rbinom(n,1,0.5) #gender indicator zb <- simdata$zb#<- rnorm(n,27,5) #bmi za <- simdata$za#<- runif(n,20,40) #age Z= cbind(zg,zb,za) #number of mcmc iterations after burn ureps <- 1500 #tuning burnin burn <- 500 #number of iterations needed nreps <- ureps+burn #inital number of knots currentkee <- 2;currentkee2 <- 1;currentkee3 <- 5 currentkes <- 2;currentkes2 <- 5;currentkes3 <- 1 #inital knot locations #knots <- sort(x)[c(50,125,200,250)] knotsee <- sort(wee)[c(50,200)];knotsee2 <- sort(wee)[c(200)];knotsee3 <- sort(wee)[c(10,50,100,200,250)] knotses <- sort(wes)[c(50,200)];knotses2 <- sort(wes)[c(10,50,100,200,250)];knotses3 <- sort(wes)[c(50)] #specified by Denison ck <- 0.4 #number of continuous derivatives -1 l <- 3 #number of components h <- 10#10 #sd for random walk maxkt <- 15 #current latent variables x, used in lm(y~bs(x)) currentxee <- wee[,1];currentxee2 <- yee[,2];currentxee3 <- wee[,2]+rnorm(n,0,300) currentxes <- wes[,1];currentxes2 <- yes[,1];currentxes3 <- wes[,2]+rnorm(n,0,100) currentmuee <- rep(2600,h);currentmuee2 <- rep(1800,h);currentmuee3 <- rep(3600,h)#runif(h,1800,3800) currentmues <- rep(0,h);currentmues2 <- rep(100,h);currentmues3 <- rep(-100,h)#runif(h,-400,400) #currentmuee <- 
2600#runif(h,1800,3800) #currentmues <- 0#runif(h,-400,400) currentpi <- rep(1/h,h) currentzeta <- sample(1:h,n,replace=T) currentv <- rep(0.3,h) currentpredee <- wee[,1] currentpredes <- wes[,1] currentsigma2ee <- 400^2;currentsigma2ee2 <- 200^2;currentsigma2ee3 <- 600^2 currentsigma2ve <- 250^2;currentsigma2ve2 <- 150^2;currentsigma2ve3 <- 350^2 currentsigma2es <- 220^2;currentsigma2es2 <- 120^2;currentsigma2es3 <- 320^2 currentsigma2vs <- 80^2;currentsigma2vs2 <- 180^2;currentsigma2vs3 <- 30^2 currentalpha <- 1;currentalpha2 <- 1;currentalpha3 <- 1 tunevar = c(100^2,25^2)#c(100^2,25^2) tunecor = -0.2 currentsigma2x <- c(400^2,400^2);currentsigma2x2 <- c(800^2,800^2);currentsigma2x3 <- c(100^2,100^2) currentbetaee <- matrix(c(0,0,0),nrow=1);currentbetaee2 <- matrix(c(-50,-15,-15),nrow=1);currentbetaee3 <- matrix(c(100,15,15),nrow=1) currentbetaes <- matrix(c(0,0,0),nrow=1);currentbetaes2 <- matrix(c(-100,-15,-15),nrow=1);currentbetaes3 <- matrix(c(100,15,15),nrow=1) #priors #lambda is value for mean of prior for number of knots lambda <- 1 #priors for sigmae ae <- 0.01;be <- 0.01 #priors for sigmav av <- 0.01; bv <- 0.01 m <- c(2400,0) v2 <- matrix(c(3000^2,0,0,1000^2),ncol=2,byrow=FALSE) a_alp <- 1 b_alp <- 1 d <- 3#3 psi <- diag(2) #prior variance for all coefficients Vb <- 100000 Mb <- 0 ck <- 0.4 initial <- list(currentkee=currentkee,currentkes=currentkes,ck=ck,knotsee=knotsee, knotses=knotses,currentxee=currentxee,currentxes=currentxes, currentv=currentv,currentpi=currentpi,currentalpha=currentalpha, currentzeta=currentzeta,currentpredee=currentpredee,currentpredes=currentpredes, currentmuee=currentmuee,currentmues=currentmues,currentsigma2ee=currentsigma2ee, currentsigma2es=currentsigma2es,currentsigma2ve=currentsigma2ve, currentsigma2vs=currentsigma2vs,currentsigma2x=currentsigma2x, tunevar=tunevar,currentbetaee=currentbetaee,currentbetaes=currentbetaes, tunecor=tunecor) initial2 <- list(currentkee=currentkee2,currentkes=currentkes2,ck=ck,knotsee=knotsee2, 
knotses=knotses2,currentxee=currentxee2,currentxes=currentxes2, currentv=currentv,currentpi=currentpi,currentalpha=currentalpha2, currentzeta=currentzeta,currentpredee=currentpredee,currentpredes=currentpredes, currentmuee=currentmuee2,currentmues=currentmues2,currentsigma2ee=currentsigma2ee2, currentsigma2es=currentsigma2es2,currentsigma2ve=currentsigma2ve2, currentsigma2vs=currentsigma2vs2,currentsigma2x=currentsigma2x2, tunevar=tunevar,currentbetaee=currentbetaee2,currentbetaes=currentbetaes2, tunecor=tunecor) initial3 <- list(currentkee=currentkee3,currentkes=currentkes3,ck=ck,knotsee=knotsee3, knotses=knotses3,currentxee=currentxee3,currentxes=currentxes3, currentv=currentv,currentpi=currentpi,currentalpha=currentalpha3, currentzeta=currentzeta,currentpredee=currentpredee,currentpredes=currentpredes, currentmuee=currentmuee3,currentmues=currentmues3,currentsigma2ee=currentsigma2ee3, currentsigma2es=currentsigma2es3,currentsigma2ve=currentsigma2ve3, currentsigma2vs=currentsigma2vs3,currentsigma2x=currentsigma2x3, tunevar=tunevar,currentbetaee=currentbetaee3,currentbetaes=currentbetaes3, tunecor=tunecor) prior <- list(lambda=lambda,ae=ae,be=be,av=av,bv=bv,a_alp=a_alp, b_alp=b_alp,d=d,m=m,v2=v2,psi=psi,Vb=Vb,Mb=Mb) chain1=mcmc_full(yee,yes,wee,wes,Z,initial,prior,nreps,burn,h,maxkt,my_bs,my_qp) chain2=mcmc_full(yee,yes,wee,wes,Z,initial,prior,nreps,burn,h,maxkt,my_bs,my_qp) chain3=mcmc_full(yee,yes,wee,wes,Z,initial,prior,nreps,burn,h,maxkt,my_bs,my_qp) #---------------------------------------------------------------------------------# #combine lists #mapply(c, a1, a2, SIMPLIFY=FALSE) latentxee <- mcmc.list(mcmc(chain1$latentxee),mcmc(chain2$latentxee),mcmc(chain3$latentxee)) latentxes <- mcmc.list(mcmc(chain1$latentxes),mcmc(chain2$latentxes),mcmc(chain3$latentxes)) muee <- mcmc.list(mcmc(chain1$muee),mcmc(chain2$muee),mcmc(chain3$muee)) mues <- mcmc.list(mcmc(chain1$mues),mcmc(chain2$mues),mcmc(chain3$mues)) sigma2xee <- 
mcmc.list(mcmc(chain1$sigma2xee),mcmc(chain2$sigma2xee),mcmc(chain3$sigma2xee)) sigma2xes <- mcmc.list(mcmc(chain1$sigma2xes),mcmc(chain2$sigma2xes),mcmc(chain3$sigma2xes)) corrx <- mcmc.list(mcmc(chain1$corrx),mcmc(chain2$corrx),mcmc(chain3$corrx)) pi <- mcmc.list(mcmc(chain1$pi),mcmc(chain2$pi),mcmc(chain3$pi)) meanfcnee <- mcmc.list(mcmc(chain1$meanfcnee),mcmc(chain2$meanfcnee),mcmc(chain3$meanfcnee)) meanfcnes <- mcmc.list(mcmc(chain1$meanfcnes),mcmc(chain2$meanfcnes),mcmc(chain3$meanfcnes)) sigma2eee <- mcmc.list(mcmc(chain1$sigma2eee),mcmc(chain2$sigma2eee),mcmc(chain3$sigma2eee)) sigma2ees <- mcmc.list(mcmc(chain1$sigma2ees),mcmc(chain2$sigma2ees),mcmc(chain3$sigma2ees)) sigma2vee <- mcmc.list(mcmc(chain1$sigma2vee),mcmc(chain2$sigma2vee),mcmc(chain3$sigma2vee)) sigma2ves <- mcmc.list(mcmc(chain1$sigma2ves),mcmc(chain2$sigma2ves),mcmc(chain3$sigma2ves)) kee <- mcmc.list(mcmc(chain1$kee),mcmc(chain2$kee),mcmc(chain3$kee)) kes <- mcmc.list(mcmc(chain1$kes),mcmc(chain2$kes),mcmc(chain3$kes)) betaee <- mcmc.list(mcmc(chain1$betaee),mcmc(chain2$betaee),mcmc(chain3$betaee)) betaes <- mcmc.list(mcmc(chain1$betaes),mcmc(chain2$betaes),mcmc(chain3$betaes)) alpha <- mcmc.list(mcmc(chain1$alpha),mcmc(chain2$alpha),mcmc(chain3$alpha)) plot(xee,apply(chain1$latentxes,2,function(x){length(unique(x))/length(x)})) plot(xee,apply(chain2$latentxes,2,function(x){length(unique(x))/length(x)})) plot(xee,apply(chain3$latentxes,2,function(x){length(unique(x))/length(x)})) chain1$acceptance_rate gelman.diag(muee) gelman.diag(mues) gelman.diag(sigma2xee) gelman.diag(sigma2xes) gelman.diag(corrx) gelman.diag(sigma2eee) gelman.diag(sigma2ees) gelman.diag(sigma2vee) gelman.diag(sigma2ves) gelman.diag(kee) gelman.diag(kes) gelman.diag(betaee) gelman.diag(betaes) plot(betaee) pairs(chain1$betaee) pairs(data.frame(chain1$sigma2eee,chain1$sigma2ees,chain1$sigma2vee,chain1$sigma2ves)) truth <- 
c(min(simdata$xee),quantile(simdata$xee,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$xee),min(simdata$xes),quantile(simdata$xes,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$xes)) truth2 <- c(sum(simdata$xee < 2000)/length(simdata$xee),sum(simdata$xes < 0)/length(simdata$xes),range(simdata$xee+simdata$xei),quantile(simdata$xee,probs=c(0.75))-quantile(simdata$xee,probs=c(0.25)),quantile(simdata$xei,probs=c(0.75))-quantile(simdata$xei,probs=c(0.25)),range(simdata$xes)) wsum <- c(min(simdata$wee),quantile(simdata$wee,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$wee),min(simdata$wes),quantile(simdata$wes,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$wes)) ysum <- c(min(simdata$yee),quantile(simdata$yee,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$yee),min(simdata$yes),quantile(simdata$yes,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$yes)) ppan=pp_full(chain1,truth,wsum,ysum,xee,xes,yeeb,yesb,Z) out <- list(chain1=chain1,chain2=chain2,chain3=chain3,ppan=ppan) save(out,file="//my.files.iastate.edu/Users/dcries/Desktop/dp_mcmc1.RData") #save.image("//my.files.iastate.edu/Users/dcries/Desktop/dp_mcmc1.RData") #save.image("//my.files.iastate.edu/Users/dcries/Desktop/full_mcmc1x.RData") # xnames <- c("EI Min","EI 5%tile","EI 10%tile","EI 25%tile","EI 75%tile","EI 90%tile","EI 95%tile","EI Max", # "EE Min","EE 5%tile","EE 10%tile","EE 25%tile","EE 75%tile","EE 90%tile","EE 95%tile","EE Max") wnames <- c("EE Min","EE 5%tile","EE 10%tile","EE 25%tile","EE 75%tile","EE 90%tile","EE 95%tile","EE Max", "ES Min","ES 5%tile","ES 10%tile","ES 25%tile","ES 75%tile","ES 90%tile","ES 95%tile","ES Max") ppred16(ppan$checkx,truth,wnames) ppred16(ppan$checkw,wsum,wnames) ppred16(ppan$checky,ysum,wnames) #unique components table(apply(chain1$zeta,1,function(x){return(length(unique(x)))})) #max utilized component, for DP table(apply(chain1$zeta,1,function(x){return(max(x))})) summat <- matrix(0,nrow=10,ncol=5) #summat[1,] <- summary(muee)$quantiles #summat[2,] <- 
summary(mues)$quantiles summat[1,] <- quantile(sqrt(unlist(sigma2eee)),probs=c(0.025,0.25,0.5,0.75,0.975)) summat[2,] <- quantile(sqrt(unlist(sigma2ees)),probs=c(0.025,0.25,0.5,0.75,0.975)) summat[3,] <- quantile(sqrt(unlist(sigma2vee)),probs=c(0.025,0.25,0.5,0.75,0.975)) summat[4,] <- quantile(sqrt(unlist(sigma2ves)),probs=c(0.025,0.25,0.5,0.75,0.975)) #summat[7,] <- quantile(sqrt(unlist(sigma2xee)),probs=c(0.025,0.25,0.5,0.75,0.975)) #summat[8,] <- quantile(sqrt(unlist(sigma2xes)),probs=c(0.025,0.25,0.5,0.75,0.975)) #summat[5,] <- summary(corrx)$quantiles summat[5:7,] <- summary(betaee)$quantiles summat[8:10,] <- summary(betaes)$quantiles summat <- data.frame(summat) names(summat) <- c("2.5\\%","25\\%","50\\%","75\\%","97.5\\%") row.names(summat) <- c("$\\sigma_{yee}$","$\\sigma_{yes}$", "$\\sigma_{wee}$","$\\sigma_{wes}$", "$\\gamma_{1,ee}$","$\\gamma_{2,ee}$","$\\gamma_{3,ee}$", "$\\gamma_{1,es}$","$\\gamma_{2,es}$","$\\gamma_{3,es}$") print(xtable(summat), sanitize.text.function=function(x){x}) #ee ind <- sample(1:nrow(chain1$meanfcnee),500) y <- melt(t(chain1$meanfcnee)[,ind]) x <- melt(t(chain1$latentxee)[,ind]) dat <- cbind(x,y$value) names(dat) = c("x1","variable", "xval", "yval") #cs95ee <- data.frame(t(apply(chain1$meanfcnee,2,quantile,probs=c(0.025,0.975))),x=rowMeans(t(chain1$latentxee))) #names(cs95ee) <- c("lower","upper","x") p1 <- ggplot() + geom_line(data = dat,aes(x = xval, y = yval, group = variable),alpha=0.02) + geom_line(aes(x=rowMeans(t(chain1$latentxee)),y=rowMeans(t(chain1$meanfcnee))),col="red",size=1) + #geom_line(aes(x=simdata$xee,y=eecurve(simdata$xee)),col="blue",size=1)+ #geom_ribbon(data=cs95ee,aes(x=x,ymin=lower,ymax=upper,linetype=NA),colour="blue",alpha=0.3) + geom_point(aes(x=simdata$xee,y=yeeb)) + xlab("Truth") + ylab("Observed") + theme_bw() #es ind <- sample(1:nrow(chain1$meanfcnes),500) y <- melt(t(chain1$meanfcnes)[,ind]) x <- melt(t(chain1$latentxes)[,ind]) dat <- cbind(x,y$value) names(dat) = c("x1","variable", "xval", 
"yval") #cs95es <- data.frame(t(apply(chain1$meanfcnes,2,quantile,probs=c(0.025,0.975))),x=rowMeans(t(chain1$latentxes))) #names(cs95es) <- c("lower","upper","x") p2 <- ggplot() + geom_line(data = dat,aes(x = xval, y = yval, group = variable),alpha=0.04) + geom_line(aes(x=rowMeans(t(chain1$latentxes)),y=rowMeans(t(chain1$meanfcnes))),col="red",size=1) + #geom_line(aes(x=simdata$xee,y=eecurve(simdata$xee)),col="blue",size=1)+ #geom_ribbon(data=cs95es,aes(x=x,ymin=lower,ymax=upper,linetype=NA),colour="blue",alpha=0.3) + geom_point(aes(x=simdata$xes,y=yesb)) + xlab("Truth") + ylab("Observed") + theme_bw() grid.arrange(p1,p2,nrow=1) #check DP density convergence, need 3 chains, rbind results together mee <- rbind((chain1$muee),(chain2$muee),(chain3$muee)) see <- rbind(sqrt(chain1$sigma2xee),sqrt(chain2$sigma2xee),sqrt(chain3$sigma2xee)) pi2 <- rbind((chain1$pi),(chain2$pi),(chain3$pi)) mes <- rbind((chain1$mues),(chain2$mues),(chain3$mues)) ses <- rbind(sqrt(chain1$sigma2xes),sqrt(chain2$sigma2xes),sqrt(chain3$sigma2xes)) loc_check <- seq(1300,3300,length=10) density_conv(cbind(mee,pi2,see),loc_check[3]) loc_check <- seq(-500,500,length=10) density_conv(cbind(mes,pi2,ses),loc_check[2])
/run_dpmm_mcmc.R
no_license
dcries/ebmodel
R
false
false
13,634
r
library(MASS) library(ggplot2) library(gridExtra) library(mgcv) library(reshape) library(mvtnorm) library(MCMCpack) library(splines) library(fGarch) library(xtable) library(quadprog) source('C:\\Users\\dcries\\github\\ebmodel\\base_fcn.R') Rcpp::sourceCpp('C:\\Users\\dcries\\github\\ebmodel\\ppred_analysis.cpp') Rcpp::sourceCpp('C:\\Users\\dcries\\github\\ebmodel\\bivar_fullmcmc.cpp') params <- c(100,50,300,14,-7,-200,8,-5) #params <- c(100,50,300,14,-7,-200,8,-5) simdata <- generate_data4(params,dist=1,nrep=2) #simdata2 <- generate_data2(params,dist=1) yee <- simdata$yee yes <- simdata$yes yeeb <- rowMeans(yee) yesb <- rowMeans(yes) xee <- simdata$xee xes <- simdata$xes wee <- simdata$wee wes <- simdata$wes n <- length(yeeb) nr <- ncol(yee) zg <- simdata$zg #<- rbinom(n,1,0.5) #gender indicator zb <- simdata$zb#<- rnorm(n,27,5) #bmi za <- simdata$za#<- runif(n,20,40) #age Z= cbind(zg,zb,za) #number of mcmc iterations after burn ureps <- 1500 #tuning burnin burn <- 500 #number of iterations needed nreps <- ureps+burn #inital number of knots currentkee <- 2;currentkee2 <- 1;currentkee3 <- 5 currentkes <- 2;currentkes2 <- 5;currentkes3 <- 1 #inital knot locations #knots <- sort(x)[c(50,125,200,250)] knotsee <- sort(wee)[c(50,200)];knotsee2 <- sort(wee)[c(200)];knotsee3 <- sort(wee)[c(10,50,100,200,250)] knotses <- sort(wes)[c(50,200)];knotses2 <- sort(wes)[c(10,50,100,200,250)];knotses3 <- sort(wes)[c(50)] #specified by Denison ck <- 0.4 #number of continuous derivatives -1 l <- 3 #number of components h <- 10#10 #sd for random walk maxkt <- 15 #current latent variables x, used in lm(y~bs(x)) currentxee <- wee[,1];currentxee2 <- yee[,2];currentxee3 <- wee[,2]+rnorm(n,0,300) currentxes <- wes[,1];currentxes2 <- yes[,1];currentxes3 <- wes[,2]+rnorm(n,0,100) currentmuee <- rep(2600,h);currentmuee2 <- rep(1800,h);currentmuee3 <- rep(3600,h)#runif(h,1800,3800) currentmues <- rep(0,h);currentmues2 <- rep(100,h);currentmues3 <- rep(-100,h)#runif(h,-400,400) #currentmuee <- 
2600#runif(h,1800,3800) #currentmues <- 0#runif(h,-400,400) currentpi <- rep(1/h,h) currentzeta <- sample(1:h,n,replace=T) currentv <- rep(0.3,h) currentpredee <- wee[,1] currentpredes <- wes[,1] currentsigma2ee <- 400^2;currentsigma2ee2 <- 200^2;currentsigma2ee3 <- 600^2 currentsigma2ve <- 250^2;currentsigma2ve2 <- 150^2;currentsigma2ve3 <- 350^2 currentsigma2es <- 220^2;currentsigma2es2 <- 120^2;currentsigma2es3 <- 320^2 currentsigma2vs <- 80^2;currentsigma2vs2 <- 180^2;currentsigma2vs3 <- 30^2 currentalpha <- 1;currentalpha2 <- 1;currentalpha3 <- 1 tunevar = c(100^2,25^2)#c(100^2,25^2) tunecor = -0.2 currentsigma2x <- c(400^2,400^2);currentsigma2x2 <- c(800^2,800^2);currentsigma2x3 <- c(100^2,100^2) currentbetaee <- matrix(c(0,0,0),nrow=1);currentbetaee2 <- matrix(c(-50,-15,-15),nrow=1);currentbetaee3 <- matrix(c(100,15,15),nrow=1) currentbetaes <- matrix(c(0,0,0),nrow=1);currentbetaes2 <- matrix(c(-100,-15,-15),nrow=1);currentbetaes3 <- matrix(c(100,15,15),nrow=1) #priors #lambda is value for mean of prior for number of knots lambda <- 1 #priors for sigmae ae <- 0.01;be <- 0.01 #priors for sigmav av <- 0.01; bv <- 0.01 m <- c(2400,0) v2 <- matrix(c(3000^2,0,0,1000^2),ncol=2,byrow=FALSE) a_alp <- 1 b_alp <- 1 d <- 3#3 psi <- diag(2) #prior variance for all coefficients Vb <- 100000 Mb <- 0 ck <- 0.4 initial <- list(currentkee=currentkee,currentkes=currentkes,ck=ck,knotsee=knotsee, knotses=knotses,currentxee=currentxee,currentxes=currentxes, currentv=currentv,currentpi=currentpi,currentalpha=currentalpha, currentzeta=currentzeta,currentpredee=currentpredee,currentpredes=currentpredes, currentmuee=currentmuee,currentmues=currentmues,currentsigma2ee=currentsigma2ee, currentsigma2es=currentsigma2es,currentsigma2ve=currentsigma2ve, currentsigma2vs=currentsigma2vs,currentsigma2x=currentsigma2x, tunevar=tunevar,currentbetaee=currentbetaee,currentbetaes=currentbetaes, tunecor=tunecor) initial2 <- list(currentkee=currentkee2,currentkes=currentkes2,ck=ck,knotsee=knotsee2, 
knotses=knotses2,currentxee=currentxee2,currentxes=currentxes2, currentv=currentv,currentpi=currentpi,currentalpha=currentalpha2, currentzeta=currentzeta,currentpredee=currentpredee,currentpredes=currentpredes, currentmuee=currentmuee2,currentmues=currentmues2,currentsigma2ee=currentsigma2ee2, currentsigma2es=currentsigma2es2,currentsigma2ve=currentsigma2ve2, currentsigma2vs=currentsigma2vs2,currentsigma2x=currentsigma2x2, tunevar=tunevar,currentbetaee=currentbetaee2,currentbetaes=currentbetaes2, tunecor=tunecor) initial3 <- list(currentkee=currentkee3,currentkes=currentkes3,ck=ck,knotsee=knotsee3, knotses=knotses3,currentxee=currentxee3,currentxes=currentxes3, currentv=currentv,currentpi=currentpi,currentalpha=currentalpha3, currentzeta=currentzeta,currentpredee=currentpredee,currentpredes=currentpredes, currentmuee=currentmuee3,currentmues=currentmues3,currentsigma2ee=currentsigma2ee3, currentsigma2es=currentsigma2es3,currentsigma2ve=currentsigma2ve3, currentsigma2vs=currentsigma2vs3,currentsigma2x=currentsigma2x3, tunevar=tunevar,currentbetaee=currentbetaee3,currentbetaes=currentbetaes3, tunecor=tunecor) prior <- list(lambda=lambda,ae=ae,be=be,av=av,bv=bv,a_alp=a_alp, b_alp=b_alp,d=d,m=m,v2=v2,psi=psi,Vb=Vb,Mb=Mb) chain1=mcmc_full(yee,yes,wee,wes,Z,initial,prior,nreps,burn,h,maxkt,my_bs,my_qp) chain2=mcmc_full(yee,yes,wee,wes,Z,initial,prior,nreps,burn,h,maxkt,my_bs,my_qp) chain3=mcmc_full(yee,yes,wee,wes,Z,initial,prior,nreps,burn,h,maxkt,my_bs,my_qp) #---------------------------------------------------------------------------------# #combine lists #mapply(c, a1, a2, SIMPLIFY=FALSE) latentxee <- mcmc.list(mcmc(chain1$latentxee),mcmc(chain2$latentxee),mcmc(chain3$latentxee)) latentxes <- mcmc.list(mcmc(chain1$latentxes),mcmc(chain2$latentxes),mcmc(chain3$latentxes)) muee <- mcmc.list(mcmc(chain1$muee),mcmc(chain2$muee),mcmc(chain3$muee)) mues <- mcmc.list(mcmc(chain1$mues),mcmc(chain2$mues),mcmc(chain3$mues)) sigma2xee <- 
mcmc.list(mcmc(chain1$sigma2xee),mcmc(chain2$sigma2xee),mcmc(chain3$sigma2xee)) sigma2xes <- mcmc.list(mcmc(chain1$sigma2xes),mcmc(chain2$sigma2xes),mcmc(chain3$sigma2xes)) corrx <- mcmc.list(mcmc(chain1$corrx),mcmc(chain2$corrx),mcmc(chain3$corrx)) pi <- mcmc.list(mcmc(chain1$pi),mcmc(chain2$pi),mcmc(chain3$pi)) meanfcnee <- mcmc.list(mcmc(chain1$meanfcnee),mcmc(chain2$meanfcnee),mcmc(chain3$meanfcnee)) meanfcnes <- mcmc.list(mcmc(chain1$meanfcnes),mcmc(chain2$meanfcnes),mcmc(chain3$meanfcnes)) sigma2eee <- mcmc.list(mcmc(chain1$sigma2eee),mcmc(chain2$sigma2eee),mcmc(chain3$sigma2eee)) sigma2ees <- mcmc.list(mcmc(chain1$sigma2ees),mcmc(chain2$sigma2ees),mcmc(chain3$sigma2ees)) sigma2vee <- mcmc.list(mcmc(chain1$sigma2vee),mcmc(chain2$sigma2vee),mcmc(chain3$sigma2vee)) sigma2ves <- mcmc.list(mcmc(chain1$sigma2ves),mcmc(chain2$sigma2ves),mcmc(chain3$sigma2ves)) kee <- mcmc.list(mcmc(chain1$kee),mcmc(chain2$kee),mcmc(chain3$kee)) kes <- mcmc.list(mcmc(chain1$kes),mcmc(chain2$kes),mcmc(chain3$kes)) betaee <- mcmc.list(mcmc(chain1$betaee),mcmc(chain2$betaee),mcmc(chain3$betaee)) betaes <- mcmc.list(mcmc(chain1$betaes),mcmc(chain2$betaes),mcmc(chain3$betaes)) alpha <- mcmc.list(mcmc(chain1$alpha),mcmc(chain2$alpha),mcmc(chain3$alpha)) plot(xee,apply(chain1$latentxes,2,function(x){length(unique(x))/length(x)})) plot(xee,apply(chain2$latentxes,2,function(x){length(unique(x))/length(x)})) plot(xee,apply(chain3$latentxes,2,function(x){length(unique(x))/length(x)})) chain1$acceptance_rate gelman.diag(muee) gelman.diag(mues) gelman.diag(sigma2xee) gelman.diag(sigma2xes) gelman.diag(corrx) gelman.diag(sigma2eee) gelman.diag(sigma2ees) gelman.diag(sigma2vee) gelman.diag(sigma2ves) gelman.diag(kee) gelman.diag(kes) gelman.diag(betaee) gelman.diag(betaes) plot(betaee) pairs(chain1$betaee) pairs(data.frame(chain1$sigma2eee,chain1$sigma2ees,chain1$sigma2vee,chain1$sigma2ves)) truth <- 
c(min(simdata$xee),quantile(simdata$xee,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$xee),min(simdata$xes),quantile(simdata$xes,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$xes)) truth2 <- c(sum(simdata$xee < 2000)/length(simdata$xee),sum(simdata$xes < 0)/length(simdata$xes),range(simdata$xee+simdata$xei),quantile(simdata$xee,probs=c(0.75))-quantile(simdata$xee,probs=c(0.25)),quantile(simdata$xei,probs=c(0.75))-quantile(simdata$xei,probs=c(0.25)),range(simdata$xes)) wsum <- c(min(simdata$wee),quantile(simdata$wee,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$wee),min(simdata$wes),quantile(simdata$wes,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$wes)) ysum <- c(min(simdata$yee),quantile(simdata$yee,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$yee),min(simdata$yes),quantile(simdata$yes,probs=c(0.05,0.1,0.25,0.75,0.9,0.95)),max(simdata$yes)) ppan=pp_full(chain1,truth,wsum,ysum,xee,xes,yeeb,yesb,Z) out <- list(chain1=chain1,chain2=chain2,chain3=chain3,ppan=ppan) save(out,file="//my.files.iastate.edu/Users/dcries/Desktop/dp_mcmc1.RData") #save.image("//my.files.iastate.edu/Users/dcries/Desktop/dp_mcmc1.RData") #save.image("//my.files.iastate.edu/Users/dcries/Desktop/full_mcmc1x.RData") # xnames <- c("EI Min","EI 5%tile","EI 10%tile","EI 25%tile","EI 75%tile","EI 90%tile","EI 95%tile","EI Max", # "EE Min","EE 5%tile","EE 10%tile","EE 25%tile","EE 75%tile","EE 90%tile","EE 95%tile","EE Max") wnames <- c("EE Min","EE 5%tile","EE 10%tile","EE 25%tile","EE 75%tile","EE 90%tile","EE 95%tile","EE Max", "ES Min","ES 5%tile","ES 10%tile","ES 25%tile","ES 75%tile","ES 90%tile","ES 95%tile","ES Max") ppred16(ppan$checkx,truth,wnames) ppred16(ppan$checkw,wsum,wnames) ppred16(ppan$checky,ysum,wnames) #unique components table(apply(chain1$zeta,1,function(x){return(length(unique(x)))})) #max utilized component, for DP table(apply(chain1$zeta,1,function(x){return(max(x))})) summat <- matrix(0,nrow=10,ncol=5) #summat[1,] <- summary(muee)$quantiles #summat[2,] <- 
summary(mues)$quantiles summat[1,] <- quantile(sqrt(unlist(sigma2eee)),probs=c(0.025,0.25,0.5,0.75,0.975)) summat[2,] <- quantile(sqrt(unlist(sigma2ees)),probs=c(0.025,0.25,0.5,0.75,0.975)) summat[3,] <- quantile(sqrt(unlist(sigma2vee)),probs=c(0.025,0.25,0.5,0.75,0.975)) summat[4,] <- quantile(sqrt(unlist(sigma2ves)),probs=c(0.025,0.25,0.5,0.75,0.975)) #summat[7,] <- quantile(sqrt(unlist(sigma2xee)),probs=c(0.025,0.25,0.5,0.75,0.975)) #summat[8,] <- quantile(sqrt(unlist(sigma2xes)),probs=c(0.025,0.25,0.5,0.75,0.975)) #summat[5,] <- summary(corrx)$quantiles summat[5:7,] <- summary(betaee)$quantiles summat[8:10,] <- summary(betaes)$quantiles summat <- data.frame(summat) names(summat) <- c("2.5\\%","25\\%","50\\%","75\\%","97.5\\%") row.names(summat) <- c("$\\sigma_{yee}$","$\\sigma_{yes}$", "$\\sigma_{wee}$","$\\sigma_{wes}$", "$\\gamma_{1,ee}$","$\\gamma_{2,ee}$","$\\gamma_{3,ee}$", "$\\gamma_{1,es}$","$\\gamma_{2,es}$","$\\gamma_{3,es}$") print(xtable(summat), sanitize.text.function=function(x){x}) #ee ind <- sample(1:nrow(chain1$meanfcnee),500) y <- melt(t(chain1$meanfcnee)[,ind]) x <- melt(t(chain1$latentxee)[,ind]) dat <- cbind(x,y$value) names(dat) = c("x1","variable", "xval", "yval") #cs95ee <- data.frame(t(apply(chain1$meanfcnee,2,quantile,probs=c(0.025,0.975))),x=rowMeans(t(chain1$latentxee))) #names(cs95ee) <- c("lower","upper","x") p1 <- ggplot() + geom_line(data = dat,aes(x = xval, y = yval, group = variable),alpha=0.02) + geom_line(aes(x=rowMeans(t(chain1$latentxee)),y=rowMeans(t(chain1$meanfcnee))),col="red",size=1) + #geom_line(aes(x=simdata$xee,y=eecurve(simdata$xee)),col="blue",size=1)+ #geom_ribbon(data=cs95ee,aes(x=x,ymin=lower,ymax=upper,linetype=NA),colour="blue",alpha=0.3) + geom_point(aes(x=simdata$xee,y=yeeb)) + xlab("Truth") + ylab("Observed") + theme_bw() #es ind <- sample(1:nrow(chain1$meanfcnes),500) y <- melt(t(chain1$meanfcnes)[,ind]) x <- melt(t(chain1$latentxes)[,ind]) dat <- cbind(x,y$value) names(dat) = c("x1","variable", "xval", 
"yval") #cs95es <- data.frame(t(apply(chain1$meanfcnes,2,quantile,probs=c(0.025,0.975))),x=rowMeans(t(chain1$latentxes))) #names(cs95es) <- c("lower","upper","x") p2 <- ggplot() + geom_line(data = dat,aes(x = xval, y = yval, group = variable),alpha=0.04) + geom_line(aes(x=rowMeans(t(chain1$latentxes)),y=rowMeans(t(chain1$meanfcnes))),col="red",size=1) + #geom_line(aes(x=simdata$xee,y=eecurve(simdata$xee)),col="blue",size=1)+ #geom_ribbon(data=cs95es,aes(x=x,ymin=lower,ymax=upper,linetype=NA),colour="blue",alpha=0.3) + geom_point(aes(x=simdata$xes,y=yesb)) + xlab("Truth") + ylab("Observed") + theme_bw() grid.arrange(p1,p2,nrow=1) #check DP density convergence, need 3 chains, rbind results together mee <- rbind((chain1$muee),(chain2$muee),(chain3$muee)) see <- rbind(sqrt(chain1$sigma2xee),sqrt(chain2$sigma2xee),sqrt(chain3$sigma2xee)) pi2 <- rbind((chain1$pi),(chain2$pi),(chain3$pi)) mes <- rbind((chain1$mues),(chain2$mues),(chain3$mues)) ses <- rbind(sqrt(chain1$sigma2xes),sqrt(chain2$sigma2xes),sqrt(chain3$sigma2xes)) loc_check <- seq(1300,3300,length=10) density_conv(cbind(mee,pi2,see),loc_check[3]) loc_check <- seq(-500,500,length=10) density_conv(cbind(mes,pi2,ses),loc_check[2])
#' @rdname ebar #' @export ebar <- function(p, serie, name = NULL, stack = NULL, clickable = TRUE, xAxisIndex = 0, yAxisIndex = 0, barGap = "100%", barCategoryGap = "20%", legendHoverLink = TRUE, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p %>% ebar_(serie, name, stack, clickable, xAxisIndex, yAxisIndex, barGap, barCategoryGap, legendHoverLink, z, zlevel, ...) } #' @rdname eline #' @export eline <- function(p, serie, name = NULL, stack = NULL, clickable = TRUE, xAxisIndex = 0, yAxisIndex = 0, symbol = NULL, symbolSize = "2 | 4", symbolRotate = NULL, showAllSymbol = FALSE, smooth = TRUE, legendHoverLink = TRUE, dataFilter = "nearest", z = 2, zlevel = 0, tooltip, ...){ serie <- deparse(substitute(serie)) tooltip <- if(missing(tooltip)) default_tooltip(trigger = "axis") p %>% eline_(serie, name, stack, clickable, xAxisIndex, yAxisIndex, symbol, symbolSize, symbolRotate, showAllSymbol, smooth, legendHoverLink, dataFilter, z, zlevel, tooltip, ...) } #' @rdname earea #' @export earea <- function(p, serie, name = NULL, stack = NULL, smooth = TRUE, ...){ serie <- deparse(substitute(serie)) p %>% earea_(serie, name, stack, smooth, ...) } #' @rdname escatter #' @export escatter <- function(p, serie, size = NULL, name = NULL, clickable = TRUE, symbol = NULL, symbolSize = 4, symbolRotate = NULL, large = FALSE, largeThreshold = 2000, legendHoverLink = TRUE, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) size <- if(!missing(size)) deparse(substitute(size)) else NULL p %>% escatter_(serie, size, name, clickable, symbol, symbolSize, symbolRotate, large, largeThreshold, legendHoverLink, z, zlevel, ...) 
} #' @rdname epie #' @export epie <- function(p, serie, name = NULL, clickable = TRUE, legendHoverLink = TRUE, center = list("50%", "50%"), radius = list(0, "75%"), startAngle = 90, minAngle = 0, clockWise = TRUE, roseType = NULL, selectedOffset = 10, selectedMode = TRUE, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p %>% epie_(serie, name, clickable, legendHoverLink, center, radius, startAngle, minAngle, clockWise, roseType, selectedOffset, selectedMode, z, zlevel, ...) } #' @rdname eradar #' @export eradar <- function(p, serie, name = NULL, clickable = TRUE, symbol = NULL, symbolSize = 4, symbolRotate = NULL, legendHoverLink = TRUE, polarIndex = 0, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p %>% eradar_(serie, name, clickable, symbol, symbolSize, symbolRotate, legendHoverLink, polarIndex, z, zlevel, ...) } #' @rdname echord #' @export echord <- function(p, name = NULL, sort = "none", sortSub = "none", clickable = TRUE, z = 2, zlevel = 0, symbol = NULL, symbolSize = NULL, clockWise = FALSE, minRadius = 10, maxRadius = 20, ribbonType = TRUE, showScale = FALSE, showScaleText = FALSE, padding = 2, ...){ p %>% echord_(name, sort, sortSub, clickable, z, zlevel, symbol, symbolSize, clockWise, minRadius, maxRadius, ribbonType, showScale, showScaleText, padding, ...) 
} #' @rdname emap_choropleth #' @export emap_choropleth <- function(p, serie){ serie <- deparse(substitute(serie)) p %>% emap_choropleth_(serie) } #' @rdname emap_coords #' @export emap_coords <- function(p, lon, lat){ lon <- deparse(substitute(lon)) lat <- deparse(substitute(lat)) p %>% emap_coords_(lon, lat) } #' #' @rdname emap_lines #' #' @export emap_lines <- function(p, edges, source, target, name = NULL, clickable = TRUE, symbol = "arrow", symbolSize = 2, symbolRotate = NULL, large = FALSE, smooth = TRUE, z = 2, zlevel = 0, smoothness = 0.2, precision = 2, bundling = list(enable = FALSE, maxTurningAngle = 45), ...){ source <- deparse(substitute(source)) target <- deparse(substitute(target)) p %>% emap_lines_(edges, source, target, name, clickable, symbol, symbolSize, symbolRotate, large, smooth, z, zlevel, smoothness, precision, bundling, ...) } #' @rdname emap_points #' @export emap_points <- function(p, serie, clickable = TRUE, symbol = "pin", symbolSize = 10, symbolRotate = NULL, large = FALSE, itemStyle = NULL, ...){ serie <- deparse(substitute(serie)) itemStyle <- if(is.null(itemStyle)) list(normal = list(label = list(show = FALSE))) else itemStyle p %>% emap_points_(serie, clickable, symbol, symbolSize, symbolRotate, large, itemStyle, ...) } #' @rdname emap_heat #' @export emap_heat <- function(p, lon, lat, z, blurSize = 30, minAlpha = 0.05, valueScale = 1, opacity = 1, gradientColors = NULL, ...){ gradientColors <- if(is.null(gradientColors)) default_gradient() else gradientColors lon <- deparse(substitute(lon)) lat <- deparse(substitute(lat)) z <- deparse(substitute(z)) p %>% emap_heat_(lon, lat, z, blurSize, minAlpha, valueScale, opacity, gradientColors, ...) 
} #' @rdname emap #' @export emap <- function(p, name = NULL, mapType = "world", clickable = TRUE, z = 2, zlevel = 0, selectedMode = NULL, hoverable = FALSE, dataRangeHoverLink = TRUE, mapLocation = list(x = "center", y = "center"), mapValueCalculation = "sum", mapValuePrecision = 0, showLegendSymbol = TRUE, roam = FALSE, scaleLimit = NULL, nameMap = NULL, textFixed = NULL, ...){ p %>% emap_(name, mapType, clickable, z, zlevel, selectedMode, hoverable, dataRangeHoverLink, mapLocation, mapValueCalculation, mapValuePrecision, showLegendSymbol, roam, scaleLimit, nameMap, textFixed, ...) } #' @rdname egauge #' @export egauge <- function(p, value, indicator = "", name = NULL, clickable = FALSE, legendHoverLink = TRUE, center = list("50%", "50%"), radius = list("0%", "75%"), startAngle = 225, endAngle = -45, min = 0, max = 100, splitNumber = 10, z = 2, zlevel = 0, tooltip, ...){ tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") name <- ifelse(is.null(name), indicator, name) p %>% egauge_(value, indicator, name, clickable, legendHoverLink, center, radius, startAngle, endAngle, min, max, splitNumber , z, zlevel, tooltip, ...) } #' @rdname efunnel #' @export efunnel <- function(p, serie, name = NULL, clickable = TRUE, legendHoverLink = TRUE, sort = "descending", min = NULL, max = NULL, x = 80, y = 60, x2 = 80, y2 = 60, width = NULL, height = NULL, funnelAlign = "center", minSize = "0%", maxSize = "100%", gap = 0, tooltip, ...){ tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") serie <- deparse(substitute(serie)) p %>% efunnel_(serie, name, clickable, legendHoverLink, sort, min, max, x, y, x2, y2, width, height, funnelAlign, minSize, maxSize, gap, tooltip, ...) 
} #' @rdname evenn #' @export evenn <- function(p, serie, name = NULL, clickable = TRUE, z = 2, zlevel = 0, tooltip = NULL, ...){ serie <- deparse(substitute(serie)) tooltip <- if(is.null(tooltip)) default_tooltip(trigger = "item") else tooltip p %>% evenn_( serie, name, clickable, z, zlevel, tooltip, ...) } #' @rdname ecloud #' @export ecloud <- function(p, freq, color, name = NULL, clickable = TRUE, center = list("50%", "50%"), size = list("40%", "40%"), textRotation = list(0, 90), autoSize = list(enable = TRUE, minSize = 12), z = 2, zlevel = 0, tooltip, ...){ tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") freq <- deparse(substitute(freq)) color <- if(!missing(color)) deparse(substitute(color)) else NULL p %>% ecloud_(freq, color, name, clickable, center, size, textRotation, autoSize, z, zlevel, tooltip, ...) } #' @rdname eheatmap #' @export eheatmap <- function(p, y, values, name = NULL, clickable = TRUE, blurSize = 30, minAlpha = 0.5, valueScale = 1, opacity = 1, z = 2, zlevel = 0, gradientColors, tooltip, ...){ gradientColors <- if(missing(gradientColors)) default_gradient() else gradientColors tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") y <- deparse(substitute(y)) values <- deparse(substitute(values)) p %>% eheatmap_(y, values, name, clickable, blurSize, minAlpha, valueScale, opacity, z, zlevel, gradientColors, tooltip, ...) 
} #' @rdname edata #' @export edata <- function(p, data, x){ # x if(!missing(x)){ xvar <- tryCatch(eval(substitute(x), data), error = function(e) e) if(is(xvar, "error")){ xvar <- x } } else { xvar <- list() } if(!missing(data)){ data <- map_grps_(data) assign("data", data, envir = data_env) } # assign for future use assign("x", xvar, envir = data_env) if(length(xvar)) assign("x.name", deparse(substitute(x)), envir = data_env) p } #' @rdname etreemap #' @export etreemap <- function(p, serie, name = NULL, itemStyle = NULL, clickable = FALSE, center = list("50%", "50%"), size = list("80%", "80%"), z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p <- p %>% etreemap_(serie, name, itemStyle, clickable, center, size, z, zlevel, ...) p } #' @rdname candlestick #' @export ecandle <- function(p, opening, closing, low, high, name = NULL, clickable = TRUE, z = 2, zlevel = 0, ...){ opening <- deparse(substitute(opening)) closing <- deparse(substitute(closing)) low <- deparse(substitute(low)) high <- deparse(substitute(high)) p <- p %>% ecandle_(opening, closing, low, high, name = NULL, clickable = TRUE, z = 2, zlevel = 0, ...) p }
/R/add.R
no_license
DATAUNIRIO/echarts
R
false
false
9,791
r
#' @rdname ebar #' @export ebar <- function(p, serie, name = NULL, stack = NULL, clickable = TRUE, xAxisIndex = 0, yAxisIndex = 0, barGap = "100%", barCategoryGap = "20%", legendHoverLink = TRUE, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p %>% ebar_(serie, name, stack, clickable, xAxisIndex, yAxisIndex, barGap, barCategoryGap, legendHoverLink, z, zlevel, ...) } #' @rdname eline #' @export eline <- function(p, serie, name = NULL, stack = NULL, clickable = TRUE, xAxisIndex = 0, yAxisIndex = 0, symbol = NULL, symbolSize = "2 | 4", symbolRotate = NULL, showAllSymbol = FALSE, smooth = TRUE, legendHoverLink = TRUE, dataFilter = "nearest", z = 2, zlevel = 0, tooltip, ...){ serie <- deparse(substitute(serie)) tooltip <- if(missing(tooltip)) default_tooltip(trigger = "axis") p %>% eline_(serie, name, stack, clickable, xAxisIndex, yAxisIndex, symbol, symbolSize, symbolRotate, showAllSymbol, smooth, legendHoverLink, dataFilter, z, zlevel, tooltip, ...) } #' @rdname earea #' @export earea <- function(p, serie, name = NULL, stack = NULL, smooth = TRUE, ...){ serie <- deparse(substitute(serie)) p %>% earea_(serie, name, stack, smooth, ...) } #' @rdname escatter #' @export escatter <- function(p, serie, size = NULL, name = NULL, clickable = TRUE, symbol = NULL, symbolSize = 4, symbolRotate = NULL, large = FALSE, largeThreshold = 2000, legendHoverLink = TRUE, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) size <- if(!missing(size)) deparse(substitute(size)) else NULL p %>% escatter_(serie, size, name, clickable, symbol, symbolSize, symbolRotate, large, largeThreshold, legendHoverLink, z, zlevel, ...) 
} #' @rdname epie #' @export epie <- function(p, serie, name = NULL, clickable = TRUE, legendHoverLink = TRUE, center = list("50%", "50%"), radius = list(0, "75%"), startAngle = 90, minAngle = 0, clockWise = TRUE, roseType = NULL, selectedOffset = 10, selectedMode = TRUE, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p %>% epie_(serie, name, clickable, legendHoverLink, center, radius, startAngle, minAngle, clockWise, roseType, selectedOffset, selectedMode, z, zlevel, ...) } #' @rdname eradar #' @export eradar <- function(p, serie, name = NULL, clickable = TRUE, symbol = NULL, symbolSize = 4, symbolRotate = NULL, legendHoverLink = TRUE, polarIndex = 0, z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p %>% eradar_(serie, name, clickable, symbol, symbolSize, symbolRotate, legendHoverLink, polarIndex, z, zlevel, ...) } #' @rdname echord #' @export echord <- function(p, name = NULL, sort = "none", sortSub = "none", clickable = TRUE, z = 2, zlevel = 0, symbol = NULL, symbolSize = NULL, clockWise = FALSE, minRadius = 10, maxRadius = 20, ribbonType = TRUE, showScale = FALSE, showScaleText = FALSE, padding = 2, ...){ p %>% echord_(name, sort, sortSub, clickable, z, zlevel, symbol, symbolSize, clockWise, minRadius, maxRadius, ribbonType, showScale, showScaleText, padding, ...) 
} #' @rdname emap_choropleth #' @export emap_choropleth <- function(p, serie){ serie <- deparse(substitute(serie)) p %>% emap_choropleth_(serie) } #' @rdname emap_coords #' @export emap_coords <- function(p, lon, lat){ lon <- deparse(substitute(lon)) lat <- deparse(substitute(lat)) p %>% emap_coords_(lon, lat) } #' #' @rdname emap_lines #' #' @export emap_lines <- function(p, edges, source, target, name = NULL, clickable = TRUE, symbol = "arrow", symbolSize = 2, symbolRotate = NULL, large = FALSE, smooth = TRUE, z = 2, zlevel = 0, smoothness = 0.2, precision = 2, bundling = list(enable = FALSE, maxTurningAngle = 45), ...){ source <- deparse(substitute(source)) target <- deparse(substitute(target)) p %>% emap_lines_(edges, source, target, name, clickable, symbol, symbolSize, symbolRotate, large, smooth, z, zlevel, smoothness, precision, bundling, ...) } #' @rdname emap_points #' @export emap_points <- function(p, serie, clickable = TRUE, symbol = "pin", symbolSize = 10, symbolRotate = NULL, large = FALSE, itemStyle = NULL, ...){ serie <- deparse(substitute(serie)) itemStyle <- if(is.null(itemStyle)) list(normal = list(label = list(show = FALSE))) else itemStyle p %>% emap_points_(serie, clickable, symbol, symbolSize, symbolRotate, large, itemStyle, ...) } #' @rdname emap_heat #' @export emap_heat <- function(p, lon, lat, z, blurSize = 30, minAlpha = 0.05, valueScale = 1, opacity = 1, gradientColors = NULL, ...){ gradientColors <- if(is.null(gradientColors)) default_gradient() else gradientColors lon <- deparse(substitute(lon)) lat <- deparse(substitute(lat)) z <- deparse(substitute(z)) p %>% emap_heat_(lon, lat, z, blurSize, minAlpha, valueScale, opacity, gradientColors, ...) 
} #' @rdname emap #' @export emap <- function(p, name = NULL, mapType = "world", clickable = TRUE, z = 2, zlevel = 0, selectedMode = NULL, hoverable = FALSE, dataRangeHoverLink = TRUE, mapLocation = list(x = "center", y = "center"), mapValueCalculation = "sum", mapValuePrecision = 0, showLegendSymbol = TRUE, roam = FALSE, scaleLimit = NULL, nameMap = NULL, textFixed = NULL, ...){ p %>% emap_(name, mapType, clickable, z, zlevel, selectedMode, hoverable, dataRangeHoverLink, mapLocation, mapValueCalculation, mapValuePrecision, showLegendSymbol, roam, scaleLimit, nameMap, textFixed, ...) } #' @rdname egauge #' @export egauge <- function(p, value, indicator = "", name = NULL, clickable = FALSE, legendHoverLink = TRUE, center = list("50%", "50%"), radius = list("0%", "75%"), startAngle = 225, endAngle = -45, min = 0, max = 100, splitNumber = 10, z = 2, zlevel = 0, tooltip, ...){ tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") name <- ifelse(is.null(name), indicator, name) p %>% egauge_(value, indicator, name, clickable, legendHoverLink, center, radius, startAngle, endAngle, min, max, splitNumber , z, zlevel, tooltip, ...) } #' @rdname efunnel #' @export efunnel <- function(p, serie, name = NULL, clickable = TRUE, legendHoverLink = TRUE, sort = "descending", min = NULL, max = NULL, x = 80, y = 60, x2 = 80, y2 = 60, width = NULL, height = NULL, funnelAlign = "center", minSize = "0%", maxSize = "100%", gap = 0, tooltip, ...){ tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") serie <- deparse(substitute(serie)) p %>% efunnel_(serie, name, clickable, legendHoverLink, sort, min, max, x, y, x2, y2, width, height, funnelAlign, minSize, maxSize, gap, tooltip, ...) 
} #' @rdname evenn #' @export evenn <- function(p, serie, name = NULL, clickable = TRUE, z = 2, zlevel = 0, tooltip = NULL, ...){ serie <- deparse(substitute(serie)) tooltip <- if(is.null(tooltip)) default_tooltip(trigger = "item") else tooltip p %>% evenn_( serie, name, clickable, z, zlevel, tooltip, ...) } #' @rdname ecloud #' @export ecloud <- function(p, freq, color, name = NULL, clickable = TRUE, center = list("50%", "50%"), size = list("40%", "40%"), textRotation = list(0, 90), autoSize = list(enable = TRUE, minSize = 12), z = 2, zlevel = 0, tooltip, ...){ tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") freq <- deparse(substitute(freq)) color <- if(!missing(color)) deparse(substitute(color)) else NULL p %>% ecloud_(freq, color, name, clickable, center, size, textRotation, autoSize, z, zlevel, tooltip, ...) } #' @rdname eheatmap #' @export eheatmap <- function(p, y, values, name = NULL, clickable = TRUE, blurSize = 30, minAlpha = 0.5, valueScale = 1, opacity = 1, z = 2, zlevel = 0, gradientColors, tooltip, ...){ gradientColors <- if(missing(gradientColors)) default_gradient() else gradientColors tooltip <- if(missing(tooltip)) default_tooltip(trigger = "item") y <- deparse(substitute(y)) values <- deparse(substitute(values)) p %>% eheatmap_(y, values, name, clickable, blurSize, minAlpha, valueScale, opacity, z, zlevel, gradientColors, tooltip, ...) 
} #' @rdname edata #' @export edata <- function(p, data, x){ # x if(!missing(x)){ xvar <- tryCatch(eval(substitute(x), data), error = function(e) e) if(is(xvar, "error")){ xvar <- x } } else { xvar <- list() } if(!missing(data)){ data <- map_grps_(data) assign("data", data, envir = data_env) } # assign for future use assign("x", xvar, envir = data_env) if(length(xvar)) assign("x.name", deparse(substitute(x)), envir = data_env) p } #' @rdname etreemap #' @export etreemap <- function(p, serie, name = NULL, itemStyle = NULL, clickable = FALSE, center = list("50%", "50%"), size = list("80%", "80%"), z = 2, zlevel = 0, ...){ serie <- deparse(substitute(serie)) p <- p %>% etreemap_(serie, name, itemStyle, clickable, center, size, z, zlevel, ...) p } #' @rdname candlestick #' @export ecandle <- function(p, opening, closing, low, high, name = NULL, clickable = TRUE, z = 2, zlevel = 0, ...){ opening <- deparse(substitute(opening)) closing <- deparse(substitute(closing)) low <- deparse(substitute(low)) high <- deparse(substitute(high)) p <- p %>% ecandle_(opening, closing, low, high, name = NULL, clickable = TRUE, z = 2, zlevel = 0, ...) p }
print.ewma <- function(x, ...) { cat("EWMA computation ") cat(paste("Call: ", x$call, " ")) }
/easyewma/R/print.ewma.R
no_license
pbosetti/easyewma
R
false
false
102
r
print.ewma <- function(x, ...) { cat("EWMA computation ") cat(paste("Call: ", x$call, " ")) }
pollutantmean <- function(directory, pollutant, id = 1:332) { ## 'directory' is a character vector of length 1 indicating ## the location of the CSV files ## 'pollutant' is a character vector of length 1 indicating ## the name of the pollutant for which we will calculate the ## mean; either "sulfate" or "nitrate". ## 'id' is an integer vector indicating the monitor ID numbers ## to be used ## Return the mean of the pollutant across all monitors list ## in the 'id' vector (ignoring NA values) ## NOTE: Do not round the result! men <- c() for (i in id) { tab <- read.csv(file.path(directory,dir(directory)[i]), header = T, sep = ",") men <- c(men, tab[[pollutant]]) } mean(men,na.rm = TRUE) }
/p1/pollutantmean.R
no_license
kevicao/JHRprogramming
R
false
false
800
r
pollutantmean <- function(directory, pollutant, id = 1:332) { ## 'directory' is a character vector of length 1 indicating ## the location of the CSV files ## 'pollutant' is a character vector of length 1 indicating ## the name of the pollutant for which we will calculate the ## mean; either "sulfate" or "nitrate". ## 'id' is an integer vector indicating the monitor ID numbers ## to be used ## Return the mean of the pollutant across all monitors list ## in the 'id' vector (ignoring NA values) ## NOTE: Do not round the result! men <- c() for (i in id) { tab <- read.csv(file.path(directory,dir(directory)[i]), header = T, sep = ",") men <- c(men, tab[[pollutant]]) } mean(men,na.rm = TRUE) }
epc <- read.table("household_power_consumption.txt",header=T,sep=";",colClasses=c("character","character","double","double","double","double","double","double","numeric"),na.strings="?") epc$DateTime = paste(epc$Date, epc$Time) epc$DateTime = as.POSIXlt(epc$DateTime,format="%d/%m/%Y %H:%M:%S") epc$Date = NULL epc$Time = NULL subepc <- subset(epc,DateTime$year==107 & DateTime$mon==1 & (DateTime$mday==1 | DateTime$mday==2)) env <- par() png("plot4.png") par(mfrow=c(2,2)) plot(x=(subepc$DateTime),y=subepc$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="") plot(x=(subepc$DateTime),y=subepc$Voltage,type="l",ylab="Voltage",xlab="datetime") plot(x=(subepc$DateTime),y=subepc$Sub_metering_1,type="l",ylab="Energy sub metering",xlab="") lines(x=(subepc$DateTime),y=subepc$Sub_metering_2,col="red") lines(x=(subepc$DateTime),y=subepc$Sub_metering_3,col="blue") legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="l",col=c("black","red","blue"),lwd=2,cex=0.4) plot(subepc$DateTime, subepc$Global_reactive_power, type = "l", main = "", xlab = "datetime") par(env) dev.off()
/plot4.R
no_license
jammykhan/ExData_Plotting1
R
false
false
1,131
r
epc <- read.table("household_power_consumption.txt",header=T,sep=";",colClasses=c("character","character","double","double","double","double","double","double","numeric"),na.strings="?") epc$DateTime = paste(epc$Date, epc$Time) epc$DateTime = as.POSIXlt(epc$DateTime,format="%d/%m/%Y %H:%M:%S") epc$Date = NULL epc$Time = NULL subepc <- subset(epc,DateTime$year==107 & DateTime$mon==1 & (DateTime$mday==1 | DateTime$mday==2)) env <- par() png("plot4.png") par(mfrow=c(2,2)) plot(x=(subepc$DateTime),y=subepc$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="") plot(x=(subepc$DateTime),y=subepc$Voltage,type="l",ylab="Voltage",xlab="datetime") plot(x=(subepc$DateTime),y=subepc$Sub_metering_1,type="l",ylab="Energy sub metering",xlab="") lines(x=(subepc$DateTime),y=subepc$Sub_metering_2,col="red") lines(x=(subepc$DateTime),y=subepc$Sub_metering_3,col="blue") legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="l",col=c("black","red","blue"),lwd=2,cex=0.4) plot(subepc$DateTime, subepc$Global_reactive_power, type = "l", main = "", xlab = "datetime") par(env) dev.off()
### points per shot attempt over time # displays efficiency growth # efficiency growth and shot volume # x value: time # y value: points per attempt # z value: shot volume # displayed as: another line, 2D area # why points per attempt as efficiency metric? # ignores free throws, as an improvement in FT shooting can disguise as an improvement in shot selection install.packages("pracma") install.packages("xts") install.packages("quantmod") library(pracma) library(xts) library(quantmod) save(vassell, file = "vassell.Rdata") load(file = "vassell.Rdata") ## per game ppa vassell$ppa <- (vassell$X2P*2 + vassell$X3P*3) / vassell$FGA ## cumulative ppa vassell$cump <- cumsum(vassell$X2P*2) + cumsum(vassell$X3P*3) vassell$cuma <- cumsum(vassell$FGA) vassell$cum.ppa <- vassell$cump / vassell$cuma vassell$ppa.wma <- WMA(vassell$cum.ppa, 6) plot(x = vassell$Rk, xlab = "game", y = vassell$cum.ppa, ylab = "PPA", type = "h", main = "Devin Vassell (sophomore, Florida State) points per attempt 2018-2020, through 50 games") lines(x = vassell$Rk, y = vassell$ppa.wma, type = "l", col = "firebrick1") vassell$fga.wma <- WMA(vassell$FGA, 6) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$FGA[1:56], ylab = "FGA", type = "h", main = "Devin Vassell (sophomore, Florida State) shot attempts per game 2018-2020, through 50 games") lines(x = vassell$Rk, y = vassell$fga.wma, type = "l", col = "firebrick1") ## ncaa averages save(ncaa, file = "ncaa.Rdata") load(file = "ncaa.Rdata") ncaa$cump <- cumsum((ncaa$FG-ncaa$X3P)*2) + cumsum(ncaa$X3P*3) ncaa$cuma <- cumsum(ncaa$FGA) ncaa$cum.ppa <- ncaa$cump / ncaa$cuma ncaa$ncaa.ppa <- ncaa$cum.ppa[353] ncaa$ppa <- ((ncaa$FG-ncaa$X3P)*2 + ncaa$X3P*3) / ncaa$FGA summary(ncaa$ppa) ppa.95 <- quantile(ncaa$ppa, c(.05, .50, .95)) ## ppa and fga plot # bar/line graph par(mar = c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "", y = vassell$ppa.wma[1:56], ylab = "", ylim = c(.9,1.2), type = "h", lwd = 4, main = "Devin Vassell (sophomore, Florida State) moving 
average points per attempt and attempts per game 2018-2020 through 51 games", col = "cadetblue") axis(2) mtext("points per attempt", side = 2, line = 3, las = 3) abline(h = ppa.95[1], lty = 2, lwd = 2, col = "black") mtext("NCAA ppa - 5%", side = 2, col = "black", las = 2, line = -4.2, cex = .7, at = (ppa.95[1] + .005)) abline(h = ncaa$ncaa.ppa[1], lty = 2, lwd = 2, col = "black") mtext("NCAA ppa", side = 2, col = "black", las = 2, line = -3, cex = .7, at = (ncaa$ncaa.ppa[1] + .005)) abline(h = ppa.95[3], lty = 2, lwd = 2, col = "black") mtext("NCAA ppa - 95%", side = 2, col = "black", las = 2, line = -4.6, cex = .7, at = (ppa.95[3] + .005)) abline(h = 1.0503525, lty = 2, lwd = 2, col = "black") mtext("FSU ppa", side = 2, col = "black", las = 2, line = -2.5, cex = .7, at = (ncaa$ppa[97] + .005)) par(new = TRUE) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$fga.wma[1:56], ylab = "", yaxt = "n", type = "l", lwd = 3, main = "", col = "brown4") axis(4) mtext("attempts per game", side = 4, line = 2.5, las = 3) abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) legend("bottomright", c("points per attempt", "attempts per game"), col = c("cadetblue","brown4"), lwd = c(4, 4), lty = c(1, 1), cex = .65) # scatterplot par(mar= c(4,4,4,4)) plot(y = vassell$ppa[1:56], xlab = "shot attempts", x = vassell$FGA[1:56], ylab = "points per attempt", type = "p", lwd = 4, lty = 4, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) points per attempt and attempts per game 2018-2020 through 51 games") abline(h = ncaa$ncaa.ppa[1], lty = 2, lwd = 1, col = "black") mtext("NCAA average ppa", side = 2, col = "black", las = 2, line = -5.5, cex = .7, at = (ncaa$ppa[1] + .06)) abline(h = 1.12, lty = 2, lwd = 1, col = "black") mtext("Vassell ppa", side = 2, col = "black", las = 2, line = -3.5, cex = .7, at = (1.12 + .06)) mtext(".0259 r-squared", side = 4, las = 2, line = -7.5, cex = .8, at = 2.9, lwd = 3) 
mtext(".2742 p-value", side = 4, las = 2, line = -7.5, cex = .8, at = 3, lwd = 3) ### points, shots per minute par(mar= c(4,4,4,4)) plot(y = vassell$MP[1:56], ylab = "minutes played", x = vassell$PTS[1:56], xlab = "points per game", type = "p", lwd = 2, lty = 2, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) points per game and minutes played 2018-2020 through 51 games") par(mar= c(4,4,4,4)) plot(y = vassell$MP[1:56], ylab = "minutes played", x = vassell$FGA[1:56], xlab = "shots per game", type = "p", lwd = 2, lty = 2, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) shots per game and minutes played 2018-2020 through 51 games") ### defensive analysis steals.regression <- lm(vassell$STL[1:56] ~ vassell$MP[1:56]) summary(steals.regression) blocks.regression <- lm(vassell$BLK[1:56] ~ vassell$MP[1:56]) summary(blocks.regression) rebounds.regression <- lm(vassell$TRB[1:56] ~ vassell$MP[1:56]) summary(rebounds.regression) efficiency.regression <- lm(vassell$FGA[1:56] ~ vassell$ppa[1:56]) summary(efficiency.regression) efficiency.rsq <- 0.02593 par(mar= c(4,4,4,4)) plot(x = vassell$MP[1:56], xlab = "minutes played", y = vassell$STL[1:56], ylab = "steals per game", type = "p", lwd = 2, lty = 2, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) steals per game and minutes played 2018-2020 through 50 games") par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$STL[1:56], ylab = "steals", ylim = c(0,3), type = "h", lwd = 4, lty = 1, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) steals per game 2018-2020 through 51 games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$BLK[1:56], ylab = "blocks", type = "h", lwd = 4, lty = 1, col = "firebrick1", main = "Devin Vassell (sophomore, Florida State) blocks per game 2018-2020 through 51 
games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$TRB[1:56], ylab = "total rebounds", type = "h", lwd = 4, lty = 1, col = "forestgreen", main = "Devin Vassell (sophomore, Florida State) rebounds per game 2018-2020 through 51 games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$PTS[1:56], ylab = "points", type = "h", lwd = 4, lty = 1, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) points per game 2018-2020 through 51 games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34)
/vassell.R
no_license
watercam/college-hoops
R
false
false
7,053
r
### points per shot attempt over time # displays efficiency growth # efficiency growth and shot volume # x value: time # y value: points per attempt # z value: shot volume # displayed as: another line, 2D area # why points per attempt as efficiency metric? # ignores free throws, as an improvement in FT shooting can disguise as an improvement in shot selection install.packages("pracma") install.packages("xts") install.packages("quantmod") library(pracma) library(xts) library(quantmod) save(vassell, file = "vassell.Rdata") load(file = "vassell.Rdata") ## per game ppa vassell$ppa <- (vassell$X2P*2 + vassell$X3P*3) / vassell$FGA ## cumulative ppa vassell$cump <- cumsum(vassell$X2P*2) + cumsum(vassell$X3P*3) vassell$cuma <- cumsum(vassell$FGA) vassell$cum.ppa <- vassell$cump / vassell$cuma vassell$ppa.wma <- WMA(vassell$cum.ppa, 6) plot(x = vassell$Rk, xlab = "game", y = vassell$cum.ppa, ylab = "PPA", type = "h", main = "Devin Vassell (sophomore, Florida State) points per attempt 2018-2020, through 50 games") lines(x = vassell$Rk, y = vassell$ppa.wma, type = "l", col = "firebrick1") vassell$fga.wma <- WMA(vassell$FGA, 6) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$FGA[1:56], ylab = "FGA", type = "h", main = "Devin Vassell (sophomore, Florida State) shot attempts per game 2018-2020, through 50 games") lines(x = vassell$Rk, y = vassell$fga.wma, type = "l", col = "firebrick1") ## ncaa averages save(ncaa, file = "ncaa.Rdata") load(file = "ncaa.Rdata") ncaa$cump <- cumsum((ncaa$FG-ncaa$X3P)*2) + cumsum(ncaa$X3P*3) ncaa$cuma <- cumsum(ncaa$FGA) ncaa$cum.ppa <- ncaa$cump / ncaa$cuma ncaa$ncaa.ppa <- ncaa$cum.ppa[353] ncaa$ppa <- ((ncaa$FG-ncaa$X3P)*2 + ncaa$X3P*3) / ncaa$FGA summary(ncaa$ppa) ppa.95 <- quantile(ncaa$ppa, c(.05, .50, .95)) ## ppa and fga plot # bar/line graph par(mar = c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "", y = vassell$ppa.wma[1:56], ylab = "", ylim = c(.9,1.2), type = "h", lwd = 4, main = "Devin Vassell (sophomore, Florida State) moving 
average points per attempt and attempts per game 2018-2020 through 51 games", col = "cadetblue") axis(2) mtext("points per attempt", side = 2, line = 3, las = 3) abline(h = ppa.95[1], lty = 2, lwd = 2, col = "black") mtext("NCAA ppa - 5%", side = 2, col = "black", las = 2, line = -4.2, cex = .7, at = (ppa.95[1] + .005)) abline(h = ncaa$ncaa.ppa[1], lty = 2, lwd = 2, col = "black") mtext("NCAA ppa", side = 2, col = "black", las = 2, line = -3, cex = .7, at = (ncaa$ncaa.ppa[1] + .005)) abline(h = ppa.95[3], lty = 2, lwd = 2, col = "black") mtext("NCAA ppa - 95%", side = 2, col = "black", las = 2, line = -4.6, cex = .7, at = (ppa.95[3] + .005)) abline(h = 1.0503525, lty = 2, lwd = 2, col = "black") mtext("FSU ppa", side = 2, col = "black", las = 2, line = -2.5, cex = .7, at = (ncaa$ppa[97] + .005)) par(new = TRUE) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$fga.wma[1:56], ylab = "", yaxt = "n", type = "l", lwd = 3, main = "", col = "brown4") axis(4) mtext("attempts per game", side = 4, line = 2.5, las = 3) abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) legend("bottomright", c("points per attempt", "attempts per game"), col = c("cadetblue","brown4"), lwd = c(4, 4), lty = c(1, 1), cex = .65) # scatterplot par(mar= c(4,4,4,4)) plot(y = vassell$ppa[1:56], xlab = "shot attempts", x = vassell$FGA[1:56], ylab = "points per attempt", type = "p", lwd = 4, lty = 4, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) points per attempt and attempts per game 2018-2020 through 51 games") abline(h = ncaa$ncaa.ppa[1], lty = 2, lwd = 1, col = "black") mtext("NCAA average ppa", side = 2, col = "black", las = 2, line = -5.5, cex = .7, at = (ncaa$ppa[1] + .06)) abline(h = 1.12, lty = 2, lwd = 1, col = "black") mtext("Vassell ppa", side = 2, col = "black", las = 2, line = -3.5, cex = .7, at = (1.12 + .06)) mtext(".0259 r-squared", side = 4, las = 2, line = -7.5, cex = .8, at = 2.9, lwd = 3) 
mtext(".2742 p-value", side = 4, las = 2, line = -7.5, cex = .8, at = 3, lwd = 3) ### points, shots per minute par(mar= c(4,4,4,4)) plot(y = vassell$MP[1:56], ylab = "minutes played", x = vassell$PTS[1:56], xlab = "points per game", type = "p", lwd = 2, lty = 2, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) points per game and minutes played 2018-2020 through 51 games") par(mar= c(4,4,4,4)) plot(y = vassell$MP[1:56], ylab = "minutes played", x = vassell$FGA[1:56], xlab = "shots per game", type = "p", lwd = 2, lty = 2, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) shots per game and minutes played 2018-2020 through 51 games") ### defensive analysis steals.regression <- lm(vassell$STL[1:56] ~ vassell$MP[1:56]) summary(steals.regression) blocks.regression <- lm(vassell$BLK[1:56] ~ vassell$MP[1:56]) summary(blocks.regression) rebounds.regression <- lm(vassell$TRB[1:56] ~ vassell$MP[1:56]) summary(rebounds.regression) efficiency.regression <- lm(vassell$FGA[1:56] ~ vassell$ppa[1:56]) summary(efficiency.regression) efficiency.rsq <- 0.02593 par(mar= c(4,4,4,4)) plot(x = vassell$MP[1:56], xlab = "minutes played", y = vassell$STL[1:56], ylab = "steals per game", type = "p", lwd = 2, lty = 2, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) steals per game and minutes played 2018-2020 through 50 games") par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$STL[1:56], ylab = "steals", ylim = c(0,3), type = "h", lwd = 4, lty = 1, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) steals per game 2018-2020 through 51 games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$BLK[1:56], ylab = "blocks", type = "h", lwd = 4, lty = 1, col = "firebrick1", main = "Devin Vassell (sophomore, Florida State) blocks per game 2018-2020 through 51 
games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$TRB[1:56], ylab = "total rebounds", type = "h", lwd = 4, lty = 1, col = "forestgreen", main = "Devin Vassell (sophomore, Florida State) rebounds per game 2018-2020 through 51 games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34) par(mar= c(4,4,4,4)) plot(x = vassell$Rk[1:56], xlab = "game", y = vassell$PTS[1:56], ylab = "points", type = "h", lwd = 4, lty = 1, col = "cadetblue", main = "Devin Vassell (sophomore, Florida State) points per game 2018-2020 through 51 games") abline(v = 34, lty = 2, lwd = 2, col = "black") mtext("2019 Season", side = 1, las = 1, line = .15, cex = .7, at = 34)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllData.R \docType{data} \name{calcium} \alias{calc.rho} \alias{calcium} \alias{calciumse} \alias{fastgluc} \alias{fastglucse} \title{Data on effect of calcium on fasting glucose (correlated variants)} \format{An object of class \code{numeric} of length 6.} \usage{ calcium calciumse fastgluc fastglucse calc.rho } \description{ Two sets of example data are included in the package: one illustrating uncorrelated variants, and the other correlated variants. These are the data on correlated variants. } \details{ The variables \code{calcium}, and \code{fastgluc} are the genetic associations with calcium and fasting glucose for 6 genetic variants reported by Burgess et al (2015). The respective standard errors of the associations are given as \code{calciumse} and \code{fastglucse}. The matrix of correlations between the genetic variants is given as \code{calc.rho}. These data can be used to test out the various functions in the package. } \references{ Stephen Burgess, Robert A Scott, Nic J Timpson, George Davey Smith, Simon G Thompson. Using published data in Mendelian randomization: a blueprint for efficient identification of causal risk factors. Eur J Epidemiol 2015; 30(7):543-552. doi: 10.1007/s10654-015-0011-z. } \keyword{datasets}
/man/calcium.Rd
no_license
GeneticResources/MendelianRandomization
R
false
true
1,368
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllData.R \docType{data} \name{calcium} \alias{calc.rho} \alias{calcium} \alias{calciumse} \alias{fastgluc} \alias{fastglucse} \title{Data on effect of calcium on fasting glucose (correlated variants)} \format{An object of class \code{numeric} of length 6.} \usage{ calcium calciumse fastgluc fastglucse calc.rho } \description{ Two sets of example data are included in the package: one illustrating uncorrelated variants, and the other correlated variants. These are the data on correlated variants. } \details{ The variables \code{calcium}, and \code{fastgluc} are the genetic associations with calcium and fasting glucose for 6 genetic variants reported by Burgess et al (2015). The respective standard errors of the associations are given as \code{calciumse} and \code{fastglucse}. The matrix of correlations between the genetic variants is given as \code{calc.rho}. These data can be used to test out the various functions in the package. } \references{ Stephen Burgess, Robert A Scott, Nic J Timpson, George Davey Smith, Simon G Thompson. Using published data in Mendelian randomization: a blueprint for efficient identification of causal risk factors. Eur J Epidemiol 2015; 30(7):543-552. doi: 10.1007/s10654-015-0011-z. } \keyword{datasets}
#Step 2: Variable assiment my_var1 <- 42 my_var2 <- 35.25 my_var1 + 100 my_var1 + my_var2 - 12 my_var3 <- my_var1^2 + my_var2^2 my_var3 #Step 3: Logical opperartions my_var3 > 200 my_var3 > 3009 my_var1 == my_var2 my_var1 != my_var2 my_var3 >= 200 my_var3 <= 200 my_new_var <- my_var1 == my_var2 #Step 6, 7, 10, 11: Vectors 1 : 67 my_vector1 <- 1:67 my_vector2 <- c(-32, 45, 67, 12.78, 129, 0, -65) #Создайте переменную the_best_vector, в которой хранятся числа от 1 до 5000 и затем числа от 7000 до 10000. the_best_vector <- c(1:5000,7000:10000) # В уже созданной переменной my_numbers сохранен вектор из 20 целых чисел. # Ваша задача создать новую переменную my_numbers_2, в которой будет сохранен # 2, 5, 7, 9, 12, 16 и 20 элемент вектора my_numbers. my_numbers <- c(6,13,13,1,18,15,6,13,19,17,19,17,18,6,0,15,10,19,10,15) my_numbers_2 <- my_numbers[c(2,5,7,9,12,16,20)] my_vector1[1] my_vector1[3] my_vector2[2] my_vector2[c(1,2,3)] my_vector2[1:3] my_vector2[c(1,5,6,7,10)] my_vector1 + 10 my_vector2 + 56 my_vector2 == 0 my_vector1 > 30 x <- 23 my_vector1 > 23 x == 23 # my_vector1 > x my_vector2 > 0 my_vector2[my_vector2 > 0] my_vector2[my_vector2 < 0] my_vector2[my_vector2 == 0] my_vector1[my_vector1 > 20 & my_vector1 < 30] my_numbers <- my_vector1[my_vector1 > 20 & my_vector1 < 30] positive_numbers <- my_vector2[my_vector2 > 0] v1 <- c(165, 178, 180, 181, 167, 178, 187, 167, 187) mean_v1 <- mean(v1) v1[v1 > mean_v1] greater_than_mean <- v1[v1 > mean_v1] # В уже созданной переменной my_vector хранится вектор из 20 целых чисел. # Найдите сумму всех элементов вектора , которые больше 10. Сохраните сумму в переменную my_sum. 
# sum(x) - сумма элементов числового вектора x my_vector <- c(8, 13, 9, 18, 7, 2, 15, 2, 8, 18, 6, 8, 9, 6, 12, 11, 3, 1, 2, 14) my_sum <- sum(my_vector[my_vector > 10]) #Step 13: Lists and dataframes age <- c(16, 18, 22, 27) is_maried <- c(F, F, T, T) # example data <- list(age, is_maried) data data[[1]][1] data[[2]][4] name <- c("Olga", "Maria", "Nastya", "Polina") data <- list(age, is_maried, name) data my_data <- data.frame(Name = name, Age = age, Status = is_maried) my_data typeof(my_data) # В векторе my_vector отберите только те наблюдения, # которые отклоняются от среднего меньше чем на одно стандартное отклонение. # Сохраните эти наблюдения в новую переменную my_vector_2. # При этом исходный вектор my_vector оставьте без изменений. # mean(x)среднее значение вектора x # sd(x) стандартное отклонение вектора x # abs(n) абсолютное значение числа n # Найти среднее значение mean(x) # Найти стандартное отклонение sd(x) # Отклонение от среднего значения может быть как в одну сторону +, так в другую сторону -, значит: # Найти mean(x) + sd(x) # Найти mean(x) - sd(x) # Значения mean(x) + sd(x) и mean(x) - sd(x) являются числами-границами, # в пределах которых должны лежать искомые значения my_vector2, значит: # Найти значения my_vector < mean(x) + sd(x) # Найти значения my_vector > mean(x) - sd(x) # С помощью & объединить два вектора # (т.е. 
сделать пересечение множеств значений my_vector < mean(x) + sd(x) и my_vector > mean(x) - sd(x)) my_vector <- c(21, 18, 21, 19, 25, 20, 17, 17, 18, 22, 17, 18, 18, 19, 19, 27, 21, 20, 24, 17, 15, 24, 24, 29, 19, 14, 21, 17, 19, 18, 18, 20, 21, 21, 19, 19, 17, 21, 13, 17, 13, 23, 15, 23, 24, 16, 17, 25, 24, 22) my_vector < mean(my_vector) + sd(my_vector) my_vector > mean(my_vector) - sd(my_vector) my_vector_2 <- my_vector[my_vector > mean(my_vector) - sd(my_vector) & my_vector < mean(my_vector) + sd(my_vector)] my_vector_2 my_vector_2 <- my_vector[abs(my_vector - mean(my_vector)) < sd(my_vector)] my_vector_2 mean_my_vector <- mean(my_vector) sd_my_vector <- sd(my_vector) my_vector_2 <- my_vector[abs(my_vector - mean_my_vector) < sd_my_vector] my_vector_2
/data_pre-processing/Variables.R
permissive
testor321/R_in_statistics
R
false
false
4,694
r
#Step 2: Variable assiment my_var1 <- 42 my_var2 <- 35.25 my_var1 + 100 my_var1 + my_var2 - 12 my_var3 <- my_var1^2 + my_var2^2 my_var3 #Step 3: Logical opperartions my_var3 > 200 my_var3 > 3009 my_var1 == my_var2 my_var1 != my_var2 my_var3 >= 200 my_var3 <= 200 my_new_var <- my_var1 == my_var2 #Step 6, 7, 10, 11: Vectors 1 : 67 my_vector1 <- 1:67 my_vector2 <- c(-32, 45, 67, 12.78, 129, 0, -65) #Создайте переменную the_best_vector, в которой хранятся числа от 1 до 5000 и затем числа от 7000 до 10000. the_best_vector <- c(1:5000,7000:10000) # В уже созданной переменной my_numbers сохранен вектор из 20 целых чисел. # Ваша задача создать новую переменную my_numbers_2, в которой будет сохранен # 2, 5, 7, 9, 12, 16 и 20 элемент вектора my_numbers. my_numbers <- c(6,13,13,1,18,15,6,13,19,17,19,17,18,6,0,15,10,19,10,15) my_numbers_2 <- my_numbers[c(2,5,7,9,12,16,20)] my_vector1[1] my_vector1[3] my_vector2[2] my_vector2[c(1,2,3)] my_vector2[1:3] my_vector2[c(1,5,6,7,10)] my_vector1 + 10 my_vector2 + 56 my_vector2 == 0 my_vector1 > 30 x <- 23 my_vector1 > 23 x == 23 # my_vector1 > x my_vector2 > 0 my_vector2[my_vector2 > 0] my_vector2[my_vector2 < 0] my_vector2[my_vector2 == 0] my_vector1[my_vector1 > 20 & my_vector1 < 30] my_numbers <- my_vector1[my_vector1 > 20 & my_vector1 < 30] positive_numbers <- my_vector2[my_vector2 > 0] v1 <- c(165, 178, 180, 181, 167, 178, 187, 167, 187) mean_v1 <- mean(v1) v1[v1 > mean_v1] greater_than_mean <- v1[v1 > mean_v1] # В уже созданной переменной my_vector хранится вектор из 20 целых чисел. # Найдите сумму всех элементов вектора , которые больше 10. Сохраните сумму в переменную my_sum. 
# sum(x) - сумма элементов числового вектора x my_vector <- c(8, 13, 9, 18, 7, 2, 15, 2, 8, 18, 6, 8, 9, 6, 12, 11, 3, 1, 2, 14) my_sum <- sum(my_vector[my_vector > 10]) #Step 13: Lists and dataframes age <- c(16, 18, 22, 27) is_maried <- c(F, F, T, T) # example data <- list(age, is_maried) data data[[1]][1] data[[2]][4] name <- c("Olga", "Maria", "Nastya", "Polina") data <- list(age, is_maried, name) data my_data <- data.frame(Name = name, Age = age, Status = is_maried) my_data typeof(my_data) # В векторе my_vector отберите только те наблюдения, # которые отклоняются от среднего меньше чем на одно стандартное отклонение. # Сохраните эти наблюдения в новую переменную my_vector_2. # При этом исходный вектор my_vector оставьте без изменений. # mean(x)среднее значение вектора x # sd(x) стандартное отклонение вектора x # abs(n) абсолютное значение числа n # Найти среднее значение mean(x) # Найти стандартное отклонение sd(x) # Отклонение от среднего значения может быть как в одну сторону +, так в другую сторону -, значит: # Найти mean(x) + sd(x) # Найти mean(x) - sd(x) # Значения mean(x) + sd(x) и mean(x) - sd(x) являются числами-границами, # в пределах которых должны лежать искомые значения my_vector2, значит: # Найти значения my_vector < mean(x) + sd(x) # Найти значения my_vector > mean(x) - sd(x) # С помощью & объединить два вектора # (т.е. 
сделать пересечение множеств значений my_vector < mean(x) + sd(x) и my_vector > mean(x) - sd(x)) my_vector <- c(21, 18, 21, 19, 25, 20, 17, 17, 18, 22, 17, 18, 18, 19, 19, 27, 21, 20, 24, 17, 15, 24, 24, 29, 19, 14, 21, 17, 19, 18, 18, 20, 21, 21, 19, 19, 17, 21, 13, 17, 13, 23, 15, 23, 24, 16, 17, 25, 24, 22) my_vector < mean(my_vector) + sd(my_vector) my_vector > mean(my_vector) - sd(my_vector) my_vector_2 <- my_vector[my_vector > mean(my_vector) - sd(my_vector) & my_vector < mean(my_vector) + sd(my_vector)] my_vector_2 my_vector_2 <- my_vector[abs(my_vector - mean(my_vector)) < sd(my_vector)] my_vector_2 mean_my_vector <- mean(my_vector) sd_my_vector <- sd(my_vector) my_vector_2 <- my_vector[abs(my_vector - mean_my_vector) < sd_my_vector] my_vector_2
#nread data train <- read.csv('NYTimesBlogTrain.csv',stringsAsFactors=F) test <- read.csv('NYTimesBlogTest.csv',stringsAsFactors=F) #load libraries library(tm) library(dplyr) library(ggplot2) library(tidyr) library(chron) library(rpart) library(rpart.plot) library(caret) library(rattle) library(randomForest) library(kknn) library(glmnet) library(gbm) library(caretEnsemble) library(doMC) library(tau) library(e1071) #I am going to combine the data to do the processing together, then I can split that #it seems like duplicate qwork to do it at once and besides when I select the less sparse data who knows who I will do that train$group = 1 test$group= 0 data <- bind_rows(train,test) table(data$group) #let's do some basic processing data$date = as.Date(data$PubDate,'%Y-%m-%d ') data$time = sapply(strsplit(data$PubDate,' '), function(x) times(x[2])) data$weekday = weekdays(data$date) data$hour = as.character((hours(data$time))) #I need to create a corpus for the headline, for the snippet and for the abstract corpus_hl <- Corpus(VectorSource(data$Headline)) corpus_hl <- tm_map(corpus_hl,tolower) corpus_hl <- tm_map(corpus_hl,PlainTextDocument) corpus_hl <- tm_map(corpus_hl,removePunctuation) corpus_hl <- tm_map(corpus_hl,removeWords, stopwords("english")) corpus_hl <- tm_map(corpus_hl,stemDocument) dtm_hl <- DocumentTermMatrix(corpus_hl) dtm_hl_dense = removeSparseTerms(dtm_hl, 0.995) headline_words <- as.data.frame(as.matrix(dtm_hl_dense)) names(headline_words) <- paste0('head_',names(headline_words)) corpus_sn <- Corpus(VectorSource(data$Snippet)) corpus_sn <- tm_map(corpus_sn,tolower) corpus_sn <- tm_map(corpus_sn,PlainTextDocument) corpus_sn <- tm_map(corpus_sn,removePunctuation) corpus_sn <- tm_map(corpus_sn,removeWords, stopwords("english")) corpus_sn <- tm_map(corpus_sn,stemDocument) dtm_sn <- DocumentTermMatrix(corpus_sn) dtm_sn_dense = removeSparseTerms(dtm_sn, 0.99) snippet_words <- as.data.frame(as.matrix(dtm_sn_dense)) names(snippet_words) <- 
paste0('snip_',names(snippet_words)) corpus_abs <- Corpus(VectorSource(data$Abstract)) corpus_abs <- tm_map(corpus_abs,tolower) corpus_abs <- tm_map(corpus_abs,PlainTextDocument) corpus_abs <- tm_map(corpus_abs,removePunctuation) corpus_abs <- tm_map(corpus_abs,removeWords, stopwords("english")) corpus_abs <- tm_map(corpus_abs,stemDocument) dtm_abs <- DocumentTermMatrix(corpus_abs) dtm_abs_dense = removeSparseTerms(dtm_abs, 0.99) abstract_words <- as.data.frame(as.matrix(dtm_abs_dense)) names(abstract_words) <- paste0('abs_',names(abstract_words)) #merge back to create a modeling set modeling <- cbind(data,headline_words,snippet_words,abstract_words) row.names(modeling) <- NULL modeling$NewsDesk <- factor(modeling$NewsDesk) modeling$SectionName <- factor(modeling$SectionName) modeling$SubsectionName <- factor(modeling$SubsectionName) modeling$weekday <- factor(modeling$weekday) #for some reason, caret does some preprocessing to the factors and messes them up for prediction, the best way is to vreate my own categorical variables for them #on modleing data and be done with it #for that I will use dplyr extra <- modeling %>% select(NewsDesk,SectionName,SubsectionName,weekday,UniqueID) %>% gather(var,level,-UniqueID) %>% mutate(level=ifelse(level=="",'blank',level),n=1) %>% unite(name,var,level,sep="_") %>% spread(name,n,fill=0) modeling2 <- inner_join(modeling,extra) modeling2 <- modeling2 %>% select(-c(NewsDesk,SectionName,SubsectionName,weekday)) which(sapply(modeling2,function(x) sum(is.na(x))>0)) names(modeling2) <- make.names(names(modeling2)) names(modeling2) <- gsub("\\.+","",names(modeling2)) modeling2$hour <- as.numeric(modeling2$hour) #split into train and test train_new <- subset(modeling2,group==1,-group) train_new$Popular <- as.factor(train_new$Popular) test_new <- subset(modeling2,group==0,-c(group,Popular)) which(sapply(test_new,function(x) sum(is.na(x))>0)) which(sapply(train_new,function(x) sum(is.na(x))>0)) 
############################################# #Models # Now that I have somedata, lest establish a a baseline, guessnot prop.table(table(train_new$Popular)) confusionMatrix(factor(rep(0,dim(train_new)[1]),c(0,1)),factor(train_new$Popular,c(0,1)),positive='1') #83.27% drop1 = which((names(train_new) %in% c('PubDate','date','time','UniqueID','Headline','Snippet','Abstract'))) #start with a logistic model1 <- glm(Popular~. ,train_new[,-drop1],family='binomial') pred1 <- predict(model1,newdata=test_new,type='response') write.csv(data.frame(UniqueID=test_new$UniqueID,Probability1=pred1),'submission1.csv',row.names=F) confusionMatrix(factor(as.numeric(predict(model1,type='response')>=0.5,c(0,1))), factor(train_new$Popular,c(0,1)),positive='1') #this was not so good, but it was just a test of how well I can do, mostly test the sumission #it will take forever to clean the variable by nae o n logistic #try an rpart cv on caret #from a Kuhn presentation I found, he explain how to tune ROC, whihc is how we are being measured #summaryFunction = twoClassSummary),metric="ROC" train2 = train_new[,setdiff(1:dim(train_new)[2],drop1)] train2$Popular = factor(train2$Popular,c(0,1),labels=c('N',"Y")) cvctrl <- trainControl(method = "cv", number = 10, summaryFunction = twoClassSummary, classProbs = TRUE) cpgrid = expand.grid(.cp=seq(0.01,.1,by=0.01)) set.seed(1) model2 <- train(Popular ~ ., data = train2, method = "rpart",preProcess=NULL, metric = "ROC", trControl = cvctrl,tuneGrid=cpgrid) model2 model2$finalModel fancyRpartPlot(model2$finalModel) confusionMatrix(factor(as.numeric(predict(model2$finalModel)[,2]>=0.5),c(0,1),labels=c('N',"Y")), train2$Popular,positive='Y') pred2 <- predict(model2$finalModel,newdata=test_new) write.csv(data.frame(UniqueID=test_new$UniqueID,Probability1=pred2[,2]),'submission2.csv',row.names=F) #Random Forest model model3 <- randomForest(Popular~.,data=train2) model3 pred3 <- predict(model3,newdata=test_new,type='prob') confusionMatrix(predict(model3), 
train2$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new$UniqueID,Probability1=pred3[,2]),'submission3.csv',row.names=F) #the RF was like number 19 so far, this makes me feel good #I will calculate n-grams for 2 and 3 workds from the headline and the snippet, #I think those should be enough, but I may revisit that later #I worked that codee on the hw for chapetr 5 library(tau) #1. extract the processed text from teh corpora snippets <- data.frame(text=sapply(1:dim(data)[1],function (x) unlist(corpus_sn[[x]][[1]])), stringsAsFactors=F) headlines <- data.frame(text=sapply(1:dim(data)[1],function (x) unlist(corpus_hl[[x]][[1]])), stringsAsFactors=F) #calculateth n-gram combos ngrams2_hl <- sapply(headlines$text,textcnt,method='string',n=2) ngrams3_hl <- sapply(headlines$text,textcnt,method='string',n=3) ngrams2_sn <- sapply(snippets$text,textcnt,method='string',n=2) ngrams3_sn <- sapply(snippets$text,textcnt,method='string',n=3) #define a function to extract the data from the ngrams, there may be better ways #this was mine #define a helper function mydf <- function(x) { y=datax if (length(y[[x]])>0) { data.frame(ngram=names(y[[x]]), freq=as.numeric(y[[x]]),index=x, stringsAsFactors=F) } } library(dplyr) library(tidyr) datax = ngrams2_hl tmp2_hl <- bind_rows(Map(mydf,1:dim(headlines)[1])) datax = ngrams3_hl tmp3_hl <- bind_rows(Map(mydf,1:dim(headlines)[1])) datax = ngrams2_sn tmp2_sn <- bind_rows(Map(mydf,1:dim(snippets)[1])) datax = ngrams3_sn tmp3_sn <- bind_rows(Map(mydf,1:dim(snippets)[1])) #combine and modify to have columns for each n-gram ngrams_hl <- bind_rows(tmp2_hl,tmp3_hl) %>% mutate(ngram=paste0('hl_',gsub(' ','_',ngram))) %>% group_by(index,ngram) %>% summarise(freq=sum(freq)) ngrams_sn <- bind_rows(tmp2_sn,tmp3_sn) %>% mutate(ngram=paste0('sn_',gsub(' ','_',ngram))) %>% group_by(index,ngram) %>% summarise(freq=sum(freq)) #this yields way too many n-grams, lets identify the top ones so we can filter those #and only crerate variables for those 
top_hl <- ngrams_hl %>% group_by(ngram) %>% summarise(P=n()/dim(headlines)[1]) %>% ungroup() %>% arrange(desc(P)) top_sn <- ngrams_sn %>% group_by(ngram) %>% summarise(P=n()/dim(snippets)[1]) %>% ungroup() %>% arrange(desc(P)) sum(top_sn$P >=0.01) sum(top_hl$P >=0.005) #if I chose the ones that are on 0.5% or more of dcuments then I get 27 variabls for headline and over 1% I get 19 for snippets #let's go with this, as it is new yoprk is the top comb as expected for the NY Times ngrams_hl1 <- ngrams_hl %>% filter(ngram %in% top_hl$ngram[top_hl$P >=0.005]) %>% spread(ngram,freq,fill=0) ngrams_sn1 <- ngrams_sn %>% filter(ngram %in% top_sn$ngram[top_hl$P >=0.01]) %>% spread(ngram,freq,fill=0) #Now I have to merge this by the index somehow, which is the row number #for that add an index variable, whihc will be the row number #I do not want to mess anything, so i will do a copy of modeling2 for that modeling3 <- modeling2 modeling3$index <- as.numeric(row.names(modeling3)) modeling3 <- left_join(modeling3,ngrams_hl1) modeling3 <- left_join(modeling3,ngrams_sn1) #when the index was not found that created NAs, in this case they mean 0 on the ngram columns modeling3[644:682][is.na(modeling3[644:682])] <- 0 which(sapply(modeling3,function(x) sum(is.na(x))>0)) #this shows only popular, and this is fine those are the test table(modeling3$Popular,modeling3$group,useNA='ifany') #create a new test and train from modeling3 train_new1 <- subset(modeling3,group==1,-c(group,index)) train_new1$Popular <- as.factor(train_new1$Popular) test_new1 <- subset(modeling3,group==0,-c(group,Popular,index)) dim(train_new1)[2]- dim(train_new)[2] dim(test_new1)[2]- dim(test_new)[2] #I added 39 variables to both setdiff(names(train_new1),names(test_new1)) #as expected the only difference is popular setdiff(names(test_new1),names(train_new1)) #a nothing is in test that is not in train #finally drop the variables not needed train3 = train_new1[,setdiff(1:dim(train_new1)[2],drop1)] train3$Popular = 
factor(train3$Popular,c(0,1),labels=c('N',"Y")) #since the random forest is my best so far #try a rf with the ngrams model4 <- randomForest(Popular~.,data=train3) model4 pred4 <- predict(model4,newdata=test_new1,type='prob') confusionMatrix(predict(model4), train3$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred4[,2]),'submission4.csv',row.names=F) #try a boosted tree using caret, wil use C5.0 #library('gbm') cvCtrl <- trainControl(method = "cv", number = 10, summaryFunction = twoClassSummary, classProbs = TRUE) model5 <- train(Popular~.,train3, method = "C5.0", metric = "ROC",trControl= cvCtrl) model5 pred5 <- predict(model5,newdata=test_new1,type='prob') confusionMatrix(predict(model5), train3$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred5[,2]),'submission5.csv',row.names=F) #this moved it to 20th, it was a small improvement though #try GBM model6 <- train(Popular~.,train3, method = "gbm", metric = "ROC",trControl= cvCtrl) pred6 <- predict(model6,newdata=test_new1,type='prob') confusionMatrix(predict(model6), train3$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred6[,2]),'submission6.csv',row.names=F) #I am running into issues with variables that are too sparse, #in addition in researching that. 
I will take out soem highly correlated variables #using findCorrelation #Iwant to understand the data better to see what to do prop.table(table(data$NewsDesk,useNA='ifany'))*100 prop.table(table(data$SectionName,useNA='ifany'))*100 prop.table(table(data$SubsectionName,useNA='ifany'))*100 #I need to further fix my data to clean some sparse data, o the section, subsection anc news desk variables #for which i created my own cat vars, modeling is when i did that befor modeling4 <- modeling levels(modeling4$NewsDesk )[1]<- 'blank' levels(modeling4$SectionName )[1]<- 'blank' levels(modeling4$SubsectionName )[1]<- 'blank' #now several are very small, so they have really no information, or I will assume that #I will need to have at least 5% thresh = 250 names(which(table(modeling4$NewsDesk)<thresh)) levels(modeling4$NewsDesk )[(which(table(modeling4$NewsDesk)<thresh))] <- 'Other' names(which(table(modeling4$SectionName)<thresh)) levels(modeling4$SectionName )[(which(table(modeling4$SectionName)<thresh))] <- 'Other' names(which(table(modeling4$SubsectionName)<thresh)) levels(modeling4$SubsectionName )[(which(table(modeling4$SubsectionName)<thresh))] <- 'Other' extra1 <- modeling4 %>% select(NewsDesk,SectionName,SubsectionName,weekday,UniqueID) %>% gather(var,level,-UniqueID) %>% mutate(level=ifelse(level=="",'blank',level),n=1) %>% unite(name,var,level,sep="_") %>% spread(name,n,fill=0) modeling5 <- inner_join(modeling4,extra1) modeling5 <- modeling5 %>% select(-c(NewsDesk,SectionName,SubsectionName,weekday)) which(sapply(modeling5,function(x) sum(is.na(x))>0)) names(modeling5) <- make.names(names(modeling5)) names(modeling5) <- gsub("\\.+","",names(modeling5)) modeling5$hour <- as.numeric(modeling5$hour) #now add the ngrams modeling6 <- modeling5 modeling6$index <- as.numeric(row.names(modeling6)) modeling6 <- left_join(modeling6,ngrams_hl1) modeling6 <- left_join(modeling6,ngrams_sn1) #when the index was not founf that created NAs, in this case they mean 0 on the ngram 
columns modeling6[630:668][is.na(modeling6[630:668])] <- 0 which(sapply(modeling6,function(x) sum(is.na(x))>0)) #this shows only popular, and this is fine those are the test table(modeling6$Popular,modeling6$group,useNA='ifany') #as I should have assumed, the same work tends ot be in snippet and abstract and #hence the correlation is very high #I need to take these out correlated_vars <- findCorrelation(cor(modeling6[,-c(1:3,5,6,7,8,9,10)])) names(modeling6[,-c(1:3,5,6,7,8,9,10)])[correlated_vars] modeling7 <- modeling6[,-c(1:3,5,7,9,10)][,-correlated_vars] #modeling7$UniqueID = modeling6$UniqueID nzv <- nearZeroVar(modeling7,freqCut=99/1) sapply(modeling7[nzv],mean) names(modeling7[nzv]) modeling8 <- modeling7[,-nzv] modeling8$UniqueID = modeling6$UniqueID #I had dropped the uique IDs #create a new test and train from modeling8 train_new2 <- subset(modeling8,group==1,-c(group,UniqueID)) train_new2$Popular <- as.factor(train_new2$Popular) test_new2 <- subset(modeling8,group==0,-c(group,Popular)) sum(row.names(train_new2) != row.names(train_new1)) #sanity check sum(row.names(test_new2) != row.names(test_new1)) #sanity check levels(train_new2$Popular) <- c('N','Y') #I think this set is good enough to try the ensemble, #first try the lasso #try to regularize with lasso library(glmnet) model8 <- train(Popular~.,train_new2, method = "glmnet",family='binomial') confusionMatrix(predict(model8), train_new2$Popular,positive='1') model8$finalModel #Next step is to try an ensemble library(caretEnsemble) #I will try an ensemble of rf, the boosted tree (gbm) and the log - all cv'd by caret consstently ctrl1 <- trainControl(method='cv',number=2,savePredictions=T,classProbs=T, summaryFunction=twoClassSummary, index=createFolds(train3$Popular, 5)) list1 = list(lasso=caretModelSpec(method='glmnet',family='binomial'), gbm = caretModelSpec(method='gbm'), rf = caretModelSpec(method='rf')) list2 = list(log=caretModelSpec(method='glm',family='binomial'), rf = 
caretModelSpec(method='rf',tuneGrid=data.frame(.mtry=10))) set.seed(234) #just in case model_list <- caretList(Popular~.,data=train3[-drop2], trControl=ctrl1,metric='ROC',tuneList=list1) #try them not in tuneList but in methodList (not sure how to do binomial) #try svm alone, then svm, gbm.c5.0 and rf #it seems data is so sparse in some vars that it is creating issues as well #so what if gbm fails, maybe c5.0 works on ensemble #the emsemble is not working like that - lets tune my models one by one, and then #try to use caretEnsemble, if that fails try caretStack, and if not then just build your own #ensemble and your own stack model ctrl2 <- trainControl(method='repeatedcv',number=10,repeats = 3,savePredictions=T, classProbs=T, verboseIter = TRUE, summaryFunction=twoClassSummary, index=createMultiFolds(train_new2$Popular, 10,times=3)) library(doMC) registerDoMC(cores = 3) ptm = proc.time() ensemble1 <- train(Popular~.,train_new2, method = "glmnet",family='binomial',trControl=ctrl2) proc.time()-ptm ensemble1 confusionMatrix(predict(ensemble1), train_new2$Popular,positive='Y') pred_ens1 <- predict(ensemble1,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens1[,2]),'submission_ens1.csv',row.names=F) ptm = proc.time() ensemble2 <- train(Popular~.,train_new2, method = "rf",trControl=ctrl2,metric='ROC',tuneLength=3) proc.time()-ptm ensemble2 confusionMatrix(predict(ensemble2), train_new2$Popular,positive='Y') pred_ens2 <- predict(ensemble2,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens2[,2]),'submission_ens2.csv',row.names=F) ptm = proc.time() ensemble3 <- train(Popular~.,train_new2, method = "gbm",trControl=ctrl2) proc.time()-ptm ensemble3 #why would the better tuned model for gb do worse, #I wangt to rerun thid #it can be the data ensemble3 <- train(Popular~.,train_new2, method = "gbm",trControl=ctrl2) ptm = proc.time() confusionMatrix(predict(ensemble3), 
train_new2$Popular,positive='Y') pred_ens3 <- predict(ensemble3,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens3[,2]),'submission_ens3.csv',row.names=F) ptm = proc.time() ensemble4 <- train(Popular~.,train_new2, method = "svmPoly",trControl=ctrl2) proc.time()-ptm confusionMatrix(predict(ensemble4), train_new2$Popular,positive='Y') pred_ens4 <- predict(ensemble4,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens4[,2]),'submission_ens4.csv',row.names=F) #since I have not been able to get caretList to work, I will create my own list #seems like it should work model_list=list(ensemble1,ensemble2,ensemble3,ensemble4) ensemble <- caretEnsemble(model_list) #try to stacj my predictions using a gbm model ensemble_data <- data.frame(model1=predict(ensemble1,type='prob')[,2], model2=predict(ensemble2,type='prob')[,2], model3=predict(ensemble3,type='prob')[,2], model4=predict(ensemble4,type='prob')[,2], Popular=train_new2$Popular) registerDoMC(cores = 3) ensemble_mix1 = train(Popular~.,ensemble_data,method='gbm',trControl=ctrl2) registerDoMC(cores = 1) #weird this selected model2, only #what if I just average the Ps preds_mix <- data.frame(UniqueID=test_new2$UniqueID,m1=pred_ens1[,2],m2=pred_ens2[,2],m3=pred_ens3[,2],m4=pred_ens4[,2]) preds_mix$Probability1 = apply(preds_mix[-1], 1,mean) write.csv(preds_mix[c(1,6)],'submission_ens.avg.csv',row.names=F) pred_aux <- as.factor(ifelse(apply(ensemble_data[1:4],1,mean)>=0.5,'Y','N')) #What if I try my ensemble with the train3 set, and bootstrapping #I will have to set the seeds as before - it iw worth a try registerDoMC(cores = 3) getDoParWorkers() ctrl3 <- trainControl(method='repeatedcv',number=10,repeats=5,savePredictions=T, classProbs=T, verboseIter = TRUE, summaryFunction=twoClassSummary, index=createFolds(train3$Popular, 10,times=5)) set.seed(2345) seeds1 <- vector(mode='list',length=(10*5 +1)) for (i in 1:(10*5)) 
seeds1[[i]] = sample.int(1000,15) seeds1[[10*5 +1 ]] <- sample.int(1000,1) ensemble3b <- train(Popular~.,train_new2, trControl = ctrl3,tuneLength=5, method = "gbm",metric="ROC") confusionMatrix(predict(ensemble3b), train3$Popular,positive='Y') pred_ens3b <- predict(ensemble3b,newdata=train_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens4[,2]),'submission_ens3b.csv',row.names=F) #you already have models for gbm (winner), rf (ngrams) and - so why no try that on caretEnsemble list2 = list(log=caretModelSpec(method='glm',family='binomial'), rf = caretModelSpec(method='rf'), gbm=caretModelSpec(method='gbm',distribution='bernoulli')) n=25 ctrl4 <- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train3$Popular, n)) model_list <- caretList(Popular~.,data=train3[,-drop2], trControl=ctrl4,metric='ROC',tuneList=list2) xyplot(resamples(model_list$gbm),resamples(model_list$rf)) modelCor(resamples(model_list)) rocs <- data.frame(gbm=model_list$gbm$resample$ROC,rf=model_list$rf$resample$ROC,log=model_list$log$resample$ROC) ggplot(rocs,aes(x=gbm,y=rf))+geom_point()+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) ggplot(rocs,aes(x=gbm,y=log))+geom_point()+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) ggplot(rocs,aes(x=rf,y=log))+geom_point()+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) #gbm and rf are fairly correlated #rf took 9 hours, and that seems to much to me, it is also highly correlated to gbm #I think I will run some other types of models insted - #lest try knn and nnet #but I i will try them first nnet1 <- train(Popular~.,data=train3[,-drop2],trControl=ctrl4,metric='ROC',method='nnet',) knn1 <- train(Popular~.,data=train3[,-drop2],trControl=ctrl4,metric='ROC',method='kknn') glmnet1 = train(Popular~.,data=train3[,-drop2],trControl=ctrl4,metric='ROC',method='glmnet',family='bernoulli',tuneLength=5) #Now that I figured (I hope) caretEnsemble and 
the seeds and all that, I can let my thing run #I will use gbm, glmnet/binomial (regularized), nnet load('train_test_full.rdata') n=25 set.seed(1969) ctrl5<- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train4$Popular, n)) list3 = list(glm=caretModelSpec(method='glmnet',family='binomial',tuneLength=5), gbm=caretModelSpec(method='gbm',distribution='bernoulli',tuneLength=5), nnet=caretModelSpec(method='nnet',tuneLength=5)) registerDoMC(cores = 3) set.seed(1969) model_list1 <- caretList(Popular~.,data=train3[,-drop2], trControl=ctrl5,metric='ROC',tuneList=list3) save(model_list1,file='model_list1.rdata') #add a emseble ist with log(wordCount) train4 <- train3[-which(names(train3)=='WordCount')] train4$logwordcount <- log(train3$WordCount+1) registerDoMC(cores = 3) set.seed(1971) model_list2 <- caretList(Popular~.,data=train4[,-drop2], trControl=ctrl5,metric='ROC',tuneList=list3) save(model_list2,file='model_list2.rdata') greedy_ensemble <- caretEnsemble(model_list) summary(greedy_ensemble) drop2 <- which(sapply(train3s,sd)==0) qplot(model_list1$glm$resample$ROC,model_list1$gbm$resample$ROC,geom="point")+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) qplot(model_list1$glm$resample$ROC,model_list1$nnet$resample$ROC,geom="point")+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) qplot(model_list1$gbm$resample$ROC,model_list1$nnet$resample$ROC,geom="point")+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) modelCor(resamples(model_list1)) #NOw lest do the ensemble registerDoMC(cores = 3) greedy_ensemble <- caretEnsemble(model_list1) summary(greedy_ensemble) greedy_ensemble2 <- caretEnsemble(model_list2) pred_greedy1<- predict(greedy_ensemble,newdata=test_new1) write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_greedy1), 'submission_ens_greedy.csv',row.names=F) test_new1a <- test_new1 test_new1a$logwordcount <- log(test_new1$WordCount+1) pred_greedy2<- 
predict(greedy_ensemble2,newdata=test_new1a) write.csv(data.frame(UniqueID=test_new1a$UniqueID,Probability1=pred_greedy2), 'submission_ens_greedy2.csv',row.names=F) ens_preds1 <- predict(greedy_ensemble, newdata=test_new1) write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=ens_preds1),'submission_ens1.csv',row.names=F) gbm_ensemble <- caretStack( model_list1, method='gbm', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_stack1 <- predict(gbm_ensemble,test_new1,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_stack1[,2]),'submission_ens_gbm.csv',row.names=F) glm_ensemble <- caretStack( model_list1, method='glm',family='binomial', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_glm1 <- predict(glm_ensemble,test_new1,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_ens_glm1[,2]), 'submission_ens_glm.csv',row.names=F) #### ebs 2 gbm_ensemble2 <- caretStack( model_list2, method='gbm', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_stack2 <- predict(gbm_ensemble2,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1a$UniqueID,Probability1=pred_stack2[,2]),'submission_ens_gbm2.csv',row.names=F) glm_ensemble2 <- caretStack( model_list2, method='glm',family='binomial', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_glm2 <- predict(glm_ensemble2,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_ens_glm2[,2]), 'submission_ens_glm2.csv',row.names=F) #what If I try rf with this set.seed(123) rf_ensemble2 <- caretStack( model_list2, method='rf', metric='ROC', 
tuneLength=3, trControl=trainControl( method='boot', number=25, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_rf2 <- predict(rf_ensemble2,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1a$UniqueID,Probability1=pred_ens_rf2[,2]), 'submission_ens_rf2.csv',row.names=F) ### #the log helped a lot #I also would like to try t impite the missing owrkd count, I think that #could help the log model, as 0 may be makig it not idal #other idea is to try PCA preprocess (or ICA prep-process) #with list 3 since that is my best one so far #then move forward drop2 <- which(sapply(train4,sd)==0) set.seed(85) ctrl_pca = trainControl(method='boot',number=5,savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T) registerDoMC(1) nzv1 = nearZeroVar(train4[]) set.seed(67) gbm_pca <- train(Popular~.,data=train4[,-nzv1],preProcess=c("pca"), trControl = ctrl_pca,metric="ROC",tune_length=3, method='gbm',distribution='bernoulli') save(model_list,file='model_list.rdata') save.image('kaggle.rdata') save(train_new2,drop2,test_new2,file='train_test_new2') save.image('ensemble_models1_image.rdata') save.image('ensemble_models2_image.rdata') #At this point I am <1% from the top, I am sure I can tune this and get some more #try svm and other stuff for example, but what if there is a better approach #what if the classification first makes sense #also by definition I am in the land of diniishig returns #if we were prediting cancer 1% is a lor of lives, #blogs are much less important #one idea is to use the log of word count for the glm model, #that one should improve hopefully #and that should improve the ensemble #in fact since it makes nt difference for trees (as a cut in 7 is the same) #as a cut in log(7), lets just do it for all #what is with wordCount = 0, should I impute? 
#did it and I made it to #6 #I want to try somrandom models, see if I get something that can improve #lda, svm set.seed(567) svm <- train(Popular~.,data=train4[,-drop2],method='svmRadial',metric='ROC',trControl = ctrl5) qplot(svm$resample$ROC,model_list2$glm$resample$ROC)+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) qplot(svm$resample$ROC,model_list2$nnet$resample$ROC)+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) ctrl6<- trainControl(method = "none", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train3$Popular, n)) set.seed(567) drop4 <- nearZeroVar(train4[-which(names(train4)=='Popular')],freq=99/1) drop5 <- findCorrelation(cor(train4[-c(which(names(train4)=='Popular'),drop4)])) lda1 <- train(Popular~.,data=train4[,-c(drop2,drop4)],method='lda2',metric='ROC',trControl = ctrl6) #I want to estimate the AUC on the 2 best models set.seed(234) my_boot <- createResample(train4$Popular,times = 25) library(ROCR) getROC <- function(model,x) { pred = predict(model,newdata=train4[x,],type='prob') roc = prediction(pred[,2],train4$Popular[x]) as.numeric(performance(roc, "auc")@y.values) } glm_ens2_stats <- sapply(my_boot, function(x) getROC(glm_ensemble2,x)) gbm_ens2_stats <- sapply(my_boot, function(x) getROC(gbm_ensemble2,x)) perf_data <- data.frame(glm=glm_ens2_stats,gbm=gbm_ens2_stats,index=1:25) perf_data %>% gather(model, ROC, -index) %>% ggplot(aes(x=index,y=ROC,color=model,group=model))+ geom_line()+coord_cartesian(ylim=c(0.95,0.975))+ scale_y_continuous(labels=percent)+ geom_line(stat='hline',yintercept='mean') mean( sapply(my_boot, function(x) getROC(svm,x))) pred_svm <- predict(svm,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_svm[,2]), 'submission_svmrad.csv',row.names=F) confusionMatrix(predict(svm),train4$Popular,positive='Y') #svm is quite uncorrelated to the others #so lest add it to the list #it took forever runnign all four, so I will come back 
later and add it manually #Zach Meyer helpoed e figure out how set.seed(2006) ctrl6<- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train4$Popular, n)) list4 = list(svm=caretModelSpec(method='svmPoly',tuneLength=5), glm=caretModelSpec(method='glmnet',family='binomial',tuneLength=5), gbm=caretModelSpec(method='gbm',distribution='bernoulli',tuneLength=5), nnet=caretModelSpec(method='nnet',tuneLength=5)) registerDoMC(cores = 3) model_list4 <- caretList(Popular~.,data=train4[,-drop2], trControl=ctrl6,metric='ROC',tuneList=list4) save(model_list4,file='model_list4.rdata') #running all 4 took forever, but that let me to lern that I can #run one seprately with train and add it #lets add svm ctrlx<- trainControl(method = "boot", number=25, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T) registerDoMC(cores = 4) svm_extra <- train(Popular~.,data=train4[,-drop2], trControl=ctrl5,metric='ROC',method='svmPoly', tuneLength=3) registerDoMC(cores = 1) save(svm_extra,file='svm_extra.rdata') #combine the list model_list3 <- model_list2 model_list3[['svm']] <- svm_extra #somehow now it is telling me that the model_list2 and model_list3 do not have the same #resampling, somehow it got messed up #I need to run the other models again tonight glm_ensemble3 <- caretStack( model_list3, method='glm',family='binomial', metric='ROC', trControl=trainControl( method='boot', number=25, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_glm3 <- predict(glm_ensemble3,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_ens_glm3[,2]), 'submission_ens_glm3.csv',row.names=F) #I am going to try to cluster the data, in the lesson it says as i underatand it #you cluster on train only, and use that , so I will do that rm(list=ls()) load("~/Documents/Analytics_edge/train_test_full.rdata") 
#first I need to scale the data, it is mostly for wordCount, as all others are 0/1 #or very close #and even before impute the word count for those that are 0 as that is #illogical to me load('modeling.rdata') train_predictors <- modeling[modeling$group==1,c(10,1:3,7,14)] #save mean for imputation meanx = mean(train_predictors$WordCount[train_predictors$WordCount !=0]) train_predictors$WordCount[train_predictors$WordCount ==0] <- meanx train_predictors$logwc <- log(train_predictors$WordCount) train_predictors <- train_predictors[-5] extra2 <- train_predictors %>% select(-logwc) %>% gather(variable,level,-UniqueID) %>% group_by(UniqueID,variable,level) %>% summarise(N=n()) %>% unite(key,variable,level,sep="_") %>% spread(key,N,fill=0) train_predictors1 <- left_join(train_predictors[c('UniqueID','logwc')],extra2) scale1 <- preProcess(train_predictors1) scaled_preds <- predict(scale1,newdata=train_predictors1) dist = dist(scaled_preds) hclust1 <- hclust(dist,method='ward.D') plot(hclust1) clusters=cutree(hclust1,k=4) kmeans1 <- kmeans(scaled_preds,4) table(kmeans1$cluster,train_new1$Popular) kclusters <- kmeans1$cluster #lest see if they make sense #modeling has the original data that came, so I can use that for a quick profile aux <- modeling[modeling$group==1,c(9,1,2,3)] aux$cluster = kclusters table(kclusters,train3$Popular) profile = aux %>% group_by(cluster) %>% gather(variable,level,-cluster) %>% group_by(cluster,variable,level) %>% summarise(N=n()) %>% group_by(cluster,variable) %>% mutate(P=N/sum(N)) %>% select(-N) %>% spread(cluster,P,fill=0) View(profile) #Because gbm is my best model, I will keep using gbm #and build 4 models, one for each cluster load('train4_files.rdata') n=25 registerDoMC(cores = 3) getDoParWorkers() for (i in 1:4) { trainx <- subset(train4,clusters==i) #get the data for that cluster #I need to take out the vars with no variability, or gbm fails dropx <- nearZeroVar(trainx,freq=100/0) set.seed(2006) #set a seed for reproductibility 
#define control parameters ctrlx<- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(trainx$Popular, n)) #run model modelx <- train(Popular~.,trainx[,-c(drop2,dropx)],method='gbm', distribution='bernoulli',trControl = ctrlx,metric='ROC') #save results assign(paste0('gbm_clust_',i),modelx) } save(list=ls(pattern='gbm_clust'),file='cluster_models.rdata') #they are worth trying, oI just need to combined them #first need to cluster the test data #for that I imputd first the missing word to mean, then did the log, then scaled test_new1b <- test_new1 test_predictors <- modeling[modeling$group==0,c(10,1:3,7,14)] test_predictors$WordCount[test_predictors$WordCount ==0] <- meanx test_predictors$logwc <- log(test_predictors$WordCount) test_predictors <- test_predictors[-5] extra2test <- test_predictors %>% select(-logwc) %>% gather(variable,level,-UniqueID) %>% group_by(UniqueID,variable,level) %>% summarise(N=n()) %>% unite(key,variable,level,sep="_") %>% spread(key,N,fill=0) #I am missing some variables, I need to add them missing <- setdiff(names(extra2),names(extra2test)) missing_df = data.frame(matrix(rep(0,dim(extra2test)[1]*length(missing)),dim(extra2test)[1],length(missing))) names(missing_df) <- missing missing_df$UniqueID = extra2test$UniqueID extra2test <- inner_join(extra2test,missing_df) test_predictors1 <- left_join(test_predictors[c('UniqueID','logwc')],extra2test) scaled_test_preds <- predict(scale1,newdata=test_predictors1) library(flexclust) clust.kcca = as.kcca(kmeans1,train_predictors1) test_clusters = predict(clust.kcca,newdata=test_predictors1) table(test_clusters)
/Competition.R
no_license
mariosegal/Anaytics_Edge_Repo
R
false
false
36,987
r
#nread data train <- read.csv('NYTimesBlogTrain.csv',stringsAsFactors=F) test <- read.csv('NYTimesBlogTest.csv',stringsAsFactors=F) #load libraries library(tm) library(dplyr) library(ggplot2) library(tidyr) library(chron) library(rpart) library(rpart.plot) library(caret) library(rattle) library(randomForest) library(kknn) library(glmnet) library(gbm) library(caretEnsemble) library(doMC) library(tau) library(e1071) #I am going to combine the data to do the processing together, then I can split that #it seems like duplicate qwork to do it at once and besides when I select the less sparse data who knows who I will do that train$group = 1 test$group= 0 data <- bind_rows(train,test) table(data$group) #let's do some basic processing data$date = as.Date(data$PubDate,'%Y-%m-%d ') data$time = sapply(strsplit(data$PubDate,' '), function(x) times(x[2])) data$weekday = weekdays(data$date) data$hour = as.character((hours(data$time))) #I need to create a corpus for the headline, for the snippet and for the abstract corpus_hl <- Corpus(VectorSource(data$Headline)) corpus_hl <- tm_map(corpus_hl,tolower) corpus_hl <- tm_map(corpus_hl,PlainTextDocument) corpus_hl <- tm_map(corpus_hl,removePunctuation) corpus_hl <- tm_map(corpus_hl,removeWords, stopwords("english")) corpus_hl <- tm_map(corpus_hl,stemDocument) dtm_hl <- DocumentTermMatrix(corpus_hl) dtm_hl_dense = removeSparseTerms(dtm_hl, 0.995) headline_words <- as.data.frame(as.matrix(dtm_hl_dense)) names(headline_words) <- paste0('head_',names(headline_words)) corpus_sn <- Corpus(VectorSource(data$Snippet)) corpus_sn <- tm_map(corpus_sn,tolower) corpus_sn <- tm_map(corpus_sn,PlainTextDocument) corpus_sn <- tm_map(corpus_sn,removePunctuation) corpus_sn <- tm_map(corpus_sn,removeWords, stopwords("english")) corpus_sn <- tm_map(corpus_sn,stemDocument) dtm_sn <- DocumentTermMatrix(corpus_sn) dtm_sn_dense = removeSparseTerms(dtm_sn, 0.99) snippet_words <- as.data.frame(as.matrix(dtm_sn_dense)) names(snippet_words) <- 
paste0('snip_',names(snippet_words)) corpus_abs <- Corpus(VectorSource(data$Abstract)) corpus_abs <- tm_map(corpus_abs,tolower) corpus_abs <- tm_map(corpus_abs,PlainTextDocument) corpus_abs <- tm_map(corpus_abs,removePunctuation) corpus_abs <- tm_map(corpus_abs,removeWords, stopwords("english")) corpus_abs <- tm_map(corpus_abs,stemDocument) dtm_abs <- DocumentTermMatrix(corpus_abs) dtm_abs_dense = removeSparseTerms(dtm_abs, 0.99) abstract_words <- as.data.frame(as.matrix(dtm_abs_dense)) names(abstract_words) <- paste0('abs_',names(abstract_words)) #merge back to create a modeling set modeling <- cbind(data,headline_words,snippet_words,abstract_words) row.names(modeling) <- NULL modeling$NewsDesk <- factor(modeling$NewsDesk) modeling$SectionName <- factor(modeling$SectionName) modeling$SubsectionName <- factor(modeling$SubsectionName) modeling$weekday <- factor(modeling$weekday) #for some reason, caret does some preprocessing to the factors and messes them up for prediction, the best way is to vreate my own categorical variables for them #on modleing data and be done with it #for that I will use dplyr extra <- modeling %>% select(NewsDesk,SectionName,SubsectionName,weekday,UniqueID) %>% gather(var,level,-UniqueID) %>% mutate(level=ifelse(level=="",'blank',level),n=1) %>% unite(name,var,level,sep="_") %>% spread(name,n,fill=0) modeling2 <- inner_join(modeling,extra) modeling2 <- modeling2 %>% select(-c(NewsDesk,SectionName,SubsectionName,weekday)) which(sapply(modeling2,function(x) sum(is.na(x))>0)) names(modeling2) <- make.names(names(modeling2)) names(modeling2) <- gsub("\\.+","",names(modeling2)) modeling2$hour <- as.numeric(modeling2$hour) #split into train and test train_new <- subset(modeling2,group==1,-group) train_new$Popular <- as.factor(train_new$Popular) test_new <- subset(modeling2,group==0,-c(group,Popular)) which(sapply(test_new,function(x) sum(is.na(x))>0)) which(sapply(train_new,function(x) sum(is.na(x))>0)) 
############################################# #Models # Now that I have somedata, lest establish a a baseline, guessnot prop.table(table(train_new$Popular)) confusionMatrix(factor(rep(0,dim(train_new)[1]),c(0,1)),factor(train_new$Popular,c(0,1)),positive='1') #83.27% drop1 = which((names(train_new) %in% c('PubDate','date','time','UniqueID','Headline','Snippet','Abstract'))) #start with a logistic model1 <- glm(Popular~. ,train_new[,-drop1],family='binomial') pred1 <- predict(model1,newdata=test_new,type='response') write.csv(data.frame(UniqueID=test_new$UniqueID,Probability1=pred1),'submission1.csv',row.names=F) confusionMatrix(factor(as.numeric(predict(model1,type='response')>=0.5,c(0,1))), factor(train_new$Popular,c(0,1)),positive='1') #this was not so good, but it was just a test of how well I can do, mostly test the sumission #it will take forever to clean the variable by nae o n logistic #try an rpart cv on caret #from a Kuhn presentation I found, he explain how to tune ROC, whihc is how we are being measured #summaryFunction = twoClassSummary),metric="ROC" train2 = train_new[,setdiff(1:dim(train_new)[2],drop1)] train2$Popular = factor(train2$Popular,c(0,1),labels=c('N',"Y")) cvctrl <- trainControl(method = "cv", number = 10, summaryFunction = twoClassSummary, classProbs = TRUE) cpgrid = expand.grid(.cp=seq(0.01,.1,by=0.01)) set.seed(1) model2 <- train(Popular ~ ., data = train2, method = "rpart",preProcess=NULL, metric = "ROC", trControl = cvctrl,tuneGrid=cpgrid) model2 model2$finalModel fancyRpartPlot(model2$finalModel) confusionMatrix(factor(as.numeric(predict(model2$finalModel)[,2]>=0.5),c(0,1),labels=c('N',"Y")), train2$Popular,positive='Y') pred2 <- predict(model2$finalModel,newdata=test_new) write.csv(data.frame(UniqueID=test_new$UniqueID,Probability1=pred2[,2]),'submission2.csv',row.names=F) #Random Forest model model3 <- randomForest(Popular~.,data=train2) model3 pred3 <- predict(model3,newdata=test_new,type='prob') confusionMatrix(predict(model3), 
train2$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new$UniqueID,Probability1=pred3[,2]),'submission3.csv',row.names=F) #the RF was like number 19 so far, this makes me feel good #I will calculate n-grams for 2 and 3 workds from the headline and the snippet, #I think those should be enough, but I may revisit that later #I worked that codee on the hw for chapetr 5 library(tau) #1. extract the processed text from teh corpora snippets <- data.frame(text=sapply(1:dim(data)[1],function (x) unlist(corpus_sn[[x]][[1]])), stringsAsFactors=F) headlines <- data.frame(text=sapply(1:dim(data)[1],function (x) unlist(corpus_hl[[x]][[1]])), stringsAsFactors=F) #calculateth n-gram combos ngrams2_hl <- sapply(headlines$text,textcnt,method='string',n=2) ngrams3_hl <- sapply(headlines$text,textcnt,method='string',n=3) ngrams2_sn <- sapply(snippets$text,textcnt,method='string',n=2) ngrams3_sn <- sapply(snippets$text,textcnt,method='string',n=3) #define a function to extract the data from the ngrams, there may be better ways #this was mine #define a helper function mydf <- function(x) { y=datax if (length(y[[x]])>0) { data.frame(ngram=names(y[[x]]), freq=as.numeric(y[[x]]),index=x, stringsAsFactors=F) } } library(dplyr) library(tidyr) datax = ngrams2_hl tmp2_hl <- bind_rows(Map(mydf,1:dim(headlines)[1])) datax = ngrams3_hl tmp3_hl <- bind_rows(Map(mydf,1:dim(headlines)[1])) datax = ngrams2_sn tmp2_sn <- bind_rows(Map(mydf,1:dim(snippets)[1])) datax = ngrams3_sn tmp3_sn <- bind_rows(Map(mydf,1:dim(snippets)[1])) #combine and modify to have columns for each n-gram ngrams_hl <- bind_rows(tmp2_hl,tmp3_hl) %>% mutate(ngram=paste0('hl_',gsub(' ','_',ngram))) %>% group_by(index,ngram) %>% summarise(freq=sum(freq)) ngrams_sn <- bind_rows(tmp2_sn,tmp3_sn) %>% mutate(ngram=paste0('sn_',gsub(' ','_',ngram))) %>% group_by(index,ngram) %>% summarise(freq=sum(freq)) #this yields way too many n-grams, lets identify the top ones so we can filter those #and only crerate variables for those 
top_hl <- ngrams_hl %>% group_by(ngram) %>% summarise(P=n()/dim(headlines)[1]) %>% ungroup() %>% arrange(desc(P)) top_sn <- ngrams_sn %>% group_by(ngram) %>% summarise(P=n()/dim(snippets)[1]) %>% ungroup() %>% arrange(desc(P)) sum(top_sn$P >=0.01) sum(top_hl$P >=0.005) #if I chose the ones that are on 0.5% or more of dcuments then I get 27 variabls for headline and over 1% I get 19 for snippets #let's go with this, as it is new yoprk is the top comb as expected for the NY Times ngrams_hl1 <- ngrams_hl %>% filter(ngram %in% top_hl$ngram[top_hl$P >=0.005]) %>% spread(ngram,freq,fill=0) ngrams_sn1 <- ngrams_sn %>% filter(ngram %in% top_sn$ngram[top_hl$P >=0.01]) %>% spread(ngram,freq,fill=0) #Now I have to merge this by the index somehow, which is the row number #for that add an index variable, whihc will be the row number #I do not want to mess anything, so i will do a copy of modeling2 for that modeling3 <- modeling2 modeling3$index <- as.numeric(row.names(modeling3)) modeling3 <- left_join(modeling3,ngrams_hl1) modeling3 <- left_join(modeling3,ngrams_sn1) #when the index was not found that created NAs, in this case they mean 0 on the ngram columns modeling3[644:682][is.na(modeling3[644:682])] <- 0 which(sapply(modeling3,function(x) sum(is.na(x))>0)) #this shows only popular, and this is fine those are the test table(modeling3$Popular,modeling3$group,useNA='ifany') #create a new test and train from modeling3 train_new1 <- subset(modeling3,group==1,-c(group,index)) train_new1$Popular <- as.factor(train_new1$Popular) test_new1 <- subset(modeling3,group==0,-c(group,Popular,index)) dim(train_new1)[2]- dim(train_new)[2] dim(test_new1)[2]- dim(test_new)[2] #I added 39 variables to both setdiff(names(train_new1),names(test_new1)) #as expected the only difference is popular setdiff(names(test_new1),names(train_new1)) #a nothing is in test that is not in train #finally drop the variables not needed train3 = train_new1[,setdiff(1:dim(train_new1)[2],drop1)] train3$Popular = 
factor(train3$Popular,c(0,1),labels=c('N',"Y")) #since the random forest is my best so far #try a rf with the ngrams model4 <- randomForest(Popular~.,data=train3) model4 pred4 <- predict(model4,newdata=test_new1,type='prob') confusionMatrix(predict(model4), train3$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred4[,2]),'submission4.csv',row.names=F) #try a boosted tree using caret, wil use C5.0 #library('gbm') cvCtrl <- trainControl(method = "cv", number = 10, summaryFunction = twoClassSummary, classProbs = TRUE) model5 <- train(Popular~.,train3, method = "C5.0", metric = "ROC",trControl= cvCtrl) model5 pred5 <- predict(model5,newdata=test_new1,type='prob') confusionMatrix(predict(model5), train3$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred5[,2]),'submission5.csv',row.names=F) #this moved it to 20th, it was a small improvement though #try GBM model6 <- train(Popular~.,train3, method = "gbm", metric = "ROC",trControl= cvCtrl) pred6 <- predict(model6,newdata=test_new1,type='prob') confusionMatrix(predict(model6), train3$Popular,positive='Y') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred6[,2]),'submission6.csv',row.names=F) #I am running into issues with variables that are too sparse, #in addition in researching that. 
I will take out soem highly correlated variables #using findCorrelation #Iwant to understand the data better to see what to do prop.table(table(data$NewsDesk,useNA='ifany'))*100 prop.table(table(data$SectionName,useNA='ifany'))*100 prop.table(table(data$SubsectionName,useNA='ifany'))*100 #I need to further fix my data to clean some sparse data, o the section, subsection anc news desk variables #for which i created my own cat vars, modeling is when i did that befor modeling4 <- modeling levels(modeling4$NewsDesk )[1]<- 'blank' levels(modeling4$SectionName )[1]<- 'blank' levels(modeling4$SubsectionName )[1]<- 'blank' #now several are very small, so they have really no information, or I will assume that #I will need to have at least 5% thresh = 250 names(which(table(modeling4$NewsDesk)<thresh)) levels(modeling4$NewsDesk )[(which(table(modeling4$NewsDesk)<thresh))] <- 'Other' names(which(table(modeling4$SectionName)<thresh)) levels(modeling4$SectionName )[(which(table(modeling4$SectionName)<thresh))] <- 'Other' names(which(table(modeling4$SubsectionName)<thresh)) levels(modeling4$SubsectionName )[(which(table(modeling4$SubsectionName)<thresh))] <- 'Other' extra1 <- modeling4 %>% select(NewsDesk,SectionName,SubsectionName,weekday,UniqueID) %>% gather(var,level,-UniqueID) %>% mutate(level=ifelse(level=="",'blank',level),n=1) %>% unite(name,var,level,sep="_") %>% spread(name,n,fill=0) modeling5 <- inner_join(modeling4,extra1) modeling5 <- modeling5 %>% select(-c(NewsDesk,SectionName,SubsectionName,weekday)) which(sapply(modeling5,function(x) sum(is.na(x))>0)) names(modeling5) <- make.names(names(modeling5)) names(modeling5) <- gsub("\\.+","",names(modeling5)) modeling5$hour <- as.numeric(modeling5$hour) #now add the ngrams modeling6 <- modeling5 modeling6$index <- as.numeric(row.names(modeling6)) modeling6 <- left_join(modeling6,ngrams_hl1) modeling6 <- left_join(modeling6,ngrams_sn1) #when the index was not founf that created NAs, in this case they mean 0 on the ngram 
columns modeling6[630:668][is.na(modeling6[630:668])] <- 0 which(sapply(modeling6,function(x) sum(is.na(x))>0)) #this shows only popular, and this is fine those are the test table(modeling6$Popular,modeling6$group,useNA='ifany') #as I should have assumed, the same work tends ot be in snippet and abstract and #hence the correlation is very high #I need to take these out correlated_vars <- findCorrelation(cor(modeling6[,-c(1:3,5,6,7,8,9,10)])) names(modeling6[,-c(1:3,5,6,7,8,9,10)])[correlated_vars] modeling7 <- modeling6[,-c(1:3,5,7,9,10)][,-correlated_vars] #modeling7$UniqueID = modeling6$UniqueID nzv <- nearZeroVar(modeling7,freqCut=99/1) sapply(modeling7[nzv],mean) names(modeling7[nzv]) modeling8 <- modeling7[,-nzv] modeling8$UniqueID = modeling6$UniqueID #I had dropped the uique IDs #create a new test and train from modeling8 train_new2 <- subset(modeling8,group==1,-c(group,UniqueID)) train_new2$Popular <- as.factor(train_new2$Popular) test_new2 <- subset(modeling8,group==0,-c(group,Popular)) sum(row.names(train_new2) != row.names(train_new1)) #sanity check sum(row.names(test_new2) != row.names(test_new1)) #sanity check levels(train_new2$Popular) <- c('N','Y') #I think this set is good enough to try the ensemble, #first try the lasso #try to regularize with lasso library(glmnet) model8 <- train(Popular~.,train_new2, method = "glmnet",family='binomial') confusionMatrix(predict(model8), train_new2$Popular,positive='1') model8$finalModel #Next step is to try an ensemble library(caretEnsemble) #I will try an ensemble of rf, the boosted tree (gbm) and the log - all cv'd by caret consstently ctrl1 <- trainControl(method='cv',number=2,savePredictions=T,classProbs=T, summaryFunction=twoClassSummary, index=createFolds(train3$Popular, 5)) list1 = list(lasso=caretModelSpec(method='glmnet',family='binomial'), gbm = caretModelSpec(method='gbm'), rf = caretModelSpec(method='rf')) list2 = list(log=caretModelSpec(method='glm',family='binomial'), rf = 
caretModelSpec(method='rf',tuneGrid=data.frame(.mtry=10))) set.seed(234) #just in case model_list <- caretList(Popular~.,data=train3[-drop2], trControl=ctrl1,metric='ROC',tuneList=list1) #try them not in tuneList but in methodList (not sure how to do binomial) #try svm alone, then svm, gbm.c5.0 and rf #it seems data is so sparse in some vars that it is creating issues as well #so what if gbm fails, maybe c5.0 works on ensemble #the emsemble is not working like that - lets tune my models one by one, and then #try to use caretEnsemble, if that fails try caretStack, and if not then just build your own #ensemble and your own stack model ctrl2 <- trainControl(method='repeatedcv',number=10,repeats = 3,savePredictions=T, classProbs=T, verboseIter = TRUE, summaryFunction=twoClassSummary, index=createMultiFolds(train_new2$Popular, 10,times=3)) library(doMC) registerDoMC(cores = 3) ptm = proc.time() ensemble1 <- train(Popular~.,train_new2, method = "glmnet",family='binomial',trControl=ctrl2) proc.time()-ptm ensemble1 confusionMatrix(predict(ensemble1), train_new2$Popular,positive='Y') pred_ens1 <- predict(ensemble1,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens1[,2]),'submission_ens1.csv',row.names=F) ptm = proc.time() ensemble2 <- train(Popular~.,train_new2, method = "rf",trControl=ctrl2,metric='ROC',tuneLength=3) proc.time()-ptm ensemble2 confusionMatrix(predict(ensemble2), train_new2$Popular,positive='Y') pred_ens2 <- predict(ensemble2,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens2[,2]),'submission_ens2.csv',row.names=F) ptm = proc.time() ensemble3 <- train(Popular~.,train_new2, method = "gbm",trControl=ctrl2) proc.time()-ptm ensemble3 #why would the better tuned model for gb do worse, #I wangt to rerun thid #it can be the data ensemble3 <- train(Popular~.,train_new2, method = "gbm",trControl=ctrl2) ptm = proc.time() confusionMatrix(predict(ensemble3), 
train_new2$Popular,positive='Y') pred_ens3 <- predict(ensemble3,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens3[,2]),'submission_ens3.csv',row.names=F) ptm = proc.time() ensemble4 <- train(Popular~.,train_new2, method = "svmPoly",trControl=ctrl2) proc.time()-ptm confusionMatrix(predict(ensemble4), train_new2$Popular,positive='Y') pred_ens4 <- predict(ensemble4,newdata=test_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens4[,2]),'submission_ens4.csv',row.names=F) #since I have not been able to get caretList to work, I will create my own list #seems like it should work model_list=list(ensemble1,ensemble2,ensemble3,ensemble4) ensemble <- caretEnsemble(model_list) #try to stacj my predictions using a gbm model ensemble_data <- data.frame(model1=predict(ensemble1,type='prob')[,2], model2=predict(ensemble2,type='prob')[,2], model3=predict(ensemble3,type='prob')[,2], model4=predict(ensemble4,type='prob')[,2], Popular=train_new2$Popular) registerDoMC(cores = 3) ensemble_mix1 = train(Popular~.,ensemble_data,method='gbm',trControl=ctrl2) registerDoMC(cores = 1) #weird this selected model2, only #what if I just average the Ps preds_mix <- data.frame(UniqueID=test_new2$UniqueID,m1=pred_ens1[,2],m2=pred_ens2[,2],m3=pred_ens3[,2],m4=pred_ens4[,2]) preds_mix$Probability1 = apply(preds_mix[-1], 1,mean) write.csv(preds_mix[c(1,6)],'submission_ens.avg.csv',row.names=F) pred_aux <- as.factor(ifelse(apply(ensemble_data[1:4],1,mean)>=0.5,'Y','N')) #What if I try my ensemble with the train3 set, and bootstrapping #I will have to set the seeds as before - it iw worth a try registerDoMC(cores = 3) getDoParWorkers() ctrl3 <- trainControl(method='repeatedcv',number=10,repeats=5,savePredictions=T, classProbs=T, verboseIter = TRUE, summaryFunction=twoClassSummary, index=createFolds(train3$Popular, 10,times=5)) set.seed(2345) seeds1 <- vector(mode='list',length=(10*5 +1)) for (i in 1:(10*5)) 
seeds1[[i]] = sample.int(1000,15) seeds1[[10*5 +1 ]] <- sample.int(1000,1) ensemble3b <- train(Popular~.,train_new2, trControl = ctrl3,tuneLength=5, method = "gbm",metric="ROC") confusionMatrix(predict(ensemble3b), train3$Popular,positive='Y') pred_ens3b <- predict(ensemble3b,newdata=train_new2,type='prob') write.csv(data.frame(UniqueID=test_new2$UniqueID,Probability1=pred_ens4[,2]),'submission_ens3b.csv',row.names=F) #you already have models for gbm (winner), rf (ngrams) and - so why no try that on caretEnsemble list2 = list(log=caretModelSpec(method='glm',family='binomial'), rf = caretModelSpec(method='rf'), gbm=caretModelSpec(method='gbm',distribution='bernoulli')) n=25 ctrl4 <- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train3$Popular, n)) model_list <- caretList(Popular~.,data=train3[,-drop2], trControl=ctrl4,metric='ROC',tuneList=list2) xyplot(resamples(model_list$gbm),resamples(model_list$rf)) modelCor(resamples(model_list)) rocs <- data.frame(gbm=model_list$gbm$resample$ROC,rf=model_list$rf$resample$ROC,log=model_list$log$resample$ROC) ggplot(rocs,aes(x=gbm,y=rf))+geom_point()+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) ggplot(rocs,aes(x=gbm,y=log))+geom_point()+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) ggplot(rocs,aes(x=rf,y=log))+geom_point()+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) #gbm and rf are fairly correlated #rf took 9 hours, and that seems to much to me, it is also highly correlated to gbm #I think I will run some other types of models insted - #lest try knn and nnet #but I i will try them first nnet1 <- train(Popular~.,data=train3[,-drop2],trControl=ctrl4,metric='ROC',method='nnet',) knn1 <- train(Popular~.,data=train3[,-drop2],trControl=ctrl4,metric='ROC',method='kknn') glmnet1 = train(Popular~.,data=train3[,-drop2],trControl=ctrl4,metric='ROC',method='glmnet',family='bernoulli',tuneLength=5) #Now that I figured (I hope) caretEnsemble and 
the seeds and all that, I can let my thing run #I will use gbm, glmnet/binomial (regularized), nnet load('train_test_full.rdata') n=25 set.seed(1969) ctrl5<- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train4$Popular, n)) list3 = list(glm=caretModelSpec(method='glmnet',family='binomial',tuneLength=5), gbm=caretModelSpec(method='gbm',distribution='bernoulli',tuneLength=5), nnet=caretModelSpec(method='nnet',tuneLength=5)) registerDoMC(cores = 3) set.seed(1969) model_list1 <- caretList(Popular~.,data=train3[,-drop2], trControl=ctrl5,metric='ROC',tuneList=list3) save(model_list1,file='model_list1.rdata') #add a emseble ist with log(wordCount) train4 <- train3[-which(names(train3)=='WordCount')] train4$logwordcount <- log(train3$WordCount+1) registerDoMC(cores = 3) set.seed(1971) model_list2 <- caretList(Popular~.,data=train4[,-drop2], trControl=ctrl5,metric='ROC',tuneList=list3) save(model_list2,file='model_list2.rdata') greedy_ensemble <- caretEnsemble(model_list) summary(greedy_ensemble) drop2 <- which(sapply(train3s,sd)==0) qplot(model_list1$glm$resample$ROC,model_list1$gbm$resample$ROC,geom="point")+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) qplot(model_list1$glm$resample$ROC,model_list1$nnet$resample$ROC,geom="point")+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) qplot(model_list1$gbm$resample$ROC,model_list1$nnet$resample$ROC,geom="point")+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) modelCor(resamples(model_list1)) #NOw lest do the ensemble registerDoMC(cores = 3) greedy_ensemble <- caretEnsemble(model_list1) summary(greedy_ensemble) greedy_ensemble2 <- caretEnsemble(model_list2) pred_greedy1<- predict(greedy_ensemble,newdata=test_new1) write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_greedy1), 'submission_ens_greedy.csv',row.names=F) test_new1a <- test_new1 test_new1a$logwordcount <- log(test_new1$WordCount+1) pred_greedy2<- 
predict(greedy_ensemble2,newdata=test_new1a) write.csv(data.frame(UniqueID=test_new1a$UniqueID,Probability1=pred_greedy2), 'submission_ens_greedy2.csv',row.names=F) ens_preds1 <- predict(greedy_ensemble, newdata=test_new1) write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=ens_preds1),'submission_ens1.csv',row.names=F) gbm_ensemble <- caretStack( model_list1, method='gbm', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_stack1 <- predict(gbm_ensemble,test_new1,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_stack1[,2]),'submission_ens_gbm.csv',row.names=F) glm_ensemble <- caretStack( model_list1, method='glm',family='binomial', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_glm1 <- predict(glm_ensemble,test_new1,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_ens_glm1[,2]), 'submission_ens_glm.csv',row.names=F) #### ebs 2 gbm_ensemble2 <- caretStack( model_list2, method='gbm', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_stack2 <- predict(gbm_ensemble2,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1a$UniqueID,Probability1=pred_stack2[,2]),'submission_ens_gbm2.csv',row.names=F) glm_ensemble2 <- caretStack( model_list2, method='glm',family='binomial', metric='ROC', trControl=trainControl( method='boot', number=10, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_glm2 <- predict(glm_ensemble2,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_ens_glm2[,2]), 'submission_ens_glm2.csv',row.names=F) #what If I try rf with this set.seed(123) rf_ensemble2 <- caretStack( model_list2, method='rf', metric='ROC', 
tuneLength=3, trControl=trainControl( method='boot', number=25, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_rf2 <- predict(rf_ensemble2,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1a$UniqueID,Probability1=pred_ens_rf2[,2]), 'submission_ens_rf2.csv',row.names=F) ### #the log helped a lot #I also would like to try t impite the missing owrkd count, I think that #could help the log model, as 0 may be makig it not idal #other idea is to try PCA preprocess (or ICA prep-process) #with list 3 since that is my best one so far #then move forward drop2 <- which(sapply(train4,sd)==0) set.seed(85) ctrl_pca = trainControl(method='boot',number=5,savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T) registerDoMC(1) nzv1 = nearZeroVar(train4[]) set.seed(67) gbm_pca <- train(Popular~.,data=train4[,-nzv1],preProcess=c("pca"), trControl = ctrl_pca,metric="ROC",tune_length=3, method='gbm',distribution='bernoulli') save(model_list,file='model_list.rdata') save.image('kaggle.rdata') save(train_new2,drop2,test_new2,file='train_test_new2') save.image('ensemble_models1_image.rdata') save.image('ensemble_models2_image.rdata') #At this point I am <1% from the top, I am sure I can tune this and get some more #try svm and other stuff for example, but what if there is a better approach #what if the classification first makes sense #also by definition I am in the land of diniishig returns #if we were prediting cancer 1% is a lor of lives, #blogs are much less important #one idea is to use the log of word count for the glm model, #that one should improve hopefully #and that should improve the ensemble #in fact since it makes nt difference for trees (as a cut in 7 is the same) #as a cut in log(7), lets just do it for all #what is with wordCount = 0, should I impute? 
#did it and I made it to #6 #I want to try somrandom models, see if I get something that can improve #lda, svm set.seed(567) svm <- train(Popular~.,data=train4[,-drop2],method='svmRadial',metric='ROC',trControl = ctrl5) qplot(svm$resample$ROC,model_list2$glm$resample$ROC)+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) qplot(svm$resample$ROC,model_list2$nnet$resample$ROC)+coord_cartesian(ylim=c(0,1),xlim=c(0,1)) ctrl6<- trainControl(method = "none", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train3$Popular, n)) set.seed(567) drop4 <- nearZeroVar(train4[-which(names(train4)=='Popular')],freq=99/1) drop5 <- findCorrelation(cor(train4[-c(which(names(train4)=='Popular'),drop4)])) lda1 <- train(Popular~.,data=train4[,-c(drop2,drop4)],method='lda2',metric='ROC',trControl = ctrl6) #I want to estimate the AUC on the 2 best models set.seed(234) my_boot <- createResample(train4$Popular,times = 25) library(ROCR) getROC <- function(model,x) { pred = predict(model,newdata=train4[x,],type='prob') roc = prediction(pred[,2],train4$Popular[x]) as.numeric(performance(roc, "auc")@y.values) } glm_ens2_stats <- sapply(my_boot, function(x) getROC(glm_ensemble2,x)) gbm_ens2_stats <- sapply(my_boot, function(x) getROC(gbm_ensemble2,x)) perf_data <- data.frame(glm=glm_ens2_stats,gbm=gbm_ens2_stats,index=1:25) perf_data %>% gather(model, ROC, -index) %>% ggplot(aes(x=index,y=ROC,color=model,group=model))+ geom_line()+coord_cartesian(ylim=c(0.95,0.975))+ scale_y_continuous(labels=percent)+ geom_line(stat='hline',yintercept='mean') mean( sapply(my_boot, function(x) getROC(svm,x))) pred_svm <- predict(svm,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_svm[,2]), 'submission_svmrad.csv',row.names=F) confusionMatrix(predict(svm),train4$Popular,positive='Y') #svm is quite uncorrelated to the others #so lest add it to the list #it took forever runnign all four, so I will come back 
later and add it manually #Zach Meyer helpoed e figure out how set.seed(2006) ctrl6<- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(train4$Popular, n)) list4 = list(svm=caretModelSpec(method='svmPoly',tuneLength=5), glm=caretModelSpec(method='glmnet',family='binomial',tuneLength=5), gbm=caretModelSpec(method='gbm',distribution='bernoulli',tuneLength=5), nnet=caretModelSpec(method='nnet',tuneLength=5)) registerDoMC(cores = 3) model_list4 <- caretList(Popular~.,data=train4[,-drop2], trControl=ctrl6,metric='ROC',tuneList=list4) save(model_list4,file='model_list4.rdata') #running all 4 took forever, but that let me to lern that I can #run one seprately with train and add it #lets add svm ctrlx<- trainControl(method = "boot", number=25, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T) registerDoMC(cores = 4) svm_extra <- train(Popular~.,data=train4[,-drop2], trControl=ctrl5,metric='ROC',method='svmPoly', tuneLength=3) registerDoMC(cores = 1) save(svm_extra,file='svm_extra.rdata') #combine the list model_list3 <- model_list2 model_list3[['svm']] <- svm_extra #somehow now it is telling me that the model_list2 and model_list3 do not have the same #resampling, somehow it got messed up #I need to run the other models again tonight glm_ensemble3 <- caretStack( model_list3, method='glm',family='binomial', metric='ROC', trControl=trainControl( method='boot', number=25, savePredictions=TRUE, classProbs=TRUE, summaryFunction=twoClassSummary ) ) pred_ens_glm3 <- predict(glm_ensemble3,test_new1a,type='prob') write.csv(data.frame(UniqueID=test_new1$UniqueID,Probability1=pred_ens_glm3[,2]), 'submission_ens_glm3.csv',row.names=F) #I am going to try to cluster the data, in the lesson it says as i underatand it #you cluster on train only, and use that , so I will do that rm(list=ls()) load("~/Documents/Analytics_edge/train_test_full.rdata") 
#first I need to scale the data, it is mostly for wordCount, as all others are 0/1 #or very close #and even before impute the word count for those that are 0 as that is #illogical to me load('modeling.rdata') train_predictors <- modeling[modeling$group==1,c(10,1:3,7,14)] #save mean for imputation meanx = mean(train_predictors$WordCount[train_predictors$WordCount !=0]) train_predictors$WordCount[train_predictors$WordCount ==0] <- meanx train_predictors$logwc <- log(train_predictors$WordCount) train_predictors <- train_predictors[-5] extra2 <- train_predictors %>% select(-logwc) %>% gather(variable,level,-UniqueID) %>% group_by(UniqueID,variable,level) %>% summarise(N=n()) %>% unite(key,variable,level,sep="_") %>% spread(key,N,fill=0) train_predictors1 <- left_join(train_predictors[c('UniqueID','logwc')],extra2) scale1 <- preProcess(train_predictors1) scaled_preds <- predict(scale1,newdata=train_predictors1) dist = dist(scaled_preds) hclust1 <- hclust(dist,method='ward.D') plot(hclust1) clusters=cutree(hclust1,k=4) kmeans1 <- kmeans(scaled_preds,4) table(kmeans1$cluster,train_new1$Popular) kclusters <- kmeans1$cluster #lest see if they make sense #modeling has the original data that came, so I can use that for a quick profile aux <- modeling[modeling$group==1,c(9,1,2,3)] aux$cluster = kclusters table(kclusters,train3$Popular) profile = aux %>% group_by(cluster) %>% gather(variable,level,-cluster) %>% group_by(cluster,variable,level) %>% summarise(N=n()) %>% group_by(cluster,variable) %>% mutate(P=N/sum(N)) %>% select(-N) %>% spread(cluster,P,fill=0) View(profile) #Because gbm is my best model, I will keep using gbm #and build 4 models, one for each cluster load('train4_files.rdata') n=25 registerDoMC(cores = 3) getDoParWorkers() for (i in 1:4) { trainx <- subset(train4,clusters==i) #get the data for that cluster #I need to take out the vars with no variability, or gbm fails dropx <- nearZeroVar(trainx,freq=100/0) set.seed(2006) #set a seed for reproductibility 
#define control parameters ctrlx<- trainControl(method = "boot", number = n, savePredictions = T, summaryFunction = twoClassSummary, classProbs = TRUE,verboseIter =T, index=createResample(trainx$Popular, n)) #run model modelx <- train(Popular~.,trainx[,-c(drop2,dropx)],method='gbm', distribution='bernoulli',trControl = ctrlx,metric='ROC') #save results assign(paste0('gbm_clust_',i),modelx) } save(list=ls(pattern='gbm_clust'),file='cluster_models.rdata') #they are worth trying, oI just need to combined them #first need to cluster the test data #for that I imputd first the missing word to mean, then did the log, then scaled test_new1b <- test_new1 test_predictors <- modeling[modeling$group==0,c(10,1:3,7,14)] test_predictors$WordCount[test_predictors$WordCount ==0] <- meanx test_predictors$logwc <- log(test_predictors$WordCount) test_predictors <- test_predictors[-5] extra2test <- test_predictors %>% select(-logwc) %>% gather(variable,level,-UniqueID) %>% group_by(UniqueID,variable,level) %>% summarise(N=n()) %>% unite(key,variable,level,sep="_") %>% spread(key,N,fill=0) #I am missing some variables, I need to add them missing <- setdiff(names(extra2),names(extra2test)) missing_df = data.frame(matrix(rep(0,dim(extra2test)[1]*length(missing)),dim(extra2test)[1],length(missing))) names(missing_df) <- missing missing_df$UniqueID = extra2test$UniqueID extra2test <- inner_join(extra2test,missing_df) test_predictors1 <- left_join(test_predictors[c('UniqueID','logwc')],extra2test) scaled_test_preds <- predict(scale1,newdata=test_predictors1) library(flexclust) clust.kcca = as.kcca(kmeans1,train_predictors1) test_clusters = predict(clust.kcca,newdata=test_predictors1) table(test_clusters)
# Example: http://rspatial.org/analysis/rst/3-spauto.html perWater = read.csv('../Zoo955_WeeklyAssignments/Week6_WIwater_NLCD.csv',stringsAsFactors = F)#### Counties data #### library(rgdal) counties = readOGR('../Lecture6_Overlays/Data/County_Boundaries_24K/County_Boundaries_24K.shp',layer='County_Boundaries_24K') # Add water to dataframe counties$water = perWater$perWater summary(counties) # Plot counties plot(counties) # Get centroid coordinates and plot xy <- coordinates(counties) points(xy, cex=2, pch=20) ### Find adjacent polygons # Neighbors will typically be created from a spatial polygon file. # Neighbors can be based on contiguity, distance, or the k nearest neighbors may be defined. Determine which polygons are “near”, and how to quantify that. Here we’ll use adjacency as criterion. To find adjacent polygons, we can use package `spdep` library(spdep) # Construct neighbours list from polygon list w <- poly2nb(counties, row.names= counties$OBJECTID) class(w) summary(w) # Plot the links between the polygons plot(counties) plot(w, xy, col='red4', add=TRUE) # An alternative method is to choose the k nearest points as neighbors k = knearneigh(xy, k = 3) k = knn2nb(k, row.names = counties$OBJECTID) plot(counties, main='k nearest neighbors = 3') plot(kk, xy, col='red4', add=TRUE) # Or distance neighbors d = dnearneigh(xy, d1 = 0, d2 = 50000, row.names = counties$OBJECTID) plot(counties, main='neighbors, distance = 50km') plot(d, xy, col='red4', add=TRUE) # Transform w into a spatial weights matrix. # A spatial weights matrix reflects the intensity of the geographic relationship between observations wm <- nb2mat(w, style='B') wm nb2mat(d, style='B') # Compute Moran's I # To do this we need to create a ‘listw’ type spatial weights object (instead of the matrix we used above). # To get the same value as above we use “style=’B’” to use binary (TRUE/FALSE) distance weights. 
ww <- nb2listw(w, style='B') ww dd <- nb2listw(d, style='B', zero.policy = T) print(dd, zero.policy=TRUE) kk <- nb2listw(k, style='B') kk # Now we can use the moran function. # I = (n sum_i sum_j w_ij (x_i - xbar) (x_j - xbar)) / (S0 sum_i (x_i - xbar)^2) # x = a numeric vector the same length as the neighbours list in listw # listw = a listw object created for example by nb2listw # n = number of zones # S0 = global sum of weights moran(x = counties$water,listw = ww, n=length(ww$neighbours), S0=Szero(ww)) moran(x = counties$water,listw = rr, n=length(rr$neighbours), S0=Szero(rr)) moran(x = counties$water,listw = dd, n=length(dd$neighbours), S0=Szero(dd),zero.policy = T) # Now we can test for significance. First analytically, using linear regression based logic and assumptions. moran.test(counties$water, ww, randomisation=FALSE) # Instead of the approach above you should use Monte Carlo simulation. # That is the preferred method (in fact, the only good method). # The way it works that the values are randomly assigned to the polygons, and the Moran’s I is computed. # This is repeated several times to establish a distribution of expected values. # The observed value of Moran’s I is then compared with the simulated distribution to see how likely # it is that the observed values could be considered a random draw. moran.mc(counties$water, ww, nsim=99) # null hypothesis states that the attribute being analyzed is randomly distributed among the features in your study area
/Lecture7_Autocorrelation/lecture7.R
no_license
kbraziun/Zoo955
R
false
false
3,459
r
# Example: http://rspatial.org/analysis/rst/3-spauto.html perWater = read.csv('../Zoo955_WeeklyAssignments/Week6_WIwater_NLCD.csv',stringsAsFactors = F)#### Counties data #### library(rgdal) counties = readOGR('../Lecture6_Overlays/Data/County_Boundaries_24K/County_Boundaries_24K.shp',layer='County_Boundaries_24K') # Add water to dataframe counties$water = perWater$perWater summary(counties) # Plot counties plot(counties) # Get centroid coordinates and plot xy <- coordinates(counties) points(xy, cex=2, pch=20) ### Find adjacent polygons # Neighbors will typically be created from a spatial polygon file. # Neighbors can be based on contiguity, distance, or the k nearest neighbors may be defined. Determine which polygons are “near”, and how to quantify that. Here we’ll use adjacency as criterion. To find adjacent polygons, we can use package `spdep` library(spdep) # Construct neighbours list from polygon list w <- poly2nb(counties, row.names= counties$OBJECTID) class(w) summary(w) # Plot the links between the polygons plot(counties) plot(w, xy, col='red4', add=TRUE) # An alternative method is to choose the k nearest points as neighbors k = knearneigh(xy, k = 3) k = knn2nb(k, row.names = counties$OBJECTID) plot(counties, main='k nearest neighbors = 3') plot(kk, xy, col='red4', add=TRUE) # Or distance neighbors d = dnearneigh(xy, d1 = 0, d2 = 50000, row.names = counties$OBJECTID) plot(counties, main='neighbors, distance = 50km') plot(d, xy, col='red4', add=TRUE) # Transform w into a spatial weights matrix. # A spatial weights matrix reflects the intensity of the geographic relationship between observations wm <- nb2mat(w, style='B') wm nb2mat(d, style='B') # Compute Moran's I # To do this we need to create a ‘listw’ type spatial weights object (instead of the matrix we used above). # To get the same value as above we use “style=’B’” to use binary (TRUE/FALSE) distance weights. 
ww <- nb2listw(w, style='B') ww dd <- nb2listw(d, style='B', zero.policy = T) print(dd, zero.policy=TRUE) kk <- nb2listw(k, style='B') kk # Now we can use the moran function. # I = (n sum_i sum_j w_ij (x_i - xbar) (x_j - xbar)) / (S0 sum_i (x_i - xbar)^2) # x = a numeric vector the same length as the neighbours list in listw # listw = a listw object created for example by nb2listw # n = number of zones # S0 = global sum of weights moran(x = counties$water,listw = ww, n=length(ww$neighbours), S0=Szero(ww)) moran(x = counties$water,listw = rr, n=length(rr$neighbours), S0=Szero(rr)) moran(x = counties$water,listw = dd, n=length(dd$neighbours), S0=Szero(dd),zero.policy = T) # Now we can test for significance. First analytically, using linear regression based logic and assumptions. moran.test(counties$water, ww, randomisation=FALSE) # Instead of the approach above you should use Monte Carlo simulation. # That is the preferred method (in fact, the only good method). # The way it works that the values are randomly assigned to the polygons, and the Moran’s I is computed. # This is repeated several times to establish a distribution of expected values. # The observed value of Moran’s I is then compared with the simulated distribution to see how likely # it is that the observed values could be considered a random draw. moran.mc(counties$water, ww, nsim=99) # null hypothesis states that the attribute being analyzed is randomly distributed among the features in your study area
\alias{gdkDisplayPointerUngrab} \name{gdkDisplayPointerUngrab} \title{gdkDisplayPointerUngrab} \description{Release any pointer grab.} \usage{gdkDisplayPointerUngrab(object, time. = "GDK_CURRENT_TIME")} \arguments{ \item{\code{object}}{[\code{\link{GdkDisplay}}] a \code{\link{GdkDisplay}}.} \item{\code{time.}}{[numeric] a timestap (e.g. \code{GDK_CURRENT_TIME}).} } \details{ Since 2.2} \author{Derived by RGtkGen from GTK+ documentation} \keyword{internal}
/man/gdkDisplayPointerUngrab.Rd
no_license
cran/RGtk2.10
R
false
false
464
rd
\alias{gdkDisplayPointerUngrab} \name{gdkDisplayPointerUngrab} \title{gdkDisplayPointerUngrab} \description{Release any pointer grab.} \usage{gdkDisplayPointerUngrab(object, time. = "GDK_CURRENT_TIME")} \arguments{ \item{\code{object}}{[\code{\link{GdkDisplay}}] a \code{\link{GdkDisplay}}.} \item{\code{time.}}{[numeric] a timestap (e.g. \code{GDK_CURRENT_TIME}).} } \details{ Since 2.2} \author{Derived by RGtkGen from GTK+ documentation} \keyword{internal}
#' @title Additive super-efficiency DEA model. #' #' @description Solve the additive super-efficiency model proposed by Du, Liang #' and Zhu (2010). It is an extension of the SBM super-efficiency to the additive #' DEA model. #' #' @usage model_addsupereff(datadea, #' dmu_eval = NULL, #' dmu_ref = NULL, #' orientation = NULL, #' weight_slack_i = NULL, #' weight_slack_o = NULL, #' rts = c("crs", "vrs", "nirs", "ndrs", "grs"), #' L = 1, #' U = 1, #' compute_target = TRUE, #' returnlp = FALSE, #' ...) #' #' @param datadea A \code{deadata} object with \code{n} DMUs, \code{m} inputs and \code{s} outputs. #' @param dmu_eval A numeric vector containing which DMUs have to be evaluated. #' If \code{NULL} (default), all DMUs are considered. #' @param dmu_ref A numeric vector containing which DMUs are the evaluation reference set. #' If \code{NULL} (default), all DMUs are considered. #' @param orientation This parameter is either \code{NULL} (default) or a string, equal to #' "io" (input-oriented) or "oo" (output-oriented). It is used to modify the weight slacks. #' If input-oriented, \code{weight_slack_o} are taken 0. #' If output-oriented, \code{weight_slack_i} are taken 0. #' @param weight_slack_i A value, vector of length \code{m}, or matrix \code{m} x #' \code{ne} (where \code{ne} is the length of \code{dmu_eval}) #' with the weights of the input super-slacks (\code{t_input}). #' If 0, output-oriented. #' If \code{weight_slack_i} is the matrix of the inverses of inputs of DMUS in #' \code{dmu_eval} (default), the model is unit invariant. #' @param weight_slack_o A value, vector of length \code{s}, or matrix \code{s} x #' \code{ne} (where \code{ne} is the length of \code{dmu_eval}) #' with the weights of the output super-slacks (\code{t_output}). #' If 0, input-oriented. #' If \code{weight_slack_o} is the matrix of the inverses of outputs of DMUS in #' \code{dmu_eval} (default), the model is unit invariant. 
#' @param rts A string, determining the type of returns to scale, equal to "crs" #' (constant), "vrs" (variable), "nirs" (non-increasing), "ndrs" (non-decreasing) #' or "grs" (generalized). #' @param L Lower bound for the generalized returns to scale (grs). #' @param U Upper bound for the generalized returns to scale (grs). #' @param compute_target Logical. If it is \code{TRUE}, it computes targets, #' projections and slacks. #' @param returnlp Logical. If it is \code{TRUE}, it returns the linear problems #' (objective function and constraints). #' @param ... Ignored, for compatibility issues. #' #' @author #' \strong{Vicente Coll-Serrano} (\email{vicente.coll@@uv.es}). #' \emph{Quantitative Methods for Measuring Culture (MC2). Applied Economics.} #' #' \strong{Vicente Bolós} (\email{vicente.bolos@@uv.es}). #' \emph{Department of Business Mathematics} #' #' \strong{Rafael Benítez} (\email{rafael.suarez@@uv.es}). #' \emph{Department of Business Mathematics} #' #' University of Valencia (Spain) #' #' @references #' Du, J.; Liang, L.; Zhu, J. (2010). "A Slacks-based Measure of Super-efficiency #' in Data Envelopment Analysis. A Comment", European Journal of Operational Research, #' 204, 694-697. \doi{10.1016/j.ejor.2009.12.007} #' #' Zhu, J. (2014). Quantitative Models for Performance Evaluation and Benchmarking. #' Data Envelopment Analysis with Spreadsheets. 3rd Edition Springer, New York. 
#' \doi{10.1007/978-3-319-06647-9} #' #' @examples #' # Replication of results in Du, Liang and Zhu (2010, Table 6, p.696) #' data("Power_plants") #' Power_plants <- make_deadata(Power_plants, #' ni = 4, #' no = 2) #' result <- model_addsupereff(Power_plants, #' rts = "crs") #' efficiencies(result) #' #' @seealso \code{\link{model_additive}}, \code{\link{model_supereff}}, #' \code{\link{model_sbmsupereff}} #' #' @import lpSolve #' #' @export model_addsupereff <- function(datadea, dmu_eval = NULL, dmu_ref = NULL, orientation = NULL, weight_slack_i = NULL, weight_slack_o = NULL, rts = c("crs", "vrs", "nirs", "ndrs", "grs"), L = 1, U = 1, compute_target = TRUE, returnlp = FALSE, ...) { # Cheking whether datadea is of class "deadata" or not... if (!is.deadata(datadea)) { stop("Data should be of class deadata. Run make_deadata function first!") } # Checking rts rts <- tolower(rts) rts <- match.arg(rts) if (!is.null(datadea$ud_inputs) || !is.null(datadea$ud_outputs)) { warning("This model does not take into account the undesirable feature for inputs/outputs.") } if (rts == "grs") { if (L > 1) { stop("L must be <= 1.") } if (U < 1) { stop("U must be >= 1.") } } dmunames <- datadea$dmunames nd <- length(dmunames) # number of dmus if (is.null(dmu_eval)) { dmu_eval <- 1:nd } else if (!all(dmu_eval %in% (1:nd))) { stop("Invalid set of DMUs to be evaluated (dmu_eval).") } names(dmu_eval) <- dmunames[dmu_eval] nde <- length(dmu_eval) if (is.null(dmu_ref)) { dmu_ref <- 1:nd } else if (!all(dmu_ref %in% (1:nd))) { stop("Invalid set of reference DMUs (dmu_ref).") } names(dmu_ref) <- dmunames[dmu_ref] ndr <- length(dmu_ref) input <- datadea$input output <- datadea$output inputnames <- rownames(input) outputnames <- rownames(output) ni <- nrow(input) # number of inputs no <- nrow(output) # number of outputs # Zeros in input and output data. 
Case 2 (Tone 2001) nzimin <- apply(input, MARGIN = 1, function(x) min(x[x > 0])) / 100 nzomin <- apply(output, MARGIN = 1, function(x) min(x[x > 0])) / 100 for (ii in dmu_eval) { input[which(input[, ii] == 0), ii] <- nzimin[which(input[, ii] == 0)] output[which(output[, ii] == 0), ii] <- nzomin[which(output[, ii] == 0)] } inputref <- matrix(input[, dmu_ref], nrow = ni) outputref <- matrix(output[, dmu_ref], nrow = no) nc_inputs <- datadea$nc_inputs nc_outputs <- datadea$nc_outputs nnci <- length(nc_inputs) nnco <- length(nc_outputs) # Checking weights if (is.null(weight_slack_i)) { weight_slack_i <- matrix(1 / input[, dmu_eval], nrow = ni) / (ni + no - nnci - nnco) } else { if (is.matrix(weight_slack_i)) { if ((nrow(weight_slack_i) != ni) || (ncol(weight_slack_i) != nde)) { stop("Invalid matrix of weights of the input slacks (number of inputs x number of evaluated DMUs).") } } else if ((length(weight_slack_i) == 1) || (length(weight_slack_i) == ni)) { weight_slack_i <- matrix(weight_slack_i, nrow = ni, ncol = nde) } else { stop("Invalid vector of weights of the input slacks.") } } weight_slack_i[nc_inputs, ] <- 0 if ((!is.null(orientation)) && (orientation == "oo")) { weight_slack_i <- matrix(0, nrow = ni, ncol = nde) } rownames(weight_slack_i) <- inputnames colnames(weight_slack_i) <- dmunames[dmu_eval] if (is.null(weight_slack_o)) { weight_slack_o <- matrix(1 / output[, dmu_eval], nrow = no) / (ni + no - nnci - nnco) } else { if (is.matrix(weight_slack_o)) { if ((nrow(weight_slack_o) != no) || (ncol(weight_slack_o) != nde)) { stop("Invalid matrix of weights of the output slacks (number of outputs x number of evaluated DMUs).") } } else if ((length(weight_slack_o) == 1) || (length(weight_slack_o) == no)) { weight_slack_o <- matrix(weight_slack_o, nrow = no, ncol = nde) } else { stop("Invalid vector of weights of the output slacks.") } } weight_slack_o[nc_outputs, ] <- 0 if ((!is.null(orientation)) && (orientation == "io")) { weight_slack_o <- matrix(0, nrow = no, 
ncol = nde) } rownames(weight_slack_o) <- outputnames colnames(weight_slack_o) <- dmunames[dmu_eval] target_input <- NULL target_output <- NULL project_input <- NULL project_output <- NULL slack_input <- NULL slack_output <- NULL DMU <- vector(mode = "list", length = nde) names(DMU) <- dmunames[dmu_eval] ########################### if (rts == "crs") { f.con.rs <- NULL f.dir.rs <- NULL f.rhs.rs <- NULL } else { f.con.rs <- cbind(matrix(1, nrow = 1, ncol = ndr), matrix(0, nrow = 1, ncol = ni + no)) if (rts == "vrs") { f.dir.rs <- "=" f.rhs.rs <- 1 } else if (rts == "nirs") { f.dir.rs <- "<=" f.rhs.rs <- 1 } else if (rts == "ndrs") { f.dir.rs <- ">=" f.rhs.rs <- 1 } else { f.con.rs <- rbind(f.con.rs, f.con.rs) f.dir.rs <- c(">=", "<=") f.rhs.rs <- c(L, U) } } # Constraints matrix f.con.1 <- cbind(inputref, -diag(ni), matrix(0, nrow = ni, ncol = no)) f.con.2 <- cbind(outputref, matrix(0, nrow = no, ncol = ni), diag(no)) for (i in 1:nde) { ii <- dmu_eval[i] w0i <- which(weight_slack_i[, i] == 0) nw0i <- length(w0i) w0o <- which(weight_slack_o[, i] == 0) nw0o <- length(w0o) # Objective function coefficients f.obj <- c(rep(0, ndr), weight_slack_i[, i], weight_slack_o[, i]) # Constraints matrix f.con.se <- rep(0, ndr) f.con.se[dmu_ref == ii] <- 1 f.con.se <- c(f.con.se, rep(0, ni + no)) f.con.w0 <- matrix(0, nrow = (nw0i + nw0o), ncol = (ndr + ni + no)) f.con.w0[, ndr + c(w0i, ni + w0o)] <- diag(nw0i + nw0o) f.con <- rbind(f.con.1, f.con.2, f.con.se, f.con.w0, f.con.rs) # Directions vector f.dir <- c(rep("<=", ni), rep(">=", no), rep("=", 1 + nw0i + nw0o), f.dir.rs) f.dir[nc_inputs] <- "=" f.dir[ni + nc_outputs] <- "=" # Right hand side vector f.rhs <- c(input[, ii], output[, ii], rep(0, 1 + nw0i + nw0o), f.rhs.rs) if (returnlp) { lambda <- rep(0, ndr) names(lambda) <- dmunames[dmu_ref] t_input <- rep(0, ni) names(t_input) <- inputnames t_output <- rep(0, no) names(t_output) <- outputnames var <- list(lambda = lambda, t_input = t_input, t_output = t_output) DMU[[i]] <- 
list(direction = "min", objective.in = f.obj, const.mat = f.con, const.dir = f.dir, const.rhs = f.rhs, var = var) } else { res <- lp("min", f.obj, f.con, f.dir, f.rhs) if (res$status == 0) { objval <- res$objval lambda <- res$solution[1 : ndr] names(lambda) <- dmunames[dmu_ref] t_input <- res$solution[(ndr + 1) : (ndr + ni)] # input superslack names(t_input) <- inputnames t_output <- res$solution[(ndr + ni + 1) : (ndr + ni + no)] # output superslack names(t_output) <- outputnames delta_num <- 1 + sum(t_input / input[, ii]) / (ni - nnci) delta_den <- 1 - sum(t_output / output[, ii]) / (no - nnco) delta <- delta_num / delta_den if (compute_target) { target_input <- as.vector(inputref %*% lambda) names(target_input) <- inputnames target_output <- as.vector(outputref %*% lambda) names(target_output) <- outputnames project_input <- input[, ii] + t_input names(project_input) <- inputnames project_output <- output[, ii] - t_output names(project_output) <- outputnames slack_input <- project_input - target_input names(slack_input) <- inputnames slack_output <- target_output - project_output names(slack_output) <- outputnames } } else { delta <- NA objval <- NA lambda <- NA t_input <- NA t_output <- NA if (compute_target) { target_input <- NA target_output <- NA project_input <- NA project_output <- NA slack_input <- NA slack_output <- NA } } DMU[[i]] <- list(delta = delta, objval = objval, lambda = lambda, t_input = t_input, t_output = t_output, slack_input = slack_input, slack_output = slack_output, project_input = project_input, project_output = project_output, target_input = target_input, target_output = target_output) } } deaOutput <- list(modelname = "addsupereff", rts = rts, L = L, U = U, DMU = DMU, data = datadea, dmu_eval = dmu_eval, dmu_ref = dmu_ref, weight_slack_i = weight_slack_i, weight_slack_o = weight_slack_o, orientation = NA) return(structure(deaOutput, class = "dea")) }
/R/model_addsupereff.R
no_license
cran/deaR
R
false
false
13,438
r
#' @title Additive super-efficiency DEA model. #' #' @description Solve the additive super-efficiency model proposed by Du, Liang #' and Zhu (2010). It is an extension of the SBM super-efficiency to the additive #' DEA model. #' #' @usage model_addsupereff(datadea, #' dmu_eval = NULL, #' dmu_ref = NULL, #' orientation = NULL, #' weight_slack_i = NULL, #' weight_slack_o = NULL, #' rts = c("crs", "vrs", "nirs", "ndrs", "grs"), #' L = 1, #' U = 1, #' compute_target = TRUE, #' returnlp = FALSE, #' ...) #' #' @param datadea A \code{deadata} object with \code{n} DMUs, \code{m} inputs and \code{s} outputs. #' @param dmu_eval A numeric vector containing which DMUs have to be evaluated. #' If \code{NULL} (default), all DMUs are considered. #' @param dmu_ref A numeric vector containing which DMUs are the evaluation reference set. #' If \code{NULL} (default), all DMUs are considered. #' @param orientation This parameter is either \code{NULL} (default) or a string, equal to #' "io" (input-oriented) or "oo" (output-oriented). It is used to modify the weight slacks. #' If input-oriented, \code{weight_slack_o} are taken 0. #' If output-oriented, \code{weight_slack_i} are taken 0. #' @param weight_slack_i A value, vector of length \code{m}, or matrix \code{m} x #' \code{ne} (where \code{ne} is the length of \code{dmu_eval}) #' with the weights of the input super-slacks (\code{t_input}). #' If 0, output-oriented. #' If \code{weight_slack_i} is the matrix of the inverses of inputs of DMUS in #' \code{dmu_eval} (default), the model is unit invariant. #' @param weight_slack_o A value, vector of length \code{s}, or matrix \code{s} x #' \code{ne} (where \code{ne} is the length of \code{dmu_eval}) #' with the weights of the output super-slacks (\code{t_output}). #' If 0, input-oriented. #' If \code{weight_slack_o} is the matrix of the inverses of outputs of DMUS in #' \code{dmu_eval} (default), the model is unit invariant. 
#' @param rts A string, determining the type of returns to scale, equal to "crs" #' (constant), "vrs" (variable), "nirs" (non-increasing), "ndrs" (non-decreasing) #' or "grs" (generalized). #' @param L Lower bound for the generalized returns to scale (grs). #' @param U Upper bound for the generalized returns to scale (grs). #' @param compute_target Logical. If it is \code{TRUE}, it computes targets, #' projections and slacks. #' @param returnlp Logical. If it is \code{TRUE}, it returns the linear problems #' (objective function and constraints). #' @param ... Ignored, for compatibility issues. #' #' @author #' \strong{Vicente Coll-Serrano} (\email{vicente.coll@@uv.es}). #' \emph{Quantitative Methods for Measuring Culture (MC2). Applied Economics.} #' #' \strong{Vicente Bolós} (\email{vicente.bolos@@uv.es}). #' \emph{Department of Business Mathematics} #' #' \strong{Rafael Benítez} (\email{rafael.suarez@@uv.es}). #' \emph{Department of Business Mathematics} #' #' University of Valencia (Spain) #' #' @references #' Du, J.; Liang, L.; Zhu, J. (2010). "A Slacks-based Measure of Super-efficiency #' in Data Envelopment Analysis. A Comment", European Journal of Operational Research, #' 204, 694-697. \doi{10.1016/j.ejor.2009.12.007} #' #' Zhu, J. (2014). Quantitative Models for Performance Evaluation and Benchmarking. #' Data Envelopment Analysis with Spreadsheets. 3rd Edition Springer, New York. 
#' \doi{10.1007/978-3-319-06647-9} #' #' @examples #' # Replication of results in Du, Liang and Zhu (2010, Table 6, p.696) #' data("Power_plants") #' Power_plants <- make_deadata(Power_plants, #' ni = 4, #' no = 2) #' result <- model_addsupereff(Power_plants, #' rts = "crs") #' efficiencies(result) #' #' @seealso \code{\link{model_additive}}, \code{\link{model_supereff}}, #' \code{\link{model_sbmsupereff}} #' #' @import lpSolve #' #' @export model_addsupereff <- function(datadea, dmu_eval = NULL, dmu_ref = NULL, orientation = NULL, weight_slack_i = NULL, weight_slack_o = NULL, rts = c("crs", "vrs", "nirs", "ndrs", "grs"), L = 1, U = 1, compute_target = TRUE, returnlp = FALSE, ...) { # Cheking whether datadea is of class "deadata" or not... if (!is.deadata(datadea)) { stop("Data should be of class deadata. Run make_deadata function first!") } # Checking rts rts <- tolower(rts) rts <- match.arg(rts) if (!is.null(datadea$ud_inputs) || !is.null(datadea$ud_outputs)) { warning("This model does not take into account the undesirable feature for inputs/outputs.") } if (rts == "grs") { if (L > 1) { stop("L must be <= 1.") } if (U < 1) { stop("U must be >= 1.") } } dmunames <- datadea$dmunames nd <- length(dmunames) # number of dmus if (is.null(dmu_eval)) { dmu_eval <- 1:nd } else if (!all(dmu_eval %in% (1:nd))) { stop("Invalid set of DMUs to be evaluated (dmu_eval).") } names(dmu_eval) <- dmunames[dmu_eval] nde <- length(dmu_eval) if (is.null(dmu_ref)) { dmu_ref <- 1:nd } else if (!all(dmu_ref %in% (1:nd))) { stop("Invalid set of reference DMUs (dmu_ref).") } names(dmu_ref) <- dmunames[dmu_ref] ndr <- length(dmu_ref) input <- datadea$input output <- datadea$output inputnames <- rownames(input) outputnames <- rownames(output) ni <- nrow(input) # number of inputs no <- nrow(output) # number of outputs # Zeros in input and output data. 
Case 2 (Tone 2001) nzimin <- apply(input, MARGIN = 1, function(x) min(x[x > 0])) / 100 nzomin <- apply(output, MARGIN = 1, function(x) min(x[x > 0])) / 100 for (ii in dmu_eval) { input[which(input[, ii] == 0), ii] <- nzimin[which(input[, ii] == 0)] output[which(output[, ii] == 0), ii] <- nzomin[which(output[, ii] == 0)] } inputref <- matrix(input[, dmu_ref], nrow = ni) outputref <- matrix(output[, dmu_ref], nrow = no) nc_inputs <- datadea$nc_inputs nc_outputs <- datadea$nc_outputs nnci <- length(nc_inputs) nnco <- length(nc_outputs) # Checking weights if (is.null(weight_slack_i)) { weight_slack_i <- matrix(1 / input[, dmu_eval], nrow = ni) / (ni + no - nnci - nnco) } else { if (is.matrix(weight_slack_i)) { if ((nrow(weight_slack_i) != ni) || (ncol(weight_slack_i) != nde)) { stop("Invalid matrix of weights of the input slacks (number of inputs x number of evaluated DMUs).") } } else if ((length(weight_slack_i) == 1) || (length(weight_slack_i) == ni)) { weight_slack_i <- matrix(weight_slack_i, nrow = ni, ncol = nde) } else { stop("Invalid vector of weights of the input slacks.") } } weight_slack_i[nc_inputs, ] <- 0 if ((!is.null(orientation)) && (orientation == "oo")) { weight_slack_i <- matrix(0, nrow = ni, ncol = nde) } rownames(weight_slack_i) <- inputnames colnames(weight_slack_i) <- dmunames[dmu_eval] if (is.null(weight_slack_o)) { weight_slack_o <- matrix(1 / output[, dmu_eval], nrow = no) / (ni + no - nnci - nnco) } else { if (is.matrix(weight_slack_o)) { if ((nrow(weight_slack_o) != no) || (ncol(weight_slack_o) != nde)) { stop("Invalid matrix of weights of the output slacks (number of outputs x number of evaluated DMUs).") } } else if ((length(weight_slack_o) == 1) || (length(weight_slack_o) == no)) { weight_slack_o <- matrix(weight_slack_o, nrow = no, ncol = nde) } else { stop("Invalid vector of weights of the output slacks.") } } weight_slack_o[nc_outputs, ] <- 0 if ((!is.null(orientation)) && (orientation == "io")) { weight_slack_o <- matrix(0, nrow = no, 
ncol = nde) } rownames(weight_slack_o) <- outputnames colnames(weight_slack_o) <- dmunames[dmu_eval] target_input <- NULL target_output <- NULL project_input <- NULL project_output <- NULL slack_input <- NULL slack_output <- NULL DMU <- vector(mode = "list", length = nde) names(DMU) <- dmunames[dmu_eval] ########################### if (rts == "crs") { f.con.rs <- NULL f.dir.rs <- NULL f.rhs.rs <- NULL } else { f.con.rs <- cbind(matrix(1, nrow = 1, ncol = ndr), matrix(0, nrow = 1, ncol = ni + no)) if (rts == "vrs") { f.dir.rs <- "=" f.rhs.rs <- 1 } else if (rts == "nirs") { f.dir.rs <- "<=" f.rhs.rs <- 1 } else if (rts == "ndrs") { f.dir.rs <- ">=" f.rhs.rs <- 1 } else { f.con.rs <- rbind(f.con.rs, f.con.rs) f.dir.rs <- c(">=", "<=") f.rhs.rs <- c(L, U) } } # Constraints matrix f.con.1 <- cbind(inputref, -diag(ni), matrix(0, nrow = ni, ncol = no)) f.con.2 <- cbind(outputref, matrix(0, nrow = no, ncol = ni), diag(no)) for (i in 1:nde) { ii <- dmu_eval[i] w0i <- which(weight_slack_i[, i] == 0) nw0i <- length(w0i) w0o <- which(weight_slack_o[, i] == 0) nw0o <- length(w0o) # Objective function coefficients f.obj <- c(rep(0, ndr), weight_slack_i[, i], weight_slack_o[, i]) # Constraints matrix f.con.se <- rep(0, ndr) f.con.se[dmu_ref == ii] <- 1 f.con.se <- c(f.con.se, rep(0, ni + no)) f.con.w0 <- matrix(0, nrow = (nw0i + nw0o), ncol = (ndr + ni + no)) f.con.w0[, ndr + c(w0i, ni + w0o)] <- diag(nw0i + nw0o) f.con <- rbind(f.con.1, f.con.2, f.con.se, f.con.w0, f.con.rs) # Directions vector f.dir <- c(rep("<=", ni), rep(">=", no), rep("=", 1 + nw0i + nw0o), f.dir.rs) f.dir[nc_inputs] <- "=" f.dir[ni + nc_outputs] <- "=" # Right hand side vector f.rhs <- c(input[, ii], output[, ii], rep(0, 1 + nw0i + nw0o), f.rhs.rs) if (returnlp) { lambda <- rep(0, ndr) names(lambda) <- dmunames[dmu_ref] t_input <- rep(0, ni) names(t_input) <- inputnames t_output <- rep(0, no) names(t_output) <- outputnames var <- list(lambda = lambda, t_input = t_input, t_output = t_output) DMU[[i]] <- 
list(direction = "min", objective.in = f.obj, const.mat = f.con, const.dir = f.dir, const.rhs = f.rhs, var = var) } else { res <- lp("min", f.obj, f.con, f.dir, f.rhs) if (res$status == 0) { objval <- res$objval lambda <- res$solution[1 : ndr] names(lambda) <- dmunames[dmu_ref] t_input <- res$solution[(ndr + 1) : (ndr + ni)] # input superslack names(t_input) <- inputnames t_output <- res$solution[(ndr + ni + 1) : (ndr + ni + no)] # output superslack names(t_output) <- outputnames delta_num <- 1 + sum(t_input / input[, ii]) / (ni - nnci) delta_den <- 1 - sum(t_output / output[, ii]) / (no - nnco) delta <- delta_num / delta_den if (compute_target) { target_input <- as.vector(inputref %*% lambda) names(target_input) <- inputnames target_output <- as.vector(outputref %*% lambda) names(target_output) <- outputnames project_input <- input[, ii] + t_input names(project_input) <- inputnames project_output <- output[, ii] - t_output names(project_output) <- outputnames slack_input <- project_input - target_input names(slack_input) <- inputnames slack_output <- target_output - project_output names(slack_output) <- outputnames } } else { delta <- NA objval <- NA lambda <- NA t_input <- NA t_output <- NA if (compute_target) { target_input <- NA target_output <- NA project_input <- NA project_output <- NA slack_input <- NA slack_output <- NA } } DMU[[i]] <- list(delta = delta, objval = objval, lambda = lambda, t_input = t_input, t_output = t_output, slack_input = slack_input, slack_output = slack_output, project_input = project_input, project_output = project_output, target_input = target_input, target_output = target_output) } } deaOutput <- list(modelname = "addsupereff", rts = rts, L = L, U = U, DMU = DMU, data = datadea, dmu_eval = dmu_eval, dmu_ref = dmu_ref, weight_slack_i = weight_slack_i, weight_slack_o = weight_slack_o, orientation = NA) return(structure(deaOutput, class = "dea")) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multiply.R \name{multiply} \alias{multiply} \title{Take the scalar product of two values} \usage{ multiply(x, y) } \arguments{ \item{x}{a numeric value} \item{y}{a numeric value} } \value{ the product of x and y } \description{ Take the scalar product of two values } \examples{ multiply(3, 4) multiply(4, 3) }
/mathpackage/man/multiply.Rd
no_license
anhnguyendepocen/R-documentation
R
false
true
390
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multiply.R \name{multiply} \alias{multiply} \title{Take the scalar product of two values} \usage{ multiply(x, y) } \arguments{ \item{x}{a numeric value} \item{y}{a numeric value} } \value{ the product of x and y } \description{ Take the scalar product of two values } \examples{ multiply(3, 4) multiply(4, 3) }
.onAttach <- function(...) { packageStartupMessage("\nCAISEr version 1.0.16 Not compatible with code developed for 0.X.Y versions If needed, please visit https://git.io/fjFwf for version 0.3.3") }
/R/onAttach.R
no_license
fcampelo/CAISEr
R
false
false
199
r
.onAttach <- function(...) { packageStartupMessage("\nCAISEr version 1.0.16 Not compatible with code developed for 0.X.Y versions If needed, please visit https://git.io/fjFwf for version 0.3.3") }
# install.packages("devtools") # devtools::install_github("SWotherspoon/SGAT") # devtools::install_github("SWotherspoon/BAStag") library(SGAT) library(BAStag) library(raster) library(maptools) library(readr) # Main algorithm used to calculate the likelihoods TwilightFreeModel <- function(slices, alpha,beta, x0,fixed=FALSE,dt=NULL,threshold=5,zenith=96, spread = 2) { time <- .POSIXct(sapply(slices,function(d) mean(d$Date)),"GMT") ## Times (hours) between observations if(is.null(dt)) dt <- diff(as.numeric(time)/3600) ## Fixed locations fixed <- rep_len(fixed,length.out=length(slices)) ## Contribution to log posterior from each x location logpk <- function(k,x) { n <- nrow(x) logl <- double(n) ss <- solar(slices[[k]]$Date) obsDay <- (slices[[k]]$Light) >= threshold ## Loop over location for(i in seq_len(n)) { ## Compute for each x the time series of zeniths expDay <- zenith(ss,x[i,1],x[i,2]) <= zenith ## Some comparison to the observed light -> is L=0 (ie logl=-Inf) if(any(obsDay & !expDay)) { logl[i] <- -Inf } else { count <- sum(expDay & !obsDay) logl[i] <- dgamma(count,alpha[1],alpha[2],log=TRUE) } } ## Return sum of likelihood + prior logl + logp0(k,x, spread = spread) } ## Behavioural contribution to the log posterior logbk <- function(k,x1,x2) { spd <- pmax.int(gcDist(x1,x2), 1e-06)/dt[k] dgamma(spd,beta[1L],beta[2L],log=TRUE) } list( logpk=logpk, logbk=logbk, fixed=fixed, x0=x0, time=time, alpha=alpha, beta=beta) } logp0 <- function(k,x, spread = spread) { x[,1] <- x[,1]%%360 tt <- median(d.seg[[k]]$Temp, na.rm = TRUE) if(is.na(tt)) { 0 } else { dnorm(tt,extract(sst[[indices[k]]],x),spread,log=T) } } # TDR light, depth, SST previously resamples to 2 min intervals tag <- "https://raw.githubusercontent.com/ABindoff/geolocationHMM/master/TDR86372ds.csv" d.lig <- read_delim(tag, delim = ",", skip = 0, col_names = c("Date", "Light","Depth","Temp")) head(d.lig) # filtered GPS positions gps <- 
"https://raw.githubusercontent.com/ABindoff/geolocationHMM/master/86372_filteredGPS.csv" gdat <- read_csv(gps, skip = 0, col_names = c("Date", "Lon","Lat")) head(gdat) compare.light <- function(day, lon, lat, zenith = 96, offset = 0, verbose = T){ single.day <- subset(d.lig, d.lig$Date >= as.POSIXct(day, tz = "GMT")+offset*60*60 & d.lig$Date < as.POSIXct(day+24*60*60, tz = "GMT")+offset*60*60) d.sim <- zenithSimulate(single.day$Date, lon = rep(lon, length(single.day$Date)), lat = rep(lat, length(single.day$Date)), single.day$Date) d.sim$Light0 <- ifelse(d.sim$Zenith<zenith,max(single.day$Light, na.rm = T),1) d.sim$Light <- d.sim$Light0 thresh <- max(single.day$Light[which(d.sim$Zenith >= zenith)]) if(verbose){ plot(single.day$Date, single.day$Light, col = "red",type = "l", lwd = 2, ylim = c(0,max(single.day$Light, na.rm = T)), xlab = day, main = cbind(lon, lat)) lines(d.sim$Date, d.sim$Light, lwd = 2) abline( h = thresh, lty = 2) print(paste0("max light in night window: ", thresh, " assuming a solar zenith angle of: ", zenith)) } return(thresh) } # calibrate on first 7 days GPS data thresh <- 5 zen <- 97 day <- as.POSIXct("2009-10-29 00:00:00", "UTC") for(i in 1:7){ dd <- day + i*60*60 med.lon <- median(subset(gdat$Lon, gdat$Date >= dd & gdat$Date <= dd+24*60*60), na.rm = T) med.lat <- median(subset(gdat$Lat, gdat$Date >= dd & gdat$Date <= dd+24*60*60), na.rm = T) j <- compare.light(day+i*60*60, med.lon, med.lat, zenith = zen, offset = 5.5, verbose = F) if(j > thresh){ thresh <- j } } #thresh <- 129.2 zen <- 97 # figure showing the whole light record for the period chosen offset =5 d.lig <- subset(d.lig,Date >= as.POSIXct("2009-10-28 00:00:01",tz = "UTC") & Date < as.POSIXct("2010-01-20 15:00:01",tz = "UTC")) lightImage(d.lig, offset = 5, zlim = c(0,130)) # the example in the paper does not use SST d.lig$Temp <- NA ## Define segment by date seg <- floor((as.numeric(d.lig$Date)- as.numeric(min(d.lig$Date)))/(24*60*60)) ## Split into segments d.seg <- 
split(d.lig,seg) ## Drop first and last d.seg <- d.seg[-c(1,length(d.seg))] indices <- NA ## function to plot GLS track over GPS track drawTracks <- function(lon1, lat1, lon2, lat2, col1 = "firebrick", col2 = "dodgerblue", main = ""){ xlm <- range(append(lon1, lon2)) ylm <- range(append(lat1, lat2)) data(wrld_simpl) plot(wrld_simpl,xlim=xlm,ylim=ylm, col="grey90",border="grey80", main = main, axes = T) lines(cbind(lon1, lat1), col = col1) lines(cbind(lon2, lat2), col = col2) } # create the grid library(maptools) data("wrld_simpl") ## Cells per degree, define Lat min and max (xmn, xmx) and Lon (ymn, ymx) ## adjust nrows, ncols to get a feel for the sensitivity at difference cell sizes grid <- raster(nrows = 30, ncols = 70, xmn=45, xmx=225, ymn=-65, ymx=55, crs=proj4string(wrld_simpl)) grid <- rasterize(wrld_simpl,grid,1,silent=TRUE) ## Mask must be non-zero for sea grid <- is.na(grid) foo <- function(params) { ## Make some initial track we only use the start and end points from x0 <- matrix(0, length(d.seg), 2) x0[1, ] <- burrow1 x0[length(d.seg),] <- burrow2 # if the starting point is defined by burrow, make the first entry 'T' # and if the animal returns to the burrow, make the last entry 'T' also fixed <- c(T, logical(length(d.seg) - 2), T) # specify the model model <- TwilightFreeModel( d.seg, alpha = c(params[1], params[2]), beta = c(params[3], params[4]), x0 = x0, zenith = zen, threshold = thresh, fixed = fixed, spread = 2 ) # fit the model using the forward-backward algorithm, SGAT::essie fit <- SGAT::essie(model, grid, epsilon1 = 1.0E-4, epsilon2 = params[5]) # summarise daily locations for GPS and plot both estimated tracks trip <- data.frame(as.POSIXct(strptime(essieMode(fit)$time, "%Y-%m-%d")), essieMode(fit)$x) colnames(trip) <- c("Date", "Lon", "Lat") gdat$Day <- as.POSIXct(strptime(gdat$Date, "%Y-%m-%d")) seg <- floor((as.numeric(gdat$Day) - as.numeric(min(gdat$Day))) / (24 * 60 * 60)) path <- aggregate(gdat[, c("Day", "Lon", "Lat")], list(Seg = seg), 
FUN = mean) j <- subset(trip, trip$Date %in% path$Day) k <- subset(path, path$Day %in% j$Date) #drawTracks(j$Lon, jitter(j$Lat), k$Lon, k$Lat) # jitter to show more points # calculate RMSE of lon, lat using mean daily GPS locations n = length(trip$Lon) rmse_lon <- round(sqrt((1 / n) * sum(( trip$Lon - path$Lon ) ^ 2)), 2) rmse_lat <- round(sqrt((1 / n) * sum(( trip$Lat - path$Lat ) ^ 2)), 2) # calculate gcDist to nearest GPS position on any day j <- c() for (i in 1:length(trip$Date)) { k <- subset(gdat, as.POSIXct(Day) == as.POSIXct(trip$Date[i])) j[i] <- min(gcDist(cbind(trip$Lon[i], trip$Lat[i]), cbind(k$Lon, k$Lat)), na.rm = T) } gcd_mean <- mean(j) gcd_sd <- sd(j) return(list(rmse_lon, rmse_lat, gcd_mean, gcd_sd)) } # define the parameters params <- list(c(1, 1/5, 1, 1/4, 1.0E-4), c(1, 1/25, 1, 1/2, 1.0E-4), c(1, 1/25, 1, 1/4, 1.0E-4)) burrow1 <- burrow2 <- c(70.75, -49.25) #from med.lon, med.lat on start and end days k <- lapply(params, foo)
/parameter_sensitivity_analysis.R
no_license
ABindoff/geolocationHMM
R
false
false
7,542
r
# install.packages("devtools") # devtools::install_github("SWotherspoon/SGAT") # devtools::install_github("SWotherspoon/BAStag") library(SGAT) library(BAStag) library(raster) library(maptools) library(readr) # Main algorithm used to calculate the likelihoods TwilightFreeModel <- function(slices, alpha,beta, x0,fixed=FALSE,dt=NULL,threshold=5,zenith=96, spread = 2) { time <- .POSIXct(sapply(slices,function(d) mean(d$Date)),"GMT") ## Times (hours) between observations if(is.null(dt)) dt <- diff(as.numeric(time)/3600) ## Fixed locations fixed <- rep_len(fixed,length.out=length(slices)) ## Contribution to log posterior from each x location logpk <- function(k,x) { n <- nrow(x) logl <- double(n) ss <- solar(slices[[k]]$Date) obsDay <- (slices[[k]]$Light) >= threshold ## Loop over location for(i in seq_len(n)) { ## Compute for each x the time series of zeniths expDay <- zenith(ss,x[i,1],x[i,2]) <= zenith ## Some comparison to the observed light -> is L=0 (ie logl=-Inf) if(any(obsDay & !expDay)) { logl[i] <- -Inf } else { count <- sum(expDay & !obsDay) logl[i] <- dgamma(count,alpha[1],alpha[2],log=TRUE) } } ## Return sum of likelihood + prior logl + logp0(k,x, spread = spread) } ## Behavioural contribution to the log posterior logbk <- function(k,x1,x2) { spd <- pmax.int(gcDist(x1,x2), 1e-06)/dt[k] dgamma(spd,beta[1L],beta[2L],log=TRUE) } list( logpk=logpk, logbk=logbk, fixed=fixed, x0=x0, time=time, alpha=alpha, beta=beta) } logp0 <- function(k,x, spread = spread) { x[,1] <- x[,1]%%360 tt <- median(d.seg[[k]]$Temp, na.rm = TRUE) if(is.na(tt)) { 0 } else { dnorm(tt,extract(sst[[indices[k]]],x),spread,log=T) } } # TDR light, depth, SST previously resamples to 2 min intervals tag <- "https://raw.githubusercontent.com/ABindoff/geolocationHMM/master/TDR86372ds.csv" d.lig <- read_delim(tag, delim = ",", skip = 0, col_names = c("Date", "Light","Depth","Temp")) head(d.lig) # filtered GPS positions gps <- 
"https://raw.githubusercontent.com/ABindoff/geolocationHMM/master/86372_filteredGPS.csv" gdat <- read_csv(gps, skip = 0, col_names = c("Date", "Lon","Lat")) head(gdat) compare.light <- function(day, lon, lat, zenith = 96, offset = 0, verbose = T){ single.day <- subset(d.lig, d.lig$Date >= as.POSIXct(day, tz = "GMT")+offset*60*60 & d.lig$Date < as.POSIXct(day+24*60*60, tz = "GMT")+offset*60*60) d.sim <- zenithSimulate(single.day$Date, lon = rep(lon, length(single.day$Date)), lat = rep(lat, length(single.day$Date)), single.day$Date) d.sim$Light0 <- ifelse(d.sim$Zenith<zenith,max(single.day$Light, na.rm = T),1) d.sim$Light <- d.sim$Light0 thresh <- max(single.day$Light[which(d.sim$Zenith >= zenith)]) if(verbose){ plot(single.day$Date, single.day$Light, col = "red",type = "l", lwd = 2, ylim = c(0,max(single.day$Light, na.rm = T)), xlab = day, main = cbind(lon, lat)) lines(d.sim$Date, d.sim$Light, lwd = 2) abline( h = thresh, lty = 2) print(paste0("max light in night window: ", thresh, " assuming a solar zenith angle of: ", zenith)) } return(thresh) } # calibrate on first 7 days GPS data thresh <- 5 zen <- 97 day <- as.POSIXct("2009-10-29 00:00:00", "UTC") for(i in 1:7){ dd <- day + i*60*60 med.lon <- median(subset(gdat$Lon, gdat$Date >= dd & gdat$Date <= dd+24*60*60), na.rm = T) med.lat <- median(subset(gdat$Lat, gdat$Date >= dd & gdat$Date <= dd+24*60*60), na.rm = T) j <- compare.light(day+i*60*60, med.lon, med.lat, zenith = zen, offset = 5.5, verbose = F) if(j > thresh){ thresh <- j } } #thresh <- 129.2 zen <- 97 # figure showing the whole light record for the period chosen offset =5 d.lig <- subset(d.lig,Date >= as.POSIXct("2009-10-28 00:00:01",tz = "UTC") & Date < as.POSIXct("2010-01-20 15:00:01",tz = "UTC")) lightImage(d.lig, offset = 5, zlim = c(0,130)) # the example in the paper does not use SST d.lig$Temp <- NA ## Define segment by date seg <- floor((as.numeric(d.lig$Date)- as.numeric(min(d.lig$Date)))/(24*60*60)) ## Split into segments d.seg <- 
split(d.lig,seg) ## Drop first and last d.seg <- d.seg[-c(1,length(d.seg))] indices <- NA ## function to plot GLS track over GPS track drawTracks <- function(lon1, lat1, lon2, lat2, col1 = "firebrick", col2 = "dodgerblue", main = ""){ xlm <- range(append(lon1, lon2)) ylm <- range(append(lat1, lat2)) data(wrld_simpl) plot(wrld_simpl,xlim=xlm,ylim=ylm, col="grey90",border="grey80", main = main, axes = T) lines(cbind(lon1, lat1), col = col1) lines(cbind(lon2, lat2), col = col2) } # create the grid library(maptools) data("wrld_simpl") ## Cells per degree, define Lat min and max (xmn, xmx) and Lon (ymn, ymx) ## adjust nrows, ncols to get a feel for the sensitivity at difference cell sizes grid <- raster(nrows = 30, ncols = 70, xmn=45, xmx=225, ymn=-65, ymx=55, crs=proj4string(wrld_simpl)) grid <- rasterize(wrld_simpl,grid,1,silent=TRUE) ## Mask must be non-zero for sea grid <- is.na(grid) foo <- function(params) { ## Make some initial track we only use the start and end points from x0 <- matrix(0, length(d.seg), 2) x0[1, ] <- burrow1 x0[length(d.seg),] <- burrow2 # if the starting point is defined by burrow, make the first entry 'T' # and if the animal returns to the burrow, make the last entry 'T' also fixed <- c(T, logical(length(d.seg) - 2), T) # specify the model model <- TwilightFreeModel( d.seg, alpha = c(params[1], params[2]), beta = c(params[3], params[4]), x0 = x0, zenith = zen, threshold = thresh, fixed = fixed, spread = 2 ) # fit the model using the forward-backward algorithm, SGAT::essie fit <- SGAT::essie(model, grid, epsilon1 = 1.0E-4, epsilon2 = params[5]) # summarise daily locations for GPS and plot both estimated tracks trip <- data.frame(as.POSIXct(strptime(essieMode(fit)$time, "%Y-%m-%d")), essieMode(fit)$x) colnames(trip) <- c("Date", "Lon", "Lat") gdat$Day <- as.POSIXct(strptime(gdat$Date, "%Y-%m-%d")) seg <- floor((as.numeric(gdat$Day) - as.numeric(min(gdat$Day))) / (24 * 60 * 60)) path <- aggregate(gdat[, c("Day", "Lon", "Lat")], list(Seg = seg), 
FUN = mean) j <- subset(trip, trip$Date %in% path$Day) k <- subset(path, path$Day %in% j$Date) #drawTracks(j$Lon, jitter(j$Lat), k$Lon, k$Lat) # jitter to show more points # calculate RMSE of lon, lat using mean daily GPS locations n = length(trip$Lon) rmse_lon <- round(sqrt((1 / n) * sum(( trip$Lon - path$Lon ) ^ 2)), 2) rmse_lat <- round(sqrt((1 / n) * sum(( trip$Lat - path$Lat ) ^ 2)), 2) # calculate gcDist to nearest GPS position on any day j <- c() for (i in 1:length(trip$Date)) { k <- subset(gdat, as.POSIXct(Day) == as.POSIXct(trip$Date[i])) j[i] <- min(gcDist(cbind(trip$Lon[i], trip$Lat[i]), cbind(k$Lon, k$Lat)), na.rm = T) } gcd_mean <- mean(j) gcd_sd <- sd(j) return(list(rmse_lon, rmse_lat, gcd_mean, gcd_sd)) } # define the parameters params <- list(c(1, 1/5, 1, 1/4, 1.0E-4), c(1, 1/25, 1, 1/2, 1.0E-4), c(1, 1/25, 1, 1/4, 1.0E-4)) burrow1 <- burrow2 <- c(70.75, -49.25) #from med.lon, med.lat on start and end days k <- lapply(params, foo)
#' Upload masterDf to DB #' #' @details #' Upload masterDf to DB #' #' @param ediData The EDI data in format of data frame to be inserted. #' @param connectionDetails connectionDetails produced by "DatabaseConnector::createconnectionDetails" #' @param vocabularyDatabaseSchema The name of the DB schema where the data frame should be inserted. #' @param tableName The name of the table where the data should be inserted. #' @param useMppBulkLoad If using Redshift or PDW, use more performant bulk loading techniques. Setting the system environment variable "USE_MPP_BULK_LOAD" to TRUE is another way to enable this mode. Please note, Redshift requires valid S3 credentials; PDW requires valid DWLoader installation. This can only be used for permanent tables, and cannot be used to append to an existing table. Default is FALSE :D #' #' @export #' GenerateEdiVocaTable<-function(ediData, connectionDetails, vocabularyDatabaseSchema = connectionDetails$schema, tableName, useMppBulkLoadS = FALSE ){ if (sum(is.na(ediData$conceptCode))) stop("It is not allowed to have NA in concept code") if (sum(is.na(ediData$conceptName))) stop("It is not allowed to have NA in concept name") if (sum(is.na(ediData$domainId))) stop("It is not allowed to have NA in domain id") if (sum(is.na(ediData$vocabularyId))) stop("It is not allowed to have NA in vocabulary id") if (sum(is.na(ediData$conceptClassId))) stop("It is not allowed to have NA in concept class id") if (sum(is.na(ediData$validStartDate))) stop("It is not allowed to have NA in valid start date") if (sum(is.na(ediData$validEndDate))) stop("It is not allowed to have NA in valid end date") colnames(ediData)<-SqlRender::camelCaseToSnakeCase(colnames(ediData)) conn <- DatabaseConnector::connect(connectionDetails) sql<-"IF OBJECT_ID('@vocabulary_database_schema.@table_name', 'U') IS NOT NULL DROP TABLE @vocabulary_database_schema.@table_name; CREATE TABLE @vocabulary_database_schema.@table_name ( concept_code VARCHAR(50) NOT NULL , concept_name 
VARCHAR(2000) NOT NULL , --Please note that we allowed lengthy concept name concept_synonym VARCHAR(2000) NULL, domain_id VARCHAR(20) NOT NULL , vocabulary_id VARCHAR(20) NOT NULL , concept_class_id VARCHAR(20) NOT NULL , valid_start_date DATE NOT NULL , valid_end_date DATE NOT NULL , invalid_reason VARCHAR(1) NULL , ancestor_concept_code VARCHAR(20) NULL , previous_concept_code VARCHAR(20) NULL , material VARCHAR(2000) NULL , dosage FLOAT NULL , dosage_unit VARCHAR(20) NULL , sanjung_name VARCHAR(2000) NULL ); " sql <- SqlRender::render(sql, vocabulary_database_schema = vocabularyDatabaseSchema, table_name=tableName) sql <- SqlRender::translate(sql, targetDialect = connectionDetails$dbms) DatabaseConnector::executeSql(conn,sql) ##Insert table DatabaseConnector::insertTable(connection = conn, tableName = tableName, data = ediData, dropTableIfExists = T, createTable = FALSE, tempTable = FALSE, progressBar = TRUE, useMppBulkLoad = useMppBulkLoadS ) # writing CSV file # write.csv(ediData,file="./inst/ediData.csv", fileEncoding="UTF-8") DatabaseConnector::disconnect(conn) #return(ediData) } #' Create CSV #' #' @details #' Generate CSV with snake-case columns #' #' @param ediData The EDI data in format of data frame or ffdf containing the data to be inserted. 
#' @param filePath path and file name where CSV is written #' #' @export #' CreateCsv<-function(ediData, filePath){ #ediData<-rbind(deviceData, sugaData, drugData) if (sum(is.na(ediData$conceptCode))) stop("It is not allowed to have NA in concept code") if (sum(is.na(ediData$conceptName))) stop("It is not allowed to have NA in concept name") if (sum(is.na(ediData$domainId))) stop("It is not allowed to have NA in domain id") if (sum(is.na(ediData$vocabularyId))) stop("It is not allowed to have NA in vocabulary id") if (sum(is.na(ediData$conceptClassId))) stop("It is not allowed to have NA in concept class id") if (sum(is.na(ediData$validStartDate))) stop("It is not allowed to have NA in valid start date") if (sum(is.na(ediData$validEndDate))) stop("It is not allowed to have NA in valid end date") colnames(ediData)<-SqlRender::camelCaseToSnakeCase(colnames(ediData)) write.csv(ediData, filePath, row.names = FALSE, fileEncoding="UTF-8") print(sprintf("EDI data is written in csv format at %s with total line number of %d", filePath, nrow(ediData))) }
/R/export.r
permissive
parkyijoo/EdiToOmop
R
false
false
5,150
r
#' Upload masterDf to DB #' #' @details #' Upload masterDf to DB #' #' @param ediData The EDI data in format of data frame to be inserted. #' @param connectionDetails connectionDetails produced by "DatabaseConnector::createconnectionDetails" #' @param vocabularyDatabaseSchema The name of the DB schema where the data frame should be inserted. #' @param tableName The name of the table where the data should be inserted. #' @param useMppBulkLoad If using Redshift or PDW, use more performant bulk loading techniques. Setting the system environment variable "USE_MPP_BULK_LOAD" to TRUE is another way to enable this mode. Please note, Redshift requires valid S3 credentials; PDW requires valid DWLoader installation. This can only be used for permanent tables, and cannot be used to append to an existing table. Default is FALSE :D #' #' @export #' GenerateEdiVocaTable<-function(ediData, connectionDetails, vocabularyDatabaseSchema = connectionDetails$schema, tableName, useMppBulkLoadS = FALSE ){ if (sum(is.na(ediData$conceptCode))) stop("It is not allowed to have NA in concept code") if (sum(is.na(ediData$conceptName))) stop("It is not allowed to have NA in concept name") if (sum(is.na(ediData$domainId))) stop("It is not allowed to have NA in domain id") if (sum(is.na(ediData$vocabularyId))) stop("It is not allowed to have NA in vocabulary id") if (sum(is.na(ediData$conceptClassId))) stop("It is not allowed to have NA in concept class id") if (sum(is.na(ediData$validStartDate))) stop("It is not allowed to have NA in valid start date") if (sum(is.na(ediData$validEndDate))) stop("It is not allowed to have NA in valid end date") colnames(ediData)<-SqlRender::camelCaseToSnakeCase(colnames(ediData)) conn <- DatabaseConnector::connect(connectionDetails) sql<-"IF OBJECT_ID('@vocabulary_database_schema.@table_name', 'U') IS NOT NULL DROP TABLE @vocabulary_database_schema.@table_name; CREATE TABLE @vocabulary_database_schema.@table_name ( concept_code VARCHAR(50) NOT NULL , concept_name 
VARCHAR(2000) NOT NULL , --Please note that we allowed lengthy concept name concept_synonym VARCHAR(2000) NULL, domain_id VARCHAR(20) NOT NULL , vocabulary_id VARCHAR(20) NOT NULL , concept_class_id VARCHAR(20) NOT NULL , valid_start_date DATE NOT NULL , valid_end_date DATE NOT NULL , invalid_reason VARCHAR(1) NULL , ancestor_concept_code VARCHAR(20) NULL , previous_concept_code VARCHAR(20) NULL , material VARCHAR(2000) NULL , dosage FLOAT NULL , dosage_unit VARCHAR(20) NULL , sanjung_name VARCHAR(2000) NULL ); " sql <- SqlRender::render(sql, vocabulary_database_schema = vocabularyDatabaseSchema, table_name=tableName) sql <- SqlRender::translate(sql, targetDialect = connectionDetails$dbms) DatabaseConnector::executeSql(conn,sql) ##Insert table DatabaseConnector::insertTable(connection = conn, tableName = tableName, data = ediData, dropTableIfExists = T, createTable = FALSE, tempTable = FALSE, progressBar = TRUE, useMppBulkLoad = useMppBulkLoadS ) # writing CSV file # write.csv(ediData,file="./inst/ediData.csv", fileEncoding="UTF-8") DatabaseConnector::disconnect(conn) #return(ediData) } #' Create CSV #' #' @details #' Generate CSV with snake-case columns #' #' @param ediData The EDI data in format of data frame or ffdf containing the data to be inserted. 
#' @param filePath path and file name where CSV is written #' #' @export #' CreateCsv<-function(ediData, filePath){ #ediData<-rbind(deviceData, sugaData, drugData) if (sum(is.na(ediData$conceptCode))) stop("It is not allowed to have NA in concept code") if (sum(is.na(ediData$conceptName))) stop("It is not allowed to have NA in concept name") if (sum(is.na(ediData$domainId))) stop("It is not allowed to have NA in domain id") if (sum(is.na(ediData$vocabularyId))) stop("It is not allowed to have NA in vocabulary id") if (sum(is.na(ediData$conceptClassId))) stop("It is not allowed to have NA in concept class id") if (sum(is.na(ediData$validStartDate))) stop("It is not allowed to have NA in valid start date") if (sum(is.na(ediData$validEndDate))) stop("It is not allowed to have NA in valid end date") colnames(ediData)<-SqlRender::camelCaseToSnakeCase(colnames(ediData)) write.csv(ediData, filePath, row.names = FALSE, fileEncoding="UTF-8") print(sprintf("EDI data is written in csv format at %s with total line number of %d", filePath, nrow(ediData))) }
ml_glm <- function(formula, data, family, link, offset = 0, start = NULL, verbose = FALSE, ...) { ### Handle the input mf <- model.frame(formula, data) y <- model.response(mf, "numeric") ### Prepare model infrastructure class(y) <- c(family, link, "expFamily") X <- model.matrix(formula, data = data) ### Check for missing data. Stop if any. if (any(is.na(cbind(y, X)))) stop("Some data are missing!") ### Initialize the search, if needed if (is.null(start)) start <- kickStart(y, X, offset) ### Maximize the joint log likelihood fit <- maximize(start, Sjll, X, y, offset, ...) ### Check for optim failure and report and stop if (verbose | fit$convergence > 0) print(fit) ### Extract and compute quantities of interest beta.hat <- fit$par se.beta.hat <- sqrt(diag(solve(-fit$hessian))) residuals <- devianceResiduals(y, beta.hat, X, offset, ...) ### Fit null model and determine null deviance fit.null <- maximize(mean(y), Sjll, 1, y, offset, ...) null.deviance <- sum(devianceResiduals(y, fit.null$par, 1, offset, ...)^2) ### Report the results, with the needs of print.glm in mind results <- list(fit = fit, X = X, y = y, call = match.call(), obs = length(y), df.null = length(y) - 1, df.residual = length(y) - length(beta.hat), deviance = sum(residuals^2), null.deviance = null.deviance, residuals = residuals, coefficients = beta.hat, se.beta.hat = se.beta.hat, aic = - 2 * fit$val + 2 * length(beta.hat), i = fit$counts[1]) ### Use (new) msme class and glm class class(results) <- c("msme","glm") return(results) }
/R/ml_glm.R
no_license
cran/msme
R
false
false
1,944
r
ml_glm <- function(formula, data, family, link, offset = 0, start = NULL, verbose = FALSE, ...) { ### Handle the input mf <- model.frame(formula, data) y <- model.response(mf, "numeric") ### Prepare model infrastructure class(y) <- c(family, link, "expFamily") X <- model.matrix(formula, data = data) ### Check for missing data. Stop if any. if (any(is.na(cbind(y, X)))) stop("Some data are missing!") ### Initialize the search, if needed if (is.null(start)) start <- kickStart(y, X, offset) ### Maximize the joint log likelihood fit <- maximize(start, Sjll, X, y, offset, ...) ### Check for optim failure and report and stop if (verbose | fit$convergence > 0) print(fit) ### Extract and compute quantities of interest beta.hat <- fit$par se.beta.hat <- sqrt(diag(solve(-fit$hessian))) residuals <- devianceResiduals(y, beta.hat, X, offset, ...) ### Fit null model and determine null deviance fit.null <- maximize(mean(y), Sjll, 1, y, offset, ...) null.deviance <- sum(devianceResiduals(y, fit.null$par, 1, offset, ...)^2) ### Report the results, with the needs of print.glm in mind results <- list(fit = fit, X = X, y = y, call = match.call(), obs = length(y), df.null = length(y) - 1, df.residual = length(y) - length(beta.hat), deviance = sum(residuals^2), null.deviance = null.deviance, residuals = residuals, coefficients = beta.hat, se.beta.hat = se.beta.hat, aic = - 2 * fit$val + 2 * length(beta.hat), i = fit$counts[1]) ### Use (new) msme class and glm class class(results) <- c("msme","glm") return(results) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/type-pnpp_experiment.R \name{wells_positive} \alias{wells_positive} \title{Get positive wells} \usage{ wells_positive(plate) } \arguments{ \item{plate}{A ddPCR plate.} } \value{ Character vector with well IDs of positive wells } \description{ After a ddPCR plate of type \code{pnpp_experiment} has been analyzed, get the wells that were deemed as having mostly positive droplets. } \examples{ \dontrun{ plate <- new_plate(sample_data_dir(), type = plate_types$pnpp_experiment) \%>\% analyze wells_positive(plate) } } \seealso{ \code{\link[ddpcr]{pnpp_experiment}}\cr \code{\link[ddpcr]{wells_negative}} }
/man/wells_positive.Rd
no_license
cran/ddpcr
R
false
true
711
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/type-pnpp_experiment.R \name{wells_positive} \alias{wells_positive} \title{Get positive wells} \usage{ wells_positive(plate) } \arguments{ \item{plate}{A ddPCR plate.} } \value{ Character vector with well IDs of positive wells } \description{ After a ddPCR plate of type \code{pnpp_experiment} has been analyzed, get the wells that were deemed as having mostly positive droplets. } \examples{ \dontrun{ plate <- new_plate(sample_data_dir(), type = plate_types$pnpp_experiment) \%>\% analyze wells_positive(plate) } } \seealso{ \code{\link[ddpcr]{pnpp_experiment}}\cr \code{\link[ddpcr]{wells_negative}} }
## ## these are OBSOLETE: see xyplot (not traceplot) method in coda ## ## new functions for operating on Bayesian stuff ## traceplot <- function (x, smooth=TRUE, ylab="", sys="lattice", ...) { ## UseMethod("traceplot") ## } ## traceplot.mcmc.list <- function(x,smooth=TRUE,ylab="", sys="lattice", ...) { ## require(reshape) ## require(lattice) ## nchains = length(x) ## niter = nrow(x[[1]]) ## x2 = melt(data.frame(do.call(rbind,x), ## iter=rep(1:niter,nchains), ## chain=rep(1:nchains,each=niter)), ## id.var=c("chain","iter")) ## type <- "l" ## if (smooth) type <- c(type,"smooth") ## xyplot(value~iter|variable,groups=chain,type=type,data=x2, ## scales=list(y=list(relation="free")), ## ylab=ylab,xlab="iteration") ## } ## traceplot.mcmc <- function(x,smooth=TRUE,ylab="", sys="lattice", ...) { ## require(reshape) ## require(lattice) ## niter = nrow(x) ## x2 = melt(data.frame(x, ## iter=1:niter), ## id.var="iter") ## type <- "l" ## if (smooth) type <- c(type,"smooth") ## xyplot(value~iter|variable,type=type,data=x2, ## scales=list(y=list(relation="free")), ## ylab=ylab,xlab="iteration") ## } ## copied from traceplot in coda package, but uses lattice ## traceplot.mcmc <- function (x, data = NULL, outer, aspect = "xy", ## default.scales = list(y=list(relation = "free")), ## start = 1, thin = 1, ## main = attr(x, "title"), xlab = "", ## ylab="", ## plot.points = "rug", ..., ## subset = coda:::thinned.indices(x, start = start, ## thin = thin)) ## ## would like to supply subset argument that would also ## ## select specific variables ## ## consider aspect="fill" as an alternative ## { ## require(lattice) ## if (!is.R()) { ## stop("This function is not yet available in S-PLUS") ## } ## if (!missing(outer)) ## warning("specification of outer ignored") ## data <- as.data.frame(x) ## v <- seq(nrow(x)) ## form <- as.formula(paste(paste(lapply(names(data), as.name), ## collapse = "+"), "~v")) ## xyplot(form, data = data[subset, ], outer = TRUE, aspect = aspect, ## default.scales = 
default.scales, main = main, xlab = xlab, ## ylab=ylab, ## type="l", ## plot.points = plot.points, ...) ## } HPDregionplot <- function(x,vars=1:2,h,n=50,lump=TRUE,prob=0.95, xlab=NULL,ylab=NULL,lims=NULL,...) { parnames <- if (inherits(x, "mcmc.list")) colnames(x[[1]]) else colnames(x) if (is.character(vars)) { vars <- match(vars,parnames) if (any(is.na(vars))) stop("some variable names do not match") } varnames <- parnames[vars] mult <- (class(x)=="mcmc.list" && !lump) if (mult) stop("multiple chains without lumping not yet implemented") if (inherits(x, "mcmc.list")) { if (lump) var1 <- c(sapply(x,function(z)z[,vars[1]])) else var1 <- lapply(x,function(z)z[,vars[1]]) } else var1 <- x[,vars[1]] if (inherits(x,"mcmc.list")) { if (lump) var2 <- c(sapply(x,function(z)z[,vars[2]])) else var2 <- lapply(x,function(z)z[,vars[2]]) } else var2 <- x[,vars[2]] if (is.null(lims)) lims <- c(range(var1),range(var2)) if (!mult) { post1 <- kde2d(var1,var2,n=n,h=h,lims=lims) ## post0 = post1 } else { post1 = mapply(kde2d,var1,var2,MoreArgs=list(n=n,h=h,lims=lims)) } dx <- diff(post1$x[1:2]) dy <- diff(post1$y[1:2]) sz <- sort(post1$z) ## debugging stuff ## if (FALSE) { ## lattice:::contourplot(post1$z) ## d <- with(post1,data.frame(expand.grid(y=y,x=x),z=c(z))) ## lattice:::contourplot(z~x*y,data=d) ## with(post1,contour(x,y,z)) ## points(x[,1],x[,2],col=2) ## afun <- function(n) { ## k2 <- kde2d(x[,1],x[,2],n=n,h=c(1,1)) ## with(k2,sum(z)*diff(x)[1]*diff(y)[1]) ## } ## sapply(5:25,afun) ## } c1 <- cumsum(sz)*dx*dy ## trying to find level containing 95% of volume ... levels <- sapply(prob,function(x) {approx(c1,sz,xout=1-x)$y}) ## meanvec <- c(mean(var1),mean(var2)) if (is.null(xlab)) xlab <- varnames[1] if (is.null(ylab)) ylab <- varnames[2] contour(post1$x,post1$y,post1$z,level=levels, xlab=xlab,ylab=ylab,drawlabels=FALSE,...) 
invisible(contourLines(post1$x,post1$y,post1$z,levels=levels)) } ## make an mcmc object out of an mcmc.list lump.mcmc.list <- function(x) { x2 <- do.call("rbind",x) mcpars <- sapply(x,attr,"mcpar") class(x2) <- "mcmc" if (var(mcpars[1,])>0 || var(mcpars[3,])>0) stop("can't combine chains with unequal start/thin") attr(x2,"mcpar") <- c(mcpars[1,1],sum(mcpars[2,]),mcpars[3,1]) x2 } perturb.params = function(base,alt,which, mult=FALSE,use.base=TRUE) { if (!missing(alt)) { chlist = mapply( function(name,vals) { x = lapply(vals, function(z2) { if (mult) { base[[name]] = base[[name]]*z2} else base[[name]]=z2 base }) if (is.list(x)) x else list(x) }, names(alt),alt) chlist = unlist(chlist,recursive=FALSE) } if (use.base) { chlist=append(list(base),chlist) } chlist }
/R/bayes.R
no_license
bbolker/emdbook
R
false
false
5,365
r
## ## these are OBSOLETE: see xyplot (not traceplot) method in coda ## ## new functions for operating on Bayesian stuff ## traceplot <- function (x, smooth=TRUE, ylab="", sys="lattice", ...) { ## UseMethod("traceplot") ## } ## traceplot.mcmc.list <- function(x,smooth=TRUE,ylab="", sys="lattice", ...) { ## require(reshape) ## require(lattice) ## nchains = length(x) ## niter = nrow(x[[1]]) ## x2 = melt(data.frame(do.call(rbind,x), ## iter=rep(1:niter,nchains), ## chain=rep(1:nchains,each=niter)), ## id.var=c("chain","iter")) ## type <- "l" ## if (smooth) type <- c(type,"smooth") ## xyplot(value~iter|variable,groups=chain,type=type,data=x2, ## scales=list(y=list(relation="free")), ## ylab=ylab,xlab="iteration") ## } ## traceplot.mcmc <- function(x,smooth=TRUE,ylab="", sys="lattice", ...) { ## require(reshape) ## require(lattice) ## niter = nrow(x) ## x2 = melt(data.frame(x, ## iter=1:niter), ## id.var="iter") ## type <- "l" ## if (smooth) type <- c(type,"smooth") ## xyplot(value~iter|variable,type=type,data=x2, ## scales=list(y=list(relation="free")), ## ylab=ylab,xlab="iteration") ## } ## copied from traceplot in coda package, but uses lattice ## traceplot.mcmc <- function (x, data = NULL, outer, aspect = "xy", ## default.scales = list(y=list(relation = "free")), ## start = 1, thin = 1, ## main = attr(x, "title"), xlab = "", ## ylab="", ## plot.points = "rug", ..., ## subset = coda:::thinned.indices(x, start = start, ## thin = thin)) ## ## would like to supply subset argument that would also ## ## select specific variables ## ## consider aspect="fill" as an alternative ## { ## require(lattice) ## if (!is.R()) { ## stop("This function is not yet available in S-PLUS") ## } ## if (!missing(outer)) ## warning("specification of outer ignored") ## data <- as.data.frame(x) ## v <- seq(nrow(x)) ## form <- as.formula(paste(paste(lapply(names(data), as.name), ## collapse = "+"), "~v")) ## xyplot(form, data = data[subset, ], outer = TRUE, aspect = aspect, ## default.scales = 
default.scales, main = main, xlab = xlab, ## ylab=ylab, ## type="l", ## plot.points = plot.points, ...) ## } HPDregionplot <- function(x,vars=1:2,h,n=50,lump=TRUE,prob=0.95, xlab=NULL,ylab=NULL,lims=NULL,...) { parnames <- if (inherits(x, "mcmc.list")) colnames(x[[1]]) else colnames(x) if (is.character(vars)) { vars <- match(vars,parnames) if (any(is.na(vars))) stop("some variable names do not match") } varnames <- parnames[vars] mult <- (class(x)=="mcmc.list" && !lump) if (mult) stop("multiple chains without lumping not yet implemented") if (inherits(x, "mcmc.list")) { if (lump) var1 <- c(sapply(x,function(z)z[,vars[1]])) else var1 <- lapply(x,function(z)z[,vars[1]]) } else var1 <- x[,vars[1]] if (inherits(x,"mcmc.list")) { if (lump) var2 <- c(sapply(x,function(z)z[,vars[2]])) else var2 <- lapply(x,function(z)z[,vars[2]]) } else var2 <- x[,vars[2]] if (is.null(lims)) lims <- c(range(var1),range(var2)) if (!mult) { post1 <- kde2d(var1,var2,n=n,h=h,lims=lims) ## post0 = post1 } else { post1 = mapply(kde2d,var1,var2,MoreArgs=list(n=n,h=h,lims=lims)) } dx <- diff(post1$x[1:2]) dy <- diff(post1$y[1:2]) sz <- sort(post1$z) ## debugging stuff ## if (FALSE) { ## lattice:::contourplot(post1$z) ## d <- with(post1,data.frame(expand.grid(y=y,x=x),z=c(z))) ## lattice:::contourplot(z~x*y,data=d) ## with(post1,contour(x,y,z)) ## points(x[,1],x[,2],col=2) ## afun <- function(n) { ## k2 <- kde2d(x[,1],x[,2],n=n,h=c(1,1)) ## with(k2,sum(z)*diff(x)[1]*diff(y)[1]) ## } ## sapply(5:25,afun) ## } c1 <- cumsum(sz)*dx*dy ## trying to find level containing 95% of volume ... levels <- sapply(prob,function(x) {approx(c1,sz,xout=1-x)$y}) ## meanvec <- c(mean(var1),mean(var2)) if (is.null(xlab)) xlab <- varnames[1] if (is.null(ylab)) ylab <- varnames[2] contour(post1$x,post1$y,post1$z,level=levels, xlab=xlab,ylab=ylab,drawlabels=FALSE,...) 
invisible(contourLines(post1$x,post1$y,post1$z,levels=levels)) } ## make an mcmc object out of an mcmc.list lump.mcmc.list <- function(x) { x2 <- do.call("rbind",x) mcpars <- sapply(x,attr,"mcpar") class(x2) <- "mcmc" if (var(mcpars[1,])>0 || var(mcpars[3,])>0) stop("can't combine chains with unequal start/thin") attr(x2,"mcpar") <- c(mcpars[1,1],sum(mcpars[2,]),mcpars[3,1]) x2 } perturb.params = function(base,alt,which, mult=FALSE,use.base=TRUE) { if (!missing(alt)) { chlist = mapply( function(name,vals) { x = lapply(vals, function(z2) { if (mult) { base[[name]] = base[[name]]*z2} else base[[name]]=z2 base }) if (is.list(x)) x else list(x) }, names(alt),alt) chlist = unlist(chlist,recursive=FALSE) } if (use.base) { chlist=append(list(base),chlist) } chlist }
##-------------------------------------------- ## ## Lecture 7 R methods ## ## Class: PCE Data Science Methods Class ## ##-------------------------------------------- setwd('E:/Work/Teaching/PCE_Data_Science/7_TimeSeries_SpatialStats_Bayes') # Load Libraries library(TSA) library(forecast) library(gstat) library(akima) library(spatstat) library(deldir) library(lattice) library(geoR) library(sp) library(zoo) ##-------Time Series Introduction------ # Moving Window Averages # Create some data x = seq(1,10,len=200) y = 5*sin(x) + sin(10*x) + 0.25*rnorm(100) plot(x,y,main="Time Series Data", xlab="x", ylab="y", pch=16, cex = 0.25) # Past moving window: mov_avg_past = function(x,window_size){ stopifnot(length(x)>=window_size) mov_avg = c() for (i in 1:length(x)){ if (i<window_size){ temp_avg = mean(x[1:i]) }else{ temp_avg = mean(x[(i-window_size):(i)]) } mov_avg = c(mov_avg, temp_avg) } return(mov_avg) } mov_avg_past5 = mov_avg_past(y,window_size = 5) mov_avg_past10 = mov_avg_past(y,window_size = 10) mov_avg_past25 = mov_avg_past(y,window_size = 25) mov_avg_past50 = mov_avg_past(y,window_size = 50) lines(x,mov_avg_past5, col="red", lwd=2) lines(x,mov_avg_past10, col="blue", lwd=2) lines(x,mov_avg_past25, col="green", lwd=2) lines(x,mov_avg_past50, col="purple", lwd=2) legend('bottomright', c('5 units', '10 units', '25 units', '50 units'), lty=c(1,1,1,1), lwd=c(2,2,2,2), col=c('red','blue','green','purple')) # Let's look at the residuals! time_resid_5 = y - mov_avg_past5 time_resid_10 = y - mov_avg_past10 time_resid_25 = y - mov_avg_past25 time_resid_50 = y - mov_avg_past50 plot(time_resid_50, mov_avg_past5, pch = 16, cex = 0.5, col="red") # This plot isn't too helpful, because the result isn't linear... 
# Look at residuals over time: plot(x, time_resid_5, pch = 16, cex = 0.5, col="red") plot(x, time_resid_10, pch = 16, cex = 0.5, col="blue") plot(x, time_resid_25, pch = 16, cex = 0.5, col="green") plot(x, time_resid_50, pch = 16, cex = 0.5, col="purple") # Notice the wider the window, the more of a trend we see. # Note that there is a function in the 'zoo' package that does this for you: #library(zoo) ?rollapply zoo_mov_avg5 = rollapply(y, width = 5, by = 1, FUN= mean, align = "left") plot(x[5:length(x)],zoo_mov_avg5) lines(x,mov_avg_past5, col="red", lwd=2) # They are the same! points(x,y,pch=16, cex=0.5) # Note that rollapply does not deal with the ends of moving average at all. # One way to deal with this is to 'pad' with NAs zoo_mov_avg5 = rollapply(c(rep(NA, 4),y), width = 5, by = 1, FUN = mean, na.rm = TRUE, align = "left") plot(x,zoo_mov_avg5) lines(x,mov_avg_past5, col="red", lwd=2) ##-----Fourier Transform----- # Create some data period1 = 3 period2 = 0.5 x = seq(0,15,len=5000) y = sin((1/period1)*(2*pi)*x) + 0.5*sin((1/period2)*(2*pi)*x) + 0.05*rnorm(100) plot(x,y, pch = 16, cex=0.25) # Compute Fourier Transform (FFT) and plot the spectrum of frequencies y_spectrum = spectrum(y) plot(y_spectrum$freq[y_spectrum$freq<0.1], y_spectrum$spec[y_spectrum$freq<0.1], type = 'l') # Find maximum frequencies potential_frequencies = y_spectrum$freq[y_spectrum$freq<0.1 & y_spectrum$spec > 25] # Find Periods potential_periods = (1/potential_frequencies) * mean(diff(x)) ##------Simple Exponential Smoothing----- dj_data = read.csv("DJIA.csv") dj_data$Date = as.Date(dj_data$Date, format="%m/%d/%Y") plot(dj_data$Date, dj_data$DJIA, type="l") # Let's consider 2014 onwards dj_data = dj_data[dj_data$Date>=as.Date("2014-01-01"),] plot(dj_data$Date, dj_data$DJIA, type="l", main="Dow Jones Value Daily", xlab="Date", ylab="Value") # use forecast's ses() function: exp_smooth1 = ses(dj_data$DJIA, alpha=0.05, h=31) # h is how many t-units to forecast out exp_smooth2 = 
ses(dj_data$DJIA, alpha=0.15, h=31) exp_smooth3 = ses(dj_data$DJIA, alpha=0.95, h=31) plot(exp_smooth1) lines(exp_smooth1$fitted, col="blue") lines(exp_smooth2$fitted, col="green") lines(exp_smooth3$fitted, col="red") legend('topleft', c('Original Data','alpha=0.05', 'alpha=0.15', 'alpha=0.95'), col=c('black','blue', 'green', 'red'), lty=c(1,1,1)) ##----Double Exponential Smoothing---- # Remember this is equivalent to ARIMA(0,1,1) # P - Auto Regression # D - Degree Integrated (1) # One level differencing # Q - Moving Average (1) # Based only on the previous one double_exp_smooth = Arima(dj_data$DJIA, order = c(0,1,1), seasonal=c(0,1,0)) double_exp_fit = dj_data$DJIA - double_exp_smooth$residuals # fitted values plot(dj_data$Date, dj_data$DJIA,type="l", lwd=2) lines(dj_data$Date, double_exp_fit, col="red", lwd=2, lty=2) # prediction double_exp_pred = predict(double_exp_smooth, n.ahead = 30) lines(seq(from=dj_data$Date[375], to=dj_data$Date[375]+30, by=1)[-1], double_exp_pred$pred, lwd=2, col='green') # Add in standard error lines lines(seq(from=dj_data$Date[375], to=dj_data$Date[375]+30, by=1)[-1], double_exp_pred$pred + double_exp_pred$se, lwd=2, col='green') lines(seq(from=dj_data$Date[375], to=dj_data$Date[375]+30, by=1)[-1], double_exp_pred$pred - double_exp_pred$se, lwd=2, col='green') ##----Auto regressive Model---- lh # Built in dataset ?lh plot(lh) # First Order Auto regressive ar1 = ar(lh, order.max = 1) ar1_fitted = lh - ar1$resid # Plot outcome plot(lh, lwd=2, xlim=c(0,60)) # extend xlim for prediction later lines(ar1_fitted, lwd=2, col="red") # Predict ahead n ar1_pred = predict(ar1, n.ahead=10, se.fit = TRUE) x_pred = length(ar1$resid) : (length(ar1$resid) + 10) lines(x_pred,c(ar1_fitted[length(ar1_fitted)],ar1_pred$pred), lwd=2, col="red", lty=2) lines(x_pred,c(ar1_fitted[length(ar1_fitted)],ar1_pred$pred + ar1_pred$se), lwd=2, col="red", lty=3) lines(x_pred,c(ar1_fitted[length(ar1_fitted)],ar1_pred$pred - ar1_pred$se), lwd=2, col="red", lty=3) 
##-----Auto regressive Moving average Model---- # First Order Auto regressive and Moving average arma1 = arma(lh, order=c(1,1)) # Order is a length 2 vector, [1]=AR, [2]=MA arma1_fitted = lh - arma1$resid # Plot outcome plot(lh, lwd=2, xlim=c(0,60)) lines(arma1_fitted, lwd=2, col="red") # Hold off on predictions until later. ##----ARIMA(0,N,0)=Random Walk Model----- # Generate ARIMA(0,1,0), random walk y_I1 = arima.sim(list(order=c(0,1,0)),n=500) plot(y_I1) # 2nd order random walk y_I2 = arima.sim(list(order=c(0,2,0)), n=500) plot(y_I2) ##----ARIMA(N,0,0)= Autoregressive Model----- # Careful with the AR coefficients, R will throw an error when # D = 0 (Integrated) and series is non-stationary # Generate ARIMA(1,0,0) y_AR1 = arima.sim(list(order=c(1,0,0), ar=0.8),n=500) plot(y_AR1) # 2nd order random walk y_AR2 = arima.sim(list(order=c(2,0,0), ar=c(0.69, 0.3)), n=500) plot(y_AR2) # 5th order random walk y_AR5 = arima.sim(list(order=c(5,0,0), ar=c(0.45, 0.2, 0.1, 0.1, 0.1)), n=500) plot(y_AR5) ##----ARIMA(0,0,N) = Moving Average Model----- # Generate ARIMA(0,0,1) y_MA1 = arima.sim(list(ma=c(0.7)),n=500) plot(y_MA1) # 3rd order Moving Average y_MA3 = arima.sim(list(ma=c(0.9,0.8,0.2)), n=500) plot(y_MA3) # 10th order Moving Average ma_orders = sample(c(0.5,0.6,0.7,0.8), 10, replace=TRUE) y_MA10 = arima.sim(list(ma=ma_orders), n=500) plot(y_MA10) ##-----ARIMA(P,D,Q) X (P,D,Q) seasonality----- # Load las vegas headcounts: headcount = read.csv('JitteredHeadCount.csv', stringsAsFactors = FALSE) # Aggregate headcounts by week: headcount$DateFormat = as.Date(headcount$DateFormat, format="%m/%d/%Y") headcount$week_count = floor(headcount$DayNumber/7.0) headcount_weekly = aggregate(HeadCount ~ week_count, data = headcount, sum) # Not a full last week, drop the last data point headcount_weekly = headcount_weekly[1:52,] # Fit time series: headcount_arima = arima(headcount_weekly$HeadCount, order = c(1,1,1), seasonal = list(order=c(1,0,1))) # What are the fitted coefficients? 
headcount_arima$coef # Get the predictions. Strange, but arima gives the resids, not the predictions: arima_fitted = headcount_weekly$HeadCount - headcount_arima$residuals arima_predictions = predict(headcount_arima, n.ahead=25) x_pred = nrow(headcount_weekly) : (nrow(headcount_weekly) + 25) plot(headcount_weekly$HeadCount, type="l", lwd = 2, col="black", xlim = c(0,80)) lines(arima_fitted, lwd = 2, col="red") lines(x_pred, c(arima_fitted[length(arima_fitted)],arima_predictions$pred), lwd=2, col="red", lty=8) ##-----Time Series by Factors---- dj = read.csv("dow_jones_data.csv", stringsAsFactors = FALSE) dj$Date = as.Date(dj$Date, format = "%Y-%m-%d") # Create many different time factors dj$day_count = as.numeric(dj$Date - min(dj$Date)) dj$week_count = floor(dj$day_count/7.0) dj$month_count = floor(dj$day_count/30.5) dj$year_count = as.numeric(format(dj$Date, format="%Y")) - 1990 dj$month = as.numeric(format(dj$Date, format="%m")) dj$season = floor((dj$month-1) / 3) dj$weekday = as.numeric(format(dj$Date, format = "%w")) dj$week = as.numeric(format(dj$Date, format = "%W")) # Market Bull Dates bull_dates = seq(from = as.Date("2007-10-11"), to = as.Date("2009-03-09"), by = 1) # Create Bull dates logical 0 - 1 dj$bull_market = as.numeric(dj$Date %in% bull_dates) # Is this cheating? Maybe. # Create linear model: dj_model = lm(DJIA ~ . - Date, data = dj) summary(dj_model) # Look at plot plot(dj$Date, dj$DJIA, type="l", lwd=2, main="DJIA", xlab="Time", ylab="DJIA") lines(dj$Date, dj_model$fitted.values, lwd=2, lty=8, col="red") # Closer view: plot(dj$Date, dj$DJIA, type="l", lwd=2, main="DJIA", xlab="Time", ylab="DJIA", xlim=c(as.Date("2009-01-01"), as.Date("2009-12-31")), ylim=c(6500,11000)) lines(dj$Date, dj_model$fitted.values, lwd=2, lty=8, col="red") # Slightly lagging by a bit? # Makes sense because of the "most important" variable. # More autoregressive than 1 day? 
Let's check: DJIA_2_periods_ago = sapply(1:nrow(dj), function(x){ if(x == 1){ return(dj$DJIA[1]) }else{ return(dj$DJIA[x-1]) } }) dj$two_days_ago = DJIA_2_periods_ago dj_model_AR2 = lm(DJIA ~ . - Date, data = dj) summary(dj_model_AR2) # Nope! # Can this predict well? Why not? # # - Turns out just slightly shifting the time series is # a good fit, as the stock market is a random walk and # is highly dependent on where it just was. # - Also, can we predict the bull market variable? Probably not. ##-------Spatial Statistics------ # Clean up rm(list = ls()) gc() ?coalash data(coalash) # Coal ash is a pollutant generated from coal powered mining head(coalash) # Plot with labels plot(coalash$x, coalash$y, type="n", lab=c(16,23,7), xlab="X",ylab="Y") text(coalash$x, coalash$y, format(round(coalash$coalash,1))) title("Coal Ash Percentages",cex=1.5) # Median Polish ?medpolish # Have to create a matrix of coalash values according to thier x-y positions: coalash_mat = tapply(coalash$coalash, list(factor(coalash$x), factor(coalash$y)), function(x) x) coalash_med_pol = medpolish(coalash_mat, na.rm=TRUE) # Returns residuals.... 
coal_no_trend = coalash_mat - coalash_med_pol$residuals # Look at a greyscale plot: par(mfrow=c(1,2)) # We need the max's and mins for scaling the colors zmin = min(coalash_mat[!is.na(coalash_mat)],coal_no_trend[!is.na(coal_no_trend)]) zmax = max(coalash_mat[!is.na(coalash_mat)],coal_no_trend[!is.na(coal_no_trend)]) image(x=1:max(coalash$x), y=1:max(coalash$y), coalash_mat,zlim=c(zmin,zmax),cex.axis=1.5, xlab="Columns",ylab="Rows",cex.lab=1.6,col=gray.colors(12)) title("Original Coal Ash",cex.main=1.5) image(x=1:max(coalash$x),y=1:max(coalash$y),coal_no_trend,zlim=c(zmin,zmax),cex.axis=1.5, xlab="Columns",ylab="Rows",cex.lab=1.6,col=gray.colors(12)) title("Med Polish Coal Ash",cex.main=1.5) ##------Voronoi Diagrams----- par(mfrow=c(1,1)) x = runif(10) y = runif(10) plot(x,y,pch=16, ylim=c(0,1),xlim=c(0,1)) ?ppp xy_ppp = ppp(x,y,c(0,1),c(0,1)) ?deldir tv = deldir(x,y,rw=c(0,1,0,1),plot=TRUE) # Dashed lines are Voronoi Polygons (Dirichlet) # Delaunay Triangulation are the solid lines # get weights: xy_weights_obj = dirichlet(xy_ppp) tile.areas(xy_weights_obj) ##----Variogram---- # First, load and depict the 'walker lake data'. walker_lake = read.table("walk470.txt",header=T) plot(walker_lake$x,walker_lake$y,pch="",xlab="",ylab="", # Plots the (x,y) locations with a "+" sign, no xlim=range(walker_lake$x),ylim=range(walker_lake$y),axes=F)# axis labels, specified axis limits, & no axes. box() # Places a box around the plot. for (i in 1:nrow(walker_lake)){ text(walker_lake$x[i],walker_lake$y[i]+.30, # Iteratively writes the V-values over the "+" paste(walker_lake$v)[i],cex=0.9) # signs of character size 0.9 (default=1). 
} # Contour plot: int.v = interp(walker_lake$x,walker_lake$y,walker_lake$v) # Linear interpolation across region contour(int.v,levels=seq(0,1500,100),xlab="", # Creates a contour plot with the interpolated ylab="",xlim=range(walker_lake$x),ylim=range(walker_lake$y),labcex=1,axes=F) box() # you can see the effect that clusters of points have on the countour plot by adding the points: points(walker_lake$x, walker_lake$y, pch=16) # Compute Variograms # First, change dataframe to coordinate object, from the 'sp' package coordinates(walker_lake) = ~x+y # Compute and plot the no-angle variogram (all points within a distance at any angle) walker_variogram = variogram(v~x+y, data = walker_lake, width=10, cutoff=100) plot(walker_variogram, type="l") # Make u & v easier to reference v = walker_lake$v u = walker_lake$u # Compute variogram at different angles variogram_angles = seq(0,165,15) # A side note: R computes anges as angle clockwise from north!!! # This means our angle of zero we know is represented by angle=90 deg. walker_variogram_angled = variogram(v~x+y, data=walker_lake, width=10, alpha=variogram_angles, tol.hor=15, cutoff=100) plot(walker_variogram_angled, type="l") # In order to use the variogram to predict, we have to fit the variogram. 
# We will fit the variogram with a Spherical model (pretty standard) # In order to fit the variogram, we have to make some parameter estimates: # -ratio: this is the ratio of the largest range over the shortest # -major angle: angle of the largest range # -sill: where everything levels out # -nugget: where the curves start # -range: how long it takes to level out # We just have to guess (it most likely will converge with bad guesses) # # Ratio guess: largest = 60, smallest = 20, ratio = 60/20 = 3 # Major angle: 165 # Sill: 80,000 # Nugget: 30,000 # Range: 50 variogram_model = fit.variogram(walker_variogram_angled, vgm(80000,"Sph",50,30000, anis = c(165,1/3)), fit.method = 2) # Note that the angle parameters fall under the 'anis' parameter. # This stands for anisotropy, the term for a non-circular rose plot. plot(walker_variogram_angled, variogram_model, main="Variogram by Angles") ##----Kriging----- # To start doing kriging computationally, we need to define a bounded region around # our points to limit the computations. We do this by computing a 'convex hull', # which is just the region defined by putting a rubber band around our points in space. poly_bounds = chull(coordinates(walker_lake)) # Create a series of closely spaced gridded points in our region to do kriging predictions: x = seq(2.5, 247.5, 5) y = seq(2.5, 247.5, 5) poly_grid = polygrid(x, y, coordinates(walker_lake)[poly_bounds,]) # Perform Kriging: coordinates(poly_grid) = ~x+y krige_results = krige(v ~ 1, walker_lake, newdata = poly_grid, model = variogram_model) head(krige_results) # Plot the interpolation of kriging results: xpred = seq(min(x),max(x),length=100) ypred = seq(min(y),max(y),length=100) int_obs = interp(walker_lake$x,walker_lake$y,walker_lake$v,xo=xpred,yo=ypred) # Interpolates the observed V-values. # Interpolates the kriging prediction values over the coordinates in xo and yo. 
int_pred = interp(coordinates(poly_grid)[,1],coordinates(poly_grid)[,2],krige_results$var1.pred,xo=xpred,yo=ypred) # Interpolates the kriging standard errors over the coordinates in xo and yo. int_se = interp(coordinates(poly_grid)[,1], coordinates(poly_grid)[,2],sqrt(krige_results$var1.var),xo=xpred,yo=ypred) # Plot original, kriging results, and kriging error par(mfrow=c(2,2)) # Computes the minimum V/predicted value zmin = min(int_obs$z[!is.na(int_obs$z)],int_pred$z[!is.na(int_pred$z)]) # Computes the maximum V/predicted value zmax = max(int_obs$z[!is.na(int_obs$z)], int_pred$z[!is.na(int_pred$z)]) image(int_obs,xlab="X",ylab="Y",cex.lab=1.6,main="Observed Concentrations", cex.main=1.6,zlim=c(zmin,zmax),col=rev(heat.colors(24)),cex=1) # Creates a greyscale plot of the interpolated kriged V-values image(int_pred,xlab="X",ylab="Y",cex.lab=1.6,main="Kriging Predicted Values",cex.main=1.6, zlim=c(zmin,zmax),col=rev(heat.colors(24)),cex=1) # Creates a greyscale plot of the interpolated kriging SE's image(int_se,xlab="X",ylab="Y",cex.lab=1.6,main="Kriging Standard Errors",cex.main=1.6, col=rev(heat.colors(24)),cex=1) # Why are there vertical bands in the standard error graph? # This has to do with our range ratio (varies with angle) # In fact, you can see the slight angle around 165 degrees clockwise from North. 
##-----Clustering----- par(mfrow=c(1,1)) # Complete Randomness x_rand = runif(200) y_rand = runif(200) plot(x_rand, y_rand, xlim=c(0,1),ylim=c(0,1), main="Random X Y Points on Unit Square", pch=16) # Clustering x_cluster_centers = runif(5) y_cluster_centers = runif(5) x_cluster = c() y_cluster = c() while (length(x_cluster)<200){ x_temp = runif(1) y_temp = runif(1) # Find the min distance from our random point to all the clusters min_dist = min(sqrt((x_cluster_centers - x_temp)**2 + (y_cluster_centers-y_temp)**2)) if (min_dist < rnorm(1,mean=0,sd=0.05)){ x_cluster = c(x_cluster, x_temp) y_cluster = c(y_cluster, y_temp) } } plot(c(0,1), c(0,1), type="n",main="Clustered X Y Points on Unit Square") points(x_cluster, y_cluster, pch=16) # Over-Regular (homogeneity) x_reg = runif(1) y_reg = runif(1) while (length(x_reg)<200){ x_temp = runif(1) y_temp = runif(1) # Find the min distance from our random point to all the clusters min_dist = min(sqrt((x_reg - x_temp)**2 + (y_reg-y_temp)**2)) expected_even_dist = ( 4 * ( 1/sqrt(200) ) + 4 * ( sqrt(2)/sqrt(200) ) ) / 8 if (min_dist > (expected_even_dist*0.65)){ # Allow for wiggle room x_reg = c(x_reg, x_temp) y_reg = c(y_reg, y_temp) } } plot(c(0,1), c(0,1), type="n",main="Homogeneous X Y Points on Unit Square") points(x_reg, y_reg, pch=16) # Ripley's K clustering coefficient! 
h = 0.1 # Radius of sample n = 100 # sample n circles num_points_circle = function(x_points, y_points, x_temp, y_temp, h){ distances = sqrt((x_points - x_temp)**2 + (y_points - y_temp)**2) return(sum(distances <= h)) } ripley_k_at_h = function(x_points, y_points, h, n){ stopifnot(length(x_points) == length(y_points)) ripley_k_dist = c() ripley_k_dist = sapply(1:n, function(n_i){ sample_index = sample(1:length(x_points), 1) x_center = x_points[sample_index] y_center = y_points[sample_index] num_points = num_points_circle(x_points, y_points, x_center, y_center, h) }) return(ripley_k_dist) } # Random points: ripley_rand = ripley_k_at_h(x_rand, y_rand, 0.2, 100) mean(ripley_rand) # Clustered Points: ripley_cluster = ripley_k_at_h(x_cluster, y_cluster, 0.2, 100) mean(ripley_cluster) # Regular Points: ripley_reg = ripley_k_at_h(x_reg, y_reg, 0.2, 100) mean(ripley_reg) ##-----Ripley's K as a function of distance!---- h = seq(0.01, 0.5, length=100) ripley_rand_line = sapply(h, function(h_i){ mean(ripley_k_at_h(x_rand, y_rand, h_i, 100)) }) ripley_cluster_line = sapply(h, function(h_i){ mean(ripley_k_at_h(x_cluster, y_cluster, h_i, 100)) }) ripley_reg_line = sapply(h, function(h_i){ mean(ripley_k_at_h(x_reg, y_reg, h_i, 100)) }) # Plot results plot(h, ripley_rand_line, type="l", lwd = 2, main="Ripley's K at varying h", ylab="h", xlab="Ripley's K") lines(h, ripley_cluster_line, col="red", lwd=2) lines(h, ripley_reg_line, col="blue", lwd=2) grid() legend('topleft', c('Random', 'Clustered', 'Over Regular'), lwd=c(2,2,2), col=c('black','red','blue'), lty=c(1,1,1)) # Notice the lines 'straighten out' at larger h values. And they even # get slightly closer together as h increases. # This is because of the edge effects! The larger h, the larger the circles, # and we are missing points outside of our region. 
# To save us time, let's do this the easy way: cluster_ppp = ppp(x_cluster, y_cluster) cluster_ripleyK = Kest(cluster_ppp) rand_ppp = ppp(x_rand, y_rand) rand_ripleyK = Kest(rand_ppp) reg_ppp = ppp(x_reg, y_reg) reg_ripleyK = Kest(reg_ppp) plot(c(0,0.25), c(0,0.25), type="n", xlab='h', ylab="Ripley's K", main="Ripley's K Function") lines(rand_ripleyK$r, rand_ripleyK$iso, lwd=2, lty=1) lines(cluster_ripleyK$r, cluster_ripleyK$iso, lwd=2, col="red") lines(reg_ripleyK$r, reg_ripleyK$iso, lwd=2, col="blue") lines(rand_ripleyK$r, rand_ripleyK$theo, lwd=2, lty=8, col="green") # Since the theoretical value of Ripley's K is a square function (think A = pi * r^2) # We usually transform the data by taking the square root of it. This is called # Ripley's L function. cluster_ripleyL = Lest(cluster_ppp) rand_ripleyL = Lest(rand_ppp) reg_ripleyL = Lest(reg_ppp) plot(c(0,0.25), c(-0.08,0.15), type="n", xlab='h', ylab="Ripley's K", main="Ripley's K Function") lines(rand_ripleyL$r, rand_ripleyL$iso - rand_ripleyL$r, lwd=2, lty=1) lines(cluster_ripleyL$r, cluster_ripleyL$iso - cluster_ripleyL$r, lwd=2, col="red") lines(reg_ripleyL$r, reg_ripleyL$iso - reg_ripleyL$r, lwd=2, col="blue") lines(rand_ripleyL$r, rand_ripleyL$theo - rand_ripleyL$r, lwd=2, lty=8, col="green") legend('bottomright',c('Theoretical', 'Random', 'Clustered', 'Over Regular'), lty=c(8,1,1,1), lwd=c(2,2,2,2), col=c('green','black', 'red', 'blue'))
/7_TimeSeries_SpatialStats_Bayes/R_Examples_Lecture7.R
no_license
dagangwood163/DataScience350
R
false
false
22,916
r
##-------------------------------------------- ## ## Lecture 7 R methods ## ## Class: PCE Data Science Methods Class ## ##-------------------------------------------- setwd('E:/Work/Teaching/PCE_Data_Science/7_TimeSeries_SpatialStats_Bayes') # Load Libraries library(TSA) library(forecast) library(gstat) library(akima) library(spatstat) library(deldir) library(lattice) library(geoR) library(sp) library(zoo) ##-------Time Series Introduction------ # Moving Window Averages # Create some data x = seq(1,10,len=200) y = 5*sin(x) + sin(10*x) + 0.25*rnorm(100) plot(x,y,main="Time Series Data", xlab="x", ylab="y", pch=16, cex = 0.25) # Past moving window: mov_avg_past = function(x,window_size){ stopifnot(length(x)>=window_size) mov_avg = c() for (i in 1:length(x)){ if (i<window_size){ temp_avg = mean(x[1:i]) }else{ temp_avg = mean(x[(i-window_size):(i)]) } mov_avg = c(mov_avg, temp_avg) } return(mov_avg) } mov_avg_past5 = mov_avg_past(y,window_size = 5) mov_avg_past10 = mov_avg_past(y,window_size = 10) mov_avg_past25 = mov_avg_past(y,window_size = 25) mov_avg_past50 = mov_avg_past(y,window_size = 50) lines(x,mov_avg_past5, col="red", lwd=2) lines(x,mov_avg_past10, col="blue", lwd=2) lines(x,mov_avg_past25, col="green", lwd=2) lines(x,mov_avg_past50, col="purple", lwd=2) legend('bottomright', c('5 units', '10 units', '25 units', '50 units'), lty=c(1,1,1,1), lwd=c(2,2,2,2), col=c('red','blue','green','purple')) # Let's look at the residuals! time_resid_5 = y - mov_avg_past5 time_resid_10 = y - mov_avg_past10 time_resid_25 = y - mov_avg_past25 time_resid_50 = y - mov_avg_past50 plot(time_resid_50, mov_avg_past5, pch = 16, cex = 0.5, col="red") # This plot isn't too helpful, because the result isn't linear... 
# Look at residuals over time: plot(x, time_resid_5, pch = 16, cex = 0.5, col="red") plot(x, time_resid_10, pch = 16, cex = 0.5, col="blue") plot(x, time_resid_25, pch = 16, cex = 0.5, col="green") plot(x, time_resid_50, pch = 16, cex = 0.5, col="purple") # Notice the wider the window, the more of a trend we see. # Note that there is a function in the 'zoo' package that does this for you: #library(zoo) ?rollapply zoo_mov_avg5 = rollapply(y, width = 5, by = 1, FUN= mean, align = "left") plot(x[5:length(x)],zoo_mov_avg5) lines(x,mov_avg_past5, col="red", lwd=2) # They are the same! points(x,y,pch=16, cex=0.5) # Note that rollapply does not deal with the ends of moving average at all. # One way to deal with this is to 'pad' with NAs zoo_mov_avg5 = rollapply(c(rep(NA, 4),y), width = 5, by = 1, FUN = mean, na.rm = TRUE, align = "left") plot(x,zoo_mov_avg5) lines(x,mov_avg_past5, col="red", lwd=2) ##-----Fourier Transform----- # Create some data period1 = 3 period2 = 0.5 x = seq(0,15,len=5000) y = sin((1/period1)*(2*pi)*x) + 0.5*sin((1/period2)*(2*pi)*x) + 0.05*rnorm(100) plot(x,y, pch = 16, cex=0.25) # Compute Fourier Transform (FFT) and plot the spectrum of frequencies y_spectrum = spectrum(y) plot(y_spectrum$freq[y_spectrum$freq<0.1], y_spectrum$spec[y_spectrum$freq<0.1], type = 'l') # Find maximum frequencies potential_frequencies = y_spectrum$freq[y_spectrum$freq<0.1 & y_spectrum$spec > 25] # Find Periods potential_periods = (1/potential_frequencies) * mean(diff(x)) ##------Simple Exponential Smoothing----- dj_data = read.csv("DJIA.csv") dj_data$Date = as.Date(dj_data$Date, format="%m/%d/%Y") plot(dj_data$Date, dj_data$DJIA, type="l") # Let's consider 2014 onwards dj_data = dj_data[dj_data$Date>=as.Date("2014-01-01"),] plot(dj_data$Date, dj_data$DJIA, type="l", main="Dow Jones Value Daily", xlab="Date", ylab="Value") # use forecast's ses() function: exp_smooth1 = ses(dj_data$DJIA, alpha=0.05, h=31) # h is how many t-units to forecast out exp_smooth2 = 
ses(dj_data$DJIA, alpha=0.15, h=31) exp_smooth3 = ses(dj_data$DJIA, alpha=0.95, h=31) plot(exp_smooth1) lines(exp_smooth1$fitted, col="blue") lines(exp_smooth2$fitted, col="green") lines(exp_smooth3$fitted, col="red") legend('topleft', c('Original Data','alpha=0.05', 'alpha=0.15', 'alpha=0.95'), col=c('black','blue', 'green', 'red'), lty=c(1,1,1)) ##----Double Exponential Smoothing---- # Remember this is equivalent to ARIMA(0,1,1) # P - Auto Regression # D - Degree Integrated (1) # One level differencing # Q - Moving Average (1) # Based only on the previous one double_exp_smooth = Arima(dj_data$DJIA, order = c(0,1,1), seasonal=c(0,1,0)) double_exp_fit = dj_data$DJIA - double_exp_smooth$residuals # fitted values plot(dj_data$Date, dj_data$DJIA,type="l", lwd=2) lines(dj_data$Date, double_exp_fit, col="red", lwd=2, lty=2) # prediction double_exp_pred = predict(double_exp_smooth, n.ahead = 30) lines(seq(from=dj_data$Date[375], to=dj_data$Date[375]+30, by=1)[-1], double_exp_pred$pred, lwd=2, col='green') # Add in standard error lines lines(seq(from=dj_data$Date[375], to=dj_data$Date[375]+30, by=1)[-1], double_exp_pred$pred + double_exp_pred$se, lwd=2, col='green') lines(seq(from=dj_data$Date[375], to=dj_data$Date[375]+30, by=1)[-1], double_exp_pred$pred - double_exp_pred$se, lwd=2, col='green') ##----Auto regressive Model---- lh # Built in dataset ?lh plot(lh) # First Order Auto regressive ar1 = ar(lh, order.max = 1) ar1_fitted = lh - ar1$resid # Plot outcome plot(lh, lwd=2, xlim=c(0,60)) # extend xlim for prediction later lines(ar1_fitted, lwd=2, col="red") # Predict ahead n ar1_pred = predict(ar1, n.ahead=10, se.fit = TRUE) x_pred = length(ar1$resid) : (length(ar1$resid) + 10) lines(x_pred,c(ar1_fitted[length(ar1_fitted)],ar1_pred$pred), lwd=2, col="red", lty=2) lines(x_pred,c(ar1_fitted[length(ar1_fitted)],ar1_pred$pred + ar1_pred$se), lwd=2, col="red", lty=3) lines(x_pred,c(ar1_fitted[length(ar1_fitted)],ar1_pred$pred - ar1_pred$se), lwd=2, col="red", lty=3) 
##-----Auto regressive Moving average Model---- # First Order Auto regressive and Moving average arma1 = arma(lh, order=c(1,1)) # Order is a length 2 vector, [1]=AR, [2]=MA arma1_fitted = lh - arma1$resid # Plot outcome plot(lh, lwd=2, xlim=c(0,60)) lines(arma1_fitted, lwd=2, col="red") # Hold off on predictions until later. ##----ARIMA(0,N,0)=Random Walk Model----- # Generate ARIMA(0,1,0), random walk y_I1 = arima.sim(list(order=c(0,1,0)),n=500) plot(y_I1) # 2nd order random walk y_I2 = arima.sim(list(order=c(0,2,0)), n=500) plot(y_I2) ##----ARIMA(N,0,0)= Autoregressive Model----- # Careful with the AR coefficients, R will throw an error when # D = 0 (Integrated) and series is non-stationary # Generate ARIMA(1,0,0) y_AR1 = arima.sim(list(order=c(1,0,0), ar=0.8),n=500) plot(y_AR1) # 2nd order random walk y_AR2 = arima.sim(list(order=c(2,0,0), ar=c(0.69, 0.3)), n=500) plot(y_AR2) # 5th order random walk y_AR5 = arima.sim(list(order=c(5,0,0), ar=c(0.45, 0.2, 0.1, 0.1, 0.1)), n=500) plot(y_AR5) ##----ARIMA(0,0,N) = Moving Average Model----- # Generate ARIMA(0,0,1) y_MA1 = arima.sim(list(ma=c(0.7)),n=500) plot(y_MA1) # 3rd order Moving Average y_MA3 = arima.sim(list(ma=c(0.9,0.8,0.2)), n=500) plot(y_MA3) # 10th order Moving Average ma_orders = sample(c(0.5,0.6,0.7,0.8), 10, replace=TRUE) y_MA10 = arima.sim(list(ma=ma_orders), n=500) plot(y_MA10) ##-----ARIMA(P,D,Q) X (P,D,Q) seasonality----- # Load las vegas headcounts: headcount = read.csv('JitteredHeadCount.csv', stringsAsFactors = FALSE) # Aggregate headcounts by week: headcount$DateFormat = as.Date(headcount$DateFormat, format="%m/%d/%Y") headcount$week_count = floor(headcount$DayNumber/7.0) headcount_weekly = aggregate(HeadCount ~ week_count, data = headcount, sum) # Not a full last week, drop the last data point headcount_weekly = headcount_weekly[1:52,] # Fit time series: headcount_arima = arima(headcount_weekly$HeadCount, order = c(1,1,1), seasonal = list(order=c(1,0,1))) # What are the fitted coefficients? 
headcount_arima$coef # Get the predictions. Strange, but arima gives the resids, not the predictions: arima_fitted = headcount_weekly$HeadCount - headcount_arima$residuals arima_predictions = predict(headcount_arima, n.ahead=25) x_pred = nrow(headcount_weekly) : (nrow(headcount_weekly) + 25) plot(headcount_weekly$HeadCount, type="l", lwd = 2, col="black", xlim = c(0,80)) lines(arima_fitted, lwd = 2, col="red") lines(x_pred, c(arima_fitted[length(arima_fitted)],arima_predictions$pred), lwd=2, col="red", lty=8) ##-----Time Series by Factors---- dj = read.csv("dow_jones_data.csv", stringsAsFactors = FALSE) dj$Date = as.Date(dj$Date, format = "%Y-%m-%d") # Create many different time factors dj$day_count = as.numeric(dj$Date - min(dj$Date)) dj$week_count = floor(dj$day_count/7.0) dj$month_count = floor(dj$day_count/30.5) dj$year_count = as.numeric(format(dj$Date, format="%Y")) - 1990 dj$month = as.numeric(format(dj$Date, format="%m")) dj$season = floor((dj$month-1) / 3) dj$weekday = as.numeric(format(dj$Date, format = "%w")) dj$week = as.numeric(format(dj$Date, format = "%W")) # Market Bull Dates bull_dates = seq(from = as.Date("2007-10-11"), to = as.Date("2009-03-09"), by = 1) # Create Bull dates logical 0 - 1 dj$bull_market = as.numeric(dj$Date %in% bull_dates) # Is this cheating? Maybe. # Create linear model: dj_model = lm(DJIA ~ . - Date, data = dj) summary(dj_model) # Look at plot plot(dj$Date, dj$DJIA, type="l", lwd=2, main="DJIA", xlab="Time", ylab="DJIA") lines(dj$Date, dj_model$fitted.values, lwd=2, lty=8, col="red") # Closer view: plot(dj$Date, dj$DJIA, type="l", lwd=2, main="DJIA", xlab="Time", ylab="DJIA", xlim=c(as.Date("2009-01-01"), as.Date("2009-12-31")), ylim=c(6500,11000)) lines(dj$Date, dj_model$fitted.values, lwd=2, lty=8, col="red") # Slightly lagging by a bit? # Makes sense because of the "most important" variable. # More autoregressive than 1 day? 
Let's check: DJIA_2_periods_ago = sapply(1:nrow(dj), function(x){ if(x == 1){ return(dj$DJIA[1]) }else{ return(dj$DJIA[x-1]) } }) dj$two_days_ago = DJIA_2_periods_ago dj_model_AR2 = lm(DJIA ~ . - Date, data = dj) summary(dj_model_AR2) # Nope! # Can this predict well? Why not? # # - Turns out just slightly shifting the time series is # a good fit, as the stock market is a random walk and # is highly dependent on where it just was. # - Also, can we predict the bull market variable? Probably not. ##-------Spatial Statistics------ # Clean up rm(list = ls()) gc() ?coalash data(coalash) # Coal ash is a pollutant generated from coal powered mining head(coalash) # Plot with labels plot(coalash$x, coalash$y, type="n", lab=c(16,23,7), xlab="X",ylab="Y") text(coalash$x, coalash$y, format(round(coalash$coalash,1))) title("Coal Ash Percentages",cex=1.5) # Median Polish ?medpolish # Have to create a matrix of coalash values according to thier x-y positions: coalash_mat = tapply(coalash$coalash, list(factor(coalash$x), factor(coalash$y)), function(x) x) coalash_med_pol = medpolish(coalash_mat, na.rm=TRUE) # Returns residuals.... 
coal_no_trend = coalash_mat - coalash_med_pol$residuals # Look at a greyscale plot: par(mfrow=c(1,2)) # We need the max's and mins for scaling the colors zmin = min(coalash_mat[!is.na(coalash_mat)],coal_no_trend[!is.na(coal_no_trend)]) zmax = max(coalash_mat[!is.na(coalash_mat)],coal_no_trend[!is.na(coal_no_trend)]) image(x=1:max(coalash$x), y=1:max(coalash$y), coalash_mat,zlim=c(zmin,zmax),cex.axis=1.5, xlab="Columns",ylab="Rows",cex.lab=1.6,col=gray.colors(12)) title("Original Coal Ash",cex.main=1.5) image(x=1:max(coalash$x),y=1:max(coalash$y),coal_no_trend,zlim=c(zmin,zmax),cex.axis=1.5, xlab="Columns",ylab="Rows",cex.lab=1.6,col=gray.colors(12)) title("Med Polish Coal Ash",cex.main=1.5) ##------Voronoi Diagrams----- par(mfrow=c(1,1)) x = runif(10) y = runif(10) plot(x,y,pch=16, ylim=c(0,1),xlim=c(0,1)) ?ppp xy_ppp = ppp(x,y,c(0,1),c(0,1)) ?deldir tv = deldir(x,y,rw=c(0,1,0,1),plot=TRUE) # Dashed lines are Voronoi Polygons (Dirichlet) # Delaunay Triangulation are the solid lines # get weights: xy_weights_obj = dirichlet(xy_ppp) tile.areas(xy_weights_obj) ##----Variogram---- # First, load and depict the 'walker lake data'. walker_lake = read.table("walk470.txt",header=T) plot(walker_lake$x,walker_lake$y,pch="",xlab="",ylab="", # Plots the (x,y) locations with a "+" sign, no xlim=range(walker_lake$x),ylim=range(walker_lake$y),axes=F)# axis labels, specified axis limits, & no axes. box() # Places a box around the plot. for (i in 1:nrow(walker_lake)){ text(walker_lake$x[i],walker_lake$y[i]+.30, # Iteratively writes the V-values over the "+" paste(walker_lake$v)[i],cex=0.9) # signs of character size 0.9 (default=1). 
} # Contour plot: int.v = interp(walker_lake$x,walker_lake$y,walker_lake$v) # Linear interpolation across region contour(int.v,levels=seq(0,1500,100),xlab="", # Creates a contour plot with the interpolated ylab="",xlim=range(walker_lake$x),ylim=range(walker_lake$y),labcex=1,axes=F) box() # you can see the effect that clusters of points have on the countour plot by adding the points: points(walker_lake$x, walker_lake$y, pch=16) # Compute Variograms # First, change dataframe to coordinate object, from the 'sp' package coordinates(walker_lake) = ~x+y # Compute and plot the no-angle variogram (all points within a distance at any angle) walker_variogram = variogram(v~x+y, data = walker_lake, width=10, cutoff=100) plot(walker_variogram, type="l") # Make u & v easier to reference v = walker_lake$v u = walker_lake$u # Compute variogram at different angles variogram_angles = seq(0,165,15) # A side note: R computes anges as angle clockwise from north!!! # This means our angle of zero we know is represented by angle=90 deg. walker_variogram_angled = variogram(v~x+y, data=walker_lake, width=10, alpha=variogram_angles, tol.hor=15, cutoff=100) plot(walker_variogram_angled, type="l") # In order to use the variogram to predict, we have to fit the variogram. 
# We will fit the variogram with a Spherical model (pretty standard) # In order to fit the variogram, we have to make some parameter estimates: # -ratio: this is the ratio of the largest range over the shortest # -major angle: angle of the largest range # -sill: where everything levels out # -nugget: where the curves start # -range: how long it takes to level out # We just have to guess (it most likely will converge with bad guesses) # # Ratio guess: largest = 60, smallest = 20, ratio = 60/20 = 3 # Major angle: 165 # Sill: 80,000 # Nugget: 30,000 # Range: 50 variogram_model = fit.variogram(walker_variogram_angled, vgm(80000,"Sph",50,30000, anis = c(165,1/3)), fit.method = 2) # Note that the angle parameters fall under the 'anis' parameter. # This stands for anisotropy, the term for a non-circular rose plot. plot(walker_variogram_angled, variogram_model, main="Variogram by Angles") ##----Kriging----- # To start doing kriging computationally, we need to define a bounded region around # our points to limit the computations. We do this by computing a 'convex hull', # which is just the region defined by putting a rubber band around our points in space. poly_bounds = chull(coordinates(walker_lake)) # Create a series of closely spaced gridded points in our region to do kriging predictions: x = seq(2.5, 247.5, 5) y = seq(2.5, 247.5, 5) poly_grid = polygrid(x, y, coordinates(walker_lake)[poly_bounds,]) # Perform Kriging: coordinates(poly_grid) = ~x+y krige_results = krige(v ~ 1, walker_lake, newdata = poly_grid, model = variogram_model) head(krige_results) # Plot the interpolation of kriging results: xpred = seq(min(x),max(x),length=100) ypred = seq(min(y),max(y),length=100) int_obs = interp(walker_lake$x,walker_lake$y,walker_lake$v,xo=xpred,yo=ypred) # Interpolates the observed V-values. # Interpolates the kriging prediction values over the coordinates in xo and yo. 
int_pred = interp(coordinates(poly_grid)[,1],coordinates(poly_grid)[,2],krige_results$var1.pred,xo=xpred,yo=ypred) # Interpolates the kriging standard errors over the coordinates in xo and yo. int_se = interp(coordinates(poly_grid)[,1], coordinates(poly_grid)[,2],sqrt(krige_results$var1.var),xo=xpred,yo=ypred) # Plot original, kriging results, and kriging error par(mfrow=c(2,2)) # Computes the minimum V/predicted value zmin = min(int_obs$z[!is.na(int_obs$z)],int_pred$z[!is.na(int_pred$z)]) # Computes the maximum V/predicted value zmax = max(int_obs$z[!is.na(int_obs$z)], int_pred$z[!is.na(int_pred$z)]) image(int_obs,xlab="X",ylab="Y",cex.lab=1.6,main="Observed Concentrations", cex.main=1.6,zlim=c(zmin,zmax),col=rev(heat.colors(24)),cex=1) # Creates a greyscale plot of the interpolated kriged V-values image(int_pred,xlab="X",ylab="Y",cex.lab=1.6,main="Kriging Predicted Values",cex.main=1.6, zlim=c(zmin,zmax),col=rev(heat.colors(24)),cex=1) # Creates a greyscale plot of the interpolated kriging SE's image(int_se,xlab="X",ylab="Y",cex.lab=1.6,main="Kriging Standard Errors",cex.main=1.6, col=rev(heat.colors(24)),cex=1) # Why are there vertical bands in the standard error graph? # This has to do with our range ratio (varies with angle) # In fact, you can see the slight angle around 165 degrees clockwise from North. 
##-----Clustering----- par(mfrow=c(1,1)) # Complete Randomness x_rand = runif(200) y_rand = runif(200) plot(x_rand, y_rand, xlim=c(0,1),ylim=c(0,1), main="Random X Y Points on Unit Square", pch=16) # Clustering x_cluster_centers = runif(5) y_cluster_centers = runif(5) x_cluster = c() y_cluster = c() while (length(x_cluster)<200){ x_temp = runif(1) y_temp = runif(1) # Find the min distance from our random point to all the clusters min_dist = min(sqrt((x_cluster_centers - x_temp)**2 + (y_cluster_centers-y_temp)**2)) if (min_dist < rnorm(1,mean=0,sd=0.05)){ x_cluster = c(x_cluster, x_temp) y_cluster = c(y_cluster, y_temp) } } plot(c(0,1), c(0,1), type="n",main="Clustered X Y Points on Unit Square") points(x_cluster, y_cluster, pch=16) # Over-Regular (homogeneity) x_reg = runif(1) y_reg = runif(1) while (length(x_reg)<200){ x_temp = runif(1) y_temp = runif(1) # Find the min distance from our random point to all the clusters min_dist = min(sqrt((x_reg - x_temp)**2 + (y_reg-y_temp)**2)) expected_even_dist = ( 4 * ( 1/sqrt(200) ) + 4 * ( sqrt(2)/sqrt(200) ) ) / 8 if (min_dist > (expected_even_dist*0.65)){ # Allow for wiggle room x_reg = c(x_reg, x_temp) y_reg = c(y_reg, y_temp) } } plot(c(0,1), c(0,1), type="n",main="Homogeneous X Y Points on Unit Square") points(x_reg, y_reg, pch=16) # Ripley's K clustering coefficient! 
h = 0.1 # Radius of sample n = 100 # sample n circles num_points_circle = function(x_points, y_points, x_temp, y_temp, h){ distances = sqrt((x_points - x_temp)**2 + (y_points - y_temp)**2) return(sum(distances <= h)) } ripley_k_at_h = function(x_points, y_points, h, n){ stopifnot(length(x_points) == length(y_points)) ripley_k_dist = c() ripley_k_dist = sapply(1:n, function(n_i){ sample_index = sample(1:length(x_points), 1) x_center = x_points[sample_index] y_center = y_points[sample_index] num_points = num_points_circle(x_points, y_points, x_center, y_center, h) }) return(ripley_k_dist) } # Random points: ripley_rand = ripley_k_at_h(x_rand, y_rand, 0.2, 100) mean(ripley_rand) # Clustered Points: ripley_cluster = ripley_k_at_h(x_cluster, y_cluster, 0.2, 100) mean(ripley_cluster) # Regular Points: ripley_reg = ripley_k_at_h(x_reg, y_reg, 0.2, 100) mean(ripley_reg) ##-----Ripley's K as a function of distance!---- h = seq(0.01, 0.5, length=100) ripley_rand_line = sapply(h, function(h_i){ mean(ripley_k_at_h(x_rand, y_rand, h_i, 100)) }) ripley_cluster_line = sapply(h, function(h_i){ mean(ripley_k_at_h(x_cluster, y_cluster, h_i, 100)) }) ripley_reg_line = sapply(h, function(h_i){ mean(ripley_k_at_h(x_reg, y_reg, h_i, 100)) }) # Plot results plot(h, ripley_rand_line, type="l", lwd = 2, main="Ripley's K at varying h", ylab="h", xlab="Ripley's K") lines(h, ripley_cluster_line, col="red", lwd=2) lines(h, ripley_reg_line, col="blue", lwd=2) grid() legend('topleft', c('Random', 'Clustered', 'Over Regular'), lwd=c(2,2,2), col=c('black','red','blue'), lty=c(1,1,1)) # Notice the lines 'straighten out' at larger h values. And they even # get slightly closer together as h increases. # This is because of the edge effects! The larger h, the larger the circles, # and we are missing points outside of our region. 
# To save us time, let's do this the easy way: cluster_ppp = ppp(x_cluster, y_cluster) cluster_ripleyK = Kest(cluster_ppp) rand_ppp = ppp(x_rand, y_rand) rand_ripleyK = Kest(rand_ppp) reg_ppp = ppp(x_reg, y_reg) reg_ripleyK = Kest(reg_ppp) plot(c(0,0.25), c(0,0.25), type="n", xlab='h', ylab="Ripley's K", main="Ripley's K Function") lines(rand_ripleyK$r, rand_ripleyK$iso, lwd=2, lty=1) lines(cluster_ripleyK$r, cluster_ripleyK$iso, lwd=2, col="red") lines(reg_ripleyK$r, reg_ripleyK$iso, lwd=2, col="blue") lines(rand_ripleyK$r, rand_ripleyK$theo, lwd=2, lty=8, col="green") # Since the theoretical value of Ripley's K is a square function (think A = pi * r^2) # We usually transform the data by taking the square root of it. This is called # Ripley's L function. cluster_ripleyL = Lest(cluster_ppp) rand_ripleyL = Lest(rand_ppp) reg_ripleyL = Lest(reg_ppp) plot(c(0,0.25), c(-0.08,0.15), type="n", xlab='h', ylab="Ripley's K", main="Ripley's K Function") lines(rand_ripleyL$r, rand_ripleyL$iso - rand_ripleyL$r, lwd=2, lty=1) lines(cluster_ripleyL$r, cluster_ripleyL$iso - cluster_ripleyL$r, lwd=2, col="red") lines(reg_ripleyL$r, reg_ripleyL$iso - reg_ripleyL$r, lwd=2, col="blue") lines(rand_ripleyL$r, rand_ripleyL$theo - rand_ripleyL$r, lwd=2, lty=8, col="green") legend('bottomright',c('Theoretical', 'Random', 'Clustered', 'Over Regular'), lty=c(8,1,1,1), lwd=c(2,2,2,2), col=c('green','black', 'red', 'blue'))
library(caret) library(e1071) library(pROC) library(PRROC) library(tidyverse) # CpG sites selected from Cox models active_coef1 <- read.csv("/Cluster_Filespace/Marioni_Group/Yufei/output/cox/coef/0.5coef398422_cox.csv") CpGsites1 <- active_coef1[,1] # CpG sites selected from logistic classifier active_coef2 <- read.csv("/Cluster_Filespace/Marioni_Group/Yufei/output/binary/classweights/coef/0.5coefficient_binomial_398422.csv") CpGsites2 <- active_coef2[-1,][,1] CpGsites <- union(CpGsites1,CpGsites2) xtrain_sel <- xtrain[,CpGsites] xtest_sel <- xtest[,CpGsites] xtrain_sel <- cbind(xtrain_sel,"age"=agetrain,"sex"=sextrain) xtest_sel <-cbind(xtest_sel,"age"=agetest,"sex"=sextest) ytrain_factor = replace(ytrain,which(ytrain==1),"yes") ytrain_factor = replace(ytrain_factor,which(ytrain_factor==0),"no") ytest_factor = replace(ytest,which(ytest==1),"yes") ytest_factor = replace(ytest_factor,which(ytest_factor==0),"no") # Set up Repeated k-fold Cross Validation train_control <- trainControl(method="cv", number=3, classProbs=T, summaryFunction=multiClassSummary) ######################################################################### # svm(RBF) # ######################################################################### # Fit the model set.seed(432) system.time( svmR <- train(xtrain_sel, ytrain_factor, method = "svmRadial", metric="AUC", trControl = train_control, tuneGrid = expand.grid(C = 2^(-5:5),sigma=10^(-3:0)), preProcess = c("center","scale")) ) saveRDS(svmR,file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR.rds") #View the model svmRcv <- as.data.frame(svmR$results) write.csv(svmRcv, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR_cv.csv") png(filename = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmRcv.png") ggplot(svmR) dev.off() svmR <-readRDS("/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR.rds") # evaluation on the test set # fitted probability ysvmrprob <- predict(svmR,xtest_sel,type="prob")$yes rocsvmR_test <- 
roc(ytest,ysvmrprob) ggroc(rocsvmR_test,legacy.axes=T)+ labs(x="False Postive Rate(1-Specificity)",y="True Positive Rate(Sensitivity)")+ geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1), color="darkgrey", linetype="dashed") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR_roctest.png") auc_svmR <- auc(rocsvmR_test) prtst_svmR <- pr.curve(scores.class0=ysvmrprob[ytest_factor=="yes"],scores.class1=ysvmrprob[ytest_factor=="no"],curve=T) ggplot(data.frame(prtst_svmR$curve),aes(x=X1,y=X2,color=X3))+ geom_line()+ labs(x="Recall",y="Precision",color="Threshold") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR_prroc.png") prauc_svmR<-prtst_svmR$auc.integral # fitted class ysvmrclass <- predict(svmR,xtest_sel) # confusion matrix res <- list() res[[1]] <- as.matrix(confusionMatrix(data=ysvmrclass,reference=factor(ytest_factor),positive="yes"),what = "classes")[1:4,] res_df <- tibble("method"="svm(radial)", "n features"=ncol(xtrain_sel), "sigma"=svmR$bestTune$sigma, "C"=svmR$bestTune$C, "AUC"=auc_svmR, "PR AUC"=prauc_svmR, res)%>% unnest_wider(res) write.csv(res_df, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmRtestmetrics.csv") ######################################################################### # svm(linear) # ######################################################################### # Set up Repeated k-fold Cross Validation train_control <- trainControl(method="cv", number=3, classProbs=T, summaryFunction=multiClassSummary) set.seed(432) system.time( svmL <- train(factor(ytrain_factor)~., data = cbind.data.frame(ytrain_factor,xtrain_sel), method = "svmLinear", metric="AUC", trControl = train_control, tuneGrid = expand.grid(C = 2^(-5:5)), preProcess = c("center","scale")) ) saveRDS(svmL,file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL.rds") #View the model svmLcv <- as.data.frame(svmL$results) write.csv(svmLcv, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL_cv.csv") 
png(filename = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmLcv.png") ggplot(svmL) dev.off() svmL <- readRDS("/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL.rds") # evaluation on the test set # fitted probability ysvmLprob <- predict(svmL,xtest_sel,type="prob")$yes rocsvmL_test <- roc(ytest,ysvmLprob) ggroc(rocsvmL_test,legacy.axes=T)+ labs(x="False Postive Rate(1-Specificity)",y="True Positive Rate(Sensitivity)")+ geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1), color="darkgrey", linetype="dashed") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL_roctest.png") auc_svmL <- auc(rocsvmL_test) prtst_svmL<-pr.curve(scores.class0=ysvmLprob[ytest_factor=="yes"],scores.class1=ysvmLprob[ytest_factor=="no"],curve=T) ggplot(data.frame(prtst_svmL$curve),aes(x=X1,y=X2,color=X3))+ geom_line()+ labs(x="Recall",y="Precision",color="Threshold") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL_prroc.png") prauc_svmL<-prtst_svmL$auc.integral # fitted class ysvmLclass <- predict(svmL,xtest_sel) # confusion matrix res <- list() res[[1]] <- as.matrix(confusionMatrix(data=ysvmLclass,reference=factor(ytest_factor),positive="yes"),what = "classes")[1:4,] ressvmL_df <- tibble("method"="svm(Linear)", "n features"=ncol(xtrain_sel), "C"=svmL$bestTune$C, "AUC"=auc_svmL, "PR AUC"=prauc_svmL, res)%>% unnest_wider(res) write.csv(ressvmL_df, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmLtestmetrics.csv") ########################################################################## # SVM(Poly) # ########################################################################## set.seed(432) system.time( svmP <- train(factor(ytrain_factor)~., data = cbind.data.frame(ytrain_factor,xtrain_sel), method = "svmPoly", metric="AUC", trControl = train_control, tuneLength = 4, preProcess = c("center","scale")) ) saveRDS(svmP,file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP.rds") #View the model svmPcv <- 
as.data.frame(svmP$results) write.csv(svmPcv, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP_cv.csv") png(filename = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmPcv.png") ggplot(svmP) dev.off() svmP <-readRDS("/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP.rds") # evaluation on the test set # fitted probability ysvmPprob <- predict(svmP,xtest_sel,type="prob")$yes rocsvmP_test <- roc(ytest,ysvmPprob) ggroc(rocsvmP_test,legacy.axes=T)+ labs(x="False Postive Rate(1-Specificity)",y="True Positive Rate(Sensitivity)")+ geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1), color="darkgrey", linetype="dashed") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP_roctest.png") auc_svmP <- auc(rocsvmP_test) prtst_svmP <- pr.curve(scores.class0=ysvmPprob[ytest_factor=="yes"],scores.class1=ysvmPprob[ytest_factor=="no"],curve=T) ggplot(data.frame(prtst_svmP$curve),aes(x=X1,y=X2,color=X3))+ geom_line()+ labs(x="Recall",y="Precision",color="Threshold") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP_prroc.png") prauc_svmP<-prtst_svmP$auc.integral # fitted class ysvmPclass <- predict(svmP,xtest_sel) # confusion matrix res <- list() res[[1]] <- as.matrix(confusionMatrix(data=ysvmPclass,reference=factor(ytest_factor),positive="yes"),what = "classes")[1:4,] ressvmP_df <- tibble("method"="svm(Poly)", "n features"=ncol(xtrain_sel), "C"=svmP$bestTune$C, "degree"=svmP$bestTune$degree, "scale"=svmP$bestTune$scale, "AUC"=auc_svmP, "PR AUC"=prauc_svmP, res)%>% unnest_wider(res) write.csv(ressvmP_df, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmPtestmetrics.csv")
/IHD incidence/5svm.R
no_license
s1914807/MSc-dissertation
R
false
false
8,449
r
library(caret) library(e1071) library(pROC) library(PRROC) library(tidyverse) # CpG sites selected from Cox models active_coef1 <- read.csv("/Cluster_Filespace/Marioni_Group/Yufei/output/cox/coef/0.5coef398422_cox.csv") CpGsites1 <- active_coef1[,1] # CpG sites selected from logistic classifier active_coef2 <- read.csv("/Cluster_Filespace/Marioni_Group/Yufei/output/binary/classweights/coef/0.5coefficient_binomial_398422.csv") CpGsites2 <- active_coef2[-1,][,1] CpGsites <- union(CpGsites1,CpGsites2) xtrain_sel <- xtrain[,CpGsites] xtest_sel <- xtest[,CpGsites] xtrain_sel <- cbind(xtrain_sel,"age"=agetrain,"sex"=sextrain) xtest_sel <-cbind(xtest_sel,"age"=agetest,"sex"=sextest) ytrain_factor = replace(ytrain,which(ytrain==1),"yes") ytrain_factor = replace(ytrain_factor,which(ytrain_factor==0),"no") ytest_factor = replace(ytest,which(ytest==1),"yes") ytest_factor = replace(ytest_factor,which(ytest_factor==0),"no") # Set up Repeated k-fold Cross Validation train_control <- trainControl(method="cv", number=3, classProbs=T, summaryFunction=multiClassSummary) ######################################################################### # svm(RBF) # ######################################################################### # Fit the model set.seed(432) system.time( svmR <- train(xtrain_sel, ytrain_factor, method = "svmRadial", metric="AUC", trControl = train_control, tuneGrid = expand.grid(C = 2^(-5:5),sigma=10^(-3:0)), preProcess = c("center","scale")) ) saveRDS(svmR,file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR.rds") #View the model svmRcv <- as.data.frame(svmR$results) write.csv(svmRcv, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR_cv.csv") png(filename = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmRcv.png") ggplot(svmR) dev.off() svmR <-readRDS("/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR.rds") # evaluation on the test set # fitted probability ysvmrprob <- predict(svmR,xtest_sel,type="prob")$yes rocsvmR_test <- 
roc(ytest,ysvmrprob) ggroc(rocsvmR_test,legacy.axes=T)+ labs(x="False Postive Rate(1-Specificity)",y="True Positive Rate(Sensitivity)")+ geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1), color="darkgrey", linetype="dashed") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR_roctest.png") auc_svmR <- auc(rocsvmR_test) prtst_svmR <- pr.curve(scores.class0=ysvmrprob[ytest_factor=="yes"],scores.class1=ysvmrprob[ytest_factor=="no"],curve=T) ggplot(data.frame(prtst_svmR$curve),aes(x=X1,y=X2,color=X3))+ geom_line()+ labs(x="Recall",y="Precision",color="Threshold") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmR_prroc.png") prauc_svmR<-prtst_svmR$auc.integral # fitted class ysvmrclass <- predict(svmR,xtest_sel) # confusion matrix res <- list() res[[1]] <- as.matrix(confusionMatrix(data=ysvmrclass,reference=factor(ytest_factor),positive="yes"),what = "classes")[1:4,] res_df <- tibble("method"="svm(radial)", "n features"=ncol(xtrain_sel), "sigma"=svmR$bestTune$sigma, "C"=svmR$bestTune$C, "AUC"=auc_svmR, "PR AUC"=prauc_svmR, res)%>% unnest_wider(res) write.csv(res_df, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmRtestmetrics.csv") ######################################################################### # svm(linear) # ######################################################################### # Set up Repeated k-fold Cross Validation train_control <- trainControl(method="cv", number=3, classProbs=T, summaryFunction=multiClassSummary) set.seed(432) system.time( svmL <- train(factor(ytrain_factor)~., data = cbind.data.frame(ytrain_factor,xtrain_sel), method = "svmLinear", metric="AUC", trControl = train_control, tuneGrid = expand.grid(C = 2^(-5:5)), preProcess = c("center","scale")) ) saveRDS(svmL,file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL.rds") #View the model svmLcv <- as.data.frame(svmL$results) write.csv(svmLcv, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL_cv.csv") 
png(filename = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmLcv.png") ggplot(svmL) dev.off() svmL <- readRDS("/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL.rds") # evaluation on the test set # fitted probability ysvmLprob <- predict(svmL,xtest_sel,type="prob")$yes rocsvmL_test <- roc(ytest,ysvmLprob) ggroc(rocsvmL_test,legacy.axes=T)+ labs(x="False Postive Rate(1-Specificity)",y="True Positive Rate(Sensitivity)")+ geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1), color="darkgrey", linetype="dashed") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL_roctest.png") auc_svmL <- auc(rocsvmL_test) prtst_svmL<-pr.curve(scores.class0=ysvmLprob[ytest_factor=="yes"],scores.class1=ysvmLprob[ytest_factor=="no"],curve=T) ggplot(data.frame(prtst_svmL$curve),aes(x=X1,y=X2,color=X3))+ geom_line()+ labs(x="Recall",y="Precision",color="Threshold") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmL_prroc.png") prauc_svmL<-prtst_svmL$auc.integral # fitted class ysvmLclass <- predict(svmL,xtest_sel) # confusion matrix res <- list() res[[1]] <- as.matrix(confusionMatrix(data=ysvmLclass,reference=factor(ytest_factor),positive="yes"),what = "classes")[1:4,] ressvmL_df <- tibble("method"="svm(Linear)", "n features"=ncol(xtrain_sel), "C"=svmL$bestTune$C, "AUC"=auc_svmL, "PR AUC"=prauc_svmL, res)%>% unnest_wider(res) write.csv(ressvmL_df, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmLtestmetrics.csv") ########################################################################## # SVM(Poly) # ########################################################################## set.seed(432) system.time( svmP <- train(factor(ytrain_factor)~., data = cbind.data.frame(ytrain_factor,xtrain_sel), method = "svmPoly", metric="AUC", trControl = train_control, tuneLength = 4, preProcess = c("center","scale")) ) saveRDS(svmP,file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP.rds") #View the model svmPcv <- 
as.data.frame(svmP$results) write.csv(svmPcv, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP_cv.csv") png(filename = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmPcv.png") ggplot(svmP) dev.off() svmP <-readRDS("/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP.rds") # evaluation on the test set # fitted probability ysvmPprob <- predict(svmP,xtest_sel,type="prob")$yes rocsvmP_test <- roc(ytest,ysvmPprob) ggroc(rocsvmP_test,legacy.axes=T)+ labs(x="False Postive Rate(1-Specificity)",y="True Positive Rate(Sensitivity)")+ geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1), color="darkgrey", linetype="dashed") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP_roctest.png") auc_svmP <- auc(rocsvmP_test) prtst_svmP <- pr.curve(scores.class0=ysvmPprob[ytest_factor=="yes"],scores.class1=ysvmPprob[ytest_factor=="no"],curve=T) ggplot(data.frame(prtst_svmP$curve),aes(x=X1,y=X2,color=X3))+ geom_line()+ labs(x="Recall",y="Precision",color="Threshold") ggsave(filename="/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmP_prroc.png") prauc_svmP<-prtst_svmP$auc.integral # fitted class ysvmPclass <- predict(svmP,xtest_sel) # confusion matrix res <- list() res[[1]] <- as.matrix(confusionMatrix(data=ysvmPclass,reference=factor(ytest_factor),positive="yes"),what = "classes")[1:4,] ressvmP_df <- tibble("method"="svm(Poly)", "n features"=ncol(xtrain_sel), "C"=svmP$bestTune$C, "degree"=svmP$bestTune$degree, "scale"=svmP$bestTune$scale, "AUC"=auc_svmP, "PR AUC"=prauc_svmP, res)%>% unnest_wider(res) write.csv(ressvmP_df, file = "/Cluster_Filespace/Marioni_Group/Yufei/output/svm/svmPtestmetrics.csv")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lag.R \name{step_lag} \alias{step_lag} \title{Create a lagged predictor} \usage{ step_lag( recipe, ..., role = "predictor", trained = FALSE, lag = 1, prefix = "lag_", default = NA, columns = NULL, keep_original_cols = TRUE, skip = FALSE, id = rand_id("lag") ) } \arguments{ \item{recipe}{A recipe object. The step will be added to the sequence of operations for this recipe.} \item{...}{One or more selector functions to choose variables for this step. See \code{\link[=selections]{selections()}} for more details.} \item{role}{For model terms created by this step, what analysis role should they be assigned? By default, the new columns created by this step from the original variables will be used as \emph{predictors} in a model.} \item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.} \item{lag}{A vector of positive integers. Each specified column will be lagged for each value in the vector.} \item{prefix}{A prefix for generated column names, default to "lag_".} \item{default}{Passed to \code{dplyr::lag}, determines what fills empty rows left by lagging (defaults to NA).} \item{columns}{A character string of the selected variable names. This field is a placeholder and will be populated once \code{\link[=prep]{prep()}} is used.} \item{keep_original_cols}{A logical to keep the original variables in the output. Defaults to \code{FALSE}.} \item{skip}{A logical. Should the step be skipped when the recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked when \code{\link[=prep]{prep()}} is run, some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)). 
Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations.} \item{id}{A character string that is unique to this step to identify it.} } \value{ An updated version of \code{recipe} with the new step added to the sequence of any existing operations. } \description{ \code{step_lag()} creates a \emph{specification} of a recipe step that will add new columns of lagged data. Lagged data will by default include NA values where the lag was induced. These can be removed with \code{\link[=step_naomit]{step_naomit()}}, or you may specify an alternative filler value with the \code{default} argument. } \details{ The step assumes that the data are already \emph{in the proper sequential order} for lagging. } \section{Tidying}{ When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with column \code{terms} (the columns that will be affected) is returned. } \section{Case weights}{ The underlying operation does not allow for case weights. } \examples{ n <- 10 start <- as.Date("1999/01/01") end <- as.Date("1999/01/10") df <- data.frame( x = runif(n), index = 1:n, day = seq(start, end, by = "day") ) recipe(~., data = df) \%>\% step_lag(index, day, lag = 2:3) \%>\% prep(df) \%>\% bake(df) } \seealso{ Other row operation steps: \code{\link{step_arrange}()}, \code{\link{step_filter}()}, \code{\link{step_impute_roll}()}, \code{\link{step_naomit}()}, \code{\link{step_sample}()}, \code{\link{step_shuffle}()}, \code{\link{step_slice}()} } \concept{row operation steps}
/man/step_lag.Rd
permissive
tidymodels/recipes
R
false
true
3,329
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lag.R \name{step_lag} \alias{step_lag} \title{Create a lagged predictor} \usage{ step_lag( recipe, ..., role = "predictor", trained = FALSE, lag = 1, prefix = "lag_", default = NA, columns = NULL, keep_original_cols = TRUE, skip = FALSE, id = rand_id("lag") ) } \arguments{ \item{recipe}{A recipe object. The step will be added to the sequence of operations for this recipe.} \item{...}{One or more selector functions to choose variables for this step. See \code{\link[=selections]{selections()}} for more details.} \item{role}{For model terms created by this step, what analysis role should they be assigned? By default, the new columns created by this step from the original variables will be used as \emph{predictors} in a model.} \item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.} \item{lag}{A vector of positive integers. Each specified column will be lagged for each value in the vector.} \item{prefix}{A prefix for generated column names, defaults to "lag_".} \item{default}{Passed to \code{dplyr::lag}, determines what fills empty rows left by lagging (defaults to NA).} \item{columns}{A character string of the selected variable names. This field is a placeholder and will be populated once \code{\link[=prep]{prep()}} is used.} \item{keep_original_cols}{A logical to keep the original variables in the output. Defaults to \code{TRUE}.} \item{skip}{A logical. Should the step be skipped when the recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked when \code{\link[=prep]{prep()}} is run, some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)). 
Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations.} \item{id}{A character string that is unique to this step to identify it.} } \value{ An updated version of \code{recipe} with the new step added to the sequence of any existing operations. } \description{ \code{step_lag()} creates a \emph{specification} of a recipe step that will add new columns of lagged data. Lagged data will by default include NA values where the lag was induced. These can be removed with \code{\link[=step_naomit]{step_naomit()}}, or you may specify an alternative filler value with the \code{default} argument. } \details{ The step assumes that the data are already \emph{in the proper sequential order} for lagging. } \section{Tidying}{ When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with column \code{terms} (the columns that will be affected) is returned. } \section{Case weights}{ The underlying operation does not allow for case weights. } \examples{ n <- 10 start <- as.Date("1999/01/01") end <- as.Date("1999/01/10") df <- data.frame( x = runif(n), index = 1:n, day = seq(start, end, by = "day") ) recipe(~., data = df) \%>\% step_lag(index, day, lag = 2:3) \%>\% prep(df) \%>\% bake(df) } \seealso{ Other row operation steps: \code{\link{step_arrange}()}, \code{\link{step_filter}()}, \code{\link{step_impute_roll}()}, \code{\link{step_naomit}()}, \code{\link{step_sample}()}, \code{\link{step_shuffle}()}, \code{\link{step_slice}()} } \concept{row operation steps}
\name{Df2SpLines} \alias{Df2SpLines} \title{ Data Frame to Spatial Lines } \description{ This function converts an object of type \code{\link{data.frame}}, calculated by the function \code{\link{ProcTraj}}, into an object of type \code{\link{SpatialLines-class}}. } \usage{ Df2SpLines(df, crs=NA) } \arguments{ \item{df}{ \code{\link{data.frame}} Object created by the function \code{\link{ProcTraj}}. } \item{crs}{ String: Valid projection string. An example would be crs="+proj=longlat +datum=NAD27" } } \details{ An individual line consists of a set of lines in the data frame that contains the same ID. This function identifies individual trajectories based on their length. It is assumed that all trajectories calculated by HySplit using the \code{\link{ProcTraj}} function have the same length. Thus, once known the length of the trajectories, this function splits the data frame in X different data frames where each data frame contains R rows, R being the trajectory's length and X being the number of rows in the initial data frame divided by the trajectory's length. Each of the X different data frames will be transformed into a different line. } \value{ Returns an object of class \code{\link{SpatialLines-class}}. } \author{ Thalles Santos Silva } \seealso{ \code{\link{data.frame}}, \code{\link{ProcTraj}}, \code{\link{SpatialLines-class}}. } \examples{ ## load data frame of HYSPLIT trajectory calculations calculated by function ProcTraj crs <- "+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0" air.traj.lines <- Df2SpLines(air.traj, crs) PlotTraj(air.traj.lines) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ Lines } \keyword{ Trajectories }
/man/Df2SpLines.Rd
no_license
sthalles/opentraj
R
false
false
1,763
rd
\name{Df2SpLines} \alias{Df2SpLines} \title{ Data Frame to Spatial Lines } \description{ This function converts an object of type \code{\link{data.frame}}, calculated by the function \code{\link{ProcTraj}}, into an object of type \code{\link{SpatialLines-class}}. } \usage{ Df2SpLines(df, crs=NA) } \arguments{ \item{df}{ \code{\link{data.frame}} Object created by the function \code{\link{ProcTraj}}. } \item{crs}{ String: Valid projection string. An example would be crs="+proj=longlat +datum=NAD27" } } \details{ An individual line consists of a set of lines in the data frame that contains the same ID. This function identifies individual trajectories based on their length. It is assumed that all trajectories calculated by HySplit using the \code{\link{ProcTraj}} function have the same length. Thus, once known the length of the trajectories, this function splits the data frame in X different data frames where each data frame contains R rows, R being the trajectory's length and X being the number of rows in the initial data frame divided by the trajectory's length. Each of the X different data frames will be transformed into a different line. } \value{ Returns an object of class \code{\link{SpatialLines-class}}. } \author{ Thalles Santos Silva } \seealso{ \code{\link{data.frame}}, \code{\link{ProcTraj}}, \code{\link{SpatialLines-class}}. } \examples{ ## load data frame of HYSPLIT trajectory calculations calculated by function ProcTraj crs <- "+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0" air.traj.lines <- Df2SpLines(air.traj, crs) PlotTraj(air.traj.lines) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ Lines } \keyword{ Trajectories }
# plot3.R -- Plot 3 of the Exploratory Data Analysis course project.
# Reads household power consumption data, subsets it to 2007-02-01/2007-02-02,
# and overlays the three sub-metering series on a single set of axes.

# NOTE(review): setwd() in a script is fragile; kept for compatibility with the
# original workflow, but a relative path / project-root helper is preferable.
setwd("C:/Users/homepc/Dropbox/Labshare/UJames/Coursera/data_science/exploratory_data_analysis/plothw")

# Unzip and read the raw file (semicolon-separated).
unzip("exdata-data-household_power_consumption.zip")
powercon <- read.csv("household_power_consumption.txt", sep = ";")

library(lubridate)

# Keep only the two target days (Date column is d/m/yyyy text).
powercon <- powercon[powercon$Date %in% c("1/2/2007", "2/2/2007"), ]

# Parse the date and time columns.
# (fix: original had a duplicated assignment `Date3 <- Date3 <- dmy(Date2)`)
Date2 <- as.character(powercon$Date)
Date3 <- dmy(Date2)
Time2 <- as.character(powercon$Time)
Time3 <- hms(Time2)
weekday <- weekdays(as.Date(powercon$Date))
powercon <- cbind(Time3, weekday, Date3, powercon)

# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- as.POSIXct(paste(powercon$Date3, powercon$Time),
                       format = "%Y-%m-%d %H:%M:%S")
powercon <- cbind(datetime, powercon)

# Coerce the sub-metering columns to numeric (they may have been read as
# factors or character, depending on the R version / read options).
powercon$Sub_metering_1 <- as.numeric(as.character(powercon$Sub_metering_1))
powercon$Sub_metering_2 <- as.numeric(as.character(powercon$Sub_metering_2))
powercon$Sub_metering_3 <- as.numeric(as.character(powercon$Sub_metering_3))
# (fix: removed the second conversion of Sub_metering_2 via
# `as.numeric(levels(...)[...])` -- at this point the column is already
# numeric, so levels() returns NULL and the expression breaks the column.)

# Draw the first series, then add the others with lines() instead of
# par(new = TRUE), which would redraw axes and ignore mismatched scales.
plot(powercon$datetime, powercon$Sub_metering_1, type = "l",
     ylim = c(0, 40), col = "black")
lines(powercon$datetime, powercon$Sub_metering_2, col = "red")
lines(powercon$datetime, powercon$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1))
# (fix: dropped bty = "l" -- "l" is not a valid legend box type; legend()
# accepts only "o" (boxed, the default) or "n".)

# Save plot 3 as a png
# dev.copy(png, "plot3.png")
# dev.off()
/plot3.R
no_license
jbhanks/ExData_Plotting1
R
false
false
1,508
r
# plot3.R -- Plot 3 of the Exploratory Data Analysis course project.
# Reads household power consumption data, subsets it to 2007-02-01/2007-02-02,
# and overlays the three sub-metering series on a single set of axes.

# NOTE(review): setwd() in a script is fragile; kept for compatibility with the
# original workflow, but a relative path / project-root helper is preferable.
setwd("C:/Users/homepc/Dropbox/Labshare/UJames/Coursera/data_science/exploratory_data_analysis/plothw")

# Unzip and read the raw file (semicolon-separated).
unzip("exdata-data-household_power_consumption.zip")
powercon <- read.csv("household_power_consumption.txt", sep = ";")

library(lubridate)

# Keep only the two target days (Date column is d/m/yyyy text).
powercon <- powercon[powercon$Date %in% c("1/2/2007", "2/2/2007"), ]

# Parse the date and time columns.
# (fix: original had a duplicated assignment `Date3 <- Date3 <- dmy(Date2)`)
Date2 <- as.character(powercon$Date)
Date3 <- dmy(Date2)
Time2 <- as.character(powercon$Time)
Time3 <- hms(Time2)
weekday <- weekdays(as.Date(powercon$Date))
powercon <- cbind(Time3, weekday, Date3, powercon)

# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- as.POSIXct(paste(powercon$Date3, powercon$Time),
                       format = "%Y-%m-%d %H:%M:%S")
powercon <- cbind(datetime, powercon)

# Coerce the sub-metering columns to numeric (they may have been read as
# factors or character, depending on the R version / read options).
powercon$Sub_metering_1 <- as.numeric(as.character(powercon$Sub_metering_1))
powercon$Sub_metering_2 <- as.numeric(as.character(powercon$Sub_metering_2))
powercon$Sub_metering_3 <- as.numeric(as.character(powercon$Sub_metering_3))
# (fix: removed the second conversion of Sub_metering_2 via
# `as.numeric(levels(...)[...])` -- at this point the column is already
# numeric, so levels() returns NULL and the expression breaks the column.)

# Draw the first series, then add the others with lines() instead of
# par(new = TRUE), which would redraw axes and ignore mismatched scales.
plot(powercon$datetime, powercon$Sub_metering_1, type = "l",
     ylim = c(0, 40), col = "black")
lines(powercon$datetime, powercon$Sub_metering_2, col = "red")
lines(powercon$datetime, powercon$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1))
# (fix: dropped bty = "l" -- "l" is not a valid legend box type; legend()
# accepts only "o" (boxed, the default) or "n".)

# Save plot 3 as a png
# dev.copy(png, "plot3.png")
# dev.off()
# Auto-generated fuzzing regression input (AFL + valgrind harness) for the
# internal C++ routine myTAI:::cpp_omitMatrix. It replays a captured argument
# list against the entry point and prints the structure of the result;
# crash/memory-error detection is done externally by the harness.

# Captured arguments:
#  - AgeVector: an all-zero numeric vector.
#  - ExpressionSet: a 5 x 7 numeric matrix of extreme-magnitude doubles
#    (values near the double overflow/underflow limits), presumably chosen
#    by the fuzzer to probe numeric edge cases -- TODO confirm against the
#    harness generator.
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                 ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157,
                   2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07,
                   1.64896247962423e-241, 1.7158565271506e-24, 3.45241123006119e+96,
                   3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237,
                   3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226,
                   1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304,
                   1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31,
                   3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226,
                   2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92,
                   0.00315105907067092, 2.28082165029915e+210, 964215356953.314,
                   3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42,
                   3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281),
                   .Dim = c(5L, 7L)))

# Invoke the unexported C++ wrapper with the fuzzed arguments.
# NOTE(review): ::: reaches into myTAI's private namespace -- intentional here,
# since the harness targets the internal function directly.
result <- do.call(myTAI:::cpp_omitMatrix, testlist)

# Print the result structure (any output at all means no crash occurred).
str(result)
/myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615846231-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
1,090
r
# Auto-generated fuzzing regression input (AFL + valgrind harness) for the
# internal C++ routine myTAI:::cpp_omitMatrix. It replays a captured argument
# list against the entry point and prints the structure of the result;
# crash/memory-error detection is done externally by the harness.

# Captured arguments:
#  - AgeVector: an all-zero numeric vector.
#  - ExpressionSet: a 5 x 7 numeric matrix of extreme-magnitude doubles
#    (values near the double overflow/underflow limits), presumably chosen
#    by the fuzzer to probe numeric edge cases -- TODO confirm against the
#    harness generator.
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                 ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157,
                   2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07,
                   1.64896247962423e-241, 1.7158565271506e-24, 3.45241123006119e+96,
                   3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237,
                   3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226,
                   1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304,
                   1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31,
                   3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226,
                   2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92,
                   0.00315105907067092, 2.28082165029915e+210, 964215356953.314,
                   3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42,
                   3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281),
                   .Dim = c(5L, 7L)))

# Invoke the unexported C++ wrapper with the fuzzed arguments.
# NOTE(review): ::: reaches into myTAI's private namespace -- intentional here,
# since the harness targets the internal function directly.
result <- do.call(myTAI:::cpp_omitMatrix, testlist)

# Print the result structure (any output at all means no crash occurred).
str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rl_growth_forms.R \name{rl_growth_forms} \alias{rl_growth_forms} \alias{rl_growth_forms_} \title{Get plant species growth forms by taxon name, IUCN id, and region} \usage{ rl_growth_forms(name = NULL, id = NULL, region = NULL, key = NULL, parse = TRUE, ...) rl_growth_forms_(name = NULL, id = NULL, region = NULL, key = NULL, ...) } \arguments{ \item{name}{(character) A taxonomic name} \item{id}{(character) An IUCN identifier} \item{region}{(character) A region name, see \code{\link{rl_regions}} for acceptable region identifiers (use the entries in the \code{identifier} column)} \item{key}{A IUCN API token. See \url{http://apiv3.iucnredlist.org/api/v3/token} to get a token} \item{parse}{(logical) Whether to parse to list (\code{FALSE}) or data.frame (\code{TRUE}). Default: \code{TRUE}} \item{...}{Curl options passed to \code{\link[crul]{HttpClient}}} } \value{ A list, with the data in the \code{result} slot, unless using a function with a trailing underscore, in which case json as character string is returned. } \description{ Get plant species growth forms by taxon name, IUCN id, and region } \examples{ \dontrun{ rl_growth_forms('Quercus robur') rl_growth_forms('Quercus robur', region = 'europe') rl_growth_forms(id = 63532) rl_growth_forms(id = 63532, region = 'europe') rl_growth_forms('Mucuna bracteata') rl_growth_forms('Abarema villifera') rl_growth_forms('Adansonia perrieri') rl_growth_forms('Adenostemma harlingii') rl_growth_forms_('Quercus robur') rl_growth_forms_(id = 63532, region = 'europe') } } \references{ API docs at \url{http://apiv3.iucnredlist.org/api/v3/docs} }
/man/rl_growth_forms.Rd
permissive
MoisesExpositoAlonso/rredlist
R
false
true
1,689
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rl_growth_forms.R \name{rl_growth_forms} \alias{rl_growth_forms} \alias{rl_growth_forms_} \title{Get plant species growth forms by taxon name, IUCN id, and region} \usage{ rl_growth_forms(name = NULL, id = NULL, region = NULL, key = NULL, parse = TRUE, ...) rl_growth_forms_(name = NULL, id = NULL, region = NULL, key = NULL, ...) } \arguments{ \item{name}{(character) A taxonomic name} \item{id}{(character) An IUCN identifier} \item{region}{(character) A region name, see \code{\link{rl_regions}} for acceptable region identifiers (use the entries in the \code{identifier} column)} \item{key}{A IUCN API token. See \url{http://apiv3.iucnredlist.org/api/v3/token} to get a token} \item{parse}{(logical) Whether to parse to list (\code{FALSE}) or data.frame (\code{TRUE}). Default: \code{TRUE}} \item{...}{Curl options passed to \code{\link[crul]{HttpClient}}} } \value{ A list, with the data in the \code{result} slot, unless using a function with a trailing underscore, in which case json as character string is returned. } \description{ Get plant species growth forms by taxon name, IUCN id, and region } \examples{ \dontrun{ rl_growth_forms('Quercus robur') rl_growth_forms('Quercus robur', region = 'europe') rl_growth_forms(id = 63532) rl_growth_forms(id = 63532, region = 'europe') rl_growth_forms('Mucuna bracteata') rl_growth_forms('Abarema villifera') rl_growth_forms('Adansonia perrieri') rl_growth_forms('Adenostemma harlingii') rl_growth_forms_('Quercus robur') rl_growth_forms_(id = 63532, region = 'europe') } } \references{ API docs at \url{http://apiv3.iucnredlist.org/api/v3/docs} }
% Generated by roxygen2 (4.0.2): do not edit by hand \docType{data} \encoding{UTF-8} \name{test_poly} \alias{test_poly} \title{Polygon outlining TEAM site in Caxiuanã, Brazil} \description{ Contains a \code{SpatialPolygonsDataFrame} with a simplified polygon of the area within the Tropical Ecology Assessment and Monitoring (TEAM) network site in Caxiuanã, Brazil. }
/man/test_poly.Rd
no_license
j-indarto/gfcanalysis
R
false
false
371
rd
% Generated by roxygen2 (4.0.2): do not edit by hand \docType{data} \encoding{UTF-8} \name{test_poly} \alias{test_poly} \title{Polygon outlining TEAM site in Caxiuanã, Brazil} \description{ Contains a \code{SpatialPolygonsDataFrame} with a simplified polygon of the area within the Tropical Ecology Assessment and Monitoring (TEAM) network site in Caxiuanã, Brazil. }
# mom.r -- French momentum factor vs. the market factor.
# Pulls daily Fama-French factor returns from SQL Server, plots cumulative
# returns for several sub-periods, and charts annual returns side by side.

library('RODBC')
library('quantmod')
library('PerformanceAnalytics')
library('tidyverse')
library('lubridate')
library('reshape2')
library('viridis')
library('ggthemes')
library('ggrepel')

options("scipen" = 100)
options(stringsAsFactors = FALSE)

# config.r defines ldbserver / ldbuser / ldbpassword used below.
source("d:/stockviz/r/config.r")
source("D:/StockViz/public/blog/common/plot.common.R")

# Suppress the default on-screen graphics device.
pdf(NULL)

reportPath <- "D:/StockViz/public/blog/factors/plots"

lcon <- odbcDriverConnect(sprintf("Driver={ODBC Driver 17 for SQL Server};Server=%s;Database=%s;Uid=%s;Pwd=%s;", ldbserver, "StockVizUs2", ldbuser, ldbpassword),
                          case = "nochange", believeNRows = TRUE)

# Daily market excess return (MKT-RF), scaled from percent to decimal.
fdf <- sqlQuery(lcon, "select time_stamp, ret/100 from FAMA_FRENCH_5_FACTOR_DAILY where RET_TYPE = 'TTBIA' and KEY_ID='MKT-RF'")
mktXts <- xts(fdf[,2], fdf[,1])

# Daily momentum factor return (MOM), scaled from percent to decimal.
fdf <- sqlQuery(lcon, "select time_stamp, ret/100 from FAMA_FRENCH_MOMENTUM_DAILY where RET_TYPE = 'M' and KEY_ID='MOM'")
momXts <- xts(fdf[,2], fdf[,1])

# fix: close the ODBC connection once the data is loaded; the original
# script leaked it for the lifetime of the session.
odbcClose(lcon)

# Align the two series and drop dates missing from either one.
toPlot <- na.omit(merge(momXts, mktXts))
names(toPlot) <- c('MOM', 'MKT')

# Cumulative-return charts: full history plus three sub-periods.
Common.PlotCumReturns(toPlot, "French Momentum Factor", "", sprintf("%s/french-momentum.png", reportPath), NULL)
Common.PlotCumReturns(toPlot["1995/2005"], "French Momentum Factor", "", sprintf("%s/french-momentum.1995.2005.png", reportPath), NULL)
Common.PlotCumReturns(toPlot["2006/2016"], "French Momentum Factor", "", sprintf("%s/french-momentum.2006.2016.png", reportPath), NULL)
Common.PlotCumReturns(toPlot["2017/"], "French Momentum Factor", "", sprintf("%s/french-momentum.2017.png", reportPath), NULL)

# Annual returns of the two cumulative-growth series.
cmTr <- merge(cumprod(1 + toPlot[,2]), cumprod(1 + toPlot[,1]))
annRets <- merge(yearlyReturn(cmTr[,1]), yearlyReturn(cmTr[,2]))
names(annRets) <- c('MKT', 'MOM')

# Long format (one row per year/series) for a dodged bar chart, in percent.
toPlot <- data.frame(100*annRets)
toPlot$Y <- year(index(annRets))
toPlot$Y <- factor(toPlot$Y, levels=unique(toPlot$Y))
toPlot <- melt(toPlot, id='Y')

ggplot(toPlot, aes(x=Y, y=value, fill=variable)) +
  theme_economist() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  scale_fill_viridis(discrete = TRUE) +
  geom_bar(stat="identity", position=position_dodge()) +
  labs(y='return (%)', x='', color='', fill='', title="French Momentum Factor") +
  # fix: na.rm=T -> na.rm=TRUE (T is a reassignable variable, not a keyword)
  annotate("text", x=1, y=min(toPlot$value, na.rm=TRUE), label = "@StockViz", hjust=0, vjust=-1, col="white", cex=6, fontface = "bold", alpha = 0.5)
ggsave(sprintf("%s/french-momentum.annual.png", reportPath), width=16, height=8, units="in")
/factors/mom.r
no_license
stockviz/blog
R
false
false
2,375
r
# mom.r -- French momentum factor vs. the market factor.
# Pulls daily Fama-French factor returns from SQL Server, plots cumulative
# returns for several sub-periods, and charts annual returns side by side.

library('RODBC')
library('quantmod')
library('PerformanceAnalytics')
library('tidyverse')
library('lubridate')
library('reshape2')
library('viridis')
library('ggthemes')
library('ggrepel')

options("scipen" = 100)
options(stringsAsFactors = FALSE)

# config.r defines ldbserver / ldbuser / ldbpassword used below.
source("d:/stockviz/r/config.r")
source("D:/StockViz/public/blog/common/plot.common.R")

# Suppress the default on-screen graphics device.
pdf(NULL)

reportPath <- "D:/StockViz/public/blog/factors/plots"

lcon <- odbcDriverConnect(sprintf("Driver={ODBC Driver 17 for SQL Server};Server=%s;Database=%s;Uid=%s;Pwd=%s;", ldbserver, "StockVizUs2", ldbuser, ldbpassword),
                          case = "nochange", believeNRows = TRUE)

# Daily market excess return (MKT-RF), scaled from percent to decimal.
fdf <- sqlQuery(lcon, "select time_stamp, ret/100 from FAMA_FRENCH_5_FACTOR_DAILY where RET_TYPE = 'TTBIA' and KEY_ID='MKT-RF'")
mktXts <- xts(fdf[,2], fdf[,1])

# Daily momentum factor return (MOM), scaled from percent to decimal.
fdf <- sqlQuery(lcon, "select time_stamp, ret/100 from FAMA_FRENCH_MOMENTUM_DAILY where RET_TYPE = 'M' and KEY_ID='MOM'")
momXts <- xts(fdf[,2], fdf[,1])

# fix: close the ODBC connection once the data is loaded; the original
# script leaked it for the lifetime of the session.
odbcClose(lcon)

# Align the two series and drop dates missing from either one.
toPlot <- na.omit(merge(momXts, mktXts))
names(toPlot) <- c('MOM', 'MKT')

# Cumulative-return charts: full history plus three sub-periods.
Common.PlotCumReturns(toPlot, "French Momentum Factor", "", sprintf("%s/french-momentum.png", reportPath), NULL)
Common.PlotCumReturns(toPlot["1995/2005"], "French Momentum Factor", "", sprintf("%s/french-momentum.1995.2005.png", reportPath), NULL)
Common.PlotCumReturns(toPlot["2006/2016"], "French Momentum Factor", "", sprintf("%s/french-momentum.2006.2016.png", reportPath), NULL)
Common.PlotCumReturns(toPlot["2017/"], "French Momentum Factor", "", sprintf("%s/french-momentum.2017.png", reportPath), NULL)

# Annual returns of the two cumulative-growth series.
cmTr <- merge(cumprod(1 + toPlot[,2]), cumprod(1 + toPlot[,1]))
annRets <- merge(yearlyReturn(cmTr[,1]), yearlyReturn(cmTr[,2]))
names(annRets) <- c('MKT', 'MOM')

# Long format (one row per year/series) for a dodged bar chart, in percent.
toPlot <- data.frame(100*annRets)
toPlot$Y <- year(index(annRets))
toPlot$Y <- factor(toPlot$Y, levels=unique(toPlot$Y))
toPlot <- melt(toPlot, id='Y')

ggplot(toPlot, aes(x=Y, y=value, fill=variable)) +
  theme_economist() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  scale_fill_viridis(discrete = TRUE) +
  geom_bar(stat="identity", position=position_dodge()) +
  labs(y='return (%)', x='', color='', fill='', title="French Momentum Factor") +
  # fix: na.rm=T -> na.rm=TRUE (T is a reassignable variable, not a keyword)
  annotate("text", x=1, y=min(toPlot$value, na.rm=TRUE), label = "@StockViz", hjust=0, vjust=-1, col="white", cex=6, fontface = "bold", alpha = 0.5)
ggsave(sprintf("%s/french-momentum.annual.png", reportPath), width=16, height=8, units="in")
##########################################################
# Create edx set, validation set (final hold-out test set)
##########################################################
# Note: this process could take a couple of minutes

# if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
# if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
# if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")

library(tidyverse)
library(caret)
library(data.table)

# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# dl <- tempfile()
# download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)

ratings <- fread(text = gsub("::", "\t", readLines("DS Course/capstone/ml-10m/ml-10M100K/ratings.dat")),
                 col.names = c("userId", "movieId", "rating", "timestamp"))

movies <- str_split_fixed(readLines("DS Course/capstone/ml-10m/ml-10M100K/movies.dat"), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")

# if using R 4.0 or later:
movies <- as.data.frame(movies) %>%
  mutate(movieId = as.numeric(movieId),
         title = as.character(title),
         genres = as.character(genres))

movielens <- left_join(ratings, movies, by = "movieId")

# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]

# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")

# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)

# rm(dl, ratings, movies, test_index, temp, movielens, removed)
rm(ratings, movies, test_index, temp, movielens, removed)

# --- Exploration ---
edx %>% filter(rating == 3) %>% count()

# No of Users and No of Movies
# (fix: this summary was computed twice in the original; once is enough)
edx %>% summarize(n_users = n_distinct(userId), n_movies = n_distinct(movieId))

edx %>% filter(grepl("Drama", genres)) %>% count()
edx %>% filter(grepl("Comedy", genres)) %>% count()
edx %>% filter(grepl("Thriller", genres)) %>% count()
edx %>% filter(grepl("Romance", genres)) %>% count()

edx %>% group_by(title) %>% summarise(n = n()) %>% arrange(-n)
edx %>% group_by(rating) %>% summarise(n = n()) %>% arrange(-n)

# Create Training set and Test set from edx
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
train_set <- edx[-test_index,]
test_set <- edx[test_index,]

# Remove entries from test_set whose movie/user does not appear in train_set
test_set <- test_set %>%
  semi_join(train_set, by = "movieId") %>%
  semi_join(train_set, by = "userId")

# Root mean squared error between true and predicted ratings.
RMSE <- function(true_ratings, predicted_ratings){
  sqrt(mean((true_ratings - predicted_ratings)^2))
}

# Baseline model: predict the overall average rating for everything.
mu_hat <- mean(train_set$rating)
naive_rmse <- RMSE(test_set$rating, mu_hat)
naive_rmse

# Result table tracking RMSE as the model improves
rmse_results <- tibble(method = "Just the average", RMSE = naive_rmse)

# Movie effects (movie bias): per-movie deviation from the global mean.
# (equivalent to lm(rating ~ as.factor(movieId)), which would not fit in memory)
movie_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu_hat))
qplot(b_i, data = movie_avgs, bins = 10, color = I("black"))

predicted_ratings <- mu_hat + test_set %>%
  left_join(movie_avgs, by='movieId') %>%
  pull(b_i)
RMSE(predicted_ratings, test_set$rating)
rmse_results <- rmse_results %>%
  add_row(method = "Added Movie effects", RMSE = RMSE(predicted_ratings, test_set$rating))

# User effects (user bias): per-user deviation after removing movie bias.
train_set %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating)) %>%
  filter(n()>=100) %>%
  ggplot(aes(b_u)) +
  geom_histogram(bins = 30, color = "black")

user_avgs <- train_set %>%
  left_join(movie_avgs, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))

predicted_ratings <- test_set %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  mutate(pred = mu_hat + b_i + b_u) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)
rmse_results <- rmse_results %>%
  add_row(method = "Added User effects", RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# Regularization: penalize large movie-effect estimates backed by few ratings
# (penalized least squares).
lambda <- 3
mu <- mean(train_set$rating)
movie_reg_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n()+lambda), n_i = n())

tibble(original = movie_avgs$b_i,
       regularlized = movie_reg_avgs$b_i,
       n = movie_reg_avgs$n_i) %>%
  ggplot(aes(original, regularlized, size=sqrt(n))) +
  geom_point(shape=1, alpha=0.5)

movie_titles <- edx %>%
  select(movieId, title) %>%
  distinct()

# Top 10 movies by regularized effect.
train_set %>%
  count(movieId) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(movie_titles, by = "movieId") %>%
  arrange(desc(b_i)) %>%
  slice(1:10) %>%
  pull(title)

# Bottom 10 movies by regularized effect.
train_set %>%
  count(movieId) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(movie_titles, by="movieId") %>%
  arrange(b_i) %>%
  select(title, b_i, n) %>%
  slice(1:10) %>%
  pull(title)

predicted_ratings <- test_set %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs, by='userId') %>%
  mutate(pred = mu + b_i) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)
rmse_results <- rmse_results %>%
  add_row(method = "Regularized Movie effects", RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# Cross-validate the penalty term lambda.
lambdas <- seq(0, 10, 0.25)
mu <- mean(train_set$rating)
just_the_sum <- train_set %>%
  group_by(movieId) %>%
  summarize(s = sum(rating - mu), n_i = n())

rmses <- sapply(lambdas, function(l){
  predicted_ratings <- test_set %>%
    left_join(just_the_sum, by='movieId') %>%
    mutate(b_i = s/(n_i+l)) %>%
    mutate(pred = mu + b_i) %>%
    pull(pred)
  return(RMSE(predicted_ratings, test_set$rating))
})
qplot(lambdas, rmses)
lambdas[which.min(rmses)]

# Redo the regularization with the best-fit lambda.
# (fix: the original assigned `lamda <- 1.5` -- a typo -- so the code below
# silently kept using the stale lambda = 3. Use the cross-validated optimum.)
lambda <- lambdas[which.min(rmses)]
movie_reg_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n()+lambda), n_i = n())

user_avgs_after_reg_avg <- train_set %>%
  left_join(movie_reg_avgs, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))

tibble(original = movie_avgs$b_i,
       regularlized = movie_reg_avgs$b_i,
       n = movie_reg_avgs$n_i) %>%
  ggplot(aes(original, regularlized, size=sqrt(n))) +
  geom_point(shape=1, alpha=0.5)

predicted_ratings <- test_set %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs_after_reg_avg, by='userId') %>%
  mutate(pred = mu + b_i + b_u) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)
rmse_results <- rmse_results %>%
  add_row(method = "Regularized Movie effects with cross validation with User effects updated",
          RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# Matrix factorization: inspect residual correlation across movies/users.
# Keep only well-rated movies and active users so the matrix is dense enough.
train_small <- train_set %>%
  group_by(movieId) %>%
  filter(n() >= 150 | movieId == 3252) %>%
  ungroup() %>%
  group_by(userId) %>%
  filter(n() >= 150) %>%
  ungroup()

y <- train_small %>%
  select(userId, movieId, rating) %>%
  spread(movieId, rating) %>%
  as.matrix()
rownames(y) <- y[,1]
y <- y[,-1]
colnames(y) <- with(movie_titles, title[match(colnames(y), movieId)])

# Remove column and row means -> convert ratings into residuals.
y <- sweep(y, 2, colMeans(y, na.rm=TRUE))
y <- sweep(y, 1, rowMeans(y, na.rm=TRUE))

# Pairwise residual scatter plots for a few movies.
m_1 <- "Toy Story (1995)"
m_2 <- "Jumanji (1995)"
p1 <- qplot(y[ ,m_1], y[,m_2], xlab = m_1, ylab = m_2)
m_3 <- "Grumpier Old Men (1995)"
p2 <- qplot(y[ ,m_1], y[,m_3], xlab = m_1, ylab = m_3)
m_4 <- "Toy Story (1995)"
m_5 <- "Magnificent Seven, The (1960)"
p3 <- qplot(y[ ,m_4], y[,m_5], xlab = m_4, ylab = m_5)
gridExtra::grid.arrange(p1, p2 ,p3, ncol = 3)
rmse_results

# Time effect: weekly smoothing of ratings over time.
library(lubridate)
train_set <- train_set %>% mutate(date = as_datetime(timestamp))
train_set %>%
  mutate(date = round_date(date, unit = "week")) %>%
  group_by(date) %>%
  summarize(rating = mean(rating)) %>%
  ggplot(aes(date, rating)) +
  geom_point() +
  geom_smooth()

test_set <- test_set %>% mutate(date = as_datetime(timestamp))

# Weekly time effect after removing movie and user biases.
time_effect_wk <- train_set %>%
  mutate(date = round_date(date, unit = "week")) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs_after_reg_avg, by='userId') %>%
  group_by(date) %>%
  summarize(f_t = mean(rating - mu_hat - b_i - b_u))
time_effect_wk

train_set %>% pull(date)

predicted_ratings <- test_set %>%
  mutate(date = round_date(date, unit = "week")) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs_after_reg_avg, by='userId') %>%
  left_join(time_effect_wk, by='date') %>%
  mutate(pred = mu + b_i + b_u + f_t) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)
rmse_results <- rmse_results %>%
  add_row(method = "Added Weekly Time effects", RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# Genre Effect
/DS Course/movie-lens-recommendation-system.R
no_license
OnionOrio/harvard-x-ds
R
false
false
10,482
r
##########################################################
# Create edx set, validation set (final hold-out test set)
##########################################################
# Note: this process could take a couple of minutes

# if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
# if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
# if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")

library(tidyverse)
library(caret)
library(data.table)

# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# dl <- tempfile()
# download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)

ratings <- fread(text = gsub("::", "\t", readLines("DS Course/capstone/ml-10m/ml-10M100K/ratings.dat")),
                 col.names = c("userId", "movieId", "rating", "timestamp"))

movies <- str_split_fixed(readLines("DS Course/capstone/ml-10m/ml-10M100K/movies.dat"), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")

# if using R 4.0 or later (character columns are the default):
movies <- as.data.frame(movies) %>%
  mutate(movieId = as.numeric(movieId),
         title = as.character(title),
         genres = as.character(genres))

movielens <- left_join(ratings, movies, by = "movieId")

# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind = "Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index, ]
temp <- movielens[test_index, ]

# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")

# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)

rm(ratings, movies, test_index, temp, movielens, removed)

# ---- Data exploration ----
edx %>% filter(rating == 3) %>% count()

# Number of distinct users and movies
edx %>% summarize(n_users = n_distinct(userId), n_movies = n_distinct(movieId))

# Ratings per genre keyword
edx %>% filter(grepl("Drama", genres)) %>% count()
edx %>% filter(grepl("Comedy", genres)) %>% count()
edx %>% filter(grepl("Thriller", genres)) %>% count()
edx %>% filter(grepl("Romance", genres)) %>% count()

# Most-rated titles and most common rating values
edx %>% group_by(title) %>% summarise(n = n()) %>% arrange(-n)
edx %>% group_by(rating) %>% summarise(n = n()) %>% arrange(-n)

# ---- Create training set and test set from edx ----
set.seed(1, sample.kind = "Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
train_set <- edx[-test_index, ]
test_set <- edx[test_index, ]

# Remove entries from test_set that do not appear in train_set (semi_join)
test_set <- test_set %>%
  semi_join(train_set, by = "movieId") %>%
  semi_join(train_set, by = "userId")

# Root mean squared error between true and predicted ratings
RMSE <- function(true_ratings, predicted_ratings) {
  sqrt(mean((true_ratings - predicted_ratings)^2))
}

# ---- Baseline model: predict the overall average rating ----
mu_hat <- mean(train_set$rating)
naive_rmse <- RMSE(test_set$rating, mu_hat)
naive_rmse

# Result table tracking the RMSE as the model improves
rmse_results <- tibble(method = "Just the average", RMSE = naive_rmse)

# ---- Movie effects (movie bias) ----
# equivalent to lm(rating ~ as.factor(movieId)), computed directly per movie
movie_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu_hat))

qplot(b_i, data = movie_avgs, bins = 10, color = I("black"))

predicted_ratings <- mu_hat + test_set %>%
  left_join(movie_avgs, by = "movieId") %>%
  pull(b_i)
RMSE(predicted_ratings, test_set$rating)

rmse_results <- rmse_results %>%
  add_row(method = "Added Movie effects",
          RMSE = RMSE(predicted_ratings, test_set$rating))

# ---- User effects (user bias) ----
# BUG FIX: the original applied filter(n() >= 100) AFTER summarize(), where the
# data are no longer grouped by user, so the filter was a no-op; count the
# ratings per user inside summarize() and filter on that count instead.
train_set %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating), n = n()) %>%
  filter(n >= 100) %>%
  ggplot(aes(b_u)) +
  geom_histogram(bins = 30, color = "black")

# equivalent to lm(rating ~ as.factor(movieId) + as.factor(userId))
user_avgs <- train_set %>%
  left_join(movie_avgs, by = "movieId") %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))

predicted_ratings <- test_set %>%
  left_join(movie_avgs, by = "movieId") %>%
  left_join(user_avgs, by = "userId") %>%
  mutate(pred = mu_hat + b_i + b_u) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)

rmse_results <- rmse_results %>%
  add_row(method = "Added User effects",
          RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# ---- Regularization: penalize large movie-effect estimates from small samples ----
lambda <- 3
mu <- mean(train_set$rating)
movie_reg_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu) / (n() + lambda), n_i = n())

tibble(original = movie_avgs$b_i,
       regularlized = movie_reg_avgs$b_i,
       n = movie_reg_avgs$n_i) %>%
  ggplot(aes(original, regularlized, size = sqrt(n))) +
  geom_point(shape = 1, alpha = 0.5)

movie_titles <- edx %>% select(movieId, title) %>% distinct()

# Top 10 movies after regularization
train_set %>%
  count(movieId) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(movie_titles, by = "movieId") %>%
  arrange(desc(b_i)) %>%
  slice(1:10) %>%
  pull(title)

# Bottom 10 movies after regularization
train_set %>%
  count(movieId) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(movie_titles, by = "movieId") %>%
  arrange(b_i) %>%
  select(title, b_i, n) %>%
  slice(1:10) %>%
  pull(title)

# Prediction with regularized movie effects only; the original also joined
# user_avgs here, but b_u was not used in the prediction, so the dead join
# has been removed.
predicted_ratings <- test_set %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  mutate(pred = mu + b_i) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)

rmse_results <- rmse_results %>%
  add_row(method = "Regularized Movie effects",
          RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# ---- Cross-validation on lambda, the penalty term ----
lambdas <- seq(0, 10, 0.25)
mu <- mean(train_set$rating)
just_the_sum <- train_set %>%
  group_by(movieId) %>%
  summarize(s = sum(rating - mu), n_i = n())

rmses <- sapply(lambdas, function(l) {
  predicted_ratings <- test_set %>%
    left_join(just_the_sum, by = "movieId") %>%
    mutate(b_i = s / (n_i + l)) %>%
    mutate(pred = mu + b_i) %>%
    pull(pred)
  return(RMSE(predicted_ratings, test_set$rating))
})
qplot(lambdas, rmses)
lambdas[which.min(rmses)]

# ---- Redo the regularization with the best-fit lambda ----
# BUG FIX: the original assigned `lamda <- 1.5` (typo in the variable name), so
# the recomputation below silently kept using the stale lambda = 3. Use the
# cross-validated optimum found above.
lambda <- lambdas[which.min(rmses)]
movie_reg_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu) / (n() + lambda), n_i = n())

user_avgs_after_reg_avg <- train_set %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))

tibble(original = movie_avgs$b_i,
       regularlized = movie_reg_avgs$b_i,
       n = movie_reg_avgs$n_i) %>%
  ggplot(aes(original, regularlized, size = sqrt(n))) +
  geom_point(shape = 1, alpha = 0.5)

predicted_ratings <- test_set %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs_after_reg_avg, by = "userId") %>%
  mutate(pred = mu + b_i + b_u) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)

rmse_results <- rmse_results %>%
  add_row(method = "Regularized Movie effects with cross validation with User effects updated",
          RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# ---- Matrix factorization: explore correlated residuals between movies ----
train_small <- train_set %>%
  group_by(movieId) %>%
  filter(n() >= 150 | movieId == 3252) %>%
  ungroup() %>%
  group_by(userId) %>%
  filter(n() >= 150) %>%
  ungroup()

y <- train_small %>%
  select(userId, movieId, rating) %>%
  spread(movieId, rating) %>%
  as.matrix()
rownames(y) <- y[, 1]
y <- y[, -1]
colnames(y) <- with(movie_titles, title[match(colnames(y), movieId)])

# Remove column means and row means -> convert ratings into residuals
y <- sweep(y, 2, colMeans(y, na.rm = TRUE))
y <- sweep(y, 1, rowMeans(y, na.rm = TRUE))

m_1 <- "Toy Story (1995)"
m_2 <- "Jumanji (1995)"
p1 <- qplot(y[, m_1], y[, m_2], xlab = m_1, ylab = m_2)

m_1 <- "Toy Story (1995)"
m_3 <- "Grumpier Old Men (1995)"
p2 <- qplot(y[, m_1], y[, m_3], xlab = m_1, ylab = m_3)

m_4 <- "Toy Story (1995)"
m_5 <- "Magnificent Seven, The (1960)"
p3 <- qplot(y[, m_4], y[, m_5], xlab = m_4, ylab = m_5)

gridExtra::grid.arrange(p1, p2, p3, ncol = 3)
rmse_results

# ---- Time effect ----
library(lubridate)
train_set <- train_set %>% mutate(date = as_datetime(timestamp))
train_set %>%
  mutate(date = round_date(date, unit = "week")) %>%
  group_by(date) %>%
  summarize(rating = mean(rating)) %>%
  ggplot(aes(date, rating)) +
  geom_point() +
  geom_smooth()

test_set <- test_set %>% mutate(date = as_datetime(timestamp))

# Weekly time effect fitted on the residuals of the movie + user model
time_effect_wk <- train_set %>%
  mutate(date = round_date(date, unit = "week")) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs_after_reg_avg, by = "userId") %>%
  group_by(date) %>%
  summarize(f_t = mean(rating - mu_hat - b_i - b_u))
time_effect_wk

predicted_ratings <- test_set %>%
  mutate(date = round_date(date, unit = "week")) %>%
  left_join(movie_reg_avgs, by = "movieId") %>%
  left_join(user_avgs_after_reg_avg, by = "userId") %>%
  left_join(time_effect_wk, by = "date") %>%
  mutate(pred = mu + b_i + b_u + f_t) %>%
  pull(pred)
RMSE(predicted_ratings, test_set$rating)

rmse_results <- rmse_results %>%
  add_row(method = "Added Weekly Time effects",
          RMSE = RMSE(predicted_ratings, test_set$rating))
rmse_results

# Genre Effect
#' This function computes the indices that can later be used to partition the time series into
#' training and test sets, for different validation schemes. Currently supported
#' are: "lastBlock", "CV", "noDepCV", "blockedCV".
#'
#' @title Compute the indices for the partitioning scheme
#' @param len the length of the target series
#' @param type one of \code{c("lastBlock", "CV", "noDepCV", "blockedCV")}
#' @param order the embedding dimension, if the indices are computed for a series that is not embedded.
#' If the indices are used with an embedded series, this has to be set to zero.
#' @param ratioLB if between zero and one, defines the ratio of data that are used as test set.
#' If greater one, defines the absolute value of instances in the test set.
#' @param numPartitions the numer of partitions to generate, for the cross-validation schemes
#' @param ratioValSet if an additional validation set is generated, its ratio (zero to not generate additional validation set)
#' @param seed the seed to use for randomly partitioning the data during cross-validation
#' @param notEmbedded if this flag is true, the indices are shifted by ``order'' into the future, so that
#' the results are the indices in the not embedded series, of the values which are the target values in the embedded series.
#' @export
computePartitionIndices <- function(len, type="lastBlock", order=0, ratioLB=0.15,
                                    numPartitions=0, ratioValSet=0, seed=1,
                                    notEmbedded=FALSE) {

  partitions <- list()

  # In-set length once a possible validation set and the embedding order
  # have been subtracted.
  unadjustedInSetLength <- floor((1 - ratioValSet) * len) - order

  if (type == "lastBlock") {
    inSetLength <- unadjustedInSetLength
    if (ratioLB > 1) {
      # ratioLB > 1 is interpreted as an absolute number of test instances
      partitionLength <- floor(ratioLB)
    } else {
      partitionLength <- floor(ratioLB * inSetLength)
    }
  } else {
    # make the in-set length divisible by the number of partitions
    inSetLength <- unadjustedInSetLength - (unadjustedInSetLength %% numPartitions)
    partitionLength <- inSetLength / numPartitions
  }

  inSetData <- 1:inSetLength

  # NOTE(review): every branch below re-initializes `partitions`, so for the
  # supported types this outSetData entry is discarded; kept to preserve the
  # original control flow.
  if ((inSetLength + order + 1) > (len - order)) {
    partitions[["outSetData"]] <- NULL
  } else {
    partitions[["outSetData"]] <- (inSetLength + order + 1):(len - order)
  }

  # BUG FIX: `shortname` is used in partitionParameters below, but all of its
  # assignments were commented out in the original, so the function always
  # failed with "object 'shortname' not found". Reconstruct it per type,
  # following the commented-out naming scheme.
  shortname <- switch(type,
                      CV        = paste("CV-", numPartitions, sep = ""),
                      noDepCV   = paste("nDCV-", numPartitions, sep = ""),
                      blockedCV = paste("bCV-", numPartitions, sep = ""),
                      lastBlock = paste("lB-", ratioLB, sep = ""),
                      type)

  if (type == "CV" || type == "noDepCV") {

    partitions <- list()
    set.seed(seed)
    shuffledIndices <- sample(inSetData, length(inSetData))

    for (t in 1:numPartitions) {
      tempTest <- ((t - 1) * partitionLength + 1):(t * partitionLength)
      tempTrain <- inSetData[-tempTest]
      partitions[["inSetTrainData"]] <- cbind(partitions[["inSetTrainData"]],
                                              shuffledIndices[tempTrain])
      partitions[["inSetTestData"]] <- cbind(partitions[["inSetTestData"]],
                                             shuffledIndices[tempTest])
    }

    if (type == "noDepCV") {

      partitionsCV <- partitions
      partitions <- list()

      inSetTrainDataNoDepCV <- NULL
      inSetTestDataNoDepCV <- NULL
      diffZero <- FALSE

      for (t in 1:numPartitions) {
        tempTest <- partitionsCV[["inSetTestData"]][, t]

        # every index within `order` of a test index may not be trained on
        forbiddenIndices <- vector()
        for (tInd in tempTest) {
          forbiddenIndices <- union(forbiddenIndices,
                                    (max(1, (tInd - order))):(tInd + order))
        }

        diff <- setdiff(partitionsCV[["inSetTrainData"]][, t], forbiddenIndices)

        if (length(diff) == 0) {
          diffZero <- TRUE
          break
        }

        # pad with NA so that all columns of the result matrix have equal length
        tempTrain <- vector(length = length(partitionsCV[["inSetTrainData"]][, t]))
        tempTrain[1:length(tempTrain)] <- NA
        tempTrain[1:length(diff)] <- diff

        inSetTrainDataNoDepCV <- cbind(inSetTrainDataNoDepCV, tempTrain)
        inSetTestDataNoDepCV <- cbind(inSetTestDataNoDepCV, tempTest)
      }

      if (!diffZero) {
        partitions[["inSetTrainData"]] <- inSetTrainDataNoDepCV
        partitions[["inSetTestData"]] <- inSetTestDataNoDepCV
        colnames(partitions[["inSetTrainData"]]) <- NULL
        colnames(partitions[["inSetTestData"]]) <- NULL
      }
    }

  } else if (type == "blockedCV") {

    partitions <- list()

    for (t in 1:numPartitions) {
      # training data: everything outside the block, with a safety margin of
      # `order` around it to remove dependencies
      tempTest <- max(1, ((t - 1) * partitionLength - order + 1)):
        min((t * partitionLength + order), numPartitions * partitionLength)
      tempTrain <- inSetData[-tempTest]

      # the actual test block (without the safety margin)
      tempTest <- ((t - 1) * partitionLength + 1):(t * partitionLength)

      # middle blocks lose 2*order training points; pad with NA so that all
      # columns of the result matrix have the same length
      if (t != 1 && t != numPartitions) {
        tempTrainFilled <- vector(length = length(partitions[["inSetTrainData"]][, 1]))
        tempTrainFilled[1:length(tempTrainFilled)] <- NA
        tempTrainFilled[1:length(tempTrain)] <- tempTrain
        tempTrain <- tempTrainFilled
      }

      partitions[["inSetTrainData"]] <- cbind(partitions[["inSetTrainData"]],
                                              as.vector(tempTrain))
      partitions[["inSetTestData"]] <- cbind(partitions[["inSetTestData"]],
                                             as.vector(tempTest))
    }

  } else if (type == "lastBlock") {

    partitions <- list()

    if ((inSetLength - partitionLength + order + 1) > inSetLength) {
      # the test block would be empty: everything goes into training
      partitions[["inSetTrainData"]] <- inSetData
      partitions[["inSetTestData"]] <- NULL
    } else {
      tempTest <- (inSetLength - partitionLength + 1):inSetLength
      tempTrain <- inSetData[-tempTest]
      # the first `order` test points are only usable as inputs, not targets
      tempTest <- (inSetLength - partitionLength + order + 1):inSetLength
      partitions[["inSetTrainData"]] <- as.matrix(tempTrain)
      partitions[["inSetTestData"]] <- as.matrix(tempTest)
    }
  }

  if (ratioValSet != 0) {
    partitions[["valSetData"]] <- (inSetLength + order + 1):len
  }

  # shift the indices into the not-embedded series if requested
  if (notEmbedded) {
    partitions <- lapply(partitions, function(part) { part + order })
  }

  partitionParameters <- list(type = type, shortname = shortname, order = order,
                              ratioLB = ratioLB, numPartitions = numPartitions,
                              ratioValSet = ratioValSet, seed = seed)
  attr(partitions, "partitionParameters") <- partitionParameters

  partitions
}

#' This function is very similar to \code{\link{computeTrainingAndTestIndices}}.
#' It splits the given indices to indices for a training and a test set. Test set
#' is taken from the end of the data. If the data is to be shuffled, this should
#' be done before calling this function.
#'
#' @title Function to split data indices into indices for training and test set
#' @param indices the indices to be used
#' @param ratio ratio of training and test sets (default: 15\% of the data is used for testing)
#' @return a named list with the following elements:
#' \item{trainIndices}{a matrix containing the training indices}
#' \item{testIndices}{a matrix containing the test indices}
#' @export
computeTrainingAndTestIndices <- function(indices, ratio=0.15) {
  # BUG FIX: the original computed `indices[-trainIndices]`, i.e. it negated
  # the *values* of the training indices instead of their positions, which is
  # only correct when indices == 1:n. Split by position instead.
  nTrain <- floor(length(indices) * (1 - ratio))
  trainIndices <- indices[seq_len(nTrain)]
  testIndices <- indices[-seq_len(nTrain)]
  return(list(trainIndices = trainIndices, testIndices = testIndices))
}

#' This function is very similar to \code{\link{computeTrainingAndTestIndices}},
#' but instead of a vector of indices, it takes a length and an embedding order.
#'
#' @title Function to split data into training and test set for last block evaluation.
#' @param len the length of the time series to be used
#' @param order the order of the embedding to be used
#' @param ratio the ratio of training and test set
#' @return a named list with the following elements:
#' \item{trainIndices}{a matrix containing the training indices}
#' \item{testIndices}{a matrix containing the test indices}
#' @export
computeIndicesLastBlockEval <- function(len, order, ratio=0.15) {
  trainIndices <- 1:(trunc(len * (1 - ratio)) - 1 - order)
  # BUG FIX: the original started the test sequence at the non-integer value
  # `len * (1 - ratio)`, yielding fractional indices; truncate it first.
  testIndices <- trunc(len * (1 - ratio)):len
  return(list(trainIndices = trainIndices, testIndices = testIndices))
}
/R/computePartitionIndices.R
no_license
cbergmeir/tsExpKit
R
false
false
11,750
r
#example code #part <- computePartitionIndices(types=c("lastBlock"), len=length(lynx), order=1, numPartitions=0, ratioValSet=0) #part # #part <- computePartitionIndices(types=c("lastBlock", "blockedCV"), len=length(lynx), order=2, numPartitions=5, ratioValSet=0.2) #part # #part$inSetTrainIndicesO$blockedCV == part$inSetTrainIndices$blockedCV #part$inSetTrainIndicesO$CV == part$inSetTrainIndices$CV # # #part <- computePartitionIndices(types=c("lastBlock"), len=length(lynx), order=0, numPartitions=0, ratioValSet=0, ratioLB=0) #part # x: a time series # name: the name of the time series # lags: the lags to use # partitions: the numer of cv and blocked cv partitions to generate # ratio: how many percent of the data are to be used for OOS validation #' This function computes the indices that can later be used to partition the time series into #' training and test sets, for different validation schemes. Currently supported #' are: "lastBlock", "CV", "noDepCV", "blockedCV". #' #' @title Compute the indices for the partitioning scheme #' @param len the length of the target series #' @param type one of \code{c("lastBlock", "CV", "noDepCV", "blockedCV")} #' @param order the embedding dimension, if the indices are computed for a series that is not embedded. #' If the indices are used with an embedded series, this has to be set to zero. #' @param ratioLB if between zero and one, defines the ratio of data that are used as test set. #' If greater one, defines the absolute value of instances in the test set. 
#' @param numPartitions the numer of partitions to generate, for the cross-validation schemes #' @param ratioValSet if an additional validation set is generated, its ratio (zero to not generate additional validation set) #' @param seed the seed to use for randomly partitioning the data during cross-validation #' @param notEmbedded if this flag is true, the indices are shifted by ``order'' into the future, so that #' the results are the indices in the not embedded series, of the values which are the target values in the embedded series. # @example: dataPartitions <- generateDataPartitions(lynx, "lynx", c(2,3,4), 5, 0.8) #' @export computePartitionIndices <- function(len, type="lastBlock", order=0, ratioLB=0.15, numPartitions=0, ratioValSet=0, seed=1, notEmbedded=FALSE) { partitions <- list() unadjustedInSetLength <- floor((1-ratioValSet)*len) - order if(type == "lastBlock") { if(ratioLB > 1) { inSetLength <- unadjustedInSetLength partitionLength <- floor(ratioLB) } else { inSetLength <- unadjustedInSetLength partitionLength <- floor(ratioLB*inSetLength) } } else { inSetLength <- unadjustedInSetLength - (unadjustedInSetLength %% numPartitions) partitionLength <- inSetLength / numPartitions } inSetData <- 1:inSetLength if((inSetLength + order + 1) > (len-order)) partitions[["outSetData"]] <- NULL else partitions[["outSetData"]] <- (inSetLength + order + 1):(len-order) if(type == "CV" || type == "noDepCV") { partitions <- list() #attr(partitions, "shortname") <- shortname <- paste("CV-",numPartitions, sep="") #class(partitions) <- "CV" set.seed(seed) shuffledIndices <- sample(inSetData,length(inSetData)) for(t in 1:numPartitions) { tempTest <- ((t-1)*partitionLength+1):(t*partitionLength) #tempTrain <- (1:nrow(inSetE))[-tempTest] tempTrain <- inSetData[-tempTest] partitions[["inSetTrainData"]] <- cbind(partitions[["inSetTrainData"]], shuffledIndices[tempTrain]) partitions[["inSetTestData"]] <- cbind(partitions[["inSetTestData"]], shuffledIndices[tempTest]) } if(type == 
"noDepCV") { partitionsCV <- partitions partitions <- list() #attr(partitions, "shortname") <- shortname <- paste("nDCV-",numPartitions, sep="") #class(partitions) <- "noDepCV" inSetTrainDataNoDepCV <- NULL inSetTestDataNoDepCV <- NULL diffZero <- FALSE for(t in 1:numPartitions) { tempTest <- partitionsCV[["inSetTestData"]][,t] forbiddenIndices <- vector() for(tInd in tempTest) { forbiddenIndices <- union(forbiddenIndices, (max(1,(tInd - order))):(tInd+order)) } diff <- setdiff(partitionsCV[["inSetTrainData"]][,t], forbiddenIndices) if(length(diff) == 0) { diffZero <- TRUE break; } tempTrain <- vector(length = length(partitionsCV[["inSetTrainData"]][,t])) tempTrain[1:length(tempTrain)] <- NA tempTrain[1:length(diff)] <- diff inSetTrainDataNoDepCV <- cbind(inSetTrainDataNoDepCV, tempTrain) inSetTestDataNoDepCV <- cbind(inSetTestDataNoDepCV, tempTest) } if(!diffZero) { partitions[["inSetTrainData"]] <- inSetTrainDataNoDepCV partitions[["inSetTestData"]] <- inSetTestDataNoDepCV colnames(partitions[["inSetTrainData"]]) <- NULL colnames(partitions[["inSetTestData"]]) <- NULL } } } else if(type == "blockedCV") { partitions <- list() #attr(partitions, "shortname") <- shortname <- paste("bCV-",numPartitions, sep="") #class(partitions) <- "blockedCV" for(t in 1:numPartitions) { #tempTest <- ((t-1)*partitionLength-order+1):(t*partitionLength+order) tempTest <- max(1,((t-1)*partitionLength-order+1)):min((t*partitionLength+order),numPartitions*partitionLength) #tempTrain <- (1:nrow(inSetE))[-tempTest] tempTrain <- inSetData[-tempTest] #remove dependencies tempTest <- ((t-1)*partitionLength+1):(t*partitionLength) #if (t==1) tempTest <- ((t-1)*partitionLength+1):(t*partitionLength-order) #else if(t==numPartitions) tempTest <- ((t-1)*partitionLength+order+1):(t*partitionLength) #else tempTest <- ((t-1)*partitionLength+order+1):(t*partitionLength-order) if(t != 1 && t != numPartitions) { tempTrainFilled <- vector(length = length(partitions[["inSetTrainData"]][,1])) 
tempTrainFilled[1:length(tempTrainFilled)] <- NA tempTrainFilled[1:length(tempTrain)] <- tempTrain tempTrain <- tempTrainFilled } partitions[["inSetTrainData"]] <- cbind(partitions[["inSetTrainData"]], as.vector(tempTrain)) partitions[["inSetTestData"]] <- cbind(partitions[["inSetTestData"]], as.vector(tempTest)) } #partitions[["turns"]][["blockedCV"]] <- sprintf("blockedCV%02d",1:numPartitions) } else if(type == "lastBlock") { partitions <- list() #attr(partitions, "shortname") <- shortname <- paste("lB-", ratioLB, sep="") #class(partitions) <- "lastBlock" if((inSetLength - partitionLength + order + 1) > inSetLength) { #tempTest <- NULL #tempTrain <- inSetData partitions[["inSetTrainData"]] <- inSetData partitions[["inSetTestData"]] <- NULL } else { tempTest <- (inSetLength - partitionLength + 1) : inSetLength tempTrain <- inSetData[-tempTest] tempTest <- (inSetLength - partitionLength + order + 1) : inSetLength partitions[["inSetTrainData"]] <- as.matrix(tempTrain) partitions[["inSetTestData"]] <- as.matrix(tempTest) } # if((inSetLength - partitionLength + order + 1) > inSetLength) # tempTest <- NULL # else } if(ratioValSet != 0) { partitions[["valSetData"]] <- (inSetLength + order + 1):len } ##to get the target indices in the unembedded series: # if(notEmbedded) { partitions <- lapply(partitions, function(part) { part + order }) } # partitions[["outSetData"]] <- partitions[["outSetData"]] # partitions[["inSetTrainData"]] <- partitions[["inSetTrainData"]] # partitions[["inSetTestData"]] <- partitions[["inSetTestData"]] # this is now done by setting order=0 and doing the embedding afterwards.. 
#partitions[["outSetData"]] <- partitions[["outSetData"]] + order #(inSetLength+2*order + 1):(len) #partitions <- lapply(partitions, function(part) { # if(is.list(part)) { # res <- lapply(part, function(x) x + order) # attr(res, "shortname") <- attr(part, "shortname") # } else # res <- part + order # res # }) #partitions[["inSetTestData"]] <- lapply(partitions[["inSetTestData"]], function(x) x + order) #rudimentary time stamping #partitions[["timeStampO"]] <- 1:len #partitions[["timeStampE"]] <- partitions[["timeStampO"]][1:nrow(eData)] + (order-1) #attr(partitions, "shortname") <- shortname #class(partitions) <- type partitionParameters <- list(type=type, shortname=shortname, order=order, ratioLB=ratioLB, numPartitions=numPartitions, ratioValSet=ratioValSet, seed=seed) attr(partitions, "partitionParameters") <- partitionParameters partitions } # # # # # # # # # # # # # # # #computePartitionIndices <- function(type="lastBlock", len, order=0, ratio=0.15, numPartitions=0, ratioValSet=0) { # # if(ratioValSet == 0){ # inIndices <- 1 : (len - order) # outIndices <- NULL # } else { # inIndices <- 1 : (trunc(len * (1-ratioValSet)) - 1 - order) # outIndices <- trunc((len * (1-ratioValSet))) : len # # } # # if(type == "none") { # trainIndices <- inIndices # testIndices <- NULL # } else if(type == "lb") { # # trainIndices <- 1 : (trunc(length[inIndices] * (1-ratio)) - 1 - order) # outIndices <- trunc((len * (1-ratioValSet))) : len # # } else if(type == "bCV") { # # } # # # return(list(trainIndices=trainIndices, testIndices=testIndices)) # # #} # #len <- 114 #order <- 1 #ratioValSet <- 0.2 #' This function is very similar to \code{\link{computeTrainingAndTestIndices}}. #' It splits the given indices to indices for a training and a test set. Test set #' is taken from the end of the data. If the data is to be shuffled, this should #' be done before calling this function. 
#'
#' @title Function to split data indices into indices for training and test set
#' @param indices the indices to be used
#' @param ratio ratio of training and test sets (default: 15\% of the data is used for testing)
#' @return a named list with the following elements:
#' \item{trainIndices}{a vector containing the training indices}
#' \item{testIndices}{a vector containing the test indices}
#' @export
computeTrainingAndTestIndices <- function(indices, ratio=0.15) {
  # Number of training observations: everything except the trailing `ratio`
  # share. floor() makes the truncation explicit (the old code relied on the
  # ':' operator silently truncating a fractional upper bound, and produced
  # c(1, 0) when the count was zero).
  nTrain <- floor(length(indices) * (1 - ratio))
  trainPos <- seq_len(nTrain)
  trainIndices <- indices[trainPos]
  # Drop the training elements BY POSITION. The previous implementation used
  # `indices[-trainIndices]`, which removes by *value* and is only correct in
  # the special case where `indices` happens to equal 1:n.
  testIndices <- indices[setdiff(seq_along(indices), trainPos)]
  return(list(trainIndices=trainIndices, testIndices=testIndices))
}
#' This function is very similar to \code{\link{computeTrainingAndTestIndices}},
#' but instead of a vector of indices, it takes a length and an embedding order.
#'
#' @title Function to split data into training and test set for last block evaluation.
#' @param len the length of the time series to be used
#' @param order the order of the embedding to be used
#' @param ratio the ratio of training and test set
#' @return a named list with the following elements:
#' \item{trainIndices}{a vector containing the training indices}
#' \item{testIndices}{a vector containing the test indices}
#' @export
computeIndicesLastBlockEval <- function(len, order, ratio=0.15) {
  # First index of the test block. trunc() keeps the result integral; the
  # previous version used the raw product `len * (1-ratio)` as the start of
  # the sequence, yielding non-integer "indices" such as 96.9, 97.9, ...
  testStart <- trunc(len * (1 - ratio))
  # Training indices stop `order + 1` positions before the test block so that
  # embedded observations do not leak across the split. max(0, ...) guards
  # against a degenerate (too short / too high ratio) configuration.
  trainIndices <- seq_len(max(0, testStart - 1 - order))
  testIndices <- testStart:len
  return(list(trainIndices=trainIndices, testIndices=testIndices))
}
# Copyright (C) 2013-2015 Philipp Benner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

#' Create a new link function object
#'
#' @param type name of the link function
#' @param ... unused
#' @export
new.link <- function(type = "probit", ...)
{
  # Dispatch on the requested link type; "null" is an identity
  # pass-through for likelihood models that need no link function.
  switch(type,
         probit   = new.link.probit(...),
         logistic = new.link.logistic(...),
         null     = new.link.null(...),
         stop("Unknown type."))
}

new.link.probit <- function(...)
{
  # qnorm: quantile function (qnorm = pnorm^-1)
  # pnorm: cumulative density (pnorm = Int dnorm)
  # dnorm: density function  (dnorm = d/dx pnorm)
  structure(
    list(link                = qnorm, # link function
         response            = pnorm, # inverse link function (response)
         response.derivative = dnorm),
    class = c("link.probit", "link"))
}

new.link.logistic <- function(...)
{
  # First and second derivatives of the response (softplus) function.
  dr <- function(x, n = 1) {
    if (n == 1) {
      exp(x)/(1+exp(x))
    } else if (n == 2) {
      exp(x)/(1+exp(x)) - exp(2*x)/(1+exp(x))^2
    } else {
      stop("Invalid derivative")
    }
  }
  structure(
    list(link                = function(x) log(exp(x) - 1), # inverse softplus
         response            = function(x) log(exp(x) + 1), # softplus
         response.derivative = dr),
    class = c("link.logistic", "link"))
}

new.link.null <- function(...)
{
  # A null link object does not transform anything; it can be used in
  # likelihood models that do not require link functions for the
  # Laplace approximation.
  structure(
    list(link                = function(x) x, # identity link
         response            = NULL,
         response.derivative = NULL),
    class = c("link.null", "link"))
}

#' Summarize the posterior of a Gaussian process equipped with a probit link function
#'
#' @param model probit link object
#' @param p.mean mean of the posterior Laplace approximation
#' @param p.variance variance of the posterior Laplace approximation
#' @param ... unused
#' @method summarize link.probit
#' @export
summarize.link.probit <- function(model, p.mean, p.variance, ...)
{
  n        <- length(p.mean)
  out.mean <- rep(0, n)
  out.var  <- rep(0, n)
  for (k in 1:n) {
    s2 <- 1 + p.variance[k]
    cv <- p.variance[k]
    # covariance matrix of the bivariate normal used for the second moment
    sigma <- matrix(c(s2, cv, cv, s2), 2, 2)
    # predictive mean (cf. Rasmussen 2006, Eq. 3.82)
    out.mean[k] <- pnorm(p.mean[k], mean = 0, sd = sqrt(s2))
    # predictive variance (cf. math/probit.nb)
    out.var[k] <- pmvnorm(upper = rep(p.mean[k], 2),
                          mean  = c(0, 0),
                          sigma = sigma) - out.mean[k]^2
  }
  return (list(mean = out.mean, variance = out.var))
}

#' Summarize the posterior of a Gaussian process equipped with a logistic link function
#'
#' There is no analytic solution for the expectation and variance, hence return only
#' the MAP estimate.
#'
#' @param model logistic link object
#' @param p.mean mean of the posterior Laplace approximation
#' @param p.variance variance of the posterior Laplace approximation
#' @param ... unused
#' @method summarize link.logistic
#' @export
summarize.link.logistic <- function(model, p.mean, p.variance, ...)
{
  two.sd   <- 2.0*sqrt(p.variance)
  map.mean <- model$response(p.mean)
  lower    <- model$response(p.mean - two.sd)
  upper    <- model$response(p.mean + two.sd)
  # squared width of the +/- 2 sd interval on the response scale, over 4^2
  approx.var <- (lower - upper)^2/16
  return (list(mean = map.mean, variance = approx.var))
}

# ------------------------------------------------------------------------------
if (FALSE) {

  # binomial observations
  # ----------------------------------------------------------------------------
  xp <- c(1,2,3,4)
  yp <- matrix(0, 4, 2)
  yp[1,] <- c(2, 14)
  yp[2,] <- c(4, 12)
  yp[3,] <- c(7, 10)
  yp[4,] <- c(15, 8)

  gp <- new.gp(0.5, kernel.squared.exponential(1, 0.25),
               likelihood = NULL, link = new.link("probit"))
  gp <- posterior(gp, xp, yp)

  summarize(gp, 1:100/20)
  plot(gp, 1:100/20)

  # gamma distributed observations
  # ----------------------------------------------------------------------------
  n  <- 1000
  xp <- 10*runif(n)
  yp <- rgamma(n, 1, 2)

  gp <- new.gp(1.0, kernel.squared.exponential(1.0, 5.0),
               likelihood = new.likelihood("gamma", 1.0),
               link = new.link("logistic"))
  # add some tiny noise to the diagonal for numerical stability
  gp <- posterior(gp, xp, yp, ep = 0.01, verbose = TRUE)

  summarize(gp, 0:10/5)
  plot(gp, 1:100/10)
}
/R/gp.link.R
no_license
pbenner/gp.regression
R
false
false
5,444
r
# Copyright (C) 2013-2015 Philipp Benner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

#' Create a new link function object
#'
#' @param type name of the link function
#' @param ... unused
#' @export
new.link <- function(type = "probit", ...)
{
  # Dispatch on the requested link type; "null" is an identity
  # pass-through for likelihood models that need no link function.
  switch(type,
         probit   = new.link.probit(...),
         logistic = new.link.logistic(...),
         null     = new.link.null(...),
         stop("Unknown type."))
}

new.link.probit <- function(...)
{
  # qnorm: quantile function (qnorm = pnorm^-1)
  # pnorm: cumulative density (pnorm = Int dnorm)
  # dnorm: density function  (dnorm = d/dx pnorm)
  structure(
    list(link                = qnorm, # link function
         response            = pnorm, # inverse link function (response)
         response.derivative = dnorm),
    class = c("link.probit", "link"))
}

new.link.logistic <- function(...)
{
  # First and second derivatives of the response (softplus) function.
  dr <- function(x, n = 1) {
    if (n == 1) {
      exp(x)/(1+exp(x))
    } else if (n == 2) {
      exp(x)/(1+exp(x)) - exp(2*x)/(1+exp(x))^2
    } else {
      stop("Invalid derivative")
    }
  }
  structure(
    list(link                = function(x) log(exp(x) - 1), # inverse softplus
         response            = function(x) log(exp(x) + 1), # softplus
         response.derivative = dr),
    class = c("link.logistic", "link"))
}

new.link.null <- function(...)
{
  # A null link object does not transform anything; it can be used in
  # likelihood models that do not require link functions for the
  # Laplace approximation.
  structure(
    list(link                = function(x) x, # identity link
         response            = NULL,
         response.derivative = NULL),
    class = c("link.null", "link"))
}

#' Summarize the posterior of a Gaussian process equipped with a probit link function
#'
#' @param model probit link object
#' @param p.mean mean of the posterior Laplace approximation
#' @param p.variance variance of the posterior Laplace approximation
#' @param ... unused
#' @method summarize link.probit
#' @export
summarize.link.probit <- function(model, p.mean, p.variance, ...)
{
  n        <- length(p.mean)
  out.mean <- rep(0, n)
  out.var  <- rep(0, n)
  for (k in 1:n) {
    s2 <- 1 + p.variance[k]
    cv <- p.variance[k]
    # covariance matrix of the bivariate normal used for the second moment
    sigma <- matrix(c(s2, cv, cv, s2), 2, 2)
    # predictive mean (cf. Rasmussen 2006, Eq. 3.82)
    out.mean[k] <- pnorm(p.mean[k], mean = 0, sd = sqrt(s2))
    # predictive variance (cf. math/probit.nb)
    out.var[k] <- pmvnorm(upper = rep(p.mean[k], 2),
                          mean  = c(0, 0),
                          sigma = sigma) - out.mean[k]^2
  }
  return (list(mean = out.mean, variance = out.var))
}

#' Summarize the posterior of a Gaussian process equipped with a logistic link function
#'
#' There is no analytic solution for the expectation and variance, hence return only
#' the MAP estimate.
#'
#' @param model logistic link object
#' @param p.mean mean of the posterior Laplace approximation
#' @param p.variance variance of the posterior Laplace approximation
#' @param ... unused
#' @method summarize link.logistic
#' @export
summarize.link.logistic <- function(model, p.mean, p.variance, ...)
{
  two.sd   <- 2.0*sqrt(p.variance)
  map.mean <- model$response(p.mean)
  lower    <- model$response(p.mean - two.sd)
  upper    <- model$response(p.mean + two.sd)
  # squared width of the +/- 2 sd interval on the response scale, over 4^2
  approx.var <- (lower - upper)^2/16
  return (list(mean = map.mean, variance = approx.var))
}

# ------------------------------------------------------------------------------
if (FALSE) {

  # binomial observations
  # ----------------------------------------------------------------------------
  xp <- c(1,2,3,4)
  yp <- matrix(0, 4, 2)
  yp[1,] <- c(2, 14)
  yp[2,] <- c(4, 12)
  yp[3,] <- c(7, 10)
  yp[4,] <- c(15, 8)

  gp <- new.gp(0.5, kernel.squared.exponential(1, 0.25),
               likelihood = NULL, link = new.link("probit"))
  gp <- posterior(gp, xp, yp)

  summarize(gp, 1:100/20)
  plot(gp, 1:100/20)

  # gamma distributed observations
  # ----------------------------------------------------------------------------
  n  <- 1000
  xp <- 10*runif(n)
  yp <- rgamma(n, 1, 2)

  gp <- new.gp(1.0, kernel.squared.exponential(1.0, 5.0),
               likelihood = new.likelihood("gamma", 1.0),
               link = new.link("logistic"))
  # add some tiny noise to the diagonal for numerical stability
  gp <- posterior(gp, xp, yp, ep = 0.01, verbose = TRUE)

  summarize(gp, 0:10/5)
  plot(gp, 1:100/10)
}
#' @export
makeRLearner.surv.CoxBoost = function() {
  # Hyperparameters of CoxBoost exposed to mlr's tuning machinery.
  ps = makeParamSet(
    makeIntegerLearnerParam(id = "maxstepno", default = 100, lower = 0),
    makeIntegerLearnerParam(id = "K", default = 10, lower = 1),
    makeDiscreteLearnerParam(id = "type", default = "verweij", values = c("verweij", "naive")),
    makeIntegerLearnerParam(id = "stepno", default = 100L, lower = 1),
    makeNumericLearnerParam(id = "penalty", lower = 0),
    makeLogicalLearnerParam(id = "standardize", default = TRUE),
    makeDiscreteLearnerParam(id = "criterion", default = "pscore", values = c("pscore", "score", "hpscore", "hscore")),
    makeNumericLearnerParam(id = "stepsize.factor", default = 1, lower = 0),
    makeDiscreteLearnerParam(id = "sf.scheme", default = "sigmoid", values = c("sigmoid", "linear"))
    # FIXME: still missing some arguments
  )
  makeRLearnerSurv(
    cl = "surv.CoxBoost",
    package = "CoxBoost",
    par.set = ps,
    properties = c("numerics", "weights", "rcens"),
    name = "Cox Proportional Hazards Model with Componentwise Likelihood based Boosting",
    short.name = "coxboost",
    note = ""
  )
}

#' @export
trainLearner.surv.CoxBoost = function(.learner, .task, .subset, .weights = NULL, penalty = NULL, ...) {
  # FIXME: use model.matrix to allow factors
  d = getTaskData(.task, subset = .subset, target.extra = TRUE, recode.target = "rcens")
  surv.time = d$target[, 1L]
  surv.status = d$target[, 2L]
  # Fallback penalty: 9 * number of events (cf. the `penalty` default
  # described in CoxBoost::CoxBoost).
  if (is.null(penalty)) {
    penalty = 9 * sum(surv.status)
  }
  CoxBoost::CoxBoost(
    time = surv.time,
    status = surv.status,
    x = as.matrix(d$data),
    weights = .weights,
    penalty = penalty,
    ...
  )
}

#' @export
predictLearner.surv.CoxBoost = function(.learner, .model, .newdata, ...) {
  # Only the linear predictor ("response") output is supported.
  if (.learner$predict.type != "response") {
    stop("Unknown predict type")
  }
  as.numeric(predict(.model$learner.model, newdata = as.matrix(.newdata), type = "lp"))
}
/R/RLearner_surv_CoxBoost.R
no_license
narayana1208/mlr
R
false
false
1,892
r
#' @export
makeRLearner.surv.CoxBoost = function() {
  # Hyperparameters of CoxBoost exposed to mlr's tuning machinery.
  ps = makeParamSet(
    makeIntegerLearnerParam(id = "maxstepno", default = 100, lower = 0),
    makeIntegerLearnerParam(id = "K", default = 10, lower = 1),
    makeDiscreteLearnerParam(id = "type", default = "verweij", values = c("verweij", "naive")),
    makeIntegerLearnerParam(id = "stepno", default = 100L, lower = 1),
    makeNumericLearnerParam(id = "penalty", lower = 0),
    makeLogicalLearnerParam(id = "standardize", default = TRUE),
    makeDiscreteLearnerParam(id = "criterion", default = "pscore", values = c("pscore", "score", "hpscore", "hscore")),
    makeNumericLearnerParam(id = "stepsize.factor", default = 1, lower = 0),
    makeDiscreteLearnerParam(id = "sf.scheme", default = "sigmoid", values = c("sigmoid", "linear"))
    # FIXME: still missing some arguments
  )
  makeRLearnerSurv(
    cl = "surv.CoxBoost",
    package = "CoxBoost",
    par.set = ps,
    properties = c("numerics", "weights", "rcens"),
    name = "Cox Proportional Hazards Model with Componentwise Likelihood based Boosting",
    short.name = "coxboost",
    note = ""
  )
}

#' @export
trainLearner.surv.CoxBoost = function(.learner, .task, .subset, .weights = NULL, penalty = NULL, ...) {
  # FIXME: use model.matrix to allow factors
  d = getTaskData(.task, subset = .subset, target.extra = TRUE, recode.target = "rcens")
  surv.time = d$target[, 1L]
  surv.status = d$target[, 2L]
  # Fallback penalty: 9 * number of events (cf. the `penalty` default
  # described in CoxBoost::CoxBoost).
  if (is.null(penalty)) {
    penalty = 9 * sum(surv.status)
  }
  CoxBoost::CoxBoost(
    time = surv.time,
    status = surv.status,
    x = as.matrix(d$data),
    weights = .weights,
    penalty = penalty,
    ...
  )
}

#' @export
predictLearner.surv.CoxBoost = function(.learner, .model, .newdata, ...) {
  # Only the linear predictor ("response") output is supported.
  if (.learner$predict.type != "response") {
    stop("Unknown predict type")
  }
  as.numeric(predict(.model$learner.model, newdata = as.matrix(.newdata), type = "lp"))
}
# 072720
# NLST (National Lung Screening Trial) baseline data management script.
# Reads the raw participant file, applies eligibility/screening exclusions,
# derives mortality outcomes and baseline covariates, and writes the cleaned
# analysis file. Code is unchanged from the original; comments only.
rm(list = ls())
library(here)
library(survival)
dat0<-read.csv("NSLT_participants_dat0.csv")
dat<-dat0
#---- Data management & summary statistics ----#
names(dat)
table(dat$rndgroup, useNA = "ifany") # 26722 CT, 26730 X-ray
# Num of ineligible obs
table(dat$ineligible, dat$elig, useNA = "ifany") # 53248 elig, 204 inelig
table(dat$elig, dat$rndgroup, useNA = "ifany")
chisq.test(dat$elig, dat$rndgroup) # p = 0.3629
dat<-dat[dat$elig == "Eligible Participant",]
table(dat$rndgroup, useNA = "ifany") # 26627 CT, 26621 X-ray2
# Check num of obs with fup_days = 0 & remove them
str(dat$fup_days)
dat$fup_is_0<-ifelse(dat$fup_days==0, 1, 0)
table(dat$fup_is_0) # 58 fup = 0 (they all dead)
table(dat$deathstat, dat$fup_is_0)
chisq.test(dat$fup_is_0, dat$rndgroup) # p = 0.8623
dat<-dat[dat$fup_days!=0,]
table(dat$rndgroup, useNA = "ifany") # 26593 CT, 26578 x-ray
# Num who received T0 screening
table(dat$scr_res0)
table(dat$scr_res0, dat$rndgroup, useNA = "ifany")
# First 3 chars of scr_res0 classify the result: Neg/Pos = screened,
# Not/Ina = not screened or inadequate.
dat$screen_t0_yn<-ifelse(substr(dat$scr_res0, 0, 3) %in% c("Neg", "Pos"), 1,
                         ifelse(substr(dat$scr_res0, 0, 3) %in% c("Not", "Ina"), 0, NA))
table(dat$screen_t0_yn, useNA = "ifany") # 52174 left, 1074 excluded
table(dat$screen_t0_yn, dat$rndgroup, useNA = "ifany")
chisq.test(dat$screen_t0_yn, dat$rndgroup) # p < 0.0001
dat<-dat[dat$screen_t0_yn == 1,]
table(dat$rndgroup, useNA = "ifany") # 26203 CT, 25913 x-ray
# time to T0 scr
str(dat$scr_days0)
dat$scr_days0_num<-as.numeric(as.character(dat$scr_days0))
tapply(dat$scr_days0_num, dat$rndgroup, summary)
wilcox.test(dat$scr_days0_num~dat$rndgroup) # p < 0.0001
# NOTE(review): `na.rm` is not a t.test() argument; it is absorbed by `...`
# and likely ignored — confirm NA handling is as intended.
t.test(log(dat$scr_days0_num+1)~dat$rndgroup, na.rm = T) # p < 0.0001
table(is.na(dat$scr_days0_num), dat$screen_t0_yn) # All patients receive screening at T0 have scr_days0_num
tmp<-dat[dat$scr_days0_num>=0 & is.na(dat$scr_days0_num) == 0,]
tapply(tmp$scr_days0_num, tmp$rndgroup, summary)
wilcox.test(tmp$scr_days0_num~tmp$rndgroup) # p < 0.0001
# Outcome
## All cause mortality
### Adjust follow up time (see NLST user guide)
# Arm-specific constants subtract the average reporting lag for participants
# without a reported death (per the NLST user guide).
dat$fup_days_adj_allcausemort<-ifelse(dat$deathstat == "No report of death",
                                      ifelse(dat$rndgroup == "Spiral CT", dat$fup_days - 58.1772, # CT arm
                                             ifelse(dat$rndgroup == "X-ray", dat$fup_days - 58.7590, NA)), # x-ray arm
                                      dat$fup_days) # death (no change)
summary(dat$fup_days_adj_allcausemort)
tapply(dat$fup_days_adj_allcausemort, dat$deathstat, summary)
# Floor negative adjusted follow-up at zero.
dat$fup_days_adj_allcausemort<-ifelse(dat$fup_days_adj_allcausemort < 0 & is.na(dat$fup_days_adj_allcausemort) == 0, 0, dat$fup_days_adj_allcausemort)
tapply(dat$fup_days_adj_allcausemort, dat$deathstat, summary)
### All cause death
dat$allcause_death<-ifelse(dat$deathstat %in% c("EVP certified", "Death Certificate coded", "Death Certificate received but not coded"), 1, 0)
table(dat$deathstat, dat$allcause_death)
### 8-yr mortality
# NOTE(review): uses a 360-day year (360*8 = 2880 days) — presumably
# intentional; confirm against the analysis plan.
maxtm<-360*8
dat$allcause_death_8yr<-ifelse(dat$fup_days_adj_allcausemort <= maxtm, dat$allcause_death,
                               ifelse(dat$fup_days_adj_allcausemort > maxtm, 0, NA))
table(dat$allcause_death_8yr, useNA = "ifany") # 3791 deaths
dat$allcause_death_8yr_time<-ifelse(dat$fup_days_adj_allcausemort <= maxtm, dat$fup_days_adj_allcausemort,
                                    ifelse(dat$fup_days_adj_allcausemort > maxtm, maxtm, NA))
summary(dat$allcause_death_8yr_time, useNA = "ifany")
## Lung cancer mortality
### Adjust follow up time (see NLST user guide)
dat$fup_days_adj_lungmort<-ifelse(dat$deathcutoff %in% c("No death or no date of death", "Death Not Included"),
                                  ifelse(dat$rndgroup == "Spiral CT", dat$fup_days - 394.6020, # CT arm
                                         ifelse(dat$rndgroup == "X-ray", dat$fup_days - 392.1746, NA)), # x-ray arm
                                  dat$fup_days) # death (no change)
summary(dat$fup_days_adj_lungmort)
tapply(dat$fup_days_adj_lungmort, dat$deathcutoff, summary)
dat$fup_days_adj_lungmort<-ifelse(dat$fup_days_adj_lungmort < 0 & is.na(dat$fup_days_adj_lungmort) == 0, 0, dat$fup_days_adj_lungmort)
tapply(dat$fup_days_adj_lungmort, dat$deathcutoff, summary)
### Lung cancer deaths
dat$lung_death<-ifelse(dat$finaldeathlc %in% c("Death due to lung cancer or work-up of suspected lung cancer") & dat$deathcutoff == "Death Included", 1, 0)
table(dat$finaldeathlc, dat$lung_death, dat$deathcutoff)
### 8-yr mortality
maxtm<-360*8
dat$lung_death_8yr<-ifelse(dat$fup_days_adj_lungmort <= maxtm, dat$lung_death,
                           ifelse(dat$fup_days_adj_lungmort > maxtm, 0, NA))
table(dat$lung_death_8yr, useNA = "ifany") # 769 deaths
dat$lung_death_8yr_time<-ifelse(dat$fup_days_adj_lungmort <= maxtm, dat$fup_days_adj_lungmort,
                                ifelse(dat$fup_days_adj_lungmort > maxtm, maxtm, NA))
summary(dat$lung_death_8yr_time, useNA = "ifany")
# Check num of lost FU
table(dat$lung_death, dat$allcause_death, useNA = "ifany")
# Cov
## Trt
dat$CT<-ifelse(dat$rndgroup=="Spiral CT", 1, ifelse(dat$rndgroup=="X-ray",0,NA))
table(dat$rndgroup, dat$CT, useNA = "ifany")
## Age
summary(dat$age)
## Sex
table(dat$gender, useNA = "ifany") # 21795 female, 31376 male
dat$female<-ifelse(dat$gender == "Female", 1, ifelse(dat$gender == "Male", 0, NA))
table(dat$female, dat$gender, useNA = "ifany")
## Race/Ethnic
table(dat$race, useNA = "ifany");table(dat$ethnic, useNA = "ifany")
fisher.test(dat$race, dat$rndgroup, simulate.p.value=TRUE) # p = 0.08
fisher.test(dat$ethnic, dat$rndgroup, simulate.p.value=TRUE) # p = 0.0009
# Recode missing/refused/unknown race categories to NA.
dat$race_condensed<-ifelse(substr(dat$race,1,7) == "Missing" | dat$race %in% c("Participant refused to answer","Unknown/ decline to answer"), NA, as.character(dat$race))
table(dat$race_condensed, dat$race, useNA = "ifany")
table(dat$race_condensed, dat$rndgroup, useNA = "ifany")
fisher.test(dat$race_condensed, dat$rndgroup, simulate.p.value=TRUE) # p = 0.4053
# Collapse small race groups into "Other".
dat$race_condensed2<-factor(ifelse(dat$race_condensed %in% c("American Indian or Alaskan Native", "Native Hawaiian or Other Pacific Islander", "More than one race"), "Other",
                                   ifelse(dat$race_condensed == "Black or African-American", "Black", as.character(dat$race_condensed))))
dat$race_condensed2_num<-ifelse(dat$race_condensed2 == "White", 1,
                                ifelse(dat$race_condensed2 == "Black", 2,
                                       ifelse(dat$race_condensed2 == "Asian", 3,
                                              ifelse(dat$race_condensed2 == "Other", 4, NA))))
table(dat$race_condensed2, dat$race_condensed2_num, useNA = "ifany")
dat$ethnic_condensed<-ifelse(substr(dat$ethnic,1,7) == "Missing" | dat$ethnic %in% c("Participant refused to answer","Unknown/ decline to answer"), NA, as.character(dat$ethnic))
table(dat$ethnic_condensed)
table(dat$ethnic, dat$ethnic_condensed, useNA = "ifany")
fisher.test(dat$ethnic_condensed, dat$rndgroup, simulate.p.value=TRUE) # p = 0.0020
table(dat$race_condensed, dat$ethnic_condensed, useNA = "ifany")
## Smoking
### Current vs former
table(dat$cigsmok, dat$rndgroup, useNA = "ifany")
### Age quit
str(dat$age_quit)
dat$age_quit_num<-as.numeric(as.character(dat$age_quit))
tapply(dat$age_quit_num, dat$rndgroup, summary)
tapply(dat$age_quit_num, dat$cigsmok, summary)
### Yrs from cessation
dat$yrs_from_cessation0<-ifelse(dat$cigsmok=="Current",0,
                                ifelse(dat$cigsmok=="Former",dat$age-dat$age_quit_num, NA))
summary(dat$yrs_from_cessation0)
tapply(dat$yrs_from_cessation0, dat$cigsmok, summary)
# Clamp negative values to 0 (data-entry artifacts); values > 15 set to NA
# (NLST eligibility requires quitting within 15 years).
dat$yrs_from_cessation<-ifelse(dat$yrs_from_cessation0<0 & is.na(dat$yrs_from_cessation0)==0, 0,
                               ifelse(dat$yrs_from_cessation0>15 & is.na(dat$yrs_from_cessation0)==0, NA, as.numeric(dat$yrs_from_cessation0)))
summary(dat$yrs_from_cessation)
tapply(dat$yrs_from_cessation, dat$cigsmok, summary)
tmp<-dat[dat$yrs_from_cessation<0 & is.na(dat$yrs_from_cessation)==0,] # 12 pt
tmp<-dat[dat$yrs_from_cessation>15 & is.na(dat$yrs_from_cessation)==0,] # 288 pt
### Age start
str(dat$smokeage)
dat$smokeage_num<-as.numeric(as.character(dat$smokeage))
tapply(dat$smokeage_num, dat$rndgroup, summary)
### Pipe
table(dat$pipe, dat$rndgroup, useNA = "ifany")
dat$pipe_YNNA<-ifelse(dat$pipe == "Missing", NA, as.character(dat$pipe))
table(dat$pipe_YNNA, dat$pipe, useNA = "ifany")
chisq.test(dat$pipe_YNNA, dat$rndgroup) # p = 0.0653
### Cigar
table(dat$cigar, dat$rndgroup, useNA = "ifany")
dat$cigar_YNNA<-ifelse(dat$cigar == "Missing", NA, as.character(dat$cigar))
table(dat$cigar_YNNA, dat$cigar, useNA = "ifany") # 153 NA
### pack per yr
str(dat$pkyr)
tapply(dat$pkyr, dat$rndgroup, summary)
tapply(dat$pkyr, dat$rndgroup, hist)
wilcox.test(dat$pkyr~dat$rndgroup) # p = 0.9653
### Smoke live
table(dat$smokelive, dat$rndgroup, useNA = "ifany")
dat$smokelive_YNNA<-ifelse(dat$smokelive == "Missing", NA, as.character(dat$smokelive))
table(dat$smokelive_YNNA, dat$smokelive, useNA = "ifany") # 158 NA
### Smoke work
table(dat$smokework, dat$rndgroup, useNA = "ifany")
dat$smokework_YNNA<-ifelse(dat$smokework == "Missing", NA, as.character(dat$smokework))
table(dat$smokework_YNNA, dat$smokework, useNA = "ifany") # 259 NA
### Family hist
# "Yes" if any first-degree relative had lung cancer; "No" only if all are "No".
dat$fam_hist_lung<-ifelse(dat$famchild=="Yes" |dat$famfather=="Yes" | dat$fammother=="Yes"| dat$famsister=="Yes" | dat$fambrother=="Yes", "Yes",
                          ifelse(dat$famchild=="No" & dat$famfather=="No" & dat$fammother=="No" & dat$famsister=="No" & dat$fambrother=="No", "No", NA))
table(dat$fam_hist_lung, useNA = "ifany") # 1544 missing
dat$fam_hist_lung_num<-ifelse(dat$fam_hist_lung=="Yes",1, ifelse(dat$fam_hist_lung=="No",0,NA))
table(dat$fam_hist_lung_num, dat$fam_hist_lung, useNA = "ifany")
## Working hist and masks
# "Yes" if any occupational exposure is reported; "No" only if all are "No".
dat$wrk_hist<-ifelse(dat$wrkasbe=="Yes"|dat$wrkbaki=="Yes"|dat$wrkbutc=="Yes"| dat$wrkchem=="Yes"|dat$wrkcoal=="Yes"|dat$wrkcott=="Yes"|dat$wrkfarm=="Yes"| dat$wrkfire=="Yes"|dat$wrkflou=="Yes"|dat$wrkfoun=="Yes"|dat$wrkhard=="Yes"| dat$wrkpain=="Yes"|dat$wrksand=="Yes"|dat$wrkweld=="Yes", "Yes",
                     ifelse(dat$wrkasbe=="No"&dat$wrkbaki=="No"&dat$wrkbutc=="No"& dat$wrkchem=="No"&dat$wrkcoal=="No"&dat$wrkcott=="No"&dat$wrkfarm=="No"& dat$wrkfire=="No"&dat$wrkflou=="No"&dat$wrkfoun=="No"&dat$wrkhard=="No"& dat$wrkpain=="No"&dat$wrksand=="No"&dat$wrkweld=="No", "No", NA))
table(dat$wrk_hist, useNA = "ifany") # 88 NA
# BMI
dat$weight<-as.numeric(as.character(dat$weight))
dat$height<-as.numeric(as.character(dat$height))
# BMI from pounds/inches: 703 * weight / height^2.
dat$BMI<-as.numeric(as.character(dat$weight))/(as.numeric(as.character(dat$height))^2)*703
## Diag hist
# Recode "Missing" to NA for all disease- and cancer-history columns at once.
dat[,c("diagadas_YNNA", "diagasbe_YNNA", "diagbron_YNNA", "diagchas_YNNA", "diagchro_YNNA", "diagcopd_YNNA", "diagdiab_YNNA", "diagemph_YNNA", "diagfibr_YNNA", "diaghear_YNNA", "diaghype_YNNA", "diagpneu_YNNA", "diagsarc_YNNA", "diagsili_YNNA", "diagstro_YNNA", "diagtube_YNNA", # medical hist
      "cancblad_YNNA", "cancbrea_YNNA", "canccerv_YNNA", "canccolo_YNNA", "cancesop_YNNA", "canckidn_YNNA", "canclary_YNNA", "canclung_YNNA", "cancnasa_YNNA", "cancoral_YNNA", "cancpanc_YNNA", "cancphar_YNNA", "cancstom_YNNA", "cancthyr_YNNA", "canctran_YNNA")]<-
  apply(dat[,c("diagadas", "diagasbe", "diagbron", "diagchas", "diagchro", "diagcopd", "diagdiab", "diagemph", "diagfibr", "diaghear", "diaghype", "diagpneu", "diagsarc", "diagsili", "diagstro", "diagtube", # medical hist
               "cancblad", "cancbrea", "canccerv", "canccolo", "cancesop", "canckidn", "canclary", "canclung", "cancnasa", "cancoral", "cancpanc", "cancphar", "cancstom", "cancthyr", "canctran")], 2,
        function(x){ifelse(x == "Missing", NA, as.character(x))})
dat$diagcopd_num<-ifelse(dat$diagcopd_YNNA=="Yes",1, ifelse(dat$diagcopd_YNNA=="No",0,NA))
dat$diagemph_num<-ifelse(dat$diagemph_YNNA=="Yes",1, ifelse(dat$diagemph_YNNA=="No",0,NA))
table(dat$diagcopd_num, dat$diagcopd_YNNA, useNA = "ifany")
table(dat$diagemph_num, dat$diagemph_YNNA, useNA = "ifany")
# Number of confirmed cancer
dat$num_confirmed_condensed<-ifelse(dat$num_confirmed>=2, 2, dat$num_confirmed)
table(dat$num_confirmed_condensed, dat$num_confirmed)
# Alcohol per day
dat$acrin_drinknum_curr_num<-as.numeric(as.character(dat$acrin_drinknum_curr))
dat$acrin_drinknum_form_num<-as.numeric(as.character(dat$acrin_drinknum_form))
# NOTE(review): this sets the result to NA when BOTH current and former
# drink counts are non-missing and divides weekly counts by 7 otherwise.
# The both-present -> NA branch looks inverted; confirm intent.
dat$acfin_num_alc_perday<-ifelse(is.na(dat$acrin_drinknum_curr_num)==0 & is.na(dat$acrin_drinknum_form_num)==0, NA,
                                 ifelse(is.na(dat$acrin_drinknum_curr_num)==1 | is.na(dat$acrin_drinknum_form_num)==1,
                                        ifelse(is.na(dat$acrin_drinknum_curr_num)==0, dat$acrin_drinknum_curr_num/7,
                                               ifelse(is.na(dat$acrin_drinknum_form_num)==0, dat$acrin_drinknum_form_num/7, NA)),NA))
# Married
table(dat$marital, useNA = "ifany")
dat$marital_condensed<-ifelse(dat$marital %in% c("Missing", "Not Ascertained", "Participant refused to answer"), NA, as.character(dat$marital))
table(dat$marital_condensed, dat$marital, useNA = "ifany") # 154 NA
# Education
table(dat$educat, dat$rndgroup, useNA = "ifany")
dat$educat_condensed<-ifelse(substr(dat$educat, 1, 7) %in% c("Missing", "Unknown"), NA, as.character(dat$educat))
table(dat$educat_condensed, dat$educat, useNA = "ifany")
# Final analysis file: keep ID/arm, outcomes, and derived covariates.
dat1<-dat[,c("cen", "pid", "rndgroup", "study", # Study
             "age", "educat_condensed", "ethnic", "gender", "height", "marital_condensed" , "race", "weight", "race_condensed", "ethnic_condensed","BMI", # Demo
             "age_quit_num", "cigar_YNNA", "cigsmok", "pipe_YNNA", "pkyr", "smokeage_num", "smokeday", "smokelive_YNNA", "smokework_YNNA", "smokeyr", "yrs_from_cessation",# Smk
             "scr_days0", "fup_days_adj_allcausemort", "allcause_death", "allcause_death_8yr", "allcause_death_8yr_time", "fup_days_adj_lungmort", "lung_death", "lung_death_8yr", "lung_death_8yr_time", # Outcome
             "diagadas_YNNA", "diagasbe_YNNA", "diagbron_YNNA", "diagchas_YNNA", "diagchro_YNNA", "diagcopd_YNNA", "diagdiab_YNNA", "diagemph_YNNA", "diagfibr_YNNA", "diaghear_YNNA", "diaghype_YNNA", "diagpneu_YNNA", "diagsarc_YNNA", "diagsili_YNNA", "diagstro_YNNA", "diagtube_YNNA", # medical hist
             "cancblad_YNNA", "cancbrea_YNNA", "canccerv_YNNA", "canccolo_YNNA", "cancesop_YNNA", "canckidn_YNNA", "canclary_YNNA", "canclung_YNNA", "cancnasa_YNNA", "cancoral_YNNA", "cancpanc_YNNA", "cancphar_YNNA", "cancstom_YNNA", "cancthyr_YNNA", "canctran_YNNA", # cancer hist
             "fam_hist_lung", # family history
             "wrk_hist", #work hist,
             "race_condensed2_num","female","diagcopd_num","diagemph_num","fam_hist_lung_num","CT" )] # 67 var
write.csv(dat1, "NSLT_baseline_072320.csv")
/NLST_data_072720.R
no_license
JoyceLin415/NLST_HTE
R
false
false
16,130
r
# 072720 rm(list = ls()) library(here) library(survival) dat0<-read.csv("NSLT_participants_dat0.csv") dat<-dat0 #---- Data management & summary statistics ----# names(dat) table(dat$rndgroup, useNA = "ifany") # 26722 CT, 26730 X-ray # Num of ineligible obs table(dat$ineligible, dat$elig, useNA = "ifany") # 53248 elig, 204 inelig table(dat$elig, dat$rndgroup, useNA = "ifany") chisq.test(dat$elig, dat$rndgroup) # p = 0.3629 dat<-dat[dat$elig == "Eligible Participant",] table(dat$rndgroup, useNA = "ifany") # 26627 CT, 26621 X-ray2 # Check num of obs with fup_days = 0 & remove them str(dat$fup_days) dat$fup_is_0<-ifelse(dat$fup_days==0, 1, 0) table(dat$fup_is_0) # 58 fup = 0 (they all dead) table(dat$deathstat, dat$fup_is_0) chisq.test(dat$fup_is_0, dat$rndgroup) # p = 0.8623 dat<-dat[dat$fup_days!=0,] table(dat$rndgroup, useNA = "ifany") # 26593 CT, 26578 x-ray # Num who received T0 screenig table(dat$scr_res0) table(dat$scr_res0, dat$rndgroup, useNA = "ifany") dat$screen_t0_yn<-ifelse(substr(dat$scr_res0, 0, 3) %in% c("Neg", "Pos"), 1, ifelse(substr(dat$scr_res0, 0, 3) %in% c("Not", "Ina"), 0, NA)) table(dat$screen_t0_yn, useNA = "ifany") # 52174 left, 1074 excluded table(dat$screen_t0_yn, dat$rndgroup, useNA = "ifany") chisq.test(dat$screen_t0_yn, dat$rndgroup) # p < 0.0001 dat<-dat[dat$screen_t0_yn == 1,] table(dat$rndgroup, useNA = "ifany") # 26203 CT, 25913 x-ray # time to T0 scr str(dat$scr_days0) dat$scr_days0_num<-as.numeric(as.character(dat$scr_days0)) tapply(dat$scr_days0_num, dat$rndgroup, summary) wilcox.test(dat$scr_days0_num~dat$rndgroup) # p < 0.0001 t.test(log(dat$scr_days0_num+1)~dat$rndgroup, na.rm = T) # p < 0.0001 table(is.na(dat$scr_days0_num), dat$screen_t0_yn) # All patients receive screening at T0 have scr_days0_num tmp<-dat[dat$scr_days0_num>=0 & is.na(dat$scr_days0_num) == 0,] tapply(tmp$scr_days0_num, tmp$rndgroup, summary) wilcox.test(tmp$scr_days0_num~tmp$rndgroup) # p < 0.0001 # Outcome ## All cause mortality ### Adjust follow up time 
(see NLST user guide) dat$fup_days_adj_allcausemort<-ifelse(dat$deathstat == "No report of death", ifelse(dat$rndgroup == "Spiral CT", dat$fup_days - 58.1772, # CT arm ifelse(dat$rndgroup == "X-ray", dat$fup_days - 58.7590, NA)), # x-ray arm dat$fup_days) # death (no change) summary(dat$fup_days_adj_allcausemort) tapply(dat$fup_days_adj_allcausemort, dat$deathstat, summary) dat$fup_days_adj_allcausemort<-ifelse(dat$fup_days_adj_allcausemort < 0 & is.na(dat$fup_days_adj_allcausemort) == 0, 0, dat$fup_days_adj_allcausemort) tapply(dat$fup_days_adj_allcausemort, dat$deathstat, summary) ### All cause death dat$allcause_death<-ifelse(dat$deathstat %in% c("EVP certified", "Death Certificate coded", "Death Certificate received but not coded"), 1, 0) table(dat$deathstat, dat$allcause_death) ### 8-yr mortality maxtm<-360*8 dat$allcause_death_8yr<-ifelse(dat$fup_days_adj_allcausemort <= maxtm, dat$allcause_death, ifelse(dat$fup_days_adj_allcausemort > maxtm, 0, NA)) table(dat$allcause_death_8yr, useNA = "ifany") # 3791 deaths dat$allcause_death_8yr_time<-ifelse(dat$fup_days_adj_allcausemort <= maxtm, dat$fup_days_adj_allcausemort, ifelse(dat$fup_days_adj_allcausemort > maxtm, maxtm, NA)) summary(dat$allcause_death_8yr_time, useNA = "ifany") ## Lung cancer mortality ### Adjust follow up time (see NLST user guide) dat$fup_days_adj_lungmort<-ifelse(dat$deathcutoff %in% c("No death or no date of death", "Death Not Included"), ifelse(dat$rndgroup == "Spiral CT", dat$fup_days - 394.6020, # CT arm ifelse(dat$rndgroup == "X-ray", dat$fup_days - 392.1746, NA)), # x-ray arm dat$fup_days) # death (no change) summary(dat$fup_days_adj_lungmort) tapply(dat$fup_days_adj_lungmort, dat$deathcutoff, summary) dat$fup_days_adj_lungmort<-ifelse(dat$fup_days_adj_lungmort < 0 & is.na(dat$fup_days_adj_lungmort) == 0, 0, dat$fup_days_adj_lungmort) tapply(dat$fup_days_adj_lungmort, dat$deathcutoff, summary) ### Lung cancer deaths dat$lung_death<-ifelse(dat$finaldeathlc %in% c("Death due to lung 
cancer or work-up of suspected lung cancer") & dat$deathcutoff == "Death Included", 1, 0) table(dat$finaldeathlc, dat$lung_death, dat$deathcutoff) ### 8-yr mortality maxtm<-360*8 dat$lung_death_8yr<-ifelse(dat$fup_days_adj_lungmort <= maxtm, dat$lung_death, ifelse(dat$fup_days_adj_lungmort > maxtm, 0, NA)) table(dat$lung_death_8yr, useNA = "ifany") # 769 deaths dat$lung_death_8yr_time<-ifelse(dat$fup_days_adj_lungmort <= maxtm, dat$fup_days_adj_lungmort, ifelse(dat$fup_days_adj_lungmort > maxtm, maxtm, NA)) summary(dat$lung_death_8yr_time, useNA = "ifany") # Check num of lost FU table(dat$lung_death, dat$allcause_death, useNA = "ifany") # Cov ## Trt dat$CT<-ifelse(dat$rndgroup=="Spiral CT", 1, ifelse(dat$rndgroup=="X-ray",0,NA)) table(dat$rndgroup, dat$CT, useNA = "ifany") ## Age summary(dat$age) ## Sex table(dat$gender, useNA = "ifany") # 21795 female, 31376 male dat$female<-ifelse(dat$gender == "Female", 1, ifelse(dat$gender == "Male", 0, NA)) table(dat$female, dat$gender, useNA = "ifany") ## Race/Ethnic table(dat$race, useNA = "ifany");table(dat$ethnic, useNA = "ifany") fisher.test(dat$race, dat$rndgroup, simulate.p.value=TRUE) # p = 0.08 fisher.test(dat$ethnic, dat$rndgroup, simulate.p.value=TRUE) # p = 0.0009 dat$race_condensed<-ifelse(substr(dat$race,1,7) == "Missing" | dat$race %in% c("Participant refused to answer","Unknown/ decline to answer"), NA, as.character(dat$race)) table(dat$race_condensed, dat$race, useNA = "ifany") table(dat$race_condensed, dat$rndgroup, useNA = "ifany") fisher.test(dat$race_condensed, dat$rndgroup, simulate.p.value=TRUE) # p = 0.4053 dat$race_condensed2<-factor(ifelse(dat$race_condensed %in% c("American Indian or Alaskan Native", "Native Hawaiian or Other Pacific Islander", "More than one race"), "Other", ifelse(dat$race_condensed == "Black or African-American", "Black", as.character(dat$race_condensed)))) dat$race_condensed2_num<-ifelse(dat$race_condensed2 == "White", 1, ifelse(dat$race_condensed2 == "Black", 2, 
ifelse(dat$race_condensed2 == "Asian", 3, ifelse(dat$race_condensed2 == "Other", 4, NA)))) table(dat$race_condensed2, dat$race_condensed2_num, useNA = "ifany") dat$ethnic_condensed<-ifelse(substr(dat$ethnic,1,7) == "Missing" | dat$ethnic %in% c("Participant refused to answer","Unknown/ decline to answer"), NA, as.character(dat$ethnic)) table(dat$ethnic_condensed) table(dat$ethnic, dat$ethnic_condensed, useNA = "ifany") fisher.test(dat$ethnic_condensed, dat$rndgroup, simulate.p.value=TRUE) # p = 0.0020 table(dat$race_condensed, dat$ethnic_condensed, useNA = "ifany") ## Smoking ### Current vs former table(dat$cigsmok, dat$rndgroup, useNA = "ifany") ### Age quit str(dat$age_quit) dat$age_quit_num<-as.numeric(as.character(dat$age_quit)) tapply(dat$age_quit_num, dat$rndgroup, summary) tapply(dat$age_quit_num, dat$cigsmok, summary) ### Yrs from cessation dat$yrs_from_cessation0<-ifelse(dat$cigsmok=="Current",0, ifelse(dat$cigsmok=="Former",dat$age-dat$age_quit_num, NA)) summary(dat$yrs_from_cessation0) tapply(dat$yrs_from_cessation0, dat$cigsmok, summary) dat$yrs_from_cessation<-ifelse(dat$yrs_from_cessation0<0 & is.na(dat$yrs_from_cessation0)==0, 0, ifelse(dat$yrs_from_cessation0>15 & is.na(dat$yrs_from_cessation0)==0, NA, as.numeric(dat$yrs_from_cessation0))) summary(dat$yrs_from_cessation) tapply(dat$yrs_from_cessation, dat$cigsmok, summary) tmp<-dat[dat$yrs_from_cessation<0 & is.na(dat$yrs_from_cessation)==0,] # 12 pt tmp<-dat[dat$yrs_from_cessation>15 & is.na(dat$yrs_from_cessation)==0,] # 288 pt ### Age start str(dat$smokeage) dat$smokeage_num<-as.numeric(as.character(dat$smokeage)) tapply(dat$smokeage_num, dat$rndgroup, summary) ### Pipe table(dat$pipe, dat$rndgroup, useNA = "ifany") dat$pipe_YNNA<-ifelse(dat$pipe == "Missing", NA, as.character(dat$pipe)) table(dat$pipe_YNNA, dat$pipe, useNA = "ifany") chisq.test(dat$pipe_YNNA, dat$rndgroup) # p = 0.0653 ### Cigar table(dat$cigar, dat$rndgroup, useNA = "ifany") dat$cigar_YNNA<-ifelse(dat$cigar == "Missing", NA, 
as.character(dat$cigar)) table(dat$cigar_YNNA, dat$cigar, useNA = "ifany") # 153 NA ### pack per yr str(dat$pkyr) tapply(dat$pkyr, dat$rndgroup, summary) tapply(dat$pkyr, dat$rndgroup, hist) wilcox.test(dat$pkyr~dat$rndgroup) # p = 0.9653 ### Smoke live table(dat$smokelive, dat$rndgroup, useNA = "ifany") dat$smokelive_YNNA<-ifelse(dat$smokelive == "Missing", NA, as.character(dat$smokelive)) table(dat$smokelive_YNNA, dat$smokelive, useNA = "ifany") # 158 NA ### Smoke work table(dat$smokework, dat$rndgroup, useNA = "ifany") dat$smokework_YNNA<-ifelse(dat$smokework == "Missing", NA, as.character(dat$smokework)) table(dat$smokework_YNNA, dat$smokework, useNA = "ifany") # 259 NA ### Family hist dat$fam_hist_lung<-ifelse(dat$famchild=="Yes" |dat$famfather=="Yes" | dat$fammother=="Yes"| dat$famsister=="Yes" | dat$fambrother=="Yes", "Yes", ifelse(dat$famchild=="No" & dat$famfather=="No" & dat$fammother=="No" & dat$famsister=="No" & dat$fambrother=="No", "No", NA)) table(dat$fam_hist_lung, useNA = "ifany") # 1544 missing dat$fam_hist_lung_num<-ifelse(dat$fam_hist_lung=="Yes",1, ifelse(dat$fam_hist_lung=="No",0,NA)) table(dat$fam_hist_lung_num, dat$fam_hist_lung, useNA = "ifany") ## Working hist and masks dat$wrk_hist<-ifelse(dat$wrkasbe=="Yes"|dat$wrkbaki=="Yes"|dat$wrkbutc=="Yes"| dat$wrkchem=="Yes"|dat$wrkcoal=="Yes"|dat$wrkcott=="Yes"|dat$wrkfarm=="Yes"| dat$wrkfire=="Yes"|dat$wrkflou=="Yes"|dat$wrkfoun=="Yes"|dat$wrkhard=="Yes"| dat$wrkpain=="Yes"|dat$wrksand=="Yes"|dat$wrkweld=="Yes", "Yes", ifelse(dat$wrkasbe=="No"&dat$wrkbaki=="No"&dat$wrkbutc=="No"& dat$wrkchem=="No"&dat$wrkcoal=="No"&dat$wrkcott=="No"&dat$wrkfarm=="No"& dat$wrkfire=="No"&dat$wrkflou=="No"&dat$wrkfoun=="No"&dat$wrkhard=="No"& dat$wrkpain=="No"&dat$wrksand=="No"&dat$wrkweld=="No", "No", NA)) table(dat$wrk_hist, useNA = "ifany") # 88 NA # BMI dat$weight<-as.numeric(as.character(dat$weight)) dat$height<-as.numeric(as.character(dat$height)) 
dat$BMI<-as.numeric(as.character(dat$weight))/(as.numeric(as.character(dat$height))^2)*703 ## Diag hist dat[,c("diagadas_YNNA", "diagasbe_YNNA", "diagbron_YNNA", "diagchas_YNNA", "diagchro_YNNA", "diagcopd_YNNA", "diagdiab_YNNA", "diagemph_YNNA", "diagfibr_YNNA", "diaghear_YNNA", "diaghype_YNNA", "diagpneu_YNNA", "diagsarc_YNNA", "diagsili_YNNA", "diagstro_YNNA", "diagtube_YNNA", # medical hist "cancblad_YNNA", "cancbrea_YNNA", "canccerv_YNNA", "canccolo_YNNA", "cancesop_YNNA", "canckidn_YNNA", "canclary_YNNA", "canclung_YNNA", "cancnasa_YNNA", "cancoral_YNNA", "cancpanc_YNNA", "cancphar_YNNA", "cancstom_YNNA", "cancthyr_YNNA", "canctran_YNNA")]<- apply(dat[,c("diagadas", "diagasbe", "diagbron", "diagchas", "diagchro", "diagcopd", "diagdiab", "diagemph", "diagfibr", "diaghear", "diaghype", "diagpneu", "diagsarc", "diagsili", "diagstro", "diagtube", # medical hist "cancblad", "cancbrea", "canccerv", "canccolo", "cancesop", "canckidn", "canclary", "canclung", "cancnasa", "cancoral", "cancpanc", "cancphar", "cancstom", "cancthyr", "canctran")], 2, function(x){ifelse(x == "Missing", NA, as.character(x))}) dat$diagcopd_num<-ifelse(dat$diagcopd_YNNA=="Yes",1, ifelse(dat$diagcopd_YNNA=="No",0,NA)) dat$diagemph_num<-ifelse(dat$diagemph_YNNA=="Yes",1, ifelse(dat$diagemph_YNNA=="No",0,NA)) table(dat$diagcopd_num, dat$diagcopd_YNNA, useNA = "ifany") table(dat$diagemph_num, dat$diagemph_YNNA, useNA = "ifany") # Number of confirmed cancer dat$num_confirmed_condensed<-ifelse(dat$num_confirmed>=2, 2, dat$num_confirmed) table(dat$num_confirmed_condensed, dat$num_confirmed) # Alcohol per day dat$acrin_drinknum_curr_num<-as.numeric(as.character(dat$acrin_drinknum_curr)) dat$acrin_drinknum_form_num<-as.numeric(as.character(dat$acrin_drinknum_form)) dat$acfin_num_alc_perday<-ifelse(is.na(dat$acrin_drinknum_curr_num)==0 & is.na(dat$acrin_drinknum_form_num)==0, NA, ifelse(is.na(dat$acrin_drinknum_curr_num)==1 | is.na(dat$acrin_drinknum_form_num)==1, 
ifelse(is.na(dat$acrin_drinknum_curr_num)==0, dat$acrin_drinknum_curr_num/7, ifelse(is.na(dat$acrin_drinknum_form_num)==0, dat$acrin_drinknum_form_num/7, NA)),NA)) # Married table(dat$marital, useNA = "ifany") dat$marital_condensed<-ifelse(dat$marital %in% c("Missing", "Not Ascertained", "Participant refused to answer"), NA, as.character(dat$marital)) table(dat$marital_condensed, dat$marital, useNA = "ifany") # 154 NA # Education table(dat$educat, dat$rndgroup, useNA = "ifany") dat$educat_condensed<-ifelse(substr(dat$educat, 1, 7) %in% c("Missing", "Unknown"), NA, as.character(dat$educat)) table(dat$educat_condensed, dat$educat, useNA = "ifany") dat1<-dat[,c("cen", "pid", "rndgroup", "study", # Study "age", "educat_condensed", "ethnic", "gender", "height", "marital_condensed" , "race", "weight", "race_condensed", "ethnic_condensed","BMI", # Demo "age_quit_num", "cigar_YNNA", "cigsmok", "pipe_YNNA", "pkyr", "smokeage_num", "smokeday", "smokelive_YNNA", "smokework_YNNA", "smokeyr", "yrs_from_cessation",# Smk "scr_days0", "fup_days_adj_allcausemort", "allcause_death", "allcause_death_8yr", "allcause_death_8yr_time", "fup_days_adj_lungmort", "lung_death", "lung_death_8yr", "lung_death_8yr_time", # Outcome "diagadas_YNNA", "diagasbe_YNNA", "diagbron_YNNA", "diagchas_YNNA", "diagchro_YNNA", "diagcopd_YNNA", "diagdiab_YNNA", "diagemph_YNNA", "diagfibr_YNNA", "diaghear_YNNA", "diaghype_YNNA", "diagpneu_YNNA", "diagsarc_YNNA", "diagsili_YNNA", "diagstro_YNNA", "diagtube_YNNA", # medical hist "cancblad_YNNA", "cancbrea_YNNA", "canccerv_YNNA", "canccolo_YNNA", "cancesop_YNNA", "canckidn_YNNA", "canclary_YNNA", "canclung_YNNA", "cancnasa_YNNA", "cancoral_YNNA", "cancpanc_YNNA", "cancphar_YNNA", "cancstom_YNNA", "cancthyr_YNNA", "canctran_YNNA", # cancer hist "fam_hist_lung", # family history "wrk_hist", #work hist, "race_condensed2_num","female","diagcopd_num","diagemph_num","fam_hist_lung_num","CT" )] # 67 var write.csv(dat1, "NSLT_baseline_072320.csv")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/week.R \name{ymd_to_weekno} \alias{ymd_to_weekno} \title{Calculate weekno from Year, month and Day} \usage{ ymd_to_weekno(year, month, day) } \arguments{ \item{year}{the year of the focal date} \item{month}{the month of the focal date} \item{day}{the date of the focal date} } \value{ Week number } \description{ Calculate weekno from Year, month and Day }
/man/ymd_to_weekno.Rd
permissive
hmito/hmRLib
R
false
true
437
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/week.R \name{ymd_to_weekno} \alias{ymd_to_weekno} \title{Calculate weekno from Year, month and Day} \usage{ ymd_to_weekno(year, month, day) } \arguments{ \item{year}{the year of the focal date} \item{month}{the month of the focal date} \item{day}{the date of the focal date} } \value{ Week number } \description{ Calculate weekno from Year, month and Day }
library(data.table) library(dtangle) library(doParallel) get_marker_list <- function(value){ typ <- typeof(value[[1]]) if (typ == "list") return(value$L) return(value) } multi_core_function <- function(samples_no){ library(data.table) library(dtangle) input_file <- paste0('~/IndependentStudy/Data/SignatureSimulation/', as.character(samples_no), '_signature.tsv') reference_samples_py <- fread(input_file, data.table = FALSE) row.names(reference_samples_py) <- reference_samples_py$`Unnamed: 0` pure_samples <- reference_samples_py$`Unnamed: 0` reference_samples_py$`Unnamed: 0` <- NULL reference_samples_py$V1 <- NULL # reference_samples_py <- t(reference_samples_py) # reference_samples_py <- log2(reference_samples_py) n_markers <- 0.1 K <- 48 markers <- find_markers(Y=reference_samples_py, pure_samples = c(1:48), marker_method = 'ratio') Y <- reference_samples_py # n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, ncol(Y)/K) n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, 600/K) markers <- get_marker_list(markers) if (length(n_markers) == 1) n_markers <- rep(n_markers, K) wq_markers <- which(n_markers < 1) n_markers[wq_markers] <- floor(n_markers[wq_markers] * lengths(markers)[wq_markers]) mrkrs <- lapply(1:K, function(i) { markers[[i]][1:n_markers[i]] }) names(mrkrs) <- names(pure_samples) mrkers_list <- c() for(i in mrkrs){ mrkers_list <- c(mrkers_list, i) } result_mrkers <- data.frame(mrkers_list) result_mrkers$mrkers_list <- result_mrkers$mrkers_list-1 output_file <- paste0('~/IndependentStudy/Data/dtangle_500/', as.character(samples_no), '_signature_filter.tsv') # print(output_file) write.csv(x = result_mrkers, file = output_file) # write.csv(x = reference_samples_py, file = output_file) } cl <- makeCluster(50) registerDoParallel(cl) foreach(samples_no = 0:99) %dopar% multi_core_function(samples_no) # samples_no = 0 # input_file <- paste0('~/IndependentStudy/Data/SignatureSimulation/', as.character(samples_no), '_signature.tsv') # 
reference_samples_py <- fread(input_file, data.table = FALSE) # row.names(reference_samples_py) <- reference_samples_py$`Unnamed: 0` # pure_samples <- reference_samples_py$`Unnamed: 0` # reference_samples_py$`Unnamed: 0` <- NULL # reference_samples_py$V1 <- NULL # # reference_samples_py <- t(reference_samples_py) # # reference_samples_py <- log2(reference_samples_py) # n_markers <- 0.1 # K <- 48 # markers <- find_markers(Y=reference_samples_py, pure_samples = c(1:48), marker_method = 'ratio') # Y <- reference_samples_py # # n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, ncol(Y)/K) # n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, 600/K) # markers <- get_marker_list(markers) # if (length(n_markers) == 1) # n_markers <- rep(n_markers, K) # wq_markers <- which(n_markers < 1) # n_markers[wq_markers] <- floor(n_markers[wq_markers] * lengths(markers)[wq_markers]) # mrkrs <- lapply(1:K, function(i) { # markers[[i]][1:n_markers[i]] # }) # names(mrkrs) <- names(pure_samples) # mrkers_list <- c() # for(i in mrkrs){ # mrkers_list <- c(mrkers_list, i) # } # result_mrkers <- data.frame(mrkers_list) # result_mrkers$mrkers_list <- result_mrkers$mrkers_list-1 # output_file <- paste0('~/IndependentStudy/Data/dtangle_500/', as.character(samples_no), '_signature_filter.tsv') # # print(output_file) # # write.csv(x = result_mrkers, file = output_file) # write.csv(x = reference_samples_py, file = output_file)
/Code/marker_genes/Analysis_marker_gene_finding_500.r
no_license
yujias424/RNA-seq_deconvolution_project
R
false
false
3,533
r
library(data.table) library(dtangle) library(doParallel) get_marker_list <- function(value){ typ <- typeof(value[[1]]) if (typ == "list") return(value$L) return(value) } multi_core_function <- function(samples_no){ library(data.table) library(dtangle) input_file <- paste0('~/IndependentStudy/Data/SignatureSimulation/', as.character(samples_no), '_signature.tsv') reference_samples_py <- fread(input_file, data.table = FALSE) row.names(reference_samples_py) <- reference_samples_py$`Unnamed: 0` pure_samples <- reference_samples_py$`Unnamed: 0` reference_samples_py$`Unnamed: 0` <- NULL reference_samples_py$V1 <- NULL # reference_samples_py <- t(reference_samples_py) # reference_samples_py <- log2(reference_samples_py) n_markers <- 0.1 K <- 48 markers <- find_markers(Y=reference_samples_py, pure_samples = c(1:48), marker_method = 'ratio') Y <- reference_samples_py # n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, ncol(Y)/K) n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, 600/K) markers <- get_marker_list(markers) if (length(n_markers) == 1) n_markers <- rep(n_markers, K) wq_markers <- which(n_markers < 1) n_markers[wq_markers] <- floor(n_markers[wq_markers] * lengths(markers)[wq_markers]) mrkrs <- lapply(1:K, function(i) { markers[[i]][1:n_markers[i]] }) names(mrkrs) <- names(pure_samples) mrkers_list <- c() for(i in mrkrs){ mrkers_list <- c(mrkers_list, i) } result_mrkers <- data.frame(mrkers_list) result_mrkers$mrkers_list <- result_mrkers$mrkers_list-1 output_file <- paste0('~/IndependentStudy/Data/dtangle_500/', as.character(samples_no), '_signature_filter.tsv') # print(output_file) write.csv(x = result_mrkers, file = output_file) # write.csv(x = reference_samples_py, file = output_file) } cl <- makeCluster(50) registerDoParallel(cl) foreach(samples_no = 0:99) %dopar% multi_core_function(samples_no) # samples_no = 0 # input_file <- paste0('~/IndependentStudy/Data/SignatureSimulation/', as.character(samples_no), '_signature.tsv') # 
reference_samples_py <- fread(input_file, data.table = FALSE) # row.names(reference_samples_py) <- reference_samples_py$`Unnamed: 0` # pure_samples <- reference_samples_py$`Unnamed: 0` # reference_samples_py$`Unnamed: 0` <- NULL # reference_samples_py$V1 <- NULL # # reference_samples_py <- t(reference_samples_py) # # reference_samples_py <- log2(reference_samples_py) # n_markers <- 0.1 # K <- 48 # markers <- find_markers(Y=reference_samples_py, pure_samples = c(1:48), marker_method = 'ratio') # Y <- reference_samples_py # # n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, ncol(Y)/K) # n_markers <- sapply(floor(0.1 * lengths(markers$L)), min, 600/K) # markers <- get_marker_list(markers) # if (length(n_markers) == 1) # n_markers <- rep(n_markers, K) # wq_markers <- which(n_markers < 1) # n_markers[wq_markers] <- floor(n_markers[wq_markers] * lengths(markers)[wq_markers]) # mrkrs <- lapply(1:K, function(i) { # markers[[i]][1:n_markers[i]] # }) # names(mrkrs) <- names(pure_samples) # mrkers_list <- c() # for(i in mrkrs){ # mrkers_list <- c(mrkers_list, i) # } # result_mrkers <- data.frame(mrkers_list) # result_mrkers$mrkers_list <- result_mrkers$mrkers_list-1 # output_file <- paste0('~/IndependentStudy/Data/dtangle_500/', as.character(samples_no), '_signature_filter.tsv') # # print(output_file) # # write.csv(x = result_mrkers, file = output_file) # write.csv(x = reference_samples_py, file = output_file)
#' Naver 1차 연관검색어 추출 #' @description 특정 키워드를 입력하여 실행하면 그 키워드를 기준으로 검색되는 네이버 연관검색어가 벡터형으로 반환됩니다. #' @param x 키워드를 캐릭터 형식으로 입력합니다 #' @export #' @examples #' naverRelation1('한국') #' naverRelation1('사과') naverRelation1 <- function(x){ # Pre stopifnot(is.character(x)) stopifnot(require(rvest)); stopifnot(require(stringr)); stopifnot(require(lava)) # Content html <- paste0('https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=', x) %>% read_html %>% html_nodes(css = '.lst_relate') %>% html_text %>% trim %>% str_split(' ') %>% unlist # Return return(html) } #' Naver 2차 연관검색어 추출 #' @description 특정 키워드를 입력하여 실행하면 그 키워드를 기준으로 검색되는 네이버 연관검색어가 2차 연관검색어까지 데이터프레임 형태로 반환됩니다. #' @param x 키워드를 캐릭터 형식으로 입력합니다 #' @export #' @return tibble type data.frame #' @examples #' naverRelation2("한국") #' naverRelation2("사과") naverRelation2 <- function(x){ # Pre stopifnot(is.character(x)) stopifnot(require(rvest)); stopifnot(require(stringr)); stopifnot(require(lava)); stopifnot(require(dplyr)); stopifnot(require(reshape2)) # Content html <- paste0('https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=', x) %>% read_html %>% html_nodes(css = '.lst_relate') %>% html_text %>% trim %>% str_split(' ') %>% unlist html2 <- list() for(i in html) html2[[i]] <- naverRelation1(i) pre <- html2 %>% # lapply(FUN = function(x) gsub(paste0("\\b", x, "\\b", "|", "^", x, " ", "| ", x, "$"), "", x = x)) %>% melt %>% tbl_df %>% select(R1 = L1, R2 = value) %>% dplyr::filter(R2 != "") # Return return(pre) }
/naverRelation.R
no_license
mkhoin/chatBotAPI
R
false
false
1,977
r
#' Naver 1차 연관검색어 추출 #' @description 특정 키워드를 입력하여 실행하면 그 키워드를 기준으로 검색되는 네이버 연관검색어가 벡터형으로 반환됩니다. #' @param x 키워드를 캐릭터 형식으로 입력합니다 #' @export #' @examples #' naverRelation1('한국') #' naverRelation1('사과') naverRelation1 <- function(x){ # Pre stopifnot(is.character(x)) stopifnot(require(rvest)); stopifnot(require(stringr)); stopifnot(require(lava)) # Content html <- paste0('https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=', x) %>% read_html %>% html_nodes(css = '.lst_relate') %>% html_text %>% trim %>% str_split(' ') %>% unlist # Return return(html) } #' Naver 2차 연관검색어 추출 #' @description 특정 키워드를 입력하여 실행하면 그 키워드를 기준으로 검색되는 네이버 연관검색어가 2차 연관검색어까지 데이터프레임 형태로 반환됩니다. #' @param x 키워드를 캐릭터 형식으로 입력합니다 #' @export #' @return tibble type data.frame #' @examples #' naverRelation2("한국") #' naverRelation2("사과") naverRelation2 <- function(x){ # Pre stopifnot(is.character(x)) stopifnot(require(rvest)); stopifnot(require(stringr)); stopifnot(require(lava)); stopifnot(require(dplyr)); stopifnot(require(reshape2)) # Content html <- paste0('https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=', x) %>% read_html %>% html_nodes(css = '.lst_relate') %>% html_text %>% trim %>% str_split(' ') %>% unlist html2 <- list() for(i in html) html2[[i]] <- naverRelation1(i) pre <- html2 %>% # lapply(FUN = function(x) gsub(paste0("\\b", x, "\\b", "|", "^", x, " ", "| ", x, "$"), "", x = x)) %>% melt %>% tbl_df %>% select(R1 = L1, R2 = value) %>% dplyr::filter(R2 != "") # Return return(pre) }
#!/usr/bin/env Rscript # # Annotate DMRs based on other genomic features (including # allele-specific H3K27me3 at promoters). # # run("imprinting", "annotate") dependOn("imprinting", c("define_regions", "topol")) # Make a big annotation table with all different types of information about DMRs and DEGs combined: curGenes <- geneAnnotationExpr$gene curRegsDt <- regsDt[imprintedRegionCandidates[ranges_type=="dmrseq",regionId],] curRegs <- lib$dtToGr(curRegsDt,"seqnames") curRegsM <- lib$dtToGr(curRegsDt[imprint_type=="M",],"seqnames") curRegsP <- lib$dtToGr(curRegsDt[imprint_type=="P",],"seqnames") dtAll <- geneAnnotationExpr dtAll <- merge(dtAll, dtExprComplete, by="gene", all=F)[!is.na(chrom),] dmrInPromo <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start=tss-promoWin, end=tss+promoWin)]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) dmrInGbody <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start, end)]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) & !dmrInPromo dmrUpstream <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start=ifelse(strand>0, tss, tss-100000), end=ifelse(strand>0, tss+100000, tss))]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) & !dmrInPromo & !dmrInGbody dmrDownstream <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start=ifelse(strand>0, start-100000, end), end=ifelse(strand>0, start, end+100000))]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) & !dmrInPromo & !dmrInGbody dtAll[, has_dmr_in_promo := dmrInPromo] dtAll[, has_dmr_in_gbody := dmrInGbody] dtAll[, has_dmr_100kb_us := dmrUpstream] dtAll[, has_dmr_100kb_ds := dmrDownstream] dtAll[, has_dmr_100kb_usds := dmrUpstream | dmrDownstream] dtAll[, is_confirmed:=gene%in%geneLists[list_type=="is_confirmed",gene]] dtAll[, is_unconfirmed:=gene%in%geneLists[list_type=="is_unconfirmed",gene]] dtAll[,is_bix_up := is_bix & log2fc>0] dtAll[,is_bix_down := is_bix & log2fc<0] dtAll[,is_bsx_up := is_bsx & log2fc>0] dtAll[,is_bsx_down := is_bsx & 
log2fc<0] ### add allele-specific H3K27me3 peak annotation ### # load data obtained from GEO: loadLibraries(c("R.utils","liftOver")) ch <- resultsDir("mm9toMm10.chain.gz") if(!file.exists(gsub(".gz","",ch))) { msg("download mm9 to mm10 chain") download.file("http://hgdownload.soe.ucsc.edu/goldenPath/mm9/liftOver/mm9ToMm10.over.chain.gz", destfile=ch) gunzip(ch) } ch <- import.chain(gsub(".gz","",ch)) f <- resultsDir("GSE76687_ICM_k27me3_maternal_broadpeak.bed.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE76687&format=file&file=GSE76687%5FICM%5Fk27me3%5Fmaternal%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3mat <- fread(f) h3k27me3mat <- lib$dtToGr(h3k27me3mat,"V1","V2","V3") h3k27me3mat <- unlist(liftOver(h3k27me3mat, ch)) f <- resultsDir("GSE76687_ICM_k27me3_paternal_broadpeak.bed.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE76687&format=file&file=GSE76687%5FICM%5Fk27me3%5Fpaternal%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3pat <- fread(f) h3k27me3pat <- lib$dtToGr(h3k27me3pat,"V1","V2","V3") h3k27me3pat <- unlist(liftOver(h3k27me3pat, ch)) h3k27me3Regs <- c(h3k27me3mat,h3k27me3pat) # - annotate all genes with a H3K27me3 peak within 5kb of their TSS: curDist <- 5000 curGeneWindows <- lib$dtToGr(transcriptAnnotationExpr[,.(chrom, start=tss-curDist, end=tss+curDist)]) dtAll[, h3k27me3_imprint:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3Regs, ignore.strand=T), unique(gene)]] # load H3K27me3 data from gametes: f <- resultsDir("GSE76687_h3k27me3_sperm.broadpeak.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM2041066&format=file&file=GSM2041066%5FSperm%5Fk27me3%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3sp <- fread(f) h3k27me3sp <- lib$dtToGr(h3k27me3sp,"V1","V2","V3") h3k27me3sp <- unlist(liftOver(h3k27me3sp, ch)) f <- resultsDir("GSE76687_h3k27me3_oocyte.broadpeak.gz") if(!file.exists(f)) 
download.file("https://ftp.ncbi.nlm.nih.gov/geo/series/GSE76nnn/GSE76687/suppl/GSE76687_MII_oocyte_k27me3_broadpeak.bed.gz", destfile=f) h3k27me3oo <- fread(f) h3k27me3oo <- lib$dtToGr(h3k27me3oo,"V1","V2","V3") h3k27me3oo <- unlist(liftOver(h3k27me3oo, ch)) f <- resultsDir("GSE76687_h3k27me3_icm_any.broadpeak.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE76687&format=file&file=GSE76687%5FICM%5Fk27me3%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3icmany <- fread(f) h3k27me3icmany <- lib$dtToGr(h3k27me3icmany,"V1","V2","V3") h3k27me3icmany <- unlist(liftOver(h3k27me3icmany, ch)) # annotate genes overlapping with H3K27me3 peaks: dtAll[, h3k27me3_icm_any:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3icmany, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_icm_mat:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3mat, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_icm_pat:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3pat, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_sperm:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3sp, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_oocyte:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3oo, ignore.strand=T), unique(gene)]] #m <- lib$dtToDf(dtAll[,.(gene, is_bix, is_bix_up, is_bix_down, is_bsx, is_bsx_up, is_bsx_down, h3k27me3_icm_mat, h3k27me3_icm_pat, h3k27me3_imprint, h3k27me3_sperm, h3k27me3_oocyte)]) * 1 # define sperm- or oocyte-specific H3K27me3 as those seen in one but not the other: dtAll[, h3k27me3_sperm_specific:=h3k27me3_sperm&!h3k27me3_oocyte] dtAll[, h3k27me3_oocyte_specific:=!h3k27me3_sperm&h3k27me3_oocyte] # likewise for maternal/paternal in the ICM: dtAll[, h3k27me3_icm_mat_specfic:=h3k27me3_icm_mat&!h3k27me3_icm_pat] dtAll[, h3k27me3_icm_pat_specfic:=h3k27me3_icm_pat&!h3k27me3_icm_mat] # define H3K27me3 imprinting status: 2 or -2 denotes 
sperm/oocyte/ICM-maternal/ICM-paternal specific, 1 denotes unspecific, 0 denotes no H3K27me3 present at promoter: dtAll[, h3k27me3_sperm_status:=ifelse(h3k27me3_sperm,ifelse(h3k27me3_sperm_specific, 2, 1),0)] dtAll[, h3k27me3_oocyte_status:=ifelse(h3k27me3_oocyte,ifelse(h3k27me3_oocyte_specific, -2, -1),0)] dtAll[, h3k27me3_gamete_status:=ifelse(h3k27me3_oocyte|h3k27me3_sperm,ifelse(h3k27me3_oocyte_specific, -2, ifelse(h3k27me3_sperm_specific, 2, 1)),0)] dtAll[, h3k27me3_icm_status:=ifelse(h3k27me3_icm_mat_specfic,-2,ifelse(h3k27me3_icm_pat_specfic,2,ifelse(h3k27me3_icm_any|h3k27me3_icm_pat|h3k27me3_icm_mat, 1, 0)))] # generate a heatmap summarizing H3K27me3 patterns: m <- lib$dtToDf(dtAll[,.(gene, is_bix_down=is_bix_down*3, is_bix_up=is_bix_up*3, is_bsx_down=is_bsx_down*3, is_bsx_up=is_bsx_up*3,h3k27me3_gamete_status, h3k27me3_icm_status)]) * 1 m <- m[rowSums(m[,grepl("is_b[si]x",colnames(m))]!=0)>0,] a <- lib$dtToDf(dtAll[rownames(m),.(gene, dir=sign(log2fc), gene_list=ifelse(is_confirmed==T, "is_confirmed", ifelse(is_nbix==T, "is_nbix", "is_nbsx")))]) aCols <- list(dir=brewer.pal(3,"RdBu"), gene_list=colorPalettes$genesetLabel[unique(a[,2])]) aCols$gene_list[["is_nbsx"]] <- alpha(aCols$gene_list[["is_nbsx"]], 0.25) m <- m[hclust(dist(m))$order,] m <- m[order(a[rownames(m),"dir"], a[rownames(m),"gene_list"]),] colnames(m) <- gsub("h3k27me3_(.+)_status","\\1",colnames(m)) pheatmap::pheatmap(m[,5:6], gaps_row=cumsum(table(a[rownames(m),1])), annotation_colors=aCols, annotation_row=a, cluster_cols=F, cluster_rows=F, main="All BsX", breaks=-3.5:3.5, file=resultsDir("hm_h3k27me3_oo_sp_status_filt.pdf"), treeheight_col=0, treeheight_row=0, cellheight=9, cellwidth=10, col=c("black","red","yellow","white","yellow","blue","black")) pheatmap::pheatmap(m[,5:6], gaps_row=cumsum(table(a[rownames(m),1])), show_rownames=F, annotation_colors=aCols, annotation_row=a, cluster_cols=F, cluster_rows=F, main="All BsX", breaks=-3.5:3.5, 
file=resultsDir("hm_h3k27me3_oo_sp_status_filt_nolbl.pdf"), treeheight_col=0, treeheight_row=0, cellheight=1, cellwidth=10, col=c("black","red","yellow","white","yellow","blue","black")) fwrite(cbind(a[rownames(m),], m[,5:6]), file=resultsDir("source_data_figure_4a.csv")) # genenerate pie charts: ref <- "is_robust_expr" pData <- melt(melt(dtAll, measure.vars=c(ref, "is_bix","is_bsx", "is_confirmed", "is_unconfirmed", "is_nbix", "is_nbsx", "published_imprint", "hc_repositories_imprint", "is_equivalent_0.1", "h3k27me3_confirmed_inoue_2017", "h3k27me3_candidate_inoue_2017", "h3k27me3_imprint"), variable.name="gene_selection", value.name="v1")[v1==T,], measure.vars=c("h3k27me3_gamete_status", "h3k27me3_icm_status"), variable.name="dev_stage")[, .(gene_selection=ifelse(gene_selection==ref,as.character(gene_selection),paste_(gene_selection, sign(log2fc))), dev_stage, status=ifelse(value==-2, "M", ifelse(value==2, "P", ifelse(abs(value)==1, "both", "-"))))] pData <- pData[, .(.N),by=.(dev_stage, gene_selection, status)] pData <- pData[, .(N2=N, N=sum(N), perc=N/sum(N), status), by=.(dev_stage, gene_selection)] selCats <- c("is_nbix", "is_nbsx", "is_confirmed") selCats <- c(ref,c(paste_(selCats,1),paste_(selCats,-1))) p <- ggplot(pData[gene_selection%in%selCats,], aes(x="", y=perc, fill=status))+ geom_bar(width=1, stat="identity") + coord_polar("y", start=0) + theme_minimal() + theme(axis.title.x = element_blank(), axis.title.y = element_blank(), panel.border = element_blank(), panel.grid=element_blank(), axis.ticks = element_blank(), plot.title=element_text(size=14, face="bold"), axis.text.x=element_blank(), legend.title=element_blank()) + facet_wrap(gene_selection~dev_stage+N, dir="v",nrow=2) p <- p + scale_fill_manual(values=c("-"="#EEEEEE", "both"="yellow", "M"="red", "P"="blue")) gg(p, "pie_h3k27_devstage", 44, 6, type="pdf") fwrite(pData[gene_selection%in%grep(ref,selCats,value=T),.(gene_selection, dev_stage, status, perc, N)], 
file=resultsDir("source_data_figure_4b.csv")) fwrite(pData[gene_selection%in%grep("_-1$",selCats,value=T),.(gene_selection, dev_stage, status, perc, N)], file=resultsDir("source_data_figure_4c.csv")) fwrite(pData[gene_selection%in%grep("_1$",selCats,value=T),.(gene_selection, dev_stage, status, perc, N)], file=resultsDir("source_data_figure_4d.csv")) pvals <- rblapply(setdiff(pData[,unique(gene_selection)],ref), function(geneSel) { rblapply(pData[,as.character(unique(dev_stage))], function(devStage) { rblapply(c("M","P"), function(imprintDir) { msgF("%s %s %s", geneSel, devStage, imprintDir) tab <- lib$dtToDf(dcast(unique(pData[gene_selection%in%c(ref,geneSel) & dev_stage==devStage, .(N2=sum(N2),N), by=.(gene_selection, dev_stage, status=ifelse(status==imprintDir,imprintDir,paste0("X",imprintDir)))]), status~gene_selection, fun.aggregate=sum, value.var="N2"))#[,c(2,1)] print(tab) data.table(fg=tab[1,1]/tab[2,1], bg=tab[1,2]/tab[2,2], pval=fisher.test(tab, alternative="greater")$p.value) }, "imprint_dir") }, "dev_stage") }, "gene_selection") pvals[,padj:=p.adjust(pval)] pvals[,psig:=lib$pToSig(padj)] pvals[grepl("bix",gene_selection),] fwrite(pvals, file=resultsDir("d_h3k27me3_pvals_fisher.csv")) pData[, stat:=as.factor(status)] res <- rblapply(c("is_bix","is_bsx"), function(geneSel) { res1 <- rblapply(c("1","-1"), function(geneDir) { data.table(fixed_size="gene_dir", pval=chisq.test(apply(lib$dtToDf(dcast(pData[gene_selection==paste_(geneSel,geneDir),], status~dev_stage, value.var="N2")), 1, lib$naToZero))$p.value) }, "fixed_val") res2 <- rblapply(c("h3k27me3_gamete_status","h3k27me3_icm_status"), function(devStage) { data.table(fixed_size="dev_stage", pval=chisq.test(apply(lib$dtToDf(dcast(pData[grepl(geneSel,gene_selection) & dev_stage==devStage,], status~gene_selection, value.var="N2")), 1, lib$naToZero))$p.value) }, "fixed_val") rbind(res1,res2) }, "gene_selection") res[, psig:=lib$pToSig(pval)] fwrite(res, file=resultsDir("d_h3k27me3_pvals.csv")) ### annotate 
genes by distance to DMR ### # annotate genes with next DMR or DMRs within a certain window: dtAll[, best_overlap:=ifelse(h3k27me3_imprint==T,"H3K27me3","")] dtAll[, best_dist:=ifelse(h3k27me3_imprint==T,-2,Inf)] dtAll[, best_overlap_M:=best_overlap] dtAll[, best_dist_M:=best_dist] dtAll[, best_overlap_P:=best_overlap] dtAll[, best_dist_P:=best_dist] d <- distanceToNearest(lib$dtToGr(dtAll), curRegs) dtAll[queryHits(d), c("dmr_region_id", "dmr_meth_dif", "dmr_padj", "dmr_group"):=curRegsDt[subjectHits(d), .(rid, meth_diff, padj, grp)]] dtAll[queryHits(d), dmr_dist:=d@elementMetadata$distance] for(curDist in distThreshs) { cname <- paste_("meth_gene",round(curDist/1000),"kb") lbl <- sprintf("Meth:Gene%dkb",curDist/1000) curGeneWindows <- lib$dtToGr(dtAll[,.(chrom, start=start-curDist, end=end+curDist)]) dtAll[, (cname):=overlapsAny(curGeneWindows, curRegs)] dtAll[, (paste_(cname,"M")):=overlapsAny(curGeneWindows, curRegsM)] dtAll[, (paste_(cname,"P")):=overlapsAny(curGeneWindows, curRegsP)] dtAll[(curDist==0 | h3k27me3_imprint==F) & !grepl("meth",best_overlap,ignore.case=T) & get(cname)==T, c("best_dist","best_overlap"):=.(curDist,ifelse(best_overlap=="",lbl,paste0(best_overlap,"; ",lbl)))] dtAll[(curDist==0 | h3k27me3_imprint==F) & !grepl("meth",best_overlap_M,ignore.case=T) & get(paste_(cname,"M"))==T, c("best_dist_M","best_overlap_M"):=.(curDist,ifelse(best_overlap_M=="",lbl,paste0(best_overlap_M,"; ",lbl)))] dtAll[(curDist==0 | h3k27me3_imprint==F) & !grepl("meth",best_overlap_P,ignore.case=T) & get(paste_(cname,"P"))==T, c("best_dist_P","best_overlap_P"):=.(curDist,ifelse(best_overlap_P=="",lbl,paste0(best_overlap_P,"; ",lbl)))] } # annotate with membership in different gene lists: for(lt in geneLists[,unique(list_type)]) { dtAll[, (lt):=gene%in%geneLists[list_type==lt,gene]] } dtAll[topol==T & !grepl("Meth",best_overlap), best_overlap:=ifelse(h3k27me3_imprint==T,"H3K27me3, same TAD","Same TAD")] # genes are associated with at least one DMR if the DMR is within 
250kb of the gene and/or if there's a DMR in the same TAD: dtAll[, meth_imprint:=topol | (is.finite(dmr_dist) & dmr_dist<=250000)] # plot pie charts for genes in different categories and segments indicating the distance to the next DMR or H3K27me3 imprint: pData <- melt(dtAll, measure.vars=names(genesetLabels))[value==T,.(N=.N,genes=paste(sort(gene),collapse="; ")), by=.(variable,best_dist,best_overlap)] pData[, list_type:=factor(variable, levels=names(genesetLabels), labels=genesetLabels)] pData[, list_size:=lib$dt2namedVec(geneLists[,.N,by=.(ID=list_type)])[as.character(variable)]] pData[,best_overlap:=factor(best_overlap,levels=pData[order(-best_dist),unique(best_overlap)])] pData <- pData[order(best_overlap),] pData[, perc:=N/sum(N), by=list_type] pData[best_overlap=="", best_overlap:="> 250kb, outside TAD"] plotCols <- c( "H3K27me3"="#cb181d", "H3K27me3; Meth:Gene0kb"="#6a51a3", "H3K27me3, same TAD"="#c994c7", "Meth:Gene0kb"="#253494", "Meth:Gene100kb"="#2c7fb8", "Meth:Gene250kb"="#41b6c4", "Same TAD"="#a1dab4", "> 250kb, outside TAD"="#efefec" ) pData[, best_overlap:=factor(best_overlap, levels=names(plotCols))] p <- ggplot(pData, aes(x="", y=perc, fill=best_overlap))+ geom_bar(width=1, stat="identity") + coord_polar("y", start=0) + theme_minimal() + theme(axis.title.x = element_blank(), axis.title.y = element_blank(), panel.border = element_blank(), panel.grid=element_blank(), axis.ticks = element_blank(), plot.title=element_text(size=14, face="bold"), axis.text.x=element_blank(), legend.title=element_blank()) + facet_wrap(~sprintf("%s\n(%s)\nn = %d",list_type,variable,list_size), ncol=8) p <- p + scale_fill_manual(values=plotCols) gg(p, "pie", 16, 8, type="pdf") fwrite(pData[list_type%in%c("allexpressed", "published_imprint", "hc_repositories_imprint", "is_confirmed", "is_unconfirmed", "is_equivalent_0.1", "is_nbix", "is_nbsx"), .(perc, best_overlap, list_type, variable, list_size)][order(list_type),], file=resultsDir("source_data_figure_3b.csv"))
/src/imprinting/annotate.R
permissive
cancerbits/santini2021_imprints
R
false
false
15,947
r
#!/usr/bin/env Rscript # # Annotate DMRs based on other genomic features (including # allele-specific H3K27me3 at promoters). # # run("imprinting", "annotate") dependOn("imprinting", c("define_regions", "topol")) # Make a big annotation table with all different types of information about DMRs and DEGs combined: curGenes <- geneAnnotationExpr$gene curRegsDt <- regsDt[imprintedRegionCandidates[ranges_type=="dmrseq",regionId],] curRegs <- lib$dtToGr(curRegsDt,"seqnames") curRegsM <- lib$dtToGr(curRegsDt[imprint_type=="M",],"seqnames") curRegsP <- lib$dtToGr(curRegsDt[imprint_type=="P",],"seqnames") dtAll <- geneAnnotationExpr dtAll <- merge(dtAll, dtExprComplete, by="gene", all=F)[!is.na(chrom),] dmrInPromo <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start=tss-promoWin, end=tss+promoWin)]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) dmrInGbody <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start, end)]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) & !dmrInPromo dmrUpstream <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start=ifelse(strand>0, tss, tss-100000), end=ifelse(strand>0, tss+100000, tss))]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) & !dmrInPromo & !dmrInGbody dmrDownstream <- overlapsAny(lib$dtToGr(geneAnnotationExpr[,.(chrom, start=ifelse(strand>0, start-100000, end), end=ifelse(strand>0, start, end+100000))]), lib$dtToGr(curRegsDt,"seqnames"), ignore.strand=T) & !dmrInPromo & !dmrInGbody dtAll[, has_dmr_in_promo := dmrInPromo] dtAll[, has_dmr_in_gbody := dmrInGbody] dtAll[, has_dmr_100kb_us := dmrUpstream] dtAll[, has_dmr_100kb_ds := dmrDownstream] dtAll[, has_dmr_100kb_usds := dmrUpstream | dmrDownstream] dtAll[, is_confirmed:=gene%in%geneLists[list_type=="is_confirmed",gene]] dtAll[, is_unconfirmed:=gene%in%geneLists[list_type=="is_unconfirmed",gene]] dtAll[,is_bix_up := is_bix & log2fc>0] dtAll[,is_bix_down := is_bix & log2fc<0] dtAll[,is_bsx_up := is_bsx & log2fc>0] dtAll[,is_bsx_down := is_bsx & 
log2fc<0] ### add allele-specific H3K27me3 peak annotation ### # load data obtained from GEO: loadLibraries(c("R.utils","liftOver")) ch <- resultsDir("mm9toMm10.chain.gz") if(!file.exists(gsub(".gz","",ch))) { msg("download mm9 to mm10 chain") download.file("http://hgdownload.soe.ucsc.edu/goldenPath/mm9/liftOver/mm9ToMm10.over.chain.gz", destfile=ch) gunzip(ch) } ch <- import.chain(gsub(".gz","",ch)) f <- resultsDir("GSE76687_ICM_k27me3_maternal_broadpeak.bed.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE76687&format=file&file=GSE76687%5FICM%5Fk27me3%5Fmaternal%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3mat <- fread(f) h3k27me3mat <- lib$dtToGr(h3k27me3mat,"V1","V2","V3") h3k27me3mat <- unlist(liftOver(h3k27me3mat, ch)) f <- resultsDir("GSE76687_ICM_k27me3_paternal_broadpeak.bed.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE76687&format=file&file=GSE76687%5FICM%5Fk27me3%5Fpaternal%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3pat <- fread(f) h3k27me3pat <- lib$dtToGr(h3k27me3pat,"V1","V2","V3") h3k27me3pat <- unlist(liftOver(h3k27me3pat, ch)) h3k27me3Regs <- c(h3k27me3mat,h3k27me3pat) # - annotate all genes with a H3K27me3 peak within 5kb of their TSS: curDist <- 5000 curGeneWindows <- lib$dtToGr(transcriptAnnotationExpr[,.(chrom, start=tss-curDist, end=tss+curDist)]) dtAll[, h3k27me3_imprint:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3Regs, ignore.strand=T), unique(gene)]] # load H3K27me3 data from gametes: f <- resultsDir("GSE76687_h3k27me3_sperm.broadpeak.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM2041066&format=file&file=GSM2041066%5FSperm%5Fk27me3%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3sp <- fread(f) h3k27me3sp <- lib$dtToGr(h3k27me3sp,"V1","V2","V3") h3k27me3sp <- unlist(liftOver(h3k27me3sp, ch)) f <- resultsDir("GSE76687_h3k27me3_oocyte.broadpeak.gz") if(!file.exists(f)) 
download.file("https://ftp.ncbi.nlm.nih.gov/geo/series/GSE76nnn/GSE76687/suppl/GSE76687_MII_oocyte_k27me3_broadpeak.bed.gz", destfile=f) h3k27me3oo <- fread(f) h3k27me3oo <- lib$dtToGr(h3k27me3oo,"V1","V2","V3") h3k27me3oo <- unlist(liftOver(h3k27me3oo, ch)) f <- resultsDir("GSE76687_h3k27me3_icm_any.broadpeak.gz") if(!file.exists(f)) download.file("https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE76687&format=file&file=GSE76687%5FICM%5Fk27me3%5Fbroadpeak%2Ebed%2Egz", destfile=f) h3k27me3icmany <- fread(f) h3k27me3icmany <- lib$dtToGr(h3k27me3icmany,"V1","V2","V3") h3k27me3icmany <- unlist(liftOver(h3k27me3icmany, ch)) # annotate genes overlapping with H3K27me3 peaks: dtAll[, h3k27me3_icm_any:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3icmany, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_icm_mat:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3mat, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_icm_pat:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3pat, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_sperm:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3sp, ignore.strand=T), unique(gene)]] dtAll[, h3k27me3_oocyte:=gene%in%transcriptAnnotationExpr[overlapsAny(curGeneWindows, h3k27me3oo, ignore.strand=T), unique(gene)]] #m <- lib$dtToDf(dtAll[,.(gene, is_bix, is_bix_up, is_bix_down, is_bsx, is_bsx_up, is_bsx_down, h3k27me3_icm_mat, h3k27me3_icm_pat, h3k27me3_imprint, h3k27me3_sperm, h3k27me3_oocyte)]) * 1 # define sperm- or oocyte-specific H3K27me3 as those seen in one but not the other: dtAll[, h3k27me3_sperm_specific:=h3k27me3_sperm&!h3k27me3_oocyte] dtAll[, h3k27me3_oocyte_specific:=!h3k27me3_sperm&h3k27me3_oocyte] # likewise for maternal/paternal in the ICM: dtAll[, h3k27me3_icm_mat_specfic:=h3k27me3_icm_mat&!h3k27me3_icm_pat] dtAll[, h3k27me3_icm_pat_specfic:=h3k27me3_icm_pat&!h3k27me3_icm_mat] # define H3K27me3 imprinting status: 2 or -2 denotes 
sperm/oocyte/ICM-maternal/ICM-paternal specific, 1 denotes unspecific, 0 denotes no H3K27me3 present at promoter: dtAll[, h3k27me3_sperm_status:=ifelse(h3k27me3_sperm,ifelse(h3k27me3_sperm_specific, 2, 1),0)] dtAll[, h3k27me3_oocyte_status:=ifelse(h3k27me3_oocyte,ifelse(h3k27me3_oocyte_specific, -2, -1),0)] dtAll[, h3k27me3_gamete_status:=ifelse(h3k27me3_oocyte|h3k27me3_sperm,ifelse(h3k27me3_oocyte_specific, -2, ifelse(h3k27me3_sperm_specific, 2, 1)),0)] dtAll[, h3k27me3_icm_status:=ifelse(h3k27me3_icm_mat_specfic,-2,ifelse(h3k27me3_icm_pat_specfic,2,ifelse(h3k27me3_icm_any|h3k27me3_icm_pat|h3k27me3_icm_mat, 1, 0)))] # generate a heatmap summarizing H3K27me3 patterns: m <- lib$dtToDf(dtAll[,.(gene, is_bix_down=is_bix_down*3, is_bix_up=is_bix_up*3, is_bsx_down=is_bsx_down*3, is_bsx_up=is_bsx_up*3,h3k27me3_gamete_status, h3k27me3_icm_status)]) * 1 m <- m[rowSums(m[,grepl("is_b[si]x",colnames(m))]!=0)>0,] a <- lib$dtToDf(dtAll[rownames(m),.(gene, dir=sign(log2fc), gene_list=ifelse(is_confirmed==T, "is_confirmed", ifelse(is_nbix==T, "is_nbix", "is_nbsx")))]) aCols <- list(dir=brewer.pal(3,"RdBu"), gene_list=colorPalettes$genesetLabel[unique(a[,2])]) aCols$gene_list[["is_nbsx"]] <- alpha(aCols$gene_list[["is_nbsx"]], 0.25) m <- m[hclust(dist(m))$order,] m <- m[order(a[rownames(m),"dir"], a[rownames(m),"gene_list"]),] colnames(m) <- gsub("h3k27me3_(.+)_status","\\1",colnames(m)) pheatmap::pheatmap(m[,5:6], gaps_row=cumsum(table(a[rownames(m),1])), annotation_colors=aCols, annotation_row=a, cluster_cols=F, cluster_rows=F, main="All BsX", breaks=-3.5:3.5, file=resultsDir("hm_h3k27me3_oo_sp_status_filt.pdf"), treeheight_col=0, treeheight_row=0, cellheight=9, cellwidth=10, col=c("black","red","yellow","white","yellow","blue","black")) pheatmap::pheatmap(m[,5:6], gaps_row=cumsum(table(a[rownames(m),1])), show_rownames=F, annotation_colors=aCols, annotation_row=a, cluster_cols=F, cluster_rows=F, main="All BsX", breaks=-3.5:3.5, 
file=resultsDir("hm_h3k27me3_oo_sp_status_filt_nolbl.pdf"), treeheight_col=0, treeheight_row=0, cellheight=1, cellwidth=10, col=c("black","red","yellow","white","yellow","blue","black")) fwrite(cbind(a[rownames(m),], m[,5:6]), file=resultsDir("source_data_figure_4a.csv")) # genenerate pie charts: ref <- "is_robust_expr" pData <- melt(melt(dtAll, measure.vars=c(ref, "is_bix","is_bsx", "is_confirmed", "is_unconfirmed", "is_nbix", "is_nbsx", "published_imprint", "hc_repositories_imprint", "is_equivalent_0.1", "h3k27me3_confirmed_inoue_2017", "h3k27me3_candidate_inoue_2017", "h3k27me3_imprint"), variable.name="gene_selection", value.name="v1")[v1==T,], measure.vars=c("h3k27me3_gamete_status", "h3k27me3_icm_status"), variable.name="dev_stage")[, .(gene_selection=ifelse(gene_selection==ref,as.character(gene_selection),paste_(gene_selection, sign(log2fc))), dev_stage, status=ifelse(value==-2, "M", ifelse(value==2, "P", ifelse(abs(value)==1, "both", "-"))))] pData <- pData[, .(.N),by=.(dev_stage, gene_selection, status)] pData <- pData[, .(N2=N, N=sum(N), perc=N/sum(N), status), by=.(dev_stage, gene_selection)] selCats <- c("is_nbix", "is_nbsx", "is_confirmed") selCats <- c(ref,c(paste_(selCats,1),paste_(selCats,-1))) p <- ggplot(pData[gene_selection%in%selCats,], aes(x="", y=perc, fill=status))+ geom_bar(width=1, stat="identity") + coord_polar("y", start=0) + theme_minimal() + theme(axis.title.x = element_blank(), axis.title.y = element_blank(), panel.border = element_blank(), panel.grid=element_blank(), axis.ticks = element_blank(), plot.title=element_text(size=14, face="bold"), axis.text.x=element_blank(), legend.title=element_blank()) + facet_wrap(gene_selection~dev_stage+N, dir="v",nrow=2) p <- p + scale_fill_manual(values=c("-"="#EEEEEE", "both"="yellow", "M"="red", "P"="blue")) gg(p, "pie_h3k27_devstage", 44, 6, type="pdf") fwrite(pData[gene_selection%in%grep(ref,selCats,value=T),.(gene_selection, dev_stage, status, perc, N)], 
file=resultsDir("source_data_figure_4b.csv")) fwrite(pData[gene_selection%in%grep("_-1$",selCats,value=T),.(gene_selection, dev_stage, status, perc, N)], file=resultsDir("source_data_figure_4c.csv")) fwrite(pData[gene_selection%in%grep("_1$",selCats,value=T),.(gene_selection, dev_stage, status, perc, N)], file=resultsDir("source_data_figure_4d.csv")) pvals <- rblapply(setdiff(pData[,unique(gene_selection)],ref), function(geneSel) { rblapply(pData[,as.character(unique(dev_stage))], function(devStage) { rblapply(c("M","P"), function(imprintDir) { msgF("%s %s %s", geneSel, devStage, imprintDir) tab <- lib$dtToDf(dcast(unique(pData[gene_selection%in%c(ref,geneSel) & dev_stage==devStage, .(N2=sum(N2),N), by=.(gene_selection, dev_stage, status=ifelse(status==imprintDir,imprintDir,paste0("X",imprintDir)))]), status~gene_selection, fun.aggregate=sum, value.var="N2"))#[,c(2,1)] print(tab) data.table(fg=tab[1,1]/tab[2,1], bg=tab[1,2]/tab[2,2], pval=fisher.test(tab, alternative="greater")$p.value) }, "imprint_dir") }, "dev_stage") }, "gene_selection") pvals[,padj:=p.adjust(pval)] pvals[,psig:=lib$pToSig(padj)] pvals[grepl("bix",gene_selection),] fwrite(pvals, file=resultsDir("d_h3k27me3_pvals_fisher.csv")) pData[, stat:=as.factor(status)] res <- rblapply(c("is_bix","is_bsx"), function(geneSel) { res1 <- rblapply(c("1","-1"), function(geneDir) { data.table(fixed_size="gene_dir", pval=chisq.test(apply(lib$dtToDf(dcast(pData[gene_selection==paste_(geneSel,geneDir),], status~dev_stage, value.var="N2")), 1, lib$naToZero))$p.value) }, "fixed_val") res2 <- rblapply(c("h3k27me3_gamete_status","h3k27me3_icm_status"), function(devStage) { data.table(fixed_size="dev_stage", pval=chisq.test(apply(lib$dtToDf(dcast(pData[grepl(geneSel,gene_selection) & dev_stage==devStage,], status~gene_selection, value.var="N2")), 1, lib$naToZero))$p.value) }, "fixed_val") rbind(res1,res2) }, "gene_selection") res[, psig:=lib$pToSig(pval)] fwrite(res, file=resultsDir("d_h3k27me3_pvals.csv")) ### annotate 
genes by distance to DMR ### # annotate genes with next DMR or DMRs within a certain window: dtAll[, best_overlap:=ifelse(h3k27me3_imprint==T,"H3K27me3","")] dtAll[, best_dist:=ifelse(h3k27me3_imprint==T,-2,Inf)] dtAll[, best_overlap_M:=best_overlap] dtAll[, best_dist_M:=best_dist] dtAll[, best_overlap_P:=best_overlap] dtAll[, best_dist_P:=best_dist] d <- distanceToNearest(lib$dtToGr(dtAll), curRegs) dtAll[queryHits(d), c("dmr_region_id", "dmr_meth_dif", "dmr_padj", "dmr_group"):=curRegsDt[subjectHits(d), .(rid, meth_diff, padj, grp)]] dtAll[queryHits(d), dmr_dist:=d@elementMetadata$distance] for(curDist in distThreshs) { cname <- paste_("meth_gene",round(curDist/1000),"kb") lbl <- sprintf("Meth:Gene%dkb",curDist/1000) curGeneWindows <- lib$dtToGr(dtAll[,.(chrom, start=start-curDist, end=end+curDist)]) dtAll[, (cname):=overlapsAny(curGeneWindows, curRegs)] dtAll[, (paste_(cname,"M")):=overlapsAny(curGeneWindows, curRegsM)] dtAll[, (paste_(cname,"P")):=overlapsAny(curGeneWindows, curRegsP)] dtAll[(curDist==0 | h3k27me3_imprint==F) & !grepl("meth",best_overlap,ignore.case=T) & get(cname)==T, c("best_dist","best_overlap"):=.(curDist,ifelse(best_overlap=="",lbl,paste0(best_overlap,"; ",lbl)))] dtAll[(curDist==0 | h3k27me3_imprint==F) & !grepl("meth",best_overlap_M,ignore.case=T) & get(paste_(cname,"M"))==T, c("best_dist_M","best_overlap_M"):=.(curDist,ifelse(best_overlap_M=="",lbl,paste0(best_overlap_M,"; ",lbl)))] dtAll[(curDist==0 | h3k27me3_imprint==F) & !grepl("meth",best_overlap_P,ignore.case=T) & get(paste_(cname,"P"))==T, c("best_dist_P","best_overlap_P"):=.(curDist,ifelse(best_overlap_P=="",lbl,paste0(best_overlap_P,"; ",lbl)))] } # annotate with membership in different gene lists: for(lt in geneLists[,unique(list_type)]) { dtAll[, (lt):=gene%in%geneLists[list_type==lt,gene]] } dtAll[topol==T & !grepl("Meth",best_overlap), best_overlap:=ifelse(h3k27me3_imprint==T,"H3K27me3, same TAD","Same TAD")] # genes are associated with at least one DMR if the DMR is within 
250kb of the gene and/or if there's a DMR in the same TAD: dtAll[, meth_imprint:=topol | (is.finite(dmr_dist) & dmr_dist<=250000)] # plot pie charts for genes in different categories and segments indicating the distance to the next DMR or H3K27me3 imprint: pData <- melt(dtAll, measure.vars=names(genesetLabels))[value==T,.(N=.N,genes=paste(sort(gene),collapse="; ")), by=.(variable,best_dist,best_overlap)] pData[, list_type:=factor(variable, levels=names(genesetLabels), labels=genesetLabels)] pData[, list_size:=lib$dt2namedVec(geneLists[,.N,by=.(ID=list_type)])[as.character(variable)]] pData[,best_overlap:=factor(best_overlap,levels=pData[order(-best_dist),unique(best_overlap)])] pData <- pData[order(best_overlap),] pData[, perc:=N/sum(N), by=list_type] pData[best_overlap=="", best_overlap:="> 250kb, outside TAD"] plotCols <- c( "H3K27me3"="#cb181d", "H3K27me3; Meth:Gene0kb"="#6a51a3", "H3K27me3, same TAD"="#c994c7", "Meth:Gene0kb"="#253494", "Meth:Gene100kb"="#2c7fb8", "Meth:Gene250kb"="#41b6c4", "Same TAD"="#a1dab4", "> 250kb, outside TAD"="#efefec" ) pData[, best_overlap:=factor(best_overlap, levels=names(plotCols))] p <- ggplot(pData, aes(x="", y=perc, fill=best_overlap))+ geom_bar(width=1, stat="identity") + coord_polar("y", start=0) + theme_minimal() + theme(axis.title.x = element_blank(), axis.title.y = element_blank(), panel.border = element_blank(), panel.grid=element_blank(), axis.ticks = element_blank(), plot.title=element_text(size=14, face="bold"), axis.text.x=element_blank(), legend.title=element_blank()) + facet_wrap(~sprintf("%s\n(%s)\nn = %d",list_type,variable,list_size), ncol=8) p <- p + scale_fill_manual(values=plotCols) gg(p, "pie", 16, 8, type="pdf") fwrite(pData[list_type%in%c("allexpressed", "published_imprint", "hc_repositories_imprint", "is_confirmed", "is_unconfirmed", "is_equivalent_0.1", "is_nbix", "is_nbsx"), .(perc, best_overlap, list_type, variable, list_size)][order(list_type),], file=resultsDir("source_data_figure_3b.csv"))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/filter_prevalent.R \name{filter_prevalent} \alias{filter_prevalent} \title{filter_prevalent} \usage{ filter_prevalent(x, detection.threshold, prevalence.threshold) } \arguments{ \item{x}{\code{\link{phyloseq-class}} object} \item{detection.threshold}{Detection threshold for absence/presence.} \item{prevalence.threshold}{Prevalence threshold} } \value{ Filtered phyloseq object including only prevalent taxa } \description{ Filter the phyloseq object to include only prevalent taxa } \examples{ #peerj32 <- download_microbiome("peerj32") #filter_prevalent(peerj32$physeq, 200, 0.2) } \author{ Contact: Leo Lahti \email{microbiome-admin@googlegroups.com} } \references{ To cite the microbiome R package, see citation('microbiome') } \keyword{utilities}
/man/filter_prevalent.Rd
no_license
TTloveTT/microbiome
R
false
true
838
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/filter_prevalent.R \name{filter_prevalent} \alias{filter_prevalent} \title{filter_prevalent} \usage{ filter_prevalent(x, detection.threshold, prevalence.threshold) } \arguments{ \item{x}{\code{\link{phyloseq-class}} object} \item{detection.threshold}{Detection threshold for absence/presence.} \item{prevalence.threshold}{Prevalence threshold} } \value{ Filtered phyloseq object including only prevalent taxa } \description{ Filter the phyloseq object to include only prevalent taxa } \examples{ #peerj32 <- download_microbiome("peerj32") #filter_prevalent(peerj32$physeq, 200, 0.2) } \author{ Contact: Leo Lahti \email{microbiome-admin@googlegroups.com} } \references{ To cite the microbiome R package, see citation('microbiome') } \keyword{utilities}
\name{src_sql} \alias{src_sql} \title{Create a "sql src" object} \usage{ src_sql(subclass, con, ...) } \arguments{ \item{subclass}{name of subclass. "src_sql" is an abstract base class, so you must supply this value. \code{src_} is automatically prepended to the class name} \item{con}{the connection object} \item{...}{fields used by object} } \description{ \code{src_sql} is the standard constructor for all SLQ based srcs. } \keyword{internal}
/dplyr/man/src_sql.Rd
no_license
radfordneal/R-package-mods
R
false
false
460
rd
\name{src_sql} \alias{src_sql} \title{Create a "sql src" object} \usage{ src_sql(subclass, con, ...) } \arguments{ \item{subclass}{name of subclass. "src_sql" is an abstract base class, so you must supply this value. \code{src_} is automatically prepended to the class name} \item{con}{the connection object} \item{...}{fields used by object} } \description{ \code{src_sql} is the standard constructor for all SLQ based srcs. } \keyword{internal}
#http://movie.douban.com/subject/5308265/discussion/ ##x<-get_movie_discussions(movieid=5308265,results = 100) .get_movie_discussion0<-function(u,fresh,verbose){ p<-.refreshURL(u,fresh, verbose) title<-gsub('[\n ]','',sapply(getNodeSet(p, '//head//title'),xmlValue)) published<-sapply(getNodeSet(p, '//div[@class="article"]//span[@class="mn"]'),xmlValue) published<-gsub("\n| ","", published) n1<-getNodeSet(p, '//div[@class="article"]//span[@class="pl2"]//a') author<-gsub("[\n ]","",sapply(n1,xmlValue)[1]) author_uri<-sapply(n1,function(x) xmlGetAttr(x, "href"))[1] dicussion<-sapply(getNodeSet(p, '//div[@class="article"]//span[@class=""]')[1],xmlValue) useful<-sapply(getNodeSet(p, '//span[@class="useful"]//em'),xmlValue) unuseful<-sapply(getNodeSet(p, '//span[@class="unuseful"]//em'),xmlValue) out<-c(dicussion_uri=u,title=title,published=published,author=author, author_uri=author_uri,dicussion=dicussion,useful=useful,unuseful=unuseful) return(out) } ################################################# get_movie_discussions<-function(movieid,results = 100, fresh = 10,count=20, verbose = TRUE,...){ u = paste0("http://movie.douban.com/subject/", movieid, "/discussion/") p <- .refreshURL(u, fresh, verbose) total<-gsub('[^0-9]','',sapply(getNodeSet(p, '//span[@class="count"]'),xmlValue)) if (length(total)==0) stop('There is no discussions about this movie.') cat('-----There is a tatal of ',total,' movie discussions.-----\n') pages<-ceiling(min(results,as.integer(total))/count) out <- data.frame(matrix(nrow = pages * count, ncol = 8), stringsAsFactors = F) colnames(out) <- c("dicussion_uri", "title", "published", "author", "author_uri", "dicussion", "useful", "unuseful") k=1 if(pages>0){ for(pg in 1:pages){ u=paste0('http://movie.douban.com/subject/',movieid, '/discussion/?start=',(pg-1)*count,'&sort=vote/') if(verbose==TRUE) { #cat('Getting',(pg-1)*20+1,'--',pg*20,'discussions...\n') cat("Getting movie discussion URLS from page=",pg,": ",u,"...\n") } p <- .refreshURL(u, 
fresh, verbose) n1<-getNodeSet(p ,'//table[@class="olt"]//td/a') href<-unique(sapply(n1,function(x) xmlGetAttr(x, "href"))) href<-href[grep('/discussion/',href)] href <- href[!href %in% out$dicussion_uri] n=length(href) if(n>0){ for(i in 1:n){ u0<-href[i] if(verbose==TRUE){ cat(" Getting ", k, " movie discussion from URL: ", u0, " ...\n") } out0<-.get_movie_discussion0(u=u0,fresh,verbose) if(length(out0)==8){ out[k,]<-out0 k=k+1 } else{ cat(" !!!! Getting failed at URL: ", u0, " \n") } } } } } out <- out[!is.na(out[, 1]), ] return(out) }
/R/get_movie_discussions.R
no_license
ljtyduyu/Rdouban
R
false
false
2,952
r
#http://movie.douban.com/subject/5308265/discussion/ ##x<-get_movie_discussions(movieid=5308265,results = 100) .get_movie_discussion0<-function(u,fresh,verbose){ p<-.refreshURL(u,fresh, verbose) title<-gsub('[\n ]','',sapply(getNodeSet(p, '//head//title'),xmlValue)) published<-sapply(getNodeSet(p, '//div[@class="article"]//span[@class="mn"]'),xmlValue) published<-gsub("\n| ","", published) n1<-getNodeSet(p, '//div[@class="article"]//span[@class="pl2"]//a') author<-gsub("[\n ]","",sapply(n1,xmlValue)[1]) author_uri<-sapply(n1,function(x) xmlGetAttr(x, "href"))[1] dicussion<-sapply(getNodeSet(p, '//div[@class="article"]//span[@class=""]')[1],xmlValue) useful<-sapply(getNodeSet(p, '//span[@class="useful"]//em'),xmlValue) unuseful<-sapply(getNodeSet(p, '//span[@class="unuseful"]//em'),xmlValue) out<-c(dicussion_uri=u,title=title,published=published,author=author, author_uri=author_uri,dicussion=dicussion,useful=useful,unuseful=unuseful) return(out) } ################################################# get_movie_discussions<-function(movieid,results = 100, fresh = 10,count=20, verbose = TRUE,...){ u = paste0("http://movie.douban.com/subject/", movieid, "/discussion/") p <- .refreshURL(u, fresh, verbose) total<-gsub('[^0-9]','',sapply(getNodeSet(p, '//span[@class="count"]'),xmlValue)) if (length(total)==0) stop('There is no discussions about this movie.') cat('-----There is a tatal of ',total,' movie discussions.-----\n') pages<-ceiling(min(results,as.integer(total))/count) out <- data.frame(matrix(nrow = pages * count, ncol = 8), stringsAsFactors = F) colnames(out) <- c("dicussion_uri", "title", "published", "author", "author_uri", "dicussion", "useful", "unuseful") k=1 if(pages>0){ for(pg in 1:pages){ u=paste0('http://movie.douban.com/subject/',movieid, '/discussion/?start=',(pg-1)*count,'&sort=vote/') if(verbose==TRUE) { #cat('Getting',(pg-1)*20+1,'--',pg*20,'discussions...\n') cat("Getting movie discussion URLS from page=",pg,": ",u,"...\n") } p <- .refreshURL(u, 
fresh, verbose) n1<-getNodeSet(p ,'//table[@class="olt"]//td/a') href<-unique(sapply(n1,function(x) xmlGetAttr(x, "href"))) href<-href[grep('/discussion/',href)] href <- href[!href %in% out$dicussion_uri] n=length(href) if(n>0){ for(i in 1:n){ u0<-href[i] if(verbose==TRUE){ cat(" Getting ", k, " movie discussion from URL: ", u0, " ...\n") } out0<-.get_movie_discussion0(u=u0,fresh,verbose) if(length(out0)==8){ out[k,]<-out0 k=k+1 } else{ cat(" !!!! Getting failed at URL: ", u0, " \n") } } } } } out <- out[!is.na(out[, 1]), ] return(out) }
# Uncomment if you need to load datasets #setwd("~/Data Science Projects/RNA_TPM") # Put our datasets into memory #tpms <- fread("tpm.gct") #pheno <- fread("Pheno.txt") #attributes <- fread("attributes.txt") # Use tissue parser to grab interesting tissue type Heart_tissue <- tissue_parser(tpms, attributes, pheno, "Heart") # Set protein coding genes symbol pc_id<-protein_coding_parser(Heart_tissue) # Grab only the genes that are protein coding genes pc_subsetter <- function(data_tissue){ return(data_tissue %>% select.(SAMPID, SMTS, SEX, AGE, DTHHRDY, c(pc_id))) } Heart_tissue <- pc_subsetter(Heart_tissue) # For initial testing (commented out) #library(tidymodels) #Heart_split <- initial_split(Heart_tissue, prop = 9/10, strata = AGE) #Heart_training <- training(Heart_split) #Heart_test <- testing(Heart_split) # Create our Heart_model Heart_model<-age_tissue_guesser(Heart_tissue, max_sample = 350, v=6, metric = "accuracy") # See how it did Heart_preds <- predict_age(Heart_model, Heart_tissue)
/Models/Heart_Model.R
no_license
ShaneGPutney/Genetic-Aging-Analysis
R
false
false
1,170
r
# Uncomment if you need to load datasets #setwd("~/Data Science Projects/RNA_TPM") # Put our datasets into memory #tpms <- fread("tpm.gct") #pheno <- fread("Pheno.txt") #attributes <- fread("attributes.txt") # Use tissue parser to grab interesting tissue type Heart_tissue <- tissue_parser(tpms, attributes, pheno, "Heart") # Set protein coding genes symbol pc_id<-protein_coding_parser(Heart_tissue) # Grab only the genes that are protein coding genes pc_subsetter <- function(data_tissue){ return(data_tissue %>% select.(SAMPID, SMTS, SEX, AGE, DTHHRDY, c(pc_id))) } Heart_tissue <- pc_subsetter(Heart_tissue) # For initial testing (commented out) #library(tidymodels) #Heart_split <- initial_split(Heart_tissue, prop = 9/10, strata = AGE) #Heart_training <- training(Heart_split) #Heart_test <- testing(Heart_split) # Create our Heart_model Heart_model<-age_tissue_guesser(Heart_tissue, max_sample = 350, v=6, metric = "accuracy") # See how it did Heart_preds <- predict_age(Heart_model, Heart_tissue)
library(tibble)
library(dplyr)

# Basic unstacking: build a two-column data frame (devices, t_c) from a
# named list with treatment ('t') and control ('c') device ids.
device_id <- list(t = c('abc', 'efg'), c = c('efs', 'gjie', 'werj'))

df_1 <- data.frame(devices = device_id$t, t_c = rep(1, length(device_id$t)))
# BUG FIX: the control rows must be sized by the control vector, not the
# treatment vector — the original used length(device_id$t) here, which
# errors (or silently recycles) when the two groups differ in size.
df_0 <- data.frame(devices = device_id$c, t_c = rep(0, length(device_id$c)))
rbind(df_0, df_1)

# Same exercise with the id vectors wrapped in tibbles.
# as.tibble() is deprecated; as_tibble() is the supported spelling.
device_id <- list(t = as_tibble(c('abc', 'efg')),
                  c = as_tibble(c('efs', 'gjie', 'werj')))
df_1 <- data.frame(device_id$t, t_c = 1)  # scalar 1 recycles to every row
colnames(df_1) <- c('devices', 't_c')
df_0 <- data.frame(device_id$c, t_c = 0)
colnames(df_0) <- c('devices', 't_c')
rbind(df_0, df_1)

# To see more summary stats we want a data frame with three variables:
# devices, t_c, ad_id.
ads_device_id <- list(
  list(t = as_tibble(c('abc1', 'efg')),
       c = as_tibble(c('efs', 'gjie', 'werj'))),
  list(t = as_tibble(c('gjie', 'gfg', 'efg', 'qwej')),
       c = as_tibble(c('ewfs', 'adfgjie', 'erwerj', 'erwewer13')))
)

# Build the (devices, t_c) frame for a single ad's treatment/control lists.
# BUG FIX: the original referenced the global `device_id` instead of the
# `device_id_list` argument when building the control rows, so every ad
# silently reused the first example's control group.
gen_data_frame <- function(device_id_list) {
  df_1 <- data.frame(device_id_list$t, t_c = 1)
  colnames(df_1) <- c('devices', 't_c')
  df_0 <- data.frame(device_id_list$c, t_c = 0)
  colnames(df_0) <- c('devices', 't_c')
  rbind(df_0, df_1)
}

gen_data_frame(ads_device_id[[1]])
gen_data_frame(ads_device_id[[2]])

list_df <- lapply(ads_device_id, gen_data_frame)
nathan_is_happy_df <- bind_rows(list_df, .id = 'ad_id')

# Summary statistics
# First: how many ads each device is exposed to.
nathan_is_happy_df %>%
  group_by(devices) %>%
  mutate(ad_count = n())

# Running count of treated exposures per device, ordered by ad.
nathan_is_happy_df %>%
  arrange(devices, ad_id) %>%
  group_by(devices) %>%
  mutate(ad_continuous = cumsum(t_c))

# One row per (county_state, three_digit_zip) pair.
# BUG FIX: top_n(row_number() == 1) misuses top_n() (its argument is a row
# count, not a predicate); filter() keeps the first row of each group, which
# is what was intended.
# NOTE(review): `df` is not defined in this file — presumably loaded
# elsewhere; confirm against the calling environment.
Z_county <- df %>%
  select(county_state, three_digit_zip) %>%
  group_by(county_state, three_digit_zip) %>%
  filter(row_number() == 1)
/shapeshift/nathan_list_to_df.R
no_license
chkangkch/Stat4Econ
R
false
false
2,207
r
library(tibble)
library(dplyr)

# Basic unstacking: build a two-column data frame (devices, t_c) from a
# named list with treatment ('t') and control ('c') device ids.
device_id <- list(t = c('abc', 'efg'), c = c('efs', 'gjie', 'werj'))

df_1 <- data.frame(devices = device_id$t, t_c = rep(1, length(device_id$t)))
# BUG FIX: the control rows must be sized by the control vector, not the
# treatment vector — the original used length(device_id$t) here, which
# errors (or silently recycles) when the two groups differ in size.
df_0 <- data.frame(devices = device_id$c, t_c = rep(0, length(device_id$c)))
rbind(df_0, df_1)

# Same exercise with the id vectors wrapped in tibbles.
# as.tibble() is deprecated; as_tibble() is the supported spelling.
device_id <- list(t = as_tibble(c('abc', 'efg')),
                  c = as_tibble(c('efs', 'gjie', 'werj')))
df_1 <- data.frame(device_id$t, t_c = 1)  # scalar 1 recycles to every row
colnames(df_1) <- c('devices', 't_c')
df_0 <- data.frame(device_id$c, t_c = 0)
colnames(df_0) <- c('devices', 't_c')
rbind(df_0, df_1)

# To see more summary stats we want a data frame with three variables:
# devices, t_c, ad_id.
ads_device_id <- list(
  list(t = as_tibble(c('abc1', 'efg')),
       c = as_tibble(c('efs', 'gjie', 'werj'))),
  list(t = as_tibble(c('gjie', 'gfg', 'efg', 'qwej')),
       c = as_tibble(c('ewfs', 'adfgjie', 'erwerj', 'erwewer13')))
)

# Build the (devices, t_c) frame for a single ad's treatment/control lists.
# BUG FIX: the original referenced the global `device_id` instead of the
# `device_id_list` argument when building the control rows, so every ad
# silently reused the first example's control group.
gen_data_frame <- function(device_id_list) {
  df_1 <- data.frame(device_id_list$t, t_c = 1)
  colnames(df_1) <- c('devices', 't_c')
  df_0 <- data.frame(device_id_list$c, t_c = 0)
  colnames(df_0) <- c('devices', 't_c')
  rbind(df_0, df_1)
}

gen_data_frame(ads_device_id[[1]])
gen_data_frame(ads_device_id[[2]])

list_df <- lapply(ads_device_id, gen_data_frame)
nathan_is_happy_df <- bind_rows(list_df, .id = 'ad_id')

# Summary statistics
# First: how many ads each device is exposed to.
nathan_is_happy_df %>%
  group_by(devices) %>%
  mutate(ad_count = n())

# Running count of treated exposures per device, ordered by ad.
nathan_is_happy_df %>%
  arrange(devices, ad_id) %>%
  group_by(devices) %>%
  mutate(ad_continuous = cumsum(t_c))

# One row per (county_state, three_digit_zip) pair.
# BUG FIX: top_n(row_number() == 1) misuses top_n() (its argument is a row
# count, not a predicate); filter() keeps the first row of each group, which
# is what was intended.
# NOTE(review): `df` is not defined in this file — presumably loaded
# elsewhere; confirm against the calling environment.
Z_county <- df %>%
  select(county_state, three_digit_zip) %>%
  group_by(county_state, three_digit_zip) %>%
  filter(row_number() == 1)
library(tidyverse)
library(dslabs)

#######################################################################
#                     Start of Answer, for Q-1                        #
#######################################################################
data("movielens")

# colnames(movielens)
# "movieId" "title" "year" "genres" "userId" "rating" "timestamp"

# Number of ratings per movie-release year.
p <- movielens %>% count(year)
# typeof(p)  # "list"
# plot(p)
p %>%
  ggplot(aes(x = year, y = sqrt(n))) +
  geom_point()

# To find the year with the max number of ratings:
p[which(p$n == max(p$n)), ]
# So, the answer is 1995
# A tibble: 1 x 2
#    year     n
#   <int> <int>
# 1  1995  6635

movielens %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  theme(legend.position = "none")

# The plot above shows that the bulk of reviews falls between 1990 and
# 2000, so narrow the result down to that window.
movielens %>%
  filter(movielens$year >= 1990 & movielens$year <= 2000) %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  theme(legend.position = "none")

#######################################################################
#                     Start of Answer, for Q-5                        #
#######################################################################
movielens2 <- movielens
colnames(movielens2)
head(movielens2)
movielens2 <- mutate(movielens2, date = as_datetime(timestamp))
colnames(movielens2)
head(movielens2)
#######################################################################
library(lubridate)

# Year of the most recent rating of Forrest Gump.
last_year_Forrest <- movielens %>%
  filter(title == unique(grep('Forrest Gump', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  arrange(desc(timestamp)) %>%
  slice(1) %>%
  select(timestamp) %>%
  as.numeric() %>%
  as.POSIXct(origin = "1970-01-01") %>%
  year()
last_year_Forrest
# 2016

# Year of the most recent rating of The Shawshank Redemption.
last_year_Shawshank <- movielens %>%
  filter(title == unique(grep('Shawshank Redemption', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  arrange(desc(timestamp)) %>%
  slice(1) %>%
  select(timestamp) %>%
  as.numeric() %>%
  as.POSIXct(origin = "1970-01-01") %>%
  year()
last_year_Shawshank
# 2016

######################
movielens %>%
  filter(movielens$year >= 1993 & movielens$year <= last_year_Forrest) %>%
  filter(title == unique(grep('Forrest Gump', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies Forrest Gump") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1993, last_year_Forrest, 10)) +
  theme(legend.position = "none")

movielens %>%
  filter(movielens$year >= 1993 & movielens$year <= last_year_Shawshank) %>%
  filter(title == unique(grep('Shawshank Redemption', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies Shawshank Redemption") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1993, last_year_Shawshank, 10)) +
  theme(legend.position = "none")

###############################################################
x <- unique(movielens$year[movielens$year >= 1994])  # returns 1994 to 2016. NA.
x  # returns total number of unique records
x <- length(unique(movielens$year[movielens$year >= 1994]))
x  # 24

# Error: n() should only be called in a data context
x <- ( avg = n() / length(unique(movielens$year[movielens$year >= 1994])) )
# rlang::last_error()

y <- unique(movielens$year[movielens$year >= 1994])
y

# boxplots
# movies %>%
movielens %>%
  # ggplot(aes(x = year, y = sqrt(tot_ratings), fill = year)) +  # object 'tot_ratings' not found
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_boxplot(aes(group = year)) +
  ggtitle("Movies") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  theme(legend.position = "none")

# calculate the medians directly and then plot with cols:
ggplot(data = medians, aes(x = year, y = medi, fill = year)) +  # object 'medians' not found
  geom_col() +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  ggtitle("Movies")
/Machine_Learning_Recommendation_Systems_Q_1_5.R
no_license
rishi10819/machine_learning
R
false
false
4,522
r
library(tidyverse)
library(dslabs)

#######################################################################
#                     Start of Answer, for Q-1                        #
#######################################################################
data("movielens")

# colnames(movielens)
# "movieId" "title" "year" "genres" "userId" "rating" "timestamp"

# Number of ratings per movie-release year.
p <- movielens %>% count(year)
# typeof(p)  # "list"
# plot(p)
p %>%
  ggplot(aes(x = year, y = sqrt(n))) +
  geom_point()

# To find the year with the max number of ratings:
p[which(p$n == max(p$n)), ]
# So, the answer is 1995
# A tibble: 1 x 2
#    year     n
#   <int> <int>
# 1  1995  6635

movielens %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  theme(legend.position = "none")

# The plot above shows that the bulk of reviews falls between 1990 and
# 2000, so narrow the result down to that window.
movielens %>%
  filter(movielens$year >= 1990 & movielens$year <= 2000) %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  theme(legend.position = "none")

#######################################################################
#                     Start of Answer, for Q-5                        #
#######################################################################
movielens2 <- movielens
colnames(movielens2)
head(movielens2)
movielens2 <- mutate(movielens2, date = as_datetime(timestamp))
colnames(movielens2)
head(movielens2)
#######################################################################
library(lubridate)

# Year of the most recent rating of Forrest Gump.
last_year_Forrest <- movielens %>%
  filter(title == unique(grep('Forrest Gump', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  arrange(desc(timestamp)) %>%
  slice(1) %>%
  select(timestamp) %>%
  as.numeric() %>%
  as.POSIXct(origin = "1970-01-01") %>%
  year()
last_year_Forrest
# 2016

# Year of the most recent rating of The Shawshank Redemption.
last_year_Shawshank <- movielens %>%
  filter(title == unique(grep('Shawshank Redemption', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  arrange(desc(timestamp)) %>%
  slice(1) %>%
  select(timestamp) %>%
  as.numeric() %>%
  as.POSIXct(origin = "1970-01-01") %>%
  year()
last_year_Shawshank
# 2016

######################
movielens %>%
  filter(movielens$year >= 1993 & movielens$year <= last_year_Forrest) %>%
  filter(title == unique(grep('Forrest Gump', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies Forrest Gump") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1993, last_year_Forrest, 10)) +
  theme(legend.position = "none")

movielens %>%
  filter(movielens$year >= 1993 & movielens$year <= last_year_Shawshank) %>%
  filter(title == unique(grep('Shawshank Redemption', movielens$title,
                              ignore.case = TRUE, value = TRUE))) %>%
  group_by(year) %>%
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_col() +
  ggtitle("Movies Shawshank Redemption") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1993, last_year_Shawshank, 10)) +
  theme(legend.position = "none")

###############################################################
x <- unique(movielens$year[movielens$year >= 1994])  # returns 1994 to 2016. NA.
x  # returns total number of unique records
x <- length(unique(movielens$year[movielens$year >= 1994]))
x  # 24

# Error: n() should only be called in a data context
x <- ( avg = n() / length(unique(movielens$year[movielens$year >= 1994])) )
# rlang::last_error()

y <- unique(movielens$year[movielens$year >= 1994])
y

# boxplots
# movies %>%
movielens %>%
  # ggplot(aes(x = year, y = sqrt(tot_ratings), fill = year)) +  # object 'tot_ratings' not found
  ggplot(aes(x = year, y = sqrt(rating), fill = year)) +
  geom_boxplot(aes(group = year)) +
  ggtitle("Movies") +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  theme(legend.position = "none")

# calculate the medians directly and then plot with cols:
ggplot(data = medians, aes(x = year, y = medi, fill = year)) +  # object 'medians' not found
  geom_col() +
  scale_fill_gradientn(colours = rainbow(100)) +
  scale_x_continuous(breaks = seq(1900, 2020, 10)) +
  ggtitle("Movies")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_maps.R \docType{data} \name{biceland} \alias{biceland} \title{Icelandic shorelines - mainland and islands} \format{\code{SpatialPolygonsDataFrame}} \source{ \url{http://www.lmi.is} } \usage{ biceland } \description{ The original data from Icelandic National Land Survey (Landmælingar Íslands - LMÍ). The original contains 6249 separate polygons (islands and skerries) with a total of 266502 coordinate points. } \author{ Einar Hjorleifsson <einar.hjorleifsson@gmail.com> } \keyword{datasets}
/man/biceland.Rd
no_license
sigurdurthorjonsson/gisland
R
false
true
579
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_maps.R \docType{data} \name{biceland} \alias{biceland} \title{Icelandic shorelines - mainland and islands} \format{\code{SpatialPolygonsDataFrame}} \source{ \url{http://www.lmi.is} } \usage{ biceland } \description{ The original data from Icelandic National Land Survey (Landmælingar Íslands - LMÍ). The original contains 6249 separate polygons (islands and skerries) with a total of 266502 coordinate points. } \author{ Einar Hjorleifsson <einar.hjorleifsson@gmail.com> } \keyword{datasets}
# Prepare workspace ----
suppressWarnings({library(ggplot2)})
suppressWarnings({library(tidyverse)})
suppressWarnings({library(caret)})
suppressWarnings({library(corrplot)})
suppressWarnings({library(gridExtra)})
suppressWarnings({library(MLmetrics)})

# Upload dataset ----
path <- "C:/Users/user/Documents/eRUM2020/hmeq.csv"
df <- read.csv(path)

# Quick structure checks ----
dim(df)
sapply(df, class)
head(df, 5)
summary(df)
str(df)

# Formatting features and managing some levels ----
BAD <- df$BAD <- as.factor(df$BAD)
df$LOAN <- as.numeric(df$LOAN)
for (col in c('DEROG', 'DELINQ', 'NINQ', 'CLNO')) {
  df[[col]] <- as.factor(df[[col]])
}
# BUG FIX: empty strings were replaced with the literal string "NA", which
# is.na() does not detect, so the *_NA flags and the imputation below
# silently skipped JOB/REASON — and factor(..., labels = ...) later failed
# on the extra level. Use a real missing value instead.
df$JOB[df$JOB == ""] <- NA
df$REASON[df$REASON == ""] <- NA

# Check missing values ----
# Returns a data frame (col_name, perc_missing) for every column with at
# least one NA, sorted by percentage missing, descending.
mi_summary <- function(data_frame) {
  perc <- vapply(data_frame, function(x) mean(is.na(x)) * 100, numeric(1))
  out <- data.frame(col_name = names(perc)[perc > 0],
                    perc_missing = round(perc[perc > 0], 6))
  out <- out[order(out$perc_missing, decreasing = TRUE), ]
  rownames(out) <- NULL
  out
}
missing_summary <- mi_summary(df)
missing_summary

# Add a boolean *_NA indicator column for every feature with missing values ----
na_flag_cols <- c('DEBTINC', 'DEROG', 'DELINQ', 'MORTDUE', 'YOJ', 'NINQ',
                  'CLAGE', 'CLNO', 'VALUE', 'JOB', 'REASON')
for (col in na_flag_cols) {
  df[[paste0(col, '_NA')]] <- ifelse(is.na(df[[col]]), 1, 0)
}

# Impute missing values: median for numeric columns, modal level otherwise ----
# BUG FIX: the original branched on class(...) == 'factor', which sent
# character columns (JOB/REASON under stringsAsFactors = FALSE) to the
# median() branch; branching on is.numeric() handles both cases. The modal
# computation also excludes NA so a missing value can never "win".
for (col in missing_summary$col_name) {
  if (is.numeric(df[[col]])) {
    df[is.na(df[[col]]), col] <- median(df[[col]], na.rm = TRUE)
  } else {
    lv <- unique(df[[col]][!is.na(df[[col]])])
    df[is.na(df[[col]]), col] <- lv[which.max(tabulate(match(df[[col]], lv)))]
  }
}

# Verify nothing is missing any more ----
pMiss <- function(x) { sum(is.na(x)) / length(x) * 100 }
pMiss <- apply(df, 2, pMiss)
pMiss <- pMiss[pMiss > 0]
pMiss <- pMiss[order(pMiss, decreasing = TRUE)]
pMiss

# Formatting the new features and managing some levels ----
for (col in paste0(na_flag_cols, '_NA')) {
  df[[col]] <- as.factor(df[[col]])
}
df$JOB <- factor(df$JOB,
                 labels = c('Mgr', 'Office', 'Other', 'ProfExe', 'Sales', 'Self'))
df$REASON <- factor(df$REASON, labels = c('DebtCon', 'HomeImp'))

# Split into categorical, boolean and numerical variables ----
cat <- df[, sapply(df, is.factor)] %>%
  select_if(~ nlevels(.) <= 15) %>%
  select(-BAD)
bol <- df[, paste0(na_flag_cols, '_NA')]
num <- df[, sapply(df, is.numeric)]

# Class distribution of the target variable ----
cbind(freq = table(df$BAD), percentage = prop.table(table(df$BAD)) * 100)

ggplot(df, aes(BAD, fill = BAD)) +
  geom_bar() +
  scale_fill_brewer(palette = "Set1") +
  ggtitle("Distribution of Target variable")

# Categorical features (barplot, univariate, bivariate analysis) ----
# NOTE(review): `cat` shadows base::cat(); calls to cat(...) still resolve
# to the function, but renaming the data frame would be clearer.
cat <- cat[, c('DELINQ', 'REASON', 'JOB', 'DEROG')]

# Univariate analysis: barplots
for (i in seq_along(cat)) {
  counts <- table(cat[, i])
  barplot(counts, main = names(cat)[i],
          col = c("blue", "red", "green", "orange", "purple"))
}

# Bivariate analysis with chi-squared feature-selection tests
par(mfrow = c(2, 2))
for (i in seq_along(cat)) {
  freq <- table(cat[, i])
  percentage <- prop.table(table(cat[, i])) * 100
  freq_cat_outcome <- table(BAD, cat[, i])
  name <- names(cat)[i]
  cat(sep = "\n")
  cat(paste("Distribution of", name), sep = "\n")
  print(cbind(freq, percentage))
  cat(sep = "\n")
  cat(paste("Distribution by Target variable and", name), sep = "\n")
  print(freq_cat_outcome)
  cat(sep = "\n")
  cat(paste("Chi-squared test by Target variable and", name), sep = "\n")
  suppressWarnings({print(chisq.test(table(BAD, cat[, i])))})
}

# Visualization of bivariate analysis
# NOTE(review): `cat` has no BAD column (dropped above); aes() falls back to
# the global `BAD` created earlier — it works, but only by accident.
pl1 <- cat %>% ggplot(aes(x = BAD, y = DELINQ, fill = BAD)) +
  geom_bar(stat = 'identity') + ggtitle("Distribution by BAD and DELINQ")
pl2 <- cat %>% ggplot(aes(x = BAD, y = REASON, fill = BAD)) +
  geom_bar(stat = 'identity') + ggtitle("Distribution by BAD and REASON")
pl3 <- cat %>% ggplot(aes(x = BAD, y = JOB, fill = BAD)) +
  geom_bar(stat = 'identity') + ggtitle("Distribution by BAD and JOB")
pl4 <- cat %>% ggplot(aes(x = BAD, y = DEROG, fill = BAD)) +
  geom_bar(stat = 'identity') + ggtitle("Distribution by BAD and DEROG")
par(mfrow = c(2, 2))
grid.arrange(pl1, pl2, pl3, pl4, ncol = 2)

# One-hot encode categorical features ----
dmy <- dummyVars("~.", data = cat, fullRank = F)
cat_num <- data.frame(predict(dmy, newdata = cat))

# Remove the redundant ".0" level of each boolean indicator ----
drop_cols <- paste0(na_flag_cols, '_NA.0')
categorical <- cat_num[, !colnames(cat_num) %in% drop_cols]

# Numerical features (univariate, bivariate analysis) ----
# Univariate analysis: histograms
par(mfrow = c(2, 3))
for (i in seq_along(num)) {
  hist(num[, i], main = names(num)[i], col = 'blue')
}
# Univariate analysis: boxplots
par(mfrow = c(2, 3))
for (i in seq_along(num)) {
  boxplot(num[, i], main = names(num)[i], col = 'orange')
}
# Univariate analysis: density plots
par(mfrow = c(2, 3))
for (i in seq_along(num)) {
  plot(density(num[, i]), main = names(num)[i], col = 'red')
}

# Bivariate analysis
for (i in seq_along(num)) {
  name <- names(num)[i]
  cat(paste("Distribution of", name), sep = "\n")
  print(summary(num[, i]))
  cat(sep = "\n")
  stand.deviation <- sd(num[, i])
  variance <- var(num[, i])
  skewness <- mean((num[, i] - mean(num[, i]))^3 / sd(num[, i])^3)
  kurtosis <- mean((num[, i] - mean(num[, i]))^4 / sd(num[, i])^4) - 3
  outlier_values <- sum(table(boxplot.stats(num[, i])$out))
  cat(paste("Statistical analysis of", name), sep = "\n")
  print(cbind(stand.deviation, variance, skewness, kurtosis, outlier_values))
  cat(sep = "\n")
  cat(paste("anova_test between BAD and", name), sep = "\n")
  print(summary(aov(as.numeric(BAD) ~ num[, i], data = num)))
  cat(sep = "\n")
}

# Visualization of bivariate analysis
pl5 <- num %>% ggplot(aes(x = BAD, y = LOAN, fill = BAD)) + geom_boxplot()
pl6 <- num %>% ggplot(aes(x = BAD, y = MORTDUE, fill = BAD)) + geom_boxplot()
pl7 <- num %>% ggplot(aes(x = BAD, y = VALUE, fill = BAD)) + geom_boxplot()
pl8 <- num %>% ggplot(aes(x = BAD, y = YOJ, fill = BAD)) + geom_boxplot()
pl9 <- num %>% ggplot(aes(x = BAD, y = CLAGE, fill = BAD)) + geom_boxplot()
pl10 <- num %>% ggplot(aes(x = BAD, y = DEBTINC, fill = BAD)) + geom_boxplot()
par(mfrow = c(2, 3))
grid.arrange(pl5, pl6, pl7, pl8, pl9, pl10, ncol = 2)

# Handling outliers ----
# Cap values beyond the Tukey fences (Q1/Q3 -/+ 1.5 * IQR) at the 5th/95th
# percentiles. Replaces six copy-pasted blocks in the original.
cap_outliers <- function(x) {
  qnt <- quantile(x, probs = c(.25, .75), na.rm = TRUE)
  caps <- quantile(x, probs = c(.05, .95), na.rm = TRUE)
  h <- 1.5 * IQR(x, na.rm = TRUE)
  x[x < (qnt[1] - h)] <- caps[1]
  x[x > (qnt[2] + h)] <- caps[2]
  x
}

for (col in c('LOAN', 'MORTDUE', 'VALUE', 'YOJ', 'CLAGE', 'DEBTINC')) {
  # Before
  print(ggplot(num, aes(x = .data[[col]], fill = BAD)) +
          geom_density(alpha = .3) + ggtitle(col))
  num[[col]] <- cap_outliers(num[[col]])
  # After
  print(ggplot(num, aes(x = .data[[col]], fill = BAD)) +
          geom_density(alpha = .3) +
          ggtitle(paste(col, "after handled outliers")))
}

# Delete zero- and near-zero-variance predictors ----
data <- cbind(categorical, num)
nzv <- nearZeroVar(data, saveMetrics = TRUE)
head(nzv[nzv$nzv, ], 15)  # was [1:15, ], which pads with NA rows when fewer match
nzv <- nearZeroVar(data)
# Guard: data[, -integer(0)] would drop every column when nothing is flagged.
data_new <- if (length(nzv) > 0) data[, -nzv] else data

# Correlation ----
par(mfrow = c(1, 1))
cor <- cor(data_new, use = "complete.obs", method = "spearman")
corrplot(cor, type = "lower", tl.col = "black", diag = FALSE, method = "number",
         mar = c(0, 0, 2, 0), title = "Correlation")
summary(cor[upper.tri(cor)])

# Drop highly correlated features (|rho| > 0.75) ----
tmp <- cor(data_new)
tmp[upper.tri(tmp)] <- 0
diag(tmp) <- 0
df_new <- data_new[, !apply(tmp, 2, function(x) any(abs(x) > 0.75))]
cor <- cor(df_new, use = "complete.obs", method = "spearman")
summary(cor[upper.tri(cor)])

# Pre-processing: centre and scale ----
set.seed(2019)
preprocessParams <- preProcess(df_new, method = c("center", "scale"))
transformed <- predict(preprocessParams, df_new)

# Target variable with syntactic level names (required by caret's prSummary) ----
y <- as.factor(df$BAD)
transformed <- cbind.data.frame(transformed, y)
levels(transformed$y) <- make.names(levels(factor(transformed$y)))
str(transformed)

# Train/test split: stratified random sample, 80/20 ----
set.seed(12345)
test_index <- createDataPartition(transformed$y, p = 0.80, list = FALSE)
itest <- transformed[-test_index, ]
itrain <- transformed[test_index, ]

# Baseline models: evaluating models by caret ----
control <- trainControl(method = "cv", number = 5, classProbs = TRUE,
                        summaryFunction = prSummary)
metric <- 'AUC'

# GLM
set.seed(2019)
fit.glm <- train(y ~ ., data = itrain, method = "glm",
                 family = binomial(link = 'logit'),
                 metric = metric, trControl = control)
print(fit.glm)
plot(varImp(fit.glm), 15, main = 'GLM feature selection')

# Random forest
set.seed(2019)
fit.rf <- train(y ~ ., data = itrain, method = "rf",
                metric = metric, trControl = control)
print(fit.rf)
plot(varImp(fit.rf), 15, main = 'Random Forest feature selection')

# Neural network
set.seed(2019)
fit.nnet <- train(y ~ ., data = itrain, method = "nnet",
                  metric = metric, trControl = control)
print(fit.nnet)
plot(varImp(fit.nnet), 15, main = 'Neural Network feature selection')

# GBM
set.seed(2019)
fit.gbm <- train(y ~ ., data = itrain, method = "gbm",
                 metric = metric, trControl = control, verbose = F)
print(fit.gbm)
par(mar = c(4, 11, 1, 1))
summary(fit.gbm, cBars = 15, las = 2, plotit = T, main = 'GBM feature selection')

# Comparison of algorithms ----
results <- resamples(list(glm = fit.glm, rf = fit.rf, nnet = fit.nnet, gbm = fit.gbm))
cat(paste('Results'), sep = '\n')
summary(results)
par(mar = c(4, 11, 1, 1))
dotplot(results, main = 'AUC results from algorithms')

# Predictions on the held-out test set ----
par(mfrow = c(2, 2))
set.seed(2019)
prediction.glm <- predict(fit.glm, newdata = itest, type = "raw")
set.seed(2019)
prediction.rf <- predict(fit.rf, newdata = itest, type = "raw")
set.seed(2019)
prediction.nnet <- predict(fit.nnet, newdata = itest, type = "raw")
set.seed(2019)
prediction.gbm <- predict(fit.gbm, newdata = itest, type = "raw")

# Visualization of results: confusion matrices and train/test F1 scores ----
cat(paste('Confusion Matrix GLM Model'), sep = '\n')
confusionMatrix(prediction.glm, itest$y)
F1_train <- F1_Score(itrain$y, predict(fit.glm, newdata = itrain, type = "raw"))
F1_test <- F1_Score(itest$y, prediction.glm)
cat(paste('F1_train_glm:', F1_train, 'F1_test_glm:', F1_test), sep = '\n')

cat(paste('Confusion Matrix Random Forest Model'), sep = '\n')
confusionMatrix(prediction.rf, itest$y)
F1_train <- F1_Score(itrain$y, predict(fit.rf, newdata = itrain, type = "raw"))
F1_test <- F1_Score(itest$y, prediction.rf)
cat(paste('F1_train_rf:', F1_train, 'F1_test_rf:', F1_test), sep = '\n')

cat(paste('Confusion Matrix Neural Network Model'), sep = '\n')
confusionMatrix(prediction.nnet, itest$y)
F1_train <- F1_Score(itrain$y, predict(fit.nnet, newdata = itrain, type = "raw"))
F1_test <- F1_Score(itest$y, prediction.nnet)
cat(paste('F1_train_nnet:', F1_train, 'F1_test_nnet:', F1_test), sep = '\n')

cat(paste('Confusion Matrix GBM Model'), sep = '\n')
confusionMatrix(prediction.gbm, itest$y)
F1_train <- F1_Score(itrain$y, predict(fit.gbm, newdata = itrain, type = "raw"))
F1_test <- F1_Score(itest$y, prediction.gbm)
cat(paste('F1_train_gbm:', F1_train, 'F1_test_gbm:', F1_test), sep = '\n')

# Confusion-matrix plots ----
par(mfrow = c(2, 2))
ctable.glm <- table(prediction.glm, itest$y)
fourfoldplot(ctable.glm, color = c("#CC6666", "#99CC99"), conf.level = 0,
             margin = 1, main = "GLM Confusion Matrix")
ctable.rf <- table(prediction.rf, itest$y)
fourfoldplot(ctable.rf, color = c("#CC6666", "#99CC99"), conf.level = 0,
             margin = 1, main = "RF Confusion Matrix")
ctable.nnet <- table(prediction.nnet, itest$y)
fourfoldplot(ctable.nnet, color = c("#CC6666", "#99CC99"), conf.level = 0,
             margin = 1, main = "NNET Confusion Matrix")
ctable.gbm <- table(prediction.gbm, itest$y)
fourfoldplot(ctable.gbm, color = c("#CC6666", "#99CC99"), conf.level = 0,
             margin = 1, main = "GBM Confusion Matrix")
/How to face a majority class greater than a minority class in a classification task/R approach/Loan_Prediction_baseline_models.R
no_license
claudio1975/Medium-blog
R
false
false
15,805
r
# Prepare Workspace suppressWarnings({library(ggplot2)}) suppressWarnings({library(tidyverse)}) suppressWarnings({library(caret)}) suppressWarnings({library(corrplot)}) suppressWarnings({library(gridExtra)}) suppressWarnings({library(MLmetrics)}) # Upload Dataset path <- "C:/Users/user/Documents/eRUM2020/hmeq.csv" df = read.csv(path) # Dimensions of data set dim(df) # List types for each attribute sapply(df, class) # Take a peek at the first rows of the data set head(df,5) # Summarize attribute distributions summary(df) # Summarize data structure str(df) # Formatting Features and managing some levels BAD <- df$BAD <- as.factor(df$BAD) df$LOAN <- as.numeric(df$LOAN) df$DEROG <- as.factor(df$DEROG) df$DELINQ <- as.factor(df$DELINQ) df$NINQ <- as.factor(df$NINQ) df$CLNO <- as.factor(df$CLNO) df$JOB[df$JOB == ""] <- "NA" df$REASON[df$REASON == ""] <- "NA" # Check missing values mi_summary <- function(data_frame){ mi_summary<-c() for (col in colnames(data_frame)){ mi_summary <- c(mi_summary,mean(is.na(data_frame[,col])*100)) } mi_summary_new <- mi_summary[mi_summary>0] mi_summary_cols <- colnames(data_frame)[mi_summary>0] mi_summary <- data.frame('col_name' = mi_summary_cols, 'perc_missing' = mi_summary_new) mi_summary <- mi_summary[order(mi_summary[,2], decreasing = TRUE), ] mi_summary[,2] <- round(mi_summary[,2],6) rownames(mi_summary) <- NULL return(mi_summary) } missing_summary <- mi_summary(df) missing_summary # Input boolean variables for features with NA's df <- df %>% mutate(DEBTINC_NA = ifelse(is.na(DEBTINC),1,0)) %>% mutate(DEROG_NA = ifelse(is.na(DEROG),1,0)) %>% mutate(DELINQ_NA = ifelse(is.na(DELINQ),1,0)) %>% mutate(MORTDUE_NA = ifelse(is.na(MORTDUE),1,0)) %>% mutate(YOJ_NA = ifelse(is.na(YOJ),1,0)) %>% mutate(NINQ_NA = ifelse(is.na(NINQ),1,0)) %>% mutate(CLAGE_NA = ifelse(is.na(CLAGE),1,0)) %>% mutate(CLNO_NA = ifelse(is.na(CLNO),1,0)) %>% mutate(VALUE_NA = ifelse(is.na(VALUE),1,0)) %>% mutate(JOB_NA = ifelse(is.na(JOB),1,0)) %>% mutate(REASON_NA = 
ifelse(is.na(REASON),1,0)) # Input missing values with median for numerical columns and with the most common level for categorical columns for (col in missing_summary$col_name){ if (class(df[,col]) == 'factor'){ unique_levels <- unique(df[,col]) df[is.na(df[,col]), col] <- unique_levels[which.max(tabulate(match(df[,col], unique_levels)))] } else { df[is.na(df[,col]),col] <- median(as.numeric(df[,col]), na.rm = TRUE) } } # Check results pMiss <- function(x){sum(is.na(x))/length(x)*100} pMiss <- apply(df,2,pMiss) pMiss <- pMiss[pMiss > 0] pMiss <- pMiss[order(pMiss, decreasing=T)] pMiss # Formatting new features and managing some levels df$DEROG_NA <- as.factor(df$DEROG_NA) df$DEBTINC_NA <- as.factor(df$DEBTINC_NA) df$DELINQ_NA <- as.factor(df$DELINQ_NA) df$MORTDUE_NA <- as.factor(df$MORTDUE_NA) df$YOJ_NA <- as.factor(df$YOJ_NA) df$NINQ_NA <- as.factor(df$NINQ_NA) df$CLAGE_NA <- as.factor(df$CLAGE_NA) df$CLNO_NA <- as.factor(df$CLNO_NA) df$VALUE_NA <- as.factor(df$VALUE_NA) df$JOB_NA <- as.factor(df$JOB_NA) df$REASON_NA <- as.factor(df$REASON_NA) df$JOB <- factor(df$JOB, labels=c('Mgr','Office','Other','ProfExe','Sales','Self')) df$REASON <- factor(df$REASON, labels=c('DebtCon','HomeImp')) # Split data set into categorical, boolean and numerical variables cat <- df[,sapply(df, is.factor)] %>% select_if(~nlevels(.) 
<=15 ) %>% select(-BAD) bol <- df[,c('DEBTINC_NA','DEROG_NA','DELINQ_NA','MORTDUE_NA','YOJ_NA','NINQ_NA','CLAGE_NA','CLNO_NA','VALUE_NA','JOB_NA','REASON_NA')] num <- df[,sapply(df, is.numeric)] # Summarize the class distribution of the target variable cbind(freq=table(df$BAD), percentage=prop.table(table(df$BAD))*100) # Visualize data ggplot(df, aes(BAD, fill=BAD)) + geom_bar() + scale_fill_brewer(palette = "Set1") + ggtitle("Distribution of Target variable") # Analysis for categorical features (barplot, univariate analysis, bivariate analysis) # Univariate Analysis cat <- cat[,c('DELINQ','REASON','JOB','DEROG')] for(i in 1:length(cat)) { counts <- table(cat[,i]) name <- names(cat)[i] barplot(counts, main=name, col=c("blue","red","green","orange","purple")) } # Bivariate Analysis with Feature Selection Analysis par(mfrow=c(2,2)) for(i in 1:length(cat)){ freq=table(cat[,i]) percentage=prop.table(table(cat[,i]))*100 freq_cat_outcome=table(BAD,cat[,i]) name <- names(cat)[i] cat(sep="\n") cat(paste("Distribution of", name), sep="\n") print(cbind(freq,percentage)) cat(sep="\n") cat(paste("Distribution by Target variable and", name), sep="\n") print(freq_cat_outcome) cat(sep="\n") cat(paste("Chi-squared test by Target variable and", name), sep="\n") suppressWarnings({print(chisq.test(table(BAD,cat[,i])))}) } # Visualization of Bivariate Analysis pl1 <- cat %>% ggplot(aes(x=BAD, y=DELINQ, fill=BAD)) + geom_bar(stat='identity') + ggtitle("Distribution by BAD and DELINQ") pl2 <- cat %>% ggplot(aes(x=BAD, y=REASON, fill=BAD)) + geom_bar(stat='identity') + ggtitle("Distribution by BAD and REASON") pl3 <- cat %>% ggplot(aes(x=BAD, y=JOB, fill=BAD)) + geom_bar(stat='identity') + ggtitle("Distribution by BAD and JOB") pl4 <- cat %>% ggplot(aes(x=BAD, y=DEROG, fill=BAD)) + geom_bar(stat='identity') + ggtitle("Distribution by BAD and DEROG") par(mfrow=c(2,2)) grid.arrange(pl1,pl2,pl3,pl4, ncol=2) # One-hot encoding on categorical features dmy <- dummyVars("~.", data = 
cat,fullRank = F) cat_num <- data.frame(predict(dmy, newdata = cat)) # Remove correlated levels from boolean features drop_cols <- c('DEBTINC_NA.0','DEROG_NA.0','DELINQ_NA.0','MORTDUE_NA.0','YOJ_NA.0','NINQ_NA.0','CLAGE_NA.0','CLNO_NA.0','VALUE_NA.0','JOB_NA.0','REASON_NA.0') categorical <- cat_num[,!colnames(cat_num) %in% drop_cols] # Analysis for numerical features (univariate analysis, bivariate analysis) # Univariate Analysis, histograms par(mfrow=c(2,3)) for(i in 1:length(num)) { hist(num[,i], main=names(num)[i], col='blue') } # Univariate Analysis, boxplots par(mfrow=c(2,3)) for(i in 1:length(num)) { boxplot(num[,i], main=names(num)[i], col='orange') } # Univariate Analysis, densityplots par(mfrow=c(2,3)) for(i in 1:length(num)){ plot(density(num[,i]), main=names(num)[i], col='red') } # Bivariate Analysis for(i in 1:length(num)){ name <- names(num)[i] cat(paste("Distribution of", name), sep="\n") #cat(names(num)[i],sep = "\n") print(summary(num[,i])) cat(sep="\n") stand.deviation = sd(num[,i]) variance = var(num[,i]) skewness = mean((num[,i] - mean(num[,i]))^3/sd(num[,i])^3) kurtosis = mean((num[,i] - mean(num[,i]))^4/sd(num[,i])^4) - 3 outlier_values <- sum(table(boxplot.stats(num[,i])$out)) cat(paste("Statistical analysis of", name), sep="\n") print(cbind(stand.deviation, variance, skewness, kurtosis, outlier_values)) cat(sep="\n") cat(paste("anova_test between BAD and", name),sep = "\n") print(summary(aov(as.numeric(BAD)~num[,i], data=num))) cat(sep="\n") } # Visualization of Bivariate Analysis pl5 <- num %>% ggplot(aes(x=BAD, y=LOAN, fill=BAD)) + geom_boxplot() pl6 <- num %>% ggplot(aes(x=BAD, y=MORTDUE, fill=BAD)) + geom_boxplot() pl7 <- num %>% ggplot(aes(x=BAD, y=VALUE, fill=BAD)) + geom_boxplot() pl8 <- num %>% ggplot(aes(x=BAD, y=YOJ, fill=BAD)) + geom_boxplot() pl9 <- num %>% ggplot(aes(x=BAD, y=CLAGE, fill=BAD)) + geom_boxplot() pl10 <- num %>% ggplot(aes(x=BAD, y=DEBTINC, fill=BAD)) + geom_boxplot() par(mfrow=c(2,3)) 
grid.arrange(pl5,pl6,pl7,pl8,pl9,pl10, ncol=2) # Handling outliers # Before ggplot(num, aes(x = LOAN, fill = BAD)) + geom_density(alpha = .3) + ggtitle("LOAN") # Managing outliers qnt <- quantile(num$LOAN, probs=c(.25, .75), na.rm = T) caps <- quantile(num$LOAN, probs=c(.05, .95), na.rm = T) H <- 1.5 * IQR(num$LOAN, na.rm = T) num$LOAN[num$LOAN < (qnt[1] - H)] <- caps[1] num$LOAN[num$LOAN >(qnt[2] + H)] <- caps[2] # After ggplot(num, aes(x = LOAN, fill = BAD)) + geom_density(alpha = .3) + ggtitle("LOAN after handled outliers") # Before ggplot(num, aes(x = MORTDUE, fill = BAD)) + geom_density(alpha = .3) + ggtitle("MORTDUE") # Managing outliers qnt <- quantile(num$MORTDUE, probs=c(.25, .75), na.rm = T) caps <- quantile(num$MORTDUE, probs=c(.05, .95), na.rm = T) H <- 1.5 * IQR(num$MORTDUE, na.rm = T) num$MORTDUE[num$MORTDUE < (qnt[1] - H)] <- caps[1] num$MORTDUE[num$MORTDUE >(qnt[2] + H)] <- caps[2] # After ggplot(num, aes(x = MORTDUE, fill = BAD)) + geom_density(alpha = .3) + ggtitle("MORTDUE after handled outliers") # Before ggplot(num, aes(x = VALUE, fill = BAD)) + geom_density(alpha = .3) + ggtitle("VALUE") # Managing outliers qnt <- quantile(num$VALUE, probs=c(.25, .75), na.rm = T) caps <- quantile(num$VALUE, probs=c(.05, .95), na.rm = T) H <- 1.5 * IQR(num$VALUE, na.rm = T) num$VALUE[num$VALUE < (qnt[1] - H)] <- caps[1] num$VALUE[num$VALUE >(qnt[2] + H)] <- caps[2] # After ggplot(num, aes(x = VALUE, fill = BAD)) + geom_density(alpha = .3) + ggtitle("VALUE after handled outliers") # Before ggplot(num, aes(x = YOJ, fill = BAD)) + geom_density(alpha = .3) + ggtitle("YOJ") # Managing outliers qnt <- quantile(num$YOJ, probs=c(.25, .75), na.rm = T) caps <- quantile(num$YOJ, probs=c(.05, .95), na.rm = T) H <- 1.5 * IQR(num$YOJ, na.rm = T) num$YOJ[num$YOJ < (qnt[1] - H)] <- caps[1] num$YOJ[num$YOJ >(qnt[2] + H)] <- caps[2] # After ggplot(num, aes(x = YOJ, fill = BAD)) + geom_density(alpha = .3) + ggtitle("YOJ after handled outliers") # Before ggplot(num, aes(x = CLAGE, 
fill = BAD)) + geom_density(alpha = .3) + ggtitle("CLAGE") # Managing outliers qnt <- quantile(num$CLAGE, probs=c(.25, .75), na.rm = T) caps <- quantile(num$CLAGE, probs=c(.05, .95), na.rm = T) H <- 1.5 * IQR(num$CLAGE, na.rm = T) num$CLAGE[num$CLAGE < (qnt[1] - H)] <- caps[1] num$CLAGE[num$CLAGE >(qnt[2] + H)] <- caps[2] # After ggplot(num, aes(x = CLAGE, fill = BAD)) + geom_density(alpha = .3) + ggtitle("CLAGE after handled outliers") # Before ggplot(num, aes(x = DEBTINC, fill = BAD)) + geom_density(alpha = .3) + ggtitle("DEBTINC") # Managing outliers qnt <- quantile(num$DEBTINC, probs=c(.25, .75), na.rm = T) caps <- quantile(num$DEBTINC, probs=c(.05, .95), na.rm = T) H <- 1.5 * IQR(num$DEBTINC, na.rm = T) num$DEBTINC[num$DEBTINC < (qnt[1] - H)] <- caps[1] num$DEBTINC[num$DEBTINC >(qnt[2] + H)] <- caps[2] # After ggplot(num, aes(x = DEBTINC, fill = BAD)) + geom_density(alpha = .3) + ggtitle("DEBTINC after handled outliers") # Delete Zero-and Near Zero-Variance Predictors data <- cbind(categorical,num) nzv <- nearZeroVar(data, saveMetrics= TRUE) nzv[nzv$nzv,][1:15,] nzv <- nearZeroVar(data) data_new <- data[, -nzv] # Correlation # Visualization par(mfrow=c(1,1)) cor <- cor(data_new,use="complete.obs",method = "spearman") corrplot(cor, type="lower", tl.col = "black", diag=FALSE, method="number", mar = c(0, 0, 2, 0), title="Correlation") summary(cor[upper.tri(cor)]) # Delete correlated features tmp <- cor(data_new) tmp[upper.tri(tmp)] <- 0 diag(tmp) <- 0 df_new <- data_new[,!apply(tmp,2,function(x) any(abs(x) > 0.75))] cor <- cor(df_new,use="complete.obs",method = "spearman") summary(cor[upper.tri(cor)]) # Pre-processing # calculate the pre-process parameters from the data set set.seed(2019) preprocessParams <- preProcess(df_new, method=c("center", "scale")) # Transform the data set using the parameters transformed <- predict(preprocessParams, df_new) # Manage levels on the target variable y <- as.factor(df$BAD) transformed <- cbind.data.frame(transformed,y) 
levels(transformed$y) <- make.names(levels(factor(transformed$y))) str(transformed) # Split data set # Draw a random, stratified sample including p percent of the data set.seed(12345) test_index <- createDataPartition(transformed$y, p=0.80, list=FALSE) # select 20% of the data for test itest <- transformed[-test_index,] # use the remaining 80% of data to training the models itrain <- transformed[test_index,] # Baseline Models: evaluating models by Caret control <- trainControl(method="cv", number=5,classProbs = TRUE, summaryFunction = prSummary) metric <- 'AUC' # GLM set.seed(2019) fit.glm <- train(y~., data=itrain, method="glm", family=binomial(link='logit'), metric=metric, trControl=control) print(fit.glm) plot(varImp(fit.glm),15, main = 'GLM feature selection') # RANDOM FOREST set.seed(2019) fit.rf <- train(y~., data=itrain, method="rf", metric=metric, trControl=control) print(fit.rf) plot(varImp(fit.rf),15, main='Random Forest feature selection') # NNET set.seed(2019) fit.nnet <- train(y~., data=itrain, method="nnet", metric=metric, trControl=control) print(fit.nnet) plot(varImp(fit.nnet),15, main = 'Neural Network feature selection') # GBM set.seed(2019) fit.gbm <- train(y~., data=itrain, method="gbm", metric=metric, trControl=control, verbose=F) print(fit.gbm) par(mar = c(4, 11, 1, 1)) summary(fit.gbm, cBars=15, las=2, plotit=T, main = 'GBM feature selection') # Comparison of algorithms results <- resamples(list(glm=fit.glm, rf=fit.rf, nnet=fit.nnet, gbm=fit.gbm)) cat(paste('Results'), sep='\n') summary(results) par(mar = c(4, 11, 1, 1)) dotplot(results, main = 'AUC results from algorithms') # Predictions par(mfrow=c(2,2)) set.seed(2019) prediction.glm <-predict(fit.glm,newdata=itest,type="raw") set.seed(2019) prediction.rf <-predict(fit.rf,newdata=itest,type="raw") set.seed(2019) prediction.nnet <-predict(fit.nnet,newdata=itest,type="raw") set.seed(2019) prediction.gbm <-predict(fit.gbm,newdata=itest,type="raw") # Visualization of results 
cat(paste('Confusion Matrix GLM Model'), sep='\n') confusionMatrix(prediction.glm, itest$y) F1_train <- F1_Score(itrain$y,predict(fit.glm,newdata=itrain,type="raw")) F1_test <- F1_Score(itest$y, prediction.glm) cat(paste('F1_train_glm:',F1_train, 'F1_test_glm:', F1_test), sep='\n') cat(paste('Confusion Matrix Random Forest Model'), sep='\n') confusionMatrix(prediction.rf, itest$y) F1_train <- F1_Score(itrain$y,predict(fit.rf,newdata=itrain,type="raw")) F1_test <- F1_Score(itest$y, prediction.rf) cat(paste('F1_train_rf:',F1_train,'F1_test_rf:', F1_test), sep='\n') cat(paste('Confusion Matrix Neural Network Model'), sep='\n') confusionMatrix(prediction.nnet, itest$y) F1_train <- F1_Score(itrain$y,predict(fit.nnet,newdata=itrain,type="raw")) F1_test <- F1_Score(itest$y, prediction.nnet) cat(paste('F1_train_nnet:',F1_train,'F1_test_nnet:', F1_test), sep='\n') cat(paste('Confusion Matrix GBM Model'), sep='\n') confusionMatrix(prediction.gbm, itest$y) F1_train <- F1_Score(itrain$y,predict(fit.gbm,newdata=itrain,type="raw")) F1_test <- F1_Score(itest$y, prediction.gbm) cat(paste('F1_train_gbm:',F1_train,'F1_test_gbm:', F1_test), sep='\n') # Confusion Matrix Plots par(mfrow=c(2,2)) ctable.glm <- table(prediction.glm, itest$y) fourfoldplot(ctable.glm, color = c("#CC6666", "#99CC99"), conf.level = 0, margin = 1, main = "GLM Confusion Matrix") ctable.rf <- table(prediction.rf, itest$y) fourfoldplot(ctable.rf, color = c("#CC6666", "#99CC99"), conf.level = 0, margin = 1, main = "RF Confusion Matrix") ctable.nnet <- table(prediction.nnet, itest$y) fourfoldplot(ctable.nnet, color = c("#CC6666", "#99CC99"), conf.level = 0, margin = 1, main = "NNET Confusion Matrix") ctable.gbm <- table(prediction.gbm, itest$y) fourfoldplot(ctable.gbm, color = c("#CC6666", "#99CC99"), conf.level = 0, margin = 1, main = "GBM Confusion Matrix")
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 14423 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 14423 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query42_query26_1344.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 4213 c no.of clauses 14423 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 14423 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query42_query26_1344.qdimacs 4213 14423 E1 [] 0 16 4197 14423 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query42_query26_1344/query42_query26_1344.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
717
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 14423 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 14423 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query42_query26_1344.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 4213 c no.of clauses 14423 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 14423 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query42_query26_1344.qdimacs 4213 14423 E1 [] 0 16 4197 14423 NONE
library(xml2) library(magrittr) fix_xml <- function(projekt_dir){ error_log <- file.path(projekt_dir, "error.log") if(file.exists(error_log) == TRUE) file.remove(error_log) folders <- list.dirs(file.path(projekt_dir, "tei"), recursive = FALSE) for(folder in folders){ print(folder) lapply(list.files(folder, full.names = TRUE), function(file){ tryCatch(a1 <- xml2::read_xml(file), error = function(e){ print(file) 'con <- file(description = file) fix_to_write <- readLines(con) %>% gsub("&", "und", .) %>% gsub("<<", "<", .) %>% gsub(">>", ">", .) %>% gsub(">< ", ">", .) %>% gsub(" ><", "<", .) %>% gsub("<7i>|< b>|<7I>|<-2>|<\\(i>|<7b>|<7b>|>>>|<<<", "", .) close(con)' #writeLines(fix_to_write, file) write(file, error_log, append = TRUE) }) }) } } fix_xml("/Users/simgeh/lab/_temp/DieZeit")
/xml_fix.R
no_license
simongehlhar/helper
R
false
false
1,054
r
library(xml2) library(magrittr) fix_xml <- function(projekt_dir){ error_log <- file.path(projekt_dir, "error.log") if(file.exists(error_log) == TRUE) file.remove(error_log) folders <- list.dirs(file.path(projekt_dir, "tei"), recursive = FALSE) for(folder in folders){ print(folder) lapply(list.files(folder, full.names = TRUE), function(file){ tryCatch(a1 <- xml2::read_xml(file), error = function(e){ print(file) 'con <- file(description = file) fix_to_write <- readLines(con) %>% gsub("&", "und", .) %>% gsub("<<", "<", .) %>% gsub(">>", ">", .) %>% gsub(">< ", ">", .) %>% gsub(" ><", "<", .) %>% gsub("<7i>|< b>|<7I>|<-2>|<\\(i>|<7b>|<7b>|>>>|<<<", "", .) close(con)' #writeLines(fix_to_write, file) write(file, error_log, append = TRUE) }) }) } } fix_xml("/Users/simgeh/lab/_temp/DieZeit")
########################################## ## A script to analyse the effect of various factors on the postsynaptic Vm ########################################## source('./StatsEval_functions.R', chdir = TRUE) ## data: binary file, containing the somatic membrane potential of the biophysical model ## scripts for simulating the biophysical model can be found here: https://bitbucket.org/bbu20/clustering/ ## data format: matrix, with rows being the individual trials, recorded at 1000 Hz localdir <- getwd() # datadir is the directory where the biophysical model's output was saved # it is now pointing to a non-existing directory, which should be created when running the biophysical model with appropriate parameters. datadir <- '../../CA1/global_regular_lrand/place/' library(viridis) library(colormap) library(gplots) stim_types <- c('random_NR', 'balanced') act_types <- c('Dactive', 'Dactive') spinetype <- 'spines' typenames <- c('act_rand', 'act_bal') n.sim <- length(stim_types) ntrial <- 16 Tmax <- 10 gN <- '0.8' graphics <- F fname <- '../datasets/response_variability_ttt_globalReg_localRand_spines.Rdata' if (file.exists(fname)) load(fname) else { setwd(datadir) stats_data <- eval_Vm_stats(n.sim, stim_types, act_types, typenames, graphics=0) setwd(localdir) save(stats_data, file=fname) } sds <- stats_data$sds meanresps <- stats_data$meanresps ttt_variance <- stats_data$ttt_variance ttt_variance_peak <- stats_data$ttt_variance_peak ############################################################ cols <- c(rgb(255, 208, 220, max=255), rgb(181, 181, 181, max=255)) pchs <- c(22, 22, 23, 23) plot_data_SEM(filename=NULL, sds, ttt_variance, ttt_variance_peak, typenames, col=cols, pch=pchs)~/Projects/KOKI/Synchrony/CA1/
/NEW_Biophysical/analysis/functions/Collect_VmStats.R
no_license
yjkimnada/CA1_Sim
R
false
false
1,750
r
########################################## ## A script to analyse the effect of various factors on the postsynaptic Vm ########################################## source('./StatsEval_functions.R', chdir = TRUE) ## data: binary file, containing the somatic membrane potential of the biophysical model ## scripts for simulating the biophysical model can be found here: https://bitbucket.org/bbu20/clustering/ ## data format: matrix, with rows being the individual trials, recorded at 1000 Hz localdir <- getwd() # datadir is the directory where the biophysical model's output was saved # it is now pointing to a non-existing directory, which should be created when running the biophysical model with appropriate parameters. datadir <- '../../CA1/global_regular_lrand/place/' library(viridis) library(colormap) library(gplots) stim_types <- c('random_NR', 'balanced') act_types <- c('Dactive', 'Dactive') spinetype <- 'spines' typenames <- c('act_rand', 'act_bal') n.sim <- length(stim_types) ntrial <- 16 Tmax <- 10 gN <- '0.8' graphics <- F fname <- '../datasets/response_variability_ttt_globalReg_localRand_spines.Rdata' if (file.exists(fname)) load(fname) else { setwd(datadir) stats_data <- eval_Vm_stats(n.sim, stim_types, act_types, typenames, graphics=0) setwd(localdir) save(stats_data, file=fname) } sds <- stats_data$sds meanresps <- stats_data$meanresps ttt_variance <- stats_data$ttt_variance ttt_variance_peak <- stats_data$ttt_variance_peak ############################################################ cols <- c(rgb(255, 208, 220, max=255), rgb(181, 181, 181, max=255)) pchs <- c(22, 22, 23, 23) plot_data_SEM(filename=NULL, sds, ttt_variance, ttt_variance_peak, typenames, col=cols, pch=pchs)~/Projects/KOKI/Synchrony/CA1/
## Importing Library library(dplyr) library(ggplot2) library(readxl) ## Importing the dataset Online.Retail <- read_excel("Online Retail.xlsx") ## NA value treatment order_wise <- na.omit(Online.Retail) ## Making RFM data Amount <- order_wise$Quantity * order_wise$UnitPrice order_wise <- cbind(order_wise,Amount) order_wise <- order_wise[order(order_wise$CustomerID),] monetary <- aggregate(Amount~CustomerID, order_wise, sum) frequency <- order_wise[,c(7,1)] k<-table(as.factor(frequency$CustomerID)) k<-data.frame(k) colnames(k)[1]<-c("CustomerID") master <-merge(monetary,k,by="CustomerID") recency <- order_wise[,c(7,5)] recency$InvoiceDate<-as.Date(recency$InvoiceDate,"%d-%m-%Y %H:%M") maximum<-max(recency$InvoiceDate) maximum<-maximum+1 maximum$diff <-maximum-recency$InvoiceDate recency$diff<-maximum$diff df<-aggregate(recency$diff,by=list(recency$CustomerID),FUN="min") colnames(df)[1]<- "CustomerID" colnames(df)[2]<- "Recency" RFM <- merge(monetary, k, by = ("CustomerID")) RFM <- merge(RFM, df, by = ("CustomerID")) RFM$Recency <- as.numeric(RFM$Recency) ## Outlier treatment box <- boxplot.stats(RFM$Amount) out <- box$out RFM1 <- RFM[ !RFM$Amount %in% out, ] RFM <- RFM1 box <- boxplot.stats(RFM$Freq) out <- box$out RFM1 <- RFM[ !RFM$Freq %in% out, ] RFM <- RFM1 box <- boxplot.stats(RFM$Recency) out <- box$out RFM1 <- RFM[ !RFM$Recency %in% out, ] RFM <- RFM1 ## Standardization of data RFM_norm1<- RFM[,-1] RFM_norm1$Amount <- scale(RFM_norm1$Amount) RFM_norm1$Freq <- scale(RFM_norm1$Freq) RFM_norm1$Recency <- scale(RFM_norm1$Recency) View(RFM_norm1) ## Implementing K-Means algorithm clus3 <- kmeans(RFM_norm1, centers = 3, iter.max = 50, nstart = 50) ## Finding the optimal value of K r_sq<- rnorm(20) for (number in 1:20){clus <- kmeans(RFM_norm1, centers = number, nstart = 50) r_sq[number]<- clus$betweenss/clus$totss } plot(r_sq) ## Running the K-Means algorithm for K =4,5,6 clus4 <- kmeans(RFM_norm1, centers = 4, iter.max = 50, nstart = 50) clus5 <- kmeans(RFM_norm1, 
centers = 5, iter.max = 50, nstart = 50) clus6 <- kmeans(RFM_norm1, centers = 6, iter.max = 50, nstart = 50) ## Appending the ClusterIDs to RFM data RFM_km <-cbind(RFM,clus5$cluster) colnames(RFM_km)[5]<- "ClusterID" ## Cluster Analysis km_clusters<- group_by(RFM_km, ClusterID) tab1<- summarise(km_clusters, Mean_amount=mean(Amount), Mean_freq=mean(Freq), Mean_recency=mean(Recency)) ## Plot ggplot(tab1, aes(x= factor(ClusterID), y=Mean_amount)) + geom_bar(stat = "identity") ggplot(tab1, aes(x= factor(ClusterID), y=Mean_freq)) + geom_bar(stat = "identity") ggplot(tab1, aes(x= factor(ClusterID), y=Mean_recency)) + geom_bar(stat = "identity")
/Customer Segmentation/kmeans_clustering.R
no_license
ashish-kamboj/Data-Science
R
false
false
2,675
r
## Importing Library library(dplyr) library(ggplot2) library(readxl) ## Importing the dataset Online.Retail <- read_excel("Online Retail.xlsx") ## NA value treatment order_wise <- na.omit(Online.Retail) ## Making RFM data Amount <- order_wise$Quantity * order_wise$UnitPrice order_wise <- cbind(order_wise,Amount) order_wise <- order_wise[order(order_wise$CustomerID),] monetary <- aggregate(Amount~CustomerID, order_wise, sum) frequency <- order_wise[,c(7,1)] k<-table(as.factor(frequency$CustomerID)) k<-data.frame(k) colnames(k)[1]<-c("CustomerID") master <-merge(monetary,k,by="CustomerID") recency <- order_wise[,c(7,5)] recency$InvoiceDate<-as.Date(recency$InvoiceDate,"%d-%m-%Y %H:%M") maximum<-max(recency$InvoiceDate) maximum<-maximum+1 maximum$diff <-maximum-recency$InvoiceDate recency$diff<-maximum$diff df<-aggregate(recency$diff,by=list(recency$CustomerID),FUN="min") colnames(df)[1]<- "CustomerID" colnames(df)[2]<- "Recency" RFM <- merge(monetary, k, by = ("CustomerID")) RFM <- merge(RFM, df, by = ("CustomerID")) RFM$Recency <- as.numeric(RFM$Recency) ## Outlier treatment box <- boxplot.stats(RFM$Amount) out <- box$out RFM1 <- RFM[ !RFM$Amount %in% out, ] RFM <- RFM1 box <- boxplot.stats(RFM$Freq) out <- box$out RFM1 <- RFM[ !RFM$Freq %in% out, ] RFM <- RFM1 box <- boxplot.stats(RFM$Recency) out <- box$out RFM1 <- RFM[ !RFM$Recency %in% out, ] RFM <- RFM1 ## Standardization of data RFM_norm1<- RFM[,-1] RFM_norm1$Amount <- scale(RFM_norm1$Amount) RFM_norm1$Freq <- scale(RFM_norm1$Freq) RFM_norm1$Recency <- scale(RFM_norm1$Recency) View(RFM_norm1) ## Implementing K-Means algorithm clus3 <- kmeans(RFM_norm1, centers = 3, iter.max = 50, nstart = 50) ## Finding the optimal value of K r_sq<- rnorm(20) for (number in 1:20){clus <- kmeans(RFM_norm1, centers = number, nstart = 50) r_sq[number]<- clus$betweenss/clus$totss } plot(r_sq) ## Running the K-Means algorithm for K =4,5,6 clus4 <- kmeans(RFM_norm1, centers = 4, iter.max = 50, nstart = 50) clus5 <- kmeans(RFM_norm1, 
centers = 5, iter.max = 50, nstart = 50) clus6 <- kmeans(RFM_norm1, centers = 6, iter.max = 50, nstart = 50) ## Appending the ClusterIDs to RFM data RFM_km <-cbind(RFM,clus5$cluster) colnames(RFM_km)[5]<- "ClusterID" ## Cluster Analysis km_clusters<- group_by(RFM_km, ClusterID) tab1<- summarise(km_clusters, Mean_amount=mean(Amount), Mean_freq=mean(Freq), Mean_recency=mean(Recency)) ## Plot ggplot(tab1, aes(x= factor(ClusterID), y=Mean_amount)) + geom_bar(stat = "identity") ggplot(tab1, aes(x= factor(ClusterID), y=Mean_freq)) + geom_bar(stat = "identity") ggplot(tab1, aes(x= factor(ClusterID), y=Mean_recency)) + geom_bar(stat = "identity")
######################## getAllNlConfigNames ################################### #' Generate a list of all possible configNames for a given nlType #' #' Generate a list of all possible configNames for a given nlType #' #' @param nlType if present show only configNames matching the nlType #' #' @examples #' getAllNlConfigNames("OLS.Y") #' #returns '"cf_cvg", "avg_vis", "stable_lights"' #' @export getAllNlConfigNames <- function(nlType) { allConfigNames <- list( "OLS.Y" = c("cf_cvg", "avg_vis", "stable_lights", "pct_lights", "avg_lights_x_pct"), "VIIRS.D" = c("vcmcfg", "vcmsl"), "VIIRS.M" = c("vcmcfg", "vcmsl"), "VIIRS.Y" = c("vcm-orm", "vcm-orm-ntl", "vcm-ntl")) if(missing(nlType)) return(allConfigNames) sapply(nlType, function(x) { pos <- grep(pattern = paste0("^",x,"$"), x = names(allConfigNames)) if(length(pos) == 0) return(NA) allConfigNames[pos] }, USE.NAMES = F) } ######################## validNlConfigName ################################### #' Check if a configName is valid for a given nlType #' #' Check if a configName is valid for a given nlType #' #' @param configName the raster in use #' #' @param nlType types of nightlight to check #' #' @return logical a vector of logical values #' #' @examples #' Rnightlights:::validNlConfigName("VCMCFG", "OLS.Y") #' #returns FALSE #' #' Rnightlights:::validNlConfigName("VCMCFG", "VIIRS.M") #' #returns TRUE #' validNlConfigName <- function(configName, nlType) { toupper(configName) %in% toupper(unlist(getAllNlConfigNames(nlType))) } ######################## downloadNlTiles ################################### #' Download the listed tiles for a given nlType in a given nlPeriod #' #' Download the listed tiles for a given nlType in a given nlPeriod #' #' @param nlType character The nightlight type #' #' @param nlPeriod character The nlPeriod to process in the appropriate #' format #' #' @param tileList integer vector or character vector of digits containing #' valid tile numbers as obtained by tileName2Idx for VIIRS. 
Ignore for #' nlType=="OLS" #' #' @return TRUE/FALSE if the download was successful #' #' @examples #' #download VIIRS tiles for "KEN" which are tiles 2 and 5 for the specified #' #time periods #' \dontrun{ #' Rnightlights:::downloadNlTiles("VIIRS.M", "201401", c(2, 5)) #' } #' #' #same as above but getting the tileList automatically #' \dontrun{ #' Rnightlights:::downloadNlTiles(nlType="VIIRS.M", #' nlPeriod="201401", #' tileList=Rnightlights:::getCtryTileList(ctryCodes="KEN", #' nlType="VIIRS.M") #' ) #' } #' #' #returns TRUE if the download was successful or tile is cached locally #' downloadNlTiles <- function(nlType, configName=pkgOptions(paste0("configName_", nlType)), nlPeriod, tileList, multiTileStrategy = pkgOptions("multiTileStrategy")) { if(missing(nlType)) stop(Sys.time(), ": Missing required parameter nlType") if(missing(nlPeriod)) stop(Sys.time(), ": Missing required parameter nlPeriod") if(stringr::str_detect(nlType, "VIIRS") && missing(tileList)) stop(Sys.time(), ": Missing required parameter tileList") if(!validNlTypes(nlType)) stop(Sys.time(), ": Invalid nlType detected") if(!allValidNlPeriods(nlPeriods = nlPeriod, nlTypes = nlType)) stop(Sys.time(), ": Invalid nlPeriod: ", nlPeriod) if(stringr::str_detect(nlType, "VIIRS") && !allValid(tileList, validNlTileNameVIIRS, nlType)) stop(Sys.time(), ": Invalid tile detected") success <- TRUE #ensure we have all required tiles if(stringr::str_detect(nlType, "OLS")) success <- success && downloadNlTilesOLS(nlPeriod = nlPeriod, downloadMethod = pkgOptions("downloadMethod"), nlType = nlType, configName = configName, multiTileStrategy = multiTileStrategy) else if(stringr::str_detect(nlType, "VIIRS")) for (tile in tileList) { nlTile <- tileName2Idx(tile, nlType) message(Sys.time(), ": Downloading tile: ", paste0(nlPeriod, nlTile)) #download tile success <- success && downloadNlTilesVIIRS(nlPeriod = nlPeriod, tileNum = nlTile, nlType = nlType, configName = configName) } return (success) } 
######################## getCtryTileList ################################### #' Returns a list of VIIRS nightlight tiles that a country or countries #' intersects with #' #' Given a list of countries, this function will provide alist of VIIRS #' nightlight tiles that intersect with them. This helps in processing #' multiple countries by determining which nightlight tiles are required #' for processing by allowing the download of all required tiles before #' processing. Note all VIIRS_* nlTypes have the same nlTiles. #' #' @param ctryCodes character vector of country codes to process #' #' @param nlType character string The nlType of interest #' #' @param omitCountries countries to exclude from processing. This is #' helpful when the number of countries to exclude is smaller than #' the number to process e.g. when one wants to process all countries #' and exclude countries that take long to process i.e. #' omitCountries = "long" #' #' @return TRUE/FALSE #' #' @examples #' Rnightlights:::getCtryTileList(ctryCodes=c("BDI", "KEN", "RWA", "TZA", "UGA"), #' nlType="VIIRS.M", omitCountries="none") #' #' #only 1 tile for OLS #' Rnightlights:::getCtryTileList(ctryCodes=c("BDI", "KEN", "RWA", "TZA", "UGA"), #' nlType="OLS.Y", omitCountries="none") #' #returns "DUMMY" #' getCtryTileList <- function(ctryCodes, nlType, omitCountries="none") { if(missing(ctryCodes)) stop(Sys.time(), ": Missing required parameter ctryCodes") if(missing(nlType)) stop(Sys.time(), ": Missing required parameter nlType") if(!allValid(ctryCodes, validCtryCodes)) stop(Sys.time(), ": Invalid ctryCode(s) detected") if(!validNlTypes(nlType)) stop(Sys.time(), ": Invalid nlType: ", nlType) if(stringr::str_detect(nlType, "OLS")) ctryTiles <- "DUMMY" else if(stringr::str_detect(nlType, "VIIRS")) ctryTiles <- unlist(mapCtryPolyToTilesVIIRS(ctryCodes, omitCountries)$tiles) return (ctryTiles) } ######################## getNlTiles ################################### #' Create mapping of nightlight tiles #' #' 
Creates a data.frame mapping nightlight tile names to their vertice coordinates. This is used to #' identify nightlight tiles as well as to build a spatial polygons dataframe used to plot the tiles. OLS #' only has one tile for the whole world and thus has a dummy entry. OLS is included to #' prevent code duplication by writing separate functions for OLS. #' #' @param nlType the nlType of interest #' #' @return A data.frame of names of tiles and lon-lat coordinate of top-left corner of each #' #' @examples #' Rnightlights:::getNlTiles("VIIRS.M") #' #' Rnightlights:::getNlTiles("OLS.Y") #' getNlTiles <- function(nlType) { #6 nightlight tiles named by top-left geo coordinate numbered from left-right & top-bottom #creates columns as strings. createSpPolysDF converts relevant columns to numeric nlTiles <- data.frame( id=c(1,1,2,3,4,5,6), type=c("OLS","VIIRS","VIIRS","VIIRS","VIIRS","VIIRS","VIIRS"), name=c("DUMMY", "75N180W", "75N060W", "75N060E", "00N180W", "00N060W", "00N060E"), minx=c(-1, -180, -60, 60, -180, -60, 60), maxx=c(-1, -60, 60, 180, -60, 60, 180), miny=c(-1, 0, 0, 0, -75, -75, -75), maxy=c(-1, 75, 75, 75, 0, 0, 0), stringsAsFactors=FALSE) if(!missing(nlType)) { if(!validNlTypes(nlType)) stop(Sys.time(), ": Invalid nlType") if(length(grep("VIIRS", nlType)) > 0) nlType <- "VIIRS" else if(length(grep("OLS", nlType)) > 0) nlType <- "OLS" nlTiles <- nlTiles[grepl(nlType, nlTiles$type),] } return (nlTiles) } ######################## createNlTilesSpPolysDF ################################### #' Creates a tile Spatial Polygons DataFrame from the \code{"nlTiles"} dataframe #' #' Creates a Spatial Polygons DataFrame from the \code{"nlTiles"} dataframe of VIIRS tiles #' #' @return TRUE/FALSE #' #' @examples #' tilesSpPolysDFs <- Rnightlights:::createNlTilesSpPolysDF() #' createNlTilesSpPolysDF <- function() { if (!exists("nlTiles")) { nlTiles <- getNlTiles(grep("VIIRS", getAllNlTypes(), value = T)[1]) } wgs84 <- getCRS() #convert nlTiles min/max columns to numeric 
for (cIdx in grep("id|min|max", names(nlTiles))) nlTiles[,cIdx] <- as.numeric(as.character(nlTiles[, cIdx])) #create the empty obj to hold the data frame of tile PolygonsDataFrams tilesSpPolysDFs <- NULL #for each row in nlTiles for (i in 1:nrow(nlTiles)) { #grab the row containing the tile t <- nlTiles[i,] #convert the tile x,y extents to a matrix #format is 2 cols x & y tMat <- as.matrix(cbind(rbind(t$minx, t$maxx, t$maxx, t$minx), rbind(t$maxy, t$maxy, t$miny, t$miny))) #create a Polygon object from the tile extents matrix tPoly <- list(sp::Polygon(tMat)) #create a Polygons object with a list of 1 polygon tPolys <- sp::Polygons(srl = tPoly, ID = i) #create a SpatialPolygons object with a list of 1 list of Polygons tilesSpPolys <- sp::SpatialPolygons(Srl = list(tPolys)) #we assign the CRS at this point (note other objects cannot be assigned CRS) raster::projection(tilesSpPolys) <- sp::CRS(wgs84) #convert the SpatialPolygons object into a SpatialPolygonsDataFrame #tilesSpPolysDF <- methods::as(tilesSpPolys, "SpatialPolygonsDataFrame") #z used for plotCtryWithTilesVIIRS to color the tiles tilesSpPolysDF <- sp::SpatialPolygonsDataFrame(tilesSpPolys, data.frame(z=factor(i), name=nlTiles[i,"name"], row.names=i)) #append the SPDF into a dataframe of SPDFs if (is.null(tilesSpPolysDFs)) tilesSpPolysDFs <- tilesSpPolysDF else tilesSpPolysDFs <- sp::rbind.SpatialPolygonsDataFrame(tilesSpPolysDFs, tilesSpPolysDF) } return (tilesSpPolysDFs) } ######################## plotCtryWithTilesVIIRS ################################### #' Plot a country boundary with the VIIRS tiles and world map #' #' Plot a country boundary as defined in the \pkg{rworldmap} package along #' with the VIIRS nightlight tiles for a visual inspection of the tiles #' required for download in order to process a country's nightlight #' data. Output corresponds to that of \code{getCtryNlTiles()} #' #' It utilizes \code{rworldmap::rwmgetISO3()} to resolve country #' codes as well as names. 
#'
#' @param ctry \code{character} the 3-letter ISO3 country code e.g. "KEN"
#'     or a common name of the country e.g. "Kenya" as found valid by
#'     \code{rworldmap::rwmgetISO3()}
#'
#' @return None
#'
#' @examples
#' #by ctryCode
#' \dontrun{plotCtryWithTilesVIIRS("KEN")}
#'
#' @export
plotCtryWithTilesVIIRS <- function(ctry)
{
  #validate input: a single country code or name, supplied as character
  if(missing(ctry))
    stop(Sys.time(), ": You must supply a country code or index")

  if(!is.character(ctry))
    stop(Sys.time(), ": The parameter you supplied needs to be type character")

  crsWgs84 <- getCRS()

  #the rworldmap world map supplies the country polygons
  worldMap <- getWorldMap()

  #build the tile polygons only when a cached copy is not already in scope
  if(!exists("tilesSpPolysDFs"))
    tilesSpPolysDFs <- createNlTilesSpPolysDF()

  #resolve the input first as a name, falling back to treating it as a code
  ctryISO3 <- ctryNameToCode(ctry)

  if(is.na(ctryISO3))
    ctryName <- ctryCodeToName(ctry)

  if(is.na(ctryISO3) && !is.na(ctryName))
    ctryISO3 <- ctryNameToCode(ctryName)

  if(is.na(ctryISO3))
    stop(Sys.time(), ": Invalid ctryCode/Name ", ctry)

  #an empty ISO3 means resolution failed
  if (is.na(ctryISO3) || ctryISO3 == "")
    return("Country code/name not found")

  #locate the country's polygon in the world map
  ctryIdx <- which(as.character(worldMap@data$ISO3) == ctryISO3)

  ctryPolyObj <- worldMap@polygons[[ctryIdx]]

  plotTitle <- paste0("VIIRS Nightlight Tiles Required for:\n",
                      worldMap@data$ADMIN[[ctryIdx]],
                      " (", worldMap@data$ISO3[[ctryIdx]], ")")

  #wrap the single Polygons object and stamp the WGS84 CRS on it
  ctrySp <- sp::SpatialPolygons(Srl = list(ctryPolyObj))

  raster::projection(ctrySp) <- sp::CRS(crsWgs84)

  ctrySpDF <- methods::as(ctrySp, "SpatialPolygonsDataFrame")

  #country bounding box, padded by 10 units; helps spot very small countries
  bb <- raster::extent(ctrySpDF)

  bb@xmin <- bb@xmin - 10
  bb@xmax <- bb@xmax + 10
  bb@ymin <- bb@ymin - 10
  bb@ymax <- bb@ymax + 10

  #spplot needs a SpatialLines object rather than a raw Extent
  bbLines <- methods::as(bb, 'SpatialLines')

  #indices of the tiles this country touches; these get highlighted
  intersectIdx <- tileName2Idx(tileName = getTilesCtryIntersectVIIRS(worldMap@data$ISO3[[ctryIdx]]),
                               nlType = grep("VIIRS", getAllNlTypes(), value = TRUE)[1])

  #subtitle mapping each tile index to its tile name
  tileLegend <- paste(tilesSpPolysDFs@data$z, tilesSpPolysDFs@data$name, sep = "=")

  #draw: tiles (intersecting ones shaded), world map, country, bbox, labels
  sp::spplot(tilesSpPolysDFs,
             zcol = "z",
             col.regions = as.vector(ifelse(seq_len(nrow(tilesSpPolysDFs)) %in% intersectIdx, "lightblue", "transparent")),
             colorkey = FALSE,
             sp.layout = list(list(worldMap, col = 'grey', fill = 'transparent', first = FALSE),
                              list(ctrySpDF, col = 'black', fill = 'blue', first = FALSE),
                              list('sp.lines', bbLines, col = 'green', lwd = 2),
                              list('sp.text', sp::coordinates(tilesSpPolysDFs), seq_len(nrow(tilesSpPolysDFs)), col = 'black', cex = 2)
             ),
             main = plotTitle,
             sub = tileLegend
  )
}

######################## mapAllCtryPolyToTilesVIIRS ###################################

#' Create a mapping of all countries and the tiles they intersect
#'
#' This is simply another name for mapCtryPolyToTilesVIIRS with ctryCodes="all"
#'
#' @param omitCountries A character vector or list of countries to leave
#'     out when processing. Default is \code{"none"}
#'
#' @return A data frame of countries and the tiles they intersect with
#'
#' @examples
#' #no countries omitted
#' \dontrun{
#' tileMap <- Rnightlights:::mapAllCtryPolyToTilesVIIRS()
#' }
#'
#' #no countries omitted
#' \dontrun{
#' tileMap <- Rnightlights:::mapAllCtryPolyToTilesVIIRS(omitCountries="none")
#' }
#'
#' #include countries that take long to process
#' \dontrun{
#' tileMap <- Rnightlights:::mapAllCtryPolyToTilesVIIRS(omitCountries=c("error", "long"))
#' }
#'
mapAllCtryPolyToTilesVIIRS <- function(omitCountries=pkgOptions("omitCountries"))
{
  #delegate, naming both arguments explicitly
  mapCtryPolyToTilesVIIRS(ctryCodes = "all", omitCountries = omitCountries)
}

######################## mapCtryPolyToTilesVIIRS ###################################

#' Create a mapping of all countries and the tiles they intersect
#'
#' Create a dataframe mapping each country in the rworldmap to the VIIRS
#'     tiles which they intersect with and thus need to be retrieved to
#'     process their nightlight imagery. Since some functions use this
#'     dataframe for long-term processing, omitCountries can eliminate
#'     countries that should be excluded from the list hence from processing.
#'     Countries can be added in the omitCountries function. Default is "none".
#'
#' @param ctryCodes A character vector or list of countries to map. Default
#'     is \code{"all"}
#' @param omitCountries A character vector or list of countries to leave out.
#'     Default is \code{"none"}
#'
#' @return ctryCodeTiles A data frame of countries and the tiles they
#'     intersect with as given by \code{getNlTiles}
#'
#' @examples
#' #map all countries
#' \dontrun{
#' tileMap <- Rnightlights:::mapCtryPolyToTilesVIIRS()
#' }
#'
#' #map all countries, no countries omitted
#' \dontrun{
#' tileMap <- Rnightlights:::mapCtryPolyToTilesVIIRS(ctryCodes="all", omitCountries="none")
#' }
#'
#' #will not omit countries that do not have polygons on GADM
#' \dontrun{
#' tileMap <- Rnightlights:::mapCtryPolyToTilesVIIRS(omitCountries=c("error", "missing"))
#' }
#'
mapCtryPolyToTilesVIIRS <- function(ctryCodes="all", omitCountries=pkgOptions("omitCountries"))
{
  #"all" expands to the full country list; anything else is used as-is
  if (length(ctryCodes) == 1 && tolower(ctryCodes) == "all")
  {
    ctryCodes <- getAllNlCtryCodes(omitCountries)
  }

  map <- getWorldMap()

  wgs84 <- getCRS()

  #indices of the requested countries' polygons in the world map
  ctryCodeIdx <- which(map@data$ISO3 %in% ctryCodes)

  #NOTE(review): if any ctryCode is absent from the rworldmap data,
  #ctryCodeIdx is shorter than ctryCodes and the code/tiles columns below
  #will not line up row-for-row -- confirm all ctryCodes exist in the map

  ctryCodeTiles <- NULL

  #for each matched country polygon
  for (i in ctryCodeIdx)
  {
    ctryPolys <- map@polygons[[i]]

    #wrap the country's Polygons object and stamp the WGS84 CRS on it
    ctrySpPolys <- sp::SpatialPolygons(Srl = list(ctryPolys))

    raster::projection(ctrySpPolys) <- sp::CRS(wgs84)

    #find the tiles the polygon intersects with and append to the list
    ctryCodeTiles <- rbind(ctryCodeTiles, list(tilesPolygonIntersectVIIRS(ctrySpPolys)))
  }

  #combine the ctryCodes and intersecting tile columns into a dataframe
  ctryCodeTiles <- as.data.frame(cbind(code = as.character(ctryCodes), tiles = ctryCodeTiles))

  names(ctryCodeTiles) <- c("code", "tiles")

  #the code column is picked up as factor; force it back to character
  ctryCodeTiles$code <- as.character(ctryCodeTiles$code)

  return(ctryCodeTiles)
}

######################## getTilesCtryIntersectVIIRS ###################################

#' Get a list of tiles that a country polygon intersects with
#'
#' Get the VIIRS tiles that a single country's polygon intersects with
#'     and which thus need to be retrieved to process its nightlight
#'     imagery.
#'
#' @param ctryCode The country's ISO3 code
#'
#' @return character vector of intersecting tile names, or NA for an
#'     invalid ctryCode
#'
#' @examples
#'
#' Rnightlights:::getTilesCtryIntersectVIIRS("KEN")
#'
getTilesCtryIntersectVIIRS <- function(ctryCode)
{
  if(missing(ctryCode))
    stop(Sys.time(), ": Missing required parameter ctryCode")

  ctryCode <- as.character(ctryCode)

  #warn rather than stop on an unknown code so batch callers can continue
  if(!validCtryCodes(ctryCode))
  {
    warning("Invalid/Unknown ctryCode: ", ctryCode)

    return(NA)
  }

  ctryISO3 <- ctryCode

  map <- getWorldMap()

  wgs84 <- getCRS()

  if (is.na(ctryISO3) || ctryISO3 == "")
    return("Unknown country")

  #locate the country's polygon in the world map
  idx <- which(map@data$ISO3 == ctryISO3)

  ctryCodeTiles <- NULL

  ctryPolys <- map@polygons[[idx]]

  #create a SpatialPolygons object with a list of 1 list of Polygons
  ctrySpPolys <- sp::SpatialPolygons(Srl = list(ctryPolys))

  raster::projection(ctrySpPolys) <- sp::CRS(wgs84)

  ctryCodeTiles <- tilesPolygonIntersectVIIRS(ctrySpPolys)

  return (ctryCodeTiles)
}

######################## validNlTileNameVIIRS ###################################

#' Check valid VIIRS nightlight tile name
#'
#' Check if a tile name is valid for a given VIIRS nightlight type.
#'
#' @param tileName the name of the tile
#'
#' @param nlType character the nlType
#'
#' @return TRUE/FALSE
#'
#' @examples
#' Rnightlights:::validNlTileNameVIIRS("00N060W", "VIIRS.M")
#' #returns TRUE
#'
validNlTileNameVIIRS <- function(tileName, nlType)
{
  if(missing(tileName))
    stop(Sys.time(), ": Missing required parameter tileName")

  if(!is.character(tileName) || is.null(tileName) || is.na(tileName) || tileName == "")
    stop(Sys.time(), ": Invalid tileName: ", tileName)

  #a name is valid iff it resolves to at least one tile index
  return(length(tileName2Idx(tileName, nlType)) != 0)
}

######################## tileName2Idx ###################################

#' Get the index of a tile given its name
#'
#' Get the index of a VIIRS tile as given by getNlTiles() given its name
#'
#' @param tileName name as given by getNlTiles()
#'
#' @param nlType the nlType of interest
#'
#' @return Integer index of the tile
#'
#' @examples
#' Rnightlights:::tileName2Idx("00N060W", "VIIRS.M")
#'
tileName2Idx <- function(tileName, nlType)
{
  if (missing(tileName))
    stop(Sys.time(), ": Missing required parameter tileName")

  if (missing(nlType))
    stop(Sys.time(), ": Missing required parameter nlType")

  if(!is.character(tileName) || is.null(tileName) || is.na(tileName) || tileName == "")
    stop(Sys.time(), ": Invalid tileName: ", tileName)

  nlType <- toupper(nlType)

  tileName <- toupper(tileName)

  #reuse a cached global nlTiles if present, else fetch for this nlType
  if (!exists("nlTiles"))
    nlTiles <- getNlTiles(nlType)

  return (which(nlTiles$name %in% tileName))
}

######################## tileIdx2Name ###################################

#' Get the name of a tile given its index
#'
#' Get the name of a VIIRS tile as given by getNlTiles() given its index
#'
#' @param tileNum index as given by getNlTiles()
#'
#' @param nlType the nlType of interest
#'
#' @return Character name of the tile
#'
#' @examples
#' Rnightlights:::tileIdx2Name(5, "VIIRS.M") #returns "00N060W"
#'
tileIdx2Name <- function(tileNum, nlType)
{
  if(missing(tileNum))
    stop(Sys.time(), ": Missing required parameter tileNum")

  if(missing(nlType))
    stop(Sys.time(), ": Missing required parameter nlType")

  if(!validNlTypes(nlType))
    stop(Sys.time(), ": Invalid nlType: ", nlType)

  if(!validNlTileNumVIIRS(tileNum, nlType))
    stop(Sys.time(), ": Invalid tileNum: ", tileNum)

  #NOTE(review): a pre-existing global nlTiles is used as-is; if it holds
  #the unfiltered tile table the positional lookup below may be off -- confirm
  if (!exists("nlTiles"))
    nlTiles <- getNlTiles(nlType)

  nlType <- toupper(nlType)

  #positional lookup of the tile name
  return(nlTiles[as.numeric(tileNum), "name"])
}

######################## tilesPolygonIntersectVIIRS ###################################

#' Get the list of VIIRS tiles that a polygon intersects with
#'
#' Get the list of VIIRS tiles that a polygon intersects with
#'
#' @param shpPolygon a SpatialPolygon or SpatialPolygons
#'
#' @return Character vector of the intersecting tiles as given by \code{getNlTiles}
#'
#' @examples
#' \dontrun{
#' #download shapefile if it doesn't exist
#' ctryShapefile <- Rnightlights:::dnldCtryPoly("KEN")
#'
#' #read in shapefile top layer
#' ctryPoly <- readCtryPolyAdmLayer("KEN",
#'     Rnightlights:::getCtryShpLyrNames("KEN",0))
#'
#' #get list of intersecting tiles
#' tileList <- Rnightlights:::tilesPolygonIntersectVIIRS(ctryPoly)
#' }
#'
tilesPolygonIntersectVIIRS <- function(shpPolygon)
{
  if(missing(shpPolygon))
    stop(Sys.time(), ": Missing required parameter shpPolygon")

  #use cached tile polygons/table when present, else build/fetch them
  if (!exists("tilesSpPolysDFs"))
  {
    tilesSpPolysDFs <- createNlTilesSpPolysDF()
  }

  if (!exists("nlTiles"))
    nlTiles <- getNlTiles(grep("VIIRS", getAllNlTypes(), value = TRUE)[1])

  wgs84 <- getCRS()

  raster::projection(shpPolygon) <- sp::CRS(wgs84)

  #preallocated logical mask: does the polygon intersect tile i?
  tileIdx <- logical(nrow(tilesSpPolysDFs))

  for (i in seq_len(nrow(tilesSpPolysDFs)))
  {
    tileIdx[i] <- rgeos::gIntersects(tilesSpPolysDFs[i,], shpPolygon)
  }

  #names of the tiles that intersected with the polygon
  return (nlTiles[tileIdx, "name"])
}

######################## validNlTileNumVIIRS ###################################

#' Check valid tile number for a given VIIRS nightlight type
#'
#' Check if a tile number is valid for a given VIIRS nightlight type.
#'
#' @param nlTileNum the index of the tile
#'
#' @param nlType A character string of nlType
#'
#' @return TRUE/FALSE
#'
#' @examples
#' Rnightlights:::validNlTileNumVIIRS("1", "VIIRS.M")
#' #returns TRUE
#'
#' Rnightlights:::validNlTileNumVIIRS("9", "VIIRS.D")
#' #returns FALSE
#'
validNlTileNumVIIRS <- function(nlTileNum, nlType)
{
  #check for missing args BEFORE touching them: the original coerced
  #nlTileNum first, which defeated the missing() check
  if (missing(nlTileNum))
    stop(Sys.time(), ": Missing parameter nlTileNum")

  if (missing(nlType))
    stop(Sys.time(), ": Missing parameter nlType")

  nlTileNum <- as.character(nlTileNum)

  #reject empty or non-digit tile numbers (paren fix: the non-digit count
  #was previously wrapped as length(grep(...) > 0), working by accident)
  if (!is.character(nlTileNum) || nlTileNum == "" || length(nlTileNum) == 0 || length(grep("[^[:digit:]]", nlTileNum)) > 0)
    return(FALSE)

  if(!exists("nlTiles"))
    nlTiles <- getNlTiles(nlType)

  nlT <- as.numeric(nlTileNum)

  #bug fix: compare against the number of tile rows; length() of a
  #data.frame is its column count, which wrongly validated extra indices
  return(nlT >= 1 && nlT <= nrow(nlTiles))
}
/R/tiles.R
no_license
anhnguyendepocen/Rnightlights
R
false
false
26,575
r
######################## getAllNlConfigNames ###################################

#' Generate a list of all possible configNames for a given nlType
#'
#' Generate a list of all possible configNames for a given nlType
#'
#' @param nlType if present show only configNames matching the nlType
#'
#' @examples
#' getAllNlConfigNames("OLS.Y")
#' #returns '"cf_cvg", "avg_vis", "stable_lights", "pct_lights", "avg_lights_x_pct"'
#'
#' @export
getAllNlConfigNames <- function(nlType)
{
  #master lookup of configName vectors keyed by nlType
  allConfigNames <- list(
      "OLS.Y" = c("cf_cvg", "avg_vis", "stable_lights", "pct_lights", "avg_lights_x_pct"),
      "VIIRS.D" = c("vcmcfg", "vcmsl"),
      "VIIRS.M" = c("vcmcfg", "vcmsl"),
      "VIIRS.Y" = c("vcm-orm", "vcm-orm-ntl", "vcm-ntl"))

  #with no nlType return the full lookup
  if(missing(nlType))
    return(allConfigNames)

  #for each supplied nlType pick the matching entry or NA if unknown;
  #exact string match replaces the old regex, where the unescaped "."
  #in e.g. "^OLS.Y$" could match any character
  sapply(nlType, function(x)
  {
    pos <- which(names(allConfigNames) == x)

    if(length(pos) == 0)
      return(NA)

    allConfigNames[pos]
  }, USE.NAMES = FALSE)
}

######################## validNlConfigName ###################################

#' Check if a configName is valid for a given nlType
#'
#' Check if a configName is valid for a given nlType
#'
#' @param configName the configName to check (case-insensitive)
#'
#' @param nlType types of nightlight to check
#'
#' @return logical a vector of logical values
#'
#' @examples
#' Rnightlights:::validNlConfigName("VCMCFG", "OLS.Y")
#' #returns FALSE
#'
#' Rnightlights:::validNlConfigName("VCMCFG", "VIIRS.M")
#' #returns TRUE
#'
validNlConfigName <- function(configName, nlType)
{
  #case-insensitive membership test against the known configNames
  toupper(configName) %in% toupper(unlist(getAllNlConfigNames(nlType)))
}

######################## downloadNlTiles ###################################

#' Download the listed tiles for a given nlType in a given nlPeriod
#'
#' Download the listed tiles for a given nlType in a given nlPeriod
#'
#' @param nlType character The nightlight type
#'
#' @param configName character the configName to download; defaults to the
#'     package option \code{configName_<nlType>}
#'
#' @param nlPeriod character The nlPeriod to process in the appropriate
#'     format
#'
#' @param tileList integer vector or character vector of digits containing
#'     valid tile numbers as obtained by tileName2Idx for VIIRS. Ignore for
#'     nlType=="OLS"
#'
#' @param multiTileStrategy strategy for handling multiple tiles; passed
#'     through to \code{downloadNlTilesOLS}
#'
#' @return TRUE/FALSE if the download was successful
#'
#' @examples
#' #download VIIRS tiles for "KEN" which are tiles 2 and 5 for the specified
#' #time periods
#' \dontrun{
#' Rnightlights:::downloadNlTiles("VIIRS.M", "201401", c(2, 5))
#' }
#'
#' #same as above but getting the tileList automatically
#' \dontrun{
#' Rnightlights:::downloadNlTiles(nlType="VIIRS.M",
#'     nlPeriod="201401",
#'     tileList=Rnightlights:::getCtryTileList(ctryCodes="KEN",
#'         nlType="VIIRS.M")
#'     )
#' }
#'
#' #returns TRUE if the download was successful or tile is cached locally
#'
downloadNlTiles <- function(nlType, configName=pkgOptions(paste0("configName_", nlType)), nlPeriod, tileList, multiTileStrategy = pkgOptions("multiTileStrategy"))
{
  if(missing(nlType))
    stop(Sys.time(), ": Missing required parameter nlType")

  if(missing(nlPeriod))
    stop(Sys.time(), ": Missing required parameter nlPeriod")

  #tileList is only meaningful for the tiled VIIRS products
  if(stringr::str_detect(nlType, "VIIRS") && missing(tileList))
    stop(Sys.time(), ": Missing required parameter tileList")

  if(!validNlTypes(nlType))
    stop(Sys.time(), ": Invalid nlType detected")

  if(!allValidNlPeriods(nlPeriods = nlPeriod, nlTypes = nlType))
    stop(Sys.time(), ": Invalid nlPeriod: ", nlPeriod)

  if(stringr::str_detect(nlType, "VIIRS") && !allValid(tileList, validNlTileNameVIIRS, nlType))
    stop(Sys.time(), ": Invalid tile detected")

  success <- TRUE

  #ensure we have all required tiles; any single failure flips success
  if(stringr::str_detect(nlType, "OLS"))
    success <- success && downloadNlTilesOLS(nlPeriod = nlPeriod,
                                             downloadMethod = pkgOptions("downloadMethod"),
                                             nlType = nlType,
                                             configName = configName,
                                             multiTileStrategy = multiTileStrategy)
  else if(stringr::str_detect(nlType, "VIIRS"))
    for (tile in tileList)
    {
      nlTile <- tileName2Idx(tile, nlType)

      message(Sys.time(), ": Downloading tile: ", paste0(nlPeriod, nlTile))

      #download tile
      success <- success && downloadNlTilesVIIRS(nlPeriod = nlPeriod,
                                                 tileNum = nlTile,
                                                 nlType = nlType,
                                                 configName = configName)
    }

  return (success)
}
######################## getCtryTileList ###################################

#' Returns a list of VIIRS nightlight tiles that a country or countries
#'     intersects with
#'
#' Given a list of countries, this function will provide a list of VIIRS
#'     nightlight tiles that intersect with them. This helps in processing
#'     multiple countries by determining which nightlight tiles are required
#'     for processing by allowing the download of all required tiles before
#'     processing. Note all VIIRS_* nlTypes have the same nlTiles.
#'
#' @param ctryCodes character vector of country codes to process
#'
#' @param nlType character string The nlType of interest
#'
#' @param omitCountries countries to exclude from processing. This is
#'     helpful when the number of countries to exclude is smaller than
#'     the number to process e.g. when one wants to process all countries
#'     and exclude countries that take long to process i.e.
#'     omitCountries = "long"
#'
#' @return character vector of the tile names required by the countries
#'
#' @examples
#' Rnightlights:::getCtryTileList(ctryCodes=c("BDI", "KEN", "RWA", "TZA", "UGA"),
#'     nlType="VIIRS.M", omitCountries="none")
#'
#' #only 1 tile for OLS
#' Rnightlights:::getCtryTileList(ctryCodes=c("BDI", "KEN", "RWA", "TZA", "UGA"),
#'     nlType="OLS.Y", omitCountries="none")
#' #returns "DUMMY"
#'
getCtryTileList <- function(ctryCodes, nlType, omitCountries="none")
{
  if(missing(ctryCodes))
    stop(Sys.time(), ": Missing required parameter ctryCodes")

  if(missing(nlType))
    stop(Sys.time(), ": Missing required parameter nlType")

  if(!allValid(ctryCodes, validCtryCodes))
    stop(Sys.time(), ": Invalid ctryCode(s) detected")

  if(!validNlTypes(nlType))
    stop(Sys.time(), ": Invalid nlType: ", nlType)

  #OLS ships as a single world image so a single dummy tile suffices
  if(stringr::str_detect(nlType, "OLS"))
    ctryTiles <- "DUMMY"
  else if(stringr::str_detect(nlType, "VIIRS"))
    ctryTiles <- unlist(mapCtryPolyToTilesVIIRS(ctryCodes, omitCountries)$tiles)

  return (ctryTiles)
}

######################## getNlTiles ###################################

#' Create mapping of nightlight tiles
#'
#' Creates a data.frame mapping nightlight tile names to their vertice coordinates. This is used to
#'     identify nightlight tiles as well as to build a spatial polygons dataframe used to plot the tiles. OLS
#'     only has one tile for the whole world and thus has a dummy entry. OLS is included to
#'     prevent code duplication by writing separate functions for OLS.
#'
#' @param nlType the nlType of interest
#'
#' @return A data.frame of names of tiles and lon-lat coordinate of top-left corner of each
#'
#' @examples
#' Rnightlights:::getNlTiles("VIIRS.M")
#'
#' Rnightlights:::getNlTiles("OLS.Y")
#'
getNlTiles <- function(nlType)
{
  #6 nightlight tiles named by top-left geo coordinate numbered from left-right & top-bottom;
  #the OLS dummy row uses -1 for all coordinates
  nlTiles <- data.frame(
      id=c(1,1,2,3,4,5,6),
      type=c("OLS","VIIRS","VIIRS","VIIRS","VIIRS","VIIRS","VIIRS"),
      name=c("DUMMY", "75N180W", "75N060W", "75N060E", "00N180W", "00N060W", "00N060E"),
      minx=c(-1, -180, -60, 60, -180, -60, 60),
      maxx=c(-1, -60, 60, 180, -60, 60, 180),
      miny=c(-1, 0, 0, 0, -75, -75, -75),
      maxy=c(-1, 75, 75, 75, 0, 0, 0),
      stringsAsFactors=FALSE)

  #when an nlType is given, reduce it to its family and filter the rows
  if(!missing(nlType))
  {
    if(!validNlTypes(nlType))
      stop(Sys.time(), ": Invalid nlType")

    if(length(grep("VIIRS", nlType)) > 0)
      nlType <- "VIIRS"
    else if(length(grep("OLS", nlType)) > 0)
      nlType <- "OLS"

    nlTiles <- nlTiles[grepl(nlType, nlTiles$type),]
  }

  return (nlTiles)
}

######################## createNlTilesSpPolysDF ###################################

#' Creates a tile Spatial Polygons DataFrame from the \code{"nlTiles"} dataframe
#'
#' Creates a Spatial Polygons DataFrame from the \code{"nlTiles"} dataframe of VIIRS tiles
#'
#' @return a SpatialPolygonsDataFrame with one polygon per VIIRS tile
#'
#' @examples
#' tilesSpPolysDFs <- Rnightlights:::createNlTilesSpPolysDF()
#'
createNlTilesSpPolysDF <- function()
{
  #reuse a cached global nlTiles if present, else fetch the VIIRS tile table
  if (!exists("nlTiles"))
  {
    nlTiles <- getNlTiles(grep("VIIRS", getAllNlTypes(), value = TRUE)[1])
  }

  wgs84 <- getCRS()

  #coerce the id/min/max columns to numeric in case a cached copy holds strings
  for (cIdx in grep("id|min|max", names(nlTiles)))
    nlTiles[,cIdx] <- as.numeric(as.character(nlTiles[, cIdx]))

  #accumulator for the combined tile SpatialPolygonsDataFrame
  tilesSpPolysDFs <- NULL

  #for each row in nlTiles
  for (i in seq_len(nrow(nlTiles)))
  {
    #grab the row containing the tile (renamed from "t" to avoid shadowing base::t)
    tile <- nlTiles[i,]

    #tile corner coordinates as a 2-col (x, y) matrix
    #NOTE(review): the ring is not explicitly closed; sp::Polygon appears to
    #close it -- confirm
    tMat <- as.matrix(cbind(rbind(tile$minx, tile$maxx, tile$maxx, tile$minx),
                            rbind(tile$maxy, tile$maxy, tile$miny, tile$miny)))

    #create a Polygon object from the tile extents matrix
    tPoly <- list(sp::Polygon(tMat))

    #create a Polygons object with a list of 1 polygon
    tPolys <- sp::Polygons(srl = tPoly, ID = i)

    #create a SpatialPolygons object with a list of 1 list of Polygons
    tilesSpPolys <- sp::SpatialPolygons(Srl = list(tPolys))

    #we assign the CRS at this point (note other objects cannot be assigned CRS)
    raster::projection(tilesSpPolys) <- sp::CRS(wgs84)

    #z is used by plotCtryWithTilesVIIRS to color the tiles
    tilesSpPolysDF <- sp::SpatialPolygonsDataFrame(tilesSpPolys,
                                                   data.frame(z=factor(i),
                                                              name=nlTiles[i,"name"],
                                                              row.names=i))

    #append the single-tile SPDF to the accumulated SPDF
    if (is.null(tilesSpPolysDFs))
      tilesSpPolysDFs <- tilesSpPolysDF
    else
      tilesSpPolysDFs <- sp::rbind.SpatialPolygonsDataFrame(tilesSpPolysDFs, tilesSpPolysDF)
  }

  return (tilesSpPolysDFs)
}

######################## plotCtryWithTilesVIIRS ###################################

#' Plot a country boundary with the VIIRS tiles and world map
#'
#' Plot a country boundary as defined in the \pkg{rworldmap} package along
#'     with the VIIRS nightlight tiles for a visual inspection of the tiles
#'     required for download in order to process a country's nightlight
#'     data. Output corresponds to that of \code{getCtryNlTiles()}
#'
#' It utilizes \code{rworldmap::rwmgetISO3()} to resolve country
#'     codes as well as names.
#'
#' @param ctry \code{character} the 3-letter ISO3 country code e.g. "KEN"
#'     or a common name of the country e.g. "Kenya" as found valid by
#'     \code{rworldmap::rwmgetISO3()}
#'
#' @return None
#'
#' @examples
#' #by ctryCode
#' \dontrun{plotCtryWithTilesVIIRS("KEN")}
#'
#' @export
plotCtryWithTilesVIIRS <- function(ctry)
{
  #validate input: a single country code or name, supplied as character
  if(missing(ctry))
    stop(Sys.time(), ": You must supply a country code or index")

  if(!is.character(ctry))
    stop(Sys.time(), ": The parameter you supplied needs to be type character")

  crsWgs84 <- getCRS()

  #the rworldmap world map supplies the country polygons
  worldMap <- getWorldMap()

  #build the tile polygons only when a cached copy is not already in scope
  if(!exists("tilesSpPolysDFs"))
    tilesSpPolysDFs <- createNlTilesSpPolysDF()

  #resolve the input first as a name, falling back to treating it as a code
  ctryISO3 <- ctryNameToCode(ctry)

  if(is.na(ctryISO3))
    ctryName <- ctryCodeToName(ctry)

  if(is.na(ctryISO3) && !is.na(ctryName))
    ctryISO3 <- ctryNameToCode(ctryName)

  if(is.na(ctryISO3))
    stop(Sys.time(), ": Invalid ctryCode/Name ", ctry)

  #an empty ISO3 means resolution failed
  if (is.na(ctryISO3) || ctryISO3 == "")
    return("Country code/name not found")

  #locate the country's polygon in the world map
  ctryIdx <- which(as.character(worldMap@data$ISO3) == ctryISO3)

  ctryPolyObj <- worldMap@polygons[[ctryIdx]]

  plotTitle <- paste0("VIIRS Nightlight Tiles Required for:\n",
                      worldMap@data$ADMIN[[ctryIdx]],
                      " (", worldMap@data$ISO3[[ctryIdx]], ")")

  #wrap the single Polygons object and stamp the WGS84 CRS on it
  ctrySp <- sp::SpatialPolygons(Srl = list(ctryPolyObj))

  raster::projection(ctrySp) <- sp::CRS(crsWgs84)

  ctrySpDF <- methods::as(ctrySp, "SpatialPolygonsDataFrame")

  #country bounding box, padded by 10 units; helps spot very small countries
  bb <- raster::extent(ctrySpDF)

  bb@xmin <- bb@xmin - 10
  bb@xmax <- bb@xmax + 10
  bb@ymin <- bb@ymin - 10
  bb@ymax <- bb@ymax + 10

  #spplot needs a SpatialLines object rather than a raw Extent
  bbLines <- methods::as(bb, 'SpatialLines')

  #indices of the tiles this country touches; these get highlighted
  intersectIdx <- tileName2Idx(tileName = getTilesCtryIntersectVIIRS(worldMap@data$ISO3[[ctryIdx]]),
                               nlType = grep("VIIRS", getAllNlTypes(), value = TRUE)[1])

  #subtitle mapping each tile index to its tile name
  tileLegend <- paste(tilesSpPolysDFs@data$z, tilesSpPolysDFs@data$name, sep = "=")

  #draw: tiles (intersecting ones shaded), world map, country, bbox, labels
  sp::spplot(tilesSpPolysDFs,
             zcol = "z",
             col.regions = as.vector(ifelse(seq_len(nrow(tilesSpPolysDFs)) %in% intersectIdx, "lightblue", "transparent")),
             colorkey = FALSE,
             sp.layout = list(list(worldMap, col = 'grey', fill = 'transparent', first = FALSE),
                              list(ctrySpDF, col = 'black', fill = 'blue', first = FALSE),
                              list('sp.lines', bbLines, col = 'green', lwd = 2),
                              list('sp.text', sp::coordinates(tilesSpPolysDFs), seq_len(nrow(tilesSpPolysDFs)), col = 'black', cex = 2)
             ),
             main = plotTitle,
             sub = tileLegend
  )
}

######################## mapAllCtryPolyToTilesVIIRS ###################################

#' Create a mapping of all countries and the tiles they intersect
#'
#' This is simply another name for mapCtryPolyToTilesVIIRS with ctryCodes="all"
#'
#' @param omitCountries A character vector or list of countries to leave
#'     out when processing. Default is \code{"none"}
#'
#' @return A data frame of countries and the tiles they intersect with
#'
#' @examples
#' #no countries omitted
#' \dontrun{
#' tileMap <- Rnightlights:::mapAllCtryPolyToTilesVIIRS()
#' }
#'
#' #no countries omitted
#' \dontrun{
#' tileMap <- Rnightlights:::mapAllCtryPolyToTilesVIIRS(omitCountries="none")
#' }
#'
#' #include countries that take long to process
#' \dontrun{
#' tileMap <- Rnightlights:::mapAllCtryPolyToTilesVIIRS(omitCountries=c("error", "long"))
#' }
#'
mapAllCtryPolyToTilesVIIRS <- function(omitCountries=pkgOptions("omitCountries"))
{
  #delegate, naming both arguments explicitly
  mapCtryPolyToTilesVIIRS(ctryCodes = "all", omitCountries = omitCountries)
}

######################## mapCtryPolyToTilesVIIRS ###################################

#' Create a mapping of all countries and the tiles they intersect
#'
#' Create a dataframe mapping each country in the rworldmap to the VIIRS
#'     tiles which they intersect with and thus need to be retrieved to
#'     process their nightlight imagery. Since some functions use this
#'     dataframe for long-term processing, omitCountries can eliminate
#'     countries that should be excluded from the list hence from processing.
#'     Countries can be added in the omitCountries function. Default is "none".
#'
#' @param ctryCodes A character vector or list of countries to map. Default
#'     is \code{"all"}
#' @param omitCountries A character vector or list of countries to leave out.
#'     Default is \code{"none"}
#'
#' @return ctryCodeTiles A data frame of countries and the tiles they
#'     intersect with as given by \code{getNlTiles}
#'
#' @examples
#' #map all countries
#' \dontrun{
#' tileMap <- Rnightlights:::mapCtryPolyToTilesVIIRS()
#' }
#'
#' #map all countries, no countries omitted
#' \dontrun{
#' tileMap <- Rnightlights:::mapCtryPolyToTilesVIIRS(ctryCodes="all", omitCountries="none")
#' }
#'
#' #will not omit countries that do not have polygons on GADM
#' \dontrun{
#' tileMap <- Rnightlights:::mapCtryPolyToTilesVIIRS(omitCountries=c("error", "missing"))
#' }
#'
mapCtryPolyToTilesVIIRS <- function(ctryCodes="all", omitCountries=pkgOptions("omitCountries"))
{
  #"all" expands to the full country list; anything else is used as-is
  if (length(ctryCodes) == 1 && tolower(ctryCodes) == "all")
  {
    ctryCodes <- getAllNlCtryCodes(omitCountries)
  }

  map <- getWorldMap()

  wgs84 <- getCRS()

  #indices of the requested countries' polygons in the world map
  ctryCodeIdx <- which(map@data$ISO3 %in% ctryCodes)

  #NOTE(review): if any ctryCode is absent from the rworldmap data,
  #ctryCodeIdx is shorter than ctryCodes and the code/tiles columns below
  #will not line up row-for-row -- confirm all ctryCodes exist in the map

  ctryCodeTiles <- NULL

  #for each matched country polygon
  for (i in ctryCodeIdx)
  {
    ctryPolys <- map@polygons[[i]]

    #wrap the country's Polygons object and stamp the WGS84 CRS on it
    ctrySpPolys <- sp::SpatialPolygons(Srl = list(ctryPolys))

    raster::projection(ctrySpPolys) <- sp::CRS(wgs84)

    #find the tiles the polygon intersects with and append to the list
    ctryCodeTiles <- rbind(ctryCodeTiles, list(tilesPolygonIntersectVIIRS(ctrySpPolys)))
  }

  #combine the ctryCodes and intersecting tile columns into a dataframe
  ctryCodeTiles <- as.data.frame(cbind(code = as.character(ctryCodes), tiles = ctryCodeTiles))

  names(ctryCodeTiles) <- c("code", "tiles")

  #the code column is picked up as factor; force it back to character
  ctryCodeTiles$code <- as.character(ctryCodeTiles$code)

  return(ctryCodeTiles)
}

######################## getTilesCtryIntersectVIIRS ###################################

#' Get a list of tiles that a country polygon intersects with
#'
#' Get the VIIRS tiles that a single country's polygon intersects with
#'     and which thus need to be retrieved to process its nightlight
#'     imagery.
#'
#' @param ctryCode The country's ISO3 code
#'
#' @return character vector of intersecting tile names, or NA for an
#'     invalid ctryCode
#'
#' @examples
#'
#' Rnightlights:::getTilesCtryIntersectVIIRS("KEN")
#'
getTilesCtryIntersectVIIRS <- function(ctryCode)
{
  if(missing(ctryCode))
    stop(Sys.time(), ": Missing required parameter ctryCode")

  ctryCode <- as.character(ctryCode)

  #warn rather than stop on an unknown code so batch callers can continue
  if(!validCtryCodes(ctryCode))
  {
    warning("Invalid/Unknown ctryCode: ", ctryCode)

    return(NA)
  }

  ctryISO3 <- ctryCode

  map <- getWorldMap()

  wgs84 <- getCRS()

  if (is.na(ctryISO3) || ctryISO3 == "")
    return("Unknown country")

  #locate the country's polygon in the world map
  idx <- which(map@data$ISO3 == ctryISO3)

  ctryCodeTiles <- NULL

  ctryPolys <- map@polygons[[idx]]

  #create a SpatialPolygons object with a list of 1 list of Polygons
  ctrySpPolys <- sp::SpatialPolygons(Srl = list(ctryPolys))

  raster::projection(ctrySpPolys) <- sp::CRS(wgs84)

  ctryCodeTiles <- tilesPolygonIntersectVIIRS(ctrySpPolys)

  return (ctryCodeTiles)
}

######################## validNlTileNameVIIRS ###################################

#' Check valid VIIRS nightlight tile name
#'
#' Check if a tile name is valid for a given VIIRS nightlight type.
#' #' @param tileName the name of the tile #' #' @param nlType character the nlType #' #' @return TRUE/FALSE #' #' @examples #' Rnightlights:::validNlTileNameVIIRS("00N060W", "VIIRS.M") #' #returns TRUE #' validNlTileNameVIIRS <- function(tileName, nlType) { if(missing(tileName)) stop(Sys.time(), ": Missing required parameter tileName") if(!is.character(tileName) || is.null(tileName) || is.na(tileName) || tileName == "") stop(Sys.time(), ": Invalid tileName: ", tileName) if(length(tileName2Idx(tileName, nlType)) != 0) return(TRUE) else return(FALSE) } ######################## tileName2Idx ################################### #' Get the index of a tile given its name #' #' Get the index of a VIIRS tile as given by getNlTiles() given its name #' #' @param tileName name as given by getNlTiles() #' #' @param nlType the nlType of interest #' #' @return Integer index of the tile #' #' @examples #' Rnightlights:::tileName2Idx("00N060W", "VIIRS.M") #' tileName2Idx <- function(tileName, nlType) { if (missing(tileName)) stop(Sys.time(), ": Missing required parameter tileName") if (missing(nlType)) stop(Sys.time(), ": Missing required parameter nlType") if(!is.character(tileName) || is.null(tileName) || is.na(tileName) || tileName == "") stop(Sys.time(), ": Invalid tileName: ", tileName) nlType <- toupper(nlType) tileName <- toupper(tileName) if (!exists("nlTiles")) nlTiles <- getNlTiles(nlType) return (which(nlTiles$name %in% tileName)) } ######################## tileIdx2Name ################################### #' Get the name of a tile given its index #' #' Get the name of a VIIRS tile as given by getNlTiles() given its index #' #' @param tileNum index as given by getNlTiles() #' #' @param nlType the nlType of interest #' #' @return Character name of the tile #' #' @examples #' Rnightlights:::tileName2Idx("00N060W", "VIIRS.M") #returns 6 #' tileIdx2Name <- function(tileNum, nlType) { if(missing(tileNum)) stop(Sys.time(), ": Missing required parameter tileNum") 
if(missing(nlType)) stop(Sys.time(), ": Missing required parameter nlType") if(!validNlTypes(nlType)) stop(Sys.time(), ": Invalid nlType: ", nlType) if(!validNlTileNumVIIRS(tileNum, nlType)) stop(Sys.time(), ": Invalid tileNum: ", tileNum) if (!exists("nlTiles")) nlTiles <- getNlTiles(nlType) nlType <- toupper(nlType) #return (nlTiles[tileNum, "name"]) return(nlTiles[as.numeric(tileNum), "name"]) } ######################## tilesPolygonIntersectVIIRS ################################### #' Get the list of VIIRS tiles that a polygon intersects with #' #' Get the list a VIIRS tiles that a polygon intersects with #' #' @param shpPolygon a SpatialPolygon or SpatialPolygons #' #' @return Character vector of the intersecting tiles as given by \code{getNlTiles} #' #' @examples #' \dontrun{ #' #download shapefile if it doesn't exist #' ctryShapefile <- Rnightlights:::dnldCtryPoly("KEN") #' #' #read in shapefile top layer #' ctryPoly <- readCtryPolyAdmLayer("KEN", #' Rnightlights:::getCtryShpLyrNames("KEN",0)) #' #' #get list of intersecting tiles #' tileList <- Rnightlights:::tilesPolygonIntersectVIIRS(ctryPoly) #' } #' tilesPolygonIntersectVIIRS <- function(shpPolygon) { if(missing(shpPolygon)) stop(Sys.time(), ": Missing required parameter shpPolygon") #given a polygon this function returns a list of the names of the viirs tiles #that it intersects with #Input: a Spatial Polygon e.g. 
from a loaded shapefile #Output: a character vector of tile names as given in the nlTiles dataframe if (!exists("tilesSpPolysDFs")) { tilesSpPolysDFs <- createNlTilesSpPolysDF() } if (!exists("nlTiles")) nlTiles <- getNlTiles(grep("VIIRS", getAllNlTypes(), value = TRUE)[1]) wgs84 <- getCRS() raster::projection(shpPolygon) <- sp::CRS(wgs84) #init list to hold tile indices tileIdx <- NULL #loop through the 6 tile rows in our SpatialPolygonsDataFrame for (i in 1:nrow(tilesSpPolysDFs)) { #check whether the polygon intersects with the current tile tileIdx[i] <- rgeos::gIntersects(tilesSpPolysDFs[i,], shpPolygon) } #return a list of tiles that intersected with the SpatialPolygon return (nlTiles[tileIdx, "name"]) } ######################## validNlTileNumVIIRS ################################### #' Check valid tile number for a given VIIRS nightlight type #' #' Check if a tile number is valid for a given VIIRS nightlight type. #' #' @param nlTileNum the index of the tile #' #' @param nlType A character string of nlType #' #' @return TRUE/FALSE #' #' @examples #' Rnightlights:::validNlTileNumVIIRS("1", "VIIRS.M") #' #returns TRUE #' #' Rnightlights:::validNlTileNumVIIRS("9", "VIIRS.D") #' #returns FALSE #' validNlTileNumVIIRS <- function(nlTileNum, nlType) { nlTileNum <- as.character(nlTileNum) if (missing(nlTileNum)) stop(Sys.time(), ": Missing parameter nlTileNum") if (missing(nlType)) stop(Sys.time(), ": Missing parameter nlType") if (class(nlTileNum) != "character" || nlTileNum =="" || length(nlTileNum)==0 || length(grep("[^[:digit:]]", nlTileNum) > 0)) return(FALSE) if(!exists("nlTiles")) nlTiles <- getNlTiles(nlType) nlT <- as.numeric(nlTileNum) if (nlT >= 1 && nlT <= length(nlTiles)) return(TRUE) else return(FALSE) }
library(purrr) ### Name: splice ### Title: Splice objects and lists of objects into a list ### Aliases: splice ### ** Examples inputs <- list(arg1 = "a", arg2 = "b") # splice() concatenates the elements of inputs with arg3 splice(inputs, arg3 = c("c1", "c2")) %>% str() list(inputs, arg3 = c("c1", "c2")) %>% str() c(inputs, arg3 = c("c1", "c2")) %>% str()
/data/genthat_extracted_code/purrr/examples/splice.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
365
r
library(purrr) ### Name: splice ### Title: Splice objects and lists of objects into a list ### Aliases: splice ### ** Examples inputs <- list(arg1 = "a", arg2 = "b") # splice() concatenates the elements of inputs with arg3 splice(inputs, arg3 = c("c1", "c2")) %>% str() list(inputs, arg3 = c("c1", "c2")) %>% str() c(inputs, arg3 = c("c1", "c2")) %>% str()
######### Run all the scripts files to clean up the raw data for fish raw data ### By C. Boch Ph.D June 2, 2014 ### Micheli Laboratory, Hopkins Marine Station, Stanford Univerisity rm(list=ls()) ### Read the list of data processing scripts file.sources = list.files(pattern="*Fish_Data_Raw_Clean_Up") ### Run those processing scripts sapply(file.sources, source) ### the above returns cleaned up .csv files for each dataset entry but does not creat a single dataframe of AllBajaFishes to work from ### Thus, run a rbind command # access the dataset folder and create table containing the list of files and then redirect the output to a text file system("ls -lt ~/Desktop/NSF_CHN/Ecology/ReefCheck/Data_Clean/*fishes_clean_data* > ~/Desktop/NSF_CHN/Ecology/ReefCheck/Sandbox/All_Reef_Check_fishes_filelist_latest.txt") # load the list of files into a table currentTable = read.table("~/Desktop/NSF_CHN/Ecology/ReefCheck/Sandbox/All_Reef_Check_fishes_filelist_latest.txt",as.is=TRUE) # read all tables in the list tables = list() ##read in data and attach dataframe to each file in list for (i in 1:nrow(currentTable)){ tables[[i]] = read.csv(currentTable$V9[i],as.is=TRUE) } AllBajaFishes = do.call(rbind,tables) # a new master dataframe containing all the data from different files in one place write.table(AllBajaFishes, "~/Desktop/NSF_CHN/Ecology/ReefCheck/Data_Clean/All_locations_Reef_Check_fishes_data_latest.csv", sep=",", col.names=T, row.names=F)
/R_Scripts/0002_All_Baja_Reef_Check_Fish_Data_Raw_Clean_Up.R
no_license
rbeas/baja-eco-data
R
false
false
1,470
r
######### Run all the scripts files to clean up the raw data for fish raw data ### By C. Boch Ph.D June 2, 2014 ### Micheli Laboratory, Hopkins Marine Station, Stanford Univerisity rm(list=ls()) ### Read the list of data processing scripts file.sources = list.files(pattern="*Fish_Data_Raw_Clean_Up") ### Run those processing scripts sapply(file.sources, source) ### the above returns cleaned up .csv files for each dataset entry but does not creat a single dataframe of AllBajaFishes to work from ### Thus, run a rbind command # access the dataset folder and create table containing the list of files and then redirect the output to a text file system("ls -lt ~/Desktop/NSF_CHN/Ecology/ReefCheck/Data_Clean/*fishes_clean_data* > ~/Desktop/NSF_CHN/Ecology/ReefCheck/Sandbox/All_Reef_Check_fishes_filelist_latest.txt") # load the list of files into a table currentTable = read.table("~/Desktop/NSF_CHN/Ecology/ReefCheck/Sandbox/All_Reef_Check_fishes_filelist_latest.txt",as.is=TRUE) # read all tables in the list tables = list() ##read in data and attach dataframe to each file in list for (i in 1:nrow(currentTable)){ tables[[i]] = read.csv(currentTable$V9[i],as.is=TRUE) } AllBajaFishes = do.call(rbind,tables) # a new master dataframe containing all the data from different files in one place write.table(AllBajaFishes, "~/Desktop/NSF_CHN/Ecology/ReefCheck/Data_Clean/All_locations_Reef_Check_fishes_data_latest.csv", sep=",", col.names=T, row.names=F)
library(multicon) ### Name: popsd ### Title: Population Standard Deviation ### Aliases: popsd ### Keywords: standard deviation population descriptive statistics ### ** Examples x <- rnorm(100, mean = 12, sd = 10) sd(x) #sample standard deviation popsd(x) #population standard deviation
/data/genthat_extracted_code/multicon/examples/popsd.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
295
r
library(multicon) ### Name: popsd ### Title: Population Standard Deviation ### Aliases: popsd ### Keywords: standard deviation population descriptive statistics ### ** Examples x <- rnorm(100, mean = 12, sd = 10) sd(x) #sample standard deviation popsd(x) #population standard deviation
#!/usr/bin/env Rscript # Todo , el log debe decir rows en vez de register y 23 variables of total of 66 vars... # Poner en el rmarkdown los puntos outliers en rojo. # Para ejecutar desde linea de comandos dejando log de warnings y errores en # models_main.Rout: # R CMD BATCH --no-save --no-restore models_main.R # with args: # R CMD BATCH --no-save --no-restore '--args parallel=TRUE' models_main.R # Para ejecutar igual per imprimiendo warnings y errores a pantalla: # Rscript models_main.R #Parsear argumentos si es por script tipo etc args=(commandArgs(TRUE)) if(length(args)>0){ for(i in 1:length(args)){ eval(parse(text=args[[i]])) } } # Reset sink() (sink define the file output for messages. It should start at 0) while(sink.number() > 0) sink() Sys.setenv(TZ='UTC') TZ='UTC' iam='models_main' setwd(".") #asumimos que debajo esta functions/ etc. #If not set debug mode manually.... if(!exists("debug_mode")||(exists("debug_mode")&&!is.logical(debug_mode))) debug_mode <- FALSE # If true, messages will be displayed directly to console and not in log file. if(!exists("force_regenerate")||(exists("force_regenerate")&&!is.logical(force_regenerate))) force_regenerate <- FALSE # If true, will regenerate the model event if it exists. 
if(!exists("parallel")||(exists("parallel")&&!is.logical(parallel))) parallel <- FALSE # true if want parallelism if(!exists("table_cast_config")||(exists("table_cast_config")&&!is.character(table_cast_config))) table_cast_config <- '1_cast_config_compatible' # cast config table if(!exists("table_filter_config")||(exists("table_filter_config")&&!is.character(table_filter_config))) table_filter_config <- '1_filter_config' # cast config table if(!exists("table_cast_park_dic")||(exists("table_cast_park_dic")&&!is.character(table_cast_park_dic))) table_cast_park_dic <- '1_cast_park_table_dic' # cast config dic table if(!exists("type")||(exists("type")&&!is.character(type))) type <- 'som' # cast config table if(!exists("date_time_name")||(exists("date_time_name")&&!is.character(date_time_name))) date_time_name <- 'date_time' if(!exists("parallel_mode")||(exists("parallel_mode")&&!is.logical(parallel_mode))) parallel_mode <- T model_params<-list(type='gsom',spreadFactor=0.8,hex=T) #Use gsom model #model_params<-list(type='som',dim=10,rlen=10,alpha<-c(0.05,0.01),sizeRuleOfTheThumb=T,hex=F) #Use som model # DB data if(!exists("db_config_data")||(exists("db_config_data")&&!is.data.frame(db_config_data))) db_config_data<- data.frame(user='user', password='password', dbname='yourHistoricalBD', host='yourHost',#'127.0.0.1', # port=3306) if(!exists("db_config_realtime")||(exists("db_config_realtime")&&!is.data.frame(db_config_realtime))) db_config_realtime<- data.frame(user='user', password='password', dbname='smartcast_DB', host='yourHost', port=3306) if(Sys.info()["nodename"] != "smartbbdd"){ db_config_data$host='127.0.0.1' db_config_realtime$host='127.0.0.1' } default_path <- "windfarms/" # Default directory to store engines made by brain0 or brain1. The "/" at the end is obligatory. 
daily_alarms_threshold <- 0 # Define el umbral para decidir si necesita fusi?n de datos (<) o no (>=) if(!debug_mode) sink("models_main.log", append = TRUE) t_ini<-as.POSIXct(Sys.time(),tz=TZ,origin="1970-01-01"); t_ini_madrid<-t_ini attr(t_ini_madrid, "tzone") <- "Europe/Madrid" cat(paste0("\n\n>>------------------- INI models_main ",t_ini," UTC (",t_ini_madrid," Madrid ) ----------------------->>\n")) #Basic dependency if(!exists("dependencyLoader")){ if(!file.exists('functions_common/dependencyLoader.R')) return(list(error=TRUE,data=NULL,msg="Missing dependency function: functions_common/dependencyLoader.R")); source('functions_common/dependencyLoader.R') } dep<-dependencyLoader(c('RMySQL','parallel','doParallel','brain4.R',paste0("functions_common/",c('getModelsToBeCreated.R','db_query.R','ranking_by_id_walm.R','need_fusion.R','get_most_unhealthy.R','close_protocol.R')))) if(dep$error) return(list(error=TRUE,data=NULL,msg=paste0(iam,":on call dependencyLoader\n",dep$msg))) brain<-brain4 if(force_regenerate) #Force generate the model even there are created models. 
r2<-db_query(query=paste0("UPDATE ",table_cast_config," SET creation_date_ini=NULL,creation_date_end=NULL,creation_model_path=\"windfarms/\",creation_log_path=\"windfarms/\",creation_error=0 WHERE `type`='",type,"' AND creation_enable=1;"),db_config=db_config_data); models<-getModelsToBeCreated(table_cast_config,force_regenerate,type=type,db_config=db_config_data) if(models$error) { output_msg <- models$msg close_protocol(output_msg, iam, debug_mode, TZ = "UTC") stop(output_msg); } if(nrow(models$data)<=0) { output_msg <- "\nNo models to be generated, check DB.\n" close_protocol(output_msg, iam, debug_mode, TZ = "UTC") stop(output_msg); } n_models<-nrow(models$data) if(n_models>0){ for (model_id in unique(models$data$id)) {#Old for loop before parallel currentTimestampUTC<-floor(as.numeric(as.POSIXlt(Sys.time(),tz="UTC"))) #Actualizamos la tabla para marcar creandolo query <- paste0("UPDATE ",table_cast_config," SET creation_date_ini=FROM_UNIXTIME(",currentTimestampUTC,"),creation_error=0 WHERE id=",model_id) r2<-db_query(query=query,db_config=db_config_data) r<-try(brain4(currentTimestampUTC=currentTimestampUTC,m=models$data[models$data$id==model_id,],model_params=model_params,parallel_mode=parallel_mode,verbose=T,table_filter_config=table_filter_config,table_cast_park_dic=table_cast_park_dic,table_artificial_config=table_artificial_config,date_time_name=date_time_name,db_config=db_config_data)) if(inherits(r, "try-error")) { while(!debug_mode && sink.number() > 1) sink() cat(r) query <- paste0("UPDATE ",table_cast_config," SET creation_error=1 WHERE id=",model_id) r2<-db_query(query=query,db_config=db_config_data) next() } ##### NO ERROR: Update cast_config table with OK: #### currentTimestampUTC<-floor(as.numeric(as.POSIXlt(Sys.time(),tz="UTC"))) query <- paste0("UPDATE ",table_cast_config," SET creation_date_end=FROM_UNIXTIME(",currentTimestampUTC,"),creation_model_path='",r$data$modelPath,"',creation_log_path='",r$data$logPath,"' WHERE id=",model_id,";") # 
r2<-db_query(query=query,db_config=db_config_data) r2 <- lapply(query, db_query, db_config=db_config_data) #### } }else{ cat("No models to be generated, check DB") } output_msg <- "\nDONE.\n" close_protocol(output_msg, iam, debug_mode, TZ = "UTC")
/models_main.R
permissive
alecuba16/SOM_windturbine
R
false
false
6,791
r
#!/usr/bin/env Rscript # Todo , el log debe decir rows en vez de register y 23 variables of total of 66 vars... # Poner en el rmarkdown los puntos outliers en rojo. # Para ejecutar desde linea de comandos dejando log de warnings y errores en # models_main.Rout: # R CMD BATCH --no-save --no-restore models_main.R # with args: # R CMD BATCH --no-save --no-restore '--args parallel=TRUE' models_main.R # Para ejecutar igual per imprimiendo warnings y errores a pantalla: # Rscript models_main.R #Parsear argumentos si es por script tipo etc args=(commandArgs(TRUE)) if(length(args)>0){ for(i in 1:length(args)){ eval(parse(text=args[[i]])) } } # Reset sink() (sink define the file output for messages. It should start at 0) while(sink.number() > 0) sink() Sys.setenv(TZ='UTC') TZ='UTC' iam='models_main' setwd(".") #asumimos que debajo esta functions/ etc. #If not set debug mode manually.... if(!exists("debug_mode")||(exists("debug_mode")&&!is.logical(debug_mode))) debug_mode <- FALSE # If true, messages will be displayed directly to console and not in log file. if(!exists("force_regenerate")||(exists("force_regenerate")&&!is.logical(force_regenerate))) force_regenerate <- FALSE # If true, will regenerate the model event if it exists. 
if(!exists("parallel")||(exists("parallel")&&!is.logical(parallel))) parallel <- FALSE # true if want parallelism if(!exists("table_cast_config")||(exists("table_cast_config")&&!is.character(table_cast_config))) table_cast_config <- '1_cast_config_compatible' # cast config table if(!exists("table_filter_config")||(exists("table_filter_config")&&!is.character(table_filter_config))) table_filter_config <- '1_filter_config' # cast config table if(!exists("table_cast_park_dic")||(exists("table_cast_park_dic")&&!is.character(table_cast_park_dic))) table_cast_park_dic <- '1_cast_park_table_dic' # cast config dic table if(!exists("type")||(exists("type")&&!is.character(type))) type <- 'som' # cast config table if(!exists("date_time_name")||(exists("date_time_name")&&!is.character(date_time_name))) date_time_name <- 'date_time' if(!exists("parallel_mode")||(exists("parallel_mode")&&!is.logical(parallel_mode))) parallel_mode <- T model_params<-list(type='gsom',spreadFactor=0.8,hex=T) #Use gsom model #model_params<-list(type='som',dim=10,rlen=10,alpha<-c(0.05,0.01),sizeRuleOfTheThumb=T,hex=F) #Use som model # DB data if(!exists("db_config_data")||(exists("db_config_data")&&!is.data.frame(db_config_data))) db_config_data<- data.frame(user='user', password='password', dbname='yourHistoricalBD', host='yourHost',#'127.0.0.1', # port=3306) if(!exists("db_config_realtime")||(exists("db_config_realtime")&&!is.data.frame(db_config_realtime))) db_config_realtime<- data.frame(user='user', password='password', dbname='smartcast_DB', host='yourHost', port=3306) if(Sys.info()["nodename"] != "smartbbdd"){ db_config_data$host='127.0.0.1' db_config_realtime$host='127.0.0.1' } default_path <- "windfarms/" # Default directory to store engines made by brain0 or brain1. The "/" at the end is obligatory. 
daily_alarms_threshold <- 0 # Define el umbral para decidir si necesita fusi?n de datos (<) o no (>=) if(!debug_mode) sink("models_main.log", append = TRUE) t_ini<-as.POSIXct(Sys.time(),tz=TZ,origin="1970-01-01"); t_ini_madrid<-t_ini attr(t_ini_madrid, "tzone") <- "Europe/Madrid" cat(paste0("\n\n>>------------------- INI models_main ",t_ini," UTC (",t_ini_madrid," Madrid ) ----------------------->>\n")) #Basic dependency if(!exists("dependencyLoader")){ if(!file.exists('functions_common/dependencyLoader.R')) return(list(error=TRUE,data=NULL,msg="Missing dependency function: functions_common/dependencyLoader.R")); source('functions_common/dependencyLoader.R') } dep<-dependencyLoader(c('RMySQL','parallel','doParallel','brain4.R',paste0("functions_common/",c('getModelsToBeCreated.R','db_query.R','ranking_by_id_walm.R','need_fusion.R','get_most_unhealthy.R','close_protocol.R')))) if(dep$error) return(list(error=TRUE,data=NULL,msg=paste0(iam,":on call dependencyLoader\n",dep$msg))) brain<-brain4 if(force_regenerate) #Force generate the model even there are created models. 
r2<-db_query(query=paste0("UPDATE ",table_cast_config," SET creation_date_ini=NULL,creation_date_end=NULL,creation_model_path=\"windfarms/\",creation_log_path=\"windfarms/\",creation_error=0 WHERE `type`='",type,"' AND creation_enable=1;"),db_config=db_config_data); models<-getModelsToBeCreated(table_cast_config,force_regenerate,type=type,db_config=db_config_data) if(models$error) { output_msg <- models$msg close_protocol(output_msg, iam, debug_mode, TZ = "UTC") stop(output_msg); } if(nrow(models$data)<=0) { output_msg <- "\nNo models to be generated, check DB.\n" close_protocol(output_msg, iam, debug_mode, TZ = "UTC") stop(output_msg); } n_models<-nrow(models$data) if(n_models>0){ for (model_id in unique(models$data$id)) {#Old for loop before parallel currentTimestampUTC<-floor(as.numeric(as.POSIXlt(Sys.time(),tz="UTC"))) #Actualizamos la tabla para marcar creandolo query <- paste0("UPDATE ",table_cast_config," SET creation_date_ini=FROM_UNIXTIME(",currentTimestampUTC,"),creation_error=0 WHERE id=",model_id) r2<-db_query(query=query,db_config=db_config_data) r<-try(brain4(currentTimestampUTC=currentTimestampUTC,m=models$data[models$data$id==model_id,],model_params=model_params,parallel_mode=parallel_mode,verbose=T,table_filter_config=table_filter_config,table_cast_park_dic=table_cast_park_dic,table_artificial_config=table_artificial_config,date_time_name=date_time_name,db_config=db_config_data)) if(inherits(r, "try-error")) { while(!debug_mode && sink.number() > 1) sink() cat(r) query <- paste0("UPDATE ",table_cast_config," SET creation_error=1 WHERE id=",model_id) r2<-db_query(query=query,db_config=db_config_data) next() } ##### NO ERROR: Update cast_config table with OK: #### currentTimestampUTC<-floor(as.numeric(as.POSIXlt(Sys.time(),tz="UTC"))) query <- paste0("UPDATE ",table_cast_config," SET creation_date_end=FROM_UNIXTIME(",currentTimestampUTC,"),creation_model_path='",r$data$modelPath,"',creation_log_path='",r$data$logPath,"' WHERE id=",model_id,";") # 
r2<-db_query(query=query,db_config=db_config_data) r2 <- lapply(query, db_query, db_config=db_config_data) #### } }else{ cat("No models to be generated, check DB") } output_msg <- "\nDONE.\n" close_protocol(output_msg, iam, debug_mode, TZ = "UTC")
# Libraries library(dplyr) library(lubridate) library(data.table) # Load the data events <- fread("data/holidays_events.csv") items <- fread("data/items.csv") oil <- fread("data/oil.csv") stores <- fread("data/stores.csv") transactions <- fread("data/transactions.csv") test <- fread("data/test.csv") start <- Sys.time() train <- fread("data/train.csv") end <- Sys.time() print(end-start) # Check the data glimpse(oil) glimpse(events) glimpse(items) glimpse(stores) glimpse(transactions) glimpse(test) glimpse(train)
/R/Exploratory Data Analysis.R
no_license
ichigeki/forecast-grocery-sales
R
false
false
520
r
# Libraries library(dplyr) library(lubridate) library(data.table) # Load the data events <- fread("data/holidays_events.csv") items <- fread("data/items.csv") oil <- fread("data/oil.csv") stores <- fread("data/stores.csv") transactions <- fread("data/transactions.csv") test <- fread("data/test.csv") start <- Sys.time() train <- fread("data/train.csv") end <- Sys.time() print(end-start) # Check the data glimpse(oil) glimpse(events) glimpse(items) glimpse(stores) glimpse(transactions) glimpse(test) glimpse(train)
############################################################################## ## Worksheet 1 ############################################################################## ## 1. Basic data types --------------------------------------------------------- ## Divide 7 by 2. ## Add 3, 5, and 12. ## Multiply 9 by 124. ## Have the console print out the title of your favorite movie. ## Test the truth or falsity of three logical statements. ## 2.Variables --------------------------------------------------------- ## Create a numeric variable with the value of the day of the month you were born. ## Create another variable equal to the first variable times four. ## Have the console print out whether or not the second variable is equal to 64.
/worksheets/worksheet_1_data_types.r
no_license
bertozzivill/infx572_winter17
R
false
false
757
r
############################################################################## ## Worksheet 1 ############################################################################## ## 1. Basic data types --------------------------------------------------------- ## Divide 7 by 2. ## Add 3, 5, and 12. ## Multiply 9 by 124. ## Have the console print out the title of your favorite movie. ## Test the truth or falsity of three logical statements. ## 2.Variables --------------------------------------------------------- ## Create a numeric variable with the value of the day of the month you were born. ## Create another variable equal to the first variable times four. ## Have the console print out whether or not the second variable is equal to 64.
## Library library(tidyverse) library(ggplot2) library(glmnet) library(caret) library(e1071) library(party) library(class) library(randomForest) library(neuralnet) RNGkind(sample.kind = "Rounding") ########################################## ## Import dataset and check ## ########################################## # training data train_data <- read.csv("train.csv") head(train_data,5) summary(train_data) dim(train_data) # test data test_data <- read.csv("test.csv") head(test_data,5) summary(test_data) dim(test_data) # store the response variable activity_train <- train_data$Activity activity_test <- test_data$Activity # combine two data sets for EDA overall_data <- rbind(train_data,test_data) glimpse(train_data) dim(overall_data) ########################################## ## Data Preprocessing for EDA ## ########################################## # Checking for missing values sum(is.na(train_data)) sum(is.na(test_data)) # there are no missing values # Checking for duplicates summary(duplicated(train_data)) summary(duplicated(test_data)) # Checking for class imbalance table(overall_data$Activity) ggplot(data=overall_data, aes(x=Activity, color=Activity, fill=Activity)) + geom_bar() + labs(x= "Activities",title = "Smartphone activity labels distribution") # There is almost same number of observations across all the six activities # so this data does not have class imbalance problem. 
# frequency of 'Subject' Variable table(overall_data$subject) ggplot(overall_data, aes(subject)) + geom_bar(fill="gray") + ylim(0,500) + labs(title = "The distribution of 30 subjects in the overall dataset") # Static and dynamic activities ggplot(overall_data, aes(x=tBodyAccMag.mean.., color=Activity)) + geom_density() + labs(title = "Static and dynamic activities by tBodyAccMag-mean density plot") # figure out the variation in data for each activity # Mean Body Acceleration subject20 <- overall_data[overall_data$subject==20,] ggplot(subject20, aes(x=Activity, y=tBodyAcc.mean...X, color=Activity)) + geom_point() + labs(title = "Subject Mean Body Acceleration - X axis scatter plot") ggplot(subject20, aes(x=Activity, y=tBodyAcc.mean...Y, color=Activity)) + geom_point() + labs(title = "Subject Mean Body Acceleration - Y axis scatter plot") ggplot(subject20, aes(x=Activity, y=tBodyAcc.mean...Z, color=Activity)) + geom_point() + labs(title = "Subject Mean Body Acceleration - Z axis scatter plot") # Maximum Body Acceleration ggplot(subject20, aes(x=Activity, y=tBodyAcc.max...X, color=Activity)) + geom_point() + labs(title = "Subject Max Body Acceleration - X axis scatter plot") ggplot(subject20, aes(x=Activity, y=tBodyAcc.max...Y, color=Activity)) + geom_point() + labs(title = "Subject Max Body Acceleration - Y axis scatter plot") ggplot(subject20, aes(x=Activity, y=tBodyAcc.max...Z, color=Activity)) + geom_point() + labs(title = "Subject Max Body Acceleration - Z axis scatter plot") # Mean Gravity Acceleration signals ggplot(subject20, aes(x=Activity, y=tGravityAcc.mean...X, color=Activity)) + geom_point() + labs(y="tGravityAcc.mean.X",title = "Angle (X, GravityMean) Scatter Plot") ggplot(subject20, aes(x=Activity, y=tGravityAcc.mean...Y, color=Activity)) + geom_point() + labs(y="tGravityAcc.mean.Y",title = "Angle (Y, GravityMean) Scatter Plot") ggplot(subject20, aes(x=Activity, y=tGravityAcc.mean...Z, color=Activity)) + geom_point() + labs(y="tGravityAcc.mean.Y",title 
= "Angle (Z, GravityMean) Scatter Plot") ########################################## ## Drop some feature ### ########################################## # The variable "subject" is just a label, so we can romove it for feature selection train_data <- train_data %>% select(-"subject") test_data <- test_data %>% select(-"subject") ########################################## ## Modeling ### ########################################## # GLM with lasso, using cross validation # create a calibration and training set from the original training set set.seed(327) #set seed index <- sample(x=nrow(train_data), size=.80*nrow(train_data)) train_sub <- train_data[index, ] test_sub <- train_data[-index, ] dim(train_sub) dim(test_sub) train_mat <- model.matrix(Activity~., train_sub)[,-1] lasso.fit <- glmnet(train_mat, train_sub$Activity, family = "multinomial", type.multinomial = "grouped") plot(lasso.fit, xvar = "lambda", label = TRUE, type.coef = "2norm") plot(lasso.fit, xvar = "dev", label = TRUE) # use cross-validation to find estimate the best lambda with the lowest test-error, used 5-fold cvfit <- cv.glmnet(train_mat, train_sub$Activity, family="multinomial", type.measure="class", type.multinomial = "grouped", nfolds = 5, parallel = TRUE) plot(cvfit) bestlam <- cvfit$lambda.min log(bestlam) test_mat <- model.matrix(Activity~., test_sub)[,-1] lasso.pred <- predict(cvfit, s = bestlam, newx = test_mat, type = "class") table(lasso.pred, test_sub$Activity) acc <- sum(lasso.pred == test_sub$Activity) / length(lasso.pred) acc # we can perform on original test set test.pred <- predict(cvfit, s = bestlam, newx = model.matrix(Activity~., test_data)[,-1], type = "class") test.pred <- as.factor(test.pred) p_lasso <- mean(test.pred == test_data$Activity) lassoconfusionMatrix(test.pred, test_data$Activity) # Support vector machines svm_linear <- svm(Activity~ ., data=train_data, type='C-classification', kernel='linear') # Naive Bayes bnc <- naiveBayes(Activity~ ., data=train_data) # Decision 
Trees ctree <- ctree(Activity ~ ., data=train_data) # KNN knn9 <- knn(train = train_data[,1:561], test = test_data[,1:561], cl = train_data$Activity, k=9) # Random Forest rf <- randomForest(Activity ~ ., data = train_data) ### Test set predictions # Support vector machines pred_test_svm <- predict(svm_linear, test_data) p_svm <- mean(pred_test_svm == test_data$Activity) # Naive Bayes pred_test_bnc <- predict(bnc, test_data) p_bnc <- mean(pred_test_bnc == test_data$Activity) # Decision Trees pred_test_ctree <- predict(ctree, test_data) p_ctree <- mean(pred_test_ctree == test_data$Activity) # KNN p_knn9 <- mean(knn9 == test_data$Activity) # Random Forest pred_test_rf <- predict(rf, test_data) p_rf <- mean(pred_test_rf == test_data$Activity) # Confusion Matrix Analysis caret::confusionMatrix(test.pred, test_data$Activity, positive="1", mode="everything") caret::confusionMatrix(pred_test_svm, test_data$Activity, positive="1", mode="everything") caret::confusionMatrix(pred_test_bnc, test_data$Activity, positive="1", mode="everything") caret::confusionMatrix(pred_test_ctree, test_data$Activity, positive="1", mode="everything") caret::confusionMatrix(knn9, test_data$Activity, positive="1", mode="everything") caret::confusionMatrix(pred_test_rf, test_data$Activity, positive="1", mode="everything") #################### ##### Conclusion ### #################### # plot p <- as.vector(c(p_lasso, p_svm, p_bnc, p_ctree, p_knn9, p_rf)) p_round <- round(p,4) barplot(p_round, ylim = c(0,1), main = "Model comparison", xlab = "Models", names.arg = c("GLM/Lasso", "SVM","Naive Bayes","Decision Trees","KNN","Randomforest"), col = c("darkseagreen", "darkslategray3", "gold2", "lightpink2", "lightsalmon1", "mediumseagreen")) text(p, labels = as.character(p_round), pos = 3, cex = 0.75)
/FinalProject_RuohanDang.R
no_license
ruohandang/Human-Activity-Recognition-with-Smartphones
R
false
false
7,307
r
## Human Activity Recognition with Smartphones — end-to-end analysis script.
## Loads the UCI HAR train/test CSVs, explores the data, then fits and
## compares six classifiers on the held-out test set.

## Library
library(tidyverse)
library(ggplot2)
library(glmnet)
library(caret)
library(e1071)
library(party)
library(class)
library(randomForest)
library(neuralnet)
RNGkind(sample.kind = "Rounding")

##########################################
##  Import dataset and check            ##
##########################################
# training data
train_data <- read.csv("train.csv")
head(train_data, 5)
summary(train_data)
dim(train_data)

# test data
test_data <- read.csv("test.csv")
head(test_data, 5)
summary(test_data)
dim(test_data)

# store the response variable
activity_train <- train_data$Activity
activity_test <- test_data$Activity

# combine the two data sets for EDA
overall_data <- rbind(train_data, test_data)
glimpse(train_data)
dim(overall_data)

##########################################
##  Data Preprocessing for EDA          ##
##########################################
# Checking for missing values
sum(is.na(train_data))
sum(is.na(test_data))
# there are no missing values

# Checking for duplicates
summary(duplicated(train_data))
summary(duplicated(test_data))

# Checking for class imbalance
table(overall_data$Activity)
ggplot(data = overall_data, aes(x = Activity, color = Activity, fill = Activity)) +
  geom_bar() +
  labs(x = "Activities", title = "Smartphone activity labels distribution")
# There is almost the same number of observations across all six activities,
# so this data does not have a class imbalance problem.

# frequency of 'subject' variable
table(overall_data$subject)
ggplot(overall_data, aes(subject)) +
  geom_bar(fill = "gray") +
  ylim(0, 500) +
  labs(title = "The distribution of 30 subjects in the overall dataset")

# Static and dynamic activities
ggplot(overall_data, aes(x = tBodyAccMag.mean.., color = Activity)) +
  geom_density() +
  labs(title = "Static and dynamic activities by tBodyAccMag-mean density plot")

# figure out the variation in data for each activity
# Mean Body Acceleration
subject20 <- overall_data[overall_data$subject == 20, ]
ggplot(subject20, aes(x = Activity, y = tBodyAcc.mean...X, color = Activity)) +
  geom_point() +
  labs(title = "Subject Mean Body Acceleration - X axis scatter plot")
ggplot(subject20, aes(x = Activity, y = tBodyAcc.mean...Y, color = Activity)) +
  geom_point() +
  labs(title = "Subject Mean Body Acceleration - Y axis scatter plot")
ggplot(subject20, aes(x = Activity, y = tBodyAcc.mean...Z, color = Activity)) +
  geom_point() +
  labs(title = "Subject Mean Body Acceleration - Z axis scatter plot")

# Maximum Body Acceleration
ggplot(subject20, aes(x = Activity, y = tBodyAcc.max...X, color = Activity)) +
  geom_point() +
  labs(title = "Subject Max Body Acceleration - X axis scatter plot")
ggplot(subject20, aes(x = Activity, y = tBodyAcc.max...Y, color = Activity)) +
  geom_point() +
  labs(title = "Subject Max Body Acceleration - Y axis scatter plot")
ggplot(subject20, aes(x = Activity, y = tBodyAcc.max...Z, color = Activity)) +
  geom_point() +
  labs(title = "Subject Max Body Acceleration - Z axis scatter plot")

# Mean Gravity Acceleration signals
ggplot(subject20, aes(x = Activity, y = tGravityAcc.mean...X, color = Activity)) +
  geom_point() +
  labs(y = "tGravityAcc.mean.X", title = "Angle (X, GravityMean) Scatter Plot")
ggplot(subject20, aes(x = Activity, y = tGravityAcc.mean...Y, color = Activity)) +
  geom_point() +
  labs(y = "tGravityAcc.mean.Y", title = "Angle (Y, GravityMean) Scatter Plot")
# FIX: the Z-axis plot reused the "tGravityAcc.mean.Y" label (copy-paste);
# the plotted variable is tGravityAcc.mean...Z
ggplot(subject20, aes(x = Activity, y = tGravityAcc.mean...Z, color = Activity)) +
  geom_point() +
  labs(y = "tGravityAcc.mean.Z", title = "Angle (Z, GravityMean) Scatter Plot")

##########################################
##  Drop some features                 ###
##########################################
# The variable "subject" is just a label, so we can remove it for feature selection
train_data <- train_data %>% select(-"subject")
test_data <- test_data %>% select(-"subject")

##########################################
##  Modeling                           ###
##########################################
# GLM with lasso, using cross validation
# create a calibration and training set from the original training set
set.seed(327) # set seed
index <- sample(x = nrow(train_data), size = .80 * nrow(train_data))
train_sub <- train_data[index, ]
test_sub <- train_data[-index, ]
dim(train_sub)
dim(test_sub)

train_mat <- model.matrix(Activity ~ ., train_sub)[, -1]
lasso.fit <- glmnet(train_mat, train_sub$Activity, family = "multinomial",
                    type.multinomial = "grouped")
plot(lasso.fit, xvar = "lambda", label = TRUE, type.coef = "2norm")
plot(lasso.fit, xvar = "dev", label = TRUE)

# use cross-validation to estimate the best lambda with the lowest test error; 5-fold
# NOTE(review): parallel = TRUE only takes effect when a parallel backend
# (e.g. doParallel) has been registered — confirm this is intended
cvfit <- cv.glmnet(train_mat, train_sub$Activity, family = "multinomial",
                   type.measure = "class", type.multinomial = "grouped",
                   nfolds = 5, parallel = TRUE)
plot(cvfit)
bestlam <- cvfit$lambda.min
log(bestlam)

test_mat <- model.matrix(Activity ~ ., test_sub)[, -1]
lasso.pred <- predict(cvfit, s = bestlam, newx = test_mat, type = "class")
table(lasso.pred, test_sub$Activity)
acc <- sum(lasso.pred == test_sub$Activity) / length(lasso.pred)
acc

# we can also evaluate on the original test set
test.pred <- predict(cvfit, s = bestlam,
                     newx = model.matrix(Activity ~ ., test_data)[, -1],
                     type = "class")
test.pred <- as.factor(test.pred)
p_lasso <- mean(test.pred == test_data$Activity)
# FIX: `lassoconfusionMatrix(...)` is not a function; the intent is caret's
# confusionMatrix() for the lasso predictions
confusionMatrix(test.pred, test_data$Activity)

# Support vector machines
svm_linear <- svm(Activity ~ ., data = train_data,
                  type = 'C-classification', kernel = 'linear')
# Naive Bayes
bnc <- naiveBayes(Activity ~ ., data = train_data)
# Decision Trees
# NOTE(review): this assignment shadows party::ctree; predict(ctree, ...) below
# still works because the fitted object dispatches on its class
ctree <- ctree(Activity ~ ., data = train_data)
# KNN (columns 1:561 are the feature columns)
knn9 <- knn(train = train_data[, 1:561], test = test_data[, 1:561],
            cl = train_data$Activity, k = 9)
# Random Forest
rf <- randomForest(Activity ~ ., data = train_data)

### Test set predictions
# Support vector machines
pred_test_svm <- predict(svm_linear, test_data)
p_svm <- mean(pred_test_svm == test_data$Activity)
# Naive Bayes
pred_test_bnc <- predict(bnc, test_data)
p_bnc <- mean(pred_test_bnc == test_data$Activity)
# Decision Trees
pred_test_ctree <- predict(ctree, test_data)
p_ctree <- mean(pred_test_ctree == test_data$Activity)
# KNN
p_knn9 <- mean(knn9 == test_data$Activity)
# Random Forest
pred_test_rf <- predict(rf, test_data)
p_rf <- mean(pred_test_rf == test_data$Activity)

# Confusion Matrix Analysis
# NOTE(review): `positive` only applies to two-class outcomes and is ignored
# for the six-level Activity factor
caret::confusionMatrix(test.pred, test_data$Activity, positive = "1", mode = "everything")
caret::confusionMatrix(pred_test_svm, test_data$Activity, positive = "1", mode = "everything")
caret::confusionMatrix(pred_test_bnc, test_data$Activity, positive = "1", mode = "everything")
caret::confusionMatrix(pred_test_ctree, test_data$Activity, positive = "1", mode = "everything")
caret::confusionMatrix(knn9, test_data$Activity, positive = "1", mode = "everything")
caret::confusionMatrix(pred_test_rf, test_data$Activity, positive = "1", mode = "everything")

####################
##### Conclusion ###
####################
# plot the six test-set accuracies side by side
p <- as.vector(c(p_lasso, p_svm, p_bnc, p_ctree, p_knn9, p_rf))
p_round <- round(p, 4)
# FIX: barplot() returns the bar midpoints; the original passed the accuracy
# values themselves as the x coordinates of text(), misplacing the labels
bar_x <- barplot(p_round, ylim = c(0, 1), main = "Model comparison", xlab = "Models",
                 names.arg = c("GLM/Lasso", "SVM", "Naive Bayes", "Decision Trees",
                               "KNN", "Randomforest"),
                 col = c("darkseagreen", "darkslategray3", "gold2", "lightpink2",
                         "lightsalmon1", "mediumseagreen"))
text(bar_x, p_round, labels = as.character(p_round), pos = 3, cex = 0.75)
# Functions to make (and access) climatologies from the ERA5 data

#' ERA5 get file name for hourly climatological data
#'
#' Get the file name for selected variable and date (hourly data)
#'
#' @export
#' @param variable 'prmsl', 'prate', 'air.2m', 'uwnd.10m' or 'vwnd.10m' - or any supported variable
#' @param month month of year (1-12) the climatology file is for
#' @param stream - 'ensda' for the lower resolution ensemble, otherwise 'oper' (default)
#' @param first.year optional first year of the climatological period; when
#'   given (with last.year) a period-specific 'normals.YYYY-YYYY' directory is used
#' @param last.year optional last year of the climatological period
#' @return File containing the requested data
ERA5.climatology.get.file.name <- function(variable, month, stream = 'oper',
                                           first.year = NULL, last.year = NULL) {
  base.dir <- ERA5.get.data.dir()
  dir.name <- sprintf("%s/normals/%s/hourly/%02d", base.dir, stream, month)
  # Period-specific climatologies live in their own 'normals.YYYY-YYYY' tree
  if (!is.null(first.year) || !is.null(last.year)) {
    dir.name <- sprintf("%s/normals.%04d-%04d/%s/hourly/%02d", base.dir,
                        first.year, last.year, stream, month)
  }
  file.name <- sprintf("%s/%s.nc", dir.name, variable)
  return(file.name)
}

#' Make (and store) a climatological average
#'
#' Data are stored as .nc files in a parallel directory to the original
#' data - same as original data except only for 1 year.
#' Loads a whole month of data at once
#'
#' Chose to use 1981 as the storage year in the netcdf file.
#'
#' @export
#' @param variable - 'air.2m', 'prmsl', 'prate', - 20CR names
#' @param month - 1-12, month to make climatology for.
#'  If NULL (default) run for all months
#' @param stream - 'ensda' for the lower resolution ensemble, otherwise 'oper' (default)
#' @param first.year first year of the averaging period (default 1981)
#' @param last.year last year of the averaging period (default 2010)
#' @return Nothing - a climatological file will be created as a side effect.
ERA5.make.climatology <- function(variable, month = NULL, stream = 'oper',
                                  first.year = 1981, last.year = 2010) {
  for (mnth in seq(1, 12)) {
    if (!is.null(month) && month != mnth) next
    # Load the data 1 year at a time and accumulate the sum in `result`
    result <- NULL
    for (year in seq(first.year, last.year)) {
      mnth.file <- ERA5.hourly.get.file.name(variable, year, mnth, 15, 0, stream)
      if (!file.exists(mnth.file)) {
        # FIX: stop() pastes its arguments together, it does not apply
        # sprintf-style formatting - wrap the template in sprintf()
        stop(sprintf("Missing data for %s %04d-%02d", variable, year, mnth))
      }
      # Time window covering the whole month (leap day deliberately excluded)
      t <- chron(sprintf("%04d/%02d/%02d", year, mnth, 1),
                 sprintf("%02d:00:00", 0),
                 format = c(dates = 'y/m/d', times = 'h:m:s'))
      nmd <- lubridate::days_in_month(as.POSIXct(t))
      if (nmd == 29) nmd <- 28 # death to leap years
      t2 <- chron(sprintf("%04d/%02d/%02d", year, mnth, nmd),
                  sprintf("%02d:59:59", 23),
                  format = c(dates = 'y/m/d', times = 'h:m:s'))
      if (stream == 'ensda') {
        slab <- GSDF.ncdf.load(mnth.file,
                               ERA5.translate.for.variable.names(variable),
                               lat.range = c(-90, 90), lon.range = c(0, 360),
                               ens.range = rep(0, 9),
                               time.range = c(t, t2))
      } else {
        slab <- GSDF.ncdf.load(mnth.file,
                               ERA5.translate.for.variable.names(variable),
                               lat.range = c(-90, 90), lon.range = c(0, 360),
                               time.range = c(t, t2))
      }
      if (is.null(slab)) {
        stop("Data for climatology is not on disc.")
      }
      if (stream == 'ensda') {
        # Collapse the ensemble dimension to its mean before accumulating
        slab <- GSDF.reduce.1d(slab, 'ensemble', mean)
      }
      if (is.null(result)) {
        result <- slab
      } else {
        result$data[] <- result$data + slab$data
      }
      gc(verbose = FALSE)
    }
    # Sum over years -> mean over years
    result$data[] <- result$data / (last.year - first.year + 1)
    # set the year to 1981 (the nominal storage year for climatologies)
    t.i <- GSDF.find.dimension(result, 'time')
    m <- stringr::str_match(as.POSIXlt(result$dimensions[[t.i]]$values, tz = 'GMT'),
                            "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
    result$dimensions[[t.i]]$values <- sprintf("1981-%s-%s:%s:%s",
                                               m[, 3], m[, 4], m[, 5], m[, 6])
    result$meta$calendar <- 'gregorian'
    # Write the result to a disc location paralleling the data
    fn <- ERA5.climatology.get.file.name(variable, mnth, stream)
    if (!file.exists(dirname(fn))) dir.create(dirname(fn), recursive = TRUE)
    GSDF.ncdf.write(result, fn, name = ERA5.translate.for.variable.names(variable))
  }
}
/GSDF.ERA5/R/climatology.R
permissive
oldweather/GSDF
R
false
false
4,130
r
# Functions to make (and access) climatologies from the ERA5 data

#' ERA5 get file name for hourly climatological data
#'
#' Get the file name for selected variable and date (hourly data)
#'
#' @export
#' @param variable 'prmsl', 'prate', 'air.2m', 'uwnd.10m' or 'vwnd.10m' - or any supported variable
#' @param month month of year (1-12) the climatology file is for
#' @param stream - 'ensda' for the lower resolution ensemble, otherwise 'oper' (default)
#' @param first.year optional first year of the climatological period; when
#'   given (with last.year) a period-specific 'normals.YYYY-YYYY' directory is used
#' @param last.year optional last year of the climatological period
#' @return File containing the requested data
ERA5.climatology.get.file.name <- function(variable, month, stream = 'oper',
                                           first.year = NULL, last.year = NULL) {
  base.dir <- ERA5.get.data.dir()
  dir.name <- sprintf("%s/normals/%s/hourly/%02d", base.dir, stream, month)
  # Period-specific climatologies live in their own 'normals.YYYY-YYYY' tree
  if (!is.null(first.year) || !is.null(last.year)) {
    dir.name <- sprintf("%s/normals.%04d-%04d/%s/hourly/%02d", base.dir,
                        first.year, last.year, stream, month)
  }
  file.name <- sprintf("%s/%s.nc", dir.name, variable)
  return(file.name)
}

#' Make (and store) a climatological average
#'
#' Data are stored as .nc files in a parallel directory to the original
#' data - same as original data except only for 1 year.
#' Loads a whole month of data at once
#'
#' Chose to use 1981 as the storage year in the netcdf file.
#'
#' @export
#' @param variable - 'air.2m', 'prmsl', 'prate', - 20CR names
#' @param month - 1-12, month to make climatology for.
#'  If NULL (default) run for all months
#' @param stream - 'ensda' for the lower resolution ensemble, otherwise 'oper' (default)
#' @param first.year first year of the averaging period (default 1981)
#' @param last.year last year of the averaging period (default 2010)
#' @return Nothing - a climatological file will be created as a side effect.
ERA5.make.climatology <- function(variable, month = NULL, stream = 'oper',
                                  first.year = 1981, last.year = 2010) {
  for (mnth in seq(1, 12)) {
    if (!is.null(month) && month != mnth) next
    # Load the data 1 year at a time and accumulate the sum in `result`
    result <- NULL
    for (year in seq(first.year, last.year)) {
      mnth.file <- ERA5.hourly.get.file.name(variable, year, mnth, 15, 0, stream)
      if (!file.exists(mnth.file)) {
        # FIX: stop() pastes its arguments together, it does not apply
        # sprintf-style formatting - wrap the template in sprintf()
        stop(sprintf("Missing data for %s %04d-%02d", variable, year, mnth))
      }
      # Time window covering the whole month (leap day deliberately excluded)
      t <- chron(sprintf("%04d/%02d/%02d", year, mnth, 1),
                 sprintf("%02d:00:00", 0),
                 format = c(dates = 'y/m/d', times = 'h:m:s'))
      nmd <- lubridate::days_in_month(as.POSIXct(t))
      if (nmd == 29) nmd <- 28 # death to leap years
      t2 <- chron(sprintf("%04d/%02d/%02d", year, mnth, nmd),
                  sprintf("%02d:59:59", 23),
                  format = c(dates = 'y/m/d', times = 'h:m:s'))
      if (stream == 'ensda') {
        slab <- GSDF.ncdf.load(mnth.file,
                               ERA5.translate.for.variable.names(variable),
                               lat.range = c(-90, 90), lon.range = c(0, 360),
                               ens.range = rep(0, 9),
                               time.range = c(t, t2))
      } else {
        slab <- GSDF.ncdf.load(mnth.file,
                               ERA5.translate.for.variable.names(variable),
                               lat.range = c(-90, 90), lon.range = c(0, 360),
                               time.range = c(t, t2))
      }
      if (is.null(slab)) {
        stop("Data for climatology is not on disc.")
      }
      if (stream == 'ensda') {
        # Collapse the ensemble dimension to its mean before accumulating
        slab <- GSDF.reduce.1d(slab, 'ensemble', mean)
      }
      if (is.null(result)) {
        result <- slab
      } else {
        result$data[] <- result$data + slab$data
      }
      gc(verbose = FALSE)
    }
    # Sum over years -> mean over years
    result$data[] <- result$data / (last.year - first.year + 1)
    # set the year to 1981 (the nominal storage year for climatologies)
    t.i <- GSDF.find.dimension(result, 'time')
    m <- stringr::str_match(as.POSIXlt(result$dimensions[[t.i]]$values, tz = 'GMT'),
                            "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
    result$dimensions[[t.i]]$values <- sprintf("1981-%s-%s:%s:%s",
                                               m[, 3], m[, 4], m[, 5], m[, 6])
    result$meta$calendar <- 'gregorian'
    # Write the result to a disc location paralleling the data
    fn <- ERA5.climatology.get.file.name(variable, mnth, stream)
    if (!file.exists(dirname(fn))) dir.create(dirname(fn), recursive = TRUE)
    GSDF.ncdf.write(result, fn, name = ERA5.translate.for.variable.names(variable))
  }
}
######################### Hot/Cold +/- #################################
## adding to data
## adding to app filters
# balls and strikes

# Per-batter hot/cold plus-minus scores over the selected games.
# Relies on objects defined elsewhere in the app: `data` (pitch-level table)
# and `radius` (hot-zone radius), plus the "Hitter Hot Cold.csv" lookup of
# each hitter's hot-zone centre.
hot.cold.plus.minus <- function(gameids) {
  hitterhc <- read.csv(file = "Hitter Hot Cold.csv", stringsAsFactors = FALSE)
  data %>%
    filter(GameID %in% gameids & endsWith(Date, "9") &
             BatterTeam %in% c("NOR_TAR", "NOR_TAR2")) %>%
    left_join(hitterhc, by = "Batter", suffix = c("", ".Hot")) %>%
    mutate(
      Swing = PitchCall %in% c("StrikeSwinging", "FoulBall", "InPlay"),
      # inside the batter's hot zone if within `radius` of his hot-zone centre
      In_Hot_Zone = sqrt((PlateLocSide_Catcher - PlateLocSide_Catcher.Hot)^2 +
                           (PlateLocHeight - PlateLocHeight.Hot)^2) < radius,
      In_Strike_Zone = (PitchCall == "StrikeCalled" | Exp.Called.Strike > 0.5),
      # classify the pitch: before two strikes by location/decision,
      # with two strikes by outcome
      HitterHotColdPM = ifelse(
        Strikes < 2,
        ifelse(Swing,
               ifelse(In_Hot_Zone, "Hot Zone Swing",
                      ifelse(In_Strike_Zone, "Cold Zone Swing", "Chase")),
               ifelse(In_Hot_Zone, "Hot Zone Take",
                      ifelse(In_Strike_Zone, "Cold Zone Take", "Ball Take"))),
        ifelse(as.logical(Hit) | as.logical(BB), "Reached Base",
               ifelse(BBtype %in% c("GB", "FB"), "Solid Contact",
                      ifelse(as.logical(K) | ContactType %in% c("Under", "Weak"),
                             "Strikeout/Weak Contact", "None")))),
      # points awarded per category
      HotColdPoints = ifelse(HitterHotColdPM == "Hot Zone Swing", 2,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Take", "Reached Base"), 1,
                      ifelse(HitterHotColdPM %in% c("Ball Take", "Solid Contact"), .5,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Swing", "Hot Zone Take",
                                                    "Strikeout/Weak Contact"), -1,
                      ifelse(HitterHotColdPM == "Chase", -2, 0))))),
      BadSwing = as.logical(HotColdPoints < 0 & as.logical(Swing)),  # == T
      BadTake = as.logical(HotColdPoints < 0 & !as.logical(Swing))   # == F
    ) %>%
    group_by(Batter) %>%
    summarise(HotColdPoints = sum(HotColdPoints, na.rm = TRUE),
              BadTake = sum(BadTake, na.rm = TRUE),
              BadSwings = sum(BadSwing, na.rm = TRUE),
              BadCallByUmp = sum(PitchCall == "StrikeCalled" &
                                   Exp.Called.Strike < .5, na.rm = TRUE),
              Pitches_Seen = n())
}

# table(dat2[,c("Date", "HotColdPoints", "Batter")])
#
#
# hitterhc %<>% mutate(minSide = PlateLocSide_Catcher * -1 - radius, maxSide = PlateLocSide_Catcher * -1 + radius,
#                      minHeight = PlateLocHeight - radius, maxHeight = PlateLocHeight + radius,
#                      minSideft = floor(abs(minSide)) * sign(minSide), minSidein = ceiling((minSide - minSideft) * 12),
#                      maxSideft = floor(abs(maxSide)) * sign(maxSide), maxSidein = ceiling((maxSide - maxSideft) * 12),
#                      minHeightft = floor(minHeight), minHeightin = ceiling((minHeight - minHeightft) * 12),
#                      maxHeightft = floor(maxHeight), maxHeightin = ceiling((maxHeight - maxHeightft) * 12)
# )
#
# hitterhc2 <- cbind(hitterhc[,c(1:4)], minSide = paste(hitterhc$minSideft, "' ", hitterhc$minSidein, '"'),
#                    maxSide = paste(hitterhc$maxSideft, "' ", hitterhc$maxSidein, '"'),
#                    minHeight = paste(hitterhc$minHeightft, "' ", hitterhc$minHeightin, '"'),
#                    maxHeight = paste(hitterhc$maxHeightft, "' ", hitterhc$maxHeightin, '"'))

## BELOW CAME FROM GLOBAL.R
# NOTE(review): this file defines hot.cold.plus.minus twice; when sourced,
# this second definition (identical except it also groups by Game) silently
# replaces the one above - confirm which grouping the app expects.
hot.cold.plus.minus <- function(gameids) {
  hitterhc <- read.csv(file = "Hitter Hot Cold.csv", stringsAsFactors = FALSE)
  data %>%
    filter(GameID %in% gameids & endsWith(Date, "9") &
             BatterTeam %in% c("NOR_TAR", "NOR_TAR2")) %>%
    left_join(hitterhc, by = "Batter", suffix = c("", ".Hot")) %>%
    mutate(
      Swing = PitchCall %in% c("StrikeSwinging", "FoulBall", "InPlay"),
      In_Hot_Zone = sqrt((PlateLocSide_Catcher - PlateLocSide_Catcher.Hot)^2 +
                           (PlateLocHeight - PlateLocHeight.Hot)^2) < radius,
      In_Strike_Zone = (PitchCall == "StrikeCalled" | Exp.Called.Strike > 0.5),
      HitterHotColdPM = ifelse(
        Strikes < 2,
        ifelse(Swing,
               ifelse(In_Hot_Zone, "Hot Zone Swing",
                      ifelse(In_Strike_Zone, "Cold Zone Swing", "Chase")),
               ifelse(In_Hot_Zone, "Hot Zone Take",
                      ifelse(In_Strike_Zone, "Cold Zone Take", "Ball Take"))),
        ifelse(as.logical(Hit) | as.logical(BB), "Reached Base",
               ifelse(BBtype %in% c("GB", "FB"), "Solid Contact",
                      ifelse(as.logical(K) | ContactType %in% c("Under", "Weak"),
                             "Strikeout/Weak Contact", "None")))),
      HotColdPoints = ifelse(HitterHotColdPM == "Hot Zone Swing", 2,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Take", "Reached Base"), 1,
                      ifelse(HitterHotColdPM %in% c("Ball Take", "Solid Contact"), .5,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Swing", "Hot Zone Take",
                                                    "Strikeout/Weak Contact"), -1,
                      ifelse(HitterHotColdPM == "Chase", -2, 0))))),
      BadSwing = as.logical(HotColdPoints < 0 & as.logical(Swing)),  # == T
      BadTake = as.logical(HotColdPoints < 0 & !as.logical(Swing))   # == F
    ) %>%
    group_by(Batter, Game) %>%
    summarise(HotColdPoints = sum(HotColdPoints, na.rm = TRUE),
              BadTake = sum(BadTake, na.rm = TRUE),
              BadSwings = sum(BadSwing, na.rm = TRUE),
              BadCallByUmp = sum(PitchCall == "StrikeCalled" &
                                   Exp.Called.Strike < .5, na.rm = TRUE),
              Pitches_Seen = n())
}
/Hittter Hot Cold Plus Minus.R
no_license
chadraines/TAR
R
false
false
6,101
r
######################### Hot/Cold +/- #################################
## adding to data
## adding to app filters
# balls and strikes

# Per-batter hot/cold plus-minus scores over the selected games.
# Relies on objects defined elsewhere in the app: `data` (pitch-level table)
# and `radius` (hot-zone radius), plus the "Hitter Hot Cold.csv" lookup of
# each hitter's hot-zone centre.
hot.cold.plus.minus <- function(gameids) {
  hitterhc <- read.csv(file = "Hitter Hot Cold.csv", stringsAsFactors = FALSE)
  data %>%
    filter(GameID %in% gameids & endsWith(Date, "9") &
             BatterTeam %in% c("NOR_TAR", "NOR_TAR2")) %>%
    left_join(hitterhc, by = "Batter", suffix = c("", ".Hot")) %>%
    mutate(
      Swing = PitchCall %in% c("StrikeSwinging", "FoulBall", "InPlay"),
      # inside the batter's hot zone if within `radius` of his hot-zone centre
      In_Hot_Zone = sqrt((PlateLocSide_Catcher - PlateLocSide_Catcher.Hot)^2 +
                           (PlateLocHeight - PlateLocHeight.Hot)^2) < radius,
      In_Strike_Zone = (PitchCall == "StrikeCalled" | Exp.Called.Strike > 0.5),
      # classify the pitch: before two strikes by location/decision,
      # with two strikes by outcome
      HitterHotColdPM = ifelse(
        Strikes < 2,
        ifelse(Swing,
               ifelse(In_Hot_Zone, "Hot Zone Swing",
                      ifelse(In_Strike_Zone, "Cold Zone Swing", "Chase")),
               ifelse(In_Hot_Zone, "Hot Zone Take",
                      ifelse(In_Strike_Zone, "Cold Zone Take", "Ball Take"))),
        ifelse(as.logical(Hit) | as.logical(BB), "Reached Base",
               ifelse(BBtype %in% c("GB", "FB"), "Solid Contact",
                      ifelse(as.logical(K) | ContactType %in% c("Under", "Weak"),
                             "Strikeout/Weak Contact", "None")))),
      # points awarded per category
      HotColdPoints = ifelse(HitterHotColdPM == "Hot Zone Swing", 2,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Take", "Reached Base"), 1,
                      ifelse(HitterHotColdPM %in% c("Ball Take", "Solid Contact"), .5,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Swing", "Hot Zone Take",
                                                    "Strikeout/Weak Contact"), -1,
                      ifelse(HitterHotColdPM == "Chase", -2, 0))))),
      BadSwing = as.logical(HotColdPoints < 0 & as.logical(Swing)),  # == T
      BadTake = as.logical(HotColdPoints < 0 & !as.logical(Swing))   # == F
    ) %>%
    group_by(Batter) %>%
    summarise(HotColdPoints = sum(HotColdPoints, na.rm = TRUE),
              BadTake = sum(BadTake, na.rm = TRUE),
              BadSwings = sum(BadSwing, na.rm = TRUE),
              BadCallByUmp = sum(PitchCall == "StrikeCalled" &
                                   Exp.Called.Strike < .5, na.rm = TRUE),
              Pitches_Seen = n())
}

# table(dat2[,c("Date", "HotColdPoints", "Batter")])
#
#
# hitterhc %<>% mutate(minSide = PlateLocSide_Catcher * -1 - radius, maxSide = PlateLocSide_Catcher * -1 + radius,
#                      minHeight = PlateLocHeight - radius, maxHeight = PlateLocHeight + radius,
#                      minSideft = floor(abs(minSide)) * sign(minSide), minSidein = ceiling((minSide - minSideft) * 12),
#                      maxSideft = floor(abs(maxSide)) * sign(maxSide), maxSidein = ceiling((maxSide - maxSideft) * 12),
#                      minHeightft = floor(minHeight), minHeightin = ceiling((minHeight - minHeightft) * 12),
#                      maxHeightft = floor(maxHeight), maxHeightin = ceiling((maxHeight - maxHeightft) * 12)
# )
#
# hitterhc2 <- cbind(hitterhc[,c(1:4)], minSide = paste(hitterhc$minSideft, "' ", hitterhc$minSidein, '"'),
#                    maxSide = paste(hitterhc$maxSideft, "' ", hitterhc$maxSidein, '"'),
#                    minHeight = paste(hitterhc$minHeightft, "' ", hitterhc$minHeightin, '"'),
#                    maxHeight = paste(hitterhc$maxHeightft, "' ", hitterhc$maxHeightin, '"'))

## BELOW CAME FROM GLOBAL.R
# NOTE(review): this file defines hot.cold.plus.minus twice; when sourced,
# this second definition (identical except it also groups by Game) silently
# replaces the one above - confirm which grouping the app expects.
hot.cold.plus.minus <- function(gameids) {
  hitterhc <- read.csv(file = "Hitter Hot Cold.csv", stringsAsFactors = FALSE)
  data %>%
    filter(GameID %in% gameids & endsWith(Date, "9") &
             BatterTeam %in% c("NOR_TAR", "NOR_TAR2")) %>%
    left_join(hitterhc, by = "Batter", suffix = c("", ".Hot")) %>%
    mutate(
      Swing = PitchCall %in% c("StrikeSwinging", "FoulBall", "InPlay"),
      In_Hot_Zone = sqrt((PlateLocSide_Catcher - PlateLocSide_Catcher.Hot)^2 +
                           (PlateLocHeight - PlateLocHeight.Hot)^2) < radius,
      In_Strike_Zone = (PitchCall == "StrikeCalled" | Exp.Called.Strike > 0.5),
      HitterHotColdPM = ifelse(
        Strikes < 2,
        ifelse(Swing,
               ifelse(In_Hot_Zone, "Hot Zone Swing",
                      ifelse(In_Strike_Zone, "Cold Zone Swing", "Chase")),
               ifelse(In_Hot_Zone, "Hot Zone Take",
                      ifelse(In_Strike_Zone, "Cold Zone Take", "Ball Take"))),
        ifelse(as.logical(Hit) | as.logical(BB), "Reached Base",
               ifelse(BBtype %in% c("GB", "FB"), "Solid Contact",
                      ifelse(as.logical(K) | ContactType %in% c("Under", "Weak"),
                             "Strikeout/Weak Contact", "None")))),
      HotColdPoints = ifelse(HitterHotColdPM == "Hot Zone Swing", 2,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Take", "Reached Base"), 1,
                      ifelse(HitterHotColdPM %in% c("Ball Take", "Solid Contact"), .5,
                      ifelse(HitterHotColdPM %in% c("Cold Zone Swing", "Hot Zone Take",
                                                    "Strikeout/Weak Contact"), -1,
                      ifelse(HitterHotColdPM == "Chase", -2, 0))))),
      BadSwing = as.logical(HotColdPoints < 0 & as.logical(Swing)),  # == T
      BadTake = as.logical(HotColdPoints < 0 & !as.logical(Swing))   # == F
    ) %>%
    group_by(Batter, Game) %>%
    summarise(HotColdPoints = sum(HotColdPoints, na.rm = TRUE),
              BadTake = sum(BadTake, na.rm = TRUE),
              BadSwings = sum(BadSwing, na.rm = TRUE),
              BadCallByUmp = sum(PitchCall == "StrikeCalled" &
                                   Exp.Called.Strike < .5, na.rm = TRUE),
              Pitches_Seen = n())
}
## Cache the inverse of a matrix.
## Matrix inversion is usually a costly computation, so rather than recomputing
## solve() on every call, makeCacheMatrix() wraps a matrix together with a
## cached inverse, and cacheSolve() computes the inverse only on a cache miss.

## makeCacheMatrix: build a special "matrix" object that can cache its inverse.
## Returns a list of four accessors: set/get the matrix, setinverse/getinverse
## for the cached inverse. Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix -> stale inverse must be dropped
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve: return the inverse of the special "matrix" made by
## makeCacheMatrix. On a cache hit the stored inverse is returned (with a
## message); otherwise solve() is called, the result cached, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  inv
}
/cachematrix.R
no_license
Cclal/ProgrammingAssignment2
R
false
false
1,197
r
## Cache the inverse of a matrix.
## Matrix inversion is usually a costly computation, so rather than recomputing
## solve() on every call, makeCacheMatrix() wraps a matrix together with a
## cached inverse, and cacheSolve() computes the inverse only on a cache miss.

## makeCacheMatrix: build a special "matrix" object that can cache its inverse.
## Returns a list of four accessors: set/get the matrix, setinverse/getinverse
## for the cached inverse. Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix -> stale inverse must be dropped
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve: return the inverse of the special "matrix" made by
## makeCacheMatrix. On a cache hit the stored inverse is returned (with a
## message); otherwise solve() is called, the result cached, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  inv
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/accounts.R \name{list_accounts} \alias{list_accounts} \title{Accounts} \usage{ list_accounts(start.index = NULL, max.results = NULL, token) } \arguments{ \item{start.index}{integer. An index of the first account to retrieve. Use this parameter as a pagination mechanism along with the max-results parameter.} \item{max.results}{integer. The maximum number of accounts to include in this response.} \item{token}{\code{\link[httr]{Token2.0}} class object with a valid authorization data.} } \value{ The Accounts collection is a set of Account resources, each of which describes the account of an authenticated user. \item{id}{Account ID.} \item{kind}{Resource type for Analytics account.} \item{name}{Account name.} \item{permissions}{Permissions the user has for this account.} \item{created}{Time the account was created.} \item{updated}{Time the account was last modified.} \item{starred}{Indicates whether this account is starred or not.} \item{permissions}{All the permissions that the user has for this account. These include any implied permissions (e.g., EDIT implies VIEW).} } \description{ Lists all accounts to which the user has access. 
} \references{ \href{https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/accounts}{Management API - Accounts Overview} } \seealso{ Other Management API: \code{\link{get_custom_dimension}}, \code{\link{get_custom_metric}}, \code{\link{get_experiment}}, \code{\link{get_filter}}, \code{\link{get_goal}}, \code{\link{get_profile}}, \code{\link{get_remarketing_audience}}, \code{\link{get_unsampled_report}}, \code{\link{get_upload}}, \code{\link{get_webproperty}}, \code{\link{list_custom_data_sources}}, \code{\link{list_custom_dimensions}}, \code{\link{list_custom_metrics}}, \code{\link{list_experiments}}, \code{\link{list_filters}}, \code{\link{list_goals}}, \code{\link{list_profiles}}, \code{\link{list_remarketing_audiences}}, \code{\link{list_segments}}, \code{\link{list_unsampled_reports}}, \code{\link{list_uploads}}, \code{\link{list_webproperties}} }
/man/list_accounts.Rd
no_license
selesnow/RGA
R
false
true
2,162
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/accounts.R \name{list_accounts} \alias{list_accounts} \title{Accounts} \usage{ list_accounts(start.index = NULL, max.results = NULL, token) } \arguments{ \item{start.index}{integer. An index of the first account to retrieve. Use this parameter as a pagination mechanism along with the max-results parameter.} \item{max.results}{integer. The maximum number of accounts to include in this response.} \item{token}{\code{\link[httr]{Token2.0}} class object with a valid authorization data.} } \value{ The Accounts collection is a set of Account resources, each of which describes the account of an authenticated user. \item{id}{Account ID.} \item{kind}{Resource type for Analytics account.} \item{name}{Account name.} \item{permissions}{Permissions the user has for this account.} \item{created}{Time the account was created.} \item{updated}{Time the account was last modified.} \item{starred}{Indicates whether this account is starred or not.} \item{permissions}{All the permissions that the user has for this account. These include any implied permissions (e.g., EDIT implies VIEW).} } \description{ Lists all accounts to which the user has access. 
} \references{ \href{https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/accounts}{Management API - Accounts Overview} } \seealso{ Other Management API: \code{\link{get_custom_dimension}}, \code{\link{get_custom_metric}}, \code{\link{get_experiment}}, \code{\link{get_filter}}, \code{\link{get_goal}}, \code{\link{get_profile}}, \code{\link{get_remarketing_audience}}, \code{\link{get_unsampled_report}}, \code{\link{get_upload}}, \code{\link{get_webproperty}}, \code{\link{list_custom_data_sources}}, \code{\link{list_custom_dimensions}}, \code{\link{list_custom_metrics}}, \code{\link{list_experiments}}, \code{\link{list_filters}}, \code{\link{list_goals}}, \code{\link{list_profiles}}, \code{\link{list_remarketing_audiences}}, \code{\link{list_segments}}, \code{\link{list_unsampled_reports}}, \code{\link{list_uploads}}, \code{\link{list_webproperties}} }
#' Modelling of sterilization and immigration of companion animals.
#' @description System of ordinary differential equations to simulate the
#' effect of sterilization and immigration on population dynamics.
#' @param pars \code{\link{vector}} of length 4. The values are point
#' estimates of birth rate, death rate, carrying capacity and sterilization
#' rate. The names of these values must be "b", "d", "k" and "s",
#' respectively.
#' @param init \code{\link{vector}} of length 2. The values are initial
#' population size and initial proportion of sterilized animals. The names
#' of these values must be "n" and "q", respectively.
#' @param time time sequence for which output is wanted; the first value of
#' times must be the initial time.
#' @param dd string equal to \code{b} or \code{d} to define if
#' density-dependence acts on birth or death rates respectively.
#' @param im a number representing the total of immigrants per time unit.
#' @param s.range optional sequence (between 0 and 1) of the sterilization
#' rates to be simulated.
#' @param ... further arguments passed to \link[deSolve]{ode} function.
#' @details The implemented model is described by Amaku, et. al., 2009 and
#' the function is a wrapper around the defaults of \link[deSolve]{ode}
#' function, whose help page must be consulted for details.
#' @return \code{\link{list}}. The first element, \code{name}, is a string
#' with the name of the function, the second element, \code{model}, is the
#' model function. The third, fourth and fifth elements are vectors
#' (\code{pars}, \code{init}, \code{time}, respectively) containing the
#' \code{pars}, \code{init} and \code{time} arguments of the function. The
#' sixth element \code{results} is a \code{\link{data.frame}} with up to as
#' many rows as elements in time. First column contains the time, second
#' column the population size and third column the proportion of sterilized
#' animals. If \code{s.range} is specified, fourth column contains its
#' specific instances.
#' @note Logistic growth models are not intended for scenarios in which
#' population size is greater than carrying capacity and growth rate is
#' negative.
#' @references Amaku M, Dias R and Ferreira F (2009). Dinamica populacional
#' canina: potenciais efeitos de campanhas de esterilizacao. Revista
#' Panamericana de Salud Publica, 25(4), pp. 300-304.
#'
#' Soetaert K, Cash J and Mazzia F (2012). Solving differential equations in
#' R. Springer.
#'
#' \url{http://oswaldosantos.github.io/capm}
#' @seealso \link[deSolve]{ode}.
#' @export
#' @examples
#' # Parameters and initial conditions from estimates
#' # obtained in examples section from svysumm function but
#' # estimating a proportion instead of a total for births.
#' pars.solve.si = c(b = 0.245, d = 0.101,
#'                   k = 98050, s = 0.048)
#' init.solve.si = c(n = 89137, q = 0.198)
#'
#' # Solve for a specific sterilization rate.
#' solvesi.pt = SolveSI(pars = pars.solve.si,
#'                      init = init.solve.si,
#'                      time = 0:15, dd = 'b',
#'                      im = 100, method = 'rk4')
#'
#' # Solve for a range of sterilization rates.
#' solvesi.rg = SolveSI(pars = pars.solve.si,
#'                      init = init.solve.si,
#'                      time = 0:15, dd = 'b', im = 100,
#'                      s.range = seq(0, .4, l = 50),
#'                      method = 'rk4')
#'
SolveSI <- function(pars = NULL, init = NULL, time = NULL,
                    dd = 'b', im = 0, s.range = NULL, ...) {
  # Fail early with a clear message on an invalid density-dependence flag
  # instead of producing an obscure "object 'nat' not found" error inside
  # the ODE integrator.
  dd <- match.arg(dd, c('b', 'd'))

  # Solve the ODE system once for a single parameter set; returns the
  # deSolve output coerced to a data.frame (columns: time, n, q).
  SolveSIfu <- function(pars = NULL, init = NULL, time = NULL) {
    # Derivatives for deSolve::ode following Amaku et al. (2009), with
    # immigration (im) added as a constant inflow to dn/dt.
    SolveSI.fu <- function(time, init, pars) {
      with(as.list(c(init, pars)), {
        if (dd == 'b') {
          # Density dependence acts on the birth rate.
          nat <- b - (b - d) * (n / k)
          mor <- d
        } else {
          # Density dependence acts on the death rate.
          nat <- b
          mor <- d + (b - d) * (n / k)
        }
        dn <- n * (nat * (1 - q) - mor) + im
        dq <- (1 - q) * (s - q * nat)
        list(c(dn, dq))
      })
    }
    # Keep only (and order) the state variables expected by the model.
    init <- c(init['n'], init['q'])
    SolveSI.out <- ode(times = time, func = SolveSI.fu, y = init,
                       parms = pars, ...)
    as.data.frame(SolveSI.out)
  }

  if (!is.null(s.range)) {
    # One solve per sterilization rate; build the pieces with lapply and
    # bind once, instead of growing the result with rbind in a loop.
    runs <- lapply(seq_along(s.range), function(i) {
      paras <- pars
      paras['s'] <- s.range[i]
      SolveSIfu(pars = paras, init = init, time = time)
    })
    output <- do.call(rbind, runs)
    # Tag each block of rows with the sterilization rate that produced it.
    s.rate <- rep(s.range, each = length(time))
    results <- as.data.frame(cbind(output, s.rate))
  } else {
    results <- as.data.frame(SolveSIfu(pars = pars, init = init, time = time))
  }

  # Same list structure in both branches; only `results` differs.
  SolveSI <- list(name = 'SolveSI',
                  model = SolveSIfu,
                  pars = pars,
                  init = init,
                  time = time,
                  results = results)
  class(SolveSI) <- 'capmModels'
  SolveSI
}
/capm/R/SolveSI.R
no_license
ingted/R-Examples
R
false
false
4,926
r
#' Modelling of sterilization and immigration of companion animals.
#' @description System of ordinary differential equations to simulate the
#' effect of sterilization and immigration on population dynamics.
#' @param pars \code{\link{vector}} of length 4. The values are point
#' estimates of birth rate, death rate, carrying capacity and sterilization
#' rate. The names of these values must be "b", "d", "k" and "s",
#' respectively.
#' @param init \code{\link{vector}} of length 2. The values are initial
#' population size and initial proportion of sterilized animals. The names
#' of these values must be "n" and "q", respectively.
#' @param time time sequence for which output is wanted; the first value of
#' times must be the initial time.
#' @param dd string equal to \code{b} or \code{d} to define if
#' density-dependence acts on birth or death rates respectively.
#' @param im a number representing the total of immigrants per time unit.
#' @param s.range optional sequence (between 0 and 1) of the sterilization
#' rates to be simulated.
#' @param ... further arguments passed to \link[deSolve]{ode} function.
#' @details The implemented model is described by Amaku, et. al., 2009 and
#' the function is a wrapper around the defaults of \link[deSolve]{ode}
#' function, whose help page must be consulted for details.
#' @return \code{\link{list}}. The first element, \code{name}, is a string
#' with the name of the function, the second element, \code{model}, is the
#' model function. The third, fourth and fifth elements are vectors
#' (\code{pars}, \code{init}, \code{time}, respectively) containing the
#' \code{pars}, \code{init} and \code{time} arguments of the function. The
#' sixth element \code{results} is a \code{\link{data.frame}} with up to as
#' many rows as elements in time. First column contains the time, second
#' column the population size and third column the proportion of sterilized
#' animals. If \code{s.range} is specified, fourth column contains its
#' specific instances.
#' @note Logistic growth models are not intended for scenarios in which
#' population size is greater than carrying capacity and growth rate is
#' negative.
#' @references Amaku M, Dias R and Ferreira F (2009). Dinamica populacional
#' canina: potenciais efeitos de campanhas de esterilizacao. Revista
#' Panamericana de Salud Publica, 25(4), pp. 300-304.
#'
#' Soetaert K, Cash J and Mazzia F (2012). Solving differential equations in
#' R. Springer.
#'
#' \url{http://oswaldosantos.github.io/capm}
#' @seealso \link[deSolve]{ode}.
#' @export
#' @examples
#' # Parameters and initial conditions from estimates
#' # obtained in examples section from svysumm function but
#' # estimating a proportion instead of a total for births.
#' pars.solve.si = c(b = 0.245, d = 0.101,
#'                   k = 98050, s = 0.048)
#' init.solve.si = c(n = 89137, q = 0.198)
#'
#' # Solve for a specific sterilization rate.
#' solvesi.pt = SolveSI(pars = pars.solve.si,
#'                      init = init.solve.si,
#'                      time = 0:15, dd = 'b',
#'                      im = 100, method = 'rk4')
#'
#' # Solve for a range of sterilization rates.
#' solvesi.rg = SolveSI(pars = pars.solve.si,
#'                      init = init.solve.si,
#'                      time = 0:15, dd = 'b', im = 100,
#'                      s.range = seq(0, .4, l = 50),
#'                      method = 'rk4')
#'
SolveSI <- function(pars = NULL, init = NULL, time = NULL,
                    dd = 'b', im = 0, s.range = NULL, ...) {
  # Integrate the system once for a single parameter set and return the
  # deSolve output coerced to a data.frame (time, n, q).
  solve_once <- function(pars = NULL, init = NULL, time = NULL) {
    # Right-hand side of the ODE system (Amaku et al., 2009): immigration
    # enters dn/dt as a constant inflow.
    derivs <- function(time, init, pars) {
      with(as.list(c(init, pars)), {
        if (dd == 'b') {
          # density dependence on births
          nat <- b - (b - d) * (n / k)
          mor <- d
        }
        if (dd == 'd') {
          # density dependence on deaths
          nat <- b
          mor <- d + (b - d) * (n / k)
        }
        dn <- n * (nat * (1 - q) - mor) + im
        dq <- (1 - q) * (s - q * nat)
        list(c(dn, dq))
      })
    }
    # Select the state variables in the order the model expects.
    init <- c(init['n'], init['q'])
    out <- ode(times = time, func = derivs, y = init, parms = pars, ...)
    as.data.frame(out)
  }

  if (is.null(s.range)) {
    # Single sterilization rate: one solve, results used as-is.
    output <- solve_once(pars = pars, init = init, time = time)
    res <- list(name = 'SolveSI',
                model = solve_once,
                pars = pars,
                init = init,
                time = time,
                results = as.data.frame(output))
    class(res) <- 'capmModels'
    return(res)
  }

  # Range of sterilization rates: solve for each value of s and stack the
  # solutions, labelling every block of rows with the rate that produced it.
  pieces <- lapply(s.range, function(s_i) {
    p <- pars
    p['s'] <- s_i
    solve_once(pars = p, init = init, time = time)
  })
  output <- do.call(rbind, pieces)
  s.rate <- rep(s.range, each = length(time))
  res <- list(name = 'SolveSI',
              model = solve_once,
              pars = pars,
              init = init,
              time = time,
              results = as.data.frame(cbind(output, s.rate)))
  class(res) <- 'capmModels'
  return(res)
}
\name{plot.eqCorTestByRows} \alias{plot.eqCorTestByRows} \title{ plot for equality of two correlation matrices by rows test } \description{ graphical representation for the equality of two correlation matrices test by rows: confidence intervals of the test statistics. } \usage{ \method{plot}{eqCorTestByRows}(x, mains = c("AS CI", "max CI"), xlabs = c("",""), ylabs = c("",""), pch = "-", ownCols = TRUE, ...) } \arguments{ \item{x}{ object of class \code{eqCorTestByRows}. } \item{mains}{ vector of size two with main of plots (for average of squares test and extreme value test). } \item{xlabs}{ vector of size two with xlabs of plots (for average of squares test and extreme value test). } \item{ylabs}{ vector of size two with ylabs of plots (for average of squares test and extreme value test). } \item{pch}{ \code{pch} given to identify confidence interval limits. } \item{ownCols}{ if \code{ownCols = TRUE} green and black colors are generated with green lines identifying significant variables. } \item{\dots}{ arguments passed to or from other methods to the low level. } } \author{ Caballe, Adria <a.caballe@sms.ed.ac.uk>, Natalia Bochkina and Claus Mayer. } \seealso{ \code{\link{eqCorTestByRows}} for equality of two correlation matrices by rows test. } \examples{ EX2 <- pcorSimulatorJoint(nobs = 200, nclusters = 3, nnodesxcluster = c(60,40,50), pattern = "pow", diffType = "cluster", dataDepend = "diag", pdiff=0.5) #### eq corr by rows ## not run #eqCorEX2 <- eqCorTestByRows(EX2$D1, EX2$D2, testStatistic = c("AS", "max"), # nite = 200, paired = TRUE, exact = TRUE, # subMatComp = FALSE, iniP = 1, finP = 40, # conf.level = 0.95) #plot(eqCorEX2) }
/man/plotequalityCorrelationsByRows.Rd
no_license
cran/ldstatsHD
R
false
false
1,931
rd
\name{plot.eqCorTestByRows} \alias{plot.eqCorTestByRows} \title{ plot for equality of two correlation matrices by rows test } \description{ graphical representation for the equality of two correlation matrices test by rows: confidence intervals of the test statistics. } \usage{ \method{plot}{eqCorTestByRows}(x, mains = c("AS CI", "max CI"), xlabs = c("",""), ylabs = c("",""), pch = "-", ownCols = TRUE, ...) } \arguments{ \item{x}{ object of class \code{eqCorTestByRows}. } \item{mains}{ vector of size two with main of plots (for average of squares test and extreme value test). } \item{xlabs}{ vector of size two with xlabs of plots (for average of squares test and extreme value test). } \item{ylabs}{ vector of size two with ylabs of plots (for average of squares test and extreme value test). } \item{pch}{ \code{pch} given to identify confidence interval limits. } \item{ownCols}{ if \code{ownCols = TRUE} green and black colors are generated with green lines identifying significant variables. } \item{\dots}{ arguments passed to or from other methods to the low level. } } \author{ Caballe, Adria <a.caballe@sms.ed.ac.uk>, Natalia Bochkina and Claus Mayer. } \seealso{ \code{\link{eqCorTestByRows}} for equality of two correlation matrices by rows test. } \examples{ EX2 <- pcorSimulatorJoint(nobs = 200, nclusters = 3, nnodesxcluster = c(60,40,50), pattern = "pow", diffType = "cluster", dataDepend = "diag", pdiff=0.5) #### eq corr by rows ## not run #eqCorEX2 <- eqCorTestByRows(EX2$D1, EX2$D2, testStatistic = c("AS", "max"), # nite = 200, paired = TRUE, exact = TRUE, # subMatComp = FALSE, iniP = 1, finP = 40, # conf.level = 0.95) #plot(eqCorEX2) }