content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Plot 4: total PM2.5 emissions from coal combustion-related sources by year.
# FIX(review): the original script called ggplot() without attaching ggplot2,
# which errors at run time -- library(ggplot2) added below.

# Load libraries
library(plyr)
library(ggplot2)

## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Open the PNG graphics device before drawing
png(filename = "plot4.png", width = 480, height = 480, units = "px")

# Total emissions per year and source classification code
emission_df <- ddply(NEI, .(year, SCC), summarize, Emissions = sum(Emissions))

# Keep only coal-related source codes (match "coal"/"Coal" in the short name)
coal_df <- SCC[grep('[cC]oal', SCC$Short.Name), c('SCC', 'Short.Name')]

# Restrict the yearly totals to coal sources
data_df <- merge(emission_df, coal_df, by = 'SCC')

# Line plot of total coal-combustion emissions per year
plot4 <- ggplot(ddply(data_df, .(year), summarize, Emissions = sum(Emissions)))
plot4 <- plot4 + geom_line(aes(year, Emissions))
print(plot4)

# Close device so the file is written
dev.off()
| /plot4.R | no_license | dushyantlad/ExDataAna_Prj2 | R | false | false | 664 | r | #Load library
# Plot 4: total PM2.5 emissions from coal combustion-related sources by year.
# FIX(review): ggplot() was called without attaching ggplot2 -- added below.
library(plyr)
library(ggplot2)

## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Open the PNG graphics device before drawing
png(filename = "plot4.png", width = 480, height = 480, units = "px")

# Total emissions per year and source classification code
emission_df <- ddply(NEI, .(year, SCC), summarize, Emissions = sum(Emissions))

# Keep only coal-related source codes (match "coal"/"Coal" in the short name)
coal_df <- SCC[grep('[cC]oal', SCC$Short.Name), c('SCC', 'Short.Name')]

# Restrict the yearly totals to coal sources
data_df <- merge(emission_df, coal_df, by = 'SCC')

# Line plot of total coal-combustion emissions per year
plot4 <- ggplot(ddply(data_df, .(year), summarize, Emissions = sum(Emissions)))
plot4 <- plot4 + geom_line(aes(year, Emissions))
print(plot4)

# Close device so the file is written
dev.off()
|
library(comato)

### Name: pathfinder.conceptmaps
### Title: Creating a Pathfinder network from a conceptmaps object
### Aliases: pathfinder.conceptmaps

### ** Examples

# Build three concept maps from independently sampled random graphs
require("igraph")
random_named_graph <- function() {
  set.vertex.attribute(erdos.renyi.game(5, 0.7, type = "gnp"), "name", value = 1:5)
}
g1 <- random_named_graph()
g2 <- random_named_graph()
g3 <- random_named_graph()

# Wrap the three graphs into a single conceptmaps collection
simple_cms <- conceptmaps(list(conceptmap(g1), conceptmap(g2), conceptmap(g3)))

# Derive the Pathfinder network (q = 1) and return it as a conceptmap object
cm <- pathfinder(simple_cms, q = 1, return.cm = TRUE)
| /data/genthat_extracted_code/comato/examples/pathfinder.conceptmaps.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 743 | r | library(comato)
### Name: pathfinder.conceptmaps
### Title: Creating a Pathfinder network from a conceptmaps object
### Aliases: pathfinder.conceptmaps

### ** Examples

# Sample three Erdos-Renyi graphs (n = 5, p = 0.7) with named vertices
require("igraph")
graphs <- lapply(1:3, function(i) {
  set.vertex.attribute(erdos.renyi.game(5, 0.7, type = "gnp"), "name", value = 1:5)
})
g1 <- graphs[[1]]
g2 <- graphs[[2]]
g3 <- graphs[[3]]

# Combine the individual conceptmap objects into a conceptmaps collection
simple_cms <- conceptmaps(list(conceptmap(g1), conceptmap(g2), conceptmap(g3)))

# Create the Pathfinder network from the collection as a conceptmap object
cm <- pathfinder(simple_cms, q = 1, return.cm = TRUE)
|
# Wrapper for the Gibbs sampler in the 'pan' package. Produces m multiple
# imputations of the missing values in 'data' under a multilevel model and
# returns an object of class c("mitml","pan") holding the data, the imputed
# values, and the sampled parameter chains.
#
# data:        incomplete data set (data frame, or coercible to one)
# type:        vector coding the role of each column (exclusive with 'formula')
# formula:     formula describing the imputation model (exclusive with 'type')
# n.burn:      number of burn-in iterations of the sampler
# n.iter:      iterations between saved imputations
# m:           number of imputed data sets
# group:       optional name of a grouping variable; the model is fit
#              separately within each group
# prior:       list of prior parameters (a, Binv, c, Dinv); a default is
#              built if NULL
# seed:        optional RNG seed
# save.pred:   keep predictor variables in the output (formula input only)
# keep.chains: "full" stores complete psi/sigma chains, "diagonal" stores
#              only their diagonals (saves memory)
# silent:      suppress progress messages?
panImpute <- function(data, type, formula, n.burn=5000, n.iter=100, m=10,
                      group=NULL, prior=NULL, seed=NULL, save.pred=FALSE,
                      keep.chains=c("full","diagonal"), silent=FALSE){

  # *** checks: 'type' and 'formula' are mutually exclusive model inputs
  if(!missing(type) && !missing(formula)) stop("Only one of 'type' or 'formula' may be specified.")
  if(save.pred && !missing(type)){
    warning("Option 'save.pred' is ignored if 'type' is specified")
    save.pred <- FALSE
  }
  keep.chains <- match.arg(keep.chains)

  # convert 'type' input into the equivalent formula representation
  if(!missing(type)){
    formula <- .type2formula(data,type)
    group <- attr(formula, "group")
  }

  # empty objects to assign to (populated by .model.byFormula below)
  clname <- yvrs <- y <- ycat <- zcol <- xcol <- pred <- clus <- psave <-
    pvrs <- qvrs <- pnames <- qnames <- NULL

  # preserve original row order so it can be restored in the output.
  # FIX: the coercion result was previously discarded
  # ('as.data.frame(data)' without assignment), leaving 'data' unconverted
  # for non-data-frame input.
  if(!is.data.frame(data)) data <- as.data.frame(data)
  data <- cbind(data, original.order=1:nrow(data))

  # address additional grouping
  grname <- group
  if(is.null(group)){
    group <- rep(1,nrow(data))
  }else{
    group <- data[,group]
    if(length(group)!=nrow(data)) stop("Argument 'group' is not correctly specified.")
  }
  group.original <- group
  # recode group labels to consecutive integers 1..ng
  group <- as.numeric(factor(group,levels=unique(group)))

  # ***
  # model input

  # populate local frame (assigns clname, yvrs, y, pred, clus, ... above)
  .model.byFormula(data, formula, group, group.original, method="pan")

  # check model input
  if(any(is.na(group)))
    stop("Grouping variable must not contain missing data.")
  if(any(is.na(pred)))
    stop("Predictor variables must not contain missing data.")
  if(sum(is.na(y))==0)
    stop("Target variables do not contain any missing data.")
  if(any(!sapply(y,is.numeric)))
    stop("Target variables must be numeric. You may either convert them or use jomoImpute() instead.")
  if(any(duplicated(yvrs)))
    stop("Found duplicate target variables.")

  # reorder columns: cluster, group, and target variables first
  cc <- which(colnames(data) %in% c(clname,grname,yvrs))
  data.ord <- cbind(data[c(clname,grname,yvrs)],data[-cc])

  # ***
  # pan setup

  # default prior if none supplied
  if(is.null(prior)){
    prior <- list( a=ncol(y), Binv=diag(1,ncol(y)),
                   c=ncol(y)*length(zcol), Dinv=diag(1,ncol(y)*length(zcol)) )
  }
  if(is.null(seed)){
    set.seed(as.integer(runif(1,0,10^6)))
  }else{
    set.seed(as.integer(seed))
  }
  # one seed per group for burn-in (row 1) plus one per imputation (rows 2..m+1)
  rns <- sapply(unique(group), function(x,m) as.integer(runif(m+1,0,10^6)), m=m)

  # prepare output: positions of missing target values and the matrix of
  # replacement draws (one column per imputation)
  ind <- which(is.na(data.ord), arr.ind=TRUE, useNames=FALSE)
  ind <- ind[ ind[,2] %in% which(colnames(data.ord)%in%colnames(y)),,drop=FALSE ]
  rpm <- matrix(NA, nrow(ind), m)

  # standard dimensions
  ng <- length(unique(group))   # number of groups
  np <- length(xcol)            # number of fixed-effect predictors
  nq <- length(zcol)            # number of random-effect predictors
  nr <- ncol(y)                 # number of target variables

  # reduced dimensions for the stored psi/sigma chains
  dpsi <- nr*nq
  dsig <- nr
  if(keep.chains=="diagonal"){
    dpsi <- dsig <- 1
  }

  # parameter chains for the burn-in and imputation phases
  bpar <- list(beta=array( NA, c(np,nr,n.burn,ng) ),
               psi=array( NA, c(nr*nq,dpsi,n.burn,ng) ),
               sigma=array( NA, c(nr,dsig,n.burn,ng) ))
  ipar <- list(beta=array( NA, c(np,nr,n.iter*m,ng) ),
               psi=array( NA, c(nr*nq,dpsi,n.iter*m,ng) ),
               sigma=array( NA, c(nr,dsig,n.iter*m,ng) ))

  # burn-in
  if(!silent){
    cat("Running burn-in phase ...\n")
    flush.console()
  }
  glast <- as.list(unique(group))   # sampler state at the end of each group's run
  for(gg in unique(group)){
    gi <- group==gg
    gy <- y[gi,]
    gpred <- pred[gi,]
    gclus <- clus[gi]
    # sort 1, ..., k (pan requires consecutive cluster indices)
    gclus <- match(gclus, unique(gclus))
    cur <- pan::pan(gy, subj=gclus, gpred, xcol, zcol, prior, seed=rns[1,gg], iter=n.burn)
    glast[[gg]] <- cur$last
    # save parameter chains
    bpar[["beta"]][,,,gg] <- cur$beta
    if(keep.chains=="diagonal"){
      bpar[["psi"]][,,,gg] <- .adiag( cur$psi )
      bpar[["sigma"]][,,,gg] <- .adiag( cur$sigma )
    }else{
      bpar[["psi"]][,,,gg] <- cur$psi
      bpar[["sigma"]][,,,gg] <- cur$sigma
    }
  }

  # imputation: continue each group's chain from its burn-in state, saving
  # one imputed data set every n.iter iterations
  for(ii in 1:m){
    if(!silent){
      cat("Creating imputed data set (",ii,"/",m,") ...\n")
      flush.console()
    }
    gy.imp <- as.list(unique(group))
    for(gg in unique(group)){
      gi <- group==gg
      gy <- y[gi,]
      gpred <- pred[gi,]
      gclus <- clus[gi]
      # sort 1, ..., k
      gclus <- match(gclus, unique(gclus))
      cur <- pan::pan(gy, subj=gclus, gpred, xcol, zcol, prior, seed=rns[ii+1,gg], iter=n.iter,
                      start=glast[[gg]])
      glast[[gg]] <- cur$last
      # save imputations
      gy.imp[[gg]] <- cur$y
      # save parameter chains
      i0 <- seq.int(n.iter*(ii-1)+1, n.iter*ii)
      ipar[["beta"]][,,i0,gg] <- cur$beta
      if(keep.chains=="diagonal"){
        ipar[["psi"]][,,i0,gg] <- .adiag( cur$psi )
        ipar[["sigma"]][,,i0,gg] <- .adiag( cur$sigma )
      }else{
        ipar[["psi"]][,,i0,gg] <- cur$psi
        ipar[["sigma"]][,,i0,gg] <- cur$sigma
      }
    }
    y.imp <- do.call(rbind,gy.imp)
    rpm[,ii] <- y.imp[is.na(y)]   # replacement values for the missing entries
  }
  if(!silent){
    cat("Done!\n")
  }

  # clean up: drop the helper column and remember the original row order
  srt <- data.ord[,ncol(data.ord)]
  data.ord <- data.ord[,-ncol(data.ord)]

  # prepare output data
  if( save.pred && !missing(formula) ) data.ord <- cbind(data.ord,pred[,psave,drop=F])

  # ordering
  attr(data.ord,"sort") <- srt
  attr(data.ord,"group") <- group.original

  # model summary
  model <- list(clus=clname, yvrs=yvrs, pvrs=pvrs, qvrs=qvrs)
  attr(model,"is.ML") <- TRUE
  attr(model,"is.L2") <- FALSE
  attr(model,"full.names") <- list(pvrs=pnames, qvrs=qnames)

  out <- list(
    data=data.ord,
    replacement.mat=rpm,
    index.mat=ind,
    call=match.call(),
    model=model,
    random.L1="none",
    prior=prior,
    iter=list(burn=n.burn, iter=n.iter, m=m),
    keep.chains=keep.chains,
    par.burnin=bpar,
    par.imputation=ipar
  )

  class(out) <- c("mitml","pan")
  out
}
| /R/panImpute.R | no_license | stefvanbuuren/mitml | R | false | false | 5,738 | r | panImpute <- function(data, type, formula, n.burn=5000, n.iter=100, m=10,
group=NULL, prior=NULL, seed=NULL, save.pred=FALSE,
keep.chains=c("full","diagonal"), silent=FALSE){
# wrapper function for the Gibbs sampler in the pan package
# *** checks
if(!missing(type) && !missing(formula)) stop("Only one of 'type' or 'formula' may be specified.")
if(save.pred && !missing(type)){
warning("Option 'save.pred' is ignored if 'type' is specified")
save.pred=FALSE
}
keep.chains <- match.arg(keep.chains)
# convert type
if(!missing(type)){
formula <- .type2formula(data,type)
group <- attr(formula, "group")
}
# empty objects to assign to
clname <- yvrs <- y <- ycat <- zcol <- xcol <- pred <- clus <- psave <-
pvrs <- qvrs <- pnames <- qnames <- NULL
# preserve original order
if(!is.data.frame(data)) as.data.frame(data)
data <- cbind(data, original.order=1:nrow(data))
# address additional grouping
grname <- group
if(is.null(group)){
group <- rep(1,nrow(data))
}else{
group <- data[,group]
if(length(group)!=nrow(data)) stop("Argument 'group' is not correctly specified.")
}
group.original <- group
group <- as.numeric(factor(group,levels=unique(group)))
# ***
# model input
# populate local frame
.model.byFormula(data, formula, group, group.original, method="pan")
# check model input
if(any(is.na(group)))
stop("Grouping variable must not contain missing data.")
if(any(is.na(pred)))
stop("Predictor variables must not contain missing data.")
if(sum(is.na(y))==0)
stop("Target variables do not contain any missing data.")
if(any(!sapply(y,is.numeric)))
stop("Target variables must be numeric. You may either convert them or use jomoImpute() instead.")
if(any(duplicated(yvrs)))
stop("Found duplicate target variables.")
# reorder colums
cc <- which(colnames(data) %in% c(clname,grname,yvrs))
data.ord <- cbind(data[c(clname,grname,yvrs)],data[-cc])
# ***
# pan setup
if(is.null(prior)){
prior <- list( a=ncol(y), Binv=diag(1,ncol(y)),
c=ncol(y)*length(zcol), Dinv=diag(1,ncol(y)*length(zcol)) )
}
if(is.null(seed)){
set.seed(as.integer(runif(1,0,10^6)))
}else{
set.seed(as.integer(seed))
}
rns <- sapply(unique(group), function(x,m) as.integer(runif(m+1,0,10^6)), m=m)
# prepare output
ind <- which(is.na(data.ord), arr.ind=TRUE, useNames=FALSE)
ind <- ind[ ind[,2] %in% which(colnames(data.ord)%in%colnames(y)),,drop=FALSE ]
rpm <- matrix(NA, nrow(ind), m)
# standard dimensions
ng <- length(unique(group))
np <- length(xcol)
nq <- length(zcol)
nr <- ncol(y)
# reduced dimensions
dpsi <- nr*nq
dsig <- nr
if(keep.chains=="diagonal"){
dpsi <- dsig <- 1
}
bpar <- list(beta=array( NA, c(np,nr,n.burn,ng) ),
psi=array( NA, c(nr*nq,dpsi,n.burn,ng) ),
sigma=array( NA, c(nr,dsig,n.burn,ng) ))
ipar <- list(beta=array( NA, c(np,nr,n.iter*m,ng) ),
psi=array( NA, c(nr*nq,dpsi,n.iter*m,ng) ),
sigma=array( NA, c(nr,dsig,n.iter*m,ng) ))
# burn-in
if(!silent){
cat("Running burn-in phase ...\n")
flush.console()
}
glast <- as.list(unique(group))
for(gg in unique(group)){
gi <- group==gg
gy <- y[gi,]
gpred <- pred[gi,]
gclus <- clus[gi]
# sort 1, ..., k
gclus <- match(gclus, unique(gclus))
cur <- pan::pan(gy, subj=gclus, gpred, xcol, zcol, prior, seed=rns[1,gg], iter=n.burn)
glast[[gg]] <- cur$last
# save parameter chains
bpar[["beta"]][,,,gg] <- cur$beta
if(keep.chains=="diagonal"){
bpar[["psi"]][,,,gg] <- .adiag( cur$psi )
bpar[["sigma"]][,,,gg] <-.adiag( cur$sigma )
}else{
bpar[["psi"]][,,,gg] <- cur$psi
bpar[["sigma"]][,,,gg] <- cur$sigma
}
}
# imputation
for(ii in 1:m){
if(!silent){
cat("Creating imputed data set (",ii,"/",m,") ...\n")
flush.console()
}
gy.imp <- as.list(unique(group))
for(gg in unique(group)){
gi <- group==gg
gy <- y[gi,]
gpred <- pred[gi,]
gclus <- clus[gi]
# sort 1, ..., k
gclus <- match(gclus, unique(gclus))
cur <- pan::pan(gy, subj=gclus, gpred, xcol, zcol, prior, seed=rns[ii+1,gg], iter=n.iter,
start=glast[[gg]])
glast[[gg]] <- cur$last
# save imputations
gy.imp[[gg]] <- cur$y
# save parameter chains
i0 <- seq.int(n.iter*(ii-1)+1, n.iter*ii)
ipar[["beta"]][,,i0,gg] <- cur$beta
if(keep.chains=="diagonal"){
ipar[["psi"]][,,i0,gg] <- .adiag( cur$psi )
ipar[["sigma"]][,,i0,gg] <- .adiag( cur$sigma )
}else{
ipar[["psi"]][,,i0,gg] <- cur$psi
ipar[["sigma"]][,,i0,gg] <- cur$sigma
}
}
y.imp <- do.call(rbind,gy.imp)
rpm[,ii] <- y.imp[is.na(y)]
}
if(!silent){
cat("Done!\n")
}
# clean up
srt <- data.ord[,ncol(data.ord)]
data.ord <- data.ord[,-ncol(data.ord)]
# prepare output data
if( save.pred && !missing(formula) ) data.ord <- cbind(data.ord,pred[,psave,drop=F])
# ordering
attr(data.ord,"sort") <- srt
attr(data.ord,"group") <- group.original
# model summary
model <- list(clus=clname, yvrs=yvrs, pvrs=pvrs, qvrs=qvrs)
attr(model,"is.ML") <- TRUE
attr(model,"is.L2") <- FALSE
attr(model,"full.names") <- list(pvrs=pnames, qvrs=qnames)
out <- list(
data=data.ord,
replacement.mat=rpm,
index.mat=ind,
call=match.call(),
model=model,
random.L1="none",
prior=prior,
iter=list(burn=n.burn, iter=n.iter, m=m),
keep.chains=keep.chains,
par.burnin=bpar,
par.imputation=ipar
)
class(out) <- c("mitml","pan")
out
}
|
# Compare phenology model parameters estimated from the USA-NPN dataset
# against the same models fit to long-term study (LTS) datasets.
library(tidyverse)
library(cowplot)

# Analysis configuration: supplies input/output paths used throughout.
config = yaml::yaml.load_file('config.yaml')

# Bootstrapped parameter estimates for every species/model/dataset; drop
# bookkeeping rows that are not model parameters.
all_parameters = read_csv(config$model_parameter_file) %>%
  filter(!parameter_name %in% c('run_time','num_iterations'))

# Pull out phenophase: the species column encodes "<species> - <phenophase>",
# so split it into two separate columns.
all_parameters = all_parameters %>%
  mutate(phenophase = stringr::word(species,2,2, ' - '),
         species = stringr::word(species,1,1,' - '))
all_parameters$phenophase = as.numeric(all_parameters$phenophase)

# Keep only species/phenophases that are present in NPN dataset
npn_species = all_parameters %>%
  filter(dataset == 'npn') %>%
  select(species, phenophase) %>%
  distinct()
all_parameters = all_parameters %>%
  filter(paste(species,phenophase) %in% paste(npn_species$species,npn_species$phenophase) )
############################################################################
# Organize the two spatial models, MSB and M1
# These models add a correction to the Alternating and GDD models, respectively,
# to hopefully account for spatial variation. Since fitting these models to LTS
# sites doesn't make sense (as there is no spatial variation in them) I'll
# compare them to LTS datasets fitted to the original Alternating and GDD models.

# Remove spatial models fit to LTS data
all_parameters = all_parameters %>%
  filter(!(dataset!='npn' & model %in% c('msb','m1')))
# Copy the LTS GDD and Alternating model estimates so they can be compared
# with the corrected NPN ones, relabelled under the m1/msb names.
lts_models_parameters = all_parameters %>%
  filter(dataset!='npn', model %in% c('gdd','alternating'))
lts_models_parameters$model = with(lts_models_parameters, ifelse(model=='gdd','m1',
                                                          ifelse(model=='alternating','msb','unk')))
# Sanity check: every copied row must have been relabelled to m1 or msb.
if(any(lts_models_parameters$model=='unk')){stop('unknown model in lts subset')}
all_parameters = all_parameters %>%
  bind_rows(lts_models_parameters)
# Remove the additional parameters which are only in the corrected models
all_parameters = all_parameters %>%
  filter(!(model=='msb' & parameter_name=='d')) %>%
  filter(!(model=='m1' & parameter_name=='k'))
rm(lts_models_parameters)
############################################################################
# The distribution of all parameters derived using bootstrapping.
# Set to TRUE to write one histogram image per
# species/phenophase/model/parameter/dataset combination.
make_parameter_histograms = FALSE

# Side-effect helper: writes a PNG histogram of the bootstrap values for the
# single combination identified by r (a one-row data frame supplied by
# purrrlyr::by_row). Reads the global all_parameters.
save_histogram = function(r){
  histogram_data = all_parameters %>%
    filter(species==r$species, phenophase==r$phenophase, model==r$model, parameter_name==r$parameter_name, dataset==r$dataset)
  plot_name = paste0('parameter_histograms/parameter_',r$id,'.png')
  histogram = ggplot(histogram_data, aes(value)) +
    geom_histogram(bins=50) +
    facet_wrap(species~phenophase~model~parameter_name~dataset)
  ggsave(plot_name, plot=histogram, height=20, width=20, units = 'cm', limitsize = FALSE)
}

if(make_parameter_histograms){
  possible_histograms = all_parameters %>%
    select(species,phenophase,model,parameter_name,dataset) %>%
    distinct() %>%
    mutate(id = 1:n()) %>%
    purrrlyr::by_row(save_histogram)
}

# Comparison of parameters in npn vs other datasets (one example case:
# uniforc model, Populus tremuloides, phenophase 501)
x = all_parameters %>%
  filter(dataset %in% c('harvard','npn'),model=='uniforc',species=='populus tremuloides', phenophase==501)
ggplot(x, aes(x=value, group=dataset, fill=dataset)) +
  geom_histogram(bins=50, position = 'identity', alpha=0.7) +
  scale_fill_brewer(palette='Set2') +
  facet_wrap(parameter_name~model~species~phenophase, scales = 'free')
###########################################################################
# Mann Whitney and/or ks test for parameter distribution comparison.
# Are these results robust to a sample size smaller than 250 bootstraps?
# (kept for reference, currently disabled)
#
# all_parameters_subset = all_parameters %>%
#   filter(bootstrap_num %in% sample(1:250, size=20))
#
# #Statistical test of parameters
# npn_parameters = all_parameters_subset %>%
#   filter(dataset=='npn') %>%
#   rename(npn_value = value) %>%
#   select(-dataset)
#
# p_values = all_parameters_subset %>%
#   filter(dataset!='npn') %>%
#   rename(dataset_value = value) %>%
#   left_join(npn_parameters, by=c('model','parameter_name','bootstrap_num','species','phenophase')) %>%
#   group_by(dataset, model, parameter_name, species, phenophase) %>%
#   #summarise(p_value = ks.test(.$dataset_value, .$npn_value, alternative='two.sided', exact=TRUE)$p.value, n=n()) %>%
#   summarise(p_value = wilcox.test(.$dataset_value, .$npn_value, alternative = 'two.sided')$p.value) %>%
#   ungroup()
###############################################################################
###############################################################################
# Everything below is for making the fairly complicated Figure 2.
# Once this image is rendered I do some slight editing in a photo editor to mask out all the
# dummy variables and adjust the text and legend positioning.
###############################################################################
###############################################################################

# Scatter plots of npn vs long term datasets.
# Phenophase codes are collapsed to two labels for plotting.
budburst_phenophases = c(371, 496, 488, 480)
flower_phenophases = c(501)

# Mean bootstrap estimate per species/parameter/dataset/model/phenophase
parameter_means = all_parameters %>%
  mutate(phenophase = ifelse(phenophase %in% budburst_phenophases, 'Budburst','Flower')) %>%
  group_by(species, parameter_name, dataset, model, phenophase) %>%
  summarise(param_mean = mean(value)) %>%
  ungroup()

# NPN means as their own column so they can be joined to every LTS row
npn_parameters = parameter_means %>%
  filter(dataset=='npn') %>%
  spread(dataset, param_mean) %>%
  rename(npn_derived_parameter = npn)

# One row per LTS estimate, paired with the matching NPN estimate
parameter_means = parameter_means %>%
  filter(dataset!='npn') %>%
  rename(lts_derived_parameter = param_mean) %>%
  left_join(npn_parameters, by=c('species','parameter_name','model', 'phenophase'))

# Human readable dataset labels for the plot legend
datasets = c('harvard','hjandrews','hubbard','jornada','npn')
pretty_dataset_names = c('Harvard Forest','H.J. Andrews','Hubbard Brook','Jornada','NPN')
parameter_means$dataset = factor(parameter_means$dataset, levels = datasets, labels = pretty_dataset_names)

#################################################################################
# R^2 values

# Plotmath symbol for each model parameter, used in the facet strip labels
parameter_name_plotmath = tribble(
  ~model, ~parameter_name, ~parameter_symbol,
  'naive','mean_doy', 'widehat(DOY)',
  'gdd_fixed','F','F',
  'linear_temp','intercept','beta[1]',
  'linear_temp','slope','beta[2]',
  'gdd','F','F',
  'gdd','t1','t[1]',
  'gdd','T','T[base]',
  'm1','F','F',
  'm1','t1','t[1]',
  'm1','T','T[base]',
  'm1','k','k',
  'alternating','a','a',
  'alternating','b','b',
  'alternating','c','c',
  'msb','a','a',
  'msb','b','b',
  'msb','c','c',
  'msb','d','d',
  'uniforc','t1','t[1]',
  'uniforc','F','F',
  'uniforc','b','b',
  'uniforc','c','c'
)

# R^2 of LTS vs NPN estimates for each model parameter (1 - SSE/SST)
r2_values = parameter_means %>%
  group_by(model, parameter_name) %>%
  summarise(r2= 1 - (sum((npn_derived_parameter - lts_derived_parameter)**2) / sum((npn_derived_parameter - mean(npn_derived_parameter))**2)) , n=n()) %>%
  ungroup() %>%
  mutate(r2_text=paste('R^2 == ',round(r2,2)))

# Put the r2 values in the parameter_name column so they are included in the
# labels of the plot
parameter_means = parameter_means %>%
  left_join(r2_values, by=c('model','parameter_name')) %>%
  left_join(parameter_name_plotmath, by = c('model','parameter_name'))
#################################################################################
# These dummy lines help create padding on the right side of Figure 2.
# They are masked out of the final image in a photo editor.
dummy_parameters = tribble(
  ~parameter_name,~facet_strip_text, ~model, ~lts_derived_parameter, ~npn_derived_parameter, ~r2_text,
  'naive_dummy1','naive_dummy1', 'naive', 1,1,'naive_dummy1',
  'naive_dummy2','naive_dummy2', 'naive', 1,1,'naive_dummy2',
  'naive_dummy3','naive_dummy3', 'naive', 1,1,'naive_dummy3',
  'fixed_gdd_dummy1','fixed_gdd_dummy1', 'gdd_fixed', 1,1,'fixed_gdd_dummy1',
  'fixed_gdd_dummy2','fixed_gdd_dummy2', 'gdd_fixed', 1,1,'fixed_gdd_dummy2',
  'fixed_gdd_dummy3','fixed_gdd_dummy3', 'gdd_fixed', 1,1,'fixed_gdd_dummy3',
  'linear_dummy1','fixed_gdd_dummy1', 'linear_temp', 1,1,'linear_dummy1',
  'linear_dummy2','fixed_gdd_dummy2', 'linear_temp', 1,1,'linear_dummy2',
  'gdd_dummy1','gdd_dummy1', 'gdd', 1,1,'gdd_dummy1',
  'm1_dummy1','m1_dummy1', 'm1', 1,1,'m1_dummy1',
  'alternating_dummy1','alternating_dummy1', 'alternating', 1,1,'alternating_dummy1',
  'msb_dummy1','msb_dummy1', 'msb', 1,1,'msb_dummy1'
)

# This ordering sets up the 32 subplots (20 model variables + 12 dummy variables for padding)
# in the correct grid.
subplot_order = tribble(
  ~model, ~parameter_name, ~plot_order_number,
  'naive','mean_doy',1,
  'naive','naive_dummy1',2,
  'naive','naive_dummy2',3,
  'naive','naive_dummy3',4,
  'gdd_fixed','F',5,
  'gdd_fixed','fixed_gdd_dummy1',6,
  'gdd_fixed','fixed_gdd_dummy2',7,
  'gdd_fixed','fixed_gdd_dummy3',8,
  'linear_temp','intercept',9,
  'linear_temp','slope',10,
  'linear_temp','linear_dummy1',11,
  'linear_temp','linear_dummy2',12,
  'gdd','F',13,
  'gdd','t1',14,
  'gdd','T',15,
  'gdd','gdd_dummy1',16,
  'm1','F',17,
  'm1','t1',18,
  'm1','T',19,
  'm1','m1_dummy1',20,
  'alternating','a',21,
  'alternating','b',22,
  'alternating','c',23,
  'alternating','alternating_dummy1',24,
  'msb','a',25,
  'msb','b',26,
  'msb','c',27,
  'msb','msb_dummy1',28,
  'uniforc','b',29,
  'uniforc','c',30,
  'uniforc','F',31,
  'uniforc','t1',32
)

#################################################
#################################################
# Put it all together

# Append the dummy rows and attach each row's subplot position
parameters_with_dummy_vars = parameter_means %>%
  bind_rows(dummy_parameters) %>%
  left_join(subplot_order, by=c('model','parameter_name'))

# Fix the model ordering so facet rows run naive -> uniforc, top to bottom
parameters_with_dummy_vars$model = factor(parameters_with_dummy_vars$model, levels = c("naive","gdd_fixed","linear_temp","gdd","m1","alternating","msb","uniforc"),
                                          labels = c("naive","gdd_fixed","linear_temp","gdd","m1","alternating","msb","uniforc"),
                                          ordered = TRUE)

# Facet strip label: plotmath list of the parameter symbol and its R^2 text
parameters_with_dummy_vars$facet_strip_text = with(parameters_with_dummy_vars, paste0('list(',parameter_symbol,',', r2_text,')'))

# in the main ggplot call below, facet_wrap will use facet_strip_text to organize the different subplots.
# the order within the facet_strip_text factor (defined in this next line by the plot_order_number) will dictate how they are drawn
# starting at the top-left and going left-right,top-bottom.
parameters_with_dummy_vars$facet_strip_text = forcats::fct_reorder(parameters_with_dummy_vars$facet_strip_text, parameters_with_dummy_vars$plot_order_number)

# The y axis label doubles as per-row model labels (spacing tuned by hand)
y_axis_text = c('LTER Derived Parameter Estimates\n
     Uniforc             MSB        Alternating          M1               GDD            Linear          Fixed GDD        Naive')

whole_plot = ggplot(parameters_with_dummy_vars, aes(x=npn_derived_parameter, y=lts_derived_parameter, color=dataset, group=dataset)) +
  geom_point(size=3, aes(shape = phenophase)) +
  scale_shape_manual(values=c(17,13)) +
  scale_color_manual(values=c("grey42", "#E69F00", "#56B4E9", "#CC79A7")) +
  geom_abline(intercept=0, slope=1) +
  facet_wrap(~facet_strip_text, scales='free', nrow=8, labeller = label_parsed) +
  theme_bw() +
  labs(x='USA-NPN Derived Parameter Estimates',y=y_axis_text,
       color = "LTER Dataset", shape = "Phenophase") +
  theme(strip.text = element_text(size=10),
        strip.background = element_rect(fill='grey95'),
        strip.switch.pad.wrap = unit(2, 'cm'),
        axis.text = element_text(size=8),
        axis.title = element_text(size=14),
        legend.text = element_text(size=14),
        legend.title = element_text(size=16))

ggsave(paste0(config$image_save_directory,'figure_2_param_comparison.png'), plot=whole_plot, height=28, width=25, units = 'cm', dpi=1000)
# Note, after rendering I deleted the dummy placeholder subplots inside a photo editing program and save as figure_param_comparison_final.png
| /analysis/compare_parameters.R | no_license | sdtaylor/phenology_dataset_study | R | false | false | 11,797 | r | library(tidyverse)
library(cowplot)

# Analysis configuration and bootstrapped parameter estimates; drop
# bookkeeping rows that are not model parameters.
config = yaml::yaml.load_file('config.yaml')
all_parameters = read_csv(config$model_parameter_file) %>%
  filter(!parameter_name %in% c('run_time','num_iterations'))

# Pull out phenophase ("<species> - <phenophase>" is split in two)
all_parameters = all_parameters %>%
  mutate(phenophase = stringr::word(species,2,2, ' - '),
         species = stringr::word(species,1,1,' - '))
all_parameters$phenophase = as.numeric(all_parameters$phenophase)

# Keep only species/phenophases that are present in NPN dataset
npn_species = all_parameters %>%
  filter(dataset == 'npn') %>%
  select(species, phenophase) %>%
  distinct()
all_parameters = all_parameters %>%
  filter(paste(species,phenophase) %in% paste(npn_species$species,npn_species$phenophase) )
############################################################################
# Organize the two spatial models, MSB and M1
# These models add a correction to the Alternating and GDD models, respectively,
# to hopefully account for spatial variation. Since fitting these models to LTS
# sites doesn't make sense (as there is no spatial variation in them) I'll
# compare them to LTS datasets fitted to the original Alternating and GDD models.

# Remove spatial models fit to LTS data
all_parameters = all_parameters %>%
  filter(!(dataset!='npn' & model %in% c('msb','m1')))
# Copy the LTS GDD and Alternating model estimates to compare with the
# corrected NPN ones (relabelled m1/msb).
lts_models_parameters = all_parameters %>%
  filter(dataset!='npn', model %in% c('gdd','alternating'))
lts_models_parameters$model = with(lts_models_parameters, ifelse(model=='gdd','m1',
                                                          ifelse(model=='alternating','msb','unk')))
# Sanity check: every copied row must have been relabelled to m1 or msb.
if(any(lts_models_parameters$model=='unk')){stop('unknown model in lts subset')}
all_parameters = all_parameters %>%
  bind_rows(lts_models_parameters)
# Remove the additional parameters which are only in the corrected models
all_parameters = all_parameters %>%
  filter(!(model=='msb' & parameter_name=='d')) %>%
  filter(!(model=='m1' & parameter_name=='k'))
rm(lts_models_parameters)
############################################################################
# The distribution of all parameters derived using bootstrapping;
# set TRUE to write one histogram image per parameter combination.
make_parameter_histograms = FALSE

# Side-effect helper: writes a PNG histogram of bootstrap values for the
# single combination identified by the one-row data frame r; reads the
# global all_parameters.
save_histogram = function(r){
  histogram_data = all_parameters %>%
    filter(species==r$species, phenophase==r$phenophase, model==r$model, parameter_name==r$parameter_name, dataset==r$dataset)
  plot_name = paste0('parameter_histograms/parameter_',r$id,'.png')
  histogram = ggplot(histogram_data, aes(value)) +
    geom_histogram(bins=50) +
    facet_wrap(species~phenophase~model~parameter_name~dataset)
  ggsave(plot_name, plot=histogram, height=20, width=20, units = 'cm', limitsize = FALSE)
}

if(make_parameter_histograms){
  possible_histograms = all_parameters %>%
    select(species,phenophase,model,parameter_name,dataset) %>%
    distinct() %>%
    mutate(id = 1:n()) %>%
    purrrlyr::by_row(save_histogram)
}

# Comparison of parameters in npn vs other datasets (single example case)
x = all_parameters %>%
  filter(dataset %in% c('harvard','npn'),model=='uniforc',species=='populus tremuloides', phenophase==501)
ggplot(x, aes(x=value, group=dataset, fill=dataset)) +
  geom_histogram(bins=50, position = 'identity', alpha=0.7) +
  scale_fill_brewer(palette='Set2') +
  facet_wrap(parameter_name~model~species~phenophase, scales = 'free')
###########################################################################
# Mann Whitney and/or ks test for parameter distribution comparison.
# Are these results robust to a sample size smaller than 250 bootstraps?
#
# all_parameters_subset = all_parameters %>%
# filter(bootstrap_num %in% sample(1:250, size=20))
#
# #Statistical test of parameters
# npn_parameters = all_parameters_subset %>%
# filter(dataset=='npn') %>%
# rename(npn_value = value) %>%
# select(-dataset)
#
# p_values = all_parameters_subset %>%
# filter(dataset!='npn') %>%
# rename(dataset_value = value) %>%
# left_join(npn_parameters, by=c('model','parameter_name','bootstrap_num','species','phenophase')) %>%
# group_by(dataset, model, parameter_name, species, phenophase) %>%
# #summarise(p_value = ks.test(.$dataset_value, .$npn_value, alternative='two.side', exact=TRUE)$p.value, n=n()) %>%
# summarise(p_value = wilcox.test(.$dataset_value, .$npn_value, alternative = 'two.sided')$p.value) %>%
# ungroup()
###############################################################################
###############################################################################
# Everything below if for making the fairly complicated Figure 2.
# Once this image is rendered I do some slight editing in a photo editor to mask out all the
# dummy variables and adjust the text and legend positioning.
###############################################################################
###############################################################################
#scatter plots of npn vs long term datasets
# Phenophase codes (presumably USA-NPN codes -- confirm upstream) are collapsed
# into two groups so each can get its own plotting symbol below.
budburst_phenophases = c(371, 496, 488, 480)
# NOTE(review): flower_phenophases is defined but never used -- the ifelse()
# below treats everything not in budburst_phenophases as 'Flower'.
flower_phenophases = c(501)
# Mean of the bootstrapped parameter values for every
# species x parameter x dataset x model x phenophase combination.
parameter_means = all_parameters %>%
  mutate(phenophase = ifelse(phenophase %in% budburst_phenophases, 'Budburst','Flower')) %>%
  group_by(species, parameter_name, dataset, model, phenophase) %>%
  summarise(param_mean = mean(value)) %>%
  ungroup()
# Pull the NPN means out as their own column so each long-term-site (LTS)
# estimate can be paired with the corresponding NPN estimate for the scatter.
npn_parameters = parameter_means %>%
  filter(dataset=='npn') %>%
  spread(dataset, param_mean) %>%
  rename(npn_derived_parameter = npn)
parameter_means = parameter_means %>%
  filter(dataset!='npn') %>%
  rename(lts_derived_parameter = param_mean) %>%
  left_join(npn_parameters, by=c('species','parameter_name','model', 'phenophase'))
# Human-readable dataset labels for the legend (order matches `datasets`).
datasets = c('harvard','hjandrews','hubbard','jornada','npn')
pretty_dataset_names = c('Harvard Forest','H.J. Andrews','Hubbard Brook','Jornada','NPN')
parameter_means$dataset = factor(parameter_means$dataset, levels = datasets, labels = pretty_dataset_names)
#################################################################################
# R^2 values
# Plotmath symbol for every model parameter; parsed by label_parsed in the
# Figure 2 facet strips (e.g. 'T[base]' renders with a subscript).
parameter_name_plotmath = tribble(
  ~model, ~parameter_name, ~parameter_symbol,
  'naive','mean_doy', 'widehat(DOY)',
  'gdd_fixed','F','F',
  'linear_temp','intercept','beta[1]',
  'linear_temp','slope','beta[2]',
  'gdd','F','F',
  'gdd','t1','t[1]',
  'gdd','T','T[base]',
  'm1','F','F',
  'm1','t1','t[1]',
  'm1','T','T[base]',
  'm1','k','k',
  'alternating','a','a',
  'alternating','b','b',
  'alternating','c','c',
  'msb','a','a',
  'msb','b','b',
  'msb','c','c',
  'msb','d','d',
  'uniforc','t1','t[1]',
  'uniforc','F','F',
  'uniforc','b','b',
  'uniforc','c','c'
)
# R^2 of the LTS estimates around the 1:1 line, with the NPN estimate as the
# reference value. Note this can be negative when agreement is worse than just
# using the mean NPN estimate. r2_text is a plotmath expression for the strips.
r2_values = parameter_means %>%
  group_by(model, parameter_name) %>%
  summarise(r2= 1 - (sum((npn_derived_parameter - lts_derived_parameter)**2) / sum((npn_derived_parameter - mean(npn_derived_parameter))**2)) , n=n()) %>%
  ungroup() %>%
  mutate(r2_text=paste('R^2 == ',round(r2,2)))
# Put the r2 values in the parameter_name column so they are included in the
# labels of the plot
parameter_means = parameter_means %>%
  left_join(r2_values, by=c('model','parameter_name')) %>%
  left_join(parameter_name_plotmath, by = c('model','parameter_name'))
#################################################################################
# These dummy lines help create padding on the right side of Figure 2.
# Each dummy row becomes an (ultimately masked-out) facet; see the note above
# about post-processing in a photo editor.
dummy_parameters = tribble(
  ~parameter_name,~facet_strip_text, ~model, ~lts_derived_parameter, ~npn_derived_parameter, ~r2_text,
  'naive_dummy1','naive_dummy1', 'naive', 1,1,'naive_dummy1',
  'naive_dummy2','naive_dummy2', 'naive', 1,1,'naive_dummy2',
  'naive_dummy3','naive_dummy3', 'naive', 1,1,'naive_dummy3',
  'fixed_gdd_dummy1','fixed_gdd_dummy1', 'gdd_fixed', 1,1,'fixed_gdd_dummy1',
  'fixed_gdd_dummy2','fixed_gdd_dummy2', 'gdd_fixed', 1,1,'fixed_gdd_dummy2',
  'fixed_gdd_dummy3','fixed_gdd_dummy3', 'gdd_fixed', 1,1,'fixed_gdd_dummy3',
  # NOTE(review): the next two rows pair 'linear_dummy*' with the
  # 'fixed_gdd_dummy*' facet_strip_text -- looks like a copy-paste slip, but it
  # is harmless because facet_strip_text is rebuilt below and the dummy facets
  # are masked out of the final figure anyway.
  'linear_dummy1','fixed_gdd_dummy1', 'linear_temp', 1,1,'linear_dummy1',
  'linear_dummy2','fixed_gdd_dummy2', 'linear_temp', 1,1,'linear_dummy2',
  'gdd_dummy1','gdd_dummy1', 'gdd', 1,1,'gdd_dummy1',
  'm1_dummy1','m1_dummy1', 'm1', 1,1,'m1_dummy1',
  'alternating_dummy1','alternating_dummy1', 'alternating', 1,1,'alternating_dummy1',
  'msb_dummy1','msb_dummy1', 'msb', 1,1,'msb_dummy1'
)
# This ordering sets up the 32 subplots (20 model variables + 12 dummy variables for padding)
# in the correct grid.
# Facets fill a 4-column x 8-row grid left-to-right, top-to-bottom; each model
# gets one padded row.
subplot_order = tribble(
  ~model, ~parameter_name, ~plot_order_number,
  'naive','mean_doy',1,
  'naive','naive_dummy1',2,
  'naive','naive_dummy2',3,
  'naive','naive_dummy3',4,
  'gdd_fixed','F',5,
  'gdd_fixed','fixed_gdd_dummy1',6,
  'gdd_fixed','fixed_gdd_dummy2',7,
  'gdd_fixed','fixed_gdd_dummy3',8,
  'linear_temp','intercept',9,
  'linear_temp','slope',10,
  'linear_temp','linear_dummy1',11,
  'linear_temp','linear_dummy2',12,
  'gdd','F',13,
  'gdd','t1',14,
  'gdd','T',15,
  'gdd','gdd_dummy1',16,
  'm1','F',17,
  'm1','t1',18,
  'm1','T',19,
  'm1','m1_dummy1',20,
  'alternating','a',21,
  'alternating','b',22,
  'alternating','c',23,
  'alternating','alternating_dummy1',24,
  'msb','a',25,
  'msb','b',26,
  'msb','c',27,
  'msb','msb_dummy1',28,
  'uniforc','b',29,
  'uniforc','c',30,
  'uniforc','F',31,
  'uniforc','t1',32
)
#################################################
#################################################
# Put it all together
parameters_with_dummy_vars = parameter_means %>%
  bind_rows(dummy_parameters) %>%
  left_join(subplot_order, by=c('model','parameter_name'))
# Ordered factor keeps the models in row order (top to bottom of the grid).
parameters_with_dummy_vars$model = factor(parameters_with_dummy_vars$model, levels = c("naive","gdd_fixed","linear_temp","gdd","m1","alternating","msb","uniforc"),
                                          labels = c("naive","gdd_fixed","linear_temp","gdd","m1","alternating","msb","uniforc"),
                                          ordered = TRUE)
# Facet strip label is a plotmath list(symbol, R^2) expression, e.g.
# list(T[base], R^2 == 0.42), parsed by label_parsed below.
parameters_with_dummy_vars$facet_strip_text = with(parameters_with_dummy_vars, paste0('list(',parameter_symbol,',', r2_text,')'))
# in the main ggplot call below, facet_wrap will use facet_strip_text to organize the different subplots.
# the order within the facet_strip_text factor (defined in this next line by the plot_order_number) will dictate how they are drawn
# starting at the top-left and going left-right,top-bottom.
parameters_with_dummy_vars$facet_strip_text = forcats::fct_reorder(parameters_with_dummy_vars$facet_strip_text, parameters_with_dummy_vars$plot_order_number)
# The second line of this axis title spaces the model names out so they act as
# per-row labels along the y axis (spacing tuned by eye for the saved size).
y_axis_text = c('LTER Derived Parameter Estimates\n
                Uniforc            MSB       Alternating          M1             GDD         Linear      Fixed GDD     Naive')
whole_plot = ggplot(parameters_with_dummy_vars, aes(x=npn_derived_parameter, y=lts_derived_parameter, color=dataset, group=dataset)) +
  geom_point(size=3, aes(shape = phenophase)) +
  scale_shape_manual(values=c(17,13)) +
  scale_color_manual(values=c("grey42", "#E69F00", "#56B4E9", "#CC79A7")) +
  geom_abline(intercept=0, slope=1) +
  facet_wrap(~facet_strip_text, scales='free', nrow=8, labeller = label_parsed) +
  theme_bw() +
  labs(x='USA-NPN Derived Parameter Estimates',y=y_axis_text,
       color = "LTER Dataset", shape = "Phenophase") +
  theme(strip.text = element_text(size=10),
        strip.background = element_rect(fill='grey95'),
        strip.switch.pad.wrap = unit(2, 'cm'),
        axis.text = element_text(size=8),
        axis.title = element_text(size=14),
        legend.text = element_text(size=14),
        legend.title = element_text(size=16))
# config$image_save_directory is defined elsewhere in the project config.
ggsave(paste0(config$image_save_directory,'figure_2_param_comparison.png'), plot=whole_plot, height=28, width=25, units = 'cm', dpi=1000)
# Note, after rendering I deleted the dummy placeholder subplots inside a photo editing program and save as figure_param_comparison_final.png
|
# Scrape the first HTML table from the 'parlez-le-moore' page of the ONG AFED
# site and save it as a delimited text file.
library(xml2)
library(rvest)
library(tidyverse)
library(stringi)  # NOTE(review): loaded but not used below
# Source URL
url <- "https://ongafed.wordpress.com/parlez-le-moore/"
content <- read_html(url)
# fill = TRUE pads ragged rows so html_table() does not fail
tables <- content %>% html_table(fill = TRUE)
content_table <- tables[[1]]
# Promote the first table row to column headers, then drop it from the data.
colnames(content_table) <- content_table[1,]
content <- content_table[-1,]
# Save
data.table::fwrite(content,
                   "C:/Users/aso.RCTS/Downloads/Armel/github/jw-web/ongafed.txt",
                   row.names = FALSE)
| /Corpus-Web/ongafed/ongafed.R | no_license | armelsoubeiga/projet-data-moore-fr | R | false | false | 496 | r | library(xml2)
library(rvest)
library(tidyverse)
library(stringi)
# Liste of url
url <- "https://ongafed.wordpress.com/parlez-le-moore/"
content <- read_html(url)
tables <- content %>% html_table(fill = TRUE)
content_table <- tables[[1]]
colnames(content_table) <- content_table[1,]
content <- content_table[-1,]
# Save
data.table::fwrite(content,
"C:/Users/aso.RCTS/Downloads/Armel/github/jw-web/ongafed.txt",
row.names = FALSE)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{pfweights}
\alias{pfweights}
\title{Compute optimal portfolio weights}
\usage{
pfweights(sigma)
}
\arguments{
\item{sigma}{covariance matrix}
}
\value{
new portfolio weights
}
\description{
Compute optimal portfolio weights
}
| /man/pfweights.Rd | no_license | cran/CondReg | R | false | false | 286 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{pfweights}
\alias{pfweights}
\title{Compute optimal portfolio weights}
\usage{
pfweights(sigma)
}
\arguments{
\item{sigma}{covariance matrix}
}
\value{
new portfolio weights
}
\description{
Compute optimal portfolio weights
}
|
# Package test runner invoked by R CMD check.
library(testthat)
library(workflowHelper)
# Clearing R_TESTS is the commonly used workaround for "cannot open file"
# errors when tests spawn child R processes during R CMD check.
Sys.setenv("R_TESTS" = "")
test_check("workflowHelper")
| /tests/testthat.R | no_license | wlandau/workflowHelper | R | false | false | 99 | r | library(testthat)
library(workflowHelper)
Sys.setenv("R_TESTS" = "")
test_check("workflowHelper")
|
# Roxygen2 documentation stubs for the three bundled EUROFAMCARE sample
# datasets (efc, efc2, efc3). Each trailing `NULL` is the placeholder object
# the preceding `#'` block documents -- the standard roxygen2 data-doc idiom.
#' @docType data
#' @title Sample dataset from the EUROFAMCARE project
#' @name efc
#' @keywords data
#'
#' @description A SPSS sample data set, read with the \code{\link[haven]{read_spss}}
#'                function and "converted" with \code{\link{to_sjPlot}}.
#'
#' @references \url{http://www.uke.de/eurofamcare/}
#'
#' @note There are two further datasets, \code{efc2} and \code{efc3}, which
#'         slightly differ in their structure.
#'         \describe{
#'           \item{efc2}{was read with the \code{\link[foreign]{read.spss}} function
#'                       and also has attached variable label attributes to each variable.}
#'           \item{efc3}{categorical variables have been converted to labelled factors,
#'                       i.e. value labels are set as factor levels. However, factors
#'                       in \code{efc3} no longer have variable label attributes.}
#'         }
#'
#' @examples
#' # Attach EFC-data
#' data(efc)
#'
#' # Show structure
#' str(efc)
#'
#' # show first rows
#' head(efc)
#'
#' # show variables
#' \dontrun{
#' library(sjPlot)
#' view_spss(efc)
#'
#' # show variable labels
#' get_var_labels(efc)
#'
#' # plot efc-data frame summary
#' sjt.df(efc, alternateRowColor=TRUE)}
#'
NULL
#' @docType data
#' @title Sample dataset from the EUROFAMCARE project
#' @name efc2
#' @keywords data
#'
#' @references \url{http://www.uke.de/eurofamcare/}
#'
#' @examples
#' # Attach EFC-data
#' data(efc2)
#'
#' \dontrun{
#' # show variables
#' view_spss(efc2)
#'
#' # print frq of dependency
#' sjt.frq(efc2$e42dep)}
#'
NULL
#' @docType data
#' @title Sample dataset from the EUROFAMCARE project
#' @name efc3
#' @keywords data
#'
#' @references \url{http://www.uke.de/eurofamcare/}
#'
#' @examples
#' # Attach EFC-data
#' data(efc3)
#'
#' str(efc3$e15relat)
#' table(efc3$e15relat)
#'
#' \dontrun{
#' # print frq of relationships
#' sjt.frq(efc3$e15relat)}
#'
NULL
| /R/efc.R | no_license | msaidf/sjmisc | R | false | false | 1,914 | r | #' @docType data
#' @title Sample dataset from the EUROFAMCARE project
#' @name efc
#' @keywords data
#'
#' @description A SPSS sample data set, read with the \code{\link[haven]{read_spss}}
#' function and "converted" with \code{\link{to_sjPlot}}.
#'
#' @references \url{http://www.uke.de/eurofamcare/}
#'
#' @note There are two further datasets, \code{efc2} and \code{efc3}, which
#' slightly differ in their structure.
#' \describe{
#' \item{efc2}{was read with the \code{\link[foreign]{read.spss}} function
#' and also has attached variable label attributes to each variable.}
#' \item{efc3}{categorical variables have been converted to labelled factors,
#' i.e. value labels are set as factor levels. However, factors
#' in \code{efc3} no longer have variable label attributes.}
#' }
#'
#' @examples
#' # Attach EFC-data
#' data(efc)
#'
#' # Show structure
#' str(efc)
#'
#' # show first rows
#' head(efc)
#'
#' # show variables
#' \dontrun{
#' library(sjPlot)
#' view_spss(efc)
#'
#' # show variable labels
#' get_var_labels(efc)
#'
#' # plot efc-data frame summary
#' sjt.df(efc, alternateRowColor=TRUE)}
#'
NULL
#' @docType data
#' @title Sample dataset from the EUROFAMCARE project
#' @name efc2
#' @keywords data
#'
#' @references \url{http://www.uke.de/eurofamcare/}
#'
#' @examples
#' # Attach EFC-data
#' data(efc2)
#'
#' \dontrun{
#' # show variables
#' view_spss(efc2)
#'
#' # print frq of dependency
#' sjt.frq(efc2$e42dep)}
#'
NULL
#' @docType data
#' @title Sample dataset from the EUROFAMCARE project
#' @name efc3
#' @keywords data
#'
#' @references \url{http://www.uke.de/eurofamcare/}
#'
#' @examples
#' # Attach EFC-data
#' data(efc3)
#'
#' str(efc3$e15relat)
#' table(efc3$e15relat)
#'
#' \dontrun{
#' # print frq of relationships
#' sjt.frq(efc3$e15relat)}
#'
NULL
|
#' @title Read in data on all available graduating classes
#' @description Read in data on all available graduating classes. The information of interest
#' includes student names, latin honors, department honors, Clark fellowship,
#' Phi Kappa Beta membership and Sigma Xi membership.
#' @keywords StudentInfo readStudent
#' @return A dataframe object containing information about all available graduating classes
#'
#' @usage
#' data_scraping()
#' @export
data_scraping <- function(){
  # One raw text file per graduating class, named "<year-1>-<yy>.txt"
  # (e.g. "2000-01.txt" for the class of 2001). The loop replaces sixteen
  # near-identical readStudentInfo() calls; bind order is unchanged.
  years <- 2001:2016
  williams_grad <- do.call(rbind, lapply(years, function(yr) {
    readStudentInfo(sprintf("%d-%02d.txt", yr - 1, yr %% 100), yr)
  }))
  # Split "First Rest-of-name" so first names can be fed to gender().
  williams_grad <- extract(williams_grad, Name, c("First.Name", "Last.and.Middle.Name"),"([^ ]+) (.*)")
  gender <- gender(williams_grad$First.Name, method = "kantrowitz")
  # NOTE(review): a SINGLE random draw is assigned to every ambiguous/unknown
  # name (original behavior, preserved here); if independent per-name draws
  # were intended this needs sample(..., size = sum(...), replace = TRUE).
  gender$gender[which(gender$gender == "either" | is.na(gender$gender))] <- sample(c("male","female"), 1)
  gender <- gender[c(-1066, -7002),] # Remove duplicated lines (hard-coded row indices for this dataset)
  williams_grad <- cbind(williams_grad, Gender = gender$gender)
  # NOTE(review): devtools::use_data() is deprecated in current devtools;
  # usethis::use_data() is the maintained equivalent.
  devtools::use_data(williams_grad, pkg = "data", overwrite = TRUE)
}
| /R/data_scraping.R | no_license | nnguyen2496/gradinfo | R | false | false | 2,042 | r | #' @title Read in data on all available graduating classes
#' @description Read in data on all available graduating classes. The information of interest
#' includes student names, latin honors, department honors, Clark fellowship,
#' Phi Kappa Beta membership and Sigma Xi membership.
#' @keywords StudentInfo readStudent
#' @return A dataframe object containing information about all available graduating classes
#'
#' @usage
#' data_scraping()
#' @export
data_scraping <- function(){
year0001 <- readStudentInfo("2000-01.txt", 2001)
year0102 <- readStudentInfo("2001-02.txt", 2002)
year0203 <- readStudentInfo("2002-03.txt", 2003)
year0304 <- readStudentInfo("2003-04.txt", 2004)
year0405 <- readStudentInfo("2004-05.txt", 2005)
year0506 <- readStudentInfo("2005-06.txt", 2006)
year0607 <- readStudentInfo("2006-07.txt", 2007)
year0708 <- readStudentInfo("2007-08.txt", 2008)
year0809 <- readStudentInfo("2008-09.txt", 2009)
year0910 <- readStudentInfo("2009-10.txt", 2010)
year1011 <- readStudentInfo("2010-11.txt", 2011)
year1112 <- readStudentInfo("2011-12.txt", 2012)
year1213 <- readStudentInfo("2012-13.txt", 2013)
year1314 <- readStudentInfo("2013-14.txt", 2014)
year1415 <- readStudentInfo("2014-15.txt", 2015)
year1516 <- readStudentInfo("2015-16.txt", 2016)
williams_grad <- rbind(year0001, year0102, year0203, year0304, year0405, year0506, year0607,
year0708, year0809, year0910, year1011, year1112, year1213, year1314, year1415,
year1516)
williams_grad <- extract(williams_grad, Name, c("First.Name", "Last.and.Middle.Name"),"([^ ]+) (.*)")
gender <- gender(williams_grad$First.Name, method = "kantrowitz")
gender$gender[which(gender$gender == "either" | is.na(gender$gender))] <- sample(c("male","female"), 1)
gender <- gender[c(-1066, -7002),] # Remove duplicated lines
williams_grad <- cbind(williams_grad, Gender = gender$gender)
devtools::use_data(williams_grad, pkg = "data", overwrite = TRUE)
}
|
# parse pre-filled form URL
# @param url prefilled form URL with 'room' in the room key and 'enter' or 'exit' in the direction field
# @return list with form_url, room_field_id and direction_field_id (all NA_character_
#   when the URL does not look like a Google Forms pre-filled link)
parse_prefilled_form_url <- function(url) {
  reg_exp <- "^(.*)/viewform\\??(.*)$"
  if (!stringr::str_detect(url, reg_exp)) {
    warning("does not look like a prefilled URL, missing 'viewform'", call. = FALSE)
    return(list(
      form_url = NA_character_,
      room_field_id = NA_character_,
      direction_field_id = NA_character_
    ))
  }
  reg_exp_matches <- stringr::str_match(url, reg_exp)
  # parse entries: split the query string into key=value pairs
  # (fixed() and tibble() are now namespaced for consistency with the other
  # qualified calls in this file; the trailing comma after `value` was dropped
  # since it is rejected by some tibble versions)
  params <- tibble::tibble(
    param = stringr::str_split(reg_exp_matches[,3], stringr::fixed("&"))[[1]],
    key = stringr::str_extract(param, "^[^=]*"),
    value = stringr::str_extract(param, "[^=]*$")
  )
  # the room field is pre-filled with the literal value 'room', the direction
  # field with 'enter' or 'exit' (case-insensitive); exactly one match expected
  room_param <- dplyr::filter(params, stringr::str_to_lower(value) == "room")
  dir_param <- dplyr::filter(params, stringr::str_to_lower(value) %in% c("enter", "exit"))
  values <- list(
    form_url = reg_exp_matches[,2],
    room_field_id = if (nrow(room_param) == 1) room_param$key else NA_character_,
    direction_field_id = if (nrow(dir_param) == 1) dir_param$key else NA_character_
  )
  return(values)
}
# generate qr code url
# generic function to generate qr code urls for any form and set of parameters
# @param form_url base URL of the google form (no trailing /formResponse)
# @param form_parameters named character vector: names are form field ids,
#   values are the pre-filled entries
# @param api_url QR rendering service endpoint
# @param qr_code_size pixel size requested from the QR service, "WxH"
# @return character URL that renders a QR code encoding a self-submitting form link
generate_qr_code_url <- function(
  form_url,
  form_parameters = c(),
  api_url = "http://api.qrserver.com/v1/create-qr-code",
  qr_code_size = "500x500"
) {
  # Bug fix: encode each parameter value with reserved = TRUE so values
  # containing '&', '=', '+' etc. cannot corrupt the query string (the previous
  # default left reserved characters unescaped). vapply replaces
  # purrr::map_chr so this helper needs only base R.
  encoded_values <- vapply(
    as.character(form_parameters),
    function(value) URLencode(value, reserved = TRUE),
    character(1),
    USE.NAMES = FALSE
  )
  query <- paste0(sprintf("%s=%s", names(form_parameters), encoded_values), collapse = "&")
  form_url <- sprintf("%s/formResponse?%s&submit=SUBMIT", form_url, query)
  # repeated = TRUE re-encodes the '%' of the already-encoded values so the
  # whole form URL survives as a single query parameter of the QR service.
  qr_code_url <- sprintf("%s/?data=%s&size=%s", api_url, URLencode(form_url, reserved = TRUE, repeated = TRUE), qr_code_size)
  return(qr_code_url)
}
# generate png file path
# Build "<pngs_dir>/<room>_<direction>.png", creating the directory on demand.
generate_png_file_path <- function(room, direction, pngs_dir = "pngs") {
  if (!dir.exists(pngs_dir)) {
    dir.create(pngs_dir)
  }
  file.path(pngs_dir, paste0(room, "_", direction, ".png"))
}
# generate docx save path
# Build "<docx_dir>/<room>-<direction>.docx", creating the directory on demand.
generate_docx_file_path <- function(room, direction, docx_dir = "docx") {
  if (!dir.exists(docx_dir)) {
    dir.create(docx_dir)
  }
  file.path(docx_dir, paste0(room, "-", direction, ".docx"))
}
# download qr code for room
# @param form_url the url of the google form (usually https://docs.google.com/forms/d/e/xxxxxxxx)
# @param room_field_id the ID of the room field in the google form, usually something like entry.12342356
# @param room the actual value for the room field
# @param direction_field_id the ID of the direction field in the google form, usually something like entry.12342356
# @param direction usually either "Enter" or "Exit"
# @param pngs_dir directory the png is written to
# @param ... additional parameters passed on to the generate_qr_code_url function
# @return png_path returns the png path
download_qr_code_for_room <- function(room, direction, form_url, room_field_id, direction_field_id, pngs_dir = "pngs", ...) {
  stopifnot(!missing(room) && !missing(direction) && !missing(form_url) && !missing(room_field_id) && !missing(direction_field_id))
  form_parameters <- c(room, direction) %>% setNames(c(room_field_id, direction_field_id))
  qr_code_url <- generate_qr_code_url(form_url = form_url, form_parameters = form_parameters, ...)
  # Bug fix: forward pngs_dir -- it was previously accepted but silently
  # ignored, so every png landed in the default "pngs" directory.
  png_path <- generate_png_file_path(room, direction, pngs_dir = pngs_dir)
  # Remove any stale file first so download.file always writes fresh output.
  if (file.exists(png_path)) file.remove(png_path)
  download.file(qr_code_url, png_path)
  return(png_path)
}
# insert qr codes into a docx template
# @param template_path path to template docx
# @inheritParams download_qr_code_for_room
# @param data optional list of values to make available for code chunks inside the template doc (Room and Dir are automatically available)
# @param save whether to save the resulting docx
# @param save_path where to save it to if save=TRUE
# @return docx with barcode png tags evaluated, save with print(docx, target = filepath) or using save=TRUE
insert_qr_codes_into_doc <- function(template_path, room, direction, form_url, room_field_id, direction_field_id, data = list(), save = FALSE, save_path = generate_docx_file_path(room, direction)) {
  # template
  stopifnot(file.exists(template_path))
  doc <- officer::read_docx(template_path)
  # Room and Direction info (always available to the template's expressions)
  data$Room <- room
  data$Dir <- direction
  # function available to create barcode inside the document generation scope
  # (template text can call generate_qr_code() inside a `png...` tag)
  generate_qr_code <- function() {
    download_qr_code_for_room(
      room = room, direction = direction,
      form_url = form_url, room_field_id = room_field_id,
      direction_field_id = direction_field_id
    )
  }
  # regular expressions for r expression and png tags:
  # `r <expr>` is replaced by the evaluated expression's value,
  # `png<W>x<H> <expr>` is replaced by the image whose path <expr> returns
  r_exp_regex <- "`r ([^`]*)`"
  png_exp_regex <- "`png(\\d+)x(\\d+) ([^`]*)`"
  # pull out styles
  styles <- officer::styles_info(doc)
  # get all text elements
  elements <- officer::docx_summary(doc) %>%
    # add in the additional information
    dplyr::mutate(
      expr = ifelse(
        stringr::str_detect(text, r_exp_regex),
        stringr::str_match_all(text, r_exp_regex),
        list()),
      # evaluate each `r` tag with `data` in scope; failing expressions warn
      # and render as "MISSING VALUE" instead of aborting the whole document
      expr_value = purrr::map(expr, ~{
        if (length(.x) > 0) {
          expr_code <- .x[,2]
          values <- purrr::map_chr(expr_code, ~{
            tryCatch(rlang::parse_expr(.x) %>% rlang::eval_tidy(data = !!data) %>% as.character(),
                   error = function(e) {
                     warning(e$message, immediate. = TRUE, call. = FALSE)
                     "MISSING VALUE"
                   })
          }
          ) %>% unname()
          return(values)
        }
        else list()
      }),
      # substitute every full `r ...` tag in the paragraph text by its value
      text_with_value = purrr::pmap_chr(
        list(text = text, expr = expr, expr_value = expr_value),
        function(text, expr, expr_value) {
          if (length(expr) > 0) {
            full_expr <- expr[,1]
            for (i in 1:length(full_expr))
              text <- stringr::str_replace(text, stringr::fixed(full_expr[i]), expr_value[i])
            return(text)
          } else {
            return(text)
          }
        }),
      # png tag pieces: the path expression plus requested width/height (inches)
      png =
        ifelse(
          stringr::str_detect(text, png_exp_regex),
          stringr::str_match(text, png_exp_regex)[,4],
          NA_character_),
      png_width =
        ifelse(
          stringr::str_detect(text, png_exp_regex),
          stringr::str_match(text, png_exp_regex)[,2],
          NA_character_) %>% as.numeric(),
      png_height =
        ifelse(
          stringr::str_detect(text, png_exp_regex),
          stringr::str_match(text, png_exp_regex)[,3],
          NA_character_) %>% as.numeric(),
      png_path = purrr::map_chr(png, ~{
        if (!is.na(.x)) {
          rlang::parse_expr(.x) %>% rlang::eval_tidy(data = !!data) %>% as.character()
        }
        else NA_character_
      })
    ) %>%
    # add in styles
    dplyr::left_join(
      dplyr::select(styles, style_type, style_name, style_id) %>% unique(),
      by = c("content_type" = "style_type", "style_name")
    )
  # add interpreted expressions: replace each tagged paragraph in place,
  # preserving its paragraph style when one is known
  exprs <- dplyr::filter(elements, purrr::map_int(expr, length) > 0)
  if (nrow(exprs) > 0) {
    for (i in 1:nrow(exprs)) {
      doc <-
        doc %>%
        officer::cursor_reach(keyword = paste0("\\Q", exprs$text[i], "\\E")) %>%
        officer::body_remove() %>%
        officer::cursor_backward() %>%
        officer::body_add_par(
          exprs$text_with_value[i], pos = "after",
          style = if (is.na(exprs$style_name[i])) NULL else exprs$style_name[i]
        )
    }
  }
  # add pngs
  pngs <- dplyr::filter(elements, !is.na(png))
  if (nrow(pngs) > 0) {
    for (i in 1:nrow(pngs)) {
      doc <-
        doc %>%
        officer::cursor_reach(keyword = paste0("\\Q", pngs$text[i], "\\E")) %>%
        officer::body_remove() %>%
        officer::cursor_backward() %>%
        officer::body_add_img(
          pngs$png_path[i],
          width = pngs$png_width[i], height = pngs$png_height[i],
          pos = "after",
          # bug fix: this previously read exprs$style_name[i] -- the style of an
          # unrelated (or out-of-range) expression row -- instead of the png
          # row actually being replaced
          style = if (is.na(pngs$style_name[i])) NULL else pngs$style_name[i]
        )
    }
  }
  # save?
  if (save) {
    if (file.exists(save_path)) file.remove(save_path)
    print(doc, target = save_path)
  }
  # return the doc (invisibly, so it pipes/prints cleanly)
  return(invisible(doc))
}
# generate complete docx for one or more rooms
#
# This is the main function to use to generate the QR codes docs.
#
# @inheritParams insert_qr_codes_into_doc
# @param data optional additional data frame, if provided must have a Room column and include all values in rooms
# @param directions - which directions to include, usually no need to change the default
# @param save_path path of the combined output document (defaults to the room
#   names joined by '_', with a .docx extension)
# @export
generate_qr_codes_doc <- function(template_path, rooms, form_url, room_field_id, direction_field_id, data = tibble(Room = rooms), directions = c("Enter", "Exit"), save_path = sprintf("%s.docx", paste(rooms, collapse = "_"))) {
  # safety checks
  stopifnot(is.data.frame(data) && "Room" %in% names(data))
  stopifnot(all(rooms %in% data$Room))
  # generate individual room docs
  # one document per room x direction combination; each is written to disk
  # (save = TRUE) so it can be re-read by body_add_docx() when combining below
  docs <- tibble::tibble(room = rooms) %>%
    tidyr::crossing(tibble(direction = directions)) %>%
    dplyr::mutate(
      save_path = purrr::map2_chr(room, direction, generate_docx_file_path) ,
      doc = purrr::map2(room, direction, ~{
        insert_qr_codes_into_doc(
          template_path = template_path,
          room = .x,
          direction = .y,
          form_url = form_url,
          room_field_id = room_field_id,
          direction_field_id = direction_field_id,
          # only the first matching data row is exposed as template data
          data = dplyr::filter(data, Room == .x)[1,] %>% as.list(),
          save = TRUE
        )
      })
    )
  # combine all docs
  # start from the first document and append the remaining saved files to it
  doc <- docs$doc[[1]]
  if (nrow(docs) > 1) {
    for (i in 2:nrow(docs)) {
      doc <- doc %>% officer::cursor_end() %>% officer::body_add_docx(docs$save_path[i])
    }
  }
  if (file.exists(save_path)) file.remove(save_path)
  print(doc, target = save_path)
  return(invisible(docs$doc))
} | /benson_qr_codes/qr_code_funcs.R | no_license | KopfLab/covid_apps | R | false | false | 9,960 | r | # parse pre-filled form URL
# @param url prefilled form URL with 'room' in the room key and 'enter' or 'exit' in the direction field
# @return list with form_url, room_field_id and direction_field_id
parse_prefilled_form_url <- function(url) {
reg_exp <- "^(.*)/viewform\\??(.*)$"
if(!stringr::str_detect(url, reg_exp)) {
warning("does not look like a prefilled URL, missing 'viewform'", call. = FALSE)
return (list(
form_url = NA_character_,
room_field_id = NA_character_,
direction_field_id = NA_character_
))
}
reg_exp_matches <- stringr::str_match(url, reg_exp)
# parse entries
params <- tibble(
param = stringr::str_split(reg_exp_matches[,3], fixed("&"))[[1]],
key = stringr::str_extract(param, "^[^=]*"),
value = stringr::str_extract(param, "[^=]*$"),
)
room_param <- dplyr::filter(params, stringr::str_to_lower(value) == "room")
dir_param <- dplyr::filter(params, stringr::str_to_lower(value) %in% c("enter", "exit"))
values <- list(
form_url = reg_exp_matches[,2],
room_field_id = if (nrow(room_param) == 1) room_param$key else NA_character_,
direction_field_id = if (nrow(dir_param) == 1) dir_param$key else NA_character_
)
return(values)
}
# generate qr code url
# generic function to generate qr code urls for any form and set of parameters
generate_qr_code_url <- function(
form_url,
form_parameters = c(),
api_url = "http://api.qrserver.com/v1/create-qr-code",
qr_code_size = "500x500"
) {
form_url <-
sprintf(
"%s/formResponse?%s&submit=SUBMIT", form_url,
paste0(sprintf("%s=%s", names(form_parameters), purrr::map_chr(as.character(form_parameters), URLencode)), collapse = "&")
)
qr_code_url <- sprintf("%s/?data=%s&size=%s", api_url, URLencode(form_url, reserved = TRUE, repeated = TRUE), qr_code_size)
return(qr_code_url)
}
# generate png file path
generate_png_file_path <- function(room, direction, pngs_dir = "pngs") {
if (!dir.exists(pngs_dir)) dir.create(pngs_dir)
return(file.path(pngs_dir, sprintf("%s_%s.png", room, direction)))
}
# generate docx save path
generate_docx_file_path <- function(room, direction, docx_dir = "docx") {
if (!dir.exists(docx_dir)) dir.create(docx_dir)
return(file.path(docx_dir, sprintf("%s-%s.docx", room, direction)))
}
# download qr code for room
# @param form_url the url of the google form (usually https://docs.google.com/forms/d/e/xxxxxxxx)
# @param room_field_id the ID of the room field in the google form, usually something like entry.12342356
# @param room the actual value for the room field
# @param direction_field_id the ID of the directtion field in the google form, usually something like entry.12342356
# @param direction usually either "Enter" or "Exit"
# @param ... additional parameters passed on to the generate_qr_code_url function
# @return png_path returns the png path
download_qr_code_for_room <- function(room, direction, form_url, room_field_id, direction_field_id, pngs_dir = "pngs", ...) {
stopifnot(!missing(room) && !missing(direction) && !missing(form_url) && !missing(room_field_id) && !missing(direction_field_id))
form_parameters <- c(room, direction) %>% setNames(c(room_field_id, direction_field_id))
qr_code_url <- generate_qr_code_url(form_url = form_url, form_parameters = form_parameters, ...)
png_path <- generate_png_file_path(room, direction)
if (file.exists(png_path)) file.remove(png_path)
download.file(qr_code_url, png_path)
return(png_path)
}
# insert qr codes into a docx template
# @param template_path path to template docx
# @inheritParams download_qr_code_for_room
# @param data optional list of values to make available for code chunks inside the template doc (Room and Dir are automatically available)
# @param save whether to save the resulting docx
# @param save_path where to save it to if save=TRUE
# @return docx with barcode png tags evaluated, save with print(docx, target = filepath) or using save=TRUE
insert_qr_codes_into_doc <- function(template_path, room, direction, form_url, room_field_id, direction_field_id, data = list(), save = FALSE, save_path = generate_docx_file_path(room, direction)) {
# template
stopifnot(file.exists(template_path))
doc <- officer::read_docx(template_path)
# Room and Direction info
data$Room <- room
data$Dir <- direction
# function available to create barcode inside the document generation scope
generate_qr_code <- function() {
download_qr_code_for_room(
room = room, direction = direction,
form_url = form_url, room_field_id = room_field_id,
direction_field_id = direction_field_id
)
}
# regular expressions for r expression and png tags
r_exp_regex <- "`r ([^`]*)`"
png_exp_regex <- "`png(\\d+)x(\\d+) ([^`]*)`"
# pull out styles
styles <- officer::styles_info(doc)
# get all text elements
elements <- officer::docx_summary(doc) %>%
# add in the additional information
dplyr::mutate(
expr = ifelse(
stringr::str_detect(text, r_exp_regex),
stringr::str_match_all(text, r_exp_regex),
list()),
expr_value = purrr::map(expr, ~{
if (length(.x) > 0) {
expr_code <- .x[,2]
values <- purrr::map_chr(expr_code, ~{
tryCatch(rlang::parse_expr(.x) %>% rlang::eval_tidy(data = !!data) %>% as.character(),
error = function(e) {
warning(e$message, immediate. = TRUE, call. = FALSE)
"MISSING VALUE"
})
}
) %>% unname()
return(values)
}
else list()
}),
text_with_value = purrr::pmap_chr(
list(text = text, expr = expr, expr_value = expr_value),
function(text, expr, expr_value) {
if (length(expr) > 0) {
full_expr <- expr[,1]
for (i in 1:length(full_expr))
text <- stringr::str_replace(text, fixed(full_expr[i]), expr_value[i])
return(text)
} else {
return(text)
}
}),
png =
ifelse(
stringr::str_detect(text, png_exp_regex),
stringr::str_match(text, png_exp_regex)[,4],
NA_character_),
png_width =
ifelse(
stringr::str_detect(text, png_exp_regex),
stringr::str_match(text, png_exp_regex)[,2],
NA_character_) %>% as.numeric(),
png_height =
ifelse(
stringr::str_detect(text, png_exp_regex),
stringr::str_match(text, png_exp_regex)[,3],
NA_character_) %>% as.numeric(),
png_path = purrr::map_chr(png, ~{
if (!is.na(.x)) {
rlang::parse_expr(.x) %>% rlang::eval_tidy(data = !!data) %>% as.character()
}
else NA_character_
})
) %>%
# add in styles
dplyr::left_join(
select(styles, style_type, style_name, style_id) %>% unique(),
by = c("content_type" = "style_type", "style_name")
)
# add interpreted expressions
exprs <- dplyr::filter(elements, purrr::map_int(expr, length) > 0)
if (nrow(exprs) > 0) {
for (i in 1:nrow(exprs)) {
doc <-
doc %>%
officer::cursor_reach(keyword = paste0("\\Q", exprs$text[i], "\\E")) %>%
officer::body_remove() %>%
officer::cursor_backward() %>%
officer::body_add_par(
exprs$text_with_value[i], pos = "after",
style = if (is.na(exprs$style_name[i])) NULL else exprs$style_name[i]
)
}
}
# add pngs
pngs <- dplyr::filter(elements, !is.na(png))
if (nrow(pngs) > 0) {
for (i in 1:nrow(pngs)) {
doc <-
doc %>%
officer::cursor_reach(keyword = paste0("\\Q", pngs$text[i], "\\E")) %>%
officer::body_remove() %>%
officer::cursor_backward() %>%
officer::body_add_img(
pngs$png_path[i],
width = pngs$png_width[i], height = pngs$png_height[i],
pos = "after",
style = if (is.na(exprs$style_name[i])) NULL else exprs$style_name[i]
)
}
}
# save?
if (save) {
if (file.exists(save_path)) file.remove(save_path)
print(doc, target = save_path)
}
# return the doc
return(invisible(doc))
}
# generate complete docx for one or more rooms
#
# This is the main function to use to generate the QR codes docs: it renders
# one docx per room/direction combination, then stitches the saved files
# together into a single combined document written to `save_path`.
#
# @inheritParams insert_qr_codes_into_doc
# @param data optional additional data frame; if provided it must have a Room
#   column that includes all values in rooms (only the first matching row per
#   room is used as template data)
# @param directions - which directions to include, usually no need to change the default
# @param save_path where the combined document is written (defaults to the
#   room names joined by "_")
# @return invisibly, the list of per-room/direction rdocx objects (the
#   combined document lives on disk at save_path)
# @export
generate_qr_codes_doc <- function(template_path, rooms, form_url, room_field_id, direction_field_id, data = tibble::tibble(Room = rooms), directions = c("Enter", "Exit"), save_path = sprintf("%s.docx", paste(rooms, collapse = "_"))) {
  # safety checks: `data` must be a data frame covering every requested room
  stopifnot(is.data.frame(data) && "Room" %in% names(data))
  stopifnot(all(rooms %in% data$Room))
  # generate one doc per room x direction; each call also saves its own file
  # (save = TRUE) so the saved copies can be appended together below
  docs <- tibble::tibble(room = rooms) %>%
    tidyr::crossing(tibble::tibble(direction = directions)) %>%
    dplyr::mutate(
      save_path = purrr::map2_chr(room, direction, generate_docx_file_path),
      doc = purrr::map2(room, direction, ~{
        insert_qr_codes_into_doc(
          template_path = template_path,
          room = .x,
          direction = .y,
          form_url = form_url,
          room_field_id = room_field_id,
          direction_field_id = direction_field_id,
          # only the first matching row of `data` feeds this room's template
          data = dplyr::filter(data, Room == .x)[1,] %>% as.list(),
          save = TRUE
          # NOTE(review): insert_qr_codes_into_doc saves to its own default
          # path -- presumably the same one generate_docx_file_path() builds
          # for the save_path column; confirm the two stay in sync.
        )
      })
    )
  # combine: start from the first rendered doc and append the saved docx
  # files of the remaining combinations at its end
  doc <- docs$doc[[1]]
  if (nrow(docs) > 1) {
    for (i in 2:nrow(docs)) {
      doc <- doc %>% officer::cursor_end() %>% officer::body_add_docx(docs$save_path[i])
    }
  }
  # remove any stale output before writing the combined document
  if (file.exists(save_path)) file.remove(save_path)
  print(doc, target = save_path)
  # return the individual docs invisibly
  return(invisible(docs$doc))
}
#' Linear regression
#'
#' Runs an OLS regression not unlike \code{\link{lm}}, returning the
#' coefficient estimates together with their covariance matrix.
#'
#' @param x design/covariate matrix (n x p); include a column of 1s yourself
#'   if an intercept is wanted (no intercept is added automatically)
#' @param y response vector (length n)
#'
#' @return A list with 4 elements: coefficients, vcov, sigma, df
#'
#' @examples
#' data(mtcars)
#' X <- as.matrix(mtcars[, c("cyl", "disp", "hp")])
#' y <- mtcars[, "mpg"]
#' linmodEst(X, y)
#'
#' @export
#'
linmodEst <- function(x, y) {
  ## solve the normal equations X'X b = X'y; crossprod() forms X'X more
  ## efficiently (and a touch more stably) than t(x) %*% x, and solving the
  ## linear system directly avoids forming an explicit inverse for the fit
  xtx <- crossprod(x)
  coef <- solve(xtx, crossprod(x, y))
  ## residual degrees of freedom and residual variance
  df <- nrow(x) - ncol(x)
  sigma2 <- sum((y - x %*% coef) ^ 2) / df
  ## compute sigma^2 * (x'x)^-1, the covariance of the estimates
  vcov <- sigma2 * solve(xtx)
  colnames(vcov) <- rownames(vcov) <- colnames(x)
  list(
    coefficients = coef,
    vcov = vcov,
    sigma = sqrt(sigma2),
    df = df
  )
}
| /R/linreg.R | no_license | veronicawang21/Linreg | R | false | false | 802 | r | #' Linear regression
#'
#' Runs an OLS regression not unlike \code{\link{lm}}
#'
#' @param x response vector (1 x n)
#' @param y covariate matrix (p x n) with no intercept
#'
#' @return A list with 4 elements: coefficients, vcov, sigma, df
#'
#' @examples
#' data(mtcars)
#' X <- as.matrix(mtcars[, c("cyl", "disp", "hp")])
#' y <- mtcars[, "mpg"]
#' linmodEst(y, X)
#'
#' @export
#'
linmodEst <- function(x, y) {
  ## OLS via the normal equations: beta_hat = (X'X)^{-1} X'y
  xtx_inv <- solve(t(x) %*% x)
  beta_hat <- xtx_inv %*% t(x) %*% y
  ## residual degrees of freedom and residual variance
  resid_df <- nrow(x) - ncol(x)
  resid_var <- sum((y - x %*% beta_hat) ^ 2) / resid_df
  ## covariance of the estimates: sigma^2 * (X'X)^{-1}
  vcov_mat <- resid_var * xtx_inv
  colnames(vcov_mat) <- rownames(vcov_mat) <- colnames(x)
  list(
    coefficients = beta_hat,
    vcov = vcov_mat,
    sigma = sqrt(resid_var),
    df = resid_df
  )
}
|
#' Presidential Debates
#'
#' Presidential/vice presidential primary and general election debates.
#'
#' @details
#' Corpus:
#' \itemize{
#'   \item id. An id that maps between the \code{corpus} and \code{meta} datasets
#'   \item author. The speaker/writer of the text element
#'   \item text. The text variable
#'   \item order. The order of speakers within debates
#' }
#'
#' Meta:
#' \itemize{
#'   \item id. An id that maps between the \code{corpus} and \code{meta} datasets
#'   \item type. The type of debate (e.g., primary, general, vice, etc.)
#'   \item institution. Institution where the debate took place
#'   \item city. City where the debate took place
#'   \item date. Date when the debate took place
#'   \item state. State where the debate took place
#'   \item state_abb. State abbreviation of where the debate took place
#'   \item latitude. Latitude of the city where the debate took place
#'   \item longitude. Longitude of the city where the debate took place
#' }
#'
#' @docType data
#' @name presidential_debates
#' @usage data(presidential_debates)
#' @format A list with two tibbles:
#' \code{corpus} with 226,900 rows and 4 variables &
#' \code{meta} with 138 rows and 9 variables
#' @references
#' http://www.presidency.ucsb.edu
NULL
| /R/data_presidential_debates.R | no_license | trinker/textcorpus | R | false | false | 1,262 | r | #' Presidential Debates
#'
#' Presidential/vice presidential primary and general election debates.
#'
#' @details
#' Corpus:
#' \itemize{
#'   \item id. An id that maps between the \code{corpus} and \code{meta} datasets
#'   \item author. The speaker/writer of the text element
#'   \item text. The text variable
#'   \item order. The order of speakers within debates
#' }
#'
#' Meta:
#' \itemize{
#'   \item id. An id that maps between the \code{corpus} and \code{meta} datasets
#'   \item type. The type of debate (e.g., primary, general, vice, etc.)
#'   \item institution. Institution where the debate took place
#'   \item city. City where the debate took place
#'   \item date. Date when the debate took place
#'   \item state. State where the debate took place
#'   \item state_abb. State abbreviation of where the debate took place
#'   \item latitude. Latitude of the city where the debate took place
#'   \item longitude. Longitude of the city where the debate took place
#' }
#'
#' @docType data
#' @name presidential_debates
#' @usage data(presidential_debates)
#' @format A list with two tibbles:
#' \code{corpus} with 226,900 rows and 4 variables &
#' \code{meta} with 138 rows and 9 variables
#' @references
#' http://www.presidency.ucsb.edu
NULL
|
# This script scrapes the DEP data for secchi disk readings, combines it with the Colby data
# and does a trend analysis on the entire dataset and the last 10 years following the DEP
# code per JD
# DJW 20MAY20
library(readxl)
library(dplyr)
library(lubridate)
library(tidyr)
library(stringr)
library(ggplot2)
library(Kendall)
# Load secchi data from LSM
# NOTE(review): absolute user-specific path -- this script only runs on the
# original author's machine; confirm before reuse.
filepath <- "/Users/djw56/Documents/Research/7LA-Colby/Belgrade Lakes/Lakes/Belgrades/Historical/"
filename <- paste(filepath,"MaineLakes_Secchi_ByDate.xlsx",sep="")
dat <- read_excel(filename, sheet = 2)
# MIDAS is identifier for LSM data (5349 -- presumably East Pond, matching
# the `lake` value below; verify against the MIDAS registry)
MIDAS1 <- 5349
SDT_LSM <- dat %>% filter(`Lake Code (MIDAS)` == MIDAS1)
# Load data from Colby: lake/site/years select the workbook and its sheets below
lake <- 'East Pond'
site <- 'EPDEP1'
years <- 2015:2020
# Build the Colby workbook path and file name (one workbook, one sheet per year)
filepathC <- paste0("~/Documents/Research/7LA-Colby/Belgrade Lakes/Lakes/", lake, "/Transparency/")
filename1 <- paste0(filepathC, site, " - Secchi 2015-2020.xlsx")
# Read each year's sheet into a list and bind once at the end. This replaces
# the original grow-by-rbind() loop seeded with "" (which silently coerced
# every column to character and needed a [-1,] row drop afterwards).
sheets <- lapply(as.character(years), function(yr) {
  temp <- read_xlsx(filename1, sheet = yr)
  # store dates as character so all sheets bind consistently
  temp$Date <- as.character(temp$Date)
  temp
})
dat1 <- do.call(rbind, sheets)
# single sampling station for this site
dat1$STATION <- 1
Colby <- dat1
# harmonize column names with the LSM data before stacking them
Colby <- Colby %>% rename('DATE'='Date','SECCHI DEPTH'='Depth(m)')
# Stack the LSM and Colby records on the shared columns
LSM <- select(SDT_LSM,'DATE','SECCHI DEPTH','STATION')
Secchi <- rbind.data.frame(LSM,Colby)
# derive year/month from the datetime strings
a = ymd_hms(Secchi$DATE)
Secchi$YEAR <- year(a)
Secchi$MONTH <- month(a)
# per-month record counts (bare expression: only prints when run interactively)
Secchi %>% group_by(MONTH) %>% summarize(npts = n())
# Keep station-1 readings for months 5-10, i.e. May-Oct
# (previous comment said "June-Sept", but the filter below is months 5-10)
S <- Secchi %>% filter(MONTH >= 5 & MONTH <= 10 & STATION == 1)
S <- na.omit(S)
S <- distinct(S)
# depths may arrive as character after the rbind above; force numeric
S$`SECCHI DEPTH` <- as.numeric(S$`SECCHI DEPTH`)
# Average data from the same month in the same year
Savg <- aggregate(`SECCHI DEPTH`~YEAR+MONTH, mean, data=S)
# yearly minimum (worst-clarity) reading
Smin <- S %>% group_by(YEAR) %>% summarise(yrmin = min(`SECCHI DEPTH`))
# Compute year average (plus a feet conversion column)
Syr <- S %>% group_by(YEAR) %>% summarise(yrmean = mean(`SECCHI DEPTH`))
Syr$ft <- Syr$yrmean*3.28
# Mann-Kendall trend test on the yearly means (result inspected interactively)
mk<-MannKendall(Syr$yrmean)
# --- Trend plots ------------------------------------------------------------
# Depth axis is reversed (deeper = lower on the plot). The tau/p values in the
# commented-out titles were read off the interactive MannKendall() output.
# File names use paste0(): the original paste() calls used the default
# sep = " ", which put a stray space between the trailing "/" of `filepath`
# and the file name.
ggplot(Syr, aes(YEAR, yrmean)) +
  geom_point() +
  labs(title = "Secchi Depth", y = "SDT (m)", x = "Year") +
  geom_smooth(method='loess') +
  scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
  labs(title = 'Average East Pond Secchi')
# labs(title = 'GP1 Secchi, May-Oct Avg, tau = -0.18, p = 0.083')
dev.print(png, paste0(filepath, "EP Avg Secchi.png"), res = 300, width = 1000)
# same plot restricted to the last 10 years
S10 <- filter(Syr, YEAR >= 2011)
mk <- MannKendall(S10$yrmean)
ggplot(S10, aes(YEAR, yrmean)) +
  geom_point() +
  labs(title = "Secchi Depth", y = "zS (m)", x = "Year") +
  #geom_smooth(method='loess') +
  scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
  labs(title = 'Average East Pond Secchi')
dev.print(png, paste0(filepath, "EP Avg Secchi 10yr.png"), res = 300, width = 1000)
# yearly minimum secchi, full record
mk <- MannKendall(Smin$yrmin)
ggplot(Smin, aes(YEAR, yrmin)) +
  geom_point() +
  labs(title = "Secchi Depth", y = "SDT (m)", x = "Year") +
  geom_smooth(method='loess') +
  scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
  labs(title = 'Minimum East Pond Secchi')
#labs(title = 'GP1 Secchi, May-Oct Min, tau = -0.32, p = 0.0017')
dev.print(png, paste0(filepath, "EP Min Secchi.png"), res = 300, width = 1000)
# yearly minimum secchi, last 10 years
S10 <- filter(Smin, YEAR >= 2011)
mk <- MannKendall(S10$yrmin)
ggplot(S10, aes(YEAR, yrmin)) +
  geom_point()+
  labs(title = "Secchi Depth", y = "zS (m)", x = "Year") +
  #geom_smooth(method='loess') +
  scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
  labs(title = 'Minimum East Pond Secchi')
dev.print(png, paste0(filepath, "EP Min Secchi 10yr.png"), res = 300, width = 1000)
| /Historical-R/East Pond/EP_Secchi_historical_trends.R | no_license | djwain/7LA-Colby-WQI | R | false | false | 3,525 | r | # This script scrapes the DEP data for secchi disk readings, combines it with the Colby data
# and does a trend analysis on the entire dataset and the last 10 years following the DEP
# code per JD
# DJW 20MAY20
library(readxl)
library(dplyr)
library(lubridate)
library(tidyr)
library(stringr)
library(ggplot2)
library(Kendall)
# Load secchi data from LSM
filepath <- "/Users/djw56/Documents/Research/7LA-Colby/Belgrade Lakes/Lakes/Belgrades/Historical/"
filename <- paste(filepath,"MaineLakes_Secchi_ByDate.xlsx",sep="")
dat <- read_excel(filename, sheet = 2)
# MIDAS is identifier for LSM data
MIDAS1 <- 5349
SDT_LSM <- dat %>% filter(`Lake Code (MIDAS)` == MIDAS1)
# Load data from Colby
lake <- 'East Pond'
site <- 'EPDEP1'
years <- 2015:2020
filepathC <- paste("~/Documents/Research/7LA-Colby/Belgrade Lakes/Lakes/",lake,sep="","/Transparency/")
filename1 <- paste(filepathC,site,sep=""," - Secchi 2015-2020.xlsx")
dat1 <- ""
for(i in 1:length(years)){
temp <- read_xlsx(filename1, sheet = as.character(years[i]))
temp$Date <- as.character(temp$Date)
dat1 <- rbind(dat1,temp)
}
dat1 <- dat1[-1,]
dat1$STATION <- 1
Colby <- dat1
Colby <- Colby %>% rename('DATE'='Date','SECCHI DEPTH'='Depth(m)')
LSM <- select(SDT_LSM,'DATE','SECCHI DEPTH','STATION')
Secchi <- rbind.data.frame(LSM,Colby)
a = ymd_hms(Secchi$DATE)
Secchi$YEAR <- year(a)
Secchi$MONTH <- month(a)
Secchi %>% group_by(MONTH) %>% summarize(npts = n())
# Use June-Sept
S <- Secchi %>% filter(MONTH >= 5 & MONTH <= 10 & STATION == 1)
S <- na.omit(S)
S <- distinct(S)
S$`SECCHI DEPTH` <- as.numeric(S$`SECCHI DEPTH`)
# Average data from the same month in the same year
Savg <- aggregate(`SECCHI DEPTH`~YEAR+MONTH, mean, data=S)
Smin <- S %>% group_by(YEAR) %>% summarise(yrmin = min(`SECCHI DEPTH`))
# Compute year average
Syr <- S %>% group_by(YEAR) %>% summarise(yrmean = mean(`SECCHI DEPTH`))
Syr$ft <- Syr$yrmean*3.28
mk<-MannKendall(Syr$yrmean)
ggplot(Syr, aes(YEAR, yrmean)) +
geom_point() +
labs(title = "Secchi Depth", y = "SDT (m)", x = "Year") +
geom_smooth(method='loess') +
scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
labs(title = 'Average East Pond Secchi')
# labs(title = 'GP1 Secchi, May-Oct Avg, tau = -0.18, p = 0.083')
dev.print(png,paste(filepath,"EP Avg Secchi.png"), res = 300, width = 1000)
S10 <- filter(Syr,YEAR >= 2011)
mk<-MannKendall(S10$yrmean)
ggplot(S10, aes(YEAR, yrmean)) +
geom_point() +
labs(title = "Secchi Depth", y = "zS (m)", x = "Year") +
#geom_smooth(method='loess') +
scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
labs(title = 'Average East Pond Secchi')
dev.print(png,paste(filepath,"EP Avg Secchi 10yr.png"), res = 300, width = 1000)
mk<-MannKendall(Smin$yrmin)
ggplot(Smin, aes(YEAR, yrmin)) +
geom_point() +
labs(title = "Secchi Depth", y = "SDT (m)", x = "Year") +
geom_smooth(method='loess') +
scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
labs(title = 'Minimum East Pond Secchi')
#labs(title = 'GP1 Secchi, May-Oct Min, tau = -0.32, p = 0.0017')
dev.print(png,paste(filepath,"EP Min Secchi.png"), res = 300, width = 1000)
S10 <- filter(Smin,YEAR >= 2011)
mk<-MannKendall(S10$yrmin)
ggplot(S10, aes(YEAR, yrmin)) +
geom_point()+
labs(title = "Secchi Depth", y = "zS (m)", x = "Year") +
#geom_smooth(method='loess') +
scale_y_continuous(trans = 'reverse', lim = c(7.5,0)) +
labs(title = 'Minimum East Pond Secchi')
dev.print(png,paste(filepath,"EP Min Secchi 10yr.png"), res = 300, width = 1000)
|
## ----setup, include=FALSE-------------------------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = FALSE)
## ---- echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE----------------------------------------------------------------------------
# import libraries
library(rmarkdown)
library(plyr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(pivottabler)
library(gtsummary)
library(ggpubr)
library(ggfortify)
library(cluster)
library(MASS)
library(lmtest)
library(fBasics)
library(rcompanion)
library(gridExtra)
library(cowplot)
library(kableExtra)
library(haven)
library(tidyverse)
library(rstatix)
library(ggpubr)
library(lme4)
library(reshape2)
library(kableExtra)
library(pander)
library(performance)
library(pROC)
library(sqldf)
library(nlme)
library(ggeffects)
library(doBy)
library(tseries)
library(forecast)
## ---- echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE----------------------------------------------------------------------------
# data <- read.csv("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv", na.strings = # "", fileEncoding = "UTF-8-BOM")
#setwd("School/courses/applied_stats/p4")
#write.csv( data, 'data.csv')
data <- read.csv('download')
## ----initial_plot , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------
#convert dates columns
# plot the weekly cases in france
data$dateRep<-as.Date(data$dateRep, '%d/%m/%Y')
france <- data[ data$countriesAndTerritories=='France' ,]
p <- ggplot(france, aes(x=dateRep, y=cases_weekly)) +
geom_line() +
xlab("")+ ggtitle("Plot of weekly cases in France")
grid.arrange(p)
par(mfrow=c(1,1))
# dickey fuller test
options(warn=-1)
pander(adf.test(france$cases_weekly), caption = "Dickey Fuller Test")
## ----exlplore_data , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------
par(mfrow=c(1,2))
# dickey fuller test
options(warn=-1)
#pander(adf.test(france$cases_weekly), caption = "Dickey Fuller Test")
#par(mfrow=c(1,2))
p1<- acf(france$cases_weekly, plot=FALSE)
plot(p1,main = "ACF")
p2<- pacf(france$cases_weekly, plot=FALSE)
plot(p2,main = "PACF")
# box cox transofrmation
tseries_h<- france$cases_weekly
#bx<- BoxCox(tseries_h, lambda = 0.5)
#plot.ts(bx)
#lambda <- BoxCox.lambda(tseries_h)
#adf.test(bx)
## ----auto_arima1 , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------
#Aproach 1
par(mfrow=c(1,1))
options(warn = -1)
f<- auto.arima(tseries_h)
plot(forecast(f,h=20))
#pander(summary(f) , caption ='Summary Stepwise ARIMA')
#pander(f$coef , caption ='Coefficients non Stepwise ARIMA')
#pander(f$aic , caption ='AIC non Stepwise ARIMA')
#pandoc.table(f$aic, keep.line.breaks = FALSE,caption ='AIC Stepwise ARIMA',style = 'rmarkdown')
par(mfrow=c(1,3))
# lewts check auto coreelation since we are looking at the diff (0,1,0)
plot(diff(tseries_h),main = "Scatter Differences")
p1<- acf(diff(tseries_h),plot = FALSE)
plot(p1,main = "ACF")
p2<- pacf(diff(tseries_h),plot = FALSE)
plot(p2,main = "PACF")
## ----auto_arima2 , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------
#Aproach 2
options(warn = -1)
f<- auto.arima(tseries_h, stepwise = FALSE,seasonal=FALSE)
pander(f$coef , caption ='Coefficients non Stepwise ARIMA')
#pandoc.table(f$aic, keep.line.breaks = FALSE,caption ='AIC non Stepwise ARIMA',style = #'rmarkdown')
#pander(f$aic , caption ='AIC non Stepwise ARIMA')
# lewts check auto coreelation since we are looking at the diff (0,1,0)
#resid<- checkresiduals(f, plot=FALSE,test=FALSE)
#par(mfrow=c(1,1))
#plot(forecast(f,h=20))
#checkresiduals(f,test = FALSE)
## ----function , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE------------------------------------------------------------------
# define plotting function: observed weekly cases as semi-transparent points
# with the model's fitted values overlaid as a red line, titled `title`
plot_predictions <- function(france, m , title){
  fitted_vals <- predict(m, newdata = france)
  plot_df <- data.frame(
    week = france$week,
    cases_weekly = france$cases_weekly,
    predicted_values = fitted_vals
  )
  ggplot(plot_df, aes(x = week)) +
    geom_point(aes(y = cases_weekly), size = 1, alpha = 0.5) +
    geom_line(aes(y = predicted_values), colour = "red") +
    ggtitle(title)
}
## ----gam , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-----------------------------------------------------------------------
library(mgcv)
library(stringr)
# split the column to use the week as an input
france$week <- str_split_fixed(france$year_week,'-',2)[,2]
france$week <- as.numeric(france$week)
p1 <- ggplot(france, aes(week, cases_weekly)) + geom_point()
p1
## ----s_week , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------------
###########################################################
# s(week)
m1 <- gam(cases_weekly ~ s(week), data = france , method = "REML")
# plot diagnostics
par(mfrow = c(2,2))
gam.check(m1)
#the larger the number, the more wiggly the fitted model.
summary(m1)
#model of s(week)
#p1<- ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = y ~s(x))
p1 <- plot_predictions(france,m1,"cases_weekly ~ s(week)")
grid.arrange(p1)
## ----ti_week , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------------
###########################################################
# ti(week)
m2 <- gam(cases_weekly ~ ti(week), data = france)
# plot diagnostics
par(mfrow = c(2,2))
gam.check(m2)
#the larger the number, the more wiggly the fitted model.
#summary(m2)
#model of s(week)
#ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = #y ~ti(x))
#grid.arrange(p1)
p2<- plot_predictions(france,m2,"cases_weekly ~ ti(week)")
###########################################################
# ti(week) +s(week)
m3 <- gam(cases_weekly ~ ti(week) +s(week), data = france)
# plot diagnostics
par(mfrow = c(2,2))
gam.check(m3)
#the larger the number, the more wiggly the fitted model.
#summary(m3)
#model of s(week)
#p2<- ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", #formula = y ~ti(x) + s(x))
#grid.arrange(p2)
p3<- plot_predictions(france,m3,"cases_weekly ~ ti(week) +s(week)")
###########################################################
m11 <- gam(cases_weekly ~ te(week), data = france)
#the larger the number, the more wiggly the fitted model.
#summary(m11)
#model of te(week)
#ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = y ~te(x))
p11<- plot_predictions(france,m11,"cases_weekly ~ te(week) ")
###########################################################
grid.arrange(p2,p3,p11)
## ----k , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------------------
# change the number of basis functions
m4 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 52), data = france , method = "REML")
m5 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 1), data = france , method = "REML")
m6 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 10), data = france , method = "REML")
m7 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 20), data = france , method = "REML")
#model of s(week)
#ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = y ~s(x, bs = 'cc', k = 52))
p4<- plot_predictions(france,m4,"Cyclic ~ 52 knots")
p5<-plot_predictions(france,m5,"Cyclic ~ 1 knot")
p6<-plot_predictions(france,m6,"Cyclic ~ 10 knots")
p7<-plot_predictions(france,m7,"Cyclic ~ 20 knots")
grid.arrange(p4,p5,p6,p7)
## ----gamma , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------------
###########################################################
# change the smoothing parameter - gamma
m8 <- gam(cases_weekly ~ s(week), gamma=1 ,data = france , method = "REML")
p8 <-plot_predictions(france,m8, 'gamma = 1')
m9 <- gam(cases_weekly ~ s(week), gamma=20 ,data = france , method = "REML")
p9 <-plot_predictions(france,m9, 'gamma = 20')
m10 <- gam(cases_weekly ~ s(week), gamma=0.1 ,data = france , method = "REML")
p10 <- plot_predictions(france,m10, 'gamma = 0.1')
grid.arrange(p8,p9,p10)
## ----anova , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------------
m1_an <- gamm(cases_weekly ~ ti(week), data = france)
m2_an <- gamm(cases_weekly ~ s(week), data = france)
m3_an <- gamm(cases_weekly ~ s(week) + ti(week), data = france)
m4_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 52), data = france , method = "REML")
m5_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 1), data = france , method = "REML")
m6_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 10), data = france , method = "REML")
m7_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 20), data = france , method = "REML")
m8_an <- gamm(cases_weekly ~ s(week), gamma=1 ,data = france , method = "REML")
m9_an <- gamm(cases_weekly ~ s(week), gamma=20 ,data = france , method = "REML")
m10_an <- gamm(cases_weekly ~ s(week), gamma=0.1 ,data = france , method = "REML")
m11_an <- gamm(cases_weekly ~ te(week) ,data = france , method = "REML")
anova(m1_an$lme,
m2_an$lme,
m3_an$lme)
anova(m4_an$lme,
m5_an$lme,
m6_an$lme,
m7_an$lme)
anova(m7_an$lme,
m8_an$lme,
m9_an$lme)
## ----bam , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-----------------------------------------------------------------------
# check where the drop of the rho is
#once we hit a high rho , we see that the trend is random - and they are due to auto correlated errors
aicvec<-remlvec<-c()
rovec<- seq(.01,1,0.05)
for (k in 1:length(rovec)){
tmp<- bam( cases_weekly ~ s(week) , rho = rovec[k],data = france)
aicvec[k] <- AIC(tmp)
remlvec[k] <- tmp$gcv.ubre
#cat(rovec[k], aicvec[k], remlvec[k],"\n")
}
#matplot(rovec, cbind(aicvec,remlvec))
q1<- qplot(rovec, aicvec)+ ggtitle('Rho value vs AIC ')
q2<- qplot(rovec, remlvec)+ ggtitle('Rho values vs REML' )
# check certain rhos
m13 <- bam(cases_weekly ~ s(week) , rho =0.7,data = france)
p13 <- plot_predictions(france,m13, 'BAM with rho 0.7')
m14 <- bam(cases_weekly ~ s(week) , rho =0.5,data = france)
p14 <- plot_predictions(france,m14, 'BAM with rho 0.5')
grid.arrange(q1,q2)
grid.arrange(p13,p14)
## ----vc , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE------------------------------------------------------------------------
data$cases_per_capita <- data$cases_weekly / data$popData2019 * 100000
data$week <- str_split_fixed(data$year_week,'-',2)[,2]
data$week <- as.numeric(data$week)
# crete set of data aggregated by continent and sum weekly cases and population (per week)
agg_data <- data %>%
group_by(continentExp,week) %>%
summarise(cases_weekly = sum(cases_weekly),
popData2019 = sum(popData2019))
agg_data$cases_per_capita <- agg_data$cases_weekly / agg_data$popData2019 * 100000
agg_data <- agg_data[(agg_data$continentExp == 'Asia')|((agg_data$continentExp == 'Europe')),]
agg_data$cont <- factor(agg_data$continentExp)
agg_data<- na.omit(agg_data)
cases_per_capita<- as.vector(agg_data$cases_per_capita)
dateRep <- agg_data$dateRep
cont <- agg_data$cont
cont_plot <- ggplot(agg_data, aes(x = week, y = cases_per_capita, colour = factor(cont)))+
geom_point(size=2.5)+ ggtitle('Plot of Avg Adjusted cases per 100K weekly')
cont_plot$labels$colour <- "Continent"
grid.arrange(cont_plot)
## ----vc_gam , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------------
m3_1 <- gam(cases_per_capita ~ s(week)+ s(week,by=cont,bs="cc"), data = agg_data , method = "REML")
m3_2 <- gam(cases_per_capita ~ s(week)+ te(week,by=cont), data = agg_data , method = "REML")
m3_3 <- gam(cases_per_capita ~ s(week)+ s(week,by=cont,bs="cc")+ te(week,by=cont), data = agg_data , method = "REML")
summary(m3_4 <- gam(cases_per_capita ~ cont + s(week)+ s(week,by=cont,bs="cc")+ te(week,by=cont), data = agg_data ,
method = "REML"))
par(mfrow=c(3,3))
plot(m3_4)
pander(anova(m3_1,m3_2,m3_3,m3_4),caption = 'Anova 4 models')
## ----tstat , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------------
##### -------- aggregating all countries in same continent a- sum all weekly cases and adjust
asia <- agg_data[(agg_data$continentExp == 'Asia'),]$cases_per_capita
europe <- agg_data[(agg_data$continentExp == 'Europe'),]$cases_per_capita
pander(adf.test(europe), caption = "Dickey Fuller Test - Europe Lag Differences")
pander(adf.test(asia), caption = "Dickey Fuller Test- Asia Lag Differences")
# low p val -> not equal
pander(t.test(diff(asia,1), y = diff(europe,1), alternative = c("two.sided"), paired = FALSE, var.equal = FALSE, conf.level = 0.95))
## ----ts_clustering , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------
cols <- c('countriesAndTerritories','week','cases_weekly')
pv <- data[cols]
library(reshape2)
#pv <- pv[pv$week > 10 , ]
rr <- recast(pv, countriesAndTerritories ~ week, id.var = c( "week",'countriesAndTerritories'))
rr[is.na(rr)] <- 0
require(dtw)
jj <- dist(rr[,-1], method="dtw")
mds <- cmdscale(jj, eig=TRUE)
plot(mds$points[,1:2], pch=16, cex=.8, xlab="Principal coordinate 1", ylab="Principal coordinate 2")
require(cluster)
p3 <- pam(jj,3) # k-medoids clustering
t_table <- table(p3$clust,rr[,1])
#merge with continent and compare
tr_table <- t(t_table)
plot(mds$points[,1:2], pch=16, cex=.8, xlab="Principal coordinate 1", ylab="Principal coordinate 2",col=p3$cluster)
continents_and_countries <- as.data.frame(unique(data[c('countriesAndTerritories','continentExp')]))
rownames(continents_and_countries) <- continents_and_countries$countriesAndTerritories
df_tr_table <- as.data.frame.matrix(tr_table)
df_tr_table$countriesAndTerritories <- rownames(df_tr_table)
m<- merge(x = df_tr_table,y = continents_and_countries, by.x='countriesAndTerritories', by.y='countriesAndTerritories', all.x=FALSE, all.y=FALSE)
## ----summaries , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-----------------------------------------------------------------
summary(m2)
summary(m3)
summary(m11)
## ----p3_summaries , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------
summary(m3_1)
summary(m3_2)
summary(m3_3)
## ----cluster , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------------
# all cluster 1
tr_table[tr_table[,1] ==1,]
# all cluster 2
tr_table[tr_table[,2] ==1,]
# all cluster 3
tr_table[tr_table[,3] ==1,]
m %>%
group_by(continentExp) %>%
summarise(across(c(2,3,4), list(mean)))
| /applied_statistics/p4/hw4.R | no_license | NachiLieder/StatisticsMA | R | false | false | 15,121 | r | ## ----setup, include=FALSE-------------------------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = FALSE)
## ---- echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE----------------------------------------------------------------------------
# import libraries
library(rmarkdown)
library(plyr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(pivottabler)
library(gtsummary)
library(ggpubr)
library(ggfortify)
library(cluster)
library(MASS)
library(lmtest)
library(fBasics)
library(rcompanion)
library(gridExtra)
library(cowplot)
library(kableExtra)
library(haven)
library(tidyverse)
library(rstatix)
library(ggpubr)
library(lme4)
library(reshape2)
library(kableExtra)
library(pander)
library(performance)
library(pROC)
library(sqldf)
library(nlme)
library(ggeffects)
library(doBy)
library(tseries)
library(forecast)
## ---- echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE----------------------------------------------------------------------------
# data <- read.csv("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv", na.strings = # "", fileEncoding = "UTF-8-BOM")
#setwd("School/courses/applied_stats/p4")
#write.csv( data, 'data.csv')
data <- read.csv('download')
## ----initial_plot , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------
#convert dates columns
# plot the weekly cases in france
data$dateRep<-as.Date(data$dateRep, '%d/%m/%Y')
france <- data[ data$countriesAndTerritories=='France' ,]
p <- ggplot(france, aes(x=dateRep, y=cases_weekly)) +
geom_line() +
xlab("")+ ggtitle("Plot of weekly cases in France")
grid.arrange(p)
par(mfrow=c(1,1))
# dickey fuller test
options(warn=-1)
pander(adf.test(france$cases_weekly), caption = "Dickey Fuller Test")
## ----exlplore_data , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------
par(mfrow=c(1,2))
# dickey fuller test
options(warn=-1)
#pander(adf.test(france$cases_weekly), caption = "Dickey Fuller Test")
#par(mfrow=c(1,2))
p1<- acf(france$cases_weekly, plot=FALSE)
plot(p1,main = "ACF")
p2<- pacf(france$cases_weekly, plot=FALSE)
plot(p2,main = "PACF")
# box cox transofrmation
tseries_h<- france$cases_weekly
#bx<- BoxCox(tseries_h, lambda = 0.5)
#plot.ts(bx)
#lambda <- BoxCox.lambda(tseries_h)
#adf.test(bx)
## ----auto_arima1 , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------
#Aproach 1
par(mfrow=c(1,1))
options(warn = -1)
f<- auto.arima(tseries_h)
plot(forecast(f,h=20))
#pander(summary(f) , caption ='Summary Stepwise ARIMA')
#pander(f$coef , caption ='Coefficients non Stepwise ARIMA')
#pander(f$aic , caption ='AIC non Stepwise ARIMA')
#pandoc.table(f$aic, keep.line.breaks = FALSE,caption ='AIC Stepwise ARIMA',style = 'rmarkdown')
par(mfrow=c(1,3))
# lewts check auto coreelation since we are looking at the diff (0,1,0)
plot(diff(tseries_h),main = "Scatter Differences")
p1<- acf(diff(tseries_h),plot = FALSE)
plot(p1,main = "ACF")
p2<- pacf(diff(tseries_h),plot = FALSE)
plot(p2,main = "PACF")
## ----auto_arima2 , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------
#Aproach 2
options(warn = -1)
f<- auto.arima(tseries_h, stepwise = FALSE,seasonal=FALSE)
pander(f$coef , caption ='Coefficients non Stepwise ARIMA')
#pandoc.table(f$aic, keep.line.breaks = FALSE,caption ='AIC non Stepwise ARIMA',style = #'rmarkdown')
#pander(f$aic , caption ='AIC non Stepwise ARIMA')
# lewts check auto coreelation since we are looking at the diff (0,1,0)
#resid<- checkresiduals(f, plot=FALSE,test=FALSE)
#par(mfrow=c(1,1))
#plot(forecast(f,h=20))
#checkresiduals(f,test = FALSE)
## ----function , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE------------------------------------------------------------------
# define plotting function: scatter of observed weekly cases plus the model's
# predictions drawn as a red line; `title` becomes the plot title
plot_predictions <- function(france, m , title){
  preds <- predict(m, newdata = france)
  df_plot <- data.frame(week = france$week,
                        cases_weekly = france$cases_weekly,
                        predicted_values = preds)
  p <- ggplot(df_plot, aes(x = week))
  p +
    geom_point(aes(y = cases_weekly), size = 1, alpha = 0.5) +
    geom_line(aes(y = predicted_values), colour = "red") +
    ggtitle(title)
}
## ----gam , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-----------------------------------------------------------------------
library(mgcv)
library(stringr)
# Extract the week number from the "year-week" string so it can be used as a
# numeric smoother input.
france$week <- str_split_fixed(france$year_week,'-',2)[,2]
france$week <- as.numeric(france$week)
# Scatter of weekly cases against week number.
p1 <- ggplot(france, aes(week, cases_weekly)) + geom_point()
p1
## ----s_week , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------------
###########################################################
# GAM with a thin-plate smooth of week: s(week), REML smoothness selection.
m1 <- gam(cases_weekly ~ s(week), data = france , method = "REML")
# plot diagnostics
par(mfrow = c(2,2))
gam.check(m1)
# Note: the larger the effective degrees of freedom, the wigglier the fit.
summary(m1)
#model of s(week)
#p1<- ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = y ~s(x))
p1 <- plot_predictions(france,m1,"cases_weekly ~ s(week)")
grid.arrange(p1)
## ----ti_week , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------------
###########################################################
# Same response modelled with a tensor-product interaction smooth: ti(week).
m2 <- gam(cases_weekly ~ ti(week), data = france)
# plot diagnostics
par(mfrow = c(2,2))
gam.check(m2)
#the larger the number, the more wiggly the fitted model.
#summary(m2)
#model of s(week)
#ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = #y ~ti(x))
#grid.arrange(p1)
p2<- plot_predictions(france,m2,"cases_weekly ~ ti(week)")
###########################################################
# Combined smooth specification: ti(week) + s(week).
m3 <- gam(cases_weekly ~ ti(week) +s(week), data = france)
# plot diagnostics
par(mfrow = c(2,2))
gam.check(m3)
#the larger the number, the more wiggly the fitted model.
#summary(m3)
#model of s(week)
#p2<- ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", #formula = y ~ti(x) + s(x))
#grid.arrange(p2)
p3<- plot_predictions(france,m3,"cases_weekly ~ ti(week) +s(week)")
###########################################################
# Full tensor-product smooth: te(week).
m11 <- gam(cases_weekly ~ te(week), data = france)
#the larger the number, the more wiggly the fitted model.
#summary(m11)
#model of te(week)
#ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = y ~te(x))
p11<- plot_predictions(france,m11,"cases_weekly ~ te(week) ")
###########################################################
# Compare the three alternative smooth specifications side by side.
grid.arrange(p2,p3,p11)
## ----k , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------------------
# Vary the basis dimension k of a cyclic cubic spline ('cc') of week.
# NOTE(review): k = 1 is below the minimum basis dimension for a cyclic
# spline -- mgcv presumably resets it or warns (warnings are suppressed in
# this chunk); confirm.
m4 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 52), data = france , method = "REML")
m5 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 1), data = france , method = "REML")
m6 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 10), data = france , method = "REML")
m7 <- gam(cases_weekly ~ s(week, bs = 'cc', k = 20), data = france , method = "REML")
#model of s(week)
#ggplot(france, aes(week, cases_weekly)) + geom_point() + geom_smooth(method = "gam", formula = y ~s(x, bs = 'cc', k = 52))
p4<- plot_predictions(france,m4,"Cyclic ~ 52 knots")
p5<-plot_predictions(france,m5,"Cyclic ~ 1 knot")
p6<-plot_predictions(france,m6,"Cyclic ~ 10 knots")
p7<-plot_predictions(france,m7,"Cyclic ~ 20 knots")
grid.arrange(p4,p5,p6,p7)
## ----gamma , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------------
###########################################################
# Vary the smoothing-parameter multiplier gamma: larger gamma penalizes
# wiggliness more heavily, giving a smoother fit.
m8 <- gam(cases_weekly ~ s(week), gamma=1 ,data = france , method = "REML")
p8 <-plot_predictions(france,m8, 'gamma = 1')
m9 <- gam(cases_weekly ~ s(week), gamma=20 ,data = france , method = "REML")
p9 <-plot_predictions(france,m9, 'gamma = 20')
m10 <- gam(cases_weekly ~ s(week), gamma=0.1 ,data = france , method = "REML")
p10 <- plot_predictions(france,m10, 'gamma = 0.1')
grid.arrange(p8,p9,p10)
## ----anova , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------------
# Refit every candidate model with gamm() so the underlying lme components
# can be compared with likelihood-ratio ANOVA.
m1_an <- gamm(cases_weekly ~ ti(week), data = france)
m2_an <- gamm(cases_weekly ~ s(week), data = france)
m3_an <- gamm(cases_weekly ~ s(week) + ti(week), data = france)
m4_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 52), data = france , method = "REML")
m5_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 1), data = france , method = "REML")
m6_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 10), data = france , method = "REML")
m7_an <- gamm(cases_weekly ~ s(week, bs = 'cc', k = 20), data = france , method = "REML")
m8_an <- gamm(cases_weekly ~ s(week), gamma=1 ,data = france , method = "REML")
m9_an <- gamm(cases_weekly ~ s(week), gamma=20 ,data = france , method = "REML")
m10_an <- gamm(cases_weekly ~ s(week), gamma=0.1 ,data = france , method = "REML")
m11_an <- gamm(cases_weekly ~ te(week) ,data = france , method = "REML")
# Compare smooth specifications, then basis dimensions, then gamma settings.
anova(m1_an$lme,
m2_an$lme,
m3_an$lme)
anova(m4_an$lme,
m5_an$lme,
m6_an$lme,
m7_an$lme)
anova(m7_an$lme,
m8_an$lme,
m9_an$lme)
## ----bam , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-----------------------------------------------------------------------
# Scan candidate AR(1) correlation values (rho) for bam() and record AIC and
# the smoothness-selection criterion, to find where the criterion drops.
# Once rho is high, the remaining trend looks random -- the apparent pattern
# is due to autocorrelated errors.
rovec <- seq(.01,1,0.05)                      # candidate rho values
aicvec <- remlvec <- numeric(length(rovec))   # preallocate: avoid growing vectors inside the loop
for (k in seq_along(rovec)) {
  tmp <- bam(cases_weekly ~ s(week), rho = rovec[k], data = france)
  aicvec[k] <- AIC(tmp)
  remlvec[k] <- tmp$gcv.ubre                  # criterion minimized by the fit
  #cat(rovec[k], aicvec[k], remlvec[k],"\n")
}
#matplot(rovec, cbind(aicvec,remlvec))
q1 <- qplot(rovec, aicvec) + ggtitle('Rho value vs AIC ')
q2 <- qplot(rovec, remlvec) + ggtitle('Rho values vs REML')
# Refit at two specific rho values and plot fitted vs observed.
m13 <- bam(cases_weekly ~ s(week), rho = 0.7, data = france)
p13 <- plot_predictions(france, m13, 'BAM with rho 0.7')
m14 <- bam(cases_weekly ~ s(week), rho = 0.5, data = france)
p14 <- plot_predictions(france, m14, 'BAM with rho 0.5')
grid.arrange(q1, q2)
grid.arrange(p13, p14)
## ----vc , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE------------------------------------------------------------------------
# Cases per 100,000 inhabitants, plus a numeric week column parsed from "year-week".
data$cases_per_capita <- data$cases_weekly / data$popData2019 * 100000
data$week <- str_split_fixed(data$year_week,'-',2)[,2]
data$week <- as.numeric(data$week)
# Create a dataset aggregated by continent: sum weekly cases and population per week.
agg_data <- data %>%
group_by(continentExp,week) %>%
summarise(cases_weekly = sum(cases_weekly),
popData2019 = sum(popData2019))
agg_data$cases_per_capita <- agg_data$cases_weekly / agg_data$popData2019 * 100000
# Keep only Asia and Europe for the two-continent comparison.
agg_data <- agg_data[(agg_data$continentExp == 'Asia')|((agg_data$continentExp == 'Europe')),]
agg_data$cont <- factor(agg_data$continentExp)
agg_data<- na.omit(agg_data)
cases_per_capita<- as.vector(agg_data$cases_per_capita)
# NOTE(review): agg_data was rebuilt above without a dateRep column, so this is
# presumably NULL -- confirm whether it is still needed.
dateRep <- agg_data$dateRep
cont <- agg_data$cont
cont_plot <- ggplot(agg_data, aes(x = week, y = cases_per_capita, colour = factor(cont)))+
geom_point(size=2.5)+ ggtitle('Plot of Avg Adjusted cases per 100K weekly')
cont_plot$labels$colour <- "Continent"
grid.arrange(cont_plot)
## ----vc_gam , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------------
# Varying-coefficient GAMs: smooths of week that differ by continent via by=cont.
m3_1 <- gam(cases_per_capita ~ s(week)+ s(week,by=cont,bs="cc"), data = agg_data , method = "REML")
m3_2 <- gam(cases_per_capita ~ s(week)+ te(week,by=cont), data = agg_data , method = "REML")
m3_3 <- gam(cases_per_capita ~ s(week)+ s(week,by=cont,bs="cc")+ te(week,by=cont), data = agg_data , method = "REML")
summary(m3_4 <- gam(cases_per_capita ~ cont + s(week)+ s(week,by=cont,bs="cc")+ te(week,by=cont), data = agg_data ,
method = "REML"))
par(mfrow=c(3,3))
plot(m3_4)
pander(anova(m3_1,m3_2,m3_3,m3_4),caption = 'Anova 4 models')
## ----tstat , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE---------------------------------------------------------------------
# Per-capita series for each continent (aggregated above).
asia <- agg_data[(agg_data$continentExp == 'Asia'),]$cases_per_capita
europe <- agg_data[(agg_data$continentExp == 'Europe'),]$cases_per_capita
# Stationarity checks before comparing the two differenced series.
pander(adf.test(europe), caption = "Dickey Fuller Test - Europe Lag Differences")
pander(adf.test(asia), caption = "Dickey Fuller Test- Asia Lag Differences")
# Welch two-sample t-test on the first differences; a low p-value indicates
# the two continents' weekly changes differ.
pander(t.test(diff(asia,1), y = diff(europe,1), alternative = c("two.sided"), paired = FALSE, var.equal = FALSE, conf.level = 0.95))
## ----ts_clustering , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------
# Cluster countries by the shape of their weekly case curves using
# dynamic-time-warping (DTW) distances and k-medoids (PAM).
cols <- c('countriesAndTerritories','week','cases_weekly')
pv <- data[cols]
library(reshape2)
#pv <- pv[pv$week > 10 , ]
# Reshape to wide format: one row per country, one column per week of cases.
rr <- recast(pv, countriesAndTerritories ~ week, id.var = c( "week",'countriesAndTerritories'))
rr[is.na(rr)] <- 0                     # weeks with no report treated as zero cases
# library(), not require(): fail loudly if a mandatory dependency is missing.
library(dtw)
jj <- dist(rr[,-1], method="dtw")      # pairwise DTW distances between case curves
mds <- cmdscale(jj, eig=TRUE)          # classical MDS projection to 2 coordinates
plot(mds$points[,1:2], pch=16, cex=.8, xlab="Principal coordinate 1", ylab="Principal coordinate 2")
library(cluster)
p3 <- pam(jj,3) # k-medoids clustering with k = 3
t_table <- table(p3$clust,rr[,1])
#merge with continent and compare
tr_table <- t(t_table)                 # rows = countries, columns = cluster indicators
# Same MDS projection, coloured by cluster assignment.
plot(mds$points[,1:2], pch=16, cex=.8, xlab="Principal coordinate 1", ylab="Principal coordinate 2",col=p3$cluster)
# Attach each country's continent so cluster composition can be compared.
continents_and_countries <- as.data.frame(unique(data[c('countriesAndTerritories','continentExp')]))
rownames(continents_and_countries) <- continents_and_countries$countriesAndTerritories
df_tr_table <- as.data.frame.matrix(tr_table)
df_tr_table$countriesAndTerritories <- rownames(df_tr_table)
m <- merge(x = df_tr_table, y = continents_and_countries,
           by.x = 'countriesAndTerritories', by.y = 'countriesAndTerritories',
           all.x = FALSE, all.y = FALSE)
## ----summaries , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-----------------------------------------------------------------
# Summaries of the single-smooth models fitted earlier.
summary(m2)
summary(m3)
summary(m11)
## ----p3_summaries , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE--------------------------------------------------------------
# Summaries of the varying-coefficient (by-continent) models.
summary(m3_1)
summary(m3_2)
summary(m3_3)
## ----cluster , echo=FALSE , echo=FALSE, warning=FALSE,message=FALSE-------------------------------------------------------------------
# Countries assigned to each of the three PAM clusters (indicator columns of tr_table).
# all cluster 1
tr_table[tr_table[,1] ==1,]
# all cluster 2
tr_table[tr_table[,2] ==1,]
# all cluster 3
tr_table[tr_table[,3] ==1,]
# Mean cluster membership per continent -- columns 2:4 are presumably the
# three cluster-indicator columns of m; confirm against m's column layout.
m %>%
group_by(continentExp) %>%
summarise(across(c(2,3,4), list(mean)))
|
# S3 plot method for "isoph" fits: draws the estimated hazard ratio
# exp(psi.hat) as a step function over the covariate z, marking the
# observed (potential jump) points with filled circles.
#   x:   an isoph fit with components $psi, $z, $z.range, $shape, $formula
#   ...: ignored (kept for S3 signature compatibility)
plot.isoph = function(x, ...) {
  point.cex  <- 0.7
  legend.cex <- 0.9
  point.pch  <- 19

  psi.hat <- psi.obs <- x$psi
  z.grid  <- z.obs   <- x$z
  z.range <- x$z.range
  hr.grid <- hr.obs  <- exp(psi.obs)

  # Pad the curve at the boundary of z.range with psi = -Inf (hr = 0);
  # the estimate is -Inf there due to censorship.
  if (x$shape == 'increasing') {
    if (z.range[1] != z.grid[1]) {
      psi.hat <- c(-Inf, psi.hat)
      hr.grid <- c(0, hr.grid)
      z.grid  <- c(z.range[1], z.grid)
    }
  } else if (x$shape == 'decreasing') {
    if (z.range[2] != max(z.grid)) {
      psi.hat <- c(psi.hat, -Inf)
      hr.grid <- c(hr.grid, 0)
      z.grid  <- c(z.grid, z.range[2])
    }
  }

  x.label      <- x$formula[[3]]   # covariate name taken from the model formula
  x.limits     <- z.range
  legend.label <- "Potential jump points"

  # Step direction ('s' vs 'S') and legend corner depend on monotonicity.
  if (x$shape == 'increasing') {
    step.type  <- 's'
    legend.pos <- 'topleft'
  }
  if (x$shape == 'decreasing') {
    step.type  <- 'S'
    legend.pos <- 'topright'
  }

  # Plot on the hazard-ratio scale, exp(psi.hat); exp(-Inf) padding maps to 0.
  y.label  <- expression(exp(hat(psi)))
  y.limits <- range(exp(psi.hat))
  plot(hr.grid ~ z.grid, type = step.type, xlab = x.label, ylab = y.label,
       xlim = x.limits, ylim = y.limits)
  points(hr.obs ~ z.obs, pch = point.pch, cex = point.cex)

  legend(x = legend.pos, pch = point.pch, legend = legend.label,
         bty = 'n', cex = legend.cex)
}
| /R/plot.isoph.R | no_license | cran/isoph | R | false | false | 1,073 | r | plot.isoph=function(x, ...){
pcex=0.7
lgcex=0.9
pch=19
y=y.obs=x$psi
z=z.obs=x$z
z.range=x$z.range
hr=hr.obs=exp(y.obs)
#-Inf due to censorship
if(x$shape=='increasing'){
if(z.range[1]!=z[1]){
y=c(-Inf,y)
hr=c(0,hr)
z=c(z.range[1],z)
}
}else if(x$shape=='decreasing'){
if(z.range[2]!=max(z)){
y=c(y,-Inf)
hr=c(hr,0)
z=c(z,z.range[2])
}
}
xlab=x$formula[[3]]
xlim=z.range
lglab="Potential jump points"
if(x$shape=='increasing'){
l.type='s'
lgloc='topleft'
}
if(x$shape=='decreasing'){
l.type='S'
lgloc='topright'
}
#for psi.hat
#ylab=expression(hat(psi))
#ylim=range(y[is.finite(y)])
#plot(y~z, type=l.type, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim)
#points(y.obs~z.obs, pch=pch, cex=pcex)
#for exp(psi.hat)
ylab=expression(exp(hat(psi)))
ylim=range(exp(y))
plot(hr~z, type=l.type, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim)
points(hr.obs~z.obs, pch=pch, cex=pcex)
#legends
legend(x=lgloc, pch=pch, legend=lglab, bty='n', cex=lgcex)
}
|
# Gapminder bubble-chart animation: GDP per capita vs life expectancy,
# one frame per year, bubble size = population, colour = continent.
library(tidyverse)
library(gganimate)
library(gapminder)

# Build the animated plot object; frames interpolate linearly between years.
gapminder_anim <- ggplot(
  gapminder,
  aes(gdpPercap, lifeExp, size = pop, color = continent)
) +
  geom_point(alpha = 0.7) +
  scale_colour_manual(values = continent_colors) +
  scale_size(range = c(2, 12)) +
  scale_x_log10() +
  labs(
    title = 'Year: {frame_time}',
    x = 'GDP per capita',
    y = 'life expectancy'
  ) +
  transition_time(year) +
  ease_aes('linear')

# Render the animation and write it to a GIF on disk.
anim_save(
  'gapminder-gganimate.gif',
  gapminder_anim,
  width = 700,
  height = 700,
  res = 150,
  nframes = 250,
  fps = 25,
  renderer = gifski_renderer()
)
| /Gapminder/gapminder-animation.R | no_license | jon-kane/R-Animations | R | false | false | 594 | r | library(tidyverse)
library(gganimate)
library(gapminder)
anim = gapminder %>%
ggplot(aes(gdpPercap, lifeExp, size = pop, color = continent)) +
geom_point(alpha = 0.7) +
scale_colour_manual(values = continent_colors) +
scale_size(range = c(2, 12)) +
scale_x_log10() +
labs(
title = 'Year: {frame_time}',
x = 'GDP per capita',
y = 'life expectancy'
) +
transition_time(year) +
ease_aes('linear')
anim_save(
'gapminder-gganimate.gif',
anim,
width = 700,
height = 700,
res = 150,
nframes = 250,
fps = 25,
renderer = gifski_renderer()
)
|
## cachematrix.R: cache the inverse of a matrix so repeated solves are free.
## makeCacheMatrix() wraps a matrix in a caching object; cacheSolve() returns
## its inverse, computing it only on the first call.

## Create a special "matrix" object: a list of four closures sharing state.
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setsolve(inv) -- store a computed inverse in the cache
##   getsolve()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y                    # replace the matrix in the enclosing environment
    inv <<- NULL               # any previously cached inverse is now stale
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    inv <<- solve
  }
  getsolve <- function() {
    inv
  }
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse is already cached, return it immediately (with a message);
## otherwise compute it with solve(), store it in the cache, and return it.
##   x:   object produced by makeCacheMatrix()
##   ...: extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
| /cachematrix.R | no_license | wonkyung/ProgrammingAssignment2 | R | false | false | 1,223 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates a matrix containing a list of functions as
## 1- set the value of the matrix
## 2- get the value of the matrix
## 3- set the value of the inverse
## 4- get the value of the inverse
##
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get= get, setsolve = setsolve, getsolve = getsolve)
}
## Write a short comment describing this function
##
## 0. Attempt to get the inverse
## 1. If a non-null value is found, return the value
## 2. if null is returned,get the inverse using the solve() function
## 3. store the inverse matrix using the setsolve function
## 4. return the inverted value
##
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if (!is.null(m)) {
message("Getting cached data")
return (m)
}
m <- solve(x$get(), ...)
x$setsolve(m)
m
}
|
interflex.plot.pool <- function( # only for discrete treatments
out,
diff.values = NULL,
order = NULL,
subtitles = NULL,
show.subtitles = NULL,
legend.title = NULL,
CI = TRUE,
Xdistr = "histogram",
main = NULL,
Ylabel = NULL,
Dlabel = NULL,
Xlabel = NULL,
xlab = NULL,
ylab = NULL,
xlim = NULL,
ylim = NULL,
theme.bw = FALSE,
show.grid = TRUE,
cex.main = NULL,
cex.lab = NULL,
cex.axis = NULL,
cex.sub = NULL,
bin.labs = TRUE, # bin labels
interval = NULL, # interval in replicated papers
color = NULL,
file = NULL,
scale = 1.1,
height = 7,
width = 10
){
X <- NULL
TE <- NULL
Treatment <- NULL
CI_lower <- NULL
CI_upper <- NULL
ME <- NULL
x0 <- NULL
CI.lower <- NULL
CI.upper <- NULL
x <- NULL
y <- NULL
end_level <- NULL
xmin <- NULL
xmax <- NULL
count1 <- NULL
ymax <- NULL
D <- NULL
name <- NULL
r <- NULL
if (!class(out) %in% c("interflex")) {
stop("Not an \"interflex\" object.")
}
treat.info <- out$treat.info
treat.type <- treat.info[["treat.type"]]
if(treat.type=='discrete'){
other.treat <- names(treat.info[["other.treat"]])
all.treat <- names(treat.info[["all.treat"]])
base <- names(treat.info[["base"]])
}
if(treat.type=='continuous'){
D.sample <- treat.info[["D.sample"]]
label.name <- names(D.sample)
}
de <- out$de
de.tr <- out$de.tr
hist.out <- out$hist.out
count.tr <- out$count.tr
estimator <- out$estimator
if(is.null(show.subtitles)==FALSE){
if (is.logical(show.subtitles) == FALSE & is.numeric(show.subtitles)==FALSE) {
stop("\"show.subtitles\" is not a logical flag.")
}
}else{
show.subtitles <- TRUE
}
# CI
if(is.null(CI)==FALSE){
if(is.logical(CI) == FALSE & is.numeric(CI)==FALSE) {
stop("\"CI\" is not a logical flag.")
}
if(estimator=='kernel'){
if(CI==TRUE & out$CI==FALSE){
stop("Confidence intervals are not estimated, please set CI to FALSE.")
}
}
}
if(estimator=='kernel'){
if(is.null(CI)==TRUE){
CI <- out$CI
}
}
if(estimator=='binning'|estimator=='linear'){
if(is.null(CI)==TRUE){
CI <- TRUE
}
}
# Xdistr
if (!Xdistr %in% c("hist","histogram","density","none")){
stop("\"Xdistr\" must be \"histogram\", \"density\", or \"none\".")
}
#main
if (is.null(main)==FALSE) {
main <- as.character(main)[1]
}
#Ylabel
if (is.null(Ylabel)==TRUE) {
Ylabel <- out$Ylabel
} else {
if (is.character(Ylabel) == FALSE) {
stop("\"Ylabel\" is not a string.")
} else {
Ylabel <- Ylabel[1]
}
}
#Dlabel
if (is.null(Dlabel)==TRUE) {
Dlabel <- out$Dlabel
} else {
if (is.character(Dlabel) == FALSE) {
stop("\"Dlabel\" is not a string.")
} else {
Dlabel <- Dlabel[1]
}
}
#Xlabel
if (is.null(Xlabel)==TRUE) {
Xlabel <- out$Xlabel
} else {
if (is.character(Xlabel) == FALSE) {
stop("\"Xlabel\" is not a string.")
} else {
Xlabel <- Xlabel[1]
}
}
## axis labels
if(is.null(xlab)==FALSE){
if (is.character(xlab) == FALSE) {
stop("\"xlab\" is not a string.")
}
}
if(is.null(ylab)==FALSE){
if (is.character(ylab) == FALSE) {
stop("\"ylab\" is not a string.")
}
}
if(is.null(xlab)==TRUE){
xlab<-c(paste("Moderator: ", Xlabel, sep=""))
} else {
if (is.character(xlab) == FALSE) {
stop("\"xlab\" is not a string.")
}
}
if(is.null(ylab)==TRUE){
ylab<-c(paste("Marginal Effect of ",Dlabel," on ",Ylabel,sep=""))
} else {
if (is.character(ylab) == FALSE) {
stop("\"ylab\" is not a string.")
}
}
## xlim ylim
if (is.null(xlim)==FALSE) {
if (is.numeric(xlim)==FALSE) {
stop("Some element in \"xlim\" is not numeric.")
} else {
if (length(xlim)!=2) {
stop("\"xlim\" must be of length 2.")
}
}
}
if (is.null(ylim)==FALSE) {
if (is.numeric(ylim)==FALSE) {
stop("Some element in \"ylim\" is not numeric.")
} else {
if (length(ylim)!=2) {
stop("\"ylim\" must be of length 2.")
}
}
}
## theme.bw
if (is.logical(theme.bw) == FALSE & is.numeric(theme.bw)==FALSE) {
stop("\"theme.bw\" is not a logical flag.")
}
## show.grid
if (is.logical(show.grid) == FALSE & is.numeric(show.grid)==FALSE) {
stop("\"show.grid\" is not a logical flag.")
}
## font size
if (is.null(cex.main)==FALSE) {
if (is.numeric(cex.main)==FALSE) {
stop("\"cex.main\" is not numeric.")
}
}
if (is.null(cex.sub)==FALSE) {
if (is.numeric(cex.sub)==FALSE) {
stop("\"cex.sub\" is not numeric.")
}
}
if (is.null(cex.lab)==FALSE) {
if (is.numeric(cex.lab)==FALSE) {
stop("\"cex.lab\" is not numeric.")
}
}
if (is.null(cex.axis)==FALSE) {
if (is.numeric(cex.axis)==FALSE) {
stop("\"cex.axis\" is not numeric.")
}
}
## bin.labs
if (is.logical(bin.labs) == FALSE & is.numeric(bin.labs)==FALSE) {
stop("\"bin.labs\" is not a logical flag.")
}
## interval
if (is.null(interval)==FALSE) {
if (is.numeric(interval)==FALSE) {
stop("Some element in \"interval\" is not numeric.")
}
}
## file
if (is.null(file)==FALSE) {
if (is.character(file)==FALSE) {
stop("Wrong file name.")
}
}
## color
if(is.null(color)==FALSE){
color <- as.character(color)
color.in <- c()
for(char in color){
res <- try(col2rgb(char),silent=TRUE)
if(!"try-error"%in%class(res)){
color.in <- c(color.in,char)
}else{stop(paste0(char," is not one name for a color.\n"))}
}
color <- color.in
}
if (is.null(legend.title)==FALSE){
legend.title <- as.character(legend.title)[1]
}
if(treat.type=='discrete' & (estimator=='linear'|estimator=='binning')){
tempxx <- out$est.lin[[other.treat[1]]][,'X']
}
if(treat.type=='discrete' & estimator=='kernel'){
tempxx <- out$est.kernel[[other.treat[1]]][,'X']
}
if(treat.type=='continuous' & (estimator=='linear'|estimator=='binning')){
tempxx <- out$est.lin[[label.name[1]]][,'X']
}
if(treat.type=='continuous' & estimator=='kernel'){
tempxx <- out$est.kernel[[label.name[1]]][,'X']
}
min.XX <- min(tempxx)
max.XX <- max(tempxx)
## order/subtitles
if(treat.type=='discrete') {
other.treat <- sort(all.treat[which(all.treat!=base)])
if(is.null(order)==FALSE){
order <- as.character(order)
if(length(order)!=length(unique(order))){
stop("\"order\" should not contain repeated values.")
}
if(length(order)!=length(other.treat)){
stop("\"order\" should include all kinds of treatment arms except for the baseline group.")
}
if(sum(!is.element(order,other.treat))!=0 | sum(!is.element(other.treat,order))!=0){
stop("\"order\" should include all kinds of treatment arms except for the baseline group.")
}
other.treat <- order
}
if(is.null(show.subtitles)==TRUE){
show.subtitles <- TRUE
}
if(is.null(subtitles)==FALSE){
if(length(subtitles)!=length(all.treat)){
stop("The number of elements in \"subtitles\" should be m(m is the number of different treatment arms including the baseline group).")
}
}
if(is.null(subtitles)==TRUE){
base.name <- paste0("Base Group (",base,")")
subtitles <- c(base.name,other.treat)
}
else{base.name <- subtitles[1]}
subtitles.all <- as.character(subtitles)
subtitles <- subtitles.all[2:length(subtitles.all)]
}
if(treat.type=='continuous'){
if(is.null(order)==FALSE){
if(is.numeric(order)==FALSE){
stop("\"order\" should be numeric.")
}
if(length(order)!=length(unique(order))){
stop("\"order\" should not contain repeated values.")
}
if(length(order)!=length(D.sample)){
stop("\"order\" should contain all reference values of D.")
}
if(sum(!is.element(order,D.sample))!=0 | sum(!is.element(D.sample,order))!=0){
stop("\"order\" should contain all reference values of D.")
}
label.name.order <- c()
for(a in order){
label.name.order <- c(label.name.order,names(D.sample[which(D.sample==a)]))
}
label.name <- label.name.order
}
if(is.null(show.subtitles)==TRUE){
show.subtitles <- TRUE
}
if(is.null(subtitles)==FALSE){
if(length(subtitles)!=length(label.name)){
stop("The number of elements in \"subtitles\" should equal to the number of values in D.ref.")
}
}
if(is.null(subtitles)==TRUE){
subtitles <- label.name
}
subtitles.all <- subtitles
}
if(is.null(diff.values)==FALSE){
if(estimator=='binning'){
stop("\"diff.values\" can only work after linear or kernel model is applied.")
}
if(is.numeric(diff.values)==FALSE){
stop("\"diff.values\" is not numeric.")
}
if(length(diff.values)<2){
stop("\"diff.values\" must be of length 2 or more.")
}
if(treat.type=='discrete' & estimator=='linear'){
tempxx <- out$est.lin[[other.treat[1]]][,'X']
}
if(treat.type=='discrete' & estimator=='kernel'){
tempxx <- out$est.kernel[[other.treat[1]]][,'X']
}
if(treat.type=='continuous' & estimator=='linear'){
tempxx <- out$est.lin[[label.name[1]]][,'X']
}
if(treat.type=='continuous' & estimator=='kernel'){
tempxx <- out$est.kernel[[label.name[1]]][,'X']
}
min.XX <- min(tempxx)
max.XX <- max(tempxx)
for(a in diff.values){
if(a<min.XX|a>max.XX){
stop("Elements in \"diff.values\" should be within the range of the moderator.")
}
}
}else{
if(estimator=='binning'){
diff.values <- NULL
}
else{
#diff.values <- out$diff.info[["diff.values.plot"]]
diff.values <- NULL
}
}
#yrange
if(estimator=='binning'){
nbins <- out$nbins
if(treat.type=='discrete'){
est.lin <- out$est.lin
est.bin <- out$est.bin
est.bin2 <- list() ## non missing part
est.bin3 <- list() ## missing part
yrange <- c(0)
for(char in other.treat) {
est.bin2[[char]] <- as.matrix(est.bin[[char]][which(is.na(est.bin[[char]][,2])==FALSE),])
est.bin3[[char]] <- as.matrix(est.bin[[char]][which(is.na(est.bin[[char]][,2])==TRUE),])
if(dim(est.bin2[[char]])[2]==1){
est.bin2[[char]] <- t(est.bin2[[char]])
}
if(dim(est.bin3[[char]])[2]==1){
est.bin3[[char]] <- t(est.bin3[[char]])
}
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,c(4,5)],est.bin[[char]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,2],est.bin[[char]][,2]))))
}
}
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
X.lvls <- est.lin[[other.treat[1]]][,1]
errorbar.width<-(max(X.lvls)-min(X.lvls))/20
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
}
if (treat.type=='continuous'){
est.lin <- out$est.lin
est.bin<-out$est.bin
est.bin2 <- list() ## non missing part
est.bin3 <- list() ## missing part
yrange <- c(0)
for(label in label.name){
est.bin2[[label]] <- as.matrix(est.bin[[label]][which(is.na(est.bin[[label]][,2])==FALSE),])
est.bin3[[label]] <- as.matrix(est.bin[[label]][which(is.na(est.bin[[label]][,2])==TRUE),])
if(dim(est.bin2[[label]])[2]==1){
est.bin2[[label]] <- t(est.bin2[[label]])
}
if(dim(est.bin3[[label]])[2]==1){
est.bin3[[label]] <- t(est.bin3[[label]])
}
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,c(4,5)],est.bin[[label]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,2],est.bin[[label]][,2]))))
}
}
X.lvls <- est.lin[[label.name[1]]][,1]
errorbar.width<-(max(X.lvls)-min(X.lvls))/20
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
}
ymin <- min(yrange)-maxdiff/5
}
if(estimator=='linear'){
if(treat.type=='discrete'){
est.lin <- out$est.lin
yrange <- c(0)
for(char in other.treat) {
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,2]))))
}
}
X.lvls <- est.lin[[other.treat[1]]][,1]
}
if(treat.type=='continuous'){
est.lin <- out$est.lin
yrange <- c(0)
for(label in label.name){
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,2]))))
}
}
X.lvls <- est.lin[[label.name[1]]][,1]
}
errorbar.width <- (max(X.lvls)-min(X.lvls))/20
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
ymin <- min(yrange)-maxdiff/5
}
if(estimator=='kernel'){
est.kernel <- out$est.kernel
yrange <- c(0)
if(CI==FALSE){
if(treat.type=='discrete'){
for(char in other.treat) {
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[char]][,2]))))
}
X.lvls <- est.kernel[[other.treat[1]]][,1]
}
if (treat.type=='continuous'){
for(label in label.name){
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[label]][,2]))))
}
X.lvls <- est.kernel[[label.name[1]]][,1]
}
}
if(CI==TRUE){
if(treat.type=='discrete'){
for(char in other.treat) {
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[char]][,c(4,5)]))))
}
X.lvls <- est.kernel[[other.treat[1]]][,1]
}
if (treat.type=='continuous'){
for(label in label.name){
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[label]][,c(4,5)]))))
}
X.lvls <- est.kernel[[label.name[1]]][,1]
}
}
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
errorbar.width<-(max(X.lvls)-min(X.lvls))/20
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
ymin <- min(yrange)-maxdiff/5
}
#color
#get base.color & platte(for discrete data)
requireNamespace("RColorBrewer")
platte <- brewer.pal(n=8, "Set2")
if(is.null(color)==TRUE){
base.color <- 'gray50'
}
if(is.null(color)==FALSE){
if(treat.type=='discrete'){
base.color <- color[1]
if(length(color)==1){
platte <- platte
}else{platte <- c(color[2:length(color)],platte)}
}
if(treat.type=='continuous'){
platte <- c(color,platte)
}
}
if(treat.type=='discrete'){
num.treat <- length(other.treat)
}
if(treat.type=='continuous'){
num.treat <- length(label.name)
}
platte <- platte[1:num.treat]
# initialize
p1 <- ggplot()
## black white theme and mark zero
if (theme.bw == FALSE) {
p1 <- p1 + geom_hline(yintercept=0,colour="white",size=2)
} else {
p1 <- p1 + theme_bw() + geom_hline(yintercept=0,colour="#AAAAAA50",size=2)
}
if (show.grid == FALSE) {
p1 <- p1 + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
}
if(estimator=='kernel'|estimator=='linear'){
if(estimator=='kernel'){
est <- est.kernel
}else{est <- est.lin}
if(treat.type=='discrete'){
for(char in other.treat){
est.touse <- est[[char]]
if(CI==TRUE){
colnames(est.touse) <- c("X","TE","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","TE")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(char,dim(est.touse)[1])
if(char==other.treat[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest <- as.data.frame(tempest)
tempest$Treatment <- factor(tempest$Treatment, levels = other.treat)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = TE,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte,labels = subtitles)
if (CI == TRUE) {
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = F)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
if(is.null(diff.values)==FALSE){
k <- 1
for(char in other.treat){
tempest <- est[[char]]
for(target.value in diff.values){
Xnew<-abs(tempest[,'X']-target.value)
d1<-min(Xnew)
label1<-which.min(Xnew)
Xnew[label1]<-Inf
d2<-min(Xnew)
label2<-which.min(Xnew)
if(d1==0){
est.mark <- tempest[label1,2]
if(CI==TRUE){
lb.mark <- tempest[label1,4]
ub.mark <- tempest[label1,5]
}
}
else if(d2==0){
est.mark <- tempest[label2,2]
if(CI==TRUE){
lb.mark <- tempest[label2,4]
ub.mark <- tempest[label2,5]
}
}
else{ ## weighted average
est.mark1 <- tempest[label1,2]
est.mark2 <- tempest[label2,2]
est.mark <- ((est.mark1 * d2 + est.mark2 * d1)/(d1 + d2))
if(CI==TRUE){
lb.mark1 <- tempest[label1,4]
ub.mark1 <- tempest[label1,5]
lb.mark2 <- tempest[label2,4]
ub.mark2 <- tempest[label2,5]
lb.mark <- ((lb.mark1 * d2 + lb.mark2 * d1)/(d1 + d2))
ub.mark <- ((ub.mark1 * d2 + ub.mark2 * d1)/(d1 + d2))
}
}
p1 <- p1 + annotate("point",x=target.value,y=est.mark,size=1,colour=platte[k])
if(CI==TRUE){
p1 <- p1+ annotate("errorbar",x=target.value,ymin=lb.mark,ymax=ub.mark,colour=platte[k],size=0.5,width= (max(tempxx)-min(tempxx))/20)
}
}
k <- k + 1
}
}
}
if(treat.type=='continuous'){
for(label in label.name){
est.touse <- est[[label]]
if(CI==TRUE){
colnames(est.touse) <- c("X","ME","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","ME")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(label,dim(est.touse)[1])
if(label==label.name[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest <- as.data.frame(tempest)
tempest$Treatment <- factor(tempest$Treatment, levels = label.name)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = ME,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte,labels = subtitles)
if (CI == TRUE) {
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = FALSE)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
if(is.null(diff.values)==FALSE){
k <- 1
for(label in label.name){
tempest <- est[[label]]
for(target.value in diff.values){
Xnew<-abs(tempest[,'X']-target.value)
d1<-min(Xnew)
label1<-which.min(Xnew)
Xnew[label1]<-Inf
d2<-min(Xnew)
label2<-which.min(Xnew)
if(d1==0){
est.mark <- tempest[label1,2]
if(CI==TRUE){
lb.mark <- tempest[label1,4]
ub.mark <- tempest[label1,5]
}
}
else if(d2==0){
est.mark <- tempest[label2,2]
if(CI==TRUE){
lb.mark <- tempest[label2,4]
ub.mark <- tempest[label2,5]
}
}
else{ ## weighted average
est.mark1 <- tempest[label1,2]
est.mark2 <- tempest[label2,2]
est.mark <- ((est.mark1 * d2 + est.mark2 * d1)/(d1 + d2))
if(CI==TRUE){
lb.mark1 <- tempest[label1,4]
ub.mark1 <- tempest[label1,5]
lb.mark2 <- tempest[label2,4]
ub.mark2 <- tempest[label2,5]
lb.mark <- ((lb.mark1 * d2 + lb.mark2 * d1)/(d1 + d2))
ub.mark <- ((ub.mark1 * d2 + ub.mark2 * d1)/(d1 + d2))
}
}
p1 <- p1 + annotate("point",x=target.value,y=est.mark,size=1,colour=platte[k])
if(CI==TRUE){
p1 <- p1+ annotate("errorbar",x=target.value,ymin=lb.mark,ymax=ub.mark,colour=platte[k],size=0.5,width= (max(tempxx)-min(tempxx))/20)
}
}
k <- k + 1
}
}
}
}
if(estimator=='binning'){
#est <- est.lin
if(treat.type=='discrete'){
for(char in other.treat){
est.touse <- est.lin[[char]]
if(CI==TRUE){
colnames(est.touse) <- c("X","TE","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","TE")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(char,dim(est.touse)[1])
if(char==other.treat[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest$Treatment <- factor(tempest$Treatment, levels = other.treat)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = TE,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte, labels = subtitles)
if (CI == TRUE){
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = FALSE)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
k <- 1
for(char in other.treat){
tempest2 <- as.data.frame(est.bin2[[char]])
tempest3 <- as.data.frame(est.bin3[[char]])
p1 <- p1+ geom_errorbar(data=tempest2, aes(x=x0, ymin=CI.lower, ymax=CI.upper),color = platte[k],
width= errorbar.width/3)+
geom_point(data=tempest2,aes(x=x0,y=coef),size=3,shape=21,fill = platte[k],color = platte[k])
if(dim(tempest3)[1]!=0){
p1 <- p1+geom_text(data=tempest3,aes(x=x0,y=0),label="NaN",color = platte[k])
}
k <- k+1
}
}
if(treat.type=='continuous'){
for(label in label.name){
est.touse <- est.lin[[label]]
if(CI==TRUE){
colnames(est.touse) <- c("X","ME","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","ME")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(label,dim(est.touse)[1])
if(label==label.name[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest$Treatment <- factor(tempest$Treatment, levels = label.name)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = ME,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte, labels = subtitles)
if (CI == TRUE){
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = FALSE)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
k <- 1
for(label in label.name){
tempest2 <- as.data.frame(est.bin2[[label]])
tempest3 <- as.data.frame(est.bin3[[label]])
p1 <- p1+ geom_errorbar(data=tempest2, aes(x=x0, ymin=CI.lower, ymax=CI.upper),color = platte[k],
width= errorbar.width/3)+
geom_point(data=tempest2,aes(x=x0,y=coef),size=3,shape=21,fill = platte[k],color = platte[k])
if(dim(tempest3)[1]!=0){
p1 <- p1+geom_text(data=tempest3,aes(x=x0,y=0),label="NaN",color = platte[k])
}
k <- k+1
}
}
if (bin.labs == TRUE){
if(treat.type=='discrete'){
char0 <- other.treat[1]
}
if(treat.type=='continuous'){
char0 <- label.name[1]
}
if (nbins==3){
p1 <- p1 + annotate(geom="text", x=est.bin[[char0]][1,1], y=pos,
label="L",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][2,1], y=pos,
label="M",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][3,1], y=pos,
label="H",colour="gray50",size=10)
}
else if (nbins==4){
p1 <- p1 + annotate(geom="text", x=est.bin[[char0]][1,1], y=pos,
label="L",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][2,1], y=pos,
label="M1",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][3,1], y=pos,
label="M2",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][4,1], y=pos,
label="H",colour="gray50",size=10)
}
else if (nbins==2){
p1 <- p1 + annotate(geom="text", x=est.bin[[char0]][1,1], y=pos,
label="L",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][2,1], y=pos,
label="H",colour="gray50",size=10)
}
}
}
if(Xdistr == "density") { # density plot
if(treat.type=='discrete'){
## put in data frames
dist<-hist.out$mids[2]-hist.out$mids[1]
deX.ymin <- min(yrange)-maxdiff/5
deX.co <- data.frame(x = de.tr[[base]]$x,
y = de.tr[[base]]$y/max(de.tr[[base]]$y) * maxdiff/10 + min(yrange) - maxdiff/5)
## color
p1 <- p1 + geom_ribbon(data = deX.co, aes(x = x, ymax = y, ymin = deX.ymin),color=base.color,
fill = base.color, alpha = 0.0, size=0.3)
k <- 1
char0 <- other.treat[1]
start_level <- rep(deX.ymin,length(de.tr[[char0]]$x))
for(char in other.treat){
dex.tr.plot <- data.frame(x = de.tr[[char]]$x,
start_level = start_level,
end_level = de.tr[[char]]$y/max(de.tr[[char0]]$y)*maxdiff/10+start_level)
p1 <- p1+geom_ribbon(data = dex.tr.plot, aes(x = x, ymax = end_level, ymin = start_level), color=platte[k],
alpha = 0.0,fill = platte[k],size=0.3)
k <- k+1
}
p1 <- p1+geom_line(data = dex.tr.plot, aes(x = x, y = ymin), color='gray50',size=0.3)
}
if(treat.type=='continuous'){
deX.ymin <- min(yrange)-maxdiff/5
deX <- data.frame(x = de$x,
y = de$y/max(de$y) * maxdiff/5 + min(yrange) - maxdiff/5)
## color
feed.col<-col2rgb("gray50")
col<-rgb(feed.col[1]/1000, feed.col[2]/1000,feed.col[3]/1000)
p1 <- p1 + geom_ribbon(data = deX, aes(x = x, ymax = y, ymin = deX.ymin),
fill = col, alpha = 0.2)
}
}
if (Xdistr %in% c("histogram","hist")){ # histogram plot
if(treat.type=='discrete'){
n.hist<-length(hist.out$mids)
dist<-hist.out$mids[2]-hist.out$mids[1]
hist.max<-max(hist.out$counts)
hist.col<-data.frame(ymin=rep(min(yrange)-maxdiff/5,n.hist),
ymax=hist.out$counts/hist.max*maxdiff/5+min(yrange)-maxdiff/5,
xmin=hist.out$mids-dist/2,
xmax=hist.out$mids+dist/2,
count1=count.tr[[base]]/hist.max*maxdiff/5+min(yrange)-maxdiff/5)
p1 <- p1 + geom_rect(data=hist.col,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=count1),fill=base.color,color='gray50',
alpha=0.3,size=0.5)
k <- 1
start_level <- count.tr[[base]]/hist.max*maxdiff/5+min(yrange)-maxdiff/5
for (char in other.treat){
hist.treat<-data.frame(ymin=start_level,
ymax=count.tr[[char]]/hist.max*maxdiff/5+start_level,
xmin=hist.out$mids-dist/2,
xmax=hist.out$mids+dist/2)
start_level <- count.tr[[char]]/hist.max*maxdiff/5+start_level
p1 <- p1 + geom_rect(data=hist.treat,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),fill=platte[k],color='gray50',
alpha=0.5,size=0.5)
k <- k + 1
}
}
if(treat.type=='continuous'){
n.hist <- length(hist.out$mids)
dist <- hist.out$mids[2]-hist.out$mids[1]
hist.max <- max(hist.out$counts)
histX <- data.frame(ymin=rep(min(yrange)-maxdiff/5,n.hist),
ymax=hist.out$counts/hist.max*maxdiff/5+min(yrange)-maxdiff/5,
xmin=hist.out$mids-dist/2,
xmax=hist.out$mids+dist/2)
p1 <- p1 + geom_rect(data=histX,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),
colour="gray50",alpha=0.3,size=0.5)
}
}
## other properties
if(is.null(legend.title)==FALSE){
p1 <- p1 + labs(fill = legend.title,color = legend.title)
}
## mark the original interval (in replicated papers)
if (is.null(interval)==FALSE) {
p1<- p1 + geom_vline(xintercept=interval,colour="steelblue", linetype=2,size=1.5)
}
## Other universal options
## axis labels
if (is.null(cex.lab)==TRUE) {
cex.lab <- 15
} else {
cex.lab <- 15 * cex.lab
}
if (is.null(cex.axis)==TRUE) {
cex.axis <- 15
} else {
cex.axis <- 15 * cex.axis
}
p1 <- p1 + xlab(xlab) + ylab(ylab) +
theme(axis.text = element_text(size=cex.axis), axis.title = element_text(size=cex.lab))
## title
if (is.null(cex.main)==TRUE) {
cex.main <- 18
} else {
cex.main <- 18 * cex.main
}
if (is.null(cex.sub)==TRUE) {
cex.sub <- 10
} else {
cex.sub <- 10 * cex.sub
}
if (is.null(main)==FALSE) {
p1<-p1 + ggtitle(main) +
theme(plot.title = element_text(hjust = 0.5, size=cex.main,
lineheight=.8, face="bold"))
}
## xlim and ylim
if (is.null(ylim)==FALSE) {
ylim2 = c(ylim[1]-(ylim[2]-ylim[1])*0.25/6, ylim[2]+(ylim[2]-ylim[1])*0.4/6)
}
if (is.null(xlim)==FALSE & is.null(ylim)==FALSE) {
p1<-p1+coord_cartesian(xlim = xlim, ylim = ylim2)
}
if (is.null(xlim)==TRUE & is.null(ylim)==FALSE) {
p1<-p1+coord_cartesian(ylim = ylim2)
}
if (is.null(xlim)==FALSE & is.null(ylim)==TRUE) {
p1<-p1+coord_cartesian(xlim = xlim)
}
#legend
if(show.subtitles==TRUE){
if(treat.type=='discrete'){
p1_table <- ggplot_gtable(ggplot_build(p1))
data.touse3 <- data.frame(X=rep(1,length(all.treat)),ymin=-1,ymax=1,D=all.treat)
data.touse3$D <- factor(data.touse3$D,levels = all.treat)
p0 <- ggplot() + geom_ribbon(data=data.touse3, aes(x=X,ymin=ymin,ymax=ymax,fill=D),
alpha=0.3)
p0 <- p0 + scale_fill_manual(values = c(base.color,platte[1:length(other.treat)]),
labels = as.character(subtitles.all))
if(is.null(legend.title)==FALSE){
p0 <- p0 + labs(fill = legend.title,color = legend.title)
}else{p0 <- p0 + labs(fill = "Treatment",color = "Treatment")}
p0 <- p0 + theme(legend.title = element_text(colour="black", size=cex.sub),
legend.text = element_text(color = "black", size = cex.sub*0.95))
p0 <- p0 + xlab(xlab) + ylab(ylab) + theme(axis.text = element_text(size=cex.axis), axis.title = element_text(size=cex.lab))
if(is.null(main)==FALSE){
p0 <- p0 + ggtitle(main) + theme(plot.title = element_text(hjust = 0.5, size=cex.main,
lineheight=.8, face="bold"))
}
if (is.null(ylim)==FALSE) {
ylim2 = c(ylim[1]-(ylim[2]-ylim[1])*0.25/6, ylim[2]+(ylim[2]-ylim[1])*0.4/6)
}
if (is.null(xlim)==FALSE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(xlim = xlim, ylim = ylim2)
}
if (is.null(xlim)==TRUE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(ylim = ylim2)
}
if (is.null(xlim)==FALSE & is.null(ylim)==TRUE) {
p0<-p0+coord_cartesian(xlim = xlim)
}
y.limits <- layer_scales(p1)$y$range$range
x.limits <- layer_scales(p1)$x$range$range
ymaxmax <- y.limits[2]
yminmin <- y.limits[1]
xmaxmax <- x.limits[2]
xminmin <- x.limits[1]
suppressWarnings(
p0 <- p0+ylim(c(yminmin,ymaxmax))+xlim(c(xminmin,xmaxmax))
)
suppressWarnings(
p0 <- ggplot_gtable(ggplot_build(p0))
)
suppressWarnings(
pp <-c(subset(p0$layout, name == "panel", se=t:r))
)
gt <- gtable_add_grob(p0,
p1_table$grobs[[which(p1_table$layout$name == "panel")]],
pp$t,pp$l,pp$b,pp$l)
gt <- as.ggplot(gt)
p1 <- gt
}
if(treat.type=='continuous'){
p1_table <- ggplot_gtable(ggplot_build(p1))
data.touse3 <- data.frame(X=rep(1,length(label.name)),ymin=-1,ymax=1,D=label.name)
data.touse3$D <- factor(data.touse3$D,levels = label.name)
p0 <- ggplot() + geom_ribbon(data=data.touse3, aes(x=X,ymin=ymin,ymax=ymax,fill=D),
alpha=0.3)
p0 <- p0 + scale_fill_manual(values = platte[1:length(label.name)],
labels = as.character(subtitles.all))
if(is.null(legend.title)==FALSE){
p0 <- p0 + labs(fill = legend.title,color = legend.title)
}else{p0 <- p0 + labs(fill = "Treatment",color = "Treatment")}
p0 <- p0 + theme(legend.title = element_text(colour="black", size=cex.sub),
legend.text = element_text(color = "black", size = cex.sub*0.95))
p0 <- p0 + xlab(xlab) + ylab(ylab) + theme(axis.text = element_text(size=cex.axis), axis.title = element_text(size=cex.lab))
if(is.null(main)==FALSE){
p0 <- p0 + ggtitle(main) + theme(plot.title = element_text(hjust = 0.5, size=cex.main,
lineheight=.8, face="bold"))
}
## xlim and ylim
if (is.null(ylim)==FALSE) {
ylim2 = c(ylim[1]-(ylim[2]-ylim[1])*0.25/6, ylim[2]+(ylim[2]-ylim[1])*0.4/6)
}
if (is.null(xlim)==FALSE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(xlim = xlim, ylim = ylim2)
}
if (is.null(xlim)==TRUE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(ylim = ylim2)
}
if (is.null(xlim)==FALSE & is.null(ylim)==TRUE) {
p0<-p0+coord_cartesian(xlim = xlim)
}
y.limits <- layer_scales(p1)$y$range$range
x.limits <- layer_scales(p1)$x$range$range
ymaxmax <- y.limits[2]
yminmin <- y.limits[1]
xmaxmax <- x.limits[2]
xminmin <- x.limits[1]
suppressWarnings(
p0 <- p0+ylim(c(yminmin,ymaxmax))+xlim(c(xminmin,xmaxmax))
)
suppressWarnings(
p0 <- ggplot_gtable(ggplot_build(p0))
)
suppressWarnings(
pp <-c(subset(p0$layout, name == "panel", se=t:r))
)
gt <- gtable_add_grob(p0,
p1_table$grobs[[which(p1_table$layout$name == "panel")]],
pp$t,pp$l,pp$b,pp$l)
gt <- as.ggplot(gt)
p1 <- gt
}
}
## save to file
if (is.null(file)==FALSE) {
ggsave(file, p1,scale = scale,width=width,height = height)
}
return(p1)
} | /R/plot_pool.R | permissive | xuyiqing/interflex | R | false | false | 34,197 | r | interflex.plot.pool <- function( # only for discrete treatments
out,
diff.values = NULL,
order = NULL,
subtitles = NULL,
show.subtitles = NULL,
legend.title = NULL,
CI = TRUE,
Xdistr = "histogram",
main = NULL,
Ylabel = NULL,
Dlabel = NULL,
Xlabel = NULL,
xlab = NULL,
ylab = NULL,
xlim = NULL,
ylim = NULL,
theme.bw = FALSE,
show.grid = TRUE,
cex.main = NULL,
cex.lab = NULL,
cex.axis = NULL,
cex.sub = NULL,
bin.labs = TRUE, # bin labels
interval = NULL, # interval in replicated papers
color = NULL,
file = NULL,
scale = 1.1,
height = 7,
width = 10
){
X <- NULL
TE <- NULL
Treatment <- NULL
CI_lower <- NULL
CI_upper <- NULL
ME <- NULL
x0 <- NULL
CI.lower <- NULL
CI.upper <- NULL
x <- NULL
y <- NULL
end_level <- NULL
xmin <- NULL
xmax <- NULL
count1 <- NULL
ymax <- NULL
D <- NULL
name <- NULL
r <- NULL
if (!class(out) %in% c("interflex")) {
stop("Not an \"interflex\" object.")
}
treat.info <- out$treat.info
treat.type <- treat.info[["treat.type"]]
if(treat.type=='discrete'){
other.treat <- names(treat.info[["other.treat"]])
all.treat <- names(treat.info[["all.treat"]])
base <- names(treat.info[["base"]])
}
if(treat.type=='continuous'){
D.sample <- treat.info[["D.sample"]]
label.name <- names(D.sample)
}
de <- out$de
de.tr <- out$de.tr
hist.out <- out$hist.out
count.tr <- out$count.tr
estimator <- out$estimator
if(is.null(show.subtitles)==FALSE){
if (is.logical(show.subtitles) == FALSE & is.numeric(show.subtitles)==FALSE) {
stop("\"show.subtitles\" is not a logical flag.")
}
}else{
show.subtitles <- TRUE
}
# CI
if(is.null(CI)==FALSE){
if(is.logical(CI) == FALSE & is.numeric(CI)==FALSE) {
stop("\"CI\" is not a logical flag.")
}
if(estimator=='kernel'){
if(CI==TRUE & out$CI==FALSE){
stop("Confidence intervals are not estimated, please set CI to FALSE.")
}
}
}
if(estimator=='kernel'){
if(is.null(CI)==TRUE){
CI <- out$CI
}
}
if(estimator=='binning'|estimator=='linear'){
if(is.null(CI)==TRUE){
CI <- TRUE
}
}
# Xdistr
if (!Xdistr %in% c("hist","histogram","density","none")){
stop("\"Xdistr\" must be \"histogram\", \"density\", or \"none\".")
}
#main
if (is.null(main)==FALSE) {
main <- as.character(main)[1]
}
#Ylabel
if (is.null(Ylabel)==TRUE) {
Ylabel <- out$Ylabel
} else {
if (is.character(Ylabel) == FALSE) {
stop("\"Ylabel\" is not a string.")
} else {
Ylabel <- Ylabel[1]
}
}
#Dlabel
if (is.null(Dlabel)==TRUE) {
Dlabel <- out$Dlabel
} else {
if (is.character(Dlabel) == FALSE) {
stop("\"Dlabel\" is not a string.")
} else {
Dlabel <- Dlabel[1]
}
}
#Xlabel
if (is.null(Xlabel)==TRUE) {
Xlabel <- out$Xlabel
} else {
if (is.character(Xlabel) == FALSE) {
stop("\"Xlabel\" is not a string.")
} else {
Xlabel <- Xlabel[1]
}
}
## axis labels
if(is.null(xlab)==FALSE){
if (is.character(xlab) == FALSE) {
stop("\"xlab\" is not a string.")
}
}
if(is.null(ylab)==FALSE){
if (is.character(ylab) == FALSE) {
stop("\"ylab\" is not a string.")
}
}
if(is.null(xlab)==TRUE){
xlab<-c(paste("Moderator: ", Xlabel, sep=""))
} else {
if (is.character(xlab) == FALSE) {
stop("\"xlab\" is not a string.")
}
}
if(is.null(ylab)==TRUE){
ylab<-c(paste("Marginal Effect of ",Dlabel," on ",Ylabel,sep=""))
} else {
if (is.character(ylab) == FALSE) {
stop("\"ylab\" is not a string.")
}
}
## xlim ylim
if (is.null(xlim)==FALSE) {
if (is.numeric(xlim)==FALSE) {
stop("Some element in \"xlim\" is not numeric.")
} else {
if (length(xlim)!=2) {
stop("\"xlim\" must be of length 2.")
}
}
}
if (is.null(ylim)==FALSE) {
if (is.numeric(ylim)==FALSE) {
stop("Some element in \"ylim\" is not numeric.")
} else {
if (length(ylim)!=2) {
stop("\"ylim\" must be of length 2.")
}
}
}
## theme.bw
if (is.logical(theme.bw) == FALSE & is.numeric(theme.bw)==FALSE) {
stop("\"theme.bw\" is not a logical flag.")
}
## show.grid
if (is.logical(show.grid) == FALSE & is.numeric(show.grid)==FALSE) {
stop("\"show.grid\" is not a logical flag.")
}
## font size
if (is.null(cex.main)==FALSE) {
if (is.numeric(cex.main)==FALSE) {
stop("\"cex.main\" is not numeric.")
}
}
if (is.null(cex.sub)==FALSE) {
if (is.numeric(cex.sub)==FALSE) {
stop("\"cex.sub\" is not numeric.")
}
}
if (is.null(cex.lab)==FALSE) {
if (is.numeric(cex.lab)==FALSE) {
stop("\"cex.lab\" is not numeric.")
}
}
if (is.null(cex.axis)==FALSE) {
if (is.numeric(cex.axis)==FALSE) {
stop("\"cex.axis\" is not numeric.")
}
}
## bin.labs
if (is.logical(bin.labs) == FALSE & is.numeric(bin.labs)==FALSE) {
stop("\"bin.labs\" is not a logical flag.")
}
## interval
if (is.null(interval)==FALSE) {
if (is.numeric(interval)==FALSE) {
stop("Some element in \"interval\" is not numeric.")
}
}
## file
if (is.null(file)==FALSE) {
if (is.character(file)==FALSE) {
stop("Wrong file name.")
}
}
## color
if(is.null(color)==FALSE){
color <- as.character(color)
color.in <- c()
for(char in color){
res <- try(col2rgb(char),silent=TRUE)
if(!"try-error"%in%class(res)){
color.in <- c(color.in,char)
}else{stop(paste0(char," is not one name for a color.\n"))}
}
color <- color.in
}
if (is.null(legend.title)==FALSE){
legend.title <- as.character(legend.title)[1]
}
if(treat.type=='discrete' & (estimator=='linear'|estimator=='binning')){
tempxx <- out$est.lin[[other.treat[1]]][,'X']
}
if(treat.type=='discrete' & estimator=='kernel'){
tempxx <- out$est.kernel[[other.treat[1]]][,'X']
}
if(treat.type=='continuous' & (estimator=='linear'|estimator=='binning')){
tempxx <- out$est.lin[[label.name[1]]][,'X']
}
if(treat.type=='continuous' & estimator=='kernel'){
tempxx <- out$est.kernel[[label.name[1]]][,'X']
}
min.XX <- min(tempxx)
max.XX <- max(tempxx)
## order/subtitles
if(treat.type=='discrete') {
other.treat <- sort(all.treat[which(all.treat!=base)])
if(is.null(order)==FALSE){
order <- as.character(order)
if(length(order)!=length(unique(order))){
stop("\"order\" should not contain repeated values.")
}
if(length(order)!=length(other.treat)){
stop("\"order\" should include all kinds of treatment arms except for the baseline group.")
}
if(sum(!is.element(order,other.treat))!=0 | sum(!is.element(other.treat,order))!=0){
stop("\"order\" should include all kinds of treatment arms except for the baseline group.")
}
other.treat <- order
}
if(is.null(show.subtitles)==TRUE){
show.subtitles <- TRUE
}
if(is.null(subtitles)==FALSE){
if(length(subtitles)!=length(all.treat)){
stop("The number of elements in \"subtitles\" should be m(m is the number of different treatment arms including the baseline group).")
}
}
if(is.null(subtitles)==TRUE){
base.name <- paste0("Base Group (",base,")")
subtitles <- c(base.name,other.treat)
}
else{base.name <- subtitles[1]}
subtitles.all <- as.character(subtitles)
subtitles <- subtitles.all[2:length(subtitles.all)]
}
if(treat.type=='continuous'){
if(is.null(order)==FALSE){
if(is.numeric(order)==FALSE){
stop("\"order\" should be numeric.")
}
if(length(order)!=length(unique(order))){
stop("\"order\" should not contain repeated values.")
}
if(length(order)!=length(D.sample)){
stop("\"order\" should contain all reference values of D.")
}
if(sum(!is.element(order,D.sample))!=0 | sum(!is.element(D.sample,order))!=0){
stop("\"order\" should contain all reference values of D.")
}
label.name.order <- c()
for(a in order){
label.name.order <- c(label.name.order,names(D.sample[which(D.sample==a)]))
}
label.name <- label.name.order
}
if(is.null(show.subtitles)==TRUE){
show.subtitles <- TRUE
}
if(is.null(subtitles)==FALSE){
if(length(subtitles)!=length(label.name)){
stop("The number of elements in \"subtitles\" should equal to the number of values in D.ref.")
}
}
if(is.null(subtitles)==TRUE){
subtitles <- label.name
}
subtitles.all <- subtitles
}
if(is.null(diff.values)==FALSE){
if(estimator=='binning'){
stop("\"diff.values\" can only work after linear or kernel model is applied.")
}
if(is.numeric(diff.values)==FALSE){
stop("\"diff.values\" is not numeric.")
}
if(length(diff.values)<2){
stop("\"diff.values\" must be of length 2 or more.")
}
if(treat.type=='discrete' & estimator=='linear'){
tempxx <- out$est.lin[[other.treat[1]]][,'X']
}
if(treat.type=='discrete' & estimator=='kernel'){
tempxx <- out$est.kernel[[other.treat[1]]][,'X']
}
if(treat.type=='continuous' & estimator=='linear'){
tempxx <- out$est.lin[[label.name[1]]][,'X']
}
if(treat.type=='continuous' & estimator=='kernel'){
tempxx <- out$est.kernel[[label.name[1]]][,'X']
}
min.XX <- min(tempxx)
max.XX <- max(tempxx)
for(a in diff.values){
if(a<min.XX|a>max.XX){
stop("Elements in \"diff.values\" should be within the range of the moderator.")
}
}
}else{
if(estimator=='binning'){
diff.values <- NULL
}
else{
#diff.values <- out$diff.info[["diff.values.plot"]]
diff.values <- NULL
}
}
#yrange
if(estimator=='binning'){
nbins <- out$nbins
if(treat.type=='discrete'){
est.lin <- out$est.lin
est.bin <- out$est.bin
est.bin2 <- list() ## non missing part
est.bin3 <- list() ## missing part
yrange <- c(0)
for(char in other.treat) {
est.bin2[[char]] <- as.matrix(est.bin[[char]][which(is.na(est.bin[[char]][,2])==FALSE),])
est.bin3[[char]] <- as.matrix(est.bin[[char]][which(is.na(est.bin[[char]][,2])==TRUE),])
if(dim(est.bin2[[char]])[2]==1){
est.bin2[[char]] <- t(est.bin2[[char]])
}
if(dim(est.bin3[[char]])[2]==1){
est.bin3[[char]] <- t(est.bin3[[char]])
}
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,c(4,5)],est.bin[[char]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,2],est.bin[[char]][,2]))))
}
}
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
X.lvls <- est.lin[[other.treat[1]]][,1]
errorbar.width<-(max(X.lvls)-min(X.lvls))/20
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
}
if (treat.type=='continuous'){
est.lin <- out$est.lin
est.bin<-out$est.bin
est.bin2 <- list() ## non missing part
est.bin3 <- list() ## missing part
yrange <- c(0)
for(label in label.name){
est.bin2[[label]] <- as.matrix(est.bin[[label]][which(is.na(est.bin[[label]][,2])==FALSE),])
est.bin3[[label]] <- as.matrix(est.bin[[label]][which(is.na(est.bin[[label]][,2])==TRUE),])
if(dim(est.bin2[[label]])[2]==1){
est.bin2[[label]] <- t(est.bin2[[label]])
}
if(dim(est.bin3[[label]])[2]==1){
est.bin3[[label]] <- t(est.bin3[[label]])
}
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,c(4,5)],est.bin[[label]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,2],est.bin[[label]][,2]))))
}
}
X.lvls <- est.lin[[label.name[1]]][,1]
errorbar.width<-(max(X.lvls)-min(X.lvls))/20
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
}
ymin <- min(yrange)-maxdiff/5
}
if(estimator=='linear'){
if(treat.type=='discrete'){
est.lin <- out$est.lin
yrange <- c(0)
for(char in other.treat) {
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[char]][,2]))))
}
}
X.lvls <- est.lin[[other.treat[1]]][,1]
}
if(treat.type=='continuous'){
est.lin <- out$est.lin
yrange <- c(0)
for(label in label.name){
if(CI==TRUE){
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,c(4,5)]))))
}else{
yrange <- c(yrange,na.omit(unlist(c(est.lin[[label]][,2]))))
}
}
X.lvls <- est.lin[[label.name[1]]][,1]
}
errorbar.width <- (max(X.lvls)-min(X.lvls))/20
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
ymin <- min(yrange)-maxdiff/5
}
if(estimator=='kernel'){
est.kernel <- out$est.kernel
yrange <- c(0)
if(CI==FALSE){
if(treat.type=='discrete'){
for(char in other.treat) {
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[char]][,2]))))
}
X.lvls <- est.kernel[[other.treat[1]]][,1]
}
if (treat.type=='continuous'){
for(label in label.name){
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[label]][,2]))))
}
X.lvls <- est.kernel[[label.name[1]]][,1]
}
}
if(CI==TRUE){
if(treat.type=='discrete'){
for(char in other.treat) {
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[char]][,c(4,5)]))))
}
X.lvls <- est.kernel[[other.treat[1]]][,1]
}
if (treat.type=='continuous'){
for(label in label.name){
yrange <- c(yrange,na.omit(unlist(c(est.kernel[[label]][,c(4,5)]))))
}
X.lvls <- est.kernel[[label.name[1]]][,1]
}
}
if (is.null(ylim)==FALSE) {yrange<-c(ylim[2],ylim[1]+(ylim[2]-ylim[1])*1/8)}
errorbar.width<-(max(X.lvls)-min(X.lvls))/20
maxdiff<-(max(yrange)-min(yrange))
pos<-max(yrange)-maxdiff/20
ymin <- min(yrange)-maxdiff/5
}
#color
#get base.color & platte(for discrete data)
requireNamespace("RColorBrewer")
platte <- brewer.pal(n=8, "Set2")
if(is.null(color)==TRUE){
base.color <- 'gray50'
}
if(is.null(color)==FALSE){
if(treat.type=='discrete'){
base.color <- color[1]
if(length(color)==1){
platte <- platte
}else{platte <- c(color[2:length(color)],platte)}
}
if(treat.type=='continuous'){
platte <- c(color,platte)
}
}
if(treat.type=='discrete'){
num.treat <- length(other.treat)
}
if(treat.type=='continuous'){
num.treat <- length(label.name)
}
platte <- platte[1:num.treat]
# initialize
p1 <- ggplot()
## black white theme and mark zero
if (theme.bw == FALSE) {
p1 <- p1 + geom_hline(yintercept=0,colour="white",size=2)
} else {
p1 <- p1 + theme_bw() + geom_hline(yintercept=0,colour="#AAAAAA50",size=2)
}
if (show.grid == FALSE) {
p1 <- p1 + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
}
if(estimator=='kernel'|estimator=='linear'){
if(estimator=='kernel'){
est <- est.kernel
}else{est <- est.lin}
if(treat.type=='discrete'){
for(char in other.treat){
est.touse <- est[[char]]
if(CI==TRUE){
colnames(est.touse) <- c("X","TE","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","TE")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(char,dim(est.touse)[1])
if(char==other.treat[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest <- as.data.frame(tempest)
tempest$Treatment <- factor(tempest$Treatment, levels = other.treat)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = TE,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte,labels = subtitles)
if (CI == TRUE) {
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = F)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
if(is.null(diff.values)==FALSE){
k <- 1
for(char in other.treat){
tempest <- est[[char]]
for(target.value in diff.values){
Xnew<-abs(tempest[,'X']-target.value)
d1<-min(Xnew)
label1<-which.min(Xnew)
Xnew[label1]<-Inf
d2<-min(Xnew)
label2<-which.min(Xnew)
if(d1==0){
est.mark <- tempest[label1,2]
if(CI==TRUE){
lb.mark <- tempest[label1,4]
ub.mark <- tempest[label1,5]
}
}
else if(d2==0){
est.mark <- tempest[label2,2]
if(CI==TRUE){
lb.mark <- tempest[label2,4]
ub.mark <- tempest[label2,5]
}
}
else{ ## weighted average
est.mark1 <- tempest[label1,2]
est.mark2 <- tempest[label2,2]
est.mark <- ((est.mark1 * d2 + est.mark2 * d1)/(d1 + d2))
if(CI==TRUE){
lb.mark1 <- tempest[label1,4]
ub.mark1 <- tempest[label1,5]
lb.mark2 <- tempest[label2,4]
ub.mark2 <- tempest[label2,5]
lb.mark <- ((lb.mark1 * d2 + lb.mark2 * d1)/(d1 + d2))
ub.mark <- ((ub.mark1 * d2 + ub.mark2 * d1)/(d1 + d2))
}
}
p1 <- p1 + annotate("point",x=target.value,y=est.mark,size=1,colour=platte[k])
if(CI==TRUE){
p1 <- p1+ annotate("errorbar",x=target.value,ymin=lb.mark,ymax=ub.mark,colour=platte[k],size=0.5,width= (max(tempxx)-min(tempxx))/20)
}
}
k <- k + 1
}
}
}
if(treat.type=='continuous'){
for(label in label.name){
est.touse <- est[[label]]
if(CI==TRUE){
colnames(est.touse) <- c("X","ME","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","ME")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(label,dim(est.touse)[1])
if(label==label.name[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest <- as.data.frame(tempest)
tempest$Treatment <- factor(tempest$Treatment, levels = label.name)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = ME,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte,labels = subtitles)
if (CI == TRUE) {
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = FALSE)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
if(is.null(diff.values)==FALSE){
k <- 1
for(label in label.name){
tempest <- est[[label]]
for(target.value in diff.values){
Xnew<-abs(tempest[,'X']-target.value)
d1<-min(Xnew)
label1<-which.min(Xnew)
Xnew[label1]<-Inf
d2<-min(Xnew)
label2<-which.min(Xnew)
if(d1==0){
est.mark <- tempest[label1,2]
if(CI==TRUE){
lb.mark <- tempest[label1,4]
ub.mark <- tempest[label1,5]
}
}
else if(d2==0){
est.mark <- tempest[label2,2]
if(CI==TRUE){
lb.mark <- tempest[label2,4]
ub.mark <- tempest[label2,5]
}
}
else{ ## weighted average
est.mark1 <- tempest[label1,2]
est.mark2 <- tempest[label2,2]
est.mark <- ((est.mark1 * d2 + est.mark2 * d1)/(d1 + d2))
if(CI==TRUE){
lb.mark1 <- tempest[label1,4]
ub.mark1 <- tempest[label1,5]
lb.mark2 <- tempest[label2,4]
ub.mark2 <- tempest[label2,5]
lb.mark <- ((lb.mark1 * d2 + lb.mark2 * d1)/(d1 + d2))
ub.mark <- ((ub.mark1 * d2 + ub.mark2 * d1)/(d1 + d2))
}
}
p1 <- p1 + annotate("point",x=target.value,y=est.mark,size=1,colour=platte[k])
if(CI==TRUE){
p1 <- p1+ annotate("errorbar",x=target.value,ymin=lb.mark,ymax=ub.mark,colour=platte[k],size=0.5,width= (max(tempxx)-min(tempxx))/20)
}
}
k <- k + 1
}
}
}
}
if(estimator=='binning'){
#est <- est.lin
if(treat.type=='discrete'){
for(char in other.treat){
est.touse <- est.lin[[char]]
if(CI==TRUE){
colnames(est.touse) <- c("X","TE","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","TE")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(char,dim(est.touse)[1])
if(char==other.treat[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest$Treatment <- factor(tempest$Treatment, levels = other.treat)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = TE,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte, labels = subtitles)
if (CI == TRUE){
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = FALSE)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
k <- 1
for(char in other.treat){
tempest2 <- as.data.frame(est.bin2[[char]])
tempest3 <- as.data.frame(est.bin3[[char]])
p1 <- p1+ geom_errorbar(data=tempest2, aes(x=x0, ymin=CI.lower, ymax=CI.upper),color = platte[k],
width= errorbar.width/3)+
geom_point(data=tempest2,aes(x=x0,y=coef),size=3,shape=21,fill = platte[k],color = platte[k])
if(dim(tempest3)[1]!=0){
p1 <- p1+geom_text(data=tempest3,aes(x=x0,y=0),label="NaN",color = platte[k])
}
k <- k+1
}
}
if(treat.type=='continuous'){
for(label in label.name){
est.touse <- est.lin[[label]]
if(CI==TRUE){
colnames(est.touse) <- c("X","ME","sd","CI_lower","CI_upper")
}
if(CI==FALSE){
est.touse <- est.touse[,c(1,2)]
colnames(est.touse) <- c("X","ME")
}
est.touse <- as.data.frame(est.touse)
est.touse[['Treatment']] <- rep(label,dim(est.touse)[1])
if(label==label.name[1]){
tempest <- est.touse
}else{
tempest <- rbind(tempest,est.touse)
}
}
tempest$Treatment <- factor(tempest$Treatment, levels = label.name)
p1 <- p1 + geom_line(data=tempest,aes(x = X,y = ME,color = Treatment),show.legend = FALSE)
p1 <- p1 + scale_color_manual(values = platte, labels = subtitles)
if (CI == TRUE){
p1 <- p1 + geom_ribbon(data=tempest, aes(x=X,ymin=CI_lower,ymax=CI_upper,fill = Treatment),alpha=0.2,show.legend = FALSE)
p1 <- p1 + scale_fill_manual(values = platte,labels = subtitles)
}
k <- 1
for(label in label.name){
tempest2 <- as.data.frame(est.bin2[[label]])
tempest3 <- as.data.frame(est.bin3[[label]])
p1 <- p1+ geom_errorbar(data=tempest2, aes(x=x0, ymin=CI.lower, ymax=CI.upper),color = platte[k],
width= errorbar.width/3)+
geom_point(data=tempest2,aes(x=x0,y=coef),size=3,shape=21,fill = platte[k],color = platte[k])
if(dim(tempest3)[1]!=0){
p1 <- p1+geom_text(data=tempest3,aes(x=x0,y=0),label="NaN",color = platte[k])
}
k <- k+1
}
}
if (bin.labs == TRUE){
if(treat.type=='discrete'){
char0 <- other.treat[1]
}
if(treat.type=='continuous'){
char0 <- label.name[1]
}
if (nbins==3){
p1 <- p1 + annotate(geom="text", x=est.bin[[char0]][1,1], y=pos,
label="L",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][2,1], y=pos,
label="M",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][3,1], y=pos,
label="H",colour="gray50",size=10)
}
else if (nbins==4){
p1 <- p1 + annotate(geom="text", x=est.bin[[char0]][1,1], y=pos,
label="L",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][2,1], y=pos,
label="M1",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][3,1], y=pos,
label="M2",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][4,1], y=pos,
label="H",colour="gray50",size=10)
}
else if (nbins==2){
p1 <- p1 + annotate(geom="text", x=est.bin[[char0]][1,1], y=pos,
label="L",colour="gray50",size=10) +
annotate(geom="text", x=est.bin[[char0]][2,1], y=pos,
label="H",colour="gray50",size=10)
}
}
}
if(Xdistr == "density") { # density plot
if(treat.type=='discrete'){
## put in data frames
dist<-hist.out$mids[2]-hist.out$mids[1]
deX.ymin <- min(yrange)-maxdiff/5
deX.co <- data.frame(x = de.tr[[base]]$x,
y = de.tr[[base]]$y/max(de.tr[[base]]$y) * maxdiff/10 + min(yrange) - maxdiff/5)
## color
p1 <- p1 + geom_ribbon(data = deX.co, aes(x = x, ymax = y, ymin = deX.ymin),color=base.color,
fill = base.color, alpha = 0.0, size=0.3)
k <- 1
char0 <- other.treat[1]
start_level <- rep(deX.ymin,length(de.tr[[char0]]$x))
for(char in other.treat){
dex.tr.plot <- data.frame(x = de.tr[[char]]$x,
start_level = start_level,
end_level = de.tr[[char]]$y/max(de.tr[[char0]]$y)*maxdiff/10+start_level)
p1 <- p1+geom_ribbon(data = dex.tr.plot, aes(x = x, ymax = end_level, ymin = start_level), color=platte[k],
alpha = 0.0,fill = platte[k],size=0.3)
k <- k+1
}
p1 <- p1+geom_line(data = dex.tr.plot, aes(x = x, y = ymin), color='gray50',size=0.3)
}
if(treat.type=='continuous'){
deX.ymin <- min(yrange)-maxdiff/5
deX <- data.frame(x = de$x,
y = de$y/max(de$y) * maxdiff/5 + min(yrange) - maxdiff/5)
## color
feed.col<-col2rgb("gray50")
col<-rgb(feed.col[1]/1000, feed.col[2]/1000,feed.col[3]/1000)
p1 <- p1 + geom_ribbon(data = deX, aes(x = x, ymax = y, ymin = deX.ymin),
fill = col, alpha = 0.2)
}
}
if (Xdistr %in% c("histogram","hist")){ # histogram plot
if(treat.type=='discrete'){
n.hist<-length(hist.out$mids)
dist<-hist.out$mids[2]-hist.out$mids[1]
hist.max<-max(hist.out$counts)
hist.col<-data.frame(ymin=rep(min(yrange)-maxdiff/5,n.hist),
ymax=hist.out$counts/hist.max*maxdiff/5+min(yrange)-maxdiff/5,
xmin=hist.out$mids-dist/2,
xmax=hist.out$mids+dist/2,
count1=count.tr[[base]]/hist.max*maxdiff/5+min(yrange)-maxdiff/5)
p1 <- p1 + geom_rect(data=hist.col,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=count1),fill=base.color,color='gray50',
alpha=0.3,size=0.5)
k <- 1
start_level <- count.tr[[base]]/hist.max*maxdiff/5+min(yrange)-maxdiff/5
for (char in other.treat){
hist.treat<-data.frame(ymin=start_level,
ymax=count.tr[[char]]/hist.max*maxdiff/5+start_level,
xmin=hist.out$mids-dist/2,
xmax=hist.out$mids+dist/2)
start_level <- count.tr[[char]]/hist.max*maxdiff/5+start_level
p1 <- p1 + geom_rect(data=hist.treat,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),fill=platte[k],color='gray50',
alpha=0.5,size=0.5)
k <- k + 1
}
}
if(treat.type=='continuous'){
n.hist <- length(hist.out$mids)
dist <- hist.out$mids[2]-hist.out$mids[1]
hist.max <- max(hist.out$counts)
histX <- data.frame(ymin=rep(min(yrange)-maxdiff/5,n.hist),
ymax=hist.out$counts/hist.max*maxdiff/5+min(yrange)-maxdiff/5,
xmin=hist.out$mids-dist/2,
xmax=hist.out$mids+dist/2)
p1 <- p1 + geom_rect(data=histX,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),
colour="gray50",alpha=0.3,size=0.5)
}
}
## other properties
if(is.null(legend.title)==FALSE){
p1 <- p1 + labs(fill = legend.title,color = legend.title)
}
## mark the original interval (in replicated papers)
if (is.null(interval)==FALSE) {
p1<- p1 + geom_vline(xintercept=interval,colour="steelblue", linetype=2,size=1.5)
}
## Other universal options
## axis labels
if (is.null(cex.lab)==TRUE) {
cex.lab <- 15
} else {
cex.lab <- 15 * cex.lab
}
if (is.null(cex.axis)==TRUE) {
cex.axis <- 15
} else {
cex.axis <- 15 * cex.axis
}
p1 <- p1 + xlab(xlab) + ylab(ylab) +
theme(axis.text = element_text(size=cex.axis), axis.title = element_text(size=cex.lab))
## title
if (is.null(cex.main)==TRUE) {
cex.main <- 18
} else {
cex.main <- 18 * cex.main
}
if (is.null(cex.sub)==TRUE) {
cex.sub <- 10
} else {
cex.sub <- 10 * cex.sub
}
if (is.null(main)==FALSE) {
p1<-p1 + ggtitle(main) +
theme(plot.title = element_text(hjust = 0.5, size=cex.main,
lineheight=.8, face="bold"))
}
## xlim and ylim
if (is.null(ylim)==FALSE) {
ylim2 = c(ylim[1]-(ylim[2]-ylim[1])*0.25/6, ylim[2]+(ylim[2]-ylim[1])*0.4/6)
}
if (is.null(xlim)==FALSE & is.null(ylim)==FALSE) {
p1<-p1+coord_cartesian(xlim = xlim, ylim = ylim2)
}
if (is.null(xlim)==TRUE & is.null(ylim)==FALSE) {
p1<-p1+coord_cartesian(ylim = ylim2)
}
if (is.null(xlim)==FALSE & is.null(ylim)==TRUE) {
p1<-p1+coord_cartesian(xlim = xlim)
}
#legend
if(show.subtitles==TRUE){
if(treat.type=='discrete'){
p1_table <- ggplot_gtable(ggplot_build(p1))
data.touse3 <- data.frame(X=rep(1,length(all.treat)),ymin=-1,ymax=1,D=all.treat)
data.touse3$D <- factor(data.touse3$D,levels = all.treat)
p0 <- ggplot() + geom_ribbon(data=data.touse3, aes(x=X,ymin=ymin,ymax=ymax,fill=D),
alpha=0.3)
p0 <- p0 + scale_fill_manual(values = c(base.color,platte[1:length(other.treat)]),
labels = as.character(subtitles.all))
if(is.null(legend.title)==FALSE){
p0 <- p0 + labs(fill = legend.title,color = legend.title)
}else{p0 <- p0 + labs(fill = "Treatment",color = "Treatment")}
p0 <- p0 + theme(legend.title = element_text(colour="black", size=cex.sub),
legend.text = element_text(color = "black", size = cex.sub*0.95))
p0 <- p0 + xlab(xlab) + ylab(ylab) + theme(axis.text = element_text(size=cex.axis), axis.title = element_text(size=cex.lab))
if(is.null(main)==FALSE){
p0 <- p0 + ggtitle(main) + theme(plot.title = element_text(hjust = 0.5, size=cex.main,
lineheight=.8, face="bold"))
}
if (is.null(ylim)==FALSE) {
ylim2 = c(ylim[1]-(ylim[2]-ylim[1])*0.25/6, ylim[2]+(ylim[2]-ylim[1])*0.4/6)
}
if (is.null(xlim)==FALSE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(xlim = xlim, ylim = ylim2)
}
if (is.null(xlim)==TRUE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(ylim = ylim2)
}
if (is.null(xlim)==FALSE & is.null(ylim)==TRUE) {
p0<-p0+coord_cartesian(xlim = xlim)
}
y.limits <- layer_scales(p1)$y$range$range
x.limits <- layer_scales(p1)$x$range$range
ymaxmax <- y.limits[2]
yminmin <- y.limits[1]
xmaxmax <- x.limits[2]
xminmin <- x.limits[1]
suppressWarnings(
p0 <- p0+ylim(c(yminmin,ymaxmax))+xlim(c(xminmin,xmaxmax))
)
suppressWarnings(
p0 <- ggplot_gtable(ggplot_build(p0))
)
suppressWarnings(
pp <-c(subset(p0$layout, name == "panel", se=t:r))
)
gt <- gtable_add_grob(p0,
p1_table$grobs[[which(p1_table$layout$name == "panel")]],
pp$t,pp$l,pp$b,pp$l)
gt <- as.ggplot(gt)
p1 <- gt
}
if(treat.type=='continuous'){
p1_table <- ggplot_gtable(ggplot_build(p1))
data.touse3 <- data.frame(X=rep(1,length(label.name)),ymin=-1,ymax=1,D=label.name)
data.touse3$D <- factor(data.touse3$D,levels = label.name)
p0 <- ggplot() + geom_ribbon(data=data.touse3, aes(x=X,ymin=ymin,ymax=ymax,fill=D),
alpha=0.3)
p0 <- p0 + scale_fill_manual(values = platte[1:length(label.name)],
labels = as.character(subtitles.all))
if(is.null(legend.title)==FALSE){
p0 <- p0 + labs(fill = legend.title,color = legend.title)
}else{p0 <- p0 + labs(fill = "Treatment",color = "Treatment")}
p0 <- p0 + theme(legend.title = element_text(colour="black", size=cex.sub),
legend.text = element_text(color = "black", size = cex.sub*0.95))
p0 <- p0 + xlab(xlab) + ylab(ylab) + theme(axis.text = element_text(size=cex.axis), axis.title = element_text(size=cex.lab))
if(is.null(main)==FALSE){
p0 <- p0 + ggtitle(main) + theme(plot.title = element_text(hjust = 0.5, size=cex.main,
lineheight=.8, face="bold"))
}
## xlim and ylim
if (is.null(ylim)==FALSE) {
ylim2 = c(ylim[1]-(ylim[2]-ylim[1])*0.25/6, ylim[2]+(ylim[2]-ylim[1])*0.4/6)
}
if (is.null(xlim)==FALSE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(xlim = xlim, ylim = ylim2)
}
if (is.null(xlim)==TRUE & is.null(ylim)==FALSE) {
p0<-p0+coord_cartesian(ylim = ylim2)
}
if (is.null(xlim)==FALSE & is.null(ylim)==TRUE) {
p0<-p0+coord_cartesian(xlim = xlim)
}
y.limits <- layer_scales(p1)$y$range$range
x.limits <- layer_scales(p1)$x$range$range
ymaxmax <- y.limits[2]
yminmin <- y.limits[1]
xmaxmax <- x.limits[2]
xminmin <- x.limits[1]
suppressWarnings(
p0 <- p0+ylim(c(yminmin,ymaxmax))+xlim(c(xminmin,xmaxmax))
)
suppressWarnings(
p0 <- ggplot_gtable(ggplot_build(p0))
)
suppressWarnings(
pp <-c(subset(p0$layout, name == "panel", se=t:r))
)
gt <- gtable_add_grob(p0,
p1_table$grobs[[which(p1_table$layout$name == "panel")]],
pp$t,pp$l,pp$b,pp$l)
gt <- as.ggplot(gt)
p1 <- gt
}
}
## save to file
if (is.null(file)==FALSE) {
ggsave(file, p1,scale = scale,width=width,height = height)
}
return(p1)
} |
#' Gets Zendesk users via increment API
#'
#' Pulls user records from the incremental export endpoint (per the
#' `start.time` cursor) and extracts a fixed set of fields from the raw
#' response.
#'
#' @export
#' @param subdomain organisation subdomain on zendesk
#' @param start.time starting point for new users based on the update_date
#' @return data.table with zendesk users, or `NULL` when the API returned none
zdGetUsers <- function(subdomain, start.time) {
  raw <- zdGetObjects("users", subdomain, start.time)
  if (length(raw) > 0) {
    fields <- c("id", "email", "created_at", "updated_at", "role")
    zdExtractData(raw, fields)
  } else {
    NULL
  }
}
| /R/users.R | no_license | byapparov/rzendesk | R | false | false | 475 | r | #' Gets Zendesk users via increment API
#'
#' @export
#' @param subdomain organisation subdomain on zendesk
#' @param start.time starting point for new users based on the update_date
#' @return data.table with zendesk users, or NULL when the API returned no users
zdGetUsers <- function(subdomain, start.time) {
  # Fetch raw user objects from the incremental export endpoint.
  users <- zdGetObjects("users", subdomain, start.time)
  if(length(users) == 0) {
    return(NULL)
  }
  # Keep only the columns of interest from the raw API payload.
  dt <- zdExtractData(users, c("id", "email", "created_at", "updated_at", "role"))
  return(dt)
}
|
# Perturb the stabilized communities along several structural gradients
# (interaction asymmetry, interaction strength distribution, diagonal
# dominance, plus optional niche/fitness-difference gradients) and store
# the resulting parameter sets for downstream analyses.
library(tidyverse)
source("R/gradient_asymmetry.R")
source("R/gradient_fitness_diff.R")
source("R/gradient_niche_diff.R")
source("R/gradient_strength_dist.R")
source("R/gradient_diag_dominance.R")
# some constants ----------------------------------------------------------
# model_family <- c("BH","LW","RK")
model_family <- "LV"
# number of steps along each perturbation gradient
steps <- 10
# perturbation types to generate; the "nd"/"fd" branches below are kept for
# completeness but only run if added to this vector
types <- c("obs","ia","id","dd")
communities <- list()
# read data ---------------------------------------------------------------
# loads `stable_communities`, nested year > plot > subplot
load("results/communities_subplot_stabilized.Rdata")
years <- names(stable_communities)
plots <- seq_along(stable_communities[[1]])
subplots <- names(stable_communities[[1]][[1]])
# TRUE when a subplot entry stores the scalar NA placeholder instead of a
# corrected alpha matrix. A scalar-safe test is required because
# `if (!is.na(<matrix>))` raises a condition-length error on R >= 4.2.
is_na_placeholder <- function(x) length(x) == 1 && all(is.na(x))
# generate perturbed values -----------------------------------------------
for(i.year in seq_along(years)){
  communities[[i.year]] <- list()
  for(i.plot in seq_along(plots)){
    communities[[i.year]][[i.plot]] <- list()
    for(i.sub in seq_along(subplots)){
      # hoist the repeated deep indexing into a single lookup
      entry <- stable_communities[[years[i.year]]][[i.plot]][[subplots[i.sub]]]
      if(!is_na_placeholder(entry[["corrected_alpha"]])){
        abund.obs <- entry[["abundances"]]
        corrected_r <- entry[["corrected_r"]]
        corrected_alpha <- entry[["corrected_alpha"]]
        communities[[i.year]][[i.plot]][[i.sub]] <- list()
        for(i.type in seq_along(types)){
          communities[[i.year]][[i.plot]][[i.sub]][[i.type]] <- list()
          if(types[i.type] == "obs"){
            # observed community: keep the corrected parameters untouched
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- corrected_alpha
          }else if(types[i.type] == "nd"){
            # gradient of niche differences applied to the alpha matrix
            alpha.nd <- gradient_niche_diff(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.nd
          }else if(types[i.type] == "fd"){
            # gradient of fitness differences applied to the growth rates
            r.fd <- gradient_fitness_diff(lambda = corrected_r$rfit,
                                          steps = steps)
            r.fd.list <- list()
            # fix: iterate over r.fd -- the loop previously referenced the
            # undefined object `lambda.fd`, which errored at runtime
            for(i.step in seq_along(r.fd)){
              r.fd.list[[i.step]] <- data.frame(sp = corrected_r$sp,
                                                r = r.fd[[i.step]])
            }
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- r.fd.list
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- corrected_alpha
          }else if(types[i.type] == "ia"){
            # gradient of interaction asymmetry
            alpha.ia <- gradient_asymmetry(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.ia
          }else if(types[i.type] == "id"){
            # gradient of interaction strength distribution
            alpha.id <- gradient_strength_dist(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.id
          }else if(types[i.type] == "dd"){
            # gradient of diagonal dominance; renamed from alpha.id so the
            # "id" branch's variable is no longer shadowed/reused
            alpha.dd <- gradient_diag_dominance(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.dd
          }# if-else types
          names(communities[[i.year]][[i.plot]][[i.sub]][[i.type]]) <- c("abundances",
                                                                         "r","alpha")
        }# for each type
        names(communities[[i.year]][[i.plot]][[i.sub]]) <- types
      }else{
        communities[[i.year]][[i.plot]][[i.sub]] <- NA
      }# if-else valid corrected community
    }# for i.sub
    names(communities[[i.year]][[i.plot]]) <- subplots
  }# for i.plot
}# for i.year
names(communities) <- years
# write to disk -----------------------------------------------------------
save(communities,file = "results/communities_subplot_perturbed.Rdata")
| /R/03B_generate_perturbed_communities.R | no_license | garciacallejas/MCT_SAD | R | false | false | 5,006 | r |
# Perturb the stabilized communities along several structural gradients
# (interaction asymmetry, interaction strength distribution, diagonal
# dominance, plus optional niche/fitness-difference gradients) and store
# the resulting parameter sets for downstream analyses.
library(tidyverse)
source("R/gradient_asymmetry.R")
source("R/gradient_fitness_diff.R")
source("R/gradient_niche_diff.R")
source("R/gradient_strength_dist.R")
source("R/gradient_diag_dominance.R")
# some constants ----------------------------------------------------------
# model_family <- c("BH","LW","RK")
model_family <- "LV"
# number of steps along each perturbation gradient
steps <- 10
# perturbation types to generate; the "nd"/"fd" branches below are kept for
# completeness but only run if added to this vector
types <- c("obs","ia","id","dd")
communities <- list()
# read data ---------------------------------------------------------------
# loads `stable_communities`, nested year > plot > subplot
load("results/communities_subplot_stabilized.Rdata")
years <- names(stable_communities)
plots <- seq_along(stable_communities[[1]])
subplots <- names(stable_communities[[1]][[1]])
# TRUE when a subplot entry stores the scalar NA placeholder instead of a
# corrected alpha matrix. A scalar-safe test is required because
# `if (!is.na(<matrix>))` raises a condition-length error on R >= 4.2.
is_na_placeholder <- function(x) length(x) == 1 && all(is.na(x))
# generate perturbed values -----------------------------------------------
for(i.year in seq_along(years)){
  communities[[i.year]] <- list()
  for(i.plot in seq_along(plots)){
    communities[[i.year]][[i.plot]] <- list()
    for(i.sub in seq_along(subplots)){
      # hoist the repeated deep indexing into a single lookup
      entry <- stable_communities[[years[i.year]]][[i.plot]][[subplots[i.sub]]]
      if(!is_na_placeholder(entry[["corrected_alpha"]])){
        abund.obs <- entry[["abundances"]]
        corrected_r <- entry[["corrected_r"]]
        corrected_alpha <- entry[["corrected_alpha"]]
        communities[[i.year]][[i.plot]][[i.sub]] <- list()
        for(i.type in seq_along(types)){
          communities[[i.year]][[i.plot]][[i.sub]][[i.type]] <- list()
          if(types[i.type] == "obs"){
            # observed community: keep the corrected parameters untouched
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- corrected_alpha
          }else if(types[i.type] == "nd"){
            # gradient of niche differences applied to the alpha matrix
            alpha.nd <- gradient_niche_diff(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.nd
          }else if(types[i.type] == "fd"){
            # gradient of fitness differences applied to the growth rates
            r.fd <- gradient_fitness_diff(lambda = corrected_r$rfit,
                                          steps = steps)
            r.fd.list <- list()
            # fix: iterate over r.fd -- the loop previously referenced the
            # undefined object `lambda.fd`, which errored at runtime
            for(i.step in seq_along(r.fd)){
              r.fd.list[[i.step]] <- data.frame(sp = corrected_r$sp,
                                                r = r.fd[[i.step]])
            }
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- r.fd.list
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- corrected_alpha
          }else if(types[i.type] == "ia"){
            # gradient of interaction asymmetry
            alpha.ia <- gradient_asymmetry(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.ia
          }else if(types[i.type] == "id"){
            # gradient of interaction strength distribution
            alpha.id <- gradient_strength_dist(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.id
          }else if(types[i.type] == "dd"){
            # gradient of diagonal dominance; renamed from alpha.id so the
            # "id" branch's variable is no longer shadowed/reused
            alpha.dd <- gradient_diag_dominance(A = corrected_alpha, steps = steps)
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[1]] <- abund.obs
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[2]] <- corrected_r
            communities[[i.year]][[i.plot]][[i.sub]][[i.type]][[3]] <- alpha.dd
          }# if-else types
          names(communities[[i.year]][[i.plot]][[i.sub]][[i.type]]) <- c("abundances",
                                                                         "r","alpha")
        }# for each type
        names(communities[[i.year]][[i.plot]][[i.sub]]) <- types
      }else{
        communities[[i.year]][[i.plot]][[i.sub]] <- NA
      }# if-else valid corrected community
    }# for i.sub
    names(communities[[i.year]][[i.plot]]) <- subplots
  }# for i.plot
}# for i.year
names(communities) <- years
# write to disk -----------------------------------------------------------
save(communities,file = "results/communities_subplot_perturbed.Rdata")
|
#!/usr/bin/env Rscript
# Plot the density of absolute IPL values from a PARADIGM results table.
# Usage: densityIPL.R <paradigm-output.tsv>
args <- commandArgs(TRUE)
paradigmFile <- args[1]
pData <- read.table(paradigmFile, sep = "\t", header = TRUE)
# Flatten every value column into one numeric vector. Column 1 is skipped,
# matching the original 2:ncol indexing (presumably it holds row
# identifiers -- TODO confirm against the PARADIGM output format). This
# vectorized extraction replaces the previous row-by-row c() accumulation,
# which regrew X on every iteration (quadratic in the number of rows).
X <- as.numeric(as.matrix(pData[, -1, drop = FALSE]))
pdf("IPL-density.pdf")
plot(density(abs(X)), xlim = c(0, 1))
dev.off()
# Commented-out signal-to-noise diagnostic kept from the original loop:
#snRatio = c(snRatio, sd(as.numeric(pData[i, 2:(dim(pData)[2])]))/abs(mean(as.numeric(pData[i, 2:(dim(pData)[2])]))))
#plot(density(snRatio[!is.na(snRatio)])) | /utilties/densityIPL.R | no_license | graim/paradigm-scripts | R | false | false | 472 | r | #!/usr/bin/env Rscript
# Plot the density of absolute IPL values from a PARADIGM results table.
# Usage: densityIPL.R <paradigm-output.tsv>
args <- commandArgs(TRUE)
paradigmFile <- args[1]
pData <- read.table(paradigmFile, sep = "\t", header = TRUE)
# Flatten every value column into one numeric vector. Column 1 is skipped,
# matching the original 2:ncol indexing (presumably it holds row
# identifiers -- TODO confirm against the PARADIGM output format). This
# vectorized extraction replaces the previous row-by-row c() accumulation,
# which regrew X on every iteration (quadratic in the number of rows).
X <- as.numeric(as.matrix(pData[, -1, drop = FALSE]))
pdf("IPL-density.pdf")
plot(density(abs(X)), xlim = c(0, 1))
dev.off()
# Commented-out signal-to-noise diagnostics kept from the original:
#snRatio = c(snRatio, sd(as.numeric(pData[i, 2:(dim(pData)[2])]))/abs(mean(as.numeric(pData[i, 2:(dim(pData)[2])]))))
#plot(density(snRatio[!is.na(snRatio)]))
## ----echo=FALSE---------------------------------------------------------------
# Global knitr chunk defaults for this vignette: collapse source and output
# into one block, prefix output lines with "#>", and silence warnings and
# messages in the rendered document.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  message = FALSE,
  warning = FALSE
)
| /inst/doc/query_tutorial.R | permissive | cran/sofa | R | false | false | 177 | r | ## ----echo=FALSE---------------------------------------------------------------
# Set the global knitr chunk options used when rendering this vignette.
knitr::opts_chunk$set(
  comment = "#>",    # prefix printed before each line of chunk output
  collapse = TRUE,   # show source and output in a single collapsed block
  warning = FALSE,   # hide warnings in the rendered document
  message = FALSE    # hide messages in the rendered document
)
|
# Looking at dnorm, pnorm, and qnorm - p. 3
# d stands for density and calculates the probability density function
# at some value x (i.e. tells us how tall the curve is at that point)
dnorm(0) # At x = 0, the pdf of x is around 0.4 (see the graph later)
# p stands for probability and it calculates the probability of a value below
# whatever we enter for q
pnorm(0) # There is a 0.5 probability of getting an x value less than 0
pnorm(-1.96) # There is a 0.025 probability of an x value less than -1.96
# q stands for quantile and it calculates the value that has probability p
# to the left of it (i.e. the reverse of pnorm())
qnorm(0.5) # The x value with probability 0.5 to the left of it is x = 0
qnorm(0.025) # The x value with probability 0.025 to the left of it x = -1.96
# Graphs on pages 4 and 5
library(ggplot2)
library(gridExtra)
# Normal Distribution Graphs ----
g1 <- ggplot(data = data.frame(x = c(-3, 3)), aes(x)) +
  stat_function(fun = dnorm, n = 101) +
  labs(title = "Standard Normal Distribution")
g2 <- ggplot(data = data.frame(x = c(70, 130)), aes(x)) +
  stat_function(fun = dnorm, n = 101, args = list(mean = 100, sd = 10)) +
  labs(title = "Normal Distribution with Mean = 100, SD = 10")
# Notice that we're using a function called stat_function() to add a normal
# distribution curve to both graphs. In the first, the default is a standard
# normal (mean = 0 and s.d. = 1). In the second, we use the args = ___ argument
# to say which arguments of the dnorm() function we want to use
grid.arrange(g1, g2, ncol = 2)
# T-Distribution Graph ----
# The dplyr library contains the mutate() function to add variables to our data
# The tidyr library contains pivot_longer() which lets us convert our data from
# a "wide" format where each t-distribution gets its own column, to a "long"
# or "tidy" format where our variables are distribution and density function
# value. This lets us easily plot separate lines by the distribution variable.
library(dplyr)
library(tidyr)
df <- data.frame(x = seq(-3, 3, .01))
df <- mutate(df, t_5 = dt(x, 5), t_10 = dt(x, 10), t_30 = dt(x , 30),
             norm = dnorm(x))
df <- pivot_longer(df, -x, names_to = "dist", values_to = "density")
# Reorder the factor levels so the legend lists t_5, t_10, t_30, then norm
# instead of the default alphabetical order.
df$dist <- factor(df$dist, levels = c("t_5", "t_10", "t_30", "norm"))
ggplot(df, aes(x = x, y = density, color = dist)) +
  geom_line(aes(linetype = dist)) +
  labs(title = "T-Distributions")
# Compare the lower tail at -2: the t with 5 df has heavier tails, so it
# gives a larger probability than the standard normal.
pnorm(-2)
pt(-2, 5)
# F-Distribution Graph ----
# Note: `df` now names both our data frame and the F density function
# stats::df; the call df(x, 2, 27) still resolves to the function because R
# only considers function bindings when looking up a name being called.
df <- data.frame(x = seq(0, 10, .01))
df <- mutate(df, f_2_27 = df(x, 2, 27), f_5_45 = df(x, 5, 45),
             f_8_2 = df(x, 8, 2))
df <- pivot_longer(df, -x, names_to = "dist", values_to = "density")
ggplot(df, aes(x = x, y = density, color = dist)) +
  geom_line(aes(linetype = dist)) +
  labs(title = "F-Distributions")
# Chi-Squared Distribution Graph ----
df <- data.frame(x = seq(0, 30, .01))
df <- mutate(df, chisq_2 = dchisq(x, 2), chisq_10 = dchisq(x, 10),
             chisq_5 = dchisq(x, 5))
df <- pivot_longer(df, -x, names_to = "dist", values_to = "density")
df$dist <- factor(df$dist, levels = c("chisq_2", "chisq_5", "chisq_10"))
ggplot(df, aes(x = x, y = density, color = dist)) +
  geom_line(aes(linetype = dist)) +
  labs(title = "Chi-Squared Distributions")
# Working with probabilities - Example 1
# by default the lower tail is shaded/calculated, so these are the same
pt(1.28, df = 24, lower.tail = TRUE)
pt(1.28, df = 24)
# Working with probabilities - Example 2
# We can use lower.tail = FALSE or calculate 1 minus the lower tail to calculate
# areas ABOVE some cutoff
pt(1.28, df = 24, lower.tail = FALSE)
1 - pt(1.28, 24)
# T distribution probabilities on p. 7
# two options for a
pt(2, 12)
pt(2, 12, lower.tail = TRUE)
# b
pt(-0.5, 5, lower.tail = FALSE)
# c
pt(1.28, 999, lower.tail = FALSE) | /0324_TuesdayMarch24.R | no_license | vank-stats/STS347-Spring2020 | R | false | false | 3,783 | r | # Looking at dnorm, pnorm, and qnorm - p. 3
# d stands for density and calculates the probability density function
# at some value x (i.e. tells us how tall the curve is at that point)
dnorm(0) # At x = 0, the pdf of x is around 0.4 (see the graph later)
# p stands for probability and it calculates the probability of a value below
# whatever we enter for q
pnorm(0) # There is a 0.5 probability of getting an x value less than 0
pnorm(-1.96) # There is a 0.025 probability of an x value less than -1.96
# q stands for quantile and it calculates the value that has probability p
# to the left of it (i.e. the reverse of pnorm())
qnorm(0.5) # The x value with probability 0.5 to the left of it is x = 0
qnorm(0.025) # The x value with probability 0.025 to the left of it x = -1.96
# Graphs on pages 4 and 5
library(ggplot2)
library(gridExtra)
# Normal Distribution Graphs ----
g1 <- ggplot(data = data.frame(x = c(-3, 3)), aes(x)) +
  stat_function(fun = dnorm, n = 101) +
  labs(title = "Standard Normal Distribution")
g2 <- ggplot(data = data.frame(x = c(70, 130)), aes(x)) +
  stat_function(fun = dnorm, n = 101, args = list(mean = 100, sd = 10)) +
  labs(title = "Normal Distribution with Mean = 100, SD = 10")
# Notice that we're using a function called stat_function() to add a normal
# distribution curve to both graphs. In the first, the default is a standard
# normal (mean = 0 and s.d. = 1). In the second, we use the args = ___ argument
# to say which arguments of the dnorm() function we want to use
grid.arrange(g1, g2, ncol = 2)
# T-Distribution Graph ----
# The dplyr library contains the mutate() function to add variables to our data
# The tidyr library contains pivot_longer() which lets us convert our data from
# a "wide" format where each t-distribution gets its own column, to a "long"
# or "tidy" format where our variables are distribution and density function
# value. This lets us easily plot separate lines by the distribution variable.
library(dplyr)
library(tidyr)
df <- data.frame(x = seq(-3, 3, .01))
df <- mutate(df, t_5 = dt(x, 5), t_10 = dt(x, 10), t_30 = dt(x , 30),
             norm = dnorm(x))
df <- pivot_longer(df, -x, names_to = "dist", values_to = "density")
# Reorder the factor levels so the legend lists t_5, t_10, t_30, then norm
# instead of the default alphabetical order.
df$dist <- factor(df$dist, levels = c("t_5", "t_10", "t_30", "norm"))
ggplot(df, aes(x = x, y = density, color = dist)) +
  geom_line(aes(linetype = dist)) +
  labs(title = "T-Distributions")
# Compare the lower tail at -2: the t with 5 df has heavier tails, so it
# gives a larger probability than the standard normal.
pnorm(-2)
pt(-2, 5)
# F-Distribution Graph ----
# Note: `df` now names both our data frame and the F density function
# stats::df; the call df(x, 2, 27) still resolves to the function because R
# only considers function bindings when looking up a name being called.
df <- data.frame(x = seq(0, 10, .01))
df <- mutate(df, f_2_27 = df(x, 2, 27), f_5_45 = df(x, 5, 45),
             f_8_2 = df(x, 8, 2))
df <- pivot_longer(df, -x, names_to = "dist", values_to = "density")
ggplot(df, aes(x = x, y = density, color = dist)) +
  geom_line(aes(linetype = dist)) +
  labs(title = "F-Distributions")
# Chi-Squared Distribution Graph ----
df <- data.frame(x = seq(0, 30, .01))
df <- mutate(df, chisq_2 = dchisq(x, 2), chisq_10 = dchisq(x, 10),
             chisq_5 = dchisq(x, 5))
df <- pivot_longer(df, -x, names_to = "dist", values_to = "density")
df$dist <- factor(df$dist, levels = c("chisq_2", "chisq_5", "chisq_10"))
ggplot(df, aes(x = x, y = density, color = dist)) +
  geom_line(aes(linetype = dist)) +
  labs(title = "Chi-Squared Distributions")
# Working with probabilities - Example 1
# by default the lower tail is shaded/calculated, so these are the same
pt(1.28, df = 24, lower.tail = TRUE)
pt(1.28, df = 24)
# Working with probabilities - Example 2
# We can use lower.tail = FALSE or calculate 1 minus the lower tail to calculate
# areas ABOVE some cutoff
pt(1.28, df = 24, lower.tail = FALSE)
1 - pt(1.28, 24)
# T distribution probabilities on p. 7
# two options for a
pt(2, 12)
pt(2, 12, lower.tail = TRUE)
# b
pt(-0.5, 5, lower.tail = FALSE)
# c
pt(1.28, 999, lower.tail = FALSE) |
testlist <- list(a = 3.226051041085e-319, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) | /BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615926921-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 115 | r | testlist <- list(a = 3.226051041085e-319, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) |
# Read the data from csv-file produced by the C++-Program
woPR_data <- read.csv2(file = "Test-20-15-35-woPR.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
PR_data <- read.csv2(file = "ResultsLS+PR-Tests.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
data <- rbind(woPR_data, PR_data)
data <- subset(data, select = -X)
# label data accoring to code book
data$StartSol <- factor(data$StartSol,
levels = c(0,1,2,3,4),
labels = c("Random init", "Greedy init", "Fixed alpha", "Reactive alpha", "Uniform random alpha"))
data$LSStrategy <- factor(data$LSStrategy,
levels = c(0,1,2),
labels = c("best improve", "random improve", "none"))
data$PRMeth <- factor(data$PRMeth,
levels = c(0,1,2,3),
labels = c("Random PR-Move Selector", "Greedy PR-Move Selector", "GRASP PR-Move Selector", "no PR"))
data$PRDir <- factor(data$PRDir,
levels = c(0,1,2),
labels = c("Forward", "Backward", "Mixed"))
data$PRPoolSelect <- factor(data$PRPoolSelect,
levels = c(0,1,2),
labels = c("Diverse", "Random", "All"))
# read and transform quick GRASP data
quick_PR_data <- read.csv2(file = "ResultsLS+PR-Tests-quick.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
quick_woPR_data <- read.csv2(file = "Test-20-15-35-woPR-quick.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
quick_data <- rbind(quick_PR_data, quick_woPR_data)
quick_data <- subset(quick_data, select = -PoolSize)
levels(quick_data$StartSol)[levels(quick_data$StartSol) == 3] <- 7
levels(quick_data$StartSol)[levels(quick_data$StartSol) == 4] <- 8
# label data accoring to code book
quick_data$StartSol <- factor(quick_data$StartSol,
levels = c(0,1,2,3,4,5,6,7,8),
labels = c("Random init",
"Greedy init", "Fixed alpha", "Reactive alpha", "Uniform random alpha",
"Quick Greedy", "Quick Fixed", "Quick Reactive", "Quick Uniform Random"))
quick_data$LSStrategy <- factor(quick_data$LSStrategy,
levels = c(0,1,2),
labels = c("best improve", "random improve", "none"))
quick_data$PRMeth <- factor(quick_data$PRMeth,
levels = c(0,1,2,3),
labels = c("Random PR-Move Selector", "Greedy PR-Move Selector", "GRASP PR-Move Selector", "no PR"))
quick_data$PRDir <- factor(quick_data$PRDir,
levels = c(0,1,2),
labels = c("Forward", "Backward", "Mixed"))
quick_data$PRPoolSelect <- factor(quick_data$PRPoolSelect,
levels = c(0,1,2),
labels = c("Diverse", "Random", "All"))
## get nice summary table
# use dplyr-Packe for easier handling
library(dplyr)
# filter relevant experiments
plotdata_with_PR_Random <- filter(data, StartSol == "Random init", PRMeth == "Greedy PR-Move Selector",
LSStrategy == "random improve", PRDir == "Forward", PRPoolSelect == "Random")
plotdata_with_PR_Random <- mutate(plotdata_with_PR_Random, Algo = 1)
plotdata_quick_with_PR_Reactive <- filter(quick_data, StartSol == "Quick Reactive", PRMeth == "Greedy PR-Move Selector",
LSStrategy == "random improve", PRDir == "Forward", PRPoolSelect == "All")
plotdata_quick_with_PR_Reactive <- mutate(plotdata_quick_with_PR_Reactive, Algo = 2)
plotdata_with_PR_Reactive <- filter(data, StartSol == "Reactive alpha", PRMeth == "Greedy PR-Move Selector",
LSStrategy == "random improve", PRDir == "Forward", PRPoolSelect == "All")
plotdata_with_PR_Reactive <- mutate(plotdata_with_PR_Reactive, Algo = 3)
plotdata_wo_PR_Random <- filter(data, StartSol == "Random init", PRMeth == "no PR", LSStrategy == "random improve")
plotdata_wo_PR_Random <- mutate(plotdata_wo_PR_Random, Algo = 4)
plotdata_quick_wo_PR_Reactive <- filter(quick_data, StartSol == "Quick Reactive", PRMeth == "no PR", LSStrategy == "random improve")
plotdata_quick_wo_PR_Reactive <- mutate(plotdata_quick_wo_PR_Reactive, Algo = 5)
plotdata_wo_PR_Reactive <- filter(data, StartSol == "Reactive alpha", PRMeth == "no PR", LSStrategy == "random improve")
plotdata_wo_PR_Reactive <- mutate(plotdata_wo_PR_Reactive, Algo = 6)
# combine them to on dataframe and label data frame
plotdata <- rbind(plotdata_with_PR_Reactive,
plotdata_with_PR_Random,
plotdata_wo_PR_Random,
plotdata_wo_PR_Reactive,
plotdata_quick_wo_PR_Reactive,
plotdata_quick_with_PR_Reactive)
plotdata$Algo <- factor(plotdata$Algo,
levels = c(1,2,3,4,5,6),
labels = c("Random Init\nwith PR",
"Quick GRASP\nwith PR",
"GRASP Init\nwith PR",
"Random Init\nwithout PR",
"Quick GRASP\nwithout PR",
"GRASP Init\nwithout PR"))
# group data by Neighborhood, Algorithm and Alphavalue
plotdata <- group_by(plotdata, StartSol, LSStrategy, PRMeth, PRDir, PRPoolSelect, Algo)
plotdata <- mutate(plotdata, Time = Time / 1000)
plotdata <- arrange(plotdata, PRMeth, StartSol)
library(ggplot2)
png("Init_vs_Time_Violin_woPR.png",
width = 20,
height = 12,
units = "cm",
res = 1200,
pointsize = 14,
antialias = "cleartype"
)
ggplot(data = plotdata, aes(x=Algo, y=Time)) +
geom_violin() +
#geom_boxplot(width=0.1, outlier.shape = NA) +
geom_boxplot(width=0.1) +
#scale_y_log10() +
#coord_flip() +
theme_bw(base_size = 14, base_family = "serif") +
theme (legend.position = "bottom",
legend.direction = "horizontal",
legend.background = element_rect(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.title.x = element_blank()
) +
labs(y = "Runtime in sec")
dev.off()
| /tests/Time2Target-Cordeau/20-15-35/Initit_vs_PRpresent_vs_SolTime.R | no_license | HaSaKL/Metaheuristic_GQAP | R | false | false | 6,926 | r | # Read the data from csv-file produced by the C++-Program
woPR_data <- read.csv2(file = "Test-20-15-35-woPR.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
PR_data <- read.csv2(file = "ResultsLS+PR-Tests.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
data <- rbind(woPR_data, PR_data)
data <- subset(data, select = -X)
# label data accoring to code book
data$StartSol <- factor(data$StartSol,
levels = c(0,1,2,3,4),
labels = c("Random init", "Greedy init", "Fixed alpha", "Reactive alpha", "Uniform random alpha"))
data$LSStrategy <- factor(data$LSStrategy,
levels = c(0,1,2),
labels = c("best improve", "random improve", "none"))
data$PRMeth <- factor(data$PRMeth,
levels = c(0,1,2,3),
labels = c("Random PR-Move Selector", "Greedy PR-Move Selector", "GRASP PR-Move Selector", "no PR"))
data$PRDir <- factor(data$PRDir,
levels = c(0,1,2),
labels = c("Forward", "Backward", "Mixed"))
data$PRPoolSelect <- factor(data$PRPoolSelect,
levels = c(0,1,2),
labels = c("Diverse", "Random", "All"))
# read and transform quick GRASP data
quick_PR_data <- read.csv2(file = "ResultsLS+PR-Tests-quick.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
quick_woPR_data <- read.csv2(file = "Test-20-15-35-woPR-quick.csv", dec = ".", strip.white = T, header=TRUE, fill = F,
colClasses = c('factor', 'factor', 'factor','factor','factor','factor','numeric','numeric','numeric','numeric','numeric') )
quick_data <- rbind(quick_PR_data, quick_woPR_data)
quick_data <- subset(quick_data, select = -PoolSize)
levels(quick_data$StartSol)[levels(quick_data$StartSol) == 3] <- 7
levels(quick_data$StartSol)[levels(quick_data$StartSol) == 4] <- 8
# label data accoring to code book
quick_data$StartSol <- factor(quick_data$StartSol,
levels = c(0,1,2,3,4,5,6,7,8),
labels = c("Random init",
"Greedy init", "Fixed alpha", "Reactive alpha", "Uniform random alpha",
"Quick Greedy", "Quick Fixed", "Quick Reactive", "Quick Uniform Random"))
quick_data$LSStrategy <- factor(quick_data$LSStrategy,
levels = c(0,1,2),
labels = c("best improve", "random improve", "none"))
quick_data$PRMeth <- factor(quick_data$PRMeth,
levels = c(0,1,2,3),
labels = c("Random PR-Move Selector", "Greedy PR-Move Selector", "GRASP PR-Move Selector", "no PR"))
quick_data$PRDir <- factor(quick_data$PRDir,
levels = c(0,1,2),
labels = c("Forward", "Backward", "Mixed"))
quick_data$PRPoolSelect <- factor(quick_data$PRPoolSelect,
levels = c(0,1,2),
labels = c("Diverse", "Random", "All"))
## get nice summary table
# use dplyr-Packe for easier handling
library(dplyr)
# filter relevant experiments
plotdata_with_PR_Random <- filter(data, StartSol == "Random init", PRMeth == "Greedy PR-Move Selector",
LSStrategy == "random improve", PRDir == "Forward", PRPoolSelect == "Random")
plotdata_with_PR_Random <- mutate(plotdata_with_PR_Random, Algo = 1)
plotdata_quick_with_PR_Reactive <- filter(quick_data, StartSol == "Quick Reactive", PRMeth == "Greedy PR-Move Selector",
LSStrategy == "random improve", PRDir == "Forward", PRPoolSelect == "All")
plotdata_quick_with_PR_Reactive <- mutate(plotdata_quick_with_PR_Reactive, Algo = 2)
plotdata_with_PR_Reactive <- filter(data, StartSol == "Reactive alpha", PRMeth == "Greedy PR-Move Selector",
LSStrategy == "random improve", PRDir == "Forward", PRPoolSelect == "All")
plotdata_with_PR_Reactive <- mutate(plotdata_with_PR_Reactive, Algo = 3)
plotdata_wo_PR_Random <- filter(data, StartSol == "Random init", PRMeth == "no PR", LSStrategy == "random improve")
plotdata_wo_PR_Random <- mutate(plotdata_wo_PR_Random, Algo = 4)
plotdata_quick_wo_PR_Reactive <- filter(quick_data, StartSol == "Quick Reactive", PRMeth == "no PR", LSStrategy == "random improve")
plotdata_quick_wo_PR_Reactive <- mutate(plotdata_quick_wo_PR_Reactive, Algo = 5)
plotdata_wo_PR_Reactive <- filter(data, StartSol == "Reactive alpha", PRMeth == "no PR", LSStrategy == "random improve")
plotdata_wo_PR_Reactive <- mutate(plotdata_wo_PR_Reactive, Algo = 6)
# combine them to on dataframe and label data frame
plotdata <- rbind(plotdata_with_PR_Reactive,
plotdata_with_PR_Random,
plotdata_wo_PR_Random,
plotdata_wo_PR_Reactive,
plotdata_quick_wo_PR_Reactive,
plotdata_quick_with_PR_Reactive)
plotdata$Algo <- factor(plotdata$Algo,
levels = c(1,2,3,4,5,6),
labels = c("Random Init\nwith PR",
"Quick GRASP\nwith PR",
"GRASP Init\nwith PR",
"Random Init\nwithout PR",
"Quick GRASP\nwithout PR",
"GRASP Init\nwithout PR"))
# group data by Neighborhood, Algorithm and Alphavalue
plotdata <- group_by(plotdata, StartSol, LSStrategy, PRMeth, PRDir, PRPoolSelect, Algo)
plotdata <- mutate(plotdata, Time = Time / 1000)
plotdata <- arrange(plotdata, PRMeth, StartSol)
library(ggplot2)
png("Init_vs_Time_Violin_woPR.png",
width = 20,
height = 12,
units = "cm",
res = 1200,
pointsize = 14,
antialias = "cleartype"
)
ggplot(data = plotdata, aes(x=Algo, y=Time)) +
geom_violin() +
#geom_boxplot(width=0.1, outlier.shape = NA) +
geom_boxplot(width=0.1) +
#scale_y_log10() +
#coord_flip() +
theme_bw(base_size = 14, base_family = "serif") +
theme (legend.position = "bottom",
legend.direction = "horizontal",
legend.background = element_rect(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.title.x = element_blank()
) +
labs(y = "Runtime in sec")
dev.off()
|
run_analysis <- function () {
#library
require(data.table)
require(bit64)
require(dplyr)
###-----------------------------------------------------------------------
### 1. Merges the training and the test sets to create one data set.
###-----------------------------------------------------------------------
###common files
features <- fread(".\\UCI HAR Dataset\\features.txt", data.table = FALSE)
### TEST read all the datasets. assume files are at working directory
test_subj <- fread(".\\UCI HAR Dataset\\test\\subject_test.txt", data.table = FALSE)
test_actv <- fread(".\\UCI HAR Dataset\\test\\y_test.txt", data.table = FALSE)
test_data <- fread(".\\UCI HAR Dataset\\test\\x_test.txt", data.table = FALSE)
# put name in columns
names(test_data) <- features[,2]
names(test_subj) <- "subject"
names(test_actv) <- "activity"
# aggregate test data frame
test <- bind_cols(test_subj,test_actv) %>% mutate(type="test") %>% bind_cols(test_data)
### TRAIN read all the datasets. assume files are at working directory
train_subj <- fread(".\\UCI HAR Dataset\\train\\subject_train.txt", data.table = FALSE)
train_actv <- fread(".\\UCI HAR Dataset\\train\\y_train.txt", data.table = FALSE)
train_data <- fread(".\\UCI HAR Dataset\\train\\x_train.txt", data.table = FALSE)
# put name in columns
names(train_data) <- features[,2]
names(train_subj) <- "subject"
names(train_actv) <- "activity"
# aggregate train data frame
train <- bind_cols(train_subj,train_actv) %>% mutate(type="train") %>% bind_cols(train_data)
## merged data test and train
dt <- bind_rows(test,train)
###-----------------------------------------------------------------------
### 2. Extracts only the measurements on the mean and standard deviation for each measurement
###-----------------------------------------------------------------------
dt_mean <- dt %>% select(contains("mean",ignore.case=TRUE))
dt_std <- dt %>% select(contains("std",ignore.case=TRUE))
dt2 <- bind_cols(dt[,1:3],dt_mean,dt_std)
###-----------------------------------------------------------------------
### 3. Uses descriptive activity names to name the activities in the data set
###-----------------------------------------------------------------------
activities <- features <- fread(".\\UCI HAR Dataset\\activity_labels.txt", data.table = FALSE)
dt3 <- dt2
for (i in 1:nrow(dt3)) {
v <- as.numeric(dt3[i,2])
dt3[i,2] <- activities[v,2]
}
###-----------------------------------------------------------------------
### 4. Appropriately labels the data set with descriptive variable names.
###-----------------------------------------------------------------------
dt4 <- dt3
n <- names(dt4)
n <- gsub("tBody","Time.Body.",n)
n <- gsub("tGravity","Time.Gravity.",n)
n <- gsub("fBody","Frecuency.Body.",n)
n <- gsub("Acc","Accelerometer.",n)
n <- gsub("Body.Body","Body.",n)
n <- gsub("Gyro","Gyroscope.",n)
n <- gsub("Mag-","Magnitude.",n)
n <- gsub("Jerk-","Jerk.",n)
n <- gsub("JerkMagnitude","Jerk.Magnitude",n)
names(dt4) <- n
###-----------------------------------------------------------------------
### 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
###-----------------------------------------------------------------------
dt5 <- dt4 %>%
mutate(subject = as.factor(subject)) %>%
mutate(activity = as.factor(activity)) %>%
select(-matches("type")) %>%
group_by(activity,subject) %>%
summarise_each(funs(mean))
write.table(dt5,".\\analized.txt",row.name = FALSE)
} | /run_analysis.R | no_license | jnombela/GetData_project | R | false | false | 3,705 | r | run_analysis <- function () {
#library
require(data.table)
require(bit64)
require(dplyr)
###-----------------------------------------------------------------------
### 1. Merges the training and the test sets to create one data set.
###-----------------------------------------------------------------------
###common files
features <- fread(".\\UCI HAR Dataset\\features.txt", data.table = FALSE)
### TEST read all the datasets. assume files are at working directory
test_subj <- fread(".\\UCI HAR Dataset\\test\\subject_test.txt", data.table = FALSE)
test_actv <- fread(".\\UCI HAR Dataset\\test\\y_test.txt", data.table = FALSE)
test_data <- fread(".\\UCI HAR Dataset\\test\\x_test.txt", data.table = FALSE)
# put name in columns
names(test_data) <- features[,2]
names(test_subj) <- "subject"
names(test_actv) <- "activity"
# aggregate test data frame
test <- bind_cols(test_subj,test_actv) %>% mutate(type="test") %>% bind_cols(test_data)
### TRAIN read all the datasets. assume files are at working directory
train_subj <- fread(".\\UCI HAR Dataset\\train\\subject_train.txt", data.table = FALSE)
train_actv <- fread(".\\UCI HAR Dataset\\train\\y_train.txt", data.table = FALSE)
train_data <- fread(".\\UCI HAR Dataset\\train\\x_train.txt", data.table = FALSE)
# put name in columns
names(train_data) <- features[,2]
names(train_subj) <- "subject"
names(train_actv) <- "activity"
# aggregate train data frame
train <- bind_cols(train_subj,train_actv) %>% mutate(type="train") %>% bind_cols(train_data)
## merged data test and train
dt <- bind_rows(test,train)
###-----------------------------------------------------------------------
### 2. Extracts only the measurements on the mean and standard deviation for each measurement
###-----------------------------------------------------------------------
dt_mean <- dt %>% select(contains("mean",ignore.case=TRUE))
dt_std <- dt %>% select(contains("std",ignore.case=TRUE))
dt2 <- bind_cols(dt[,1:3],dt_mean,dt_std)
###-----------------------------------------------------------------------
### 3. Uses descriptive activity names to name the activities in the data set
###-----------------------------------------------------------------------
activities <- features <- fread(".\\UCI HAR Dataset\\activity_labels.txt", data.table = FALSE)
dt3 <- dt2
for (i in 1:nrow(dt3)) {
v <- as.numeric(dt3[i,2])
dt3[i,2] <- activities[v,2]
}
###-----------------------------------------------------------------------
### 4. Appropriately labels the data set with descriptive variable names.
###-----------------------------------------------------------------------
dt4 <- dt3
n <- names(dt4)
n <- gsub("tBody","Time.Body.",n)
n <- gsub("tGravity","Time.Gravity.",n)
n <- gsub("fBody","Frecuency.Body.",n)
n <- gsub("Acc","Accelerometer.",n)
n <- gsub("Body.Body","Body.",n)
n <- gsub("Gyro","Gyroscope.",n)
n <- gsub("Mag-","Magnitude.",n)
n <- gsub("Jerk-","Jerk.",n)
n <- gsub("JerkMagnitude","Jerk.Magnitude",n)
names(dt4) <- n
###-----------------------------------------------------------------------
### 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
###-----------------------------------------------------------------------
dt5 <- dt4 %>%
mutate(subject = as.factor(subject)) %>%
mutate(activity = as.factor(activity)) %>%
select(-matches("type")) %>%
group_by(activity,subject) %>%
summarise_each(funs(mean))
write.table(dt5,".\\analized.txt",row.name = FALSE)
} |
/Lab6/vicky6.R | no_license | nabiljesus/Estadistica | R | false | false | 8,554 | r | ||
## Functions makeCacheMatrix and cacheSolve are used to implement an extended matrix
## representation that can be used to cache the inverse of the matrix after computation.
## The extended representation can be used to reduce computational cost when the inverse of
## matrix is used multiple instances without the need to implement ad hoc caching strategies.
##
## makeCacheMatrix creates an instance of the extended matrix
## cacheSolve calculates the inverse of extended matrix using cahced value if available
##
## matrix is supposed to be invertible
## Function makeCacheMatrix creates an extended matrix representation that supports
## caching of the matrix inverse. In order to take advantage of cached computations
## the function cacheSolve should be used to compute the inverse of the extended
## matrix.
##
## x1<-makeCacheMatrix(x) returns extended representation of matrix x
##
## x1$set(y) changes matrix value and clears the cached inverse
## x1$get() gets the underlining matrix value
## x1$setinverse(inverse) sets the value of the cached inverse
## x1$getinverse() returns the value of the cached inverse
##
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Function cacheSolve calculates the inverse of an extended matrix representation
## obtained from makeCacheMatrix, it takes advantage of extended matrix caching
## capabilities to store the inverse and retrieve it when available.
##
## makeSolve(x) returns the inverse of extended cahecing matrix x
##
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
| /cachematrix.R | no_license | libertil/ProgrammingAssignment2 | R | false | false | 2,039 | r | ## Functions makeCacheMatrix and cacheSolve are used to implement an extended matrix
## representation that can be used to cache the inverse of the matrix after computation.
## The extended representation can be used to reduce computational cost when the inverse of
## matrix is used multiple instances without the need to implement ad hoc caching strategies.
##
## makeCacheMatrix creates an instance of the extended matrix
## cacheSolve calculates the inverse of extended matrix using cahced value if available
##
## matrix is supposed to be invertible
## Function makeCacheMatrix creates an extended matrix representation that supports
## caching of the matrix inverse. In order to take advantage of cached computations
## the function cacheSolve should be used to compute the inverse of the extended
## matrix.
##
## x1<-makeCacheMatrix(x) returns extended representation of matrix x
##
## x1$set(y) changes matrix value and clears the cached inverse
## x1$get() gets the underlining matrix value
## x1$setinverse(inverse) sets the value of the cached inverse
## x1$getinverse() returns the value of the cached inverse
##
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Function cacheSolve calculates the inverse of an extended matrix representation
## obtained from makeCacheMatrix, it takes advantage of extended matrix caching
## capabilities to store the inverse and retrieve it when available.
##
## makeSolve(x) returns the inverse of extended cahecing matrix x
##
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
mtcars
mpg.lm <- lm(mpg ~ hp, data = mtcars)
summary(mpg.lm)
plot(mpg ~ hp, data = mtcars)
mtcars
mpg2.lm <- lm(mpg ~ hp + hp^2, data = mtcars)
summary(mpg2.lm)
plot(mpg ~ predict(mpg2.lm), data = mtcars)
library(ggplot2)
library(lattice)
library(caret)
set.seed(10)
require(caret)
model.mtcars_lm <- train(mpg ~ wt, data=mtcars, method="lm")
coef.icept <- coef(model.mtcars_lm$finalModel)[1] ## Intercept
coef.slope <- coef(model.mtcars_lm$finalModel)[2] ## Slope
ggplot(data=mtcars, aes(x=wt, y=mpg)) + geom_point() + geom_abline(slope=coef.slope, intercept=coef.icept, color="red")
set.seed(10)
subset75 <- createDataPartition(y=mtcars$mpg, p=.75, list=FALSE)
subset75
training <- mtcars[subset75,]
training
testing <- mtcars[-subset75,]
testing
mtcarReg <- train(mpg~., data=training, method="lm")
summary(mtcarReg)
predict(mtcarReg, testing)
| /CaretPrediction.R | no_license | sridharp2992/ML-----R | R | false | false | 859 | r |
mtcars
mpg.lm <- lm(mpg ~ hp, data = mtcars)
summary(mpg.lm)
plot(mpg ~ hp, data = mtcars)
mtcars
mpg2.lm <- lm(mpg ~ hp + hp^2, data = mtcars)
summary(mpg2.lm)
plot(mpg ~ predict(mpg2.lm), data = mtcars)
library(ggplot2)
library(lattice)
library(caret)
set.seed(10)
require(caret)
model.mtcars_lm <- train(mpg ~ wt, data=mtcars, method="lm")
coef.icept <- coef(model.mtcars_lm$finalModel)[1] ## Intercept
coef.slope <- coef(model.mtcars_lm$finalModel)[2] ## Slope
ggplot(data=mtcars, aes(x=wt, y=mpg)) + geom_point() + geom_abline(slope=coef.slope, intercept=coef.icept, color="red")
set.seed(10)
subset75 <- createDataPartition(y=mtcars$mpg, p=.75, list=FALSE)
subset75
training <- mtcars[subset75,]
training
testing <- mtcars[-subset75,]
testing
mtcarReg <- train(mpg~., data=training, method="lm")
summary(mtcarReg)
predict(mtcarReg, testing)
|
testlist <- list(A = structure(c(2.17107980817984e+205, 9.5381833136052e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613123513-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 322 | r | testlist <- list(A = structure(c(2.17107980817984e+205, 9.5381833136052e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
###disable scientific notation so FIPS / ZCTA codes and large IDs print as plain digits###
options(scipen = 999)
###load packages###
library(data.table)
###foreign provides read.dbf() for the TIGER FACESAL relationship tables###
library(foreign)
library(sf)
library(censusapi)
library(igraph)
library(censusxy)
library(sp)
library(rgeos)
### NOTE(review): forces data.table onto a single thread — presumably for
### reproducibility or to avoid thread oversubscription; confirm intent.
data.table::setDTthreads(1)
generate_USCB_ZCTA_network_file <- function(FIPS_dt, USCB_TIGER.path, omit.park_openspace=TRUE, omit.unpopulated=TRUE, use.bridges=TRUE, ADDR_dt=NULL){
FIPS.dt <- copy(as.data.table(FIPS_dt))
###pad state and county codes with leading zeros###
FIPS.dt[,state := sprintf("%02d", as.numeric(state))]
FIPS.dt[,county := sprintf("%03d", as.numeric(county))]
FIPS.dt <- unique(FIPS.dt[,c("state","county"),with=FALSE])
old.wd <- getwd()
setwd(USCB_TIGER.path)
#######################################################################
###consolidate county-level edge shapefiles into a single data.table###
#######################################################################
edge.files <- paste0("tl_2019_",FIPS.dt$state,FIPS.dt$county,"_edges")
edges.dt <- rbindlist(lapply(edge.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("EDGES",j), j, stringsAsFactors = F, quiet=T)
temp.sf$edge_len <- st_length(temp.sf)
###convert to data.table###
return(as.data.table(st_drop_geometry(temp.sf)))
}), use.names=TRUE, fill=TRUE)
#######################################################################
###consolidate county-level face shapefiles into a single data.table###
#######################################################################
face.files <- paste0("tl_2019_",FIPS.dt$state,FIPS.dt$county,"_faces")
faces.dt <- rbindlist(lapply(face.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("FACES",j), j, stringsAsFactors = F, quiet=T)
temp.sf$poly_area <- as.numeric(st_area(temp.sf))
###convert to data.table###
return(as.data.table(st_drop_geometry(temp.sf)))
}), use.names=TRUE, fill=TRUE)
faces.dt[,USCB_block_10 := paste0(STATEFP10,COUNTYFP10,TRACTCE10,BLOCKCE10)]
suppressWarnings(faces.dt[,ZCTA5CE10 := ifelse(is.na(as.numeric(ZCTA5CE10)),"99999",sprintf("%05d", as.numeric(ZCTA5CE10)))])
##########################################
###additional step to deal with NA ZCTA###
##########################################
faces.sf <- st_sf(rbindlist(lapply(face.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("FACES",j), j, stringsAsFactors = F, quiet=T)
###convert to data.table###
return(as.data.table(temp.sf)[LWFLAG !="P"])
}), use.names=TRUE, fill=TRUE))
faces.sf$USCB_block_10 <- paste0(faces.sf$STATEFP10,faces.sf$COUNTYFP10,faces.sf$TRACTCE10,faces.sf$BLOCKCE10)
suppressWarnings(faces.sf$ZCTA5CE10 <- ifelse(is.na(as.numeric(faces.sf$ZCTA5CE10)),"99999",sprintf("%05d", as.numeric(faces.sf$ZCTA5CE10))))
###create single part polygons from NA ZCTA and then assign arbitrary ID###
faces.sf1 <- st_sf(as.data.table(faces.sf)[ZCTA5CE10=="99999",.(geometry = st_union(geometry)),by=list(ZCTA5CE10)])
faces.sf1 <- st_union(faces.sf1[1:nrow(faces.sf1),], by_feature = T)
suppressWarnings(faces.sf_99999 <- st_cast(faces.sf1,"POLYGON"))
faces.sf_99999$ZCTA5CE10 <- paste0("99999.",1:nrow(faces.sf_99999))
###perform spatial join on multipart polygon and census block centroids###
cb.sf <- st_sf(as.data.table(faces.sf)[ZCTA5CE10=="99999",.(geometry = st_union(geometry)),by=list(USCB_block_10)])
cb.sf <- st_union(cb.sf[1:nrow(cb.sf),], by_feature = T)
###use Surf Point to get contained centroids###
cb.sp <- as(cb.sf, 'Spatial')
cb.sp.pt <- gPointOnSurface(cb.sp, byid=TRUE, id = cb.sp$USCB_block_10)
cb.sp.pt$USCB_block_10 <- row.names(cb.sp.pt)
cb.sf.pt <- st_as_sf(cb.sp.pt)
rm(cb.sp,cb.sp.pt)
cb.sf.pt <- st_set_crs(cb.sf.pt, st_crs(cb.sf))
###perform spatial join on census tract###
suppressMessages(sf.sj <- st_intersects(cb.sf.pt, faces.sf_99999))
int.zone <- lapply(1:length(sf.sj),function(i){
ifelse(length(sf.sj[[i]])==0,NA,sf.sj[[i]][[1]])
})
cb.sf.pt$ZCTA5CE10_sj <- faces.sf_99999$ZCTA5CE10[unlist(int.zone)]
cb.pt.dt <- as.data.table(st_drop_geometry(cb.sf.pt))
faces.dt <- merge(faces.dt,cb.pt.dt,by="USCB_block_10",all.x=TRUE)
faces.dt[,ZCTA5CE10 := ifelse(is.na(ZCTA5CE10_sj),ZCTA5CE10,ZCTA5CE10_sj)]
###future work: use state plane coordinate systems based on county FIPS code###
###generate census block to ZCTA look-up table###
cb2zcta.dt <- unique(faces.dt[,c('USCB_block_10','ZCTA5CE10'),with=FALSE])
rm(cb.sf.pt,faces.sf_99999,faces.sf,faces.sf1)
###########################################
###pull 2010 block-level population data###
###########################################
mycensuskey <-"2ca0b2830ae4835905efab6c35f8cd2b3f570a8a"
my.survey <- "dec/sf1"
my.vars <- c("P001001")
my.vintage <- 2010
api.data_cb <- rbindlist(lapply(1:nrow(FIPS.dt), function(x) as.data.table(getCensus(name = my.survey,
vintage = my.vintage,
key = mycensuskey,
vars = my.vars,
region = "block:*",
regionin = paste0("state:",FIPS.dt[x]$state,"+county:",FIPS.dt[x]$county)))))
api.data_cb[,USCB_block_10 := paste0(state,county,tract,block)]
#test.dt <- merge(cb2zcta.dt,api.data_cb,by="USCB_block_10",all.x=TRUE)
##################################################
###load Topological Faces / Area Landmark files###
##################################################
###FACESAL dbf files relate topological faces (TFID) to area landmarks (AREAID)###
facesal.files <- paste0("tl_2019_",unique(FIPS.dt$state),"_facesal")
facesal.dt <- rbindlist(lapply(facesal.files,function(j){
###read in dbf (attribute table only; no geometry needed here)###
return(as.data.table(read.dbf(file.path(file.path("FACESAL",j),paste0(j,'.dbf')))))
}), use.names=TRUE, fill=TRUE)
##############################
###load Area Landmark files###
##############################
arealm.files <- paste0("tl_2019_",unique(FIPS.dt$state),"_arealm")
arealm.dt <- rbindlist(lapply(arealm.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("AREALM",j), j, stringsAsFactors = F, quiet=T)
#temp.sf$poly_area <- as.numeric(st_area(temp.sf))
###convert to data.table###
return(as.data.table(st_drop_geometry(temp.sf)))
}), use.names=TRUE, fill=TRUE)
###restrict landmarks list to parks, zoos, cemeteries and other unpopulated open spaces###
###(p.os holds the MTFCC landmark codes the author treats as open space)###
p.os <- c("K2180", "K2181", "K2183", "K2184", "K2185", "K2186", "K2187", "K2188", "K2189", "K2190", "K2582", "K2586", "K2564", "K2451", "K2456", "K2457", "K2561", "K2564")
arealm.dt <- arealm.dt[(MTFCC %in% p.os)]
###calculate proportion of census block that is park or open space###
###chain: landmark -> AREAID -> TFID -> face -> block; LWFLAG=="P" faces are###
###excluded (treated as water elsewhere in this file)###
arealm.dt <- merge(arealm.dt, facesal.dt, by="AREAID")
arealm.dt <- merge(arealm.dt, faces.dt[LWFLAG !="P", c("TFID","ZCTA5CE10","USCB_block_10","poly_area"),with=FALSE], by="TFID")
###poly_area.x = landmark-covered area per block, poly_area.y = total block area###
arealm.dt <- merge(arealm.dt[,.(poly_area=sum(poly_area)),by=list(USCB_block_10)], faces.dt[LWFLAG != "P",.(poly_area=sum(poly_area)), by=USCB_block_10], by="USCB_block_10")
arealm.dt[,prop.area := round(poly_area.x/poly_area.y,2)]
###attach 2010 population so fully-landmark blocks with census population can be detected later###
arealm.dt <- merge(arealm.dt, api.data_cb[,.(P001001=sum(P001001)),by=list(USCB_block_10)], by="USCB_block_10",all.x=TRUE)
#############################################
###generate table of census blocks to omit###
#############################################
###omit.dt ends up listing ZCTAs whose entire land area is unpopulated###
###(optionally flagged as park/open space), for optional exclusion later###
omit.dt <- merge(faces.dt[LWFLAG !="P", .(poly_area=sum(poly_area)),by=list(USCB_block_10,ZCTA5CE10)], api.data_cb[,c("USCB_block_10","P001001"), with=FALSE], by="USCB_block_10", all.x=TRUE)
omit.dt[,P001001 := ifelse(is.na(P001001),0,P001001)]
###deal with errors where unpopulated parks/open spaces have P001001 > 0 (e.g., Central Park, Bronx Zoo)###
omit.dt[,P001001 := ifelse(USCB_block_10 %in% unique(arealm.dt[P001001 > 0 & prop.area==1]$USCB_block_10), 0, P001001)]
###poly_area.x = unpopulated area per ZCTA, poly_area.y = total area per ZCTA###
omit.dt <- merge(omit.dt[P001001==0,.(poly_area=sum(poly_area)),by=list(ZCTA5CE10)],omit.dt[,.(poly_area=sum(poly_area)),by=list(ZCTA5CE10)], by="ZCTA5CE10", all.y=TRUE)
omit.dt[,prop.area := round(poly_area.x/poly_area.y,2)]
###keep only ZCTAs that are 100% unpopulated###
omit.dt <- omit.dt[prop.area==1]
###NOTE(review): substr(USCB_block_10,1,11) yields an 11-char state+county+tract###
###prefix, which is compared against 5-char ZCTA5CE10 values -- this test looks###
###carried over from a tract-level version and may never match; confirm intent.###
omit.dt[,type := ifelse(ZCTA5CE10 %in% unique(substr(arealm.dt[P001001 > 0 & prop.area==1]$USCB_block_10,1,11)),"park_openspace","unpopulated")]
###################################################
###generate a table of neighboring census blocks###
###################################################
###each TIGER edge (TLID) separates a left face (TFIDL) and right face (TFIDR);###
###joining both sides to their blocks yields block-adjacency with shared edge length###
b.dt <- merge(merge(unique(edges.dt[,c("TLID","TFIDL","TFIDR","edge_len"),with=FALSE]),unique(faces.dt[,c("TFID","USCB_block_10","LWFLAG"),with=FALSE]),by.x="TFIDL",by.y="TFID"), unique(faces.dt[,c("TFID","USCB_block_10","LWFLAG"),with=FALSE]),by.x="TFIDR",by.y="TFID")
b.dt <- b.dt[USCB_block_10.x != USCB_block_10.y]
###canonicalize each pair so .1 is the numerically smaller block GEOID###
###(15-digit IDs are below 2^53, so as.numeric is exact here)###
b.dt[,USCB_block_10.1 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), USCB_block_10.y, USCB_block_10.x)]
b.dt[,LWFLAG.1 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), LWFLAG.y, LWFLAG.x)]
b.dt[,USCB_block_10.2 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), USCB_block_10.x, USCB_block_10.y)]
b.dt[,LWFLAG.2 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), LWFLAG.x, LWFLAG.y)]
b.dt2 <- b.dt[,.(edge_len=sum(edge_len)),by=list(USCB_block_10.1,LWFLAG.1,USCB_block_10.2,LWFLAG.2)]
###drop pairs involving "P"-flagged (water) faces###
b.dt2 <- b.dt2[LWFLAG.1 != "P" & LWFLAG.2 != "P"]
#############################################################
###determine number of total number of neighbors per block###
#############################################################
###all relationships###
zz1 <- b.dt2[,.(tot=.N),by=USCB_block_10.1]
setnames(zz1,c("USCB_block_10.1"),c("USCB_block_10"))
zz2 <- b.dt2[,.(tot=.N),by=USCB_block_10.2]
setnames(zz2,c("USCB_block_10.2"),c("USCB_block_10"))
block.dt1 <- rbindlist(list(zz1,zz2),use.names=TRUE,fill=TRUE)
###inter county relationships (first 5 chars of GEOID = state+county FIPS)###
zz1 <- b.dt2[substr(USCB_block_10.1,1,5) != substr(USCB_block_10.2,1,5),.(tot.c=.N),by=USCB_block_10.1]
setnames(zz1,c("USCB_block_10.1"),c("USCB_block_10"))
zz2 <- b.dt2[substr(USCB_block_10.1,1,5) != substr(USCB_block_10.2,1,5),.(tot.c=.N),by=USCB_block_10.2]
setnames(zz2,c("USCB_block_10.2"),c("USCB_block_10"))
block.dt2 <- rbindlist(list(zz1,zz2),use.names=TRUE,fill=TRUE)
block_rel.dt <- merge(block.dt1,block.dt2,by="USCB_block_10",all.x=TRUE,all.y=TRUE)
rm(zz1,zz2,block.dt1,block.dt2)
block_rel.dt[,tot.c := ifelse(is.na(tot.c),0,tot.c)]
block_rel.dt <- merge(block_rel.dt, api.data_cb[,c("USCB_block_10","P001001"), with=FALSE], by="USCB_block_10", all.x=TRUE)
###remove piers misassigned to another county (e.g., piers in Queens and Brooklyn misassigned to Manhattan)###
###heuristic: unpopulated blocks whose neighbors are ALL in another county###
piers.dt <- block_rel.dt[tot.c==tot & P001001==0]
b.dt2[,is.pier := ifelse(USCB_block_10.1 %in% piers.dt$USCB_block_10 | USCB_block_10.2 %in% piers.dt$USCB_block_10,1,0)]
###join to get ZCTA from blocks###
b.dt2 <- merge(b.dt2,cb2zcta.dt,by.x="USCB_block_10.1",by.y="USCB_block_10",all.x=TRUE)
b.dt2 <- merge(b.dt2,cb2zcta.dt,by.x="USCB_block_10.2",by.y="USCB_block_10",all.x=TRUE)
###drop non-numeric ZCTA labels (the "99999.<k>" placeholders survive because###
###as.numeric("99999.1") parses; warnings from other strings are suppressed)###
suppressWarnings(b.dt2 <- b.dt2[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
b.dt2[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
b.dt2[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
b.dt2[,c("ZCTA5CE10.x","ZCTA5CE10.y"):= NULL]
###aggregate by ZCTA and exclude piers###
neighbors.dt <- b.dt2[is.pier==0,.(edge_len=sum(as.numeric(edge_len))),by=list(ZCTA5CE10.1,ZCTA5CE10.2)]
neighbors.dt <- neighbors.dt[ZCTA5CE10.1 != ZCTA5CE10.2]
############################
###generate a node tables###
############################
###collect both endpoint nodes (TNIDF/TNIDT) of every edge with the faces on each side###
node.dt1 <- edges.dt[,c("TNIDF","TFIDL","TFIDR"),with=FALSE]
setnames(node.dt1,c("TNIDF"),c("TNID"))
node.dt2 <- edges.dt[,c("TNIDT","TFIDL","TFIDR"),with=FALSE]
setnames(node.dt2,c("TNIDT"),c("TNID"))
node.dt <- unique(rbindlist(list(node.dt1,node.dt2),use.names=TRUE,fill=TRUE))
node.dt <- merge(node.dt,faces.dt[,c("TFID", "USCB_block_10", "ZCTA5CE10","LWFLAG"),with=FALSE],by.x="TFIDL",by.y="TFID",all.x=TRUE)
node.dt <- merge(node.dt,faces.dt[,c("TFID", "USCB_block_10", "ZCTA5CE10","LWFLAG"),with=FALSE],by.x="TFIDR",by.y="TFID",all.x=TRUE)
###melt left/right face attributes into long form: one row per node-ZCTA combination###
node.dt.m <- melt(node.dt, id.vars = c("TNID"), measure = list(c("ZCTA5CE10.x", "ZCTA5CE10.y"), c("LWFLAG.x", "LWFLAG.y")), value.name = c("ZCTA5CE10", "LWFLAG"))
node.dt.m[,variable:=NULL]
###NOTE(review): node.dt.all appears unused in the remainder of this function --###
###confirm before removing###
node.dt.all <- unique(copy(node.dt.m))
###remove water tracts###
node.dt.m <- node.dt.m[LWFLAG != "P"]
node.dt.m[,LWFLAG:=NULL]
node.dt.m <- unique(node.dt.m)
#########################################################
###generate table for tracts that share a single point###
#########################################################
###same melt, but at block level: one row per node-block combination###
pt.dt <- melt(node.dt, id.vars = c("TNID"), measure = list(c("USCB_block_10.x", "USCB_block_10.y"), c("LWFLAG.x", "LWFLAG.y")), value.name = c("USCB_block_10", "LWFLAG"))
###remove tables that are no longer needed###
rm(node.dt1,node.dt2,node.dt)
###remove water tracts###
pt.dt <- pt.dt[LWFLAG != "P"]
pt.dt[,c('variable','LWFLAG'):=NULL]
pt.dt <- unique(pt.dt)
###self-join on node ID: blocks that touch at the same point become pairs###
pt.dt <- merge(pt.dt,pt.dt,by="TNID",allow.cartesian=TRUE)
pt.dt <- pt.dt[USCB_block_10.x != USCB_block_10.y]
###remove piers###
pt.dt[,is.pier := ifelse((substr(USCB_block_10.x,1,5) != substr(USCB_block_10.y,1,5)) & ((USCB_block_10.x %in% piers.dt$USCB_block_10) | (USCB_block_10.y %in% piers.dt$USCB_block_10)), 1, 0)]
pt.dt <- pt.dt[is.pier==0]
###join to get ZCTA from blocks###
pt.dt <- merge(pt.dt,cb2zcta.dt,by.x="USCB_block_10.x",by.y="USCB_block_10",all.x=TRUE)
pt.dt <- merge(pt.dt,cb2zcta.dt,by.x="USCB_block_10.y",by.y="USCB_block_10",all.x=TRUE)
suppressWarnings(pt.dt <- pt.dt[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
###canonicalize pair order (smaller ZCTA first), mirroring neighbors.dt###
pt.dt[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
pt.dt[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
pt.dt[,c("TNID","USCB_block_10.x","USCB_block_10.y","is.pier","ZCTA5CE10.x","ZCTA5CE10.y"):=NULL]
pt.dt <- unique(pt.dt)[ZCTA5CE10.1 != ZCTA5CE10.2]
###keep only point-contacts NOT already present as shared-edge neighbors###
pt.dt <- merge(pt.dt, neighbors.dt, by=c("ZCTA5CE10.1", "ZCTA5CE10.2"), all.x=TRUE)
pt.dt <- pt.dt[is.na(edge_len)]
pt.dt[,edge_len := 0]
if(nrow(pt.dt)>0){
###combine point and line results###
neighbors.dt <- rbindlist(list(neighbors.dt,pt.dt), use.names=TRUE, fill=TRUE)
}
###edge_len == 0 marks point-contact pairs###
neighbors.dt[,type := ifelse(as.numeric(edge_len)==0,"shared point","shared edge")]
########################
###next step: bridges###
########################
###if line is a bridge, capture all segments###
###the rationale being that bridges do not start immediately at the shore but somewhat inland, instead###
bridges_names <- unique(edges.dt[PASSFLG == "B" & grepl("Bri?d?ge?",FULLNAME,ignore.case=TRUE)]$FULLNAME)
###return all bridge segments that are not railroad or boundary line types###
bridges.dt <- edges.dt[((PASSFLG == "B") | (FULLNAME %in% bridges_names)) & !(MTFCC %in% c("P0001","P0004","R1011"))]
#################################################
###use igraph to form bridges from edges lines###
#################################################
###each connected component of the bridge-segment graph is treated as one bridge###
e.dt <- unique(bridges.dt[,c("TNIDF","TNIDT"),with=FALSE])
###generate graph object###
net <- graph_from_data_frame(d=e.dt, directed=F)
dg <- decompose.graph(net)
###assign a bridge-group id (b_grp) to every node; seq_along() is safe when###
###no bridge segments exist (1:length() would have produced c(1, 0) and errored)###
dg.dt <- rbindlist(lapply(seq_along(dg),function(i){
return(unique(data.table(TNID=as.numeric(trimws(V(dg[[i]])$name)), b_grp=i)))
}),use.names=TRUE,fill=TRUE)
#####################################################
###capture all start and end nodes in bridge lines###
#####################################################
###a node with from_tot==0 or to_tot==0 is an endpoint of a bridge component###
bridges.nodes.dt <- data.table(TNID=c(bridges.dt$TNIDF,bridges.dt$TNIDT))[,.(tot=.N),by=TNID]
bridges.nodes.dt <- merge(bridges.nodes.dt,bridges.dt[,.(from_tot = .N),by=TNIDF],by.x="TNID",by.y="TNIDF",all.x=TRUE)
bridges.nodes.dt <- merge(bridges.nodes.dt,bridges.dt[,.(to_tot = .N),by=TNIDT], by.x="TNID",by.y="TNIDT",all.x=TRUE)
bridges.nodes.dt[,from_tot := ifelse(is.na(from_tot),0,from_tot)]
bridges.nodes.dt[,to_tot := ifelse(is.na(to_tot),0,to_tot)]
bridges.nodes.dt <- merge(bridges.nodes.dt, dg.dt, by="TNID", all.x=TRUE)
###attach the ZCTA touching each endpoint, then pair up ZCTAs within each bridge group###
bridges.nodes.dt_ends <- unique(merge(bridges.nodes.dt[to_tot==0 | from_tot==0], node.dt.m, by="TNID")[,c("b_grp","ZCTA5CE10"),with=FALSE])
bridges.nodes.dt_ends <- merge(bridges.nodes.dt_ends,bridges.nodes.dt_ends,by="b_grp", allow.cartesian=TRUE)
bridges.nodes.dt_ends <- unique(bridges.nodes.dt_ends)[ZCTA5CE10.x != ZCTA5CE10.y]
suppressWarnings(bridges.nodes.dt_ends <- bridges.nodes.dt_ends[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
bridges.nodes.dt_ends[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
bridges.nodes.dt_ends[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
bridges.nodes.dt_ends <- unique(bridges.nodes.dt_ends[,c("b_grp","ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE])
###get length by node###
bridges.dt <- merge(bridges.dt, dg.dt, by.x = "TNIDF", by.y="TNID", all.x=TRUE)
###create table connecting tracts on each side of bridge###
bridges.dt_agg <- bridges.dt[,.(edge_len=sum(edge_len)),by=list(b_grp,FULLNAME)]
bridges.dt_agg[,bridge_length := sum(edge_len), by=b_grp]
###keep the dominant (longest-total) FULLNAME per bridge group###
bridges.dt_agg <- bridges.dt_agg[bridges.dt_agg[, .I[which.max(edge_len)], by=list(b_grp)]$V1]
bridges.dt_agg[,edge_len := NULL]
bridges.dt_agg <- merge(bridges.dt_agg, bridges.nodes.dt_ends, by="b_grp", all.x=TRUE)
###for each ZCTA pair keep the shortest connecting bridge###
bridges.dt_agg <- bridges.dt_agg[bridges.dt_agg[, .I[which.min(bridge_length)], by=list(ZCTA5CE10.1, ZCTA5CE10.2)]$V1]
setorder(bridges.dt_agg, ZCTA5CE10.1, ZCTA5CE10.2)
###code to deal with bridge connections where one or more ZCTA are NA###
###iteratively substitute "99999.<k>" placeholder ZCTAs with the real ZCTAs###
###they neighbor, until no placeholders remain or the table stops changing.###
###(nrow(...) > 0 is already a logical scalar -- no ifelse() needed.)###
cont <- nrow(bridges.dt_agg[grepl("99999",ZCTA5CE10.1) | grepl("99999",ZCTA5CE10.2)]) > 0
while(cont){
xx <- copy(bridges.dt_agg)
#xx[,u_id := .I]
#xx[grepl("99999",ZCTA5CE10.1) | grepl("99999",ZCTA5CE10.2)]
###each of the four merges below swaps a placeholder on one side of the pair###
###with a neighboring real ZCTA taken from neighbors.dt###
###merge: x1=y1###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.1),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.1","ZCTA5CE10.1_new"))
xx <- merge(xx,yy,by="ZCTA5CE10.1",all.x=TRUE)
xx[,ZCTA5CE10.1 := ifelse(is.na(ZCTA5CE10.1_new),ZCTA5CE10.1,ZCTA5CE10.1_new)]
xx[,ZCTA5CE10.1_new := NULL]
xx <- unique(xx)
###merge: x2=y1###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.1),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.2","ZCTA5CE10.2_new"))
xx <- merge(xx,yy,by="ZCTA5CE10.2",all.x=TRUE)
xx[,ZCTA5CE10.2 := ifelse(is.na(ZCTA5CE10.2_new),ZCTA5CE10.2,ZCTA5CE10.2_new)]
xx[,ZCTA5CE10.2_new := NULL]
xx <- unique(xx)
###merge: x2=y2###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.2),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.2_new","ZCTA5CE10.2"))
xx <- merge(xx,yy,by="ZCTA5CE10.2",all.x=TRUE)
xx[,ZCTA5CE10.2 := ifelse(is.na(ZCTA5CE10.2_new),ZCTA5CE10.2,ZCTA5CE10.2_new)]
xx[,ZCTA5CE10.2_new := NULL]
xx <- unique(xx)
###merge: x1=y2###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.2),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.1_new","ZCTA5CE10.1"))
xx <- merge(xx,yy,by="ZCTA5CE10.1",all.x=TRUE)
xx[,ZCTA5CE10.1 := ifelse(is.na(ZCTA5CE10.1_new),ZCTA5CE10.1,ZCTA5CE10.1_new)]
xx[,ZCTA5CE10.1_new := NULL]
xx <- unique(xx)
###re-canonicalize pair order after substitution###
setnames(xx,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.x","ZCTA5CE10.y"))
xx[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
xx[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
xx <- xx[,names(bridges.dt_agg),with=FALSE]
xx <- xx[xx[, .I[which.min(bridge_length)], by=list(ZCTA5CE10.1, ZCTA5CE10.2)]$V1]
setorder(xx, ZCTA5CE10.1, ZCTA5CE10.2)
###stop when placeholders are gone, or when an iteration makes no change (fixed point)###
if(nrow(xx[grepl("99999",ZCTA5CE10.1) | grepl("99999",ZCTA5CE10.2)]) == 0){
cont <- FALSE
} else if(isTRUE(all.equal(xx, bridges.dt_agg, ignore.row.order = TRUE))){
cont <- FALSE
} else{
cont <- TRUE
}
bridges.dt_agg <- copy(xx)
rm(xx)
}
###drop any placeholder pairs that could not be resolved###
bridges.dt_agg <- bridges.dt_agg[(!(grepl("^99999",ZCTA5CE10.1))) & (!(grepl("^99999",ZCTA5CE10.2)))]
###remove bridge relationships that are already in neighbor relationships table###
keep.cols <- names(bridges.dt_agg)
bridges.dt_agg <- merge(bridges.dt_agg,neighbors.dt,by=c("ZCTA5CE10.1","ZCTA5CE10.2"),all.x=TRUE)
bridges.dt_agg <- bridges.dt_agg[is.na(edge_len),keep.cols,with=FALSE]
bridges.dt_agg <- bridges.dt_agg[!is.na(ZCTA5CE10.1) & !is.na(ZCTA5CE10.2)]
bridges.dt_agg[,b_grp := NULL]
setorder(bridges.dt_agg,ZCTA5CE10.1,ZCTA5CE10.2)
bridges.dt_agg[,type := "bridge connection"]
#####################################
###ZCTA connected by two addresses###
#####################################
###optional user-supplied address pairs (ADDR_dt) are geocoded to blocks and###
###their ZCTAs are linked as "manual" connections (e.g., ferry terminals)###
if(!is.null(ADDR_dt)) {
if("data.frame" %in% class(ADDR_dt)) {
if(all(c("ADDR","CITY","STATE","ZIP","group_ID") %in% names(ADDR_dt)) & (nrow(ADDR_dt) > 1)) {
addr.dt <- unique(copy(ADDR_dt)[,c("ADDR","CITY","STATE","ZIP","group_ID"), with=FALSE])
###geocode via the Census Bureau geocoder; returns geography IDs per address###
###NOTE(review): class = 'dataframe' -- the data.table-style subsetting below###
###assumes gc.dt behaves as a data.table; confirm cxy_geocode's return class.###
gc.dt <- cxy_geocode(addr.dt, street = 'ADDR', city = 'CITY', state = 'STATE', zip = 'ZIP', return = 'geographies', class = 'dataframe', output = 'simple', vintage = 'Current_Current', benchmark = "Public_AR_Current")
gc.dt <- gc.dt[!(is.na(cxy_state_id)) & !(is.na(cxy_county_id)) & !(is.na(cxy_tract_id)) & !(is.na(cxy_block_id))]
if(nrow(addr.dt) > nrow(gc.dt)){
warning("Some addresses in your address table failed to geocode. Please check your address table.\n")
}
###rebuild the 15-digit block GEOID from the geocoder's component IDs###
gc.dt[,USCB_block_10 := paste0(sprintf("%02d", as.numeric(cxy_state_id)),sprintf("%03d", as.numeric(cxy_county_id)),sprintf("%06d", as.numeric(cxy_tract_id)),sprintf("%04d", as.numeric(cxy_block_id)))]
gc.dt <- merge(gc.dt,cb2zcta.dt,by="USCB_block_10",all.x=TRUE)
gc.dt[,tot := .N, by=group_ID]
if(nrow(gc.dt) > nrow(gc.dt[tot > 1])){
warning("Some addresses in your address table are without fellow group members. Please check your address table.\n")
}
gc.dt <- gc.dt[tot > 1]
if(nrow(gc.dt) > 0){
###self-join within each group_ID to form ZCTA pairs###
gc.dt <- merge(gc.dt[,c('group_ID','ZCTA5CE10'),with=FALSE],gc.dt[,c('group_ID','ZCTA5CE10'),with=FALSE],by="group_ID")[ZCTA5CE10.x != ZCTA5CE10.y]
suppressWarnings(gc.dt <- gc.dt[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
###canonicalize pair order (smaller ZCTA first), as elsewhere###
gc.dt[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
gc.dt[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
gc.dt[,c("ZCTA5CE10.x","ZCTA5CE10.y"):=NULL]
gc.dt <- gc.dt[gc.dt[, .I[which.min(group_ID)], by=list(ZCTA5CE10.1, ZCTA5CE10.2)]$V1]
###remove address pair relationships that are already in neighbor relationships table###
keep.cols <- names(gc.dt)
gc.dt <- merge(gc.dt,neighbors.dt,by=c("ZCTA5CE10.1","ZCTA5CE10.2"),all.x=TRUE)
gc.dt <- gc.dt[is.na(edge_len),keep.cols,with=FALSE]
setorder(gc.dt,ZCTA5CE10.1,ZCTA5CE10.2)
if(isTRUE(use.bridges)){
###remove address pair relationships that are already in bridges relationships table###
gc.dt <- merge(gc.dt,bridges.dt_agg,by=c("ZCTA5CE10.1","ZCTA5CE10.2"),all.x=TRUE)
gc.dt <- gc.dt[is.na(bridge_length),keep.cols,with=FALSE]
setorder(gc.dt,ZCTA5CE10.1,ZCTA5CE10.2)
}
###retain address connections: never omit a ZCTA that a manual pair references###
omit.dt <- omit.dt[!(ZCTA5CE10 %in% unique(c(gc.dt$ZCTA5CE10.1,gc.dt$ZCTA5CE10.2)))]
gc.dt[,type := "manual"]
}
} else{
warning("Your address table is missing fields. Please check your address table.\n")
}
}
}
###merge to relationship tables###
###assemble the final pair table: shared edges/points, then manual address###
###pairs, then bridge connections, then self-rows for isolated ZCTAs###
all_pairs.dt <- neighbors.dt[,c("ZCTA5CE10.1","ZCTA5CE10.2","type"),with=FALSE]
if(exists("gc.dt")){
if("data.table" %in% class(gc.dt)){
if(all(c("group_ID","ZCTA5CE10.1","ZCTA5CE10.2","type" ) %in% names(gc.dt)) & (nrow(gc.dt) > 0)) {
all_pairs.dt <- rbindlist(list(gc.dt[,c("ZCTA5CE10.1","ZCTA5CE10.2","type"),with=FALSE],all_pairs.dt), use.names=TRUE, fill=TRUE)
}
}
}
#
##
###
##
#
if(isTRUE(use.bridges)){
all_pairs.dt <- rbindlist(list(bridges.dt_agg[,c("ZCTA5CE10.1","ZCTA5CE10.2","type"),with=FALSE],all_pairs.dt), use.names=TRUE, fill=TRUE)
###retain bridge connections: never omit a ZCTA that a bridge references###
omit.dt <- omit.dt[!(ZCTA5CE10 %in% unique(c(bridges.dt_agg$ZCTA5CE10.1,bridges.dt_agg$ZCTA5CE10.2)))]
}
###optionally drop pairs involving fully park/open-space or fully unpopulated ZCTAs###
if(isTRUE(omit.park_openspace)){
omit.ct <- omit.dt[type=="park_openspace"]$ZCTA5CE10
all_pairs.dt <- all_pairs.dt[!(ZCTA5CE10.1 %in% omit.ct) & !(ZCTA5CE10.2 %in% omit.ct)]
}
if(isTRUE(omit.unpopulated)){
omit.ct <- omit.dt[type=="unpopulated"]$ZCTA5CE10
all_pairs.dt <- all_pairs.dt[!(ZCTA5CE10.1 %in% omit.ct) & !(ZCTA5CE10.2 %in% omit.ct)]
}
#########################################
###add rows for ZCTA without neighbors###
#########################################
###a ZCTA appearing in no pair gets a self-row (ZCTA5CE10.2 = NA, type "self...")###
unq.ct <- suppressWarnings(unique(faces.dt[LWFLAG != "P" & (!(is.na(as.numeric(ZCTA5CE10))))]$ZCTA5CE10))
island.dt <- data.table(ZCTA5CE10.1 = unq.ct[!(unq.ct %in% unique(c(all_pairs.dt$ZCTA5CE10.1,all_pairs.dt$ZCTA5CE10.2)))], ZCTA5CE10.2 = NA)
island.dt <- merge(island.dt, unique(omit.dt[,c("ZCTA5CE10","type"),with=FALSE]), by.x="ZCTA5CE10.1", by.y="ZCTA5CE10", all.x=TRUE)
island.dt[,type := trimws(paste("self",ifelse(is.na(type),"",type)))]
all_pairs.dt <- rbindlist(list(all_pairs.dt,island.dt), use.names=TRUE, fill=TRUE)
###keep only ZCTAs with at least one block inside the requested counties###
keep.ZCTA5CE10 <- suppressWarnings(unique(cb2zcta.dt[(substr(USCB_block_10,1,5) %in% paste0(FIPS.dt$state,FIPS.dt$county)) & (!(is.na(as.numeric(ZCTA5CE10))))]$ZCTA5CE10))
###NOTE(review): the as.numeric(ZCTA5CE10.2) here is not warning-suppressed;###
###values at this point should be ZCTAs or NA, so coercion warnings are unexpected###
all_pairs.dt <- all_pairs.dt[((ZCTA5CE10.1 %in% keep.ZCTA5CE10) & (ZCTA5CE10.2 %in% keep.ZCTA5CE10)) | ((ZCTA5CE10.1 %in% keep.ZCTA5CE10) & (is.na(as.numeric(ZCTA5CE10.2))))]
###drop any surviving "99999" placeholder pairs###
all_pairs.dt <- all_pairs.dt[(!(grepl("^99999",ZCTA5CE10.1))) & (!(grepl("^99999",ZCTA5CE10.2)))]
invisible(gc())
###restore the caller's working directory (set to USCB_TIGER.path earlier)###
setwd(old.wd)
return(all_pairs.dt)
}
# source: /R/generate_USCB_ZCTA_network_file.R | no_license | gmculp/satscan_R_functions | R | 26298 bytes
options(scipen = 999)
###load packages###
library(data.table)
###for reading in dbf###
library(foreign)
library(sf)
library(censusapi)
library(igraph)
library(censusxy)
library(sp)
library(rgeos)
data.table::setDTthreads(1)
###Build a ZCTA-to-ZCTA adjacency ("network") table from 2019 USCB TIGER files.###
###FIPS_dt: data.frame with 'state' and 'county' FIPS columns.###
###USCB_TIGER.path: directory containing EDGES/FACES/FACESAL/AREALM subfolders.###
###omit.park_openspace / omit.unpopulated: drop fully park or unpopulated ZCTAs.###
###use.bridges: also connect ZCTAs joined by bridges.###
###ADDR_dt: optional address table (ADDR, CITY, STATE, ZIP, group_ID) whose###
###geocoded groups are added as "manual" connections. Returns a data.table of###
###ZCTA5CE10.1 / ZCTA5CE10.2 / type pairs.###
generate_USCB_ZCTA_network_file <- function(FIPS_dt, USCB_TIGER.path, omit.park_openspace=TRUE, omit.unpopulated=TRUE, use.bridges=TRUE, ADDR_dt=NULL){
FIPS.dt <- copy(as.data.table(FIPS_dt))
###pad state and county codes with leading zeros###
FIPS.dt[,state := sprintf("%02d", as.numeric(state))]
FIPS.dt[,county := sprintf("%03d", as.numeric(county))]
FIPS.dt <- unique(FIPS.dt[,c("state","county"),with=FALSE])
###remember caller's working directory; restored via setwd(old.wd) before return###
old.wd <- getwd()
setwd(USCB_TIGER.path)
#######################################################################
###consolidate county-level edge shapefiles into a single data.table###
#######################################################################
edge.files <- paste0("tl_2019_",FIPS.dt$state,FIPS.dt$county,"_edges")
edges.dt <- rbindlist(lapply(edge.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("EDGES",j), j, stringsAsFactors = F, quiet=T)
###edge length in the layer's CRS units (used later as shared-border length)###
temp.sf$edge_len <- st_length(temp.sf)
###convert to data.table###
return(as.data.table(st_drop_geometry(temp.sf)))
}), use.names=TRUE, fill=TRUE)
#######################################################################
###consolidate county-level face shapefiles into a single data.table###
#######################################################################
face.files <- paste0("tl_2019_",FIPS.dt$state,FIPS.dt$county,"_faces")
faces.dt <- rbindlist(lapply(face.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("FACES",j), j, stringsAsFactors = F, quiet=T)
temp.sf$poly_area <- as.numeric(st_area(temp.sf))
###convert to data.table###
return(as.data.table(st_drop_geometry(temp.sf)))
}), use.names=TRUE, fill=TRUE)
###15-character block GEOID = state(2) + county(3) + tract(6) + block(4)###
faces.dt[,USCB_block_10 := paste0(STATEFP10,COUNTYFP10,TRACTCE10,BLOCKCE10)]
###faces without a parseable ZCTA get the "99999" placeholder (resolved below)###
suppressWarnings(faces.dt[,ZCTA5CE10 := ifelse(is.na(as.numeric(ZCTA5CE10)),"99999",sprintf("%05d", as.numeric(ZCTA5CE10)))])
##########################################
###additional step to deal with NA ZCTA###
##########################################
###re-read faces WITH geometry, keeping non-"P" (land) faces only###
faces.sf <- st_sf(rbindlist(lapply(face.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("FACES",j), j, stringsAsFactors = F, quiet=T)
###convert to data.table###
return(as.data.table(temp.sf)[LWFLAG !="P"])
}), use.names=TRUE, fill=TRUE))
faces.sf$USCB_block_10 <- paste0(faces.sf$STATEFP10,faces.sf$COUNTYFP10,faces.sf$TRACTCE10,faces.sf$BLOCKCE10)
suppressWarnings(faces.sf$ZCTA5CE10 <- ifelse(is.na(as.numeric(faces.sf$ZCTA5CE10)),"99999",sprintf("%05d", as.numeric(faces.sf$ZCTA5CE10))))
###create single part polygons from NA ZCTA and then assign arbitrary ID###
faces.sf1 <- st_sf(as.data.table(faces.sf)[ZCTA5CE10=="99999",.(geometry = st_union(geometry)),by=list(ZCTA5CE10)])
faces.sf1 <- st_union(faces.sf1[1:nrow(faces.sf1),], by_feature = T)
suppressWarnings(faces.sf_99999 <- st_cast(faces.sf1,"POLYGON"))
###each disjoint NA-ZCTA polygon gets its own placeholder label "99999.<k>"###
faces.sf_99999$ZCTA5CE10 <- paste0("99999.",1:nrow(faces.sf_99999))
###perform spatial join on multipart polygon and census block centroids###
cb.sf <- st_sf(as.data.table(faces.sf)[ZCTA5CE10=="99999",.(geometry = st_union(geometry)),by=list(USCB_block_10)])
cb.sf <- st_union(cb.sf[1:nrow(cb.sf),], by_feature = T)
###use Surf Point to get contained centroids (guaranteed inside the polygon)###
###NOTE(review): rgeos::gPointOnSurface comes from the retired rgeos package;###
###sf::st_point_on_surface would avoid the sp round-trip###
cb.sp <- as(cb.sf, 'Spatial')
cb.sp.pt <- gPointOnSurface(cb.sp, byid=TRUE, id = cb.sp$USCB_block_10)
cb.sp.pt$USCB_block_10 <- row.names(cb.sp.pt)
cb.sf.pt <- st_as_sf(cb.sp.pt)
rm(cb.sp,cb.sp.pt)
cb.sf.pt <- st_set_crs(cb.sf.pt, st_crs(cb.sf))
###perform spatial join on census tract###
###spatially join block-centroid points to the single-part "99999" polygons so###
###faces with no ZCTA inherit an arbitrary-but-consistent "99999.<k>" label###
suppressMessages(sf.sj <- st_intersects(cb.sf.pt, faces.sf_99999))
###st_intersects returns, per point, a vector of matching polygon indices;###
###keep the first match or NA when the point falls inside no polygon.###
###seq_along() is safe when sf.sj is empty (1:length() would yield c(1, 0)).###
int.zone <- lapply(seq_along(sf.sj),function(i){
if(length(sf.sj[[i]])==0) NA else sf.sj[[i]][[1]]
})
cb.sf.pt$ZCTA5CE10_sj <- faces.sf_99999$ZCTA5CE10[unlist(int.zone)]
cb.pt.dt <- as.data.table(st_drop_geometry(cb.sf.pt))
###carry the spatially joined ZCTA back onto faces.dt; keep the original###
###ZCTA5CE10 wherever the join found nothing###
faces.dt <- merge(faces.dt,cb.pt.dt,by="USCB_block_10",all.x=TRUE)
faces.dt[,ZCTA5CE10 := ifelse(is.na(ZCTA5CE10_sj),ZCTA5CE10,ZCTA5CE10_sj)]
###future work: use state plane coordinate systems based on county FIPS code###
###generate census block to ZCTA look-up table###
cb2zcta.dt <- unique(faces.dt[,c('USCB_block_10','ZCTA5CE10'),with=FALSE])
rm(cb.sf.pt,faces.sf_99999,faces.sf,faces.sf1)
###########################################
###pull 2010 block-level population data###
###########################################
###Census API key: prefer the CENSUS_API_KEY environment variable; the###
###hard-coded value is retained only as a backward-compatible fallback.###
###NOTE(review): committing an API key to source control is a security risk --###
###rotate this key and rely on the environment variable instead.###
mycensuskey <- Sys.getenv("CENSUS_API_KEY", unset = "2ca0b2830ae4835905efab6c35f8cd2b3f570a8a")
my.survey <- "dec/sf1"
my.vars <- c("P001001")
my.vintage <- 2010
###one getCensus() call per county; seq_len() is safe should FIPS.dt ever be empty###
api.data_cb <- rbindlist(lapply(seq_len(nrow(FIPS.dt)), function(x) as.data.table(getCensus(name = my.survey,
    vintage = my.vintage,
    key = mycensuskey,
    vars = my.vars,
    region = "block:*",
    regionin = paste0("state:",FIPS.dt[x]$state,"+county:",FIPS.dt[x]$county)))))
###15-character block GEOID = state(2) + county(3) + tract(6) + block(4)###
api.data_cb[,USCB_block_10 := paste0(state,county,tract,block)]
#test.dt <- merge(cb2zcta.dt,api.data_cb,by="USCB_block_10",all.x=TRUE)
##################################################
###load Topological Faces / Area Landmark files###
##################################################
###FACESAL dbf files relate topological faces (TFID) to area landmarks (AREAID)###
facesal.files <- paste0("tl_2019_",unique(FIPS.dt$state),"_facesal")
facesal.dt <- rbindlist(lapply(facesal.files,function(j){
###read in dbf (attribute table only; no geometry needed here)###
return(as.data.table(read.dbf(file.path(file.path("FACESAL",j),paste0(j,'.dbf')))))
}), use.names=TRUE, fill=TRUE)
##############################
###load Area Landmark files###
##############################
arealm.files <- paste0("tl_2019_",unique(FIPS.dt$state),"_arealm")
arealm.dt <- rbindlist(lapply(arealm.files,function(j){
###read in shapefile###
temp.sf <- sf::st_read(file.path("AREALM",j), j, stringsAsFactors = F, quiet=T)
#temp.sf$poly_area <- as.numeric(st_area(temp.sf))
###convert to data.table###
return(as.data.table(st_drop_geometry(temp.sf)))
}), use.names=TRUE, fill=TRUE)
###restrict landmarks list to parks, zoos, cemeteries and other unpopulated open spaces###
###(p.os holds the MTFCC landmark codes the author treats as open space)###
p.os <- c("K2180", "K2181", "K2183", "K2184", "K2185", "K2186", "K2187", "K2188", "K2189", "K2190", "K2582", "K2586", "K2564", "K2451", "K2456", "K2457", "K2561", "K2564")
arealm.dt <- arealm.dt[(MTFCC %in% p.os)]
###calculate proportion of census block that is park or open space###
###chain: landmark -> AREAID -> TFID -> face -> block; "P"-flagged faces excluded###
arealm.dt <- merge(arealm.dt, facesal.dt, by="AREAID")
arealm.dt <- merge(arealm.dt, faces.dt[LWFLAG !="P", c("TFID","ZCTA5CE10","USCB_block_10","poly_area"),with=FALSE], by="TFID")
###poly_area.x = landmark-covered area per block, poly_area.y = total block area###
arealm.dt <- merge(arealm.dt[,.(poly_area=sum(poly_area)),by=list(USCB_block_10)], faces.dt[LWFLAG != "P",.(poly_area=sum(poly_area)), by=USCB_block_10], by="USCB_block_10")
arealm.dt[,prop.area := round(poly_area.x/poly_area.y,2)]
arealm.dt <- merge(arealm.dt, api.data_cb[,.(P001001=sum(P001001)),by=list(USCB_block_10)], by="USCB_block_10",all.x=TRUE)
#############################################
###generate table of census blocks to omit###
#############################################
###omit.dt ends up listing ZCTAs whose entire land area is unpopulated###
omit.dt <- merge(faces.dt[LWFLAG !="P", .(poly_area=sum(poly_area)),by=list(USCB_block_10,ZCTA5CE10)], api.data_cb[,c("USCB_block_10","P001001"), with=FALSE], by="USCB_block_10", all.x=TRUE)
omit.dt[,P001001 := ifelse(is.na(P001001),0,P001001)]
###deal with errors where unpopulated parks/open spaces have P001001 > 0 (e.g., Central Park, Bronx Zoo)###
omit.dt[,P001001 := ifelse(USCB_block_10 %in% unique(arealm.dt[P001001 > 0 & prop.area==1]$USCB_block_10), 0, P001001)]
omit.dt <- merge(omit.dt[P001001==0,.(poly_area=sum(poly_area)),by=list(ZCTA5CE10)],omit.dt[,.(poly_area=sum(poly_area)),by=list(ZCTA5CE10)], by="ZCTA5CE10", all.y=TRUE)
omit.dt[,prop.area := round(poly_area.x/poly_area.y,2)]
omit.dt <- omit.dt[prop.area==1]
###NOTE(review): substr(USCB_block_10,1,11) is an 11-char state+county+tract###
###prefix compared against 5-char ZCTA values -- likely a tract-script carryover;###
###confirm intent.###
omit.dt[,type := ifelse(ZCTA5CE10 %in% unique(substr(arealm.dt[P001001 > 0 & prop.area==1]$USCB_block_10,1,11)),"park_openspace","unpopulated")]
###################################################
###generate a table of neighboring census blocks###
###################################################
###each TIGER edge separates a left (TFIDL) and right (TFIDR) face; joining###
###both sides to their blocks yields block-adjacency with shared edge length###
b.dt <- merge(merge(unique(edges.dt[,c("TLID","TFIDL","TFIDR","edge_len"),with=FALSE]),unique(faces.dt[,c("TFID","USCB_block_10","LWFLAG"),with=FALSE]),by.x="TFIDL",by.y="TFID"), unique(faces.dt[,c("TFID","USCB_block_10","LWFLAG"),with=FALSE]),by.x="TFIDR",by.y="TFID")
b.dt <- b.dt[USCB_block_10.x != USCB_block_10.y]
###canonicalize each pair so .1 is the numerically smaller block GEOID###
b.dt[,USCB_block_10.1 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), USCB_block_10.y, USCB_block_10.x)]
b.dt[,LWFLAG.1 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), LWFLAG.y, LWFLAG.x)]
b.dt[,USCB_block_10.2 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), USCB_block_10.x, USCB_block_10.y)]
b.dt[,LWFLAG.2 := ifelse(as.numeric(USCB_block_10.x) > as.numeric(USCB_block_10.y), LWFLAG.x, LWFLAG.y)]
b.dt2 <- b.dt[,.(edge_len=sum(edge_len)),by=list(USCB_block_10.1,LWFLAG.1,USCB_block_10.2,LWFLAG.2)]
###drop pairs involving "P"-flagged (water) faces###
b.dt2 <- b.dt2[LWFLAG.1 != "P" & LWFLAG.2 != "P"]
#############################################################
###determine number of total number of neighbors per block###
#############################################################
###all relationships###
zz1 <- b.dt2[,.(tot=.N),by=USCB_block_10.1]
setnames(zz1,c("USCB_block_10.1"),c("USCB_block_10"))
zz2 <- b.dt2[,.(tot=.N),by=USCB_block_10.2]
setnames(zz2,c("USCB_block_10.2"),c("USCB_block_10"))
block.dt1 <- rbindlist(list(zz1,zz2),use.names=TRUE,fill=TRUE)
###inter county relationships (first 5 chars of GEOID = state+county FIPS)###
zz1 <- b.dt2[substr(USCB_block_10.1,1,5) != substr(USCB_block_10.2,1,5),.(tot.c=.N),by=USCB_block_10.1]
setnames(zz1,c("USCB_block_10.1"),c("USCB_block_10"))
zz2 <- b.dt2[substr(USCB_block_10.1,1,5) != substr(USCB_block_10.2,1,5),.(tot.c=.N),by=USCB_block_10.2]
setnames(zz2,c("USCB_block_10.2"),c("USCB_block_10"))
block.dt2 <- rbindlist(list(zz1,zz2),use.names=TRUE,fill=TRUE)
block_rel.dt <- merge(block.dt1,block.dt2,by="USCB_block_10",all.x=TRUE,all.y=TRUE)
rm(zz1,zz2,block.dt1,block.dt2)
block_rel.dt[,tot.c := ifelse(is.na(tot.c),0,tot.c)]
block_rel.dt <- merge(block_rel.dt, api.data_cb[,c("USCB_block_10","P001001"), with=FALSE], by="USCB_block_10", all.x=TRUE)
###remove piers misassigned to another county (e.g., piers in Queens and Brooklyn misassigned to Manhattan)###
###heuristic: unpopulated blocks whose neighbors are ALL in another county###
piers.dt <- block_rel.dt[tot.c==tot & P001001==0]
b.dt2[,is.pier := ifelse(USCB_block_10.1 %in% piers.dt$USCB_block_10 | USCB_block_10.2 %in% piers.dt$USCB_block_10,1,0)]
###join to get ZCTA from blocks###
b.dt2 <- merge(b.dt2,cb2zcta.dt,by.x="USCB_block_10.1",by.y="USCB_block_10",all.x=TRUE)
b.dt2 <- merge(b.dt2,cb2zcta.dt,by.x="USCB_block_10.2",by.y="USCB_block_10",all.x=TRUE)
suppressWarnings(b.dt2 <- b.dt2[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
b.dt2[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
b.dt2[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
b.dt2[,c("ZCTA5CE10.x","ZCTA5CE10.y"):= NULL]
###aggregate by ZCTA and exclude piers###
neighbors.dt <- b.dt2[is.pier==0,.(edge_len=sum(as.numeric(edge_len))),by=list(ZCTA5CE10.1,ZCTA5CE10.2)]
neighbors.dt <- neighbors.dt[ZCTA5CE10.1 != ZCTA5CE10.2]
############################
###generate a node tables###
############################
###collect both endpoint nodes (TNIDF/TNIDT) of every edge with the faces on each side###
node.dt1 <- edges.dt[,c("TNIDF","TFIDL","TFIDR"),with=FALSE]
setnames(node.dt1,c("TNIDF"),c("TNID"))
node.dt2 <- edges.dt[,c("TNIDT","TFIDL","TFIDR"),with=FALSE]
setnames(node.dt2,c("TNIDT"),c("TNID"))
node.dt <- unique(rbindlist(list(node.dt1,node.dt2),use.names=TRUE,fill=TRUE))
node.dt <- merge(node.dt,faces.dt[,c("TFID", "USCB_block_10", "ZCTA5CE10","LWFLAG"),with=FALSE],by.x="TFIDL",by.y="TFID",all.x=TRUE)
node.dt <- merge(node.dt,faces.dt[,c("TFID", "USCB_block_10", "ZCTA5CE10","LWFLAG"),with=FALSE],by.x="TFIDR",by.y="TFID",all.x=TRUE)
###melt left/right face attributes into long form: one row per node-ZCTA combination###
node.dt.m <- melt(node.dt, id.vars = c("TNID"), measure = list(c("ZCTA5CE10.x", "ZCTA5CE10.y"), c("LWFLAG.x", "LWFLAG.y")), value.name = c("ZCTA5CE10", "LWFLAG"))
node.dt.m[,variable:=NULL]
###NOTE(review): node.dt.all appears unused later in this function -- confirm###
node.dt.all <- unique(copy(node.dt.m))
###remove water tracts###
node.dt.m <- node.dt.m[LWFLAG != "P"]
node.dt.m[,LWFLAG:=NULL]
node.dt.m <- unique(node.dt.m)
#########################################################
###generate table for tracts that share a single point###
#########################################################
###same melt, but at block level: one row per node-block combination###
pt.dt <- melt(node.dt, id.vars = c("TNID"), measure = list(c("USCB_block_10.x", "USCB_block_10.y"), c("LWFLAG.x", "LWFLAG.y")), value.name = c("USCB_block_10", "LWFLAG"))
###remove tables that are no longer needed###
rm(node.dt1,node.dt2,node.dt)
###remove water tracts###
pt.dt <- pt.dt[LWFLAG != "P"]
pt.dt[,c('variable','LWFLAG'):=NULL]
pt.dt <- unique(pt.dt)
###self-join on node ID: blocks that touch at the same point become pairs###
pt.dt <- merge(pt.dt,pt.dt,by="TNID",allow.cartesian=TRUE)
pt.dt <- pt.dt[USCB_block_10.x != USCB_block_10.y]
###remove piers###
pt.dt[,is.pier := ifelse((substr(USCB_block_10.x,1,5) != substr(USCB_block_10.y,1,5)) & ((USCB_block_10.x %in% piers.dt$USCB_block_10) | (USCB_block_10.y %in% piers.dt$USCB_block_10)), 1, 0)]
pt.dt <- pt.dt[is.pier==0]
###join to get ZCTA from blocks###
pt.dt <- merge(pt.dt,cb2zcta.dt,by.x="USCB_block_10.x",by.y="USCB_block_10",all.x=TRUE)
pt.dt <- merge(pt.dt,cb2zcta.dt,by.x="USCB_block_10.y",by.y="USCB_block_10",all.x=TRUE)
suppressWarnings(pt.dt <- pt.dt[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
###canonicalize pair order (smaller ZCTA first), mirroring neighbors.dt###
pt.dt[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
pt.dt[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
pt.dt[,c("TNID","USCB_block_10.x","USCB_block_10.y","is.pier","ZCTA5CE10.x","ZCTA5CE10.y"):=NULL]
pt.dt <- unique(pt.dt)[ZCTA5CE10.1 != ZCTA5CE10.2]
###keep only point-contacts NOT already present as shared-edge neighbors###
pt.dt <- merge(pt.dt, neighbors.dt, by=c("ZCTA5CE10.1", "ZCTA5CE10.2"), all.x=TRUE)
pt.dt <- pt.dt[is.na(edge_len)]
pt.dt[,edge_len := 0]
if(nrow(pt.dt)>0){
###combine point and line results###
neighbors.dt <- rbindlist(list(neighbors.dt,pt.dt), use.names=TRUE, fill=TRUE)
}
###edge_len == 0 marks point-contact pairs###
neighbors.dt[,type := ifelse(as.numeric(edge_len)==0,"shared point","shared edge")]
########################
###next step: bridges###
########################
###if line is a bridge, capture all segments###
###the rational being that bridges do not start immediatly at the shore but somewhat inland, instead###
bridges_names <- unique(edges.dt[PASSFLG == "B" & grepl("Bri?d?ge?",FULLNAME,ignore.case=TRUE)]$FULLNAME)
###return all bridge segments that are not railroad or boundary line types###
bridges.dt <- edges.dt[((PASSFLG == "B") | (FULLNAME %in% bridges_names)) & !(MTFCC %in% c("P0001","P0004","R1011"))]
#################################################
###use igraph to form bridges from edges lines###
#################################################
e.dt <- unique(bridges.dt[,c("TNIDF","TNIDT"),with=FALSE])
###generate graph object###
net <- graph_from_data_frame(d=e.dt, directed=F)
dg <- decompose.graph(net)
dg.dt <- rbindlist(lapply(1:length(dg),function(i){
return(unique(data.table(TNID=as.numeric(trimws(V(dg[[i]])$name)), b_grp=i)))
}),use.names=TRUE,fill=TRUE)
#####################################################
###capture all start and end nodes in bridge lines###
#####################################################
bridges.nodes.dt <- data.table(TNID=c(bridges.dt$TNIDF,bridges.dt$TNIDT))[,.(tot=.N),by=TNID]
bridges.nodes.dt <- merge(bridges.nodes.dt,bridges.dt[,.(from_tot = .N),by=TNIDF],by.x="TNID",by.y="TNIDF",all.x=TRUE)
bridges.nodes.dt <- merge(bridges.nodes.dt,bridges.dt[,.(to_tot = .N),by=TNIDT], by.x="TNID",by.y="TNIDT",all.x=TRUE)
bridges.nodes.dt[,from_tot := ifelse(is.na(from_tot),0,from_tot)]
bridges.nodes.dt[,to_tot := ifelse(is.na(to_tot),0,to_tot)]
bridges.nodes.dt <- merge(bridges.nodes.dt, dg.dt, by="TNID", all.x=TRUE)
bridges.nodes.dt_ends <- unique(merge(bridges.nodes.dt[to_tot==0 | from_tot==0], node.dt.m, by="TNID")[,c("b_grp","ZCTA5CE10"),with=FALSE])
bridges.nodes.dt_ends <- merge(bridges.nodes.dt_ends,bridges.nodes.dt_ends,by="b_grp", allow.cartesian=TRUE)
bridges.nodes.dt_ends <- unique(bridges.nodes.dt_ends)[ZCTA5CE10.x != ZCTA5CE10.y]
suppressWarnings(bridges.nodes.dt_ends <- bridges.nodes.dt_ends[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
bridges.nodes.dt_ends[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
bridges.nodes.dt_ends[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
bridges.nodes.dt_ends <- unique(bridges.nodes.dt_ends[,c("b_grp","ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE])
###get length by node###
bridges.dt <- merge(bridges.dt, dg.dt, by.x = "TNIDF", by.y="TNID", all.x=TRUE)
###create table connecting tracts on each side of bridge###
bridges.dt_agg <- bridges.dt[,.(edge_len=sum(edge_len)),by=list(b_grp,FULLNAME)]
bridges.dt_agg[,bridge_length := sum(edge_len), by=b_grp]
bridges.dt_agg <- bridges.dt_agg[bridges.dt_agg[, .I[which.max(edge_len)], by=list(b_grp)]$V1]
bridges.dt_agg[,edge_len := NULL]
bridges.dt_agg <- merge(bridges.dt_agg, bridges.nodes.dt_ends, by="b_grp", all.x=TRUE)
bridges.dt_agg <- bridges.dt_agg[bridges.dt_agg[, .I[which.min(bridge_length)], by=list(ZCTA5CE10.1, ZCTA5CE10.2)]$V1]
setorder(bridges.dt_agg, ZCTA5CE10.1, ZCTA5CE10.2)
###code to deal with bridge connections where one or more ZCTA are NA###
cont <- ifelse(nrow(bridges.dt_agg[grepl("99999",ZCTA5CE10.1) | grepl("99999",ZCTA5CE10.2)]) > 0, TRUE, FALSE)
while(cont){
xx <- copy(bridges.dt_agg)
#xx[,u_id := .I]
#xx[grepl("99999",ZCTA5CE10.1) | grepl("99999",ZCTA5CE10.2)]
###merge: x1=y1###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.1),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.1","ZCTA5CE10.1_new"))
xx <- merge(xx,yy,by="ZCTA5CE10.1",all.x=TRUE)
xx[,ZCTA5CE10.1 := ifelse(is.na(ZCTA5CE10.1_new),ZCTA5CE10.1,ZCTA5CE10.1_new)]
xx[,ZCTA5CE10.1_new := NULL]
xx <- unique(xx)
###merge: x2=y1###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.1),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.2","ZCTA5CE10.2_new"))
xx <- merge(xx,yy,by="ZCTA5CE10.2",all.x=TRUE)
xx[,ZCTA5CE10.2 := ifelse(is.na(ZCTA5CE10.2_new),ZCTA5CE10.2,ZCTA5CE10.2_new)]
xx[,ZCTA5CE10.2_new := NULL]
xx <- unique(xx)
###merge: x2=y2###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.2),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.2_new","ZCTA5CE10.2"))
xx <- merge(xx,yy,by="ZCTA5CE10.2",all.x=TRUE)
xx[,ZCTA5CE10.2 := ifelse(is.na(ZCTA5CE10.2_new),ZCTA5CE10.2,ZCTA5CE10.2_new)]
xx[,ZCTA5CE10.2_new := NULL]
xx <- unique(xx)
###merge: x1=y2###
yy <- neighbors.dt[grepl("99999",ZCTA5CE10.2),c("ZCTA5CE10.1","ZCTA5CE10.2"),with=FALSE]
setnames(yy,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.1_new","ZCTA5CE10.1"))
xx <- merge(xx,yy,by="ZCTA5CE10.1",all.x=TRUE)
xx[,ZCTA5CE10.1 := ifelse(is.na(ZCTA5CE10.1_new),ZCTA5CE10.1,ZCTA5CE10.1_new)]
xx[,ZCTA5CE10.1_new := NULL]
xx <- unique(xx)
setnames(xx,c("ZCTA5CE10.1","ZCTA5CE10.2"),c("ZCTA5CE10.x","ZCTA5CE10.y"))
xx[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
xx[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
xx <- xx[,names(bridges.dt_agg),with=FALSE]
xx <- xx[xx[, .I[which.min(bridge_length)], by=list(ZCTA5CE10.1, ZCTA5CE10.2)]$V1]
setorder(xx, ZCTA5CE10.1, ZCTA5CE10.2)
if(nrow(xx[grepl("99999",ZCTA5CE10.1) | grepl("99999",ZCTA5CE10.2)]) == 0){
cont <- FALSE
} else if(isTRUE(all.equal(xx, bridges.dt_agg, ignore.row.order = TRUE))){
cont <- FALSE
} else{
cont <- TRUE
}
bridges.dt_agg <- copy(xx)
rm(xx)
}
bridges.dt_agg <- bridges.dt_agg[(!(grepl("^99999",ZCTA5CE10.1))) & (!(grepl("^99999",ZCTA5CE10.2)))]
###remove bridge relationships that are already in neighbor relationships table###
keep.cols <- names(bridges.dt_agg)
bridges.dt_agg <- merge(bridges.dt_agg,neighbors.dt,by=c("ZCTA5CE10.1","ZCTA5CE10.2"),all.x=TRUE)
bridges.dt_agg <- bridges.dt_agg[is.na(edge_len),keep.cols,with=FALSE]
bridges.dt_agg <- bridges.dt_agg[!is.na(ZCTA5CE10.1) & !is.na(ZCTA5CE10.2)]
bridges.dt_agg[,b_grp := NULL]
setorder(bridges.dt_agg,ZCTA5CE10.1,ZCTA5CE10.2)
bridges.dt_agg[,type := "bridge connection"]
#####################################
###ZCTA connected by two addresses###
#####################################
if(!is.null(ADDR_dt)) {
if("data.frame" %in% class(ADDR_dt)) {
if(all(c("ADDR","CITY","STATE","ZIP","group_ID") %in% names(ADDR_dt)) & (nrow(ADDR_dt) > 1)) {
addr.dt <- unique(copy(ADDR_dt)[,c("ADDR","CITY","STATE","ZIP","group_ID"), with=FALSE])
gc.dt <- cxy_geocode(addr.dt, street = 'ADDR', city = 'CITY', state = 'STATE', zip = 'ZIP', return = 'geographies', class = 'dataframe', output = 'simple', vintage = 'Current_Current', benchmark = "Public_AR_Current")
gc.dt <- gc.dt[!(is.na(cxy_state_id)) & !(is.na(cxy_county_id)) & !(is.na(cxy_tract_id)) & !(is.na(cxy_block_id))]
if(nrow(addr.dt) > nrow(gc.dt)){
warning("Some addresses in your address table failed to geocode. Please check your address table.\n")
}
gc.dt[,USCB_block_10 := paste0(sprintf("%02d", as.numeric(cxy_state_id)),sprintf("%03d", as.numeric(cxy_county_id)),sprintf("%06d", as.numeric(cxy_tract_id)),sprintf("%04d", as.numeric(cxy_block_id)))]
gc.dt <- merge(gc.dt,cb2zcta.dt,by="USCB_block_10",all.x=TRUE)
gc.dt[,tot := .N, by=group_ID]
if(nrow(gc.dt) > nrow(gc.dt[tot > 1])){
warning("Some addresses in your address table are without fellow group members. Please check your address table.\n")
}
gc.dt <- gc.dt[tot > 1]
if(nrow(gc.dt) > 0){
gc.dt <- merge(gc.dt[,c('group_ID','ZCTA5CE10'),with=FALSE],gc.dt[,c('group_ID','ZCTA5CE10'),with=FALSE],by="group_ID")[ZCTA5CE10.x != ZCTA5CE10.y]
suppressWarnings(gc.dt <- gc.dt[!(is.na(as.numeric(ZCTA5CE10.x))) & !(is.na(as.numeric(ZCTA5CE10.y)))])
gc.dt[,ZCTA5CE10.1 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.y,ZCTA5CE10.x)]
gc.dt[,ZCTA5CE10.2 := ifelse(as.numeric(ZCTA5CE10.x) > as.numeric(ZCTA5CE10.y),ZCTA5CE10.x,ZCTA5CE10.y)]
gc.dt[,c("ZCTA5CE10.x","ZCTA5CE10.y"):=NULL]
gc.dt <- gc.dt[gc.dt[, .I[which.min(group_ID)], by=list(ZCTA5CE10.1, ZCTA5CE10.2)]$V1]
###remove address pair relationships that are already in neighbor relationships table###
keep.cols <- names(gc.dt)
gc.dt <- merge(gc.dt,neighbors.dt,by=c("ZCTA5CE10.1","ZCTA5CE10.2"),all.x=TRUE)
gc.dt <- gc.dt[is.na(edge_len),keep.cols,with=FALSE]
setorder(gc.dt,ZCTA5CE10.1,ZCTA5CE10.2)
if(isTRUE(use.bridges)){
###remove address pair relationships that are already in bridges relationships table###
gc.dt <- merge(gc.dt,bridges.dt_agg,by=c("ZCTA5CE10.1","ZCTA5CE10.2"),all.x=TRUE)
gc.dt <- gc.dt[is.na(bridge_length),keep.cols,with=FALSE]
setorder(gc.dt,ZCTA5CE10.1,ZCTA5CE10.2)
}
###retain address connections###
omit.dt <- omit.dt[!(ZCTA5CE10 %in% unique(c(gc.dt$ZCTA5CE10.1,gc.dt$ZCTA5CE10.2)))]
gc.dt[,type := "manual"]
}
} else{
warning("Your address table is missing fields. Please check your address table.\n")
}
}
}
###merge to relationship tables###
all_pairs.dt <- neighbors.dt[,c("ZCTA5CE10.1","ZCTA5CE10.2","type"),with=FALSE]
if(exists("gc.dt")){
if("data.table" %in% class(gc.dt)){
if(all(c("group_ID","ZCTA5CE10.1","ZCTA5CE10.2","type" ) %in% names(gc.dt)) & (nrow(gc.dt) > 0)) {
all_pairs.dt <- rbindlist(list(gc.dt[,c("ZCTA5CE10.1","ZCTA5CE10.2","type"),with=FALSE],all_pairs.dt), use.names=TRUE, fill=TRUE)
}
}
}
#
##
###
##
#
if(isTRUE(use.bridges)){
all_pairs.dt <- rbindlist(list(bridges.dt_agg[,c("ZCTA5CE10.1","ZCTA5CE10.2","type"),with=FALSE],all_pairs.dt), use.names=TRUE, fill=TRUE)
###retain bridge connections###
omit.dt <- omit.dt[!(ZCTA5CE10 %in% unique(c(bridges.dt_agg$ZCTA5CE10.1,bridges.dt_agg$ZCTA5CE10.2)))]
}
if(isTRUE(omit.park_openspace)){
omit.ct <- omit.dt[type=="park_openspace"]$ZCTA5CE10
all_pairs.dt <- all_pairs.dt[!(ZCTA5CE10.1 %in% omit.ct) & !(ZCTA5CE10.2 %in% omit.ct)]
}
if(isTRUE(omit.unpopulated)){
omit.ct <- omit.dt[type=="unpopulated"]$ZCTA5CE10
all_pairs.dt <- all_pairs.dt[!(ZCTA5CE10.1 %in% omit.ct) & !(ZCTA5CE10.2 %in% omit.ct)]
}
#########################################
###add rows for ZCTA without neighbors###
#########################################
unq.ct <- suppressWarnings(unique(faces.dt[LWFLAG != "P" & (!(is.na(as.numeric(ZCTA5CE10))))]$ZCTA5CE10))
island.dt <- data.table(ZCTA5CE10.1 = unq.ct[!(unq.ct %in% unique(c(all_pairs.dt$ZCTA5CE10.1,all_pairs.dt$ZCTA5CE10.2)))], ZCTA5CE10.2 = NA)
island.dt <- merge(island.dt, unique(omit.dt[,c("ZCTA5CE10","type"),with=FALSE]), by.x="ZCTA5CE10.1", by.y="ZCTA5CE10", all.x=TRUE)
island.dt[,type := trimws(paste("self",ifelse(is.na(type),"",type)))]
all_pairs.dt <- rbindlist(list(all_pairs.dt,island.dt), use.names=TRUE, fill=TRUE)
keep.ZCTA5CE10 <- suppressWarnings(unique(cb2zcta.dt[(substr(USCB_block_10,1,5) %in% paste0(FIPS.dt$state,FIPS.dt$county)) & (!(is.na(as.numeric(ZCTA5CE10))))]$ZCTA5CE10))
all_pairs.dt <- all_pairs.dt[((ZCTA5CE10.1 %in% keep.ZCTA5CE10) & (ZCTA5CE10.2 %in% keep.ZCTA5CE10)) | ((ZCTA5CE10.1 %in% keep.ZCTA5CE10) & (is.na(as.numeric(ZCTA5CE10.2))))]
all_pairs.dt <- all_pairs.dt[(!(grepl("^99999",ZCTA5CE10.1))) & (!(grepl("^99999",ZCTA5CE10.2)))]
invisible(gc())
setwd(old.wd)
return(all_pairs.dt)
}
|
# Load the raw power-consumption file; "?" marks missing observations.
power_data <- read.table("household_power_consumption.txt",
                         sep = ";", header = TRUE, na.strings = "?")
# Parse the Date column (stored as day/month/year text).
power_data$Date <- as.Date(power_data$Date, format = "%d/%m/%Y")
# Keep only the two days of interest (2007-02-01 and 2007-02-02).
sub_metering <- subset(power_data,
                       subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# Combine date and time into a single POSIXct timestamp column.
stamp <- paste(as.Date(sub_metering$Date), sub_metering$Time)
sub_metering$Datetime <- as.POSIXct(stamp)
# Line chart of the three sub-metering series against time.
plot(Sub_metering_1 ~ Datetime, data = sub_metering, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
lines(Sub_metering_2 ~ Datetime, data = sub_metering, col = 'Red')
lines(Sub_metering_3 ~ Datetime, data = sub_metering, col = 'Blue')
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to a PNG file (closed by the dev.off() below).
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off() | /plot3.r | no_license | sadashiv19/ExData_Plotting1 | R | false | false | 903 | r |
# Read the power data; "?" encodes missing values.
# (Fix: removed a stray trailing comma that left an empty argument in the call.)
dataTbl <- read.table("household_power_consumption.txt",sep=";",header=TRUE, na.strings="?")
#convert dates to proper format
dataTbl$Date <- as.Date(dataTbl$Date, format="%d/%m/%Y")
#create data subset of required dates
plot3data <- subset(dataTbl,subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Converting dates-time
datetime <- paste(as.Date(plot3data$Date), plot3data$Time)
plot3data$Datetime <- as.POSIXct(datetime)
# Open the PNG device before plotting: dev.copy() from a screen device can
# clip the legend.  The dev.off() that follows this block closes the file.
png(file="plot3.png", height=480, width=480)
# Line chart of the three energy sub-metering series.
# (Fix: y-axis label said "Global Active Power (kilowatts)", copied from a
# different plot; this chart shows sub metering.)
with(plot3data, {
  plot(Sub_metering_1~Datetime, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~Datetime,col='Red')
  lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wskazniki_poziom_indywidualny.R
\name{wskazniki_nie_z_epizodow}
\alias{wskazniki_nie_z_epizodow}
\title{Obliczanie wskaźników na poziomie indywidualnym}
\usage{
wskazniki_nie_z_epizodow(x, maksRokEgz)
}
\arguments{
\item{x}{ramka danych z odpowiedziami na ankietę absolwentów - typowo element
\code{dane} listy zwracanej przez funkcję \code{\link{imputuj_miesiac_pk_1rm}}}
\item{maksRokEgz}{liczba - najpóźniejszy rok, w którym absolwent mógł zdać
egzamin, aby w wynikowym wskaźniku został oznaczony jako osoba, która ten
egzamin zdała}
}
\value{
data frame zawierająca następujące kolumny:
\itemize{
\item{wszystkie kolumny ramki danych przekazanej argumentem \code{x},
których nazwy \strong{nie} zaczynają się od "ABS_",}
\item{\code{SZK_teryt} - nr TERYT powiatu, przeliczony na podstawie
znajdującego się wcześniej w tej zmiennej nr TERYT gminy, na terenie
której znajduje się szkoła,}
\item{\code{UCZ_plec} - płeć ucznia ("M" lub "K"),}
\item{\code{matura_zdana} - wskaźnik opisujący, czy badany zdał maturę
(liczba: 1 - zdał, 0 - nie zdał),}
\item{\code{egz_zaw_zdany} - wskaźnik opisujący, czy badany zdał egzamin
zawodowy, tj. zdał wszystkie egzaminy niezbędne do uzyskania
dyplomu potwierdzającego kwalifikacje w zawodzie (liczba: 1 - zdał,
0 - nie zdał).}
}
}
\description{
Funkcja oblicza zmienne-wskaźniki, które wymagają informacji
nie ze zbioru epizodów.
}
\seealso{
\code{\link{egz_zaw_zdawalnosc}}, \code{\link{matura_zdawalnosc}},
\code{\link{liczba_kobiet}}
}
| /man/wskazniki_nie_z_epizodow.Rd | permissive | tzoltak/MLASZdane | R | false | true | 1,668 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wskazniki_poziom_indywidualny.R
\name{wskazniki_nie_z_epizodow}
\alias{wskazniki_nie_z_epizodow}
\title{Obliczanie wskaznikow na poziomie indywidualnym}
\usage{
wskazniki_nie_z_epizodow(x, maksRokEgz)
}
\arguments{
\item{x}{ramka danych z odpowiedziami na ankietę absolwentów - typowo element
\code{dane} listy zwracanej przez funkcję \code{\link{imputuj_miesiac_pk_1rm}}}
\item{maksRokEgz}{liczba - najpóźniejszy rok, w którym absolwent mógł zdać
egzamin, aby w wynikowym wskaźniku został oznaczony jako osoba, która ten
egzamin zdała}
}
\value{
data frame zawierająca następujące kolumny:
\itemize{
\item{wszystkie kolumny ramki danych przekazanej argumentem \code{x},
których nazwy \strong{nie} zaczynają się od "ABS_",}
\item{\code{SZK_teryt} - nr TERYT powiatu, przeliczony na podstawie
znajdującego się wcześniej w tej zmiennej nr TERYT gminy, na terenie
której znajduje się szkoła,}
\item{\code{UCZ_plec} - płeć ucznia ("M" lub "K"),}
\item{\code{matura_zdana} - wskaźnik opisujący, czy badany zdał maturę
(liczba: 1 - zdał, 0 - nie zdał),}
\item{\code{egz_zaw_zdany} - wskaźnik opisujący, czy badany zdał egzamin
zawodowy, tj. zdał wszystkie egzaminy niezbędne do uzyskania
dyplomu potwierdzającego kwalifikacje w zawodzie (liczba: 1 - zdał,
0 - nie zdał).}
}
}
\description{
Funkcja oblicza zmienne-wskaźniki, które wymagają informacji
nie ze zbioru epizodów.
}
\seealso{
\code{\link{egz_zaw_zdawalnosc}}, \code{\link{matura_zdawalnosc}},
\code{\link{liczba_kobiet}}
}
|
###calc RFMix accuracy
# Compares RFMix local-ancestry (Viterbi) calls against simulated truth and
# writes one correlation per individual to the --out file.
library(data.table)
library(argparse)
library(dplyr)
# Infix string concatenation helper: "a" %&% "b" -> "ab".
"%&%" = function(a,b) paste(a,b,sep="")
parser <- ArgumentParser()
parser$add_argument("--viterbi", help="Rdata object ouput by MOSAIC")
parser$add_argument("--haps.hap.gz", help="haplotype genome file")
parser$add_argument("--ref.map", help="admixed sample list")
parser$add_argument("--classes", help="classes file made for rfmix input")
parser$add_argument("--nanc", help="number of ancestries estimated")
parser$add_argument("--result", help="results file output by adsim")
parser$add_argument("--out", help="file you would like to output as")
args <- parser$parse_args()
print("processing snp ids")
# Columns 1 and 3 of the gzipped haps file are used as chromosome and position
# (renamed to chm/pos below).
snps<-fread("zcat " %&% args$haps.hap.gz, select = c(1,3))
colnames(snps)<-c("chm","pos")
# RFMix Viterbi output; presumably one column per haplotype, one row per SNP
# (the i+2 column indexing below relies on this) -- confirm against the input.
rfout<-fread(args$viterbi, header = F)
rfout<-as.data.frame(cbind.data.frame(snps,rfout))
# Simulated truth; keep only SNPs present in both files.
true_ancestry<-fread(args$result, header = T)
true_ancestry_subset<-inner_join(true_ancestry,snps,by=c("chm","pos"))
#separating true ancesty into ancestral groups
snp_count_true<-nrow(true_ancestry_subset)
nanc<-as.numeric(args$nanc)
# First two columns are chm/pos; the rest are haplotypes (2 per individual).
n_haps<-(ncol(true_ancestry_subset) - 2)
nindv<-n_haps/2
# Indicator matrices with nanc columns per haplotype, filled below.
true_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
rf_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
# Expand a single haplotype's ancestry calls (integer codes 1..nanc) into an
# indicator matrix: entry [s, a] is 1 when SNP s is assigned ancestry a, 0
# otherwise.
#
# haplotype: one-column data frame of ancestry codes (one row per SNP).
# nanc:      number of ancestries.  Generalised from the original 2/3-only
#            branches; behaviour is unchanged for nanc = 2 and nanc = 3.
# Returns a nrow(haplotype) x nanc 0/1 matrix.
decompose_hap_to_ancestries<-function(haplotype, nanc){
  decomposed_anc <- matrix(NA, nrow = nrow(haplotype), ncol = nanc)
  for (anc in seq_len(nanc)) {
    decomposed_anc[, anc] <- ifelse(haplotype == anc, 1, 0)
  }
  decomposed_anc
}
# Fill the haploid indicator matrices: haplotype i (data column i+2, after
# chm/pos) occupies columns (i-1)*nanc+1 .. i*nanc of each decomposed matrix.
# (Fix: loop bound was hard-coded to 40 instead of n_haps, and the index
# arithmetic was duplicated per-nanc; both now generalise.)
print("separating haplotypes into composite ancestries")
for (i in seq_len(n_haps)){
  j <- i + 2
  k <- i * nanc
  storage_indices <- (k - nanc + 1):k
  true_ancestry_decomposed_haploid[,storage_indices] <- decompose_hap_to_ancestries(select(true_ancestry_subset, c(j)), nanc)
  rf_ancestry_decomposed_haploid[,storage_indices] <- decompose_hap_to_ancestries(select(rfout, c(j)), nanc)
}
# Diploid dosage matrices: nanc columns per individual, values 0/1/2 copies
# of each ancestry per SNP.
true_ancestry_decomposed_diploid <- matrix(NA, nrow=snp_count_true, ncol=nanc*nindv)
rf_ancestry_decomposed_diploid <- matrix(NA, nrow=snp_count_true, ncol=nanc*nindv)
# Collapse the two haplotypes of each individual by summing their indicator
# columns.  (Fix: loop bound was hard-coded to 20 instead of nindv, and the
# per-nanc index branches are replaced by general arithmetic.)
print("converting haploid to diploid")
for (i in seq_len(nindv)){
  k <- i * nanc * 2                    # end of this individual's haploid columns
  j <- i * nanc                        # end of this individual's diploid columns
  hap1_indices <- (k - 2 * nanc + 1):(k - nanc)
  hap2_indices <- (k - nanc + 1):k
  storage_indices <- (j - nanc + 1):j
  hap1 <- true_ancestry_decomposed_haploid[,hap1_indices]
  hap2 <- true_ancestry_decomposed_haploid[,hap2_indices]
  true_ancestry_decomposed_diploid[,storage_indices] <- hap1 + hap2
  rfhap1 <- rf_ancestry_decomposed_haploid[,hap1_indices]
  rfhap2 <- rf_ancestry_decomposed_haploid[,hap2_indices]
  rf_ancestry_decomposed_diploid[,storage_indices] <- rfhap1 + rfhap2
}
# Correlate RFMix diploid dosages with the truth: one correlation estimate
# per individual.  (Fix: removed unused hap_corr and threshold variables,
# replaced hard-coded index branches with general arithmetic, and used
# scalar && / || in the scalar condition.)
dip_corr <- rep(NA, nindv)
print("correlating diploid")
for (i in seq_len(nindv)){
  j <- i * nanc
  storage_indices <- (j - nanc + 1):j
  # Ancestry labels are arbitrary, so the first two ancestry columns may be
  # swapped when that improves agreement.
  flip <- c(2, 1, setdiff(seq_len(nanc), 1:2))
  rf_indiv_i <- rf_ancestry_decomposed_diploid[,storage_indices]
  true_indiv_i <- true_ancestry_decomposed_diploid[,storage_indices]
  corr <- cor.test(rf_indiv_i, true_indiv_i)
  # NOTE(review): for nanc == 3 the columns are ALWAYS flipped, regardless of
  # the initial correlation -- behaviour preserved from the original; confirm
  # that this is intended.
  if (nanc == 3 || (nanc == 2 && corr$estimate < 0)){
    rf_indiv_i <- rf_indiv_i[,flip]
    corr <- cor.test(rf_indiv_i, true_indiv_i)
  }
  dip_corr[i] <- corr$estimate
}
# Write the per-individual correlations as a single tab-separated row.
fwrite(as.list(dip_corr), args$out, sep = "\t")
| /accuracy_estimation/RFMix_accuracy.R | no_license | WheelerLab/Local_Ancestry | R | false | false | 3,806 | r | ###calc RFMix accuracy
# Compares RFMix local-ancestry (Viterbi) calls against simulated truth and
# writes one correlation per individual to the --out file.
library(data.table)
library(argparse)
library(dplyr)
# Infix string concatenation helper: "a" %&% "b" -> "ab".
"%&%" = function(a,b) paste(a,b,sep="")
parser <- ArgumentParser()
parser$add_argument("--viterbi", help="Rdata object ouput by MOSAIC")
parser$add_argument("--haps.hap.gz", help="haplotype genome file")
parser$add_argument("--ref.map", help="admixed sample list")
parser$add_argument("--classes", help="classes file made for rfmix input")
parser$add_argument("--nanc", help="number of ancestries estimated")
parser$add_argument("--result", help="results file output by adsim")
parser$add_argument("--out", help="file you would like to output as")
args <- parser$parse_args()
print("processing snp ids")
# Columns 1 and 3 of the gzipped haps file are used as chromosome and position
# (renamed to chm/pos below).
snps<-fread("zcat " %&% args$haps.hap.gz, select = c(1,3))
colnames(snps)<-c("chm","pos")
# RFMix Viterbi output; presumably one column per haplotype, one row per SNP
# (the i+2 column indexing below relies on this) -- confirm against the input.
rfout<-fread(args$viterbi, header = F)
rfout<-as.data.frame(cbind.data.frame(snps,rfout))
# Simulated truth; keep only SNPs present in both files.
true_ancestry<-fread(args$result, header = T)
true_ancestry_subset<-inner_join(true_ancestry,snps,by=c("chm","pos"))
#separating true ancesty into ancestral groups
snp_count_true<-nrow(true_ancestry_subset)
nanc<-as.numeric(args$nanc)
# First two columns are chm/pos; the rest are haplotypes (2 per individual).
n_haps<-(ncol(true_ancestry_subset) - 2)
nindv<-n_haps/2
# Indicator matrices with nanc columns per haplotype, filled below.
true_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
rf_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
# Turn one haplotype's ancestry calls into per-ancestry 0/1 indicator
# columns.  Handles two or three ancestries, matching the rest of the script;
# any extra columns are left as NA.
decompose_hap_to_ancestries <- function(haplotype, nanc) {
  out <- matrix(NA, nrow = nrow(haplotype), ncol = nanc)
  out[, 1] <- ifelse(haplotype == 1, 1, 0)
  out[, 2] <- ifelse(haplotype == 2, 1, 0)
  if (nanc == 3) {
    out[, 3] <- ifelse(haplotype == 3, 1, 0)
  }
  out
}
# Fill the haploid indicator matrices: haplotype i (data column i+2, after
# chm/pos) occupies columns (i-1)*nanc+1 .. i*nanc of each decomposed matrix.
print("separating haplotypes into composite ancestries")
# NOTE(review): the loop bound 40 hard-codes n_haps == 40; for other sample
# sizes it should track n_haps -- confirm against the input data.
for (i in c(1:40)){
j<-i+2
k<-i*nanc
if(nanc==3){
storage_indices<-c(k-2,k-1,k)
} else {
storage_indices<-c(k-1,k)
}
true_ancestry_decomposed_haploid[,storage_indices]<-decompose_hap_to_ancestries(select(true_ancestry_subset, c(j)),nanc)
rf_ancestry_decomposed_haploid[,storage_indices]<-decompose_hap_to_ancestries(select(rfout, c(j)),nanc)
}
# Diploid dosage matrices: nanc columns per individual, values 0/1/2 copies
# of each ancestry per SNP.
true_ancestry_decomposed_diploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*nindv)
rf_ancestry_decomposed_diploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*nindv)
print("converting haploid to diploid")
# Sum the indicator columns of each individual's two haplotypes.
# NOTE(review): the loop bound 20 hard-codes nindv == 20; it should track
# nindv for other sample sizes -- confirm against the input data.
for (i in c(1:20)){
k<-i*nanc*2
j<-i*nanc
if(nanc==3){
hap1_indices<-c(k-5,k-4,k-3)
hap2_indices<-c(k-2,k-1,k)
storage_indices<-c(j-2,j-1,j)
} else {
hap1_indices<-c(k-3,k-2)
hap2_indices<-c(k-1,k)
storage_indices<-c(j-1,j)
}
hap1<-true_ancestry_decomposed_haploid[,hap1_indices]
hap2<-true_ancestry_decomposed_haploid[,hap2_indices]
dip<-(hap1 + hap2)
true_ancestry_decomposed_diploid[,storage_indices]<-dip
rfhap1<-rf_ancestry_decomposed_haploid[,hap1_indices]
rfhap2<-rf_ancestry_decomposed_haploid[,hap2_indices]
rfdip<-(rfhap1 + rfhap2)
rf_ancestry_decomposed_diploid[,storage_indices]<-rfdip
}
# Per-individual correlation between RFMix and true diploid dosages.
# NOTE(review): hap_corr and threshold are assigned but never used.
hap_corr<-c(rep(NA,n_haps))
dip_corr<-c(rep(NA,nindv))
print("correlating diploid")
for (i in c(1:nindv)){
j<-i*nanc
threshold<-(1/nanc)
# flip swaps the first two ancestry columns (labels are arbitrary).
if(nanc==3){
storage_indices<-c(j-2,j-1,j)
flip<-c(2,1,3)
} else {
storage_indices<-c(j-1,j)
flip<-c(2,1)
}
rf_indiv_i<-rf_ancestry_decomposed_diploid[,storage_indices]
true_indiv_i<-true_ancestry_decomposed_diploid[,storage_indices]
corr<-cor.test(rf_indiv_i,true_indiv_i)
# NOTE(review): for nanc == 3 the flip is applied unconditionally, regardless
# of the initial correlation sign -- confirm that this is intended.
if (((nanc == 3)) | ((nanc == 2) & (corr$estimate < 0))){
rf_indiv_i<-rf_indiv_i[,flip]
#str(rf_indiv_i)
corr<-cor.test(rf_indiv_i,true_indiv_i)
}
dip_corr[i]<-corr$estimate
}
#dip_corr
# One tab-separated row of correlations, one value per individual.
fwrite(as.list(dip_corr),args$out,sep ="\t")
|
# Character-level LSTM text generator trained on Grimm fairy tales.
library(keras)
library(readr)
library(stringr)
library(purrr)
library(tokenizers)
# Parameters --------------------------------------------------------------
# Length of each input character window fed to the network.
maxlen <- 20
# Data Preparation --------------------------------------------------------
#SET GPU
library(tensorflow)
config <- list()
config$gpu_options$allow_growth = TRUE
# NOTE(review): session_conf is built but never passed to a TF session or the
# keras backend below, so the GPU setting may not take effect -- confirm.
session_conf <- do.call(tf$ConfigProto, config)
##
library(stringi)
z<-readLines('GRIMM.txt',encoding="UTF-8",skipNul=TRUE)
# z<-stri_trans_general(z, "latin-ascii")
# z<- z[-1]
# Strip digits and the characters * and _ from the corpus.
z<-str_replace_all(z,"[1234567890*_]","")
#
# z<- readChar('slowregard2.txt',file.info('slowregard2.txt')$size)
# Collapse lines into one string and split into individual characters,
# preserving case and non-alphanumeric characters.
text <- z %>%
# str_to_lower() %>%
str_c(collapse = "\n") %>%
tokenize_characters(strip_non_alphanum = FALSE, simplify = TRUE,lowercase=FALSE)
# bad = c("\\","¿","€",">","™")
# text[text %in% bad] = ""
print(sprintf("corpus length: %d", length(text)))
# Sorted character vocabulary.
chars <- text %>%
unique() %>%
sort()
print(sprintf("total chars: %d", length(chars)))
# Cut the text in semi-redundant sequences of maxlen characters
# (windows start every 3 characters; "sentece" is a typo kept for
# consistency with all later references).
dataset <- map(
seq(1, length(text) - maxlen - 1, by = 3),
~list(sentece = text[.x:(.x + maxlen - 1)], next_char = text[.x + maxlen])
)
dataset <- transpose(dataset)
# Vectorization
# One-hot encode: X[sample, position, char] and y[sample, char].
X <- array(0, dim = c(length(dataset$sentece), maxlen, length(chars)))
y <- array(0, dim = c(length(dataset$sentece), length(chars)))
for(i in 1:length(dataset$sentece)){
X[i,,] <- sapply(chars, function(x){
as.integer(x == dataset$sentece[[i]])
})
y[i,] <- as.integer(chars == dataset$next_char[[i]])
}
# Model Definition --------------------------------------------------------
# Two stacked LSTMs (256 then 64 units) with dropout, followed by a dense
# softmax over the character vocabulary.  Commented-out layers are earlier
# architecture experiments (see the notes at the end of the file).
model <- keras_model_sequential()
model %>%
layer_lstm(256, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
layer_dropout(rate = 0.2) %>%
# layer_lstm(256, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
# layer_lstm(64, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
# layer_lstm(64, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
# layer_lstm(64, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
layer_lstm(64, input_shape = c(maxlen, length(chars))) %>%
# layer_dropout(rate = 0.2) %>%
layer_dense(length(chars)) %>%
layer_activation("softmax")
# optimizer <- optimizer_rmsprop(lr = 0.01)
optimizer <- optimizer_rmsprop(lr = 0.005)
model %>% compile(
loss = "categorical_crossentropy",
optimizer = optimizer
)
# Training & Results ----------------------------------------------------
# Sample the index of the next character from a probability vector with
# temperature scaling: p_i^(1/T), renormalised.  T < 1 sharpens the
# distribution (more conservative text), T > 1 flattens it (more surprising).
#
# preds:       vector (or 1-row matrix) of class probabilities.
# temperature: sampling temperature, default 1 (use preds as-is).
# Returns the sampled index (integer).
# (Fix: the original recomputed exp(preds) inside the normalisation it had
# already stored in exp_preds; also replaced the magrittr pipe with a direct
# base-R call, which this function does not otherwise need.)
sample_mod <- function(preds, temperature = 1){
  scaled <- exp(log(preds) / temperature)
  probs <- scaled / sum(scaled)
  # rmultinom draws a single index; the row holding the 1 is the sample.
  which.max(rmultinom(1, 1, probs))
}
# Train in 5 rounds of 30 epochs, logging sample generations at several
# temperatures after each round.  All cat() output is redirected to the
# training log file via sink().
{
sink('TrainingPoe-128x2dropout.txt',append=TRUE)
for(iteration in 1:5){
# NOTE(review): the header prints iteration*20 although each fit() call runs
# 30 epochs -- the label and the epoch count disagree; confirm which is meant.
cat(sprintf("iteration: %02d ---------------\n\n", (iteration*20)))
# NOTE(review): hist captures the training history but is never used.
hist<-model %>% fit(
X, y,
# batch_size = 128,
batch_size =2000,
epochs = 30,
verbose=2
)
for(diversity in c(0.2, 0.5, 1, 1.2)){
cat(sprintf("diversity: %f ---------------\n\n", diversity))
# Seed generation with a random maxlen-character window from the corpus.
start_index <- sample(1:(length(text) - maxlen), size = 1)
sentence <- text[start_index:(start_index + maxlen - 1)]
generated <- ""
for(i in 1:400){
# One-hot encode the current window, predict the next-character
# distribution, sample from it, and slide the window forward.
x <- sapply(chars, function(x){
as.integer(x == sentence)
})
x <- array_reshape(x, c(1, dim(x)))
preds <- predict(model, x)
next_index <- sample_mod(preds, diversity)
next_char <- chars[next_index]
generated <- str_c(generated, next_char, collapse = "")
sentence <- c(sentence[-1], next_char)
}
cat(generated)
cat("\n\n")
}
}
sink()
}
#After training
# Generate three 2000-character samples at diversity 0.8 and write them,
# together with the model summary, to an output file via sink().
{
sink('Grimm4-1.txt')
summary(model)
# for(diversity in c(0.2, 0.4, 0.6,0.8, 1, 1.2,1.5)){
for(diversity in 0.8){
# for(diversity in c(0.01,0.1,0.2,.1,.4,.6,.8,1)){
for (repeats in 1:3){
# poem_length=as.integer(runif(n=1,min=200,max=2000))
poem_length=2000
# for(diversity in c(0.80)){
cat(sprintf("diversity: %f ---------------\n\n", diversity))
# Seed with a random maxlen-character window from the corpus.
start_index <- sample(1:(length(text) - maxlen), size = 1)
sentence <- text[start_index:(start_index + maxlen - 1)]
generated <- ""
#alternative way to seed with custom string:
# sentence<-"Just then Auri opened her eyes"
# sentence<-"hey liz, what's up??" #must be 20 characters
# sentence<-"can't feel\nyour face"
# sentence<- "o the feeling of shocking fuzz"
# cat(sentence)
# originalsentence<-sentence
# sentence<-strsplit(sentence,split=NULL)[[1]]
# diversity=1
for(i in 1:poem_length){
# One-hot encode the window, predict, sample, slide forward.
x <- sapply(chars, function(x){
as.integer(x == sentence)
})
x <- array_reshape(x, c(1, dim(x)))
preds <- predict(model, x)
# next_index <- sample_mod(preds, runif(1)+0.5)
next_index <- sample_mod(preds, diversity)
next_char <- chars[next_index]
generated <- str_c(generated, next_char, collapse = "")
sentence <- c(sentence[-1], next_char)
}
cat(generated)
# cat(originalsentence,generated)
cat("\n\n")
}
}
sink()
}
# Persist the trained model and its corpus/vocabulary for later reuse.
save_model_hdf5(model,'GrimmModel5hd5')
saveRDS(text,'Grimm4text.RDS')
saveRDS(chars,'Grimm4chars.RDS')
# NOTE(review): these loads immediately overwrite the Grimm4 text/chars just
# saved with the Grimm1 versions -- confirm the vocabulary matches the model.
text<-readRDS('Grimm1text.RDS')
chars<- readRDS('Grimm1chars.RDS')
# Architecture notes from earlier experiments:
#note: SlowRegard7 uses two layers of 128
#note: SlowRegard8 uses 4 layers of 64
#note: SlowRegard9 uses 3 layers of 128 64 and 64
#note: SlowRegard10 uses 2 layers of 128 and 2 dropout layers of 0.5
#11 is 1 512 layer
#slow regard 12 is 3 128 layers
#eemodel3 used two 256 layers with dropout of 0.2
#eemodel4 used a 64 and a 512 with dropout of 0.2
#eemodel5 used a 128 and 128 with 0.5 dropout and reduced sentence length of 20 | /model training/grimmgen.R | no_license | AdamPallus/writing_styles | R | false | false | 5,901 | r | library(keras)
library(readr)
library(stringr)
library(purrr)
library(tokenizers)
# Parameters --------------------------------------------------------------
# Length (in characters) of each input window fed to the LSTM.
maxlen <- 20
# Data Preparation --------------------------------------------------------
#SET GPU
# Configure TensorFlow to grow GPU memory on demand instead of grabbing it all.
library(tensorflow)
config <- list()
config$gpu_options$allow_growth = TRUE
session_conf <- do.call(tf$ConfigProto, config)
##
library(stringi)
# Read the corpus; skipNul guards against embedded NUL bytes in the file.
z<-readLines('GRIMM.txt',encoding="UTF-8",skipNul=TRUE)
# z<-stri_trans_general(z, "latin-ascii")
# z<- z[-1]
# Strip digits and the characters * and _ from the corpus.
z<-str_replace_all(z,"[1234567890*_]","")
#
# z<- readChar('slowregard2.txt',file.info('slowregard2.txt')$size)
# Collapse all lines into one string (newlines preserved) and split it into
# a character vector with one element per character; case is kept as-is.
text <- z %>%
  # str_to_lower() %>%
  str_c(collapse = "\n") %>%
  tokenize_characters(strip_non_alphanum = FALSE, simplify = TRUE,lowercase=FALSE)
# bad = c("\\","¿","€",">","™")
# text[text %in% bad] = ""
print(sprintf("corpus length: %d", length(text)))
# Sorted vocabulary of distinct characters; its order defines the one-hot
# encoding used for training and sampling below.
chars <- text %>%
  unique() %>%
  sort()
print(sprintf("total chars: %d", length(chars)))
# Cut the text into semi-redundant sequences of maxlen characters: windows
# start every 3rd character, each paired with the character that follows it
# (the prediction target).  NOTE: "sentece" is a pre-existing typo kept here
# because the key is referenced throughout this block; it is not used after it.
dataset <- map(
  seq(1, length(text) - maxlen - 1, by = 3),
  ~list(sentece = text[.x:(.x + maxlen - 1)], next_char = text[.x + maxlen])
)
dataset <- transpose(dataset)
# Vectorization: one-hot encode every window.
# X is (samples x maxlen x vocab); y is (samples x vocab).
n_samples <- length(dataset$sentece)
X <- array(0, dim = c(n_samples, maxlen, length(chars)))
y <- array(0, dim = c(n_samples, length(chars)))
# seq_len() is safe when n_samples == 0, where 1:length(...) would yield c(1, 0).
for(i in seq_len(n_samples)){
  # vapply pins the result to an integer (maxlen x vocab) matrix, unlike
  # sapply whose return type silently depends on the input.
  X[i,,] <- vapply(chars, function(x){
    as.integer(x == dataset$sentece[[i]])
  }, integer(maxlen))
  y[i,] <- as.integer(chars == dataset$next_char[[i]])
}
# Model Definition --------------------------------------------------------
# Character-level LSTM: a 256-unit recurrent layer (returning full sequences)
# with 20% dropout, a 64-unit recurrent layer, then a dense softmax over the
# vocabulary.  Commented-out layers record previously tried architectures.
model <- keras_model_sequential()
model %>%
  layer_lstm(256, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
  layer_dropout(rate = 0.2) %>%
  # layer_lstm(256, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
  # layer_lstm(64, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
  # layer_lstm(64, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
  # layer_lstm(64, input_shape = c(maxlen, length(chars)),return_sequences = TRUE) %>%
  layer_lstm(64, input_shape = c(maxlen, length(chars))) %>%
  # layer_dropout(rate = 0.2) %>%
  layer_dense(length(chars)) %>%
  layer_activation("softmax")
# optimizer <- optimizer_rmsprop(lr = 0.01)
# RMSprop at a reduced learning rate (0.005 instead of the default/earlier 0.01).
optimizer <- optimizer_rmsprop(lr = 0.005)
# Multi-class next-character prediction, hence categorical cross-entropy.
model %>% compile(
  loss = "categorical_crossentropy",
  optimizer = optimizer
)
# Training & Results ----------------------------------------------------
# Sample a class index from a vector of predicted probabilities, sharpened or
# flattened by a softmax "temperature".
#
# Args:
#   preds: numeric vector of non-negative predicted probabilities, one per class.
#   temperature: < 1 sharpens the distribution (conservative samples),
#     > 1 flattens it (more surprising samples).  Default 1 leaves it unchanged.
# Returns: the 1-based integer index of the sampled class.
sample_mod <- function(preds, temperature = 1){
  # Re-weight in log space, then exponentiate back (temperature softmax).
  log_preds <- log(preds)/temperature
  exp_preds <- exp(log_preds)
  # Normalize once; the original recomputed exp() inside sum() needlessly.
  probs <- exp_preds/sum(exp_preds)
  # Draw a single multinomial sample and report which class was drawn.
  # Base R only, so this helper no longer depends on magrittr's %>%.
  which.max(as.integer(rmultinom(1, 1, probs)))
}
# Training & sampling loop.  All console output (progress banners and sampled
# text) is diverted to a log file via sink() for the duration of the block.
{
  sink('TrainingPoe-128x2dropout.txt',append=TRUE)
  # Five outer rounds of 30 epochs each; the banner label counts by 20s
  # (presumably a leftover from an earlier epoch count -- review).
  for(iteration in 1:5){
    cat(sprintf("iteration: %02d ---------------\n\n", (iteration*20)))
    hist<-model %>% fit(
      X, y,
      # batch_size = 128,
      batch_size =2000,
      epochs = 30,
      verbose=2
    )
    # After each round, sample 400 characters at several temperatures to
    # monitor generation quality as training progresses.
    for(diversity in c(0.2, 0.5, 1, 1.2)){
      cat(sprintf("diversity: %f ---------------\n\n", diversity))
      # Seed the generator with a random maxlen-character window of the corpus.
      start_index <- sample(1:(length(text) - maxlen), size = 1)
      sentence <- text[start_index:(start_index + maxlen - 1)]
      generated <- ""
      for(i in 1:400){
        # One-hot encode the current window, then add a leading batch
        # dimension: (maxlen x vocab) -> (1 x maxlen x vocab).
        x <- sapply(chars, function(x){
          as.integer(x == sentence)
        })
        x <- array_reshape(x, c(1, dim(x)))
        preds <- predict(model, x)
        next_index <- sample_mod(preds, diversity)
        next_char <- chars[next_index]
        generated <- str_c(generated, next_char, collapse = "")
        # Slide the window forward by one character.
        sentence <- c(sentence[-1], next_char)
      }
      cat(generated)
      cat("\n\n")
    }
  }
  sink()
}
#After training
# Generation-only pass: write the model summary plus three 2000-character
# samples at temperature 0.8 to a log file.  Commented-out lines preserve
# alternative temperature sweeps and custom seed strings tried earlier.
{
  sink('Grimm4-1.txt')
  summary(model)
  # for(diversity in c(0.2, 0.4, 0.6,0.8, 1, 1.2,1.5)){
  for(diversity in 0.8){
    # for(diversity in c(0.01,0.1,0.2,.1,.4,.6,.8,1)){
    for (repeats in 1:3){
      # poem_length=as.integer(runif(n=1,min=200,max=2000))
      poem_length=2000
      # for(diversity in c(0.80)){
      cat(sprintf("diversity: %f ---------------\n\n", diversity))
      # Seed with a random maxlen-character window of the training corpus.
      start_index <- sample(1:(length(text) - maxlen), size = 1)
      sentence <- text[start_index:(start_index + maxlen - 1)]
      generated <- ""
      #alternative way to seed with custom string:
      # sentence<-"Just then Auri opened her eyes"
      # sentence<-"hey liz, what's up??" #must be 20 characters
      # sentence<-"can't feel\nyour face"
      # sentence<- "o the feeling of shocking fuzz"
      # cat(sentence)
      # originalsentence<-sentence
      # sentence<-strsplit(sentence,split=NULL)[[1]]
      # diversity=1
      for(i in 1:poem_length){
        # One-hot encode the window and add a batch dimension for predict().
        x <- sapply(chars, function(x){
          as.integer(x == sentence)
        })
        x <- array_reshape(x, c(1, dim(x)))
        preds <- predict(model, x)
        # next_index <- sample_mod(preds, runif(1)+0.5)
        next_index <- sample_mod(preds, diversity)
        next_char <- chars[next_index]
        generated <- str_c(generated, next_char, collapse = "")
        # Slide the window forward by one character.
        sentence <- c(sentence[-1], next_char)
      }
      cat(generated)
      # cat(originalsentence,generated)
      cat("\n\n")
    }
  }
  sink()
}
# Persist the trained model and its vocabulary for later generation runs.
save_model_hdf5(model,'GrimmModel5hd5')
saveRDS(text,'Grimm4text.RDS')
saveRDS(chars,'Grimm4chars.RDS')
# NOTE(review): the next two lines reload a DIFFERENT corpus ('Grimm1*', not
# the 'Grimm4*' just saved) -- verify this swap is intentional.
text<-readRDS('Grimm1text.RDS')
chars<- readRDS('Grimm1chars.RDS')
# Experiment log: architectures used by earlier saved checkpoints.
#note: SlowRegard7 uses two layers of 128
#note: SlowRegard8 uses 4 layers of 64
#note: SlowRegard9 uses 3 layers of 128 64 and 64
#note: SlowRegard10 uses 2 layers of 128 and 2 dropout layers of 0.5
#11 is 1 512 layer
#slow regard 12 is 3 128 layers
#eemodel3 used two 256 layers with dropout of 0.2
#eemodel4 used a 64 and a 512 with dropout of 0.2
#eemodel5 used a 128 and 128 with 0.5 dropout and reduced sentence length of 20 |
# Load mandatory dependencies with library(), which errors immediately when a
# package is missing; require() only warns and returns FALSE, letting the
# script continue and fail later with a confusing error.
library (mvtnorm)
library (phytools)
## Fit a Bayesian phylogenetic model (via Stan) for the clade rooted at
## `node`, and return the fitted model plus a labelled extraction of its
## posterior draws.
##
## Args:
##   node: node id in `tree` defining the clade to analyse.
##   main.data: list named by tip label; each element carries per-tip trait
##     matrices under $local and $ed (rows = individuals, cols = traits).
##   tree: phylogeny from which the clade is extracted (ape-style).
##   what: which trait matrix to use, 'local' (default) or 'ed'.
##   prior.list: list with $mean and $vcv, used both to seed starting values
##     and (commented out below) as priors.
##   model: Stan model name; the substrings 'multi', '_Anc', 'alt' and 'pcaS'
##     switch which starting values are generated.  The file
##     ../Stan/<model>.stan must exist.
##   corC: if TRUE, invert the phylogenetic correlation matrix; otherwise
##     invert the covariance matrix.
##   ...: forwarded to rstan's stan().
## Returns: list with 'model' (the stanfit object) and 'extract' (draws with
##   labelled dimensions).
mainModel <-
    function (node, main.data, tree, what = 'local',
              prior.list, model = 'oneSigma_noAnc', corC = TRUE, ...)
    {
        stan.data <- list()
        ## Clade of interest and its inverse phylogenetic (co)variance matrix;
        ## internal (ancestral) nodes are included only for '_Anc' models.
        subtree <- extract.clade(tree, node)
        stan.data $ C <-
            vcvPhylo (subtree,
                      anc.nodes = ifelse (grepl ('_Anc', model), TRUE, FALSE))
        if (corC)
            stan.data $ C <- solve (cov2cor (stan.data $ C))
        else
            stan.data $ C <- solve (stan.data $ C)
        ## Align trait data with the subtree's tips (note: this local
        ## `subset` shadows base::subset inside the function).
        subset <- match (subtree $ tip.label, names(main.data))
        if (what == 'local')
            raw.data <- llply (main.data [subset], function (L) L $ local)
        if (what == 'ed')
            raw.data <- llply (main.data [subset], function (L) L $ ed)
        ## Pack dimensions and a zero-padded (m x ni_max x k) data array:
        ## k = traits, m = tips, ni = individuals per tip.
        stan.data <-
            within (stan.data,
                {
                    k <- ncol (raw.data [[1]])
                    m <- length (subtree $ tip.label)
                    ni <- laply (raw.data, function (L) nrow (L))
                    ni_max <- max (ni)
                    X <- array (0, c(m, ni_max, k))
                    for (i in 1:m)
                        X [i, 1:ni[i], ] <- raw.data [[i]]
                    #priorS <- prior.list $ vcv
                    #priorX <- prior.list $ mean
                })
        ## Starting values for the single chain ('c1'), drawn from the prior.
        start.values <- list()
        start.values $ c1 <- list()
        start.values $ c1 <-
            within (start.values $ c1,
                {
                    terminal <- rmvnorm(stan.data $ m, prior.list $ mean, prior.list $ vcv)
                    root <- as.vector (rmvnorm(1, prior.list $ mean, prior.list $ vcv))
                    ## 'multi' models: one within-group Cholesky factor and
                    ## scale vector per tip; otherwise a single shared pair.
                    if (grepl('multi', model))
                        {
                            Gamma <- array (t (chol (cov2cor (prior.list $ vcv))),
                                            c(stan.data$k, stan.data$k, stan.data$m))
                            # cholesky_factor_corr[k] Gamma[m] (m x k x k)
                            GammaW <- aperm (Gamma, c(3, 1, 2))
                            sigmaW <- t (array (sqrt (diag (prior.list $ vcv)),
                                                c(stan.data$k, stan.data$m)))
                        }
                    else
                        {
                            GammaW <- t (chol (cov2cor (prior.list $ vcv)))
                            sigmaW <- sqrt (diag (prior.list $ vcv))
                        }
                    ## '_Anc' models additionally estimate the m - 2 internal
                    ## node states.
                    if (grepl ('_Anc', model))
                        ancestor <-
                            rmvnorm (stan.data$m - 2, prior.list $ mean,
                                     prior.list $ vcv)
                    if (grepl ('alt', model))
                        {
                            drift <- 1
                            Gamma_beta <- t (chol (cov2cor (prior.list $ vcv)))
                            sigma_beta <- sqrt (diag (prior.list $ vcv))
                        }
                    ## 'pcaS' models use a reduced-rank between-group matrix
                    ## (identity start); otherwise start from the prior vcv.
                    if (grepl ('pcaS', model))
                        {
                            dim.flag <- ifelse (stan.data $ k >= stan.data $ m,
                                                stan.data $m - 1, stan.data $k)
                            GammaB <- t (chol (diag (dim.flag)))
                            sigmaB <- rep (1, times = dim.flag)
                        }
                    else
                        {
                            GammaB <- t (chol (cov2cor (prior.list $ vcv)))
                            sigmaB <- sqrt (diag (prior.list $ vcv))
                        }
                })
        ## Compile and sample the Stan model named after `model`.
        model.file <- paste ('../Stan/', model, '.stan', sep = '')
        model.fit <- stan (file = model.file, chains = 1,
                           data = stan.data, init = start.values, ...)
        ## Label the posterior draws: dimensions of size k become named
        ## traits, size m become tip labels, and (for '_Anc') size m - 2
        ## become internal-node ids.  Caveat: matching purely on dimension
        ## size can mislabel arrays when k, m, or m - 2 happen to coincide.
        ext <- extract (model.fit)
        ext <- llply (ext, function (L)
            {
                change.k <- length (which (dim (L) == stan.data $ k)) != 0
                if (change.k)
                    for (i in which (dim (L) == stan.data $ k))
                        {
                            names (dimnames (L)) [i] <- 'trait'
                            dimnames (L) [[i]] <- colnames (raw.data [[1]])
                        }
                change.m <- length (which (dim (L) == stan.data $ m)) != 0
                if (change.m)
                    for (i in which (dim (L) == stan.data $ m))
                        {
                            names (dimnames (L)) [i] <- 'node'
                            dimnames (L) [[i]] <- subtree $ tip.label
                        }
                if (ifelse (grepl ('_Anc', model), TRUE, FALSE))
                    {
                        change.mm <- length (which (dim (L) == stan.data $ m - 2)) != 0
                        if (change.mm)
                            for (i in which (dim (L) == stan.data $ m - 2))
                                {
                                    names (dimnames (L)) [i] <- 'node'
                                    dimnames (L) [[i]] <-
                                        as.character (stan.data $ m + (2:(stan.data $ m - 1)))
                                }
                    }
                L
            })
        return (list ('model' = model.fit, 'extract' = ext))
    }
| /FuncR/mainModel.R | no_license | wgar84/BayesMEM | R | false | false | 4,939 | r | require (mvtnorm)
require (phytools)
mainModel <-
function (node, main.data, tree, what = 'local',
prior.list, model = 'oneSigma_noAnc', corC = TRUE, ...)
{
stan.data <- list()
subtree <- extract.clade(tree, node)
stan.data $ C <-
vcvPhylo (subtree,
anc.nodes = ifelse (grepl ('_Anc', model), TRUE, FALSE))
if (corC)
stan.data $ C <- solve (cov2cor (stan.data $ C))
else
stan.data $ C <- solve (stan.data $ C)
subset <- match (subtree $ tip.label, names(main.data))
if (what == 'local')
raw.data <- llply (main.data [subset], function (L) L $ local)
if (what == 'ed')
raw.data <- llply (main.data [subset], function (L) L $ ed)
stan.data <-
within (stan.data,
{
k <- ncol (raw.data [[1]])
m <- length (subtree $ tip.label)
ni <- laply (raw.data, function (L) nrow (L))
ni_max <- max (ni)
X <- array (0, c(m, ni_max, k))
for (i in 1:m)
X [i, 1:ni[i], ] <- raw.data [[i]]
#priorS <- prior.list $ vcv
#priorX <- prior.list $ mean
})
start.values <- list()
start.values $ c1 <- list()
start.values $ c1 <-
within (start.values $ c1,
{
terminal <- rmvnorm(stan.data $ m, prior.list $ mean, prior.list $ vcv)
root <- as.vector (rmvnorm(1, prior.list $ mean, prior.list $ vcv))
if (grepl('multi', model))
{
Gamma <- array (t (chol (cov2cor (prior.list $ vcv))),
c(stan.data$k, stan.data$k, stan.data$m))
# cholesky_factor_corr[k] Gamma[m] (m x k x k)
GammaW <- aperm (Gamma, c(3, 1, 2))
sigmaW <- t (array (sqrt (diag (prior.list $ vcv)),
c(stan.data$k, stan.data$m)))
}
else
{
GammaW <- t (chol (cov2cor (prior.list $ vcv)))
sigmaW <- sqrt (diag (prior.list $ vcv))
}
if (grepl ('_Anc', model))
ancestor <-
rmvnorm (stan.data$m - 2, prior.list $ mean,
prior.list $ vcv)
if (grepl ('alt', model))
{
drift <- 1
Gamma_beta <- t (chol (cov2cor (prior.list $ vcv)))
sigma_beta <- sqrt (diag (prior.list $ vcv))
}
if (grepl ('pcaS', model))
{
dim.flag <- ifelse (stan.data $ k >= stan.data $ m,
stan.data $m - 1, stan.data $k)
GammaB <- t (chol (diag (dim.flag)))
sigmaB <- rep (1, times = dim.flag)
}
else
{
GammaB <- t (chol (cov2cor (prior.list $ vcv)))
sigmaB <- sqrt (diag (prior.list $ vcv))
}
})
model.file <- paste ('../Stan/', model, '.stan', sep = '')
model.fit <- stan (file = model.file, chains = 1,
data = stan.data, init = start.values, ...)
ext <- extract (model.fit)
ext <- llply (ext, function (L)
{
change.k <- length (which (dim (L) == stan.data $ k)) != 0
if (change.k)
for (i in which (dim (L) == stan.data $ k))
{
names (dimnames (L)) [i] <- 'trait'
dimnames (L) [[i]] <- colnames (raw.data [[1]])
}
change.m <- length (which (dim (L) == stan.data $ m)) != 0
if (change.m)
for (i in which (dim (L) == stan.data $ m))
{
names (dimnames (L)) [i] <- 'node'
dimnames (L) [[i]] <- subtree $ tip.label
}
if (ifelse (grepl ('_Anc', model), TRUE, FALSE))
{
change.mm <- length (which (dim (L) == stan.data $ m - 2)) != 0
if (change.mm)
for (i in which (dim (L) == stan.data $ m - 2))
{
names (dimnames (L)) [i] <- 'node'
dimnames (L) [[i]] <-
as.character (stan.data $ m + (2:(stan.data $ m - 1)))
}
}
L
})
return (list ('model' = model.fit, 'extract' = ext))
}
|
#!/usr/bin/env Rscript
#SBATCH --ntasks=1
#SBATCH --mem=50G
#SBATCH --time=02:00:00
#SBATCH --job-name='cum-man'
#SBATCH --output=/rhome/jmarz001/bigdata/CCII_BOZ/scripts/cumulative.stdout
#SBATCH -p short
# NOTE(review): setwd() with an absolute cluster path makes the script
# non-portable; acceptable for a one-off SLURM job, but consider passing the
# directory as an argument instead.
setwd("/bigdata/koeniglab/jmarz001/CCII_BOZ/results")
library(readr)
library(pacman)
p_load(ggplot2, dplyr, tidyr, data.table)
options(stringsAsFactors = F)
# Fast read of the association results; fread returns a data.table.
df<-fread("Fisherscumpos")
# Basename used to derive all output file names below.
OutName <- "Fishers"
# parse locus
OutName1<-paste0(OutName, "cumpos")
# Name the columns this script relies on: chromosome, position, and the
# p-value (assumed to be column 17 of the input -- verify against the file).
names(df)[1]<-"CHR"
names(df)[2]<-"POS"
names(df)[17]<-"PVAL"
# Format for plotting: BP is the numeric within-chromosome position.
df$BP<-as.numeric(df$POS)
result <- df %>%
  # Compute chromosome size
  group_by(CHR) %>%
  summarise(chr_len=max(BP)) %>%
  # Offset of each chromosome = cumulative length of all preceding ones
  mutate(tot=cumsum(as.numeric(chr_len))-as.numeric(chr_len)) %>%
  select(-chr_len) %>%
  # Attach that offset to every SNP of the corresponding chromosome
  left_join(df, ., by=c("CHR"="CHR")) %>%
  # Genome-wide cumulative position of each SNP
  arrange(CHR, BP) %>%
  mutate(BPcum=BP+tot)
# Write the full table before thinning.  TRUE/FALSE spelled out: T/F are
# ordinary reassignable variables in R.
write.table(result, file=OutName1, quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE)
# Keep only points with -log10(p) > 2 to shrink the plot.  Reference the
# column directly: `result$PVAL` inside filter() bypasses dplyr's data
# masking and silently misaligns if the frame is grouped or already subset.
result <- result %>% filter(-log10(PVAL) > 2)
# Chromosome label positions: midpoint of each chromosome's cumulative span.
axisdf <- result %>% group_by(CHR) %>% summarize(center=( max(BPcum) + min(BPcum) ) / 2 )
# Manhattan plot: genome-wide -log10(p) against cumulative position, with
# chromosomes in alternating black/grey and labelled at their midpoints.
g<-ggplot(result, aes(x=BPcum, y=-log10(PVAL))) +
  # Show all points
  geom_point(aes(color=as.factor(CHR)), alpha=0.5, size=1.3) +
  # Alternate black/grey per chromosome (values recycled across 22 pairs).
  scale_color_manual(values = rep(c("black", "grey"), 22 )) +
  # custom X axis: one tick per chromosome, placed at its midpoint.
  # NOTE(review): `label` relies on partial matching of the `labels`
  # argument; spell it out if this ever warns.
  scale_x_continuous(label = axisdf$CHR, breaks= axisdf$center) +
  scale_y_continuous(expand = c(0, 0.5)) + # remove space between plot area and x axis
  # Customize the theme:
  theme_classic() +
  theme(legend.position="none",
        panel.border = element_blank(),
        panel.grid.major.x = element_blank(),
        panel.grid.minor.x = element_blank(),
        text=element_text(size=16)) +
  xlab("Chromosome") +
  ylab("-log10(p-value)")
# Save as a 10x5 inch JPEG next to the working directory set above.
OutName2<-paste0(OutName, "_manhattan.jpeg")
ggsave(OutName2, g, width=10, height=5, units="in")
| /scripts/cumcount_man.R | no_license | jmmarzolino/CCII_BOZ | R | false | false | 2,006 | r | #!/usr/bin/env Rscript
#SBATCH --ntasks=1
#SBATCH --mem=50G
#SBATCH --time=02:00:00
#SBATCH --job-name='cum-man'
#SBATCH --output=/rhome/jmarz001/bigdata/CCII_BOZ/scripts/cumulative.stdout
#SBATCH -p short
setwd("/bigdata/koeniglab/jmarz001/CCII_BOZ/results")
library(readr)
library(pacman)
p_load(ggplot2, dplyr, tidyr, data.table)
options(stringsAsFactors = F)
df<-fread("Fisherscumpos")
OutName <- "Fishers"
# parse locus
OutName1<-paste0(OutName, "cumpos")
names(df)[1]<-"CHR"
names(df)[2]<-"POS"
names(df)[17]<-"PVAL"
# format for plotting
df$BP<-as.numeric(df$POS)
result <- df %>%
# Compute chromosome size
group_by(CHR) %>%
summarise(chr_len=max(BP)) %>%
# Calculate cumulative position of each chromosome
mutate(tot=cumsum(as.numeric(chr_len))-as.numeric(chr_len)) %>%
select(-chr_len) %>%
# Add this info to the initial dataset
left_join(df, ., by=c("CHR"="CHR")) %>%
# Add a cumulative position of each SNP
arrange(CHR, BP) %>%
mutate(BPcum=BP+tot)
write.table(result, file=OutName1, quote=F ,sep="\t",row.names=F,col.names=F)
result<-result %>% filter(-log10(result$PVAL)>2)
axisdf = result %>% group_by(CHR) %>% summarize(center=( max(BPcum) + min(BPcum) ) / 2 )
# Manhattan plot
g<-ggplot(result, aes(x=BPcum, y=-log10(PVAL))) +
# Show all points
geom_point(aes(color=as.factor(CHR)), alpha=0.5, size=1.3) +
scale_color_manual(values = rep(c("black", "grey"), 22 )) +
# custom X axis:
scale_x_continuous(label = axisdf$CHR, breaks= axisdf$center) +
scale_y_continuous(expand = c(0, 0.5)) + # remove space between plot area and x axis
# Customize the theme:
theme_classic() +
theme(legend.position="none",
panel.border = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
text=element_text(size=16)) +
xlab("Chromosome") +
ylab("-log10(p-value)")
OutName2<-paste0(OutName, "_manhattan.jpeg")
ggsave(OutName2, g, width=10, height=5, units="in")
|
<!DOCTYPE html>
<html lang="en" class="">
<head prefix="og: http://ogp.me/ns# fb: http://ogp.me/ns/fb# object: http://ogp.me/ns/object# article: http://ogp.me/ns/article# profile: http://ogp.me/ns/profile#">
<meta charset='utf-8'>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta http-equiv="Content-Language" content="en">
<title>Ranking/qstratRank.R at master · rbresearch/Ranking · GitHub</title>
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<link rel="apple-touch-icon" sizes="57x57" href="/apple-touch-icon-114.png">
<link rel="apple-touch-icon" sizes="114x114" href="/apple-touch-icon-114.png">
<link rel="apple-touch-icon" sizes="72x72" href="/apple-touch-icon-144.png">
<link rel="apple-touch-icon" sizes="144x144" href="/apple-touch-icon-144.png">
<meta property="fb:app_id" content="1401488693436528">
<meta content="@github" name="twitter:site" /><meta content="summary" name="twitter:card" /><meta content="rbresearch/Ranking" name="twitter:title" /><meta content="Ranking - Functions for various methods to rank assets" name="twitter:description" /><meta content="https://avatars3.githubusercontent.com/u/2328682?v=3&s=400" name="twitter:image:src" />
<meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="https://avatars3.githubusercontent.com/u/2328682?v=3&s=400" property="og:image" /><meta content="rbresearch/Ranking" property="og:title" /><meta content="https://github.com/rbresearch/Ranking" property="og:url" /><meta content="Ranking - Functions for various methods to rank assets" property="og:description" />
<meta name="browser-stats-url" content="/_stats">
<link rel="assets" href="https://assets-cdn.github.com/">
<link rel="conduit-xhr" href="https://ghconduit.com:25035">
<meta name="pjax-timeout" content="1000">
<meta name="msapplication-TileImage" content="/windows-tile.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-analytics" content="UA-3769691-2">
<meta content="collector.githubapp.com" name="octolytics-host" /><meta content="collector-cdn.github.com" name="octolytics-script-host" /><meta content="github" name="octolytics-app-id" /><meta content="617AB459:4D46:8C26B6:54AE98A6" name="octolytics-dimension-request_id" />
<meta content="Rails, view, blob#show" name="analytics-event" />
<link rel="icon" type="image/x-icon" href="https://assets-cdn.github.com/favicon.ico">
<meta content="authenticity_token" name="csrf-param" />
<meta content="CV17+Y0UrVe64YOUab2NlVZqPe8TG43W0A/UxfzcyvYPeKESDRGcy8YlMgIz0/hDpPC9ExvnONmAp7ltMrt0nQ==" name="csrf-token" />
<link href="https://assets-cdn.github.com/assets/github-48b40f362ce13d414b502b9860f7b3a265fc986a91976bc0e4fdf82704fe5b39.css" media="all" rel="stylesheet" type="text/css" />
<link href="https://assets-cdn.github.com/assets/github2-c81d1566689852c2c43097d424735ea531962a7599ad515afcc4a649ce814da7.css" media="all" rel="stylesheet" type="text/css" />
<meta http-equiv="x-pjax-version" content="13a28b85dba973dbac3877e14943b62c">
<meta name="description" content="Ranking - Functions for various methods to rank assets">
<meta name="go-import" content="github.com/rbresearch/Ranking git https://github.com/rbresearch/Ranking.git">
<meta content="2328682" name="octolytics-dimension-user_id" /><meta content="rbresearch" name="octolytics-dimension-user_login" /><meta content="5774136" name="octolytics-dimension-repository_id" /><meta content="rbresearch/Ranking" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="5774136" name="octolytics-dimension-repository_network_root_id" /><meta content="rbresearch/Ranking" name="octolytics-dimension-repository_network_root_nwo" />
<link href="https://github.com/rbresearch/Ranking/commits/master.atom" rel="alternate" title="Recent Commits to Ranking:master" type="application/atom+xml">
</head>
<body class="logged_out env-production windows vis-public page-blob">
<a href="#start-of-content" tabindex="1" class="accessibility-aid js-skip-to-content">Skip to content</a>
<div class="wrapper">
<div class="header header-logged-out" role="banner">
<div class="container clearfix">
<a class="header-logo-wordmark" href="https://github.com/" ga-data-click="(Logged out) Header, go to homepage, icon:logo-wordmark">
<span class="mega-octicon octicon-logo-github"></span>
</a>
<div class="header-actions" role="navigation">
<a class="button primary" href="/join" data-ga-click="(Logged out) Header, clicked Sign up, text:sign-up">Sign up</a>
<a class="button" href="/login?return_to=%2Frbresearch%2FRanking%2Fblob%2Fmaster%2Frank-functions%2FqstratRank.R" data-ga-click="(Logged out) Header, clicked Sign in, text:sign-in">Sign in</a>
</div>
<div class="site-search repo-scope js-site-search" role="search">
<form accept-charset="UTF-8" action="/rbresearch/Ranking/search" class="js-site-search-form" data-global-search-url="/search" data-repo-search-url="/rbresearch/Ranking/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<input type="text"
class="js-site-search-field is-clearable"
data-hotkey="s"
name="q"
placeholder="Search"
data-global-scope-placeholder="Search GitHub"
data-repo-scope-placeholder="Search"
tabindex="1"
autocapitalize="off">
<div class="scope-badge">This repository</div>
</form>
</div>
<ul class="header-nav left" role="navigation">
<li class="header-nav-item">
<a class="header-nav-link" href="/explore" data-ga-click="(Logged out) Header, go to explore, text:explore">Explore</a>
</li>
<li class="header-nav-item">
<a class="header-nav-link" href="/features" data-ga-click="(Logged out) Header, go to features, text:features">Features</a>
</li>
<li class="header-nav-item">
<a class="header-nav-link" href="https://enterprise.github.com/" data-ga-click="(Logged out) Header, go to enterprise, text:enterprise">Enterprise</a>
</li>
<li class="header-nav-item">
<a class="header-nav-link" href="/blog" data-ga-click="(Logged out) Header, go to blog, text:blog">Blog</a>
</li>
</ul>
</div>
</div>
<div id="start-of-content" class="accessibility-aid"></div>
<div class="site" itemscope itemtype="http://schema.org/WebPage">
<div id="js-flash-container">
</div>
<div class="pagehead repohead instapaper_ignore readability-menu">
<div class="container">
<ul class="pagehead-actions">
<li>
<a href="/login?return_to=%2Frbresearch%2FRanking"
class="minibutton with-count star-button tooltipped tooltipped-n"
aria-label="You must be signed in to star a repository" rel="nofollow">
<span class="octicon octicon-star"></span>
Star
</a>
<a class="social-count js-social-count" href="/rbresearch/Ranking/stargazers">
6
</a>
</li>
<li>
<a href="/login?return_to=%2Frbresearch%2FRanking"
class="minibutton with-count js-toggler-target fork-button tooltipped tooltipped-n"
aria-label="You must be signed in to fork a repository" rel="nofollow">
<span class="octicon octicon-repo-forked"></span>
Fork
</a>
<a href="/rbresearch/Ranking/network" class="social-count">
7
</a>
</li>
</ul>
<h1 itemscope itemtype="http://data-vocabulary.org/Breadcrumb" class="entry-title public">
<span class="mega-octicon octicon-repo"></span>
<span class="author"><a href="/rbresearch" class="url fn" itemprop="url" rel="author"><span itemprop="title">rbresearch</span></a></span><!--
--><span class="path-divider">/</span><!--
--><strong><a href="/rbresearch/Ranking" class="js-current-repository" data-pjax="#js-repo-pjax-container">Ranking</a></strong>
<span class="page-context-loader">
<img alt="" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</span>
</h1>
</div><!-- /.container -->
</div><!-- /.repohead -->
<div class="container">
<div class="repository-with-sidebar repo-container new-discussion-timeline ">
<div class="repository-sidebar clearfix">
<nav class="sunken-menu repo-nav js-repo-nav js-sidenav-container-pjax js-octicon-loaders"
role="navigation"
data-pjax="#js-repo-pjax-container"
data-issue-count-url="/rbresearch/Ranking/issues/counts">
<ul class="sunken-menu-group">
<li class="tooltipped tooltipped-w" aria-label="Code">
<a href="/rbresearch/Ranking" aria-label="Code" class="selected js-selected-navigation-item sunken-menu-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches /rbresearch/Ranking">
<span class="octicon octicon-code"></span> <span class="full-word">Code</span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
<li class="tooltipped tooltipped-w" aria-label="Issues">
<a href="/rbresearch/Ranking/issues" aria-label="Issues" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /rbresearch/Ranking/issues">
<span class="octicon octicon-issue-opened"></span> <span class="full-word">Issues</span>
<span class="js-issue-replace-counter"></span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
<li class="tooltipped tooltipped-w" aria-label="Pull Requests">
<a href="/rbresearch/Ranking/pulls" aria-label="Pull Requests" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g p" data-selected-links="repo_pulls /rbresearch/Ranking/pulls">
<span class="octicon octicon-git-pull-request"></span> <span class="full-word">Pull Requests</span>
<span class="js-pull-replace-counter"></span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
</ul>
<div class="sunken-menu-separator"></div>
<ul class="sunken-menu-group">
<li class="tooltipped tooltipped-w" aria-label="Pulse">
<a href="/rbresearch/Ranking/pulse" aria-label="Pulse" class="js-selected-navigation-item sunken-menu-item" data-selected-links="pulse /rbresearch/Ranking/pulse">
<span class="octicon octicon-pulse"></span> <span class="full-word">Pulse</span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
<li class="tooltipped tooltipped-w" aria-label="Graphs">
<a href="/rbresearch/Ranking/graphs" aria-label="Graphs" class="js-selected-navigation-item sunken-menu-item" data-selected-links="repo_graphs repo_contributors /rbresearch/Ranking/graphs">
<span class="octicon octicon-graph"></span> <span class="full-word">Graphs</span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
</ul>
</nav>
<div class="only-with-full-nav">
<div class="clone-url open"
data-protocol-type="http"
data-url="/users/set_protocol?protocol_selector=http&protocol_type=clone">
<h3><span class="text-emphasized">HTTPS</span> clone URL</h3>
<div class="input-group js-zeroclipboard-container">
<input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
value="https://github.com/rbresearch/Ranking.git" readonly="readonly">
<span class="input-group-button">
<button aria-label="Copy to clipboard" class="js-zeroclipboard minibutton zeroclipboard-button" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
</span>
</div>
</div>
<div class="clone-url "
data-protocol-type="subversion"
data-url="/users/set_protocol?protocol_selector=subversion&protocol_type=clone">
<h3><span class="text-emphasized">Subversion</span> checkout URL</h3>
<div class="input-group js-zeroclipboard-container">
<input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
value="https://github.com/rbresearch/Ranking" readonly="readonly">
<span class="input-group-button">
<button aria-label="Copy to clipboard" class="js-zeroclipboard minibutton zeroclipboard-button" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
</span>
</div>
</div>
<p class="clone-options">You can clone with
<a href="#" class="js-clone-selector" data-protocol="http">HTTPS</a> or <a href="#" class="js-clone-selector" data-protocol="subversion">Subversion</a>.
<a href="https://help.github.com/articles/which-remote-url-should-i-use" class="help tooltipped tooltipped-n" aria-label="Get help on which URL is right for you.">
<span class="octicon octicon-question"></span>
</a>
</p>
<a href="http://windows.github.com" class="minibutton sidebar-button" title="Save rbresearch/Ranking to your computer and use it in GitHub Desktop." aria-label="Save rbresearch/Ranking to your computer and use it in GitHub Desktop.">
<span class="octicon octicon-device-desktop"></span>
Clone in Desktop
</a>
<a href="/rbresearch/Ranking/archive/master.zip"
class="minibutton sidebar-button"
aria-label="Download the contents of rbresearch/Ranking as a zip file"
title="Download the contents of rbresearch/Ranking as a zip file"
rel="nofollow">
<span class="octicon octicon-cloud-download"></span>
Download ZIP
</a>
</div>
</div><!-- /.repository-sidebar -->
<div id="js-repo-pjax-container" class="repository-content context-loader-container" data-pjax-container>
<a href="/rbresearch/Ranking/blob/436cbcef884f0592e473a040e7557e9552d4aa10/rank-functions/qstratRank.R" class="hidden js-permalink-shortcut" data-hotkey="y">Permalink</a>
<!-- blob contrib key: blob_contributors:v21:660c5c981d9185a016a1d9e789705bdb -->
<div class="file-navigation js-zeroclipboard-container">
<div class="select-menu js-menu-container js-select-menu left">
<span class="minibutton select-menu-button js-menu-target css-truncate" data-hotkey="w"
data-master-branch="master"
data-ref="master"
title="master"
role="button" aria-label="Switch branches or tags" tabindex="0" aria-haspopup="true">
<span class="octicon octicon-git-branch"></span>
<i>branch:</i>
<span class="js-select-button css-truncate-target">master</span>
</span>
<div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax aria-hidden="true">
<div class="select-menu-modal">
<div class="select-menu-header">
<span class="select-menu-title">Switch branches/tags</span>
<span class="octicon octicon-x js-menu-close" role="button" aria-label="Close"></span>
</div> <!-- /.select-menu-header -->
<div class="select-menu-filters">
<div class="select-menu-text-filter">
<input type="text" aria-label="Filter branches/tags" id="context-commitish-filter-field" class="js-filterable-field js-navigation-enable" placeholder="Filter branches/tags">
</div>
<div class="select-menu-tabs">
<ul>
<li class="select-menu-tab">
<a href="#" data-tab-filter="branches" class="js-select-menu-tab">Branches</a>
</li>
<li class="select-menu-tab">
<a href="#" data-tab-filter="tags" class="js-select-menu-tab">Tags</a>
</li>
</ul>
</div><!-- /.select-menu-tabs -->
</div><!-- /.select-menu-filters -->
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
<div class="select-menu-item js-navigation-item selected">
<span class="select-menu-item-icon octicon octicon-check"></span>
<a href="/rbresearch/Ranking/blob/master/rank-functions/qstratRank.R"
data-name="master"
data-skip-pjax="true"
rel="nofollow"
class="js-navigation-open select-menu-item-text css-truncate-target"
title="master">master</a>
</div> <!-- /.select-menu-item -->
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div> <!-- /.select-menu-list -->
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div> <!-- /.select-menu-list -->
</div> <!-- /.select-menu-modal -->
</div> <!-- /.select-menu-modal-holder -->
</div> <!-- /.select-menu -->
<div class="button-group right">
<a href="/rbresearch/Ranking/find/master"
class="js-show-file-finder minibutton empty-icon tooltipped tooltipped-s"
data-pjax
data-hotkey="t"
aria-label="Quickly jump between files">
<span class="octicon octicon-list-unordered"></span>
</a>
<button aria-label="Copy file path to clipboard" class="js-zeroclipboard minibutton zeroclipboard-button" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
</div>
<div class="breadcrumb js-zeroclipboard-target">
<span class='repo-root js-repo-root'><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/rbresearch/Ranking" class="" data-branch="master" data-direction="back" data-pjax="true" itemscope="url"><span itemprop="title">Ranking</span></a></span></span><span class="separator">/</span><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/rbresearch/Ranking/tree/master/rank-functions" class="" data-branch="master" data-direction="back" data-pjax="true" itemscope="url"><span itemprop="title">rank-functions</span></a></span><span class="separator">/</span><strong class="final-path">qstratRank.R</strong>
</div>
</div>
<div class="commit file-history-tease">
<div class="file-history-tease-header">
<img alt="rbresearch" class="avatar" data-user="2328682" height="24" src="https://avatars3.githubusercontent.com/u/2328682?v=3&s=48" width="24" />
<span class="author"><a href="/rbresearch" rel="author">rbresearch</a></span>
<time datetime="2013-02-20T03:12:00Z" is="relative-time">Feb 19, 2013</time>
<div class="commit-title">
<a href="/rbresearch/Ranking/commit/2862831f65bf7fe86ff26c2bfd3c1d173e3ea5e3" class="message" data-pjax="true" title="deleting some files">deleting some files</a>
</div>
</div>
<div class="participation">
<p class="quickstat">
<a href="#blob_contributors_box" rel="facebox">
<strong>1</strong>
contributor
</a>
</p>
</div>
<div id="blob_contributors_box" style="display:none">
<h2 class="facebox-header">Users who have contributed to this file</h2>
<ul class="facebox-user-list">
<li class="facebox-user-list-item">
<img alt="rbresearch" data-user="2328682" height="24" src="https://avatars3.githubusercontent.com/u/2328682?v=3&s=48" width="24" />
<a href="/rbresearch">rbresearch</a>
</li>
</ul>
</div>
</div>
<div class="file-box">
<div class="file">
<div class="meta clearfix">
<div class="info file-name">
<span>111 lines (90 sloc)</span>
<span class="meta-divider"></span>
<span>4.053 kb</span>
</div>
<div class="actions">
<div class="button-group">
<a href="/rbresearch/Ranking/raw/master/rank-functions/qstratRank.R" class="minibutton " id="raw-url">Raw</a>
<a href="/rbresearch/Ranking/blame/master/rank-functions/qstratRank.R" class="minibutton js-update-url-with-hash">Blame</a>
<a href="/rbresearch/Ranking/commits/master/rank-functions/qstratRank.R" class="minibutton " rel="nofollow">History</a>
</div><!-- /.button-group -->
<a class="octicon-button tooltipped tooltipped-nw"
href="http://windows.github.com" aria-label="Open this file in GitHub for Windows">
<span class="octicon octicon-device-desktop"></span>
</a>
<a class="octicon-button disabled tooltipped tooltipped-w" href="#"
aria-label="You must be signed in to make or propose changes"><span class="octicon octicon-pencil"></span></a>
<a class="octicon-button danger disabled tooltipped tooltipped-w" href="#"
aria-label="You must be signed in to make or propose changes">
<span class="octicon octicon-trashcan"></span>
</a>
</div><!-- /.actions -->
</div>
<div class="blob-wrapper data type-r">
<table class="highlight tab-size-8 js-file-line-container">
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code js-file-line"><span class="pl-c"># qstratRank.R</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code js-file-line"><span class="pl-en">qstratRank</span> <span class="pl-k"><-</span> <span class="pl-k">function</span>(<span class="pl-vo">symbols</span>, <span class="pl-v">init.equity</span><span class="pl-k">=</span><span class="pl-c1">100000</span>, <span class="pl-v">top.N</span><span class="pl-k">=</span><span class="pl-c1">1</span>, </td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code js-file-line"> <span class="pl-v">max.size</span><span class="pl-k">=</span><span class="pl-c1">1000</span>, <span class="pl-v">max.levels</span><span class="pl-k">=</span><span class="pl-c1">1</span>) {</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code js-file-line"> <span class="pl-c"># The qstratRank function uses the quantstrat framework to backtest a</span></td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code js-file-line"> <span class="pl-c"># ranking or relative strength strategy</span></td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code js-file-line"> <span class="pl-c">#</span></td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code js-file-line"> <span class="pl-c"># args</span></td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code js-file-line"> <span class="pl-c"># symbols : character vector of symbols</span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code js-file-line"> <span class="pl-c"># init.equity : initial equity</span></td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code js-file-line"> <span class="pl-c"># top.N : trade the top N ranked assets</span></td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code js-file-line"> <span class="pl-c"># max.size : maximum position size</span></td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code js-file-line"> <span class="pl-c"># max.levels : maximum levels to scale in a trade</span></td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code js-file-line"> <span class="pl-c"># max.size and max.levels are passed to addPosLimit</span></td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code js-file-line"> <span class="pl-c">#</span></td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code js-file-line"> <span class="pl-c"># return value</span></td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code js-file-line"> <span class="pl-c"># returns a list: end.eq, returns, book, stats</span></td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code js-file-line"> <span class="pl-c"># remove variables</span></td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code js-file-line"> suppressWarnings(rm(<span class="pl-s1"><span class="pl-pds">"</span>order_book.Rank<span class="pl-pds">"</span></span>, <span class="pl-v">pos</span><span class="pl-k">=</span>.<span class="pl-vo">strategy</span>))</td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code js-file-line"> suppressWarnings(rm(<span class="pl-s1"><span class="pl-pds">"</span>account.Rank<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>portfolio.Rank<span class="pl-pds">"</span></span>, <span class="pl-v">pos</span><span class="pl-k">=</span>.<span class="pl-vo">blotter</span>))</td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code js-file-line"> suppressWarnings(rm(<span class="pl-s1"><span class="pl-pds">"</span>account.st<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>port.st<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>stock.str<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>stratRank<span class="pl-pds">"</span></span>,</td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code js-file-line"> <span class="pl-s1"><span class="pl-pds">"</span>initDate<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>initEq<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">'</span>start_t<span class="pl-pds">'</span></span>, <span class="pl-s1"><span class="pl-pds">'</span>end_t<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code js-file-line"> <span class="pl-c"># set initial variables</span></td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code js-file-line"> <span class="pl-vo">initDate</span> <span class="pl-k"><-</span> <span class="pl-s1"><span class="pl-pds">"</span>1900-01-01<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code js-file-line"> <span class="pl-vo">initEq</span> <span class="pl-k"><-</span> <span class="pl-vo">init.equity</span></td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code js-file-line"> <span class="pl-vo">port.st</span> <span class="pl-k"><-</span> <span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code js-file-line"> <span class="pl-vo">account.st</span> <span class="pl-k"><-</span> <span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code js-file-line"> <span class="pl-c"># trade the top "N" ranked symbols</span></td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code js-file-line"> <span class="pl-vo">N</span> <span class="pl-k"><-</span> <span class="pl-vo">top.N</span></td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code js-file-line"> <span class="pl-c"># initialize quantstrat objects</span></td>
</tr>
<tr>
<td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
<td id="LC35" class="blob-code js-file-line"> initPortf(<span class="pl-vo">port.st</span>, <span class="pl-v">symbols</span><span class="pl-k">=</span><span class="pl-vo">symbols</span>, <span class="pl-v">initDate</span><span class="pl-k">=</span><span class="pl-vo">initDate</span>)</td>
</tr>
<tr>
<td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
<td id="LC36" class="blob-code js-file-line"> initAcct(<span class="pl-vo">account.st</span>, <span class="pl-v">portfolios</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>, <span class="pl-v">initDate</span><span class="pl-k">=</span><span class="pl-vo">initDate</span>,<span class="pl-v">initEq</span><span class="pl-k">=</span><span class="pl-vo">initEq</span>)</td>
</tr>
<tr>
<td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
<td id="LC37" class="blob-code js-file-line"> initOrders(<span class="pl-v">portfolio</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>, <span class="pl-v">initDate</span><span class="pl-k">=</span><span class="pl-vo">initDate</span>)</td>
</tr>
<tr>
<td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
<td id="LC38" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
<td id="LC39" class="blob-code js-file-line"> <span class="pl-c"># initialize a strategy object</span></td>
</tr>
<tr>
<td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
<td id="LC40" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> strategy(<span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
<td id="LC41" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
<td id="LC42" class="blob-code js-file-line"> <span class="pl-c"># there are two signals</span></td>
</tr>
<tr>
<td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
<td id="LC43" class="blob-code js-file-line"> <span class="pl-c"># the first signal is when Rank is less than or equal to N</span></td>
</tr>
<tr>
<td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
<td id="LC44" class="blob-code js-file-line"> <span class="pl-c"># (i.e. trades the #1 ranked symbol if N=1)</span></td>
</tr>
<tr>
<td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
<td id="LC45" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.signal(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>sigThreshold<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
<td id="LC46" class="blob-code js-file-line"> <span class="pl-v">arguments</span><span class="pl-k">=</span><span class="pl-st">list</span>(<span class="pl-v">threshold</span><span class="pl-k">=</span><span class="pl-vo">N</span>, <span class="pl-v">column</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
<td id="LC47" class="blob-code js-file-line"> <span class="pl-v">relationship</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>lte<span class="pl-pds">"</span></span>, <span class="pl-v">cross</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>), </td>
</tr>
<tr>
<td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
<td id="LC48" class="blob-code js-file-line"> <span class="pl-v">label</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.lte.N<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
<td id="LC49" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
      <td id="LC50" class="blob-code js-file-line">  <span class="pl-c"># the second signal is when Rank is greater than N</span></td>
</tr>
<tr>
<td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
      <td id="LC51" class="blob-code js-file-line">  <span class="pl-c"># (i.e. exits the position once the symbol falls out of the top N)</span></td>
</tr>
<tr>
<td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
<td id="LC52" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.signal(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>sigThreshold<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
<td id="LC53" class="blob-code js-file-line"> <span class="pl-v">arguments</span><span class="pl-k">=</span><span class="pl-st">list</span>(<span class="pl-v">threshold</span><span class="pl-k">=</span><span class="pl-vo">N</span>, <span class="pl-v">column</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
<td id="LC54" class="blob-code js-file-line"> <span class="pl-v">relationship</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>gt<span class="pl-pds">"</span></span>, <span class="pl-v">cross</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>), </td>
</tr>
<tr>
<td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
<td id="LC55" class="blob-code js-file-line"> <span class="pl-v">label</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.gt.N<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
<td id="LC56" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L57" class="blob-num js-line-number" data-line-number="57"></td>
<td id="LC57" class="blob-code js-file-line"> <span class="pl-c"># add buy rule</span></td>
</tr>
<tr>
<td id="L58" class="blob-num js-line-number" data-line-number="58"></td>
<td id="LC58" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.rule(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>ruleSignal<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L59" class="blob-num js-line-number" data-line-number="59"></td>
<td id="LC59" class="blob-code js-file-line"> <span class="pl-v">arguments</span> <span class="pl-k">=</span> <span class="pl-st">list</span>(<span class="pl-v">sigcol</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.lte.N<span class="pl-pds">"</span></span>, <span class="pl-v">sigval</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>, </td>
</tr>
<tr>
<td id="L60" class="blob-num js-line-number" data-line-number="60"></td>
<td id="LC60" class="blob-code js-file-line"> <span class="pl-v">orderqty</span><span class="pl-k">=</span><span class="pl-vo">max.size</span>, <span class="pl-v">ordertype</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L61" class="blob-num js-line-number" data-line-number="61"></td>
<td id="LC61" class="blob-code js-file-line"> <span class="pl-v">orderside</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>long<span class="pl-pds">'</span></span>, <span class="pl-v">pricemethod</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L62" class="blob-num js-line-number" data-line-number="62"></td>
<td id="LC62" class="blob-code js-file-line"> <span class="pl-v">replace</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>, <span class="pl-v">osFUN</span><span class="pl-k">=</span><span class="pl-vo">osMaxPos</span>), </td>
</tr>
<tr>
<td id="L63" class="blob-num js-line-number" data-line-number="63"></td>
<td id="LC63" class="blob-code js-file-line"> <span class="pl-v">type</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>enter<span class="pl-pds">'</span></span>, <span class="pl-v">path.dep</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>)</td>
</tr>
<tr>
<td id="L64" class="blob-num js-line-number" data-line-number="64"></td>
<td id="LC64" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L65" class="blob-num js-line-number" data-line-number="65"></td>
<td id="LC65" class="blob-code js-file-line"> <span class="pl-c"># add exit rule</span></td>
</tr>
<tr>
<td id="L66" class="blob-num js-line-number" data-line-number="66"></td>
<td id="LC66" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.rule(<span class="pl-v">strategy</span> <span class="pl-k">=</span> <span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>ruleSignal<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L67" class="blob-num js-line-number" data-line-number="67"></td>
<td id="LC67" class="blob-code js-file-line"> <span class="pl-v">arguments</span> <span class="pl-k">=</span> <span class="pl-st">list</span>(<span class="pl-v">sigcol</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.gt.N<span class="pl-pds">"</span></span>, <span class="pl-v">sigval</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>, </td>
</tr>
<tr>
<td id="L68" class="blob-num js-line-number" data-line-number="68"></td>
<td id="LC68" class="blob-code js-file-line"> <span class="pl-v">orderqty</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>all<span class="pl-pds">'</span></span>, <span class="pl-v">ordertype</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L69" class="blob-num js-line-number" data-line-number="69"></td>
<td id="LC69" class="blob-code js-file-line"> <span class="pl-v">orderside</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>long<span class="pl-pds">'</span></span>, <span class="pl-v">pricemethod</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L70" class="blob-num js-line-number" data-line-number="70"></td>
<td id="LC70" class="blob-code js-file-line"> <span class="pl-v">replace</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>), </td>
</tr>
<tr>
<td id="L71" class="blob-num js-line-number" data-line-number="71"></td>
<td id="LC71" class="blob-code js-file-line"> <span class="pl-v">type</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>exit<span class="pl-pds">'</span></span>, <span class="pl-v">path.dep</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>)</td>
</tr>
<tr>
<td id="L72" class="blob-num js-line-number" data-line-number="72"></td>
<td id="LC72" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L73" class="blob-num js-line-number" data-line-number="73"></td>
<td id="LC73" class="blob-code js-file-line"> <span class="pl-c">#set max position size and levels</span></td>
</tr>
<tr>
<td id="L74" class="blob-num js-line-number" data-line-number="74"></td>
<td id="LC74" class="blob-code js-file-line"> <span class="pl-k">for</span>(<span class="pl-vo">symbol</span> <span class="pl-k">in</span> <span class="pl-vo">symbols</span>){ addPosLimit(<span class="pl-vo">port.st</span>, <span class="pl-vo">symbol</span>, <span class="pl-vo">initDate</span>, <span class="pl-vo">max.size</span>, <span class="pl-vo">max.levels</span>) }</td>
</tr>
<tr>
<td id="L75" class="blob-num js-line-number" data-line-number="75"></td>
<td id="LC75" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L76" class="blob-num js-line-number" data-line-number="76"></td>
<td id="LC76" class="blob-code js-file-line"> print(<span class="pl-s1"><span class="pl-pds">"</span>setup completed<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L77" class="blob-num js-line-number" data-line-number="77"></td>
<td id="LC77" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L78" class="blob-num js-line-number" data-line-number="78"></td>
<td id="LC78" class="blob-code js-file-line"> <span class="pl-c"># apply the strategy to the portfolio</span></td>
</tr>
<tr>
<td id="L79" class="blob-num js-line-number" data-line-number="79"></td>
<td id="LC79" class="blob-code js-file-line"> <span class="pl-vo">start_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L80" class="blob-num js-line-number" data-line-number="80"></td>
<td id="LC80" class="blob-code js-file-line"> <span class="pl-vo">out</span> <span class="pl-k"><-</span> try(applyStrategy(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">portfolios</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>))</td>
</tr>
<tr>
<td id="L81" class="blob-num js-line-number" data-line-number="81"></td>
<td id="LC81" class="blob-code js-file-line"> <span class="pl-vo">end_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L82" class="blob-num js-line-number" data-line-number="82"></td>
<td id="LC82" class="blob-code js-file-line"> print(<span class="pl-vo">end_t</span><span class="pl-k">-</span><span class="pl-vo">start_t</span>)</td>
</tr>
<tr>
<td id="L83" class="blob-num js-line-number" data-line-number="83"></td>
<td id="LC83" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L84" class="blob-num js-line-number" data-line-number="84"></td>
<td id="LC84" class="blob-code js-file-line"> <span class="pl-c"># update Portfolio</span></td>
</tr>
<tr>
<td id="L85" class="blob-num js-line-number" data-line-number="85"></td>
<td id="LC85" class="blob-code js-file-line"> <span class="pl-vo">start_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L86" class="blob-num js-line-number" data-line-number="86"></td>
<td id="LC86" class="blob-code js-file-line"> updatePortf(<span class="pl-v">Portfolio</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>, <span class="pl-v">Dates</span><span class="pl-k">=</span>paste(<span class="pl-s1"><span class="pl-pds">'</span>::<span class="pl-pds">'</span></span>, as.Date(Sys.time()), <span class="pl-v">sep</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span><span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L87" class="blob-num js-line-number" data-line-number="87"></td>
<td id="LC87" class="blob-code js-file-line"> <span class="pl-vo">end_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L88" class="blob-num js-line-number" data-line-number="88"></td>
<td id="LC88" class="blob-code js-file-line"> print(<span class="pl-s1"><span class="pl-pds">"</span>trade blotter portfolio update:<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L89" class="blob-num js-line-number" data-line-number="89"></td>
<td id="LC89" class="blob-code js-file-line"> print(<span class="pl-vo">end_t</span> <span class="pl-k">-</span> <span class="pl-vo">start_t</span>)</td>
</tr>
<tr>
<td id="L90" class="blob-num js-line-number" data-line-number="90"></td>
<td id="LC90" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L91" class="blob-num js-line-number" data-line-number="91"></td>
<td id="LC91" class="blob-code js-file-line"> <span class="pl-c"># update account</span></td>
</tr>
<tr>
<td id="L92" class="blob-num js-line-number" data-line-number="92"></td>
<td id="LC92" class="blob-code js-file-line"> updateAcct(<span class="pl-vo">account.st</span>)</td>
</tr>
<tr>
<td id="L93" class="blob-num js-line-number" data-line-number="93"></td>
<td id="LC93" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L94" class="blob-num js-line-number" data-line-number="94"></td>
<td id="LC94" class="blob-code js-file-line"> <span class="pl-c"># update ending equity</span></td>
</tr>
<tr>
<td id="L95" class="blob-num js-line-number" data-line-number="95"></td>
<td id="LC95" class="blob-code js-file-line"> updateEndEq(<span class="pl-vo">account.st</span>)</td>
</tr>
<tr>
<td id="L96" class="blob-num js-line-number" data-line-number="96"></td>
<td id="LC96" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L97" class="blob-num js-line-number" data-line-number="97"></td>
<td id="LC97" class="blob-code js-file-line"> <span class="pl-c"># get ending equity</span></td>
</tr>
<tr>
<td id="L98" class="blob-num js-line-number" data-line-number="98"></td>
<td id="LC98" class="blob-code js-file-line"> <span class="pl-vo">eq</span> <span class="pl-k"><-</span> getEndEq(<span class="pl-vo">account.st</span>, Sys.Date()) <span class="pl-k">+</span> <span class="pl-vo">initEq</span></td>
</tr>
<tr>
<td id="L99" class="blob-num js-line-number" data-line-number="99"></td>
<td id="LC99" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L100" class="blob-num js-line-number" data-line-number="100"></td>
<td id="LC100" class="blob-code js-file-line"> <span class="pl-c"># view order book to confirm trades</span></td>
</tr>
<tr>
<td id="L101" class="blob-num js-line-number" data-line-number="101"></td>
<td id="LC101" class="blob-code js-file-line"> <span class="pl-vo">order.book</span> <span class="pl-k"><-</span> getOrderBook(<span class="pl-vo">port.st</span>)</td>
</tr>
<tr>
<td id="L102" class="blob-num js-line-number" data-line-number="102"></td>
<td id="LC102" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L103" class="blob-num js-line-number" data-line-number="103"></td>
<td id="LC103" class="blob-code js-file-line"> <span class="pl-c"># get trade statistics</span></td>
</tr>
<tr>
<td id="L104" class="blob-num js-line-number" data-line-number="104"></td>
<td id="LC104" class="blob-code js-file-line"> <span class="pl-vo">stats</span> <span class="pl-k"><-</span> tradeStats(<span class="pl-vo">port.st</span>)</td>
</tr>
<tr>
<td id="L105" class="blob-num js-line-number" data-line-number="105"></td>
<td id="LC105" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L106" class="blob-num js-line-number" data-line-number="106"></td>
<td id="LC106" class="blob-code js-file-line"> <span class="pl-c"># portfolio returns</span></td>
</tr>
<tr>
<td id="L107" class="blob-num js-line-number" data-line-number="107"></td>
<td id="LC107" class="blob-code js-file-line"> <span class="pl-vo">ret1</span> <span class="pl-k"><-</span> PortfReturns(<span class="pl-vo">port.st</span>)</td>
</tr>
<tr>
<td id="L108" class="blob-num js-line-number" data-line-number="108"></td>
<td id="LC108" class="blob-code js-file-line"> <span class="pl-vo">ret1</span><span class="pl-k">$</span><span class="pl-vo">total</span> <span class="pl-k"><-</span> rowSums(<span class="pl-vo">ret1</span>, <span class="pl-v">na.rm</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>)</td>
</tr>
<tr>
<td id="L109" class="blob-num js-line-number" data-line-number="109"></td>
<td id="LC109" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L110" class="blob-num js-line-number" data-line-number="110"></td>
<td id="LC110" class="blob-code js-file-line"> <span class="pl-k">return</span>(<span class="pl-st">list</span>(<span class="pl-v">end.eq</span><span class="pl-k">=</span><span class="pl-vo">eq</span>, <span class="pl-v">returns</span><span class="pl-k">=</span><span class="pl-vo">ret1</span>, <span class="pl-v">book</span><span class="pl-k">=</span><span class="pl-vo">order.book</span>, <span class="pl-v">stats</span><span class="pl-k">=</span><span class="pl-vo">stats</span>))</td>
</tr>
<tr>
<td id="L111" class="blob-num js-line-number" data-line-number="111"></td>
<td id="LC111" class="blob-code js-file-line">}</td>
</tr>
</table>
</div>
</div>
</div>
<a href="#jump-to-line" rel="facebox[.linejump]" data-hotkey="l" style="display:none">Jump to Line</a>
<div id="jump-to-line" style="display:none">
<form accept-charset="UTF-8" class="js-jump-to-line-form">
<input class="linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" autofocus>
<button type="submit" class="button">Go</button>
</form>
</div>
</div>
</div><!-- /.repo-container -->
<div class="modal-backdrop"></div>
</div><!-- /.container -->
</div><!-- /.site -->
</div><!-- /.wrapper -->
<div class="container">
<div class="site-footer" role="contentinfo">
<ul class="site-footer-links right">
<li><a href="https://status.github.com/">Status</a></li>
<li><a href="https://developer.github.com">API</a></li>
<li><a href="http://training.github.com">Training</a></li>
<li><a href="http://shop.github.com">Shop</a></li>
<li><a href="/blog">Blog</a></li>
<li><a href="/about">About</a></li>
</ul>
<a href="/" aria-label="Homepage">
<span class="mega-octicon octicon-mark-github" title="GitHub"></span>
</a>
<ul class="site-footer-links">
<li>© 2015 <span title="0.03460s from github-fe138-cp1-prd.iad.github.net">GitHub</span>, Inc.</li>
<li><a href="/site/terms">Terms</a></li>
<li><a href="/site/privacy">Privacy</a></li>
<li><a href="/security">Security</a></li>
<li><a href="/contact">Contact</a></li>
</ul>
</div><!-- /.site-footer -->
</div><!-- /.container -->
<div class="fullscreen-overlay js-fullscreen-overlay" id="fullscreen_overlay">
<div class="fullscreen-container js-suggester-container">
<div class="textarea-wrap">
<textarea name="fullscreen-contents" id="fullscreen-contents" class="fullscreen-contents js-fullscreen-contents" placeholder=""></textarea>
<div class="suggester-container">
<div class="suggester fullscreen-suggester js-suggester js-navigation-container"></div>
</div>
</div>
</div>
<div class="fullscreen-sidebar">
<a href="#" class="exit-fullscreen js-exit-fullscreen tooltipped tooltipped-w" aria-label="Exit Zen Mode">
<span class="mega-octicon octicon-screen-normal"></span>
</a>
<a href="#" class="theme-switcher js-theme-switcher tooltipped tooltipped-w"
aria-label="Switch themes">
<span class="octicon octicon-color-mode"></span>
</a>
</div>
</div>
<div id="ajax-error-message" class="flash flash-error">
<span class="octicon octicon-alert"></span>
<a href="#" class="octicon octicon-x flash-close js-ajax-error-dismiss" aria-label="Dismiss error"></a>
Something went wrong with that request. Please try again.
</div>
<script crossorigin="anonymous" src="https://assets-cdn.github.com/assets/frameworks-2a688ea200a50a474783d34f1f46405acbc687c4d17db98b4b2b769b44174d5d.js" type="text/javascript"></script>
<script async="async" crossorigin="anonymous" src="https://assets-cdn.github.com/assets/github-c28c4beb84e212634dbd1b98793bbe9332814860c2e34cdaac36f18b6b90b7dd.js" type="text/javascript"></script>
</body>
</html>
| /qstratRank.R | no_license | randylodes/R | R | false | false | 58,732 | r |
<!DOCTYPE html>
<html lang="en" class="">
<head prefix="og: http://ogp.me/ns# fb: http://ogp.me/ns/fb# object: http://ogp.me/ns/object# article: http://ogp.me/ns/article# profile: http://ogp.me/ns/profile#">
<meta charset='utf-8'>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta http-equiv="Content-Language" content="en">
<title>Ranking/qstratRank.R at master · rbresearch/Ranking · GitHub</title>
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<link rel="apple-touch-icon" sizes="57x57" href="/apple-touch-icon-114.png">
<link rel="apple-touch-icon" sizes="114x114" href="/apple-touch-icon-114.png">
<link rel="apple-touch-icon" sizes="72x72" href="/apple-touch-icon-144.png">
<link rel="apple-touch-icon" sizes="144x144" href="/apple-touch-icon-144.png">
<meta property="fb:app_id" content="1401488693436528">
<meta content="@github" name="twitter:site" /><meta content="summary" name="twitter:card" /><meta content="rbresearch/Ranking" name="twitter:title" /><meta content="Ranking - Functions for various methods to rank assets" name="twitter:description" /><meta content="https://avatars3.githubusercontent.com/u/2328682?v=3&s=400" name="twitter:image:src" />
<meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="https://avatars3.githubusercontent.com/u/2328682?v=3&s=400" property="og:image" /><meta content="rbresearch/Ranking" property="og:title" /><meta content="https://github.com/rbresearch/Ranking" property="og:url" /><meta content="Ranking - Functions for various methods to rank assets" property="og:description" />
<meta name="browser-stats-url" content="/_stats">
<link rel="assets" href="https://assets-cdn.github.com/">
<link rel="conduit-xhr" href="https://ghconduit.com:25035">
<meta name="pjax-timeout" content="1000">
<meta name="msapplication-TileImage" content="/windows-tile.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-analytics" content="UA-3769691-2">
<meta content="collector.githubapp.com" name="octolytics-host" /><meta content="collector-cdn.github.com" name="octolytics-script-host" /><meta content="github" name="octolytics-app-id" /><meta content="617AB459:4D46:8C26B6:54AE98A6" name="octolytics-dimension-request_id" />
<meta content="Rails, view, blob#show" name="analytics-event" />
<link rel="icon" type="image/x-icon" href="https://assets-cdn.github.com/favicon.ico">
<meta content="authenticity_token" name="csrf-param" />
<meta content="CV17+Y0UrVe64YOUab2NlVZqPe8TG43W0A/UxfzcyvYPeKESDRGcy8YlMgIz0/hDpPC9ExvnONmAp7ltMrt0nQ==" name="csrf-token" />
<link href="https://assets-cdn.github.com/assets/github-48b40f362ce13d414b502b9860f7b3a265fc986a91976bc0e4fdf82704fe5b39.css" media="all" rel="stylesheet" type="text/css" />
<link href="https://assets-cdn.github.com/assets/github2-c81d1566689852c2c43097d424735ea531962a7599ad515afcc4a649ce814da7.css" media="all" rel="stylesheet" type="text/css" />
<meta http-equiv="x-pjax-version" content="13a28b85dba973dbac3877e14943b62c">
<meta name="description" content="Ranking - Functions for various methods to rank assets">
<meta name="go-import" content="github.com/rbresearch/Ranking git https://github.com/rbresearch/Ranking.git">
<meta content="2328682" name="octolytics-dimension-user_id" /><meta content="rbresearch" name="octolytics-dimension-user_login" /><meta content="5774136" name="octolytics-dimension-repository_id" /><meta content="rbresearch/Ranking" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="5774136" name="octolytics-dimension-repository_network_root_id" /><meta content="rbresearch/Ranking" name="octolytics-dimension-repository_network_root_nwo" />
<link href="https://github.com/rbresearch/Ranking/commits/master.atom" rel="alternate" title="Recent Commits to Ranking:master" type="application/atom+xml">
</head>
<body class="logged_out env-production windows vis-public page-blob">
<a href="#start-of-content" tabindex="1" class="accessibility-aid js-skip-to-content">Skip to content</a>
<div class="wrapper">
<div class="header header-logged-out" role="banner">
<div class="container clearfix">
<a class="header-logo-wordmark" href="https://github.com/" ga-data-click="(Logged out) Header, go to homepage, icon:logo-wordmark">
<span class="mega-octicon octicon-logo-github"></span>
</a>
<div class="header-actions" role="navigation">
<a class="button primary" href="/join" data-ga-click="(Logged out) Header, clicked Sign up, text:sign-up">Sign up</a>
<a class="button" href="/login?return_to=%2Frbresearch%2FRanking%2Fblob%2Fmaster%2Frank-functions%2FqstratRank.R" data-ga-click="(Logged out) Header, clicked Sign in, text:sign-in">Sign in</a>
</div>
<div class="site-search repo-scope js-site-search" role="search">
<form accept-charset="UTF-8" action="/rbresearch/Ranking/search" class="js-site-search-form" data-global-search-url="/search" data-repo-search-url="/rbresearch/Ranking/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<input type="text"
class="js-site-search-field is-clearable"
data-hotkey="s"
name="q"
placeholder="Search"
data-global-scope-placeholder="Search GitHub"
data-repo-scope-placeholder="Search"
tabindex="1"
autocapitalize="off">
<div class="scope-badge">This repository</div>
</form>
</div>
<ul class="header-nav left" role="navigation">
<li class="header-nav-item">
<a class="header-nav-link" href="/explore" data-ga-click="(Logged out) Header, go to explore, text:explore">Explore</a>
</li>
<li class="header-nav-item">
<a class="header-nav-link" href="/features" data-ga-click="(Logged out) Header, go to features, text:features">Features</a>
</li>
<li class="header-nav-item">
<a class="header-nav-link" href="https://enterprise.github.com/" data-ga-click="(Logged out) Header, go to enterprise, text:enterprise">Enterprise</a>
</li>
<li class="header-nav-item">
<a class="header-nav-link" href="/blog" data-ga-click="(Logged out) Header, go to blog, text:blog">Blog</a>
</li>
</ul>
</div>
</div>
<div id="start-of-content" class="accessibility-aid"></div>
<div class="site" itemscope itemtype="http://schema.org/WebPage">
<div id="js-flash-container">
</div>
<div class="pagehead repohead instapaper_ignore readability-menu">
<div class="container">
<ul class="pagehead-actions">
<li>
<a href="/login?return_to=%2Frbresearch%2FRanking"
class="minibutton with-count star-button tooltipped tooltipped-n"
aria-label="You must be signed in to star a repository" rel="nofollow">
<span class="octicon octicon-star"></span>
Star
</a>
<a class="social-count js-social-count" href="/rbresearch/Ranking/stargazers">
6
</a>
</li>
<li>
<a href="/login?return_to=%2Frbresearch%2FRanking"
class="minibutton with-count js-toggler-target fork-button tooltipped tooltipped-n"
aria-label="You must be signed in to fork a repository" rel="nofollow">
<span class="octicon octicon-repo-forked"></span>
Fork
</a>
<a href="/rbresearch/Ranking/network" class="social-count">
7
</a>
</li>
</ul>
<h1 itemscope itemtype="http://data-vocabulary.org/Breadcrumb" class="entry-title public">
<span class="mega-octicon octicon-repo"></span>
<span class="author"><a href="/rbresearch" class="url fn" itemprop="url" rel="author"><span itemprop="title">rbresearch</span></a></span><!--
--><span class="path-divider">/</span><!--
--><strong><a href="/rbresearch/Ranking" class="js-current-repository" data-pjax="#js-repo-pjax-container">Ranking</a></strong>
<span class="page-context-loader">
<img alt="" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</span>
</h1>
</div><!-- /.container -->
</div><!-- /.repohead -->
<div class="container">
<div class="repository-with-sidebar repo-container new-discussion-timeline ">
<div class="repository-sidebar clearfix">
<nav class="sunken-menu repo-nav js-repo-nav js-sidenav-container-pjax js-octicon-loaders"
role="navigation"
data-pjax="#js-repo-pjax-container"
data-issue-count-url="/rbresearch/Ranking/issues/counts">
<ul class="sunken-menu-group">
<li class="tooltipped tooltipped-w" aria-label="Code">
<a href="/rbresearch/Ranking" aria-label="Code" class="selected js-selected-navigation-item sunken-menu-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches /rbresearch/Ranking">
<span class="octicon octicon-code"></span> <span class="full-word">Code</span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
<li class="tooltipped tooltipped-w" aria-label="Issues">
<a href="/rbresearch/Ranking/issues" aria-label="Issues" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /rbresearch/Ranking/issues">
<span class="octicon octicon-issue-opened"></span> <span class="full-word">Issues</span>
<span class="js-issue-replace-counter"></span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
<li class="tooltipped tooltipped-w" aria-label="Pull Requests">
<a href="/rbresearch/Ranking/pulls" aria-label="Pull Requests" class="js-selected-navigation-item sunken-menu-item" data-hotkey="g p" data-selected-links="repo_pulls /rbresearch/Ranking/pulls">
<span class="octicon octicon-git-pull-request"></span> <span class="full-word">Pull Requests</span>
<span class="js-pull-replace-counter"></span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
</ul>
<div class="sunken-menu-separator"></div>
<ul class="sunken-menu-group">
<li class="tooltipped tooltipped-w" aria-label="Pulse">
<a href="/rbresearch/Ranking/pulse" aria-label="Pulse" class="js-selected-navigation-item sunken-menu-item" data-selected-links="pulse /rbresearch/Ranking/pulse">
<span class="octicon octicon-pulse"></span> <span class="full-word">Pulse</span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
<li class="tooltipped tooltipped-w" aria-label="Graphs">
<a href="/rbresearch/Ranking/graphs" aria-label="Graphs" class="js-selected-navigation-item sunken-menu-item" data-selected-links="repo_graphs repo_contributors /rbresearch/Ranking/graphs">
<span class="octicon octicon-graph"></span> <span class="full-word">Graphs</span>
<img alt="" class="mini-loader" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</a> </li>
</ul>
</nav>
<div class="only-with-full-nav">
<div class="clone-url open"
data-protocol-type="http"
data-url="/users/set_protocol?protocol_selector=http&protocol_type=clone">
<h3><span class="text-emphasized">HTTPS</span> clone URL</h3>
<div class="input-group js-zeroclipboard-container">
<input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
value="https://github.com/rbresearch/Ranking.git" readonly="readonly">
<span class="input-group-button">
<button aria-label="Copy to clipboard" class="js-zeroclipboard minibutton zeroclipboard-button" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
</span>
</div>
</div>
<div class="clone-url "
data-protocol-type="subversion"
data-url="/users/set_protocol?protocol_selector=subversion&protocol_type=clone">
<h3><span class="text-emphasized">Subversion</span> checkout URL</h3>
<div class="input-group js-zeroclipboard-container">
<input type="text" class="input-mini input-monospace js-url-field js-zeroclipboard-target"
value="https://github.com/rbresearch/Ranking" readonly="readonly">
<span class="input-group-button">
<button aria-label="Copy to clipboard" class="js-zeroclipboard minibutton zeroclipboard-button" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
</span>
</div>
</div>
<p class="clone-options">You can clone with
<a href="#" class="js-clone-selector" data-protocol="http">HTTPS</a> or <a href="#" class="js-clone-selector" data-protocol="subversion">Subversion</a>.
<a href="https://help.github.com/articles/which-remote-url-should-i-use" class="help tooltipped tooltipped-n" aria-label="Get help on which URL is right for you.">
<span class="octicon octicon-question"></span>
</a>
</p>
<a href="http://windows.github.com" class="minibutton sidebar-button" title="Save rbresearch/Ranking to your computer and use it in GitHub Desktop." aria-label="Save rbresearch/Ranking to your computer and use it in GitHub Desktop.">
<span class="octicon octicon-device-desktop"></span>
Clone in Desktop
</a>
<a href="/rbresearch/Ranking/archive/master.zip"
class="minibutton sidebar-button"
aria-label="Download the contents of rbresearch/Ranking as a zip file"
title="Download the contents of rbresearch/Ranking as a zip file"
rel="nofollow">
<span class="octicon octicon-cloud-download"></span>
Download ZIP
</a>
</div>
</div><!-- /.repository-sidebar -->
<div id="js-repo-pjax-container" class="repository-content context-loader-container" data-pjax-container>
<a href="/rbresearch/Ranking/blob/436cbcef884f0592e473a040e7557e9552d4aa10/rank-functions/qstratRank.R" class="hidden js-permalink-shortcut" data-hotkey="y">Permalink</a>
<!-- blob contrib key: blob_contributors:v21:660c5c981d9185a016a1d9e789705bdb -->
<div class="file-navigation js-zeroclipboard-container">
<div class="select-menu js-menu-container js-select-menu left">
<span class="minibutton select-menu-button js-menu-target css-truncate" data-hotkey="w"
data-master-branch="master"
data-ref="master"
title="master"
role="button" aria-label="Switch branches or tags" tabindex="0" aria-haspopup="true">
<span class="octicon octicon-git-branch"></span>
<i>branch:</i>
<span class="js-select-button css-truncate-target">master</span>
</span>
<div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax aria-hidden="true">
<div class="select-menu-modal">
<div class="select-menu-header">
<span class="select-menu-title">Switch branches/tags</span>
<span class="octicon octicon-x js-menu-close" role="button" aria-label="Close"></span>
</div> <!-- /.select-menu-header -->
<div class="select-menu-filters">
<div class="select-menu-text-filter">
<input type="text" aria-label="Filter branches/tags" id="context-commitish-filter-field" class="js-filterable-field js-navigation-enable" placeholder="Filter branches/tags">
</div>
<div class="select-menu-tabs">
<ul>
<li class="select-menu-tab">
<a href="#" data-tab-filter="branches" class="js-select-menu-tab">Branches</a>
</li>
<li class="select-menu-tab">
<a href="#" data-tab-filter="tags" class="js-select-menu-tab">Tags</a>
</li>
</ul>
</div><!-- /.select-menu-tabs -->
</div><!-- /.select-menu-filters -->
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
<div class="select-menu-item js-navigation-item selected">
<span class="select-menu-item-icon octicon octicon-check"></span>
<a href="/rbresearch/Ranking/blob/master/rank-functions/qstratRank.R"
data-name="master"
data-skip-pjax="true"
rel="nofollow"
class="js-navigation-open select-menu-item-text css-truncate-target"
title="master">master</a>
</div> <!-- /.select-menu-item -->
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div> <!-- /.select-menu-list -->
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div> <!-- /.select-menu-list -->
</div> <!-- /.select-menu-modal -->
</div> <!-- /.select-menu-modal-holder -->
</div> <!-- /.select-menu -->
<div class="button-group right">
<a href="/rbresearch/Ranking/find/master"
class="js-show-file-finder minibutton empty-icon tooltipped tooltipped-s"
data-pjax
data-hotkey="t"
aria-label="Quickly jump between files">
<span class="octicon octicon-list-unordered"></span>
</a>
<button aria-label="Copy file path to clipboard" class="js-zeroclipboard minibutton zeroclipboard-button" data-copied-hint="Copied!" type="button"><span class="octicon octicon-clippy"></span></button>
</div>
<div class="breadcrumb js-zeroclipboard-target">
<span class='repo-root js-repo-root'><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/rbresearch/Ranking" class="" data-branch="master" data-direction="back" data-pjax="true" itemscope="url"><span itemprop="title">Ranking</span></a></span></span><span class="separator">/</span><span itemscope="" itemtype="http://data-vocabulary.org/Breadcrumb"><a href="/rbresearch/Ranking/tree/master/rank-functions" class="" data-branch="master" data-direction="back" data-pjax="true" itemscope="url"><span itemprop="title">rank-functions</span></a></span><span class="separator">/</span><strong class="final-path">qstratRank.R</strong>
</div>
</div>
<div class="commit file-history-tease">
<div class="file-history-tease-header">
<img alt="rbresearch" class="avatar" data-user="2328682" height="24" src="https://avatars3.githubusercontent.com/u/2328682?v=3&s=48" width="24" />
<span class="author"><a href="/rbresearch" rel="author">rbresearch</a></span>
<time datetime="2013-02-20T03:12:00Z" is="relative-time">Feb 19, 2013</time>
<div class="commit-title">
<a href="/rbresearch/Ranking/commit/2862831f65bf7fe86ff26c2bfd3c1d173e3ea5e3" class="message" data-pjax="true" title="deleting some files">deleting some files</a>
</div>
</div>
<div class="participation">
<p class="quickstat">
<a href="#blob_contributors_box" rel="facebox">
<strong>1</strong>
contributor
</a>
</p>
</div>
<div id="blob_contributors_box" style="display:none">
<h2 class="facebox-header">Users who have contributed to this file</h2>
<ul class="facebox-user-list">
<li class="facebox-user-list-item">
<img alt="rbresearch" data-user="2328682" height="24" src="https://avatars3.githubusercontent.com/u/2328682?v=3&s=48" width="24" />
<a href="/rbresearch">rbresearch</a>
</li>
</ul>
</div>
</div>
<div class="file-box">
<div class="file">
<div class="meta clearfix">
<div class="info file-name">
<span>111 lines (90 sloc)</span>
<span class="meta-divider"></span>
<span>4.053 kb</span>
</div>
<div class="actions">
<div class="button-group">
<a href="/rbresearch/Ranking/raw/master/rank-functions/qstratRank.R" class="minibutton " id="raw-url">Raw</a>
<a href="/rbresearch/Ranking/blame/master/rank-functions/qstratRank.R" class="minibutton js-update-url-with-hash">Blame</a>
<a href="/rbresearch/Ranking/commits/master/rank-functions/qstratRank.R" class="minibutton " rel="nofollow">History</a>
</div><!-- /.button-group -->
<a class="octicon-button tooltipped tooltipped-nw"
href="http://windows.github.com" aria-label="Open this file in GitHub for Windows">
<span class="octicon octicon-device-desktop"></span>
</a>
<a class="octicon-button disabled tooltipped tooltipped-w" href="#"
aria-label="You must be signed in to make or propose changes"><span class="octicon octicon-pencil"></span></a>
<a class="octicon-button danger disabled tooltipped tooltipped-w" href="#"
aria-label="You must be signed in to make or propose changes">
<span class="octicon octicon-trashcan"></span>
</a>
</div><!-- /.actions -->
</div>
<div class="blob-wrapper data type-r">
<table class="highlight tab-size-8 js-file-line-container">
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code js-file-line"><span class="pl-c"># qstratRank.R</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code js-file-line"><span class="pl-en">qstratRank</span> <span class="pl-k"><-</span> <span class="pl-k">function</span>(<span class="pl-vo">symbols</span>, <span class="pl-v">init.equity</span><span class="pl-k">=</span><span class="pl-c1">100000</span>, <span class="pl-v">top.N</span><span class="pl-k">=</span><span class="pl-c1">1</span>, </td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code js-file-line"> <span class="pl-v">max.size</span><span class="pl-k">=</span><span class="pl-c1">1000</span>, <span class="pl-v">max.levels</span><span class="pl-k">=</span><span class="pl-c1">1</span>) {</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code js-file-line"> <span class="pl-c"># The qstratRank function uses the quantstrat framework to backtest a</span></td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code js-file-line"> <span class="pl-c"># ranking or relative strength strategy</span></td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code js-file-line"> <span class="pl-c">#</span></td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code js-file-line"> <span class="pl-c"># args</span></td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code js-file-line"> <span class="pl-c"># symbols : character vector of symbols</span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code js-file-line"> <span class="pl-c"># init.equity : initial equity</span></td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code js-file-line"> <span class="pl-c"># top.N : trade the top N ranked assets</span></td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code js-file-line"> <span class="pl-c"># max.size : maximum position size</span></td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code js-file-line"> <span class="pl-c"># max.levels : maximum levels to scale in a trade</span></td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code js-file-line"> <span class="pl-c"># max.size and max.levels are passed to addPosLimit</span></td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code js-file-line"> <span class="pl-c">#</span></td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code js-file-line"> <span class="pl-c"># return value</span></td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code js-file-line"> <span class="pl-c"># returns a list: end.eq, returns, book, stats</span></td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code js-file-line"> <span class="pl-c"># remove variables</span></td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code js-file-line"> suppressWarnings(rm(<span class="pl-s1"><span class="pl-pds">"</span>order_book.Rank<span class="pl-pds">"</span></span>, <span class="pl-v">pos</span><span class="pl-k">=</span>.<span class="pl-vo">strategy</span>))</td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code js-file-line"> suppressWarnings(rm(<span class="pl-s1"><span class="pl-pds">"</span>account.Rank<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>portfolio.Rank<span class="pl-pds">"</span></span>, <span class="pl-v">pos</span><span class="pl-k">=</span>.<span class="pl-vo">blotter</span>))</td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code js-file-line"> suppressWarnings(rm(<span class="pl-s1"><span class="pl-pds">"</span>account.st<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>port.st<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>stock.str<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>stratRank<span class="pl-pds">"</span></span>,</td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code js-file-line"> <span class="pl-s1"><span class="pl-pds">"</span>initDate<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">"</span>initEq<span class="pl-pds">"</span></span>, <span class="pl-s1"><span class="pl-pds">'</span>start_t<span class="pl-pds">'</span></span>, <span class="pl-s1"><span class="pl-pds">'</span>end_t<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code js-file-line"> <span class="pl-c"># set initial variables</span></td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code js-file-line"> <span class="pl-vo">initDate</span> <span class="pl-k"><-</span> <span class="pl-s1"><span class="pl-pds">"</span>1900-01-01<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code js-file-line"> <span class="pl-vo">initEq</span> <span class="pl-k"><-</span> <span class="pl-vo">init.equity</span></td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code js-file-line"> <span class="pl-vo">port.st</span> <span class="pl-k"><-</span> <span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code js-file-line"> <span class="pl-vo">account.st</span> <span class="pl-k"><-</span> <span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code js-file-line"> <span class="pl-c"># trade the top "N" ranked symbols</span></td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code js-file-line"> <span class="pl-vo">N</span> <span class="pl-k"><-</span> <span class="pl-vo">top.N</span></td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code js-file-line"> <span class="pl-c"># initialize quantstrat objects</span></td>
</tr>
<tr>
<td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
<td id="LC35" class="blob-code js-file-line"> initPortf(<span class="pl-vo">port.st</span>, <span class="pl-v">symbols</span><span class="pl-k">=</span><span class="pl-vo">symbols</span>, <span class="pl-v">initDate</span><span class="pl-k">=</span><span class="pl-vo">initDate</span>)</td>
</tr>
<tr>
<td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
<td id="LC36" class="blob-code js-file-line"> initAcct(<span class="pl-vo">account.st</span>, <span class="pl-v">portfolios</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>, <span class="pl-v">initDate</span><span class="pl-k">=</span><span class="pl-vo">initDate</span>,<span class="pl-v">initEq</span><span class="pl-k">=</span><span class="pl-vo">initEq</span>)</td>
</tr>
<tr>
<td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
<td id="LC37" class="blob-code js-file-line"> initOrders(<span class="pl-v">portfolio</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>, <span class="pl-v">initDate</span><span class="pl-k">=</span><span class="pl-vo">initDate</span>)</td>
</tr>
<tr>
<td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
<td id="LC38" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
<td id="LC39" class="blob-code js-file-line"> <span class="pl-c"># initialize a strategy object</span></td>
</tr>
<tr>
<td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
<td id="LC40" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> strategy(<span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
<td id="LC41" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
<td id="LC42" class="blob-code js-file-line"> <span class="pl-c"># there are two signals</span></td>
</tr>
<tr>
<td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
<td id="LC43" class="blob-code js-file-line"> <span class="pl-c"># the first signal is when Rank is less than or equal to N</span></td>
</tr>
<tr>
<td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
<td id="LC44" class="blob-code js-file-line"> <span class="pl-c"># (i.e. trades the #1 ranked symbol if N=1)</span></td>
</tr>
<tr>
<td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
<td id="LC45" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.signal(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>sigThreshold<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
<td id="LC46" class="blob-code js-file-line"> <span class="pl-v">arguments</span><span class="pl-k">=</span><span class="pl-st">list</span>(<span class="pl-v">threshold</span><span class="pl-k">=</span><span class="pl-vo">N</span>, <span class="pl-v">column</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
<td id="LC47" class="blob-code js-file-line"> <span class="pl-v">relationship</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>lte<span class="pl-pds">"</span></span>, <span class="pl-v">cross</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>), </td>
</tr>
<tr>
<td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
<td id="LC48" class="blob-code js-file-line"> <span class="pl-v">label</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.lte.N<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
<td id="LC49" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
<td id="LC50" class="blob-code js-file-line"> <span class="pl-c"># the second signal is when Rank is greter than or equal to N</span></td>
</tr>
<tr>
<td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
<td id="LC51" class="blob-code js-file-line"> <span class="pl-c"># (i.e. trades the #1 ranked symbol if N=1)</span></td>
</tr>
<tr>
<td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
<td id="LC52" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.signal(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>sigThreshold<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
<td id="LC53" class="blob-code js-file-line"> <span class="pl-v">arguments</span><span class="pl-k">=</span><span class="pl-st">list</span>(<span class="pl-v">threshold</span><span class="pl-k">=</span><span class="pl-vo">N</span>, <span class="pl-v">column</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank<span class="pl-pds">"</span></span>, </td>
</tr>
<tr>
<td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
<td id="LC54" class="blob-code js-file-line"> <span class="pl-v">relationship</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>gt<span class="pl-pds">"</span></span>, <span class="pl-v">cross</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>), </td>
</tr>
<tr>
<td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
<td id="LC55" class="blob-code js-file-line"> <span class="pl-v">label</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.gt.N<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
<td id="LC56" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L57" class="blob-num js-line-number" data-line-number="57"></td>
<td id="LC57" class="blob-code js-file-line"> <span class="pl-c"># add buy rule</span></td>
</tr>
<tr>
<td id="L58" class="blob-num js-line-number" data-line-number="58"></td>
<td id="LC58" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.rule(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>ruleSignal<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L59" class="blob-num js-line-number" data-line-number="59"></td>
<td id="LC59" class="blob-code js-file-line"> <span class="pl-v">arguments</span> <span class="pl-k">=</span> <span class="pl-st">list</span>(<span class="pl-v">sigcol</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.lte.N<span class="pl-pds">"</span></span>, <span class="pl-v">sigval</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>, </td>
</tr>
<tr>
<td id="L60" class="blob-num js-line-number" data-line-number="60"></td>
<td id="LC60" class="blob-code js-file-line"> <span class="pl-v">orderqty</span><span class="pl-k">=</span><span class="pl-vo">max.size</span>, <span class="pl-v">ordertype</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L61" class="blob-num js-line-number" data-line-number="61"></td>
<td id="LC61" class="blob-code js-file-line"> <span class="pl-v">orderside</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>long<span class="pl-pds">'</span></span>, <span class="pl-v">pricemethod</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L62" class="blob-num js-line-number" data-line-number="62"></td>
<td id="LC62" class="blob-code js-file-line"> <span class="pl-v">replace</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>, <span class="pl-v">osFUN</span><span class="pl-k">=</span><span class="pl-vo">osMaxPos</span>), </td>
</tr>
<tr>
<td id="L63" class="blob-num js-line-number" data-line-number="63"></td>
<td id="LC63" class="blob-code js-file-line"> <span class="pl-v">type</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>enter<span class="pl-pds">'</span></span>, <span class="pl-v">path.dep</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>)</td>
</tr>
<tr>
<td id="L64" class="blob-num js-line-number" data-line-number="64"></td>
<td id="LC64" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L65" class="blob-num js-line-number" data-line-number="65"></td>
<td id="LC65" class="blob-code js-file-line"> <span class="pl-c"># add exit rule</span></td>
</tr>
<tr>
<td id="L66" class="blob-num js-line-number" data-line-number="66"></td>
<td id="LC66" class="blob-code js-file-line"> <span class="pl-vo">stratRank</span> <span class="pl-k"><-</span> add.rule(<span class="pl-v">strategy</span> <span class="pl-k">=</span> <span class="pl-vo">stratRank</span>, <span class="pl-v">name</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>ruleSignal<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L67" class="blob-num js-line-number" data-line-number="67"></td>
<td id="LC67" class="blob-code js-file-line"> <span class="pl-v">arguments</span> <span class="pl-k">=</span> <span class="pl-st">list</span>(<span class="pl-v">sigcol</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">"</span>Rank.gt.N<span class="pl-pds">"</span></span>, <span class="pl-v">sigval</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>, </td>
</tr>
<tr>
<td id="L68" class="blob-num js-line-number" data-line-number="68"></td>
<td id="LC68" class="blob-code js-file-line"> <span class="pl-v">orderqty</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>all<span class="pl-pds">'</span></span>, <span class="pl-v">ordertype</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L69" class="blob-num js-line-number" data-line-number="69"></td>
<td id="LC69" class="blob-code js-file-line"> <span class="pl-v">orderside</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>long<span class="pl-pds">'</span></span>, <span class="pl-v">pricemethod</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>market<span class="pl-pds">'</span></span>, </td>
</tr>
<tr>
<td id="L70" class="blob-num js-line-number" data-line-number="70"></td>
<td id="LC70" class="blob-code js-file-line"> <span class="pl-v">replace</span><span class="pl-k">=</span><span class="pl-c1">FALSE</span>), </td>
</tr>
<tr>
<td id="L71" class="blob-num js-line-number" data-line-number="71"></td>
<td id="LC71" class="blob-code js-file-line"> <span class="pl-v">type</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span>exit<span class="pl-pds">'</span></span>, <span class="pl-v">path.dep</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>)</td>
</tr>
<tr>
<td id="L72" class="blob-num js-line-number" data-line-number="72"></td>
<td id="LC72" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L73" class="blob-num js-line-number" data-line-number="73"></td>
<td id="LC73" class="blob-code js-file-line"> <span class="pl-c">#set max position size and levels</span></td>
</tr>
<tr>
<td id="L74" class="blob-num js-line-number" data-line-number="74"></td>
<td id="LC74" class="blob-code js-file-line"> <span class="pl-k">for</span>(<span class="pl-vo">symbol</span> <span class="pl-k">in</span> <span class="pl-vo">symbols</span>){ addPosLimit(<span class="pl-vo">port.st</span>, <span class="pl-vo">symbol</span>, <span class="pl-vo">initDate</span>, <span class="pl-vo">max.size</span>, <span class="pl-vo">max.levels</span>) }</td>
</tr>
<tr>
<td id="L75" class="blob-num js-line-number" data-line-number="75"></td>
<td id="LC75" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L76" class="blob-num js-line-number" data-line-number="76"></td>
<td id="LC76" class="blob-code js-file-line"> print(<span class="pl-s1"><span class="pl-pds">"</span>setup completed<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L77" class="blob-num js-line-number" data-line-number="77"></td>
<td id="LC77" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L78" class="blob-num js-line-number" data-line-number="78"></td>
<td id="LC78" class="blob-code js-file-line"> <span class="pl-c"># apply the strategy to the portfolio</span></td>
</tr>
<tr>
<td id="L79" class="blob-num js-line-number" data-line-number="79"></td>
<td id="LC79" class="blob-code js-file-line"> <span class="pl-vo">start_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L80" class="blob-num js-line-number" data-line-number="80"></td>
<td id="LC80" class="blob-code js-file-line"> <span class="pl-vo">out</span> <span class="pl-k"><-</span> try(applyStrategy(<span class="pl-v">strategy</span><span class="pl-k">=</span><span class="pl-vo">stratRank</span>, <span class="pl-v">portfolios</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>))</td>
</tr>
<tr>
<td id="L81" class="blob-num js-line-number" data-line-number="81"></td>
<td id="LC81" class="blob-code js-file-line"> <span class="pl-vo">end_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L82" class="blob-num js-line-number" data-line-number="82"></td>
<td id="LC82" class="blob-code js-file-line"> print(<span class="pl-vo">end_t</span><span class="pl-k">-</span><span class="pl-vo">start_t</span>)</td>
</tr>
<tr>
<td id="L83" class="blob-num js-line-number" data-line-number="83"></td>
<td id="LC83" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L84" class="blob-num js-line-number" data-line-number="84"></td>
<td id="LC84" class="blob-code js-file-line"> <span class="pl-c"># update Portfolio</span></td>
</tr>
<tr>
<td id="L85" class="blob-num js-line-number" data-line-number="85"></td>
<td id="LC85" class="blob-code js-file-line"> <span class="pl-vo">start_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L86" class="blob-num js-line-number" data-line-number="86"></td>
<td id="LC86" class="blob-code js-file-line"> updatePortf(<span class="pl-v">Portfolio</span><span class="pl-k">=</span><span class="pl-vo">port.st</span>, <span class="pl-v">Dates</span><span class="pl-k">=</span>paste(<span class="pl-s1"><span class="pl-pds">'</span>::<span class="pl-pds">'</span></span>, as.Date(Sys.time()), <span class="pl-v">sep</span><span class="pl-k">=</span><span class="pl-s1"><span class="pl-pds">'</span><span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L87" class="blob-num js-line-number" data-line-number="87"></td>
<td id="LC87" class="blob-code js-file-line"> <span class="pl-vo">end_t</span> <span class="pl-k"><-</span> Sys.time()</td>
</tr>
<tr>
<td id="L88" class="blob-num js-line-number" data-line-number="88"></td>
<td id="LC88" class="blob-code js-file-line"> print(<span class="pl-s1"><span class="pl-pds">"</span>trade blotter portfolio update:<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L89" class="blob-num js-line-number" data-line-number="89"></td>
<td id="LC89" class="blob-code js-file-line"> print(<span class="pl-vo">end_t</span> <span class="pl-k">-</span> <span class="pl-vo">start_t</span>)</td>
</tr>
<tr>
<td id="L90" class="blob-num js-line-number" data-line-number="90"></td>
<td id="LC90" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L91" class="blob-num js-line-number" data-line-number="91"></td>
<td id="LC91" class="blob-code js-file-line"> <span class="pl-c"># update account</span></td>
</tr>
<tr>
<td id="L92" class="blob-num js-line-number" data-line-number="92"></td>
<td id="LC92" class="blob-code js-file-line"> updateAcct(<span class="pl-vo">account.st</span>)</td>
</tr>
<tr>
<td id="L93" class="blob-num js-line-number" data-line-number="93"></td>
<td id="LC93" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L94" class="blob-num js-line-number" data-line-number="94"></td>
<td id="LC94" class="blob-code js-file-line"> <span class="pl-c"># update ending equity</span></td>
</tr>
<tr>
<td id="L95" class="blob-num js-line-number" data-line-number="95"></td>
<td id="LC95" class="blob-code js-file-line"> updateEndEq(<span class="pl-vo">account.st</span>)</td>
</tr>
<tr>
<td id="L96" class="blob-num js-line-number" data-line-number="96"></td>
<td id="LC96" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L97" class="blob-num js-line-number" data-line-number="97"></td>
<td id="LC97" class="blob-code js-file-line"> <span class="pl-c"># get ending equity</span></td>
</tr>
<tr>
<td id="L98" class="blob-num js-line-number" data-line-number="98"></td>
<td id="LC98" class="blob-code js-file-line"> <span class="pl-vo">eq</span> <span class="pl-k"><-</span> getEndEq(<span class="pl-vo">account.st</span>, Sys.Date()) <span class="pl-k">+</span> <span class="pl-vo">initEq</span></td>
</tr>
<tr>
<td id="L99" class="blob-num js-line-number" data-line-number="99"></td>
<td id="LC99" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L100" class="blob-num js-line-number" data-line-number="100"></td>
<td id="LC100" class="blob-code js-file-line"> <span class="pl-c"># view order book to confirm trades</span></td>
</tr>
<tr>
<td id="L101" class="blob-num js-line-number" data-line-number="101"></td>
<td id="LC101" class="blob-code js-file-line"> <span class="pl-vo">order.book</span> <span class="pl-k"><-</span> getOrderBook(<span class="pl-vo">port.st</span>)</td>
</tr>
<tr>
<td id="L102" class="blob-num js-line-number" data-line-number="102"></td>
<td id="LC102" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L103" class="blob-num js-line-number" data-line-number="103"></td>
<td id="LC103" class="blob-code js-file-line"> <span class="pl-c"># get trade statistics</span></td>
</tr>
<tr>
<td id="L104" class="blob-num js-line-number" data-line-number="104"></td>
<td id="LC104" class="blob-code js-file-line"> <span class="pl-vo">stats</span> <span class="pl-k"><-</span> tradeStats(<span class="pl-vo">port.st</span>)</td>
</tr>
<tr>
<td id="L105" class="blob-num js-line-number" data-line-number="105"></td>
<td id="LC105" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L106" class="blob-num js-line-number" data-line-number="106"></td>
<td id="LC106" class="blob-code js-file-line"> <span class="pl-c"># portfolio returns</span></td>
</tr>
<tr>
<td id="L107" class="blob-num js-line-number" data-line-number="107"></td>
<td id="LC107" class="blob-code js-file-line"> <span class="pl-vo">ret1</span> <span class="pl-k"><-</span> PortfReturns(<span class="pl-vo">port.st</span>)</td>
</tr>
<tr>
<td id="L108" class="blob-num js-line-number" data-line-number="108"></td>
<td id="LC108" class="blob-code js-file-line"> <span class="pl-vo">ret1</span><span class="pl-k">$</span><span class="pl-vo">total</span> <span class="pl-k"><-</span> rowSums(<span class="pl-vo">ret1</span>, <span class="pl-v">na.rm</span><span class="pl-k">=</span><span class="pl-c1">TRUE</span>)</td>
</tr>
<tr>
<td id="L109" class="blob-num js-line-number" data-line-number="109"></td>
<td id="LC109" class="blob-code js-file-line"> </td>
</tr>
<tr>
<td id="L110" class="blob-num js-line-number" data-line-number="110"></td>
<td id="LC110" class="blob-code js-file-line"> <span class="pl-k">return</span>(<span class="pl-st">list</span>(<span class="pl-v">end.eq</span><span class="pl-k">=</span><span class="pl-vo">eq</span>, <span class="pl-v">returns</span><span class="pl-k">=</span><span class="pl-vo">ret1</span>, <span class="pl-v">book</span><span class="pl-k">=</span><span class="pl-vo">order.book</span>, <span class="pl-v">stats</span><span class="pl-k">=</span><span class="pl-vo">stats</span>))</td>
</tr>
<tr>
<td id="L111" class="blob-num js-line-number" data-line-number="111"></td>
<td id="LC111" class="blob-code js-file-line">}</td>
</tr>
</table>
</div>
</div>
</div>
<a href="#jump-to-line" rel="facebox[.linejump]" data-hotkey="l" style="display:none">Jump to Line</a>
<div id="jump-to-line" style="display:none">
<form accept-charset="UTF-8" class="js-jump-to-line-form">
<input class="linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" autofocus>
<button type="submit" class="button">Go</button>
</form>
</div>
</div>
</div><!-- /.repo-container -->
<div class="modal-backdrop"></div>
</div><!-- /.container -->
</div><!-- /.site -->
</div><!-- /.wrapper -->
<div class="container">
<div class="site-footer" role="contentinfo">
<ul class="site-footer-links right">
<li><a href="https://status.github.com/">Status</a></li>
<li><a href="https://developer.github.com">API</a></li>
<li><a href="http://training.github.com">Training</a></li>
<li><a href="http://shop.github.com">Shop</a></li>
<li><a href="/blog">Blog</a></li>
<li><a href="/about">About</a></li>
</ul>
<a href="/" aria-label="Homepage">
<span class="mega-octicon octicon-mark-github" title="GitHub"></span>
</a>
<ul class="site-footer-links">
<li>© 2015 <span title="0.03460s from github-fe138-cp1-prd.iad.github.net">GitHub</span>, Inc.</li>
<li><a href="/site/terms">Terms</a></li>
<li><a href="/site/privacy">Privacy</a></li>
<li><a href="/security">Security</a></li>
<li><a href="/contact">Contact</a></li>
</ul>
</div><!-- /.site-footer -->
</div><!-- /.container -->
<div class="fullscreen-overlay js-fullscreen-overlay" id="fullscreen_overlay">
<div class="fullscreen-container js-suggester-container">
<div class="textarea-wrap">
<textarea name="fullscreen-contents" id="fullscreen-contents" class="fullscreen-contents js-fullscreen-contents" placeholder=""></textarea>
<div class="suggester-container">
<div class="suggester fullscreen-suggester js-suggester js-navigation-container"></div>
</div>
</div>
</div>
<div class="fullscreen-sidebar">
<a href="#" class="exit-fullscreen js-exit-fullscreen tooltipped tooltipped-w" aria-label="Exit Zen Mode">
<span class="mega-octicon octicon-screen-normal"></span>
</a>
<a href="#" class="theme-switcher js-theme-switcher tooltipped tooltipped-w"
aria-label="Switch themes">
<span class="octicon octicon-color-mode"></span>
</a>
</div>
</div>
<div id="ajax-error-message" class="flash flash-error">
<span class="octicon octicon-alert"></span>
<a href="#" class="octicon octicon-x flash-close js-ajax-error-dismiss" aria-label="Dismiss error"></a>
Something went wrong with that request. Please try again.
</div>
<script crossorigin="anonymous" src="https://assets-cdn.github.com/assets/frameworks-2a688ea200a50a474783d34f1f46405acbc687c4d17db98b4b2b769b44174d5d.js" type="text/javascript"></script>
<script async="async" crossorigin="anonymous" src="https://assets-cdn.github.com/assets/github-c28c4beb84e212634dbd1b98793bbe9332814860c2e34cdaac36f18b6b90b7dd.js" type="text/javascript"></script>
</body>
</html>
|
library(ggplot2)
library(dplyr)
library(ggnewscale)
#' Plot the total climate space PCA for a species
#'
#' Samples 10,000 random background cells from `bg_env`, extracts climate
#' values at the occurrence points of `species`, runs a centered and scaled
#' PCA on the combined data, and plots the background density as gray hex
#' bins with occurrence points colored by reproductive mode.
#'
#' @param bg_env Raster* object of environmental layers (passed to
#'   raster::sampleRandom() and raster::extract()).
#' @param locs Data frame of occurrence records with columns `species`,
#'   `longitude`, `latitude`, and `reproductive_mode`.
#' @param genus Character scalar; used only in the plot title.
#' @param species Character scalar; selects rows of `locs` and appears in
#'   the plot title.
#' @return A ggplot object.
total_climate_pca_plot <- function(bg_env, locs, genus, species) {
  # Background climate sample, tagged "none" so it can be separated from
  # the occurrence points after the PCA.
  env_df <- raster::sampleRandom(bg_env, size = 10000) %>%
    as_tibble() %>%
    mutate(reproductive_mode = "none")

  # Keep only the focal species. `.env$species` disambiguates the function
  # argument from the `species` column: the original `species == species`
  # compared the column to itself and therefore kept every row.
  sp_locs <- locs %>%
    filter(.data$species == .env$species)

  # Extract climate at the occurrence coordinates and carry along the
  # reproductive mode of the *filtered* records (raster::extract returns
  # rows in the same order as the input coordinates).
  sp_df <- raster::extract(bg_env,
                           sp_locs %>% dplyr::select(longitude, latitude)) %>%
    as_tibble() %>%
    mutate(reproductive_mode = sp_locs$reproductive_mode)

  # Combine background and occurrences; drop cells with missing layers.
  total_df <- dplyr::bind_rows(env_df, sp_df) %>%
    na.omit()

  # PCA on the climate columns only (the grouping column is re-attached
  # to the scores afterwards; prcomp preserves row order).
  pca_df <- total_df %>% dplyr::select(-reproductive_mode)
  env_pca <- prcomp(pca_df, center = TRUE, scale. = TRUE)
  pca_out_df <- env_pca$x %>%
    as_tibble() %>%
    mutate(reproductive_mode = total_df$reproductive_mode)

  ggplot() +
    # Background density in gray hex bins.
    geom_hex(data = pca_out_df %>% filter(reproductive_mode == "none"),
             aes(x = PC1, y = PC2, fill = ..count..)) +
    scale_fill_gradient(low = "lightgray", high = "black") +
    labs(fill = "Count") +
    # Start a second fill scale for the occurrence points.
    ggnewscale::new_scale_fill() +
    geom_point(
      data = pca_out_df %>% filter(reproductive_mode != "none"),
      aes(
        x = PC1,
        y = PC2,
        fill = reproductive_mode,
        color = reproductive_mode
      ),
      alpha = 0.8,
      size = 3,
      pch = 21
    ) +
    scale_fill_viridis_d(option = "inferno") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_color_manual(values = c("white", "black"), guide = "none") +
    labs(title = paste("Total climate space PCA for", genus, species),
         fill = "Reproductive Mode") +
    theme_minimal()
}
| /R/total_climate_pca_plot.R | no_license | connor-french/stick-insect-niche-divergence | R | false | false | 1,575 | r | library(ggplot2)
library(dplyr)
library(ggnewscale)
#' Plot the total climate space PCA for a species
#'
#' Samples 10,000 random background cells from `bg_env`, extracts climate
#' values at the occurrence points of `species`, runs a centered and scaled
#' PCA on the combined data, and plots the background density as gray hex
#' bins with occurrence points colored by reproductive mode.
#'
#' @param bg_env Raster* object of environmental layers (passed to
#'   raster::sampleRandom() and raster::extract()).
#' @param locs Data frame of occurrence records with columns `species`,
#'   `longitude`, `latitude`, and `reproductive_mode`.
#' @param genus Character scalar; used only in the plot title.
#' @param species Character scalar; selects rows of `locs` and appears in
#'   the plot title.
#' @return A ggplot object.
total_climate_pca_plot <- function(bg_env, locs, genus, species) {
  # Background climate sample, tagged "none" so it can be separated from
  # the occurrence points after the PCA.
  env_df <- raster::sampleRandom(bg_env, size = 10000) %>%
    as_tibble() %>%
    mutate(reproductive_mode = "none")

  # Keep only the focal species. `.env$species` disambiguates the function
  # argument from the `species` column: the original `species == species`
  # compared the column to itself and therefore kept every row.
  sp_locs <- locs %>%
    filter(.data$species == .env$species)

  # Extract climate at the occurrence coordinates and carry along the
  # reproductive mode of the *filtered* records (raster::extract returns
  # rows in the same order as the input coordinates).
  sp_df <- raster::extract(bg_env,
                           sp_locs %>% dplyr::select(longitude, latitude)) %>%
    as_tibble() %>%
    mutate(reproductive_mode = sp_locs$reproductive_mode)

  # Combine background and occurrences; drop cells with missing layers.
  total_df <- dplyr::bind_rows(env_df, sp_df) %>%
    na.omit()

  # PCA on the climate columns only (the grouping column is re-attached
  # to the scores afterwards; prcomp preserves row order).
  pca_df <- total_df %>% dplyr::select(-reproductive_mode)
  env_pca <- prcomp(pca_df, center = TRUE, scale. = TRUE)
  pca_out_df <- env_pca$x %>%
    as_tibble() %>%
    mutate(reproductive_mode = total_df$reproductive_mode)

  ggplot() +
    # Background density in gray hex bins.
    geom_hex(data = pca_out_df %>% filter(reproductive_mode == "none"),
             aes(x = PC1, y = PC2, fill = ..count..)) +
    scale_fill_gradient(low = "lightgray", high = "black") +
    labs(fill = "Count") +
    # Start a second fill scale for the occurrence points.
    ggnewscale::new_scale_fill() +
    geom_point(
      data = pca_out_df %>% filter(reproductive_mode != "none"),
      aes(
        x = PC1,
        y = PC2,
        fill = reproductive_mode,
        color = reproductive_mode
      ),
      alpha = 0.8,
      size = 3,
      pch = 21
    ) +
    scale_fill_viridis_d(option = "inferno") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_color_manual(values = c("white", "black"), guide = "none") +
    labs(title = paste("Total climate space PCA for", genus, species),
         fill = "Reproductive Mode") +
    theme_minimal()
}
|
# Demographics for the 85,052 influenza illness episodes.
# Uses data.table syntax; assumes Flu_episode2010to2015_Summarize and
# Flu_episode2010to2015_FluVirus are already loaded in the workspace.
# Sex distribution
Flu_episode2010to2015_Summarize[,.N,by='sex']
# Age group distribution
Flu_episode2010to2015_Summarize[,.N,by='agegroup']
# Age group as a percentage of all episodes
# NOTE(review): 85052 is a hard-coded total episode count — confirm it equals
# nrow(Flu_episode2010to2015_Summarize) if the tables are regenerated.
round(Flu_episode2010to2015_Summarize[,.N,by='agegroup']$N/85052,3)*100
# Influenza-related laboratory test results
Flu_episode2010to2015_Summarize[,.N,by='FluCases']
round(Flu_episode2010to2015_Summarize[,.N,by='FluCases']$N/85052,3)*100
# Counts per test type among flu-positive results
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='IgGResult']
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RapidTestResult']
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RNAdetection']
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='VirusIsolation']
# Percentage positive per test type among flu-positive results
# NOTE(review): 19052 is a hard-coded flu-positive count — verify against the
# source tables before reuse.
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='IgGResult'][IgGResult=='positive']$N/19052,3)*100
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RapidTestResult'][RapidTestResult=='positive']$N/19052,3)*100
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RNAdetection'][RNAdetection=='positive']$N/19052,3)*100
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='VirusIsolation'][VirusIsolation=='positive']$N/19052,3)*100
# Proportion by hospital campus (comment appears to be a placeholder;
# no corresponding code below)
# Pearson correlation coefficient (also appears to be a placeholder)
Flu_episode2010to2015_Summarize[FluCases=='FluPositive' & is.na(FluCasesWithoutAdmission)]
# Cross-tabulations of severe complicated influenza with each complication type
# NOTE(review): T is used for TRUE — prefer TRUE, since T can be reassigned.
Flu_episode2010to2015_Summarize[SevereComplicatedInfluenza==T][PulmonaryComplicationCases==T]
Flu_episode2010to2015_Summarize[SevereComplicatedInfluenza==T][MyopathyCases==T]
Flu_episode2010to2015_Summarize[SevereComplicatedInfluenza==T][IBICases==T]
| /Pilot Project_ARIMA_Influenza_R/Data/Demographic.R | no_license | ray0g1thub/Dr.Prenation | R | false | false | 1,696 | r | #85052 病程 人口學資料
# Demographics for the 85,052 influenza illness episodes (data.table syntax;
# assumes Flu_episode2010to2015_Summarize and Flu_episode2010to2015_FluVirus
# are already loaded in the workspace).
# Sex distribution
Flu_episode2010to2015_Summarize[,.N,by='sex']
# Age group distribution
Flu_episode2010to2015_Summarize[,.N,by='agegroup']
# Age group as a percentage of all episodes
# NOTE(review): 85052 is a hard-coded total episode count — confirm it equals
# nrow(Flu_episode2010to2015_Summarize) if the tables are regenerated.
round(Flu_episode2010to2015_Summarize[,.N,by='agegroup']$N/85052,3)*100
# Influenza-related laboratory test results
Flu_episode2010to2015_Summarize[,.N,by='FluCases']
round(Flu_episode2010to2015_Summarize[,.N,by='FluCases']$N/85052,3)*100
# Counts per test type among flu-positive results
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='IgGResult']
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RapidTestResult']
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RNAdetection']
Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='VirusIsolation']
# Percentage positive per test type among flu-positive results
# NOTE(review): 19052 is a hard-coded flu-positive count — verify against the
# source tables before reuse.
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='IgGResult'][IgGResult=='positive']$N/19052,3)*100
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RapidTestResult'][RapidTestResult=='positive']$N/19052,3)*100
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='RNAdetection'][RNAdetection=='positive']$N/19052,3)*100
round(Flu_episode2010to2015_FluVirus[FluCases=='FluPositive'][,.N,by='VirusIsolation'][VirusIsolation=='positive']$N/19052,3)*100
# Proportion by hospital campus (comment appears to be a placeholder;
# no corresponding code below)
# Pearson correlation coefficient (also appears to be a placeholder)
Flu_episode2010to2015_Summarize[FluCases=='FluPositive' & is.na(FluCasesWithoutAdmission)]
# Cross-tabulations of severe complicated influenza with each complication type
# NOTE(review): T is used for TRUE — prefer TRUE, since T can be reassigned.
Flu_episode2010to2015_Summarize[SevereComplicatedInfluenza==T][PulmonaryComplicationCases==T]
Flu_episode2010to2015_Summarize[SevereComplicatedInfluenza==T][MyopathyCases==T]
Flu_episode2010to2015_Summarize[SevereComplicatedInfluenza==T][IBICases==T]
# Plot 3: energy sub-metering over the first two days of February 2007.
# Reads the UCI household power consumption data, subsets it, and writes a
# 480x480 PNG with the three sub-metering series.

# Read the raw file; missing values are coded as "?".
powercons <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Subset to 2007-02-01 and 2007-02-02 before parsing date-times (cheaper
# than parsing every row of the full file). Dates are d/m/Y strings.
powercons <- powercons[powercons$Date == "1/2/2007" | powercons$Date == "2/2/2007", ]
# Build a POSIXct date-time column; POSIXct stores cleanly in a data frame,
# unlike the list-based POSIXlt that strptime() returns.
powercons$datetime <- as.POSIXct(strptime(paste(powercons$Date, powercons$Time),
                                          format = "%d/%m/%Y %H:%M:%S"))
# Create Plot 3 and save as PNG.
png("plot3.png", width = 480, height = 480, units = "px")
plot(powercons$datetime, powercons$Sub_metering_1,
     xlab = "", ylab = "Energy sub metering", type = "n")
lines(powercons$datetime, powercons$Sub_metering_1, col = "grey")
lines(powercons$datetime, powercons$Sub_metering_2, col = "red")
lines(powercons$datetime, powercons$Sub_metering_3, col = "blue")
# The default date-time axis already labels the ticks Thu/Fri/Sat. The
# original axis(side=1, at=c(0,1441,2881), ...) call placed ticks at 0-2881
# seconds after the epoch (Jan 1970), far outside the plotted range, so it
# drew nothing and has been removed.
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("grey", "red", "blue"), lty = 1)
dev.off()
| /plot3.R | no_license | mbcmn/ExData_Plotting1 | R | false | false | 1,007 | r | # Read txt file into R
# Plot 3: energy sub-metering over the first two days of February 2007.
# Reads the UCI household power consumption data, subsets it, and writes a
# 480x480 PNG with the three sub-metering series.
# Read the raw file; missing values are coded as "?".
powercons <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Subset to 2007-02-01 and 2007-02-02 before parsing date-times (cheaper
# than parsing every row of the full file). Dates are d/m/Y strings.
powercons <- powercons[powercons$Date == "1/2/2007" | powercons$Date == "2/2/2007", ]
# Build a POSIXct date-time column; POSIXct stores cleanly in a data frame,
# unlike the list-based POSIXlt that strptime() returns.
powercons$datetime <- as.POSIXct(strptime(paste(powercons$Date, powercons$Time),
                                          format = "%d/%m/%Y %H:%M:%S"))
# Create Plot 3 and save as PNG.
png("plot3.png", width = 480, height = 480, units = "px")
plot(powercons$datetime, powercons$Sub_metering_1,
     xlab = "", ylab = "Energy sub metering", type = "n")
lines(powercons$datetime, powercons$Sub_metering_1, col = "grey")
lines(powercons$datetime, powercons$Sub_metering_2, col = "red")
lines(powercons$datetime, powercons$Sub_metering_3, col = "blue")
# The default date-time axis already labels the ticks Thu/Fri/Sat. The
# original axis(side=1, at=c(0,1441,2881), ...) call placed ticks at 0-2881
# seconds after the epoch (Jan 1970), far outside the plotted range, so it
# drew nothing and has been removed.
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("grey", "red", "blue"), lty = 1)
dev.off()
|
\name{exampleData}
\alias{exampleData}
\docType{data}
\title{
Description of the data in the package
}
\description{
\code{chromLength} is the length of 24 human chromosome. The first column is chromosome name and
the second one is the length.
\code{genelocate} is location of all genes in the human chromosome. The first column is gene symbol,
second is the chromosome, third is the start position and fourth is the end position.
\code{sourceOmics} is the copy number alteration data of human colorectal cancer, which was downloaded
from firehose (http://gdac.broadinstitute.org) and contains 40 samples and genes from chromosome 20.
\code{targetOmics} is the RNASeq data of human colorectal cancer, which was downloaded from the Nature
paper (Proteogenomic characterization of human colon and rectal cancer. Nature. 2014
Sep 18;513(7518):382-7. (PMID: 25043054)) and contains 3764 genes and 40 samples.
}
\keyword{datasets}
| /man/exampleData.Rd | no_license | wangj26/multiOmicsViz | R | false | false | 960 | rd | \name{exampleData}
\alias{exampleData}
\docType{data}
\title{
Description of the data in the package
}
\description{
\code{chromLength} is the length of 24 human chromosome. The first column is chromosome name and
the second one is the length.
\code{genelocate} is location of all genes in the human chromosome. The first column is gene symbol,
second is the chromosome, third is the start position and fourth is the end position.
\code{sourceOmics} is the copy number alteration data of human colorectal cancer, which was downloaded
from firehose (http://gdac.broadinstitute.org) and contains 40 samples and genes from chromosome 20.
\code{targetOmics} is the RNASeq data of human colorectal cancer, which was dowloaded from the Nature
paper (Proteogenomic characterization of human colon and rectal cancer. Nature. 2014
Sep 18;513(7518):382-7. (PMID: 25043054)) and contains 3764 genes and 40 samples.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NLCD_FUNCTIONS.R
\docType{data}
\name{nlcd_canopy_pam}
\alias{nlcd_canopy_pam}
\title{The NLCD canopy PAM attributes.}
\format{An object of class \code{character} of length 2345.}
\usage{
nlcd_canopy_pam
}
\description{
A dataset containing the PAM attributes.
}
\keyword{internal}
| /man/nlcd_canopy_pam.Rd | permissive | QinLab/FedData | R | false | true | 360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NLCD_FUNCTIONS.R
\docType{data}
\name{nlcd_canopy_pam}
\alias{nlcd_canopy_pam}
\title{The NLCD canopy PAM attributes.}
\format{An object of class \code{character} of length 2345.}
\usage{
nlcd_canopy_pam
}
\description{
A dataset containing the PAM attributes.
}
\keyword{internal}
|
#' Load the data found in data/.
#'
#' @param update Whether you want to update the dataset using the fetch_data
#' function.
#' @param project.name Name of your project. Set the default to keep it always
#' working.
#'
#' @return Outputs the datasets in the data/ folder.
#' @export
#'
#' @examples
#' load_data()
#' load_data(update = TRUE)
#'
load_data <- function(update = FALSE, project.name = "urinaryDBP") {
  # Load the datasets shipped in the project's data/ folder, fetching them
  # first when they are absent or when an update is explicitly requested.
  #
  # update: if TRUE, re-download the dataset via fetch_data().
  # project.name: name of the project package that stores the data.
  #
  # Returns whatever the underlying data-loading call returns.
  if (is.null(project.name))
    stop('Please change the project.name default to the name of the project.')
  # Fetch at most once: either the data folder is missing or an update was
  # requested.  (Previously a missing folder combined with update = TRUE
  # triggered two consecutive downloads.)
  if (!dir.exists(system.file('data', package = project.name)) || update)
    fetch_data()
  # NOTE(review): devtools::load_data() is defunct in current devtools
  # releases -- confirm the intended call (possibly devtools::load_all()).
  devtools::load_data()
}
| /R/load_data.R | no_license | zsemnani/urinaryDBP | R | false | false | 685 | r | #' Load the data found in data/.
#'
#' @param update Whether you want to update the dataset using the fetch_data
#' function.
#' @param project.name Name of your project. Set the default to keep it always
#' working.
#'
#' @return Outputs the datasets in the data/ folder.
#' @export
#'
#' @examples
#' load_data()
#' load_data(update = TRUE)
#'
load_data <- function(update = FALSE, project.name = "urinaryDBP") {
if (is.null(project.name))
stop('Please change the project.name default to the name of the project.')
if (!dir.exists(system.file('data', package = project.name)))
fetch_data()
if (update)
fetch_data()
devtools::load_data()
}
|
library(tuneR)
library(ggplot2)
# everything assuming 16kHz
SAMPLE_RATE = 16000
DUR_THRESH = 1.2
# two thresholds: delta (change in amp) and duration (shortest distinguishable is 1.2ms (Irwin & Purdy, 1982))
DELTA_THRESH = 1e-04
SAMPLE_COUNT_THRESH = (DUR_THRESH / 1000) * SAMPLE_RATE
# one more for differentiating between signal and silence
STATIC_THRESH = 5e-05
# Extract the left channel of a tuneR Wave object and normalise the raw
# integer samples to [-1, 1) based on the recording's bit depth.
#
# sndObj: an S4 object with numeric slot @left (samples) and numeric slot
#         @bit (bits per sample), e.g. a tuneR::Wave.
# Returns a numeric vector of normalised samples.
sampleSoundObject <- function(sndObj) {
  # 2^(bits - 1) is the magnitude of the most negative representable sample.
  # (The original also computed an unused length and an intermediate copy.)
  sndObj@left / 2^(sndObj@bit - 1)
}
# Read a WAV file and return its normalised samples together with their
# first and second discrete differences (sample-to-sample amplitude deltas).
#
# filename: path to a WAV file readable by tuneR::readWave().
# Returns a list of three numeric vectors:
#   [[1]] samples, [[2]] diff(samples), [[3]] diff(diff(samples)).
dSamplesFromFile <- function(filename) {
  amplitude <- sampleSoundObject(readWave(filename))
  delta <- diff(amplitude)
  list(amplitude, delta, diff(delta))
}
files <- sapply(c(1,2,3), FUN=function(ihateR) {sprintf('local/nan-ai-file-%d.wav', ihateR)})
slices <- matrix(data=c(34891,35050,41740,42056,1,Inf), byrow = TRUE, nrow = 3, ncol = 2)
# Expand a c(start, end) sample slice by `wid` samples on each side,
# clamping the start so it never drops below zero.
widenSliceBy <- function(slice, wid = 3000) {
  lower <- max(0, slice[1] - wid)
  upper <- slice[2] + wid
  c(lower, upper)
}
slices <- t(apply(slices, 1, FUN=widenSliceBy))
test_data <- matrix(cbind(files, slices), ncol=3, nrow=3) # in a perfect world this wouldn't be necessary
lotsofdata <- apply(test_data, 1, FUN=function(x) {
dSamplesFromFile(filename = x[1])
})
data1 <- lotsofdata[[1]][[1]]
data2 <- lotsofdata[[1]][[2]]
data3 <- lotsofdata[[1]][[3]]
# Overlay up to three numeric series as semi-transparent lines (red, green,
# blue) against sample index, windowed to the x-range [l, r].
#
# data1, data2: numeric vectors (e.g. raw samples and their diffs).
# data3: optional third vector; omitted from the plot when NULL.
# l, r: x-axis window; r is clipped to the longest series' length.
# main: currently unused -- kept so existing calls keep working.
# Returns a ggplot object (caller decides whether to print it).
overlapHists <- function(data1, data2, data3=NULL, l=1, r=Inf, main='') {
  # length(NULL) is 0, so a missing data3 does not affect the maximum.
  biggerLen <- max(length(data1), length(data2), length(data3))
  # Assigning to length() pads the shorter vectors with NA to a common
  # length, so all series share one x-axis.
  length(data1) <- biggerLen # WHY IS IT EVEN POSSIBLE
  length(data2) <- biggerLen
  if (!is.null(data3)) {
    length(data3) <- biggerLen
  }
  r <- min(r, biggerLen)
  # Zero-based sample index used as the common x variable.
  xx <- (0:(biggerLen-1))
  g <- ggplot(data=NULL, mapping=aes(xx)) +
    geom_line(aes(y=data1), colour="red", alpha=0.4) +
    geom_line(aes(y=data2), colour="green", alpha=0.4) +
    xlim(l,r)
  if (!is.null(data3)) {
    g <- g + geom_line(aes(y=data3), colour="blue", alpha=0.4)
  }
  # Experimental overlays kept for reference (threshold mask, highlight box):
  # flats <- suppTresh(data1, abs(data2))
  # g <- g + geom_line(aes(y=flats[l:r]), colour="purple")
  # g_a <- g + annotate("rect", xmin=34891, xmax=35050, ymin=-Inf, ymax=Inf, alpha=.2, fill="yellow")
  g
}
# Build a 0/1 activity mask over the samples: a sample is "active" when its
# amplitude exceeds static_tresh OR its amplitude delta exceeds delta_tresh,
# and only runs of active samples strictly longer than count_tresh samples
# (the shortest distinguishable duration) are kept; shorter runs are zeroed.
#
# data1: amplitude samples; data2: amplitude deltas (recycled if shorter).
# Returns an integer vector of 0s and 1s the length of the longer input.
suppTresh <- function(data1, data2, delta_tresh=DELTA_THRESH, count_tresh=SAMPLE_COUNT_THRESH, static_tresh=STATIC_THRESH) {
  # Vectorised threshold test; equivalent to the original per-element
  # mapply(..., `||`) but orders of magnitude faster.
  above <- (data1 > static_tresh) | (data2 > delta_tresh)
  # Run-length filter: a run survives only if it is TRUE and longer than
  # count_tresh samples.
  runs <- rle(above)
  runs$values <- runs$values & (runs$lengths > count_tresh)
  as.integer(inverse.rle(runs))
}
# pdf("plots6.pdf")
#
# lapply(lotsofdata, FUN=function(x){ overlapHists( abs( x[[1]] ), abs( x[[2]] ), abs( x[[3]] ) ) })
#
# dev.off()
| /old_plots_and_R_code/no_bs_this_time.R | no_license | cymerrad/sams-task | R | false | false | 2,787 | r | library(tuneR)
library(ggplot2)
# everything assuming 16kHz
SAMPLE_RATE = 16000
DUR_THRESH = 1.2
# two thresholds: delta (change in amp) and duration (shortest distinguishable is 1.2ms (Irwin & Purdy, 1982))
DELTA_THRESH = 1e-04
SAMPLE_COUNT_THRESH = (DUR_THRESH / 1000) * SAMPLE_RATE
# one more for differentiating between signal and silence
STATIC_THRESH = 5e-05
sampleSoundObject <- function(sndObj) {
l = length(sndObj@left)
s1 <- sndObj@left
s1 <- s1 / 2^(sndObj@bit -1)
return(s1)
}
dSamplesFromFile <- function(filename) {
sample <-sampleSoundObject(readWave(filename))
difs <- diff(sample)
difs_difs <- diff(difs)
return (list(sample, difs, difs_difs)) # dubious 'abs'
}
files <- sapply(c(1,2,3), FUN=function(ihateR) {sprintf('local/nan-ai-file-%d.wav', ihateR)})
slices <- matrix(data=c(34891,35050,41740,42056,1,Inf), byrow = TRUE, nrow = 3, ncol = 2)
widenSliceBy <- function(slice, wid=3000) {
return( c(max(0,slice[1]-wid), slice[2]+wid) )
}
slices <- t(apply(slices, 1, FUN=widenSliceBy))
test_data <- matrix(cbind(files, slices), ncol=3, nrow=3) # in a perfect world this wouldn't be necessary
lotsofdata <- apply(test_data, 1, FUN=function(x) {
dSamplesFromFile(filename = x[1])
})
data1 <- lotsofdata[[1]][[1]]
data2 <- lotsofdata[[1]][[2]]
data3 <- lotsofdata[[1]][[3]]
overlapHists <- function(data1, data2, data3=NULL, l=1, r=Inf, main='') {
biggerLen <- max(length(data1), length(data2), length(data3))
length(data1) <- biggerLen # WHY IS IT EVEN POSSIBLE
length(data2) <- biggerLen
if (!is.null(data3)) {
length(data3) <- biggerLen
}
r <- min(r, biggerLen)
xx <- (0:(biggerLen-1))
g <- ggplot(data=NULL, mapping=aes(xx)) +
geom_line(aes(y=data1), colour="red", alpha=0.4) +
geom_line(aes(y=data2), colour="green", alpha=0.4) +
xlim(l,r)
if (!is.null(data3)) {
g <- g + geom_line(aes(y=data3), colour="blue", alpha=0.4)
}
# flats <- suppTresh(data1, abs(data2))
# g <- g + geom_line(aes(y=flats[l:r]), colour="purple")
# g_a <- g + annotate("rect", xmin=34891, xmax=35050, ymin=-Inf, ymax=Inf, alpha=.2, fill="yellow")
g
}
suppTresh <- function(data1, data2, delta_tresh=DELTA_THRESH, count_tresh=SAMPLE_COUNT_THRESH, static_tresh=STATIC_THRESH) {
# above <- (function(x) x > delta_tresh)(data2)
above <- mapply(function(X,Y) { (X>static_tresh) || (Y>delta_tresh) }, X=data1, Y=data2 )
above_rle <- rle(above)
above_rle$values = mapply(function(X,Y){ if(X>count_tresh) return(Y) else return(FALSE) }, X=above_rle$lengths, Y=above_rle$values)
above <- inverse.rle(above_rle)
as.integer(above)
}
# pdf("plots6.pdf")
#
# lapply(lotsofdata, FUN=function(x){ overlapHists( abs( x[[1]] ), abs( x[[2]] ), abs( x[[3]] ) ) })
#
# dev.off()
|
setwd("E:\\@nKu$h\\Projects\\Electronic-Health-Record-System\\Disease Predictor\\src\\resources\\R")
load('annModel.RData')
test<-read.csv(file='E:\\@nKu$h\\Projects\\Electronic-Health-Record-System\\Disease Predictor\\src\\resources\\R\\test.csv')
library(nnet)
prediction<-predict(annModel, test, type="class")
sep<-as.character(',')
cat(paste(prediction[1],sep,prediction[2],sep,prediction[3])) | /Disease Predictor/src/resources/R/script.R | no_license | ankush1377/Electronic-Health-Record-System | R | false | false | 397 | r | setwd("E:\\@nKu$h\\Projects\\Electronic-Health-Record-System\\Disease Predictor\\src\\resources\\R")
load('annModel.RData')
test<-read.csv(file='E:\\@nKu$h\\Projects\\Electronic-Health-Record-System\\Disease Predictor\\src\\resources\\R\\test.csv')
library(nnet)
prediction<-predict(annModel, test, type="class")
sep<-as.character(',')
cat(paste(prediction[1],sep,prediction[2],sep,prediction[3])) |
data <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?", stringsAsFactors = F)
data <- subset(data, as.Date(Date, format = "%d/%m/%Y") %in% c(as.Date("2007-02-01"), as.Date("2007-02-02")))
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
png(file = "plot2.png")
plot(data$Global_active_power ~ data$datetime, type = "l", xlab = '', ylab = "Global Active Power (kilowatts)")
dev.off() | /exploratory-data-analysis/course-project-1/plot2.R | no_license | stepankuzmin/coursera-data-science | R | false | false | 446 | r | data <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?", stringsAsFactors = F)
data <- subset(data, as.Date(Date, format = "%d/%m/%Y") %in% c(as.Date("2007-02-01"), as.Date("2007-02-02")))
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
png(file = "plot2.png")
plot(data$Global_active_power ~ data$datetime, type = "l", xlab = '', ylab = "Global Active Power (kilowatts)")
dev.off() |
# addLogic.R
# copyright 2015-2016, openreliability.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Add a logic gate to a fault tree under construction.
#
# DF: fault tree data frame (validated by test.ftree).
# type: gate type -- "or", "and", "inhibit", "alarm", "cond"/"conditional"/
#       "priority", "comb"/"vote".  "atleast" must use FaultTree.SCRAM::addAtLeast.
# at: ID or tag of the parent gate to connect under.
# reversible_cond: meaningful only for conditional gates (type code 14).
# cond_first: whether the condition is the first child (type codes 12-14).
# human_pbf: human probability of failure, required for alarm gates (13).
# vote_par: c(k, n) parameters, required for comb/vote gates (15).
# tag, label, name, name2, description: identification fields for the gate.
#
# Returns DF with the new gate row appended.  The external interface is
# unchanged from the original.
addLogic<-function(DF, type, at, reversible_cond=FALSE, cond_first=TRUE, human_pbf=NULL,
		vote_par=NULL, tag="", label="", name="", name2="", description="")  {

	if(!test.ftree(DF)) stop("first argument must be a fault tree")
	## resolve a tag reference into a numeric node ID
	at <- tagconnect(DF, at)

	## labels and the name/name2 convention are mutually exclusive tree-wide
	if(label!="")  {
		if(any(DF$Name!="") || any(DF$Name2!="")) {
			stop("Cannot use label once name convention has been established.")
		}
	}
	if(any(DF$Label!=""))  {
		if(name!="" || name2!="")  {
			stop("Cannot use name convention once label has been established.")
		}
	}

	if(tag!="")  {
		## fix: the original wrote length(which(...) != 0), a misplaced
		## parenthesis that only worked by accident; test the count directly
		if(length(which(DF$Tag == tag)) != 0)  {
			stop("tag is not unique")
		}
		prefix<-substr(tag,1,2)
		if(prefix=="E_" || prefix=="G_" || prefix=="H_")  {
			stop("Prefixes 'E_', 'G_', and 'H_' are reserved for auto-generated tags.")
		}
	}

	if(type=="atleast")  {
		stop("atleast must be added through FaultTree.SCRAM::addAtLeast")
	}

	## map the type string to its numeric gate code
	tp<-switch(type,
		or = 10,
		and = 11,
		inhibit=12,
		alarm=13,
		cond=14,
		conditional =14,
		priority=14,
		comb=15,
		vote=15,
		## atleast=16,  # not allowed by addLogic
		stop("gate type not recognized")
	)

	## model test: alarm and comb/vote gates are RAM-model constructs and may
	## not be mixed with PRA events (Type 5 or 16).
	## fix: compare the numeric code tp; the original compared the type STRING
	## to 13/15, so this consistency check could never fire.
	if(tp==13 || tp==15)  {
		## This proposed addition will be RAM model
		if(any(DF$Type==5) || any(DF$Type==16))  {
			stop("RAM system event event called for in PRA model")
		}
	}

	parent<-which(DF$ID== at)
	if(length(parent)==0)  {stop("connection reference not valid")}
	thisID<-max(DF$ID)+1
	if(DF$Type[parent]<10)  {stop("non-gate connection requested")}
	if(!DF$MOE[parent]==0) {
		stop("connection cannot be made to duplicate nor source of duplication")
	}
	if(DF$Type[parent]==15)  {
		if(length(which(DF$CParent==at))>0)  {
			stop("connection slot not available")
		}
		if(tp!=10)  {
			stop("only OR or basic event can connect to priority gate")
		}
	}

	## determine whether this child occupies the condition slot of an
	## inhibit/alarm/conditional parent (two-child gates, codes 12-14)
	condition=0
	if(DF$Type[parent]>11&& DF$Type[parent]<15 )  {
		if(length(which(DF$CParent==at))>1)  {
			stop("connection slot not available")
		}
		if( length(which(DF$CParent==at))==0)  {
			if(DF$Cond_Code[parent]<10)  {
				condition=1
			}
		}else{
			## length(which(DF$CParent==at))==1
			if(DF$Cond_Code[parent]>9)  {
				condition=1
			}
		}
	}

	## default is non-reversible; only conditional gates may be reversible
	reversible=0
	if(reversible_cond==TRUE)  {
		reversible=1
		if(tp!=14)  {
			reversible=0
			warning(paste0("reversible_cond entry ignored at gate ",as.character(thisID)))
		}
	}
	## resolve whether condition is first or second child
	cond_second=0
	if(cond_first == FALSE)  {
		cond_second=1
		if(tp<12 || tp>14)  {
			cond_second=0
			warning(paste0("cond_first entry ignored at gate ",as.character(thisID)))
		}
	}
	cond_code<-reversible+10*cond_second

	## P1/P2 carry gate parameters (human_pbf for alarm, k/n for vote)
	p1=-1
	p2=-1
	if(tp == 13)  {
		## fix: guard against a missing human_pbf before comparing; NULL < 0
		## is length zero and crashed the original condition instead of
		## producing the intended error message
		if(is.null(human_pbf) || human_pbf < 0 || human_pbf >1)  {
			stop(paste0("alarm gate at ID ", as.character(thisID), " requires human failure probability value"))
		}
		p1<-human_pbf
	}else{
		if(!is.null(human_pbf))  {
			warning(paste0("human failure probability for non-alarm gate at ID ",as.character(thisID), " has been ignored"))
		}
	}
	if(tp==15)  {
		if(length(vote_par)==2)  {
			if(vote_par[1]<vote_par[2])  {
				p1<-vote_par[1]
				p2<-vote_par[2]
			}else{
				stop("validation error with vote parameters")
			}
		}else{
			stop("must provide k of n vote parameters c(k,n)")
		}
	}

	## apply default tag names if not specified
	if(tag=="")  {
		tag<-paste0("G_", thisID)
	}

	Dfrow<-data.frame(
		ID= thisID ,
		GParent= at ,
		Tag= tag ,
		Type= tp ,
		CFR= -1 ,
		PBF= -1 ,
		CRT= -1 ,
		MOE= 0 ,
		Condition= condition,
		Cond_Code= cond_code ,
		EType= 0 ,
		P1= p1 ,
		P2= p2 ,
		Collapse= 0 ,
		Label= label ,
		Name= name ,
		Name2= name2 ,
		CParent= at ,
		Level= DF$Level[parent]+1 ,
		Description= description ,
		UType= 0 ,
		UP1= 0 ,
		UP2= 0
	)

	DF<-rbind(DF, Dfrow)
	DF
}
| /fuzzedpackages/FaultTree/R/addLogic.R | no_license | akhikolla/testpackages | R | false | false | 4,667 | r | # addLogic.R
# copyright 2015-2016, openreliability.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
addLogic<-function(DF, type, at, reversible_cond=FALSE, cond_first=TRUE, human_pbf=NULL,
vote_par=NULL, tag="", label="", name="", name2="", description="") {
if(!test.ftree(DF)) stop("first argument must be a fault tree")
at <- tagconnect(DF, at)
if(label!="") {
if(any(DF$Name!="") || any(DF$Name2!="")) {
stop("Cannot use label once name convention has been established.")
}
}
if(any(DF$Label!="")) {
if(name!="" || name2!="") {
stop("Cannot use name convention once label has been established.")
}
}
if(tag!="") {
if (length(which(DF$Tag == tag) != 0)) {
stop("tag is not unique")
}
prefix<-substr(tag,1,2)
if(prefix=="E_" || prefix=="G_" || prefix=="H_") {
stop("Prefixes 'E_', 'G_', and 'H_' are reserved for auto-generated tags.")
}
}
if(type=="atleast") {
stop("atleast must be added through FaultTree.SCRAM::addAtLeast")
}
tp<-switch(type,
or = 10,
and = 11,
inhibit=12,
alarm=13,
cond=14,
conditional =14,
priority=14,
comb=15,
vote=15,
## atleast=16, # not allowed by addLogic
stop("gate type not recognized")
)
## model test
if(type==13 || type==15) {
## This proposed addition will be RAM model
if(any(DF$Type==5) || any(DF$Type==16)) {
stop("RAM system event event called for in PRA model")
}
}
parent<-which(DF$ID== at)
if(length(parent)==0) {stop("connection reference not valid")}
thisID<-max(DF$ID)+1
if(DF$Type[parent]<10) {stop("non-gate connection requested")}
if(!DF$MOE[parent]==0) {
stop("connection cannot be made to duplicate nor source of duplication")
}
if(DF$Type[parent]==15) {
if(length(which(DF$CParent==at))>0) {
stop("connection slot not available")
}
if(tp!=10) {
stop("only OR or basic event can connect to priority gate")
}
}
condition=0
if(DF$Type[parent]>11&& DF$Type[parent]<15 ) {
if(length(which(DF$CParent==at))>1) {
stop("connection slot not available")
}
if( length(which(DF$CParent==at))==0) {
if(DF$Cond_Code[parent]<10) {
condition=1
}
}else{
## length(which(DF$CParent==at))==1
if(DF$Cond_Code[parent]>9) {
condition=1
}
}
}
## default is non-reversible, so
reversible=0
if(reversible_cond==TRUE) {
reversible=1
if(tp!=14) {
reversible=0
warning(paste0("reversible_cond entry ignored at gate ",as.character(thisID)))
}
}
## resolve whether condition is first or second child
cond_second=0
if(cond_first == FALSE) {
cond_second=1
if(tp<12 || tp>14) {
cond_second=0
warning(paste0("cond_first entry ignored at gate ",as.character(thisID)))
}
}
cond_code<-reversible+10*cond_second
p1=-1
p2=-1
if(tp == 13) {
if(human_pbf < 0 || human_pbf >1) {
stop(paste0("alarm gate at ID ", as.character(thisID), " requires human failure probability value"))
}
p1<-human_pbf
}else{
if(!is.null(human_pbf)) {
warning(paste0("human failure probability for non-alarm gate at ID ",as.character(thisID), " has been ignored"))
}
}
if(tp==15) {
if(length(vote_par)==2) {
if(vote_par[1]<vote_par[2]) {
p1<-vote_par[1]
p2<-vote_par[2]
}else{
stop("validation error with vote parameters")
}
}else{
stop("must provide k of n vote parameters c(k,n)")
}
}
## apply default tag names if not specified
if(tag=="") {
tag<-paste0("G_", thisID)
}
Dfrow<-data.frame(
ID= thisID ,
GParent= at ,
Tag= tag ,
Type= tp ,
CFR= -1 ,
PBF= -1 ,
CRT= -1 ,
MOE= 0 ,
Condition= condition,
Cond_Code= cond_code ,
EType= 0 ,
P1= p1 ,
P2= p2 ,
Collapse= 0 ,
Label= label ,
Name= name ,
Name2= name2 ,
CParent= at ,
Level= DF$Level[parent]+1 ,
Description= description ,
UType= 0 ,
UP1= 0 ,
UP2= 0
)
DF<-rbind(DF, Dfrow)
DF
}
|
# Maximum likelihood estimation for Cormack-Jolly-Seber apparent-survival models
# of type phi(t) p(t) or with time covariates
# Added grouping factor
# Uses detection history matrix as input, not m-array.
# Uses multinomial likelihood.
# Grouping factor strategy:
# Do m-array for each group, combine into a 3d array
# Do design matrices with a set of occasions for each group
# Calculate q-array and likelihood for each group and combine
log_qArray <- function(log_phi, log_p, log_1mp) {
  # Calculates the matrix of multinomial cell log(probabilities)
  # corresponding to an m-array for a CJS model.
  # log_phi = vector of log(apparent survival probabilities)
  # log_p = vector of log(recapture probabilities)
  # log_1mp = vector of log(1 - recapture probabilities)
  # Returns an n x (n+1) matrix; the last column is the log-probability of
  # never being seen again.
  # NO SANITY CHECKS, calling function must take care of this.
  n <- length(log_phi)
  # Create n x n+1 matrix and fill the diagonal: survive one interval and be
  # recaptured at the very next occasion.
  q <- diag(as.vector(log_p + log_phi), n, n+1)
  # Fill the upper triangle (survive occasions i..j, go undetected at
  # i..j-1, detected at j) and accumulate row sums on the log scale.
  sum_probs <- numeric(n)
  # fix: seq_len() is safe when n == 1; the original 1:(n-1) evaluated to
  # c(1, 0) and indexed row 0 of q.
  for (i in seq_len(n - 1)){
    for (j in (i+1):n) {
      q[i,j] <- sum(log_phi[i:j]) + sum(log_1mp[i:(j-1)]) + log_p[j]
    }
    sum_probs[i] <- logSumExp(q[i, i:n])
  }
  sum_probs[n] <- q[n, n]
  # Add the last column ("never seen again") and return
  q[, n+1] <- log1minusExp(sum_probs)
  return(q)
}
# ..........................................................................
survCJS <- function(DH, model=list(phi~1, p~1), data=NULL, freq=1, group, interval=1,
                    ci = 0.95, link=c("logit", "probit"), ...) {
  # Maximum-likelihood fit of a Cormack-Jolly-Seber apparent-survival model.
  # ** DH is detection history matrix/data frame, animals x occasions.
  # ** freq is vector of frequencies for each detection history
  # ** model is a list of 2-sided formulae for psi and p; can also be a single
  #       2-sided formula, eg, model = psi ~ habitat.
  # ** data a data frame with the covariates (one row per interval).
  # ** group is a factor specifying which group each row of DH belongs to.
  # ** ci is required confidence interval.
  # Returns a "wiqid" list with beta estimates, their vcov, real-scale
  # estimates, and the log-likelihood.
  if(match.arg(link) == "logit") {
    plink <- plogis
  } else {
    plink <- pnorm
  }
  # Sanity checks: for DH??
  ni <- ncol(DH) - 1 # number of survival intervals and REcapture occasions
  if(!is.null(data) && nrow(data) != ni)
    stop("The 'data' argument is not a valid data frame.")
  if(length(freq) == 1)
    freq <- rep(freq, nrow(DH))
  if (length(freq) != nrow(DH))
    stop("freq must have a value for each row of the detection history matrix.")
  if(!missing(group)) {
    if(length(group) != nrow(DH))
      stop("Group must have a value for each row of the detection history matrix.")
    group <- as.factor(group)
    nGroup <- nlevels(group)
    groupNames <- levels(group)
    # One block of ni covariate rows per group, tagged with the group name.
    data <- as.data.frame(cbind(data, group=rep(groupNames, each=ni)))
  } else {
    group <- NULL
    nGroup <- 1
  }
  if(length(interval) == 1)
    interval <- rep(interval, ni)
  if(length(interval) != ni)
    stop("'interval' must be scalar or length equal to number of intervals between captures")
  # NOTE(review): fixCI is a package-internal helper -- presumably returns
  # the +/- normal quantiles for the requested CI; confirm in the package.
  crit <- fixCI(ci)
  # Convert detection history to 3d array of m-arrays to facilitate use of multinomial likelihood
  mARRAY <- array(0, c(ni, ni+1, nGroup))
  if(nGroup == 1) {
    mARRAY[, , 1] <- ch2mArray(CH=DH, freq=freq)
  } else {
    for(i in 1:nGroup) {
      DHgrp <- subset(DH, group==groupNames[i])
      freqgrp <- subset(freq, group==groupNames[i])
      mARRAY[, , i] <- ch2mArray(CH=DHgrp, freq=freqgrp)
    }
  }
  # Standardise the model:
  model <- stdModel(model, defaultModel=list(phi=~1, p=~1))
  # Standardize the data; .Time/.Time2/.Time3/.time are built-in trend and
  # time-factor covariates available to the user's formulae.
  dataList <- stddata(data, NULL)
  dataList$.Time <- standardize(1:ni)
  dataList$.Time2 <- dataList$.Time^2
  dataList$.Time3 <- dataList$.Time^3
  dataList$.time <- as.factor(1:ni)
  # Set up model matrices (one row per interval per group)
  phiDf <- selectCovars(model$phi, dataList, ni*nGroup)
  phiMat <- modelMatrix(model$phi, phiDf)
  phiK <- ncol(phiMat)
  pDf <- selectCovars(model$p, dataList, ni*nGroup)
  pMat <- modelMatrix(model$p, pDf)
  pK <- ncol(pMat)
  K <- phiK + pK
  if(nrow(phiMat) != ni*nGroup || nrow(pMat) != ni*nGroup)
    stop("Missing values not allowed in covariates.")
  # Set up objects to hold output
  beta.mat <- matrix(NA_real_, K, 4)
  colnames(beta.mat) <- c("est", "SE", "lowCI", "uppCI")
  rownames(beta.mat) <- c(
    paste("phi:", colnames(phiMat)),
    paste("p:", colnames(pMat)))
  lp.mat <- matrix(NA_real_, ni*nGroup*2, 3)
  colnames(lp.mat) <- c("est", "lowCI", "uppCI")
  if(nGroup == 1) {
    rownames(lp.mat) <- c(
      paste0("phi", 1:ni),
      paste0("p", 1:ni))
  } else {
    rownames(lp.mat) <- c(
      paste0(data$group, ":phi", 1:ni),
      paste0(data$group, ":p", 1:ni))
  }
  npar <- NA_real_
  varcov <- NULL
  # Negative log-likelihood: multinomial cell probabilities per group via
  # log_qArray, weighted by the observed m-array counts.
  nll <- function(param){
    phiBeta <- param[1:phiK]
    pBeta <- param[(phiK+1):K]
    log_phi <- plink(phiMat %*% phiBeta, log.p=TRUE)
    link_p <- pMat %*% pBeta
    log_p <- plink(link_p, log.p=TRUE)
    log_1mp <- plink( -link_p, log.p=TRUE)
    if(nGroup == 1) {
      nll <- -sum(mARRAY[, , 1] * log_qArray(log_phi*interval, log_p, log_1mp))
    } else {
      nll <- numeric(nGroup)
      for(i in 1:nGroup) {
        log_phi0 <- log_phi[data$group == groupNames[i]]
        log_p0 <- log_p[data$group == groupNames[i]]
        log_1mp0 <- log_1mp[data$group == groupNames[i]]
        nll[i] <- -sum(mARRAY[, , i] * log_qArray(log_phi0*interval, log_p0, log_1mp0))
      }
    }
    # Cap at the largest double to keep nlm from seeing Inf.
    return(min(sum(nll), .Machine$double.xmax))
  }
  # Run mle estimation with nlm, forwarding any user-supplied nlm arguments:
  # res <- nlm(nll, param, hessian=TRUE, stepmax=10) # 2015-03-01
  nlmArgs <- list(...)
  nlmArgs$f <- nll
  nlmArgs$p <- rep(0, K)
  nlmArgs$hessian <- TRUE
  if(is.null(nlmArgs$stepmax))
    nlmArgs$stepmax <- 10
  res <- do.call(nlm, nlmArgs)
  if(res$code > 2) # exit code 1 or 2 is ok.
    warning(paste("Convergence may not have been reached (nlm code", res$code, ")"))
  # Process the output: point estimates on the link scale.
  beta.mat[,1] <- res$estimate
  lp.mat[, 1] <- c(phiMat %*% beta.mat[1:phiK, 1],
                   pMat %*% beta.mat[(phiK+1):K, 1])
  logLik <- -res$minimum
  # Invert the Hessian for the vcov; on failure, SEs/CIs stay NA.
  varcov0 <- try(chol2inv(chol(res$hessian)), silent=TRUE)
  # if (!inherits(varcov0, "try-error") && all(diag(varcov0) > 0)) {
  if (!inherits(varcov0, "try-error")) {
    varcov <- varcov0
    SE <- suppressWarnings(sqrt(diag(varcov)))
    beta.mat[, 2] <- SE
    beta.mat[, 3:4] <- sweep(outer(SE, crit), 1, res$estimate, "+")
    SElp <- c(sqrt(getFittedVar(phiMat, varcov[1:phiK, 1:phiK])),
              sqrt(getFittedVar(pMat, varcov[(phiK+1):K, (phiK+1):K])))
    lp.mat[, 2:3] <- sweep(outer(SElp, crit), 1, lp.mat[, 1], "+")
    npar <- K
  }
  # Put it all together and return (real-scale estimates via the inverse link)
  out <- list(call = match.call(),
              beta = beta.mat,
              beta.vcv = varcov,
              real = plink(lp.mat),
              logLik = c(logLik=logLik, df=npar, nobs=sum(mARRAY)) )
  class(out) <- c("wiqid", "list")
  return(out)
}
| /R/survCJS.R | no_license | mikemeredith/wiqid | R | false | false | 6,857 | r |
# Maximum likelihood estimation for Cormack-Jolly-Seber apparent-survival models
# of type phi(t) p(t) or with time covariates
# Added grouping factor
# Uses detection history matrix as input, not m-array.
# Uses multinomial likelihood.
# Grouping factor strategy:
# Do m-array for each group, combine into a 3d array
# Do design matrices with a set of occasions for each group
# Calculate q-array and likelihood for each group and combine
log_qArray <- function(log_phi, log_p, log_1mp) {
# Calculates the matrix of multinomial cell log(probabilities)
# corresponding to an m-array.
# log_phi = vector of log(apparent survival probabilities)
# log_p = vector of log(recapture probabilities)
# log_1mp = vector of log(1 - recapture probabilities)
# NO SANITY CHECKS, calling function must take care of this.
n <- length(log_phi)
# Create n x n+1 matrix and fill diagonal
q <- diag(as.vector(log_p + log_phi), n, n+1)
# Fill the upper triangle, and get the row sums
sum_probs <- numeric(n)
for (i in 1:(n-1)){
for (j in (i+1):n) {
q[i,j] <- sum(log_phi[i:j]) + sum(log_1mp[i:(j-1)]) + log_p[j]
}
sum_probs[i] <- logSumExp(q[i, i:n])
}
sum_probs[n] <- q[n, n]
# Add the last column and return
q[, n+1] <- log1minusExp(sum_probs)
return(q)
}
# ..........................................................................
survCJS <- function(DH, model=list(phi~1, p~1), data=NULL, freq=1, group, interval=1,
ci = 0.95, link=c("logit", "probit"), ...) {
# ** DH is detection history matrix/data frame, animals x occasions.
# ** freq is vector of frequencies for each detection history
# ** model is a list of 2-sided formulae for psi and p; can also be a single
# 2-sided formula, eg, model = psi ~ habitat.
# ** data a data frame with the covariates.
# ** group is a factor specifying which group each row of DH belongs to.
# ** ci is required confidence interval.
if(match.arg(link) == "logit") {
plink <- plogis
} else {
plink <- pnorm
}
# Sanity checks: for DH??
ni <- ncol(DH) - 1 # number of survival intervals and REcapture occasions
if(!is.null(data) && nrow(data) != ni)
stop("The 'data' argument is not a valid data frame.")
if(length(freq) == 1)
freq <- rep(freq, nrow(DH))
if (length(freq) != nrow(DH))
stop("freq must have a value for each row of the detection history matrix.")
if(!missing(group)) {
if(length(group) != nrow(DH))
stop("Group must have a value for each row of the detection history matrix.")
group <- as.factor(group)
nGroup <- nlevels(group)
groupNames <- levels(group)
data <- as.data.frame(cbind(data, group=rep(groupNames, each=ni)))
} else {
group <- NULL
nGroup <- 1
}
if(length(interval) == 1)
interval <- rep(interval, ni)
if(length(interval) != ni)
stop("'interval' must be scalar or length equal to number of intervals between captures")
crit <- fixCI(ci)
# Convert detection history to 3d array of m-arrays to facilitate use of multinomial likelihood
mARRAY <- array(0, c(ni, ni+1, nGroup))
if(nGroup == 1) {
mARRAY[, , 1] <- ch2mArray(CH=DH, freq=freq)
} else {
for(i in 1:nGroup) {
DHgrp <- subset(DH, group==groupNames[i])
freqgrp <- subset(freq, group==groupNames[i])
mARRAY[, , i] <- ch2mArray(CH=DHgrp, freq=freqgrp)
}
}
# Standardise the model:
model <- stdModel(model, defaultModel=list(phi=~1, p=~1))
# Standardize the data
dataList <- stddata(data, NULL)
dataList$.Time <- standardize(1:ni)
dataList$.Time2 <- dataList$.Time^2
dataList$.Time3 <- dataList$.Time^3
dataList$.time <- as.factor(1:ni)
# Set up model matrices
phiDf <- selectCovars(model$phi, dataList, ni*nGroup)
phiMat <- modelMatrix(model$phi, phiDf)
phiK <- ncol(phiMat)
pDf <- selectCovars(model$p, dataList, ni*nGroup)
pMat <- modelMatrix(model$p, pDf)
pK <- ncol(pMat)
K <- phiK + pK
if(nrow(phiMat) != ni*nGroup || nrow(pMat) != ni*nGroup)
stop("Missing values not allowed in covariates.")
# Set up objects to hold output
beta.mat <- matrix(NA_real_, K, 4)
colnames(beta.mat) <- c("est", "SE", "lowCI", "uppCI")
rownames(beta.mat) <- c(
paste("phi:", colnames(phiMat)),
paste("p:", colnames(pMat)))
lp.mat <- matrix(NA_real_, ni*nGroup*2, 3)
colnames(lp.mat) <- c("est", "lowCI", "uppCI")
if(nGroup == 1) {
rownames(lp.mat) <- c(
paste0("phi", 1:ni),
paste0("p", 1:ni))
} else {
rownames(lp.mat) <- c(
paste0(data$group, ":phi", 1:ni),
paste0(data$group, ":p", 1:ni))
}
npar <- NA_real_
varcov <- NULL
# Log likelihood function
nll <- function(param){
phiBeta <- param[1:phiK]
pBeta <- param[(phiK+1):K]
log_phi <- plink(phiMat %*% phiBeta, log.p=TRUE)
link_p <- pMat %*% pBeta
log_p <- plink(link_p, log.p=TRUE)
log_1mp <- plink( -link_p, log.p=TRUE)
if(nGroup == 1) {
nll <- -sum(mARRAY[, , 1] * log_qArray(log_phi*interval, log_p, log_1mp))
} else {
nll <- numeric(nGroup)
for(i in 1:nGroup) {
log_phi0 <- log_phi[data$group == groupNames[i]]
log_p0 <- log_p[data$group == groupNames[i]]
log_1mp0 <- log_1mp[data$group == groupNames[i]]
nll[i] <- -sum(mARRAY[, , i] * log_qArray(log_phi0*interval, log_p0, log_1mp0))
}
}
return(min(sum(nll), .Machine$double.xmax))
}
# Run mle estimation with nlm:
# res <- nlm(nll, param, hessian=TRUE, stepmax=10) # 2015-03-01
nlmArgs <- list(...)
nlmArgs$f <- nll
nlmArgs$p <- rep(0, K)
nlmArgs$hessian <- TRUE
if(is.null(nlmArgs$stepmax))
nlmArgs$stepmax <- 10
res <- do.call(nlm, nlmArgs)
if(res$code > 2) # exit code 1 or 2 is ok.
warning(paste("Convergence may not have been reached (nlm code", res$code, ")"))
# Process the output
beta.mat[,1] <- res$estimate
lp.mat[, 1] <- c(phiMat %*% beta.mat[1:phiK, 1],
pMat %*% beta.mat[(phiK+1):K, 1])
logLik <- -res$minimum
varcov0 <- try(chol2inv(chol(res$hessian)), silent=TRUE)
# if (!inherits(varcov0, "try-error") && all(diag(varcov0) > 0)) {
if (!inherits(varcov0, "try-error")) {
varcov <- varcov0
SE <- suppressWarnings(sqrt(diag(varcov)))
beta.mat[, 2] <- SE
beta.mat[, 3:4] <- sweep(outer(SE, crit), 1, res$estimate, "+")
SElp <- c(sqrt(getFittedVar(phiMat, varcov[1:phiK, 1:phiK])),
sqrt(getFittedVar(pMat, varcov[(phiK+1):K, (phiK+1):K])))
lp.mat[, 2:3] <- sweep(outer(SElp, crit), 1, lp.mat[, 1], "+")
npar <- K
}
# Put it all together and return
out <- list(call = match.call(),
beta = beta.mat,
beta.vcv = varcov,
real = plink(lp.mat),
logLik = c(logLik=logLik, df=npar, nobs=sum(mARRAY)) )
class(out) <- c("wiqid", "list")
return(out)
}
|
################################################################################
# This script conducts an internal validation of the cohort STM of Stage II    #
# colon cancer patients stratified by CDX2 biomarker status to survival curves #
# from Dalerba et al. (2016)                                                   #
#                                                                              #
# Authors:                                                                     #
#     - Fernando Alarid-Escudero, PhD, <fernando.alarid@cide.edu>              #
#     - Deb Schrag, MD, MPH                                                    #
#     - Karen M. Kuntz, ScD                                                    #
################################################################################
# The structure of this code follows DARTH's coding framework                  #
# https://github.com/DARTH-git/darthpack                                       #
################################################################################
rm(list = ls()) # to clean the workspace
#### 04.1 Load packages and functions ####
#### 04.1.1 Load packages ####
# Project package; provides load_params_init(), load_all_params(),
# calibration_out() and data_summary() used below.
# NOTE(review): %>% and the ggplot2 functions used later are not attached
# explicitly here -- presumably re-exported/attached via cdx2cea; confirm.
library(cdx2cea)
#### 04.1.2 Load inputs ####
# Cohort enters the model at age 75 and is followed to age 80 (5-year horizon).
l_params_init_valid <- load_params_init(n_age_init = 75,
                                        n_age_max = 80)
l_params_all_valid <- load_all_params(l_params_init = l_params_init_valid)
#### 04.1.3 Load functions ####
# no required functions
#### 04.1.4 Load targets and calibrated parameters ####
# Package data: calibration targets, the posterior sample of calibrated
# parameters, and the maximum-a-posteriori (MAP) parameter set.
data("03_calibration_targets")
data("m_calib_post")
data("v_calib_post_map")
#### 04.2 Compute model-predicted outputs ####
#### 04.2.1 Compute model-predicted outputs for each sample of posterior distribution ####
### Number of posterior samples
n_samp <- nrow(m_calib_post)
### Define matrices to store model outputs
# One row per posterior sample; 61 columns = monthly time points 0..60
# (5 years). DFS = disease-free, OS = overall, DSS = disease-specific
# survival, each split by CDX2 status (neg/pos).
m_dfs_neg <- matrix(NA, nrow = n_samp, ncol = 61)
m_dfs_pos <- matrix(NA, nrow = n_samp, ncol = 61)
m_os_neg <- matrix(NA, nrow = n_samp, ncol = 61)
m_os_pos <- matrix(NA, nrow = n_samp, ncol = 61)
m_dss_neg <- matrix(NA, nrow = n_samp, ncol = 61)
m_dss_pos <- matrix(NA, nrow = n_samp, ncol = 61)
### Create data frames with model predicted outputs
df_dfs_neg <- data.frame(Outcome = "DFS", CDX2 = "CDX2-Negative", m_dfs_neg)
df_dfs_pos <- data.frame(Outcome = "DFS", CDX2 = "CDX2-Positive", m_dfs_pos)
df_os_neg <- data.frame(Outcome = "OS", CDX2 = "CDX2-Negative", m_os_neg)
df_os_pos <- data.frame(Outcome = "OS", CDX2 = "CDX2-Positive", m_os_pos)
df_dss_neg <- data.frame(Outcome = "DSS", CDX2 = "CDX2-Negative", m_dss_neg)
df_dss_pos <- data.frame(Outcome = "DSS", CDX2 = "CDX2-Positive", m_dss_pos)
### Evaluate model at each posterior sample and store results
# calibration_out() runs the cohort model for one posterior parameter set and
# returns monthly DFS/OS/DSS curves (61 values each) by CDX2 status.
for(i in 1:n_samp){ # i = 1
  l_out_post <- calibration_out(v_params_calib = m_calib_post[i, ],
                                l_params_all = l_params_all_valid)
  df_dfs_neg[i, -c(1, 2)] <- l_out_post$v_dfs_CDX2neg
  df_dfs_pos[i, -c(1, 2)] <- l_out_post$v_dfs_CDX2pos
  df_os_neg[i, -c(1, 2)] <- l_out_post$v_os_CDX2neg
  df_os_pos[i, -c(1, 2)] <- l_out_post$v_os_CDX2pos
  df_dss_neg[i, -c(1, 2)] <- l_out_post$v_dss_CDX2neg
  df_dss_pos[i, -c(1, 2)] <- l_out_post$v_dss_CDX2pos
  cat('\r', paste(round(i/n_samp * 100), "% done", sep = " ")) # display progress
}
### Combine all outputs
df_out_valid <- dplyr::bind_rows(df_dfs_neg,
                                 df_dfs_pos,
                                 df_os_pos ,
                                 df_os_neg ,
                                 df_dss_pos,
                                 df_dss_neg)
### Rename time variable
# Columns 3..63 hold the survival curve; label them with months 0..60.
colnames(df_out_valid)[3:ncol(df_out_valid)] <- 0:60
### Transform data.frame to long format
df_out_valid_lng <- reshape2::melt(df_out_valid,
                                   id.vars = c("Outcome", "CDX2"))
### Compute posterior-predicted 95% CI
# data_summary() (from cdx2cea) aggregates over posterior samples per
# Outcome x CDX2 x time point.
df_out_valid_sum <- data_summary(df_out_valid_lng, varname = "value",
                                 groupnames = c("Outcome", "CDX2", "variable"))
# NOTE(review): `variable` is a factor with levels "0".."60", so as.numeric()
# returns the level *index* (1..61), not the month. `Time == 60` below picks
# the 60th level and `Time - 1` shifts it back; confirm this matches the
# Time convention used in df_calibration_targets.
df_out_valid_sum$Time <- as.numeric(df_out_valid_sum$variable)
### Only 5-yr survival
df_out_valid_5yr_sum <- df_out_valid_sum %>%
  dplyr::filter(Time == 60) %>%
  dplyr::mutate(Source = "Model",
                N = NaN) %>%
  dplyr::select(Source, Outcome, CDX2, Time, S = value, se = sd, lb, ub)
df_out_valid_5yr_sum$Time <- df_out_valid_5yr_sum$Time-1
### Combine model-predicted outputs with targets
# df_calibration_targets is presumably created by data("03_calibration_targets")
# above -- confirm.
df_model_n_targets <- dplyr::bind_rows(df_out_valid_5yr_sum,
                                       df_calibration_targets)
#### 04.4 Internal validation: Model-predicted outputs vs. targets ####
# Error bars compare model posterior intervals (red) against target intervals
# (black), faceted by CDX2 status.
gg_valid <- ggplot(df_model_n_targets,
                   aes(x = Outcome, y = S,
                       ymin = lb, ymax = ub,
                       fill = Source,
                       shape = Source)) +
  # geom_point(position = position_dodge()) +
  facet_wrap(~CDX2) +
  # geom_bar(position = position_dodge(),
  #          stat = "identity", alpha = 0.4) +
  geom_errorbar(aes(color = Source),
                position = position_dodge2(width = 0.5, padding = 0.7)) +
  scale_color_manual(values = c("Calibration target" = "black", "Model" = "red")) +
  scale_fill_manual(values = c("Calibration target" = "black", "Model" = "red")) +
  scale_shape_manual(values = c("Calibration target" = 1, "Model" = 8)) +
  xlab("") +
  ylab("5-year survival") +
  theme_bw(base_size = 16) +
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        strip.background = element_rect(fill = "white",
                                        color = "white"),
        strip.text = element_text(size = 14, face = "bold"))
gg_valid
ggsave(gg_valid,
       filename = "figs/04_validation_posterior_vs_targets.pdf",
       width = 8, height = 6)
ggsave(gg_valid,
       filename = "figs/04_validation_posterior_vs_targets.png",
       width = 8, height = 6)
ggsave(gg_valid,
       filename = "figs/04_validation_posterior_vs_targets.jpg",
width = 8, height = 6) | /analysis/04_validation.R | permissive | W-Mohammed/cdx2cea | R | false | false | 5,956 | r | ################################################################################
# This script conducts an internal validation of the cohort STM of Stage II #
# colon cancer patients stratified by CDX2 biomarker status to survival curves #
# from Dalerba et al. (2016) #
# #
# Authors: #
# - Fernando Alarid-Escudero, PhD, <fernando.alarid@cide.edu> #
# - Deb Schrag, MD, MPH #
# - Karen M. Kuntz, ScD #
################################################################################
# The structure of this code follows DARTH's coding framework #
# https://github.com/DARTH-git/darthpack #
################################################################################
rm(list = ls()) # to clean the workspace
#### 04.1 Load packages and functions ####
#### 04.1.1 Load packages ####
# Project package; provides load_params_init(), load_all_params(),
# calibration_out() and data_summary() used below.
# NOTE(review): %>% and the ggplot2 functions used later are not attached
# explicitly here -- presumably re-exported/attached via cdx2cea; confirm.
library(cdx2cea)
#### 04.1.2 Load inputs ####
# Cohort enters the model at age 75 and is followed to age 80 (5-year horizon).
l_params_init_valid <- load_params_init(n_age_init = 75,
                                        n_age_max = 80)
l_params_all_valid <- load_all_params(l_params_init = l_params_init_valid)
#### 04.1.3 Load functions ####
# no required functions
#### 04.1.4 Load targets and calibrated parameters ####
# Package data: calibration targets, the posterior sample of calibrated
# parameters, and the maximum-a-posteriori (MAP) parameter set.
data("03_calibration_targets")
data("m_calib_post")
data("v_calib_post_map")
#### 04.2 Compute model-predicted outputs ####
#### 04.2.1 Compute model-predicted outputs for each sample of posterior distribution ####
### Number of posterior samples
n_samp <- nrow(m_calib_post)
### Define matrices to store model outputs
# One row per posterior sample; 61 columns = monthly time points 0..60
# (5 years). DFS = disease-free, OS = overall, DSS = disease-specific
# survival, each split by CDX2 status (neg/pos).
m_dfs_neg <- matrix(NA, nrow = n_samp, ncol = 61)
m_dfs_pos <- matrix(NA, nrow = n_samp, ncol = 61)
m_os_neg <- matrix(NA, nrow = n_samp, ncol = 61)
m_os_pos <- matrix(NA, nrow = n_samp, ncol = 61)
m_dss_neg <- matrix(NA, nrow = n_samp, ncol = 61)
m_dss_pos <- matrix(NA, nrow = n_samp, ncol = 61)
### Create data frames with model predicted outputs
df_dfs_neg <- data.frame(Outcome = "DFS", CDX2 = "CDX2-Negative", m_dfs_neg)
df_dfs_pos <- data.frame(Outcome = "DFS", CDX2 = "CDX2-Positive", m_dfs_pos)
df_os_neg <- data.frame(Outcome = "OS", CDX2 = "CDX2-Negative", m_os_neg)
df_os_pos <- data.frame(Outcome = "OS", CDX2 = "CDX2-Positive", m_os_pos)
df_dss_neg <- data.frame(Outcome = "DSS", CDX2 = "CDX2-Negative", m_dss_neg)
df_dss_pos <- data.frame(Outcome = "DSS", CDX2 = "CDX2-Positive", m_dss_pos)
### Evaluate model at each posterior sample and store results
# calibration_out() runs the cohort model for one posterior parameter set and
# returns monthly DFS/OS/DSS curves (61 values each) by CDX2 status.
for(i in 1:n_samp){ # i = 1
  l_out_post <- calibration_out(v_params_calib = m_calib_post[i, ],
                                l_params_all = l_params_all_valid)
  df_dfs_neg[i, -c(1, 2)] <- l_out_post$v_dfs_CDX2neg
  df_dfs_pos[i, -c(1, 2)] <- l_out_post$v_dfs_CDX2pos
  df_os_neg[i, -c(1, 2)] <- l_out_post$v_os_CDX2neg
  df_os_pos[i, -c(1, 2)] <- l_out_post$v_os_CDX2pos
  df_dss_neg[i, -c(1, 2)] <- l_out_post$v_dss_CDX2neg
  df_dss_pos[i, -c(1, 2)] <- l_out_post$v_dss_CDX2pos
  cat('\r', paste(round(i/n_samp * 100), "% done", sep = " ")) # display progress
}
### Combine all outputs
df_out_valid <- dplyr::bind_rows(df_dfs_neg,
                                 df_dfs_pos,
                                 df_os_pos ,
                                 df_os_neg ,
                                 df_dss_pos,
                                 df_dss_neg)
### Rename time variable
# Columns 3..63 hold the survival curve; label them with months 0..60.
colnames(df_out_valid)[3:ncol(df_out_valid)] <- 0:60
### Transform data.frame to long format
df_out_valid_lng <- reshape2::melt(df_out_valid,
                                   id.vars = c("Outcome", "CDX2"))
### Compute posterior-predicted 95% CI
# data_summary() (from cdx2cea) aggregates over posterior samples per
# Outcome x CDX2 x time point.
df_out_valid_sum <- data_summary(df_out_valid_lng, varname = "value",
                                 groupnames = c("Outcome", "CDX2", "variable"))
# NOTE(review): `variable` is a factor with levels "0".."60", so as.numeric()
# returns the level *index* (1..61), not the month. `Time == 60` below picks
# the 60th level and `Time - 1` shifts it back; confirm this matches the
# Time convention used in df_calibration_targets.
df_out_valid_sum$Time <- as.numeric(df_out_valid_sum$variable)
### Only 5-yr survival
df_out_valid_5yr_sum <- df_out_valid_sum %>%
  dplyr::filter(Time == 60) %>%
  dplyr::mutate(Source = "Model",
                N = NaN) %>%
  dplyr::select(Source, Outcome, CDX2, Time, S = value, se = sd, lb, ub)
df_out_valid_5yr_sum$Time <- df_out_valid_5yr_sum$Time-1
### Combine model-predicted outputs with targets
# df_calibration_targets is presumably created by data("03_calibration_targets")
# above -- confirm.
df_model_n_targets <- dplyr::bind_rows(df_out_valid_5yr_sum,
                                       df_calibration_targets)
#### 04.4 Internal validation: Model-predicted outputs vs. targets ####
# Error bars compare model posterior intervals (red) against target intervals
# (black), faceted by CDX2 status.
gg_valid <- ggplot(df_model_n_targets,
                   aes(x = Outcome, y = S,
                       ymin = lb, ymax = ub,
                       fill = Source,
                       shape = Source)) +
  # geom_point(position = position_dodge()) +
  facet_wrap(~CDX2) +
  # geom_bar(position = position_dodge(),
  #          stat = "identity", alpha = 0.4) +
  geom_errorbar(aes(color = Source),
                position = position_dodge2(width = 0.5, padding = 0.7)) +
  scale_color_manual(values = c("Calibration target" = "black", "Model" = "red")) +
  scale_fill_manual(values = c("Calibration target" = "black", "Model" = "red")) +
  scale_shape_manual(values = c("Calibration target" = 1, "Model" = 8)) +
  xlab("") +
  ylab("5-year survival") +
  theme_bw(base_size = 16) +
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        strip.background = element_rect(fill = "white",
                                        color = "white"),
        strip.text = element_text(size = 14, face = "bold"))
gg_valid
ggsave(gg_valid,
       filename = "figs/04_validation_posterior_vs_targets.pdf",
       width = 8, height = 6)
ggsave(gg_valid,
       filename = "figs/04_validation_posterior_vs_targets.png",
       width = 8, height = 6)
ggsave(gg_valid,
       filename = "figs/04_validation_posterior_vs_targets.jpg",
width = 8, height = 6) |
#' Get All Locations
#'
#' Gets a listing of all locations. One of the following attributes must be set: locality, postalCode, region
#'
#' @concept Location
#'
#' @param p Page Number
#' @param ids ID's of the locations to return, comma separated. Max 10.
#' @param locality Locality of the location (in US, city)
#' @param region Region of the location (in US, state)
#' @param postalCode Postal Code of a location
#' @param isPrimary Whether the location is the primary one or not
#' @param inPlanning Whether the location is in planning and not open yet
#' @param isClosed Whether the location is closed.
#' @param locationType Key(s) of the type of location. Comma separated. micro macro nano prewpub production office tasting restaurant cidery meadery
#' @param countryIsoCode Two-letter country code of the location
#' @param since Returns everything that has been updated since that date. Max 30 days. In UNIX timestamp format.
#' @param status Status of the location in the API
#' @param order How the results should be ordered name Default breweryName locality region postalCode isPrimary inPlanning isClosed locationType countryIsoCode status createDate updateDate
#' @param sort How the results should be sorted. ASC Default DESC
#' @return id The unique id of the location.
#' @return name The name of the location. Typically this will be like "Main Brewery" for breweries that have one location.
#' @return streetAddress The street address of the location.
#' @return extendedAddress The extended part of the address of the location. This is normally the second line and could be something like Suite #100.
#' @return locality This is the locality / city of the location.
#' @return region This is the region / state of the location.
#' @return postalCode The postal code / zip code of the location.
#' @return phone The phone number of the location.
#' @return website The URL to the website for the location.
#' @return hoursOfOperation A combination of the hoursOfOperationExplicit and hoursOfOperationNotes field, put together in a human-readable format.
#' @return hoursOfOperationExplicitString An array of days and open times. Days may have multiple start and end time sets.
#' @return hoursOfOperationNotes A free-text field for notes about the hours of operation.
#' @return tourInfo A free-text field containing information about brewery tours at the location.
#' @return timezoneId The timezone ID for the location. Example: America/New_York
#' @return latitude The latitude of the location.
#' @return longitude The longitude of the location.
#' @return isPrimary Whether or not this location is the primary location for the associated brewery.
#' @return inPlanning Whether or not the location is in planning.
#' @return isClosed Whether or not the location is closed (this means permanently closed)
#' @return openToPublic Whether or not the location is open to the public.
#' @return locationType The key for the type of location. See the locationTypeDisplay for the full display string.
#' @return locationTypeDisplay The display string that corresponds to the locationType.
#' @return countryIsoCode The two character country code of the location. See the country field for detailed information about the country.
#' @return country Detailed information on the country. This field is an object that contains isoCode, name, displayName, isoThree, numbercode, and urlTitle.
#' @return yearOpened The year that the location opened.
#' @return breweryId The id of the associated brewery. See the brewer object for all the details on the associated brewery.
#' @return brewery All the information for the brewery associated with the location.
#' @export
getEveryLocation <- function(p, ids, locality, region, postalCode, isPrimary, inPlanning, isClosed, locationType, countryIsoCode, since, status, order, sort){
  # Snapshot every argument as the query string; empty-string values are
  # treated as "not supplied" and dropped before the request is built.
  # NOTE(review): arguments left missing (rather than passed as "") may break
  # the `!= ""` filter -- confirm how callers invoke this.
  query <- as.list(environment())
  query <- query[query != ""]
  # GET /locations, then flatten the JSON list into a data frame.
  response <- makeRequest("/locations", "GET", query)
  flattenJsonList(response$data)
}
#' Get a single location
#'
#' Gets a single Location
#'
#' @concept Location
#'
#' @param locationId The locationId
#' @return none
#' @export
getLocation <- function(locationId){
  # Capture the arguments and drop empty-string ("unset") values.
  query <- as.list(environment())
  query <- query[query != ""]
  # The first element (locationId) is a path parameter: substitute it into
  # the ":locationId" placeholder of the endpoint template.
  endpoint <- "/location/:locationId"
  path_params <- query[1:1]
  for (k in seq_along(path_params)) {
    placeholder <- paste0(":", names(path_params)[[k]])
    endpoint <- sub(placeholder, path_params[[k]], endpoint)
  }
  # Whatever remains (nothing for this endpoint) is sent as the query string.
  query <- query[-(1:length(path_params))]
  response <- makeRequest(endpoint, "GET", query)
  flattenJsonList(response$data)
}
#' Update an location
#'
#' Updates an existing location.
#'
#' @concept Location
#'
#' @param locationId The locationId
#' @param name Nickname for the location
#' @param streetAddress Street address
#' @param extendedAddress Extended address, such as suite or apartment number
#' @param locality Locality, or city, where the location is at
#' @param region Region, also known as state or province
#' @param postalCode Postal code
#' @param phone Phone number for the location
#' @param website Location-specific website, if different from the brewery
#' @param hoursOfOperationExplicit Explict breakdown of the hours of operation. Should be in format "ddd-hh:mm(am/pm)-hh:mm(am/pm)". Multiple days can be passed with the given format, separated by commas. Example: 'mon-8:00am-10:00pm,tue-9:00am-1:00pm' is valid.
#' @param hoursOfOperationNotes Additional, non-time-related notes about the hours of operation.
#' @param tourInfo Brewery tour information for visitors of the location
#' @param timezoneId Timezone ID for the location. This value is automatically updated based on the latitude and longitude of the location, but can be passed as well. Example: America/New_York
#' @param latitude Latitude for the location
#' @param longitude Longitude for the location
#' @param isPrimary Whether or not this is the primary location for the brewery Y N Default
#' @param inPlanning Whether or not this location is in planning Y N Default
#' @param isClosed Whether or not this location is currently closed Y N Default
#' @param openToPublic Whether or not this location is open to the public Y N Default
#' @param locationType Type of location, such as micro brewery or brew pub micro macro nano prewpub production office tasting restaurant cidery meadery
#' @param countryIsoCode Required Country that the location is located in
#' @return none
#' @export
updateLocation <- function(locationId, name, streetAddress, extendedAddress, locality, region, postalCode, phone, website, hoursOfOperationExplicit, hoursOfOperationNotes, tourInfo, timezoneId, latitude, longitude, isPrimary, inPlanning, isClosed, openToPublic, locationType, countryIsoCode){
  # Capture every argument; empty-string values count as "unset" and are
  # dropped before the request is assembled.
  query <- as.list(environment())
  query <- query[query != ""]
  # locationId (the first remaining element) is a path parameter: splice it
  # into the ":locationId" placeholder of the endpoint template.
  endpoint <- "/location/:locationId"
  path_params <- query[1:1]
  for (k in seq_along(path_params)) {
    placeholder <- paste0(":", names(path_params)[[k]])
    endpoint <- sub(placeholder, path_params[[k]], endpoint)
  }
  # Everything else travels with the PUT request as the fields to update.
  query <- query[-(1:length(path_params))]
  response <- makeRequest(endpoint, "PUT", query)
  flattenJsonList(response$data)
}
#' Delete an Location
#'
#' Deletes an existing location
#'
#' @concept Location
#'
#' @param locationId The locationId
#' @return none
#' @export
deleteLocation <- function(locationId){
  # Capture the arguments and drop empty-string ("unset") values.
  query <- as.list(environment())
  query <- query[query != ""]
  # Substitute locationId into the ":locationId" path placeholder.
  endpoint <- "/location/:locationId"
  path_params <- query[1:1]
  for (k in seq_along(path_params)) {
    placeholder <- paste0(":", names(path_params)[[k]])
    endpoint <- sub(placeholder, path_params[[k]], endpoint)
  }
  # Remaining parameters (none for this endpoint) go with the DELETE request.
  query <- query[-(1:length(path_params))]
  response <- makeRequest(endpoint, "DELETE", query)
  flattenJsonList(response$data)
}
| /R/Location.R | no_license | bpb824/brewerydb | R | false | false | 7,814 | r |
#' Get All Locations
#'
#' Gets a listing of all locations. One of the following attributes must be set: locality, postalCode, region
#'
#' @concept Location
#'
#' @param p Page Number
#' @param ids ID's of the locations to return, comma separated. Max 10.
#' @param locality Locality of the location (in US, city)
#' @param region Region of the location (in US, state)
#' @param postalCode Postal Code of a location
#' @param isPrimary Whether the location is the primary one or not
#' @param inPlanning Whether the location is in planning and not open yet
#' @param isClosed Whether the location is closed.
#' @param locationType Key(s) of the type of location. Comma separated. micro macro nano prewpub production office tasting restaurant cidery meadery
#' @param countryIsoCode Two-letter country code of the location
#' @param since Returns everything that has been updated since that date. Max 30 days. In UNIX timestamp format.
#' @param status Status of the location in the API
#' @param order How the results should be ordered name Default breweryName locality region postalCode isPrimary inPlanning isClosed locationType countryIsoCode status createDate updateDate
#' @param sort How the results should be sorted. ASC Default DESC
#' @return id The unique id of the location.
#' @return name The name of the location. Typically this will be like "Main Brewery" for breweries that have one location.
#' @return streetAddress The street address of the location.
#' @return extendedAddress The extended part of the address of the location. This is normally the second line and could be something like Suite #100.
#' @return locality This is the locality / city of the location.
#' @return region This is the region / state of the location.
#' @return postalCode The postal code / zip code of the location.
#' @return phone The phone number of the location.
#' @return website The URL to the website for the location.
#' @return hoursOfOperation A combination of the hoursOfOperationExplicit and hoursOfOperationNotes field, put together in a human-readable format.
#' @return hoursOfOperationExplicitString An array of days and open times. Days may have multiple start and end time sets.
#' @return hoursOfOperationNotes A free-text field for notes about the hours of operation.
#' @return tourInfo A free-text field containing information about brewery tours at the location.
#' @return timezoneId The timezone ID for the location. Example: America/New_York
#' @return latitude The latitude of the location.
#' @return longitude The longitude of the location.
#' @return isPrimary Whether or not this location is the primary location for the associated brewery.
#' @return inPlanning Whether or not the location is in planning.
#' @return isClosed Whether or not the location is closed (this means permanently closed)
#' @return openToPublic Whether or not the location is open to the public.
#' @return locationType The key for the type of location. See the locationTypeDisplay for the full display string.
#' @return locationTypeDisplay The display string that corresponds to the locationType.
#' @return countryIsoCode The two character country code of the location. See the country field for detailed information about the country.
#' @return country Detailed information on the country. This field is an object that contains isoCode, name, displayName, isoThree, numbercode, and urlTitle.
#' @return yearOpened The year that the location opened.
#' @return breweryId The id of the associated brewery. See the brewer object for all the details on the associated brewery.
#' @return brewery All the information for the brewery associated with the location.
#' @export
getEveryLocation <- function(p, ids, locality, region, postalCode, isPrimary, inPlanning, isClosed, locationType, countryIsoCode, since, status, order, sort){
  # Snapshot all arguments as the query string; "" values are treated as
  # "not supplied" and removed.
  # NOTE(review): arguments left missing (not passed as "") may break the
  # `!= ""` filter -- confirm how callers invoke this.
  params <- as.list(environment())
  params <- params[params != ""]
  endpoint <- "/locations"
  # GET /locations, then flatten the JSON list into a data frame.
  returnData <- makeRequest(endpoint, "GET", params)
  flattenJsonList(returnData$data)
}
#' Get a single location
#'
#' Gets a single Location
#'
#' @concept Location
#'
#' @param locationId The locationId
#' @return none
#' @export
getLocation <- function(locationId){
  # Collect the arguments; "" values count as unset and are dropped.
  params <- as.list(environment())
  params <- params[params != ""]
  endpoint <- "/location/:locationId"
  # The first element (locationId) is a path parameter: substitute it into
  # the ":locationId" placeholder of the endpoint template.
  additionalParams <- params[1:1]
  for(j in 1:length(additionalParams)){
    endpoint <- sub(paste0(":", names(additionalParams)[[j]]), additionalParams[[j]], endpoint)
  }
  # Remaining parameters (none for this endpoint) form the query string.
  params <- params[-(1:length(additionalParams))]
  returnData <- makeRequest(endpoint, "GET", params)
  flattenJsonList(returnData$data)
}
#' Update an location
#'
#' Updates an existing location.
#'
#' @concept Location
#'
#' @param locationId The locationId
#' @param name Nickname for the location
#' @param streetAddress Street address
#' @param extendedAddress Extended address, such as suite or apartment number
#' @param locality Locality, or city, where the location is at
#' @param region Region, also known as state or province
#' @param postalCode Postal code
#' @param phone Phone number for the location
#' @param website Location-specific website, if different from the brewery
#' @param hoursOfOperationExplicit Explict breakdown of the hours of operation. Should be in format "ddd-hh:mm(am/pm)-hh:mm(am/pm)". Multiple days can be passed with the given format, separated by commas. Example: 'mon-8:00am-10:00pm,tue-9:00am-1:00pm' is valid.
#' @param hoursOfOperationNotes Additional, non-time-related notes about the hours of operation.
#' @param tourInfo Brewery tour information for visitors of the location
#' @param timezoneId Timezone ID for the location. This value is automatically updated based on the latitude and longitude of the location, but can be passed as well. Example: America/New_York
#' @param latitude Latitude for the location
#' @param longitude Longitude for the location
#' @param isPrimary Whether or not this is the primary location for the brewery Y N Default
#' @param inPlanning Whether or not this location is in planning Y N Default
#' @param isClosed Whether or not this location is currently closed Y N Default
#' @param openToPublic Whether or not this location is open to the public Y N Default
#' @param locationType Type of location, such as micro brewery or brew pub micro macro nano prewpub production office tasting restaurant cidery meadery
#' @param countryIsoCode Required Country that the location is located in
#' @return none
#' @export
updateLocation <- function(locationId, name, streetAddress, extendedAddress, locality, region, postalCode, phone, website, hoursOfOperationExplicit, hoursOfOperationNotes, tourInfo, timezoneId, latitude, longitude, isPrimary, inPlanning, isClosed, openToPublic, locationType, countryIsoCode){
  # Collect every argument; "" values count as unset and are dropped.
  params <- as.list(environment())
  params <- params[params != ""]
  endpoint <- "/location/:locationId"
  # locationId (first element) is a path parameter: splice it into the
  # ":locationId" placeholder of the endpoint template.
  additionalParams <- params[1:1]
  for(j in 1:length(additionalParams)){
    endpoint <- sub(paste0(":", names(additionalParams)[[j]]), additionalParams[[j]], endpoint)
  }
  # Everything else travels with the PUT request as the fields to update.
  params <- params[-(1:length(additionalParams))]
  returnData <- makeRequest(endpoint, "PUT", params)
  flattenJsonList(returnData$data)
}
#' Delete an Location
#'
#' Deletes an existing location
#'
#' @concept Location
#'
#' @param locationId The locationId
#' @return none
#' @export
deleteLocation <- function(locationId){
  # Collect the arguments; "" values count as unset and are dropped.
  params <- as.list(environment())
  params <- params[params != ""]
  endpoint <- "/location/:locationId"
  # Substitute locationId into the ":locationId" path placeholder.
  additionalParams <- params[1:1]
  for(j in 1:length(additionalParams)){
    endpoint <- sub(paste0(":", names(additionalParams)[[j]]), additionalParams[[j]], endpoint)
  }
  # Remaining parameters (none for this endpoint) go with the DELETE request.
  params <- params[-(1:length(additionalParams))]
  returnData <- makeRequest(endpoint, "DELETE", params)
  flattenJsonList(returnData$data)
}
|
# Build a word cloud from all text files in the tutorial's temp directory.
library(tm)
library(SnowballC)
library(wordcloud)  # brewer.pal() comes from RColorBrewer, pulled in by wordcloud
# Take in all the files from the temp directory.
hurricane <- Corpus (DirSource('Desktop/TextAnalyticsWebsite/textAnalytics/TextAnalytics/Tutorials/tutorial4/temp/'))
inspect(hurricane)
# Standard preprocessing, already done most in the python file
hurricane <- tm_map(hurricane, stripWhitespace)
# Base functions such as tolower() must be wrapped in content_transformer():
# since tm 0.6, mapping a plain function over a corpus strips the document
# class and makes the subsequent tm_map()/wordcloud() calls fail.
hurricane <- tm_map(hurricane, content_transformer(tolower))
hurricane <- tm_map(hurricane, removeWords, stopwords('english'))
hurricane <- tm_map(hurricane, stemDocument)
wordcloud(hurricane, scale=c(5,0.5), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, 'Dark2'))
| /TextAnalytics/Tutorials/tutorial4/wordCloud1.R | no_license | kellyj40/textAnalytics | R | false | false | 639 | r | library(tm)
library(SnowballC)
library(wordcloud)  # brewer.pal() comes from RColorBrewer, pulled in by wordcloud
# Take in all the files from the temp directory.
hurricane <- Corpus (DirSource('Desktop/TextAnalyticsWebsite/textAnalytics/TextAnalytics/Tutorials/tutorial4/temp/'))
inspect(hurricane)
# Standard preprocessing, already done most in the python file
hurricane <- tm_map(hurricane, stripWhitespace)
# NOTE(review): with tm >= 0.6 this should be
# tm_map(hurricane, content_transformer(tolower)); mapping plain tolower
# strips the document class and breaks the following steps -- confirm the
# tm version in use.
hurricane <- tm_map(hurricane, tolower)
hurricane <- tm_map(hurricane, removeWords, stopwords('english'))
hurricane <- tm_map(hurricane, stemDocument)
wordcloud(hurricane, scale=c(5,0.5), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, 'Dark2'))
|
# Random-forest credit-rating classifier: fit an initial model, refit on a
# reduced feature set, tune hyperparameters by grid search, and evaluate
# (confusion matrices + ROC / precision-recall curves).
# NOTE(review): assumes `train.data` and `test.data` already exist in the
# workspace (loaded by an earlier script), with the class label
# `credit.rating` in column 1.
library(randomForest) #rf model
library(caret) # feature selection
library(e1071) # model tuning
library(ROCR) # model evaluation
source("performance_plot_utils.R") # plot curves (plot.roc.curve, plot.pr.curve)
## separate feature and class variables
test.feature.vars <- test.data[,-1]
test.class.var <- test.data[,1]
## build initial model with training data
formula.init <- "credit.rating ~ ."
formula.init <- as.formula(formula.init)
# NOTE(review): prefer TRUE over T (T is a reassignable variable).
rf.model <- randomForest(formula.init, data = train.data, importance=T, proximity=T)
## view model details
print(rf.model)
## predict and evaluate results
# positive="1" makes class "1" the positive class in all metrics below.
rf.predictions <- predict(rf.model, test.feature.vars, type="class")
confusionMatrix(data=rf.predictions, reference=test.class.var, positive="1")
## build new model with selected features
formula.new <- "credit.rating ~ account.balance + savings +
                            credit.amount + credit.duration.months +
                            previous.credit.payment.status"
formula.new <- as.formula(formula.new)
rf.model.new <- randomForest(formula.new, data = train.data,
                             importance=T, proximity=T)
## predict and evaluate results
rf.predictions.new <- predict(rf.model.new, test.feature.vars, type="class")
confusionMatrix(data=rf.predictions.new, reference=test.class.var, positive="1")
## hyperparameter optimizations
# run grid search over nodesize x ntree (mtry held fixed at 3)
nodesize.vals <- c(2, 3, 4, 5)
ntree.vals <- c(200, 500, 1000, 2000)
tuning.results <- tune.randomForest(formula.new,
                                    data = train.data,
                                    mtry=3,
                                    nodesize=nodesize.vals,
                                    ntree=ntree.vals)
print(tuning.results)
# get best model and predict and evaluate performance
rf.model.best <- tuning.results$best.model
rf.predictions.best <- predict(rf.model.best, test.feature.vars, type="class")
confusionMatrix(data=rf.predictions.best, reference=test.class.var, positive="1")
## plot model evaluation metric curves
# re-predict as class probabilities; column 2 is presumably the probability
# of the positive class "1" -- confirm against the factor level order.
rf.predictions.best <- predict(rf.model.best, test.feature.vars, type="prob")
rf.prediction.values <- rf.predictions.best[,2]
predictions <- prediction(rf.prediction.values, test.class.var)
par(mfrow=c(1,2))
plot.roc.curve(predictions, title.text="RF ROC Curve")
plot.pr.curve(predictions, title.text="RF Precision/Recall Curve") | /demo/app_intro/examples/2016_RMachineLearningByExample/Ch6_PredictCredit/rf_classifier.R | permissive | stharrold/demo | R | false | false | 2,364 | r | library(randomForest) #rf model
# Random-forest credit-rating classifier (continued from library(randomForest)
# on the previous line): fit, refit on selected features, grid-search tune,
# and evaluate with confusion matrices plus ROC / precision-recall curves.
# NOTE(review): assumes `train.data` and `test.data` already exist in the
# workspace, with the class label `credit.rating` in column 1.
library(caret) # feature selection
library(e1071) # model tuning
library(ROCR) # model evaluation
source("performance_plot_utils.R") # plot curves (plot.roc.curve, plot.pr.curve)
## separate feature and class variables
test.feature.vars <- test.data[,-1]
test.class.var <- test.data[,1]
## build initial model with training data
formula.init <- "credit.rating ~ ."
formula.init <- as.formula(formula.init)
# NOTE(review): prefer TRUE over T (T is a reassignable variable).
rf.model <- randomForest(formula.init, data = train.data, importance=T, proximity=T)
## view model details
print(rf.model)
## predict and evaluate results
# positive="1" makes class "1" the positive class in all metrics below.
rf.predictions <- predict(rf.model, test.feature.vars, type="class")
confusionMatrix(data=rf.predictions, reference=test.class.var, positive="1")
## build new model with selected features
formula.new <- "credit.rating ~ account.balance + savings +
                            credit.amount + credit.duration.months +
                            previous.credit.payment.status"
formula.new <- as.formula(formula.new)
rf.model.new <- randomForest(formula.new, data = train.data,
                             importance=T, proximity=T)
## predict and evaluate results
rf.predictions.new <- predict(rf.model.new, test.feature.vars, type="class")
confusionMatrix(data=rf.predictions.new, reference=test.class.var, positive="1")
## hyperparameter optimizations
# run grid search over nodesize x ntree (mtry held fixed at 3)
nodesize.vals <- c(2, 3, 4, 5)
ntree.vals <- c(200, 500, 1000, 2000)
tuning.results <- tune.randomForest(formula.new,
                                    data = train.data,
                                    mtry=3,
                                    nodesize=nodesize.vals,
                                    ntree=ntree.vals)
print(tuning.results)
# get best model and predict and evaluate performance
rf.model.best <- tuning.results$best.model
rf.predictions.best <- predict(rf.model.best, test.feature.vars, type="class")
confusionMatrix(data=rf.predictions.best, reference=test.class.var, positive="1")
## plot model evaluation metric curves
# re-predict as class probabilities; column 2 is presumably the probability
# of the positive class "1" -- confirm against the factor level order.
rf.predictions.best <- predict(rf.model.best, test.feature.vars, type="prob")
rf.prediction.values <- rf.predictions.best[,2]
predictions <- prediction(rf.prediction.values, test.class.var)
par(mfrow=c(1,2))
plot.roc.curve(predictions, title.text="RF ROC Curve")
plot.pr.curve(predictions, title.text="RF Precision/Recall Curve") |
## Maps of better methods
## http://rspatial.r-forge.r-project.org/gallery/#fig09.R
## http://www.r-bloggers.com/grid2polygons-2/
#
# For each verification score ("quality") this script reads one text file per
# calibration method and month, averages the score over grid points, and
# draws a 12-month x 7-lead-time heat map per method plus a shared colour
# legend, written to <ObsDir>/verification/<quality>.png.
#
# NOTE(review): png() is opened once per quality but never closed inside the
# loop; the devices stack and are only flushed by the graphics.off() at the
# end of the script -- confirm that call is kept.
# Load libraries
#source("http://www.phaget4.org/R/myImagePlot.R")
quality = c('bias','corr','crpss_clim')
meth = c('R','dc','qm','bma')
# Load coord files
ObsDir = 'E:/calibMET/temp/'
Nmonth = 12;Nlead = 7
Mname = c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')
# reversed row order so January ends up at the top of the image() plot
reverse = seq(12,1,-1)
yLabels <- Mname[reverse]
xLabels <- seq(1,Nlead,1)
for (iq in 1:length(quality)){
  # score values: month x lead time x method
  values = array(NaN,dim=c(Nmonth,Nlead,length(meth)))
  for (imeth in 1:length(meth)){#length(meth)
    for (nmonth in 1:Nmonth){
      name_file = paste(ObsDir,'verification/',meth[imeth],'_',quality[iq],'_m',nmonth,'.txt',sep="")
      # each file holds a 7 x 724 table; after the transpose, rows are the
      # 724 grid points and columns the 7 lead times, so colMeans() gives
      # one spatial mean per lead time
      val = t(matrix(scan(name_file),nrow=7,ncol=724))
      values[nmonth,,imeth] = colMeans(val,na.rm = TRUE)
    } # end loop month
  }
  #Plotting: 4 method panels side by side plus 1 narrow legend column
  m <- matrix(seq(1:5),nrow = 1,ncol = 5,byrow = TRUE) # No Meth + 1 (for legend)
  name_plt = paste(ObsDir,'verification/',quality[iq],'.png',sep='')
  png(name_plt,width=750,height=200)
  #par(cex.lab=3)
  layout(mat = m, widths=c(2,2,2,2,0.5), heights=array(1,dim=c(5)))
  # diverging red-white-blue palette centred on Thresh (= 0)
  nHalf = 10;Thresh = 0
  ## Make vector of colors for values below threshold
  rc1 = colorRampPalette(colors = c("red", "white"), space="Lab")(nHalf)
  ## Make vector of colors for values above threshold
  rc2 = colorRampPalette(colors = c("white", "blue"), space="Lab")(nHalf)
  ColorRamp = c(rc1, rc2)
  ## In your example, this line sets the color for values between 49 and 51.
  #ColorRamp[c(nHalf, nHalf+1)] = rgb(t(col2rgb("white")), maxColorValue=256)
  # colour-scale limits per quality: bias in [-100,100], the rest in [-1,1]
  if (iq == 1) {edg = c(-100,100)}
  else if (iq == 2) {edg = c(-1,1)}
  else edg = c(-1,1)
  #min = min(values,na.rm = TRUE);max=max(values,na.rm = TRUE)
  #ColorLevels <- seq(min, max, length=length(ColorRamp))
  rb1 = seq(edg[1], Thresh, length.out=nHalf+1)
  rb2 = seq(Thresh, edg[2], length.out=nHalf+1)[-1]
  ColorLevels = c(rb1, rb2)
  # Plot methods
  for (imet in 1:4){
    dat <- values[,,imet]
    #reverse <- nrow(dat) : 1
    #yLabels <- yLabels[reverse]
    dat <- dat[reverse,]
    main0 = meth[imet]
    #par(mar = c(3,2,2.5,1))
    image(t(dat),col=ColorRamp , axes=FALSE, zlim=c(edg[1],edg[2]),main = main0,xlab = 'lt (months)')
    #image(t(dat),col=ColorRamp, zlim=c(min,max))
    # tick positions in the normalised [0,1] coordinates that image() uses
    at1 = (0:(length(xLabels)-1))/(length(xLabels)-1)
    at2 = (0:(length(yLabels)-1))/(length(yLabels)-1)
    axis(1,at=at1, labels=xLabels, cex.axis=1)
    # NOTE(review): `las= HORIZONTAL<-1` assigns 1 to a global variable
    # HORIZONTAL as a side effect; plain `las = 1` would do.
    axis(2,at=at2, labels=yLabels, las= HORIZONTAL<-1,
         cex.axis=1)
  }
  #Plot legend
  par(mar = c(3,2,2.5,1))
  image(1, ColorLevels,
        matrix(data=ColorLevels, ncol=length(ColorLevels),nrow=1),
        col=ColorRamp,
        xlab="",ylab="",
        xaxt="n",axes = FALSE)
  axis(2,at=ColorLevels, labels=ColorLevels, las= HORIZONTAL<-1,
       cex.axis=1)
} # End qualities
graphics.off() | /temp/verification/make_figure_verification.R | no_license | diana-lucatero/calibMET | R | false | false | 2,915 | r | ## Maps of better methods
## http://rspatial.r-forge.r-project.org/gallery/#fig09.R
## http://www.r-bloggers.com/grid2polygons-2/
# Load libraries
#source("http://www.phaget4.org/R/myImagePlot.R")
quality = c('bias','corr','crpss_clim')
meth = c('R','dc','qm','bma')
# Load coord files
ObsDir = 'E:/calibMET/temp/'
Nmonth = 12;Nlead = 7
Mname = c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')
reverse = seq(12,1,-1)
yLabels <- Mname[reverse]
xLabels <- seq(1,Nlead,1)
for (iq in 1:length(quality)){
values = array(NaN,dim=c(Nmonth,Nlead,length(meth)))
for (imeth in 1:length(meth)){#length(meth)
for (nmonth in 1:Nmonth){
name_file = paste(ObsDir,'verification/',meth[imeth],'_',quality[iq],'_m',nmonth,'.txt',sep="")
val = t(matrix(scan(name_file),nrow=7,ncol=724))
values[nmonth,,imeth] = colMeans(val,na.rm = TRUE)
} # end loop month
}
#Plotting
m <- matrix(seq(1:5),nrow = 1,ncol = 5,byrow = TRUE) # No Meth + 1 (for legend)
name_plt = paste(ObsDir,'verification/',quality[iq],'.png',sep='')
png(name_plt,width=750,height=200)
#par(cex.lab=3)
layout(mat = m, widths=c(2,2,2,2,0.5), heights=array(1,dim=c(5)))
nHalf = 10;Thresh = 0
## Make vector of colors for values below threshold
rc1 = colorRampPalette(colors = c("red", "white"), space="Lab")(nHalf)
## Make vector of colors for values above threshold
rc2 = colorRampPalette(colors = c("white", "blue"), space="Lab")(nHalf)
ColorRamp = c(rc1, rc2)
## In your example, this line sets the color for values between 49 and 51.
#ColorRamp[c(nHalf, nHalf+1)] = rgb(t(col2rgb("white")), maxColorValue=256)
if (iq == 1) {edg = c(-100,100)}
else if (iq == 2) {edg = c(-1,1)}
else edg = c(-1,1)
#min = min(values,na.rm = TRUE);max=max(values,na.rm = TRUE)
#ColorLevels <- seq(min, max, length=length(ColorRamp))
rb1 = seq(edg[1], Thresh, length.out=nHalf+1)
rb2 = seq(Thresh, edg[2], length.out=nHalf+1)[-1]
ColorLevels = c(rb1, rb2)
# Plot methods
for (imet in 1:4){
dat <- values[,,imet]
#reverse <- nrow(dat) : 1
#yLabels <- yLabels[reverse]
dat <- dat[reverse,]
main0 = meth[imet]
#par(mar = c(3,2,2.5,1))
image(t(dat),col=ColorRamp , axes=FALSE, zlim=c(edg[1],edg[2]),main = main0,xlab = 'lt (months)')
#image(t(dat),col=ColorRamp, zlim=c(min,max))
at1 = (0:(length(xLabels)-1))/(length(xLabels)-1)
at2 = (0:(length(yLabels)-1))/(length(yLabels)-1)
axis(1,at=at1, labels=xLabels, cex.axis=1)
axis(2,at=at2, labels=yLabels, las= HORIZONTAL<-1,
cex.axis=1)
}
#Plot legend
par(mar = c(3,2,2.5,1))
image(1, ColorLevels,
matrix(data=ColorLevels, ncol=length(ColorLevels),nrow=1),
col=ColorRamp,
xlab="",ylab="",
xaxt="n",axes = FALSE)
axis(2,at=ColorLevels, labels=ColorLevels, las= HORIZONTAL<-1,
cex.axis=1)
} # End qualities
graphics.off() |
\name{burdenOfFiltering}
\alias{burdenOfFiltering}
\title{Burden of filtering}
\usage{
burdenOfFiltering(sc, groups, byGroup = FALSE,
filt_control = NULL)
}
\arguments{
\item{sc}{SingleCellAssay or derived class}
\item{groups}{the groups by which to filter}
\item{byGroup}{logical indicating whether to filter by
group}
\item{filt_control}{a list of control parameters.}
}
\description{
what proportions of wells are filtered due to different
criteria
}
| /man/burdenOfFiltering.Rd | no_license | chesterni/SingleCellAssay | R | false | false | 478 | rd | \name{burdenOfFiltering}
\alias{burdenOfFiltering}
\title{Burden of filtering}
\usage{
burdenOfFiltering(sc, groups, byGroup = FALSE,
filt_control = NULL)
}
\arguments{
\item{sc}{SingleCellAssay or derived class}
\item{groups}{the groups by which to filter}
\item{byGroup}{logical indicating whether to filter by
group}
\item{filt_control}{a list of control parameters.}
}
\description{
what proportions of wells are filtered due to different
criteria
}
|
library(multcomp)
##Creating the variables for the models
##Weighted model
##Data2
#DOC
model_data2_DOC <- lmer(data2$DOC ~ scale(data2$Mean_of_Sunlight_Duration_minutes) + scale(data2$AGE) + (1|data2$SEX) + (1|data2$REGION), weights= data2$WEIGHT)
#Number of diagnoses
model_data2_DIAGNOSn <- lmer(data2$DIAGNOSn ~ scale(data2$Mean_of_Sunlight_Duration_minutes) + scale(data2$AGE) + (1|data2$SEX) + (1|data2$REGION), weights= data2$WEIGHT)
#Number of procedures
model_data2_PROCEDUn <- lmer(data2$PROCEDUn ~ scale(data2$Mean_of_Sunlight_Duration_minutes) + scale(data2$AGE) + (1|data2$SEX) + (1|data2$REGION), weights= data2$WEIGHT)
#Proportion discharged home
#Proportion of elective admissions
##Psych patients
#DOC
model_psychiatric_diagnoses_DOC <- lmer(scale(psychiatric_diagnoses$DOC) ~ scale(psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(psychiatric_diagnoses$AGE) + (1|psychiatric_diagnoses$SEX) + (1|psychiatric_diagnoses$REGION), weights= psychiatric_diagnoses$WEIGHT, control = lmerControl(optimizer ="Nelder_Mead"))
#Number of diagnoses
model_psychiatric_diagnoses_DIAGNOSn <- lmer(scale(psychiatric_diagnoses$DIAGNOSn) ~ scale(psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(psychiatric_diagnoses$AGE) + (1|psychiatric_diagnoses$SEX) + (1|psychiatric_diagnoses$REGION), weights= psychiatric_diagnoses$WEIGHT)
#Number of procedures
model_psychiatric_diagnoses_PROCEDUn <- lmer(scale(psychiatric_diagnoses$PROCEDUn) ~ scale(psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(psychiatric_diagnoses$AGE) + (1|psychiatric_diagnoses$SEX) + (1|psychiatric_diagnoses$REGION), weights= psychiatric_diagnoses$WEIGHT)
##Nonpsych patients
#DOC
model_data2.minus.psychiatric_diagnoses_DOC <- lmer(scale(data2.minus.psychiatric_diagnoses$DOC) ~ scale(data2.minus.psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(data2.minus.psychiatric_diagnoses$AGE) + (1|data2.minus.psychiatric_diagnoses$SEX) + (1|data2.minus.psychiatric_diagnoses$REGION), weights= data2.minus.psychiatric_diagnoses$WEIGHT)
#Number of diagnoses
model_data2.minus.psychiatric_diagnoses_DIAGNOSn <- lmer(scale(data2.minus.psychiatric_diagnoses$DIAGNOSn) ~ scale(data2.minus.psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(data2.minus.psychiatric_diagnoses$AGE) + (1|data2.minus.psychiatric_diagnoses$SEX) + (1|data2.minus.psychiatric_diagnoses$REGION), weights= data2.minus.psychiatric_diagnoses$WEIGHT)
#Number of procedures
model_data2.minus.psychiatric_diagnoses_PROCEDUn <- lmer(scale(data2.minus.psychiatric_diagnoses$PROCEDUn) ~ scale(data2.minus.psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(data2.minus.psychiatric_diagnoses$AGE) + (1|data2.minus.psychiatric_diagnoses$SEX) + (1|data2.minus.psychiatric_diagnoses$REGION), weights= data2.minus.psychiatric_diagnoses$WEIGHT)
#Graph the estimate with the confidence interval
tmp1 <- as.data.frame(confint(glht(model_psychiatric_diagnoses_DOC))$confint)
tmp2 <- as.data.frame(confint(glht(model_psychiatric_diagnoses_DIAGNOSn))$confint)
tmp3 <- as.data.frame(confint(glht(model_psychiatric_diagnoses_PROCEDUn))$confint)
tmp4 <- as.data.frame(confint(glht(model_data2.minus.psychiatric_diagnoses_DOC))$confint)
tmp5 <- as.data.frame(confint(glht(model_data2.minus.psychiatric_diagnoses_DIAGNOSn))$confint)
tmp6 <- as.data.frame(confint(glht(model_data2.minus.psychiatric_diagnoses_PROCEDUn))$confint)
tmp_collated <- rbind(tmp1[2,], tmp2[2,], tmp3[2,], tmp4[2,], tmp5[2,], tmp6[2,])
tmp_collated$Comparison <- rownames(tmp_collated)
tmp_collated$Group <- c("Psychiatric","Psychiatric","Psychiatric","Non-Psychiatric","Non-Psychiatric","Non-Psychiatric")
tmp_collated
ggplot(tmp_collated, aes(x = factor(Comparison), y = Estimate, ymin = lwr, ymax = upr))+
geom_errorbar()+
geom_point()+
geom_text(aes(label = paste(round(Estimate, digits=4))), hjust = -0.2) +
geom_hline(yintercept= 0, linetype="dashed", colour="skyblue")+
scale_x_discrete(name= "",
labels= c("DOC", "Diagnoses", "Procedures", "DOC", "Diagnoses", "Procedures"))+
facet_wrap(~Group, strip.position = "bottom", scales = "free_x") +
theme(panel.background = element_blank(),
axis.line = element_line(colour = "black"),
panel.grid.major = element_line(colour = "grey", size = 0.5),
panel.grid.major.x = element_blank(),
strip.background = element_blank(),
strip.placement = "outside")
############ICD 9 diagnoses####################
#Convert the catagorical variables into factors
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$RACE <- factor(ICD_9_code_groups[[i]]$RACE)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$OWNER <- factor(ICD_9_code_groups[[i]]$OWNER)}
for(i in 1:length(ICD_9_code_groups.short)){ICD_9_code_groups.short[[i]]$SEX <- factor(ICD_9_code_groups.short[[i]]$SEX)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$ESOP1 <- factor(ICD_9_code_groups[[i]]$ESOP1)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$MARSTAT <- factor(ICD_9_code_groups[[i]]$MARSTAT)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$REGION <- factor(ICD_9_code_groups[[i]]$REGION)}
#get a list ready to recieve the outputs from the model (correlation coefficients for each disease)
disease_model_DOC <- list()
#The model. Multiple regression.
for(i in 1:length(ICD_9_code_groups.short)){disease_model_DOC[[i]] <- lmer(scale(ICD_9_code_groups.short[[i]]$DOC) ~ scale(ICD_9_code_groups.short[[i]]$Mean_of_Sunlight_Duration_minutes) + scale(ICD_9_code_groups.short[[i]]$AGE) + (1|ICD_9_code_groups.short[[i]]$SEX) + (1|ICD_9_code_groups.short[[i]]$REGION),
weights= ICD_9_code_groups.short[[i]]$WEIGHT)
}
#################################################################
#Graph the estimate with the confidence interval
tmp.list <- list()
for(i in 1:length(disease_model_DOC)){
tmp.list[[i]] <- as.data.frame(confint(glht(disease_model_DOC[[i]]))$confint)#extract
tmp.list[[i]] <- tmp.list[[i]][,2]#cut down to coloumn 2
}
tmp_collated <- do.call(rbind, tmp.list)#bind the rooms together
tmp_collated <- as.data.frame(tmp_collated)
tmp_collated$Comparison <- rownames(tmp_collated) #name it
tmp_collated
#########Different methods to get the coefficients###############
#get a summary of the list of outputs
summary_disease_model_DOC <- list()
for(i in 1:16){summary_disease_model_DOC[[i]]<- summary(disease_model_DOC[[i]])}
#extract coefficients
coefficients_disease_model_DOC <- list()
for(i in 1:16){coefficients_disease_model_DOC[[i]] <- summary_disease_model_DOC[[i]]$coefficients[,c(1)]}
#extract std. error values
se.values_disease_model_DOC <- list()
for(i in 1:16){se.values_disease_model_DOC[[i]] <- summary_disease_model_DOC[[i]]$coefficients[,c(2)]}
###Get them into one dataframe
#Transpose so that you can bind by ROWS
for(i in 1:16){coefficients_disease_model_DOC[[i]] <- t(coefficients_disease_model_DOC[[i]])}
#As dataframes so you can yse rbind.fill
for(i in 1:16){coefficients_disease_model_DOC[[i]] <- as.data.frame(coefficients_disease_model_DOC[[i]])}
#Put them end to end
coefficients_disease_model_DOC <- rbind.fill(coefficients_disease_model_DOC)
coefficients_disease_model_DOC$Diagnosis <- c(1:16)
#Transpose so that you can bind by ROWS
for(i in 1:16){se.values_disease_model_DOC[[i]] <- t(se.values_disease_model_DOC[[i]])}
#As dataframes so you can yse rbind.fill
for(i in 1:16){se.values_disease_model_DOC[[i]] <- as.data.frame(se.values_disease_model_DOC[[i]])}
#Put them end to end
se.values_disease_model_DOC <- rbind.fill(se.values_disease_model_DOC)
se.values_disease_model_DOC$Diagnosis <- c(1:16)
coefficients.se.values_disease_model_DOC <- merge(se.values_disease_model_DOC, coefficients_disease_model_DOC, by = "Diagnosis")
colnames(coefficients.se.values_disease_model_DOC) <- c("Diagnoses", "Intercept", "SE", "Age_coefficient", "Intercept1", "Sun_coefficient", "Age_coefficient1")
summary_disease_model_DOC[[1]]
coefficients_for_plot <- coefficients.se.values_disease_model_DOC[,c(1,3,6)] #select the columns that have the diagnoses, coefficient and the SE
#Get the confidence interval from the SE
coefficients_for_plot$confidence <- (coefficients_for_plot$SE*1.96)
#Name the rows
coefficients_for_plot[,1] <- c("Organic_psychotic_conditions",
"Senile_and_presenile_organic_psychotic_conditions",
"Alcoholic_psychoses",
"Drug_psychoses",
"Transient_organic_psychotic_conditions",
"Other_organic_psychotic_conditions_chronic",
"Other_psychoses",
"Schizophrenic_psychoses",
"Affective_psychoses",
"Paranoid_states",
"Other_nonorganic_psychoses",
"Psychoses_with_origin_specific_to_childhood",
"Neurotic_disorders_personality_disorders_and_nonpsychotic_mental_disorders",
"Neurotic_disorders",
"Personality_disorders",
"Sexual_deviations",
"Psychoactive_substance",
"Alcohol_dependence_syndrome",
"Drug_dependence",
"Nondependent_aduse_of_drugs",
"Other_primarily_adult_onset",
"Physiological_malfunction_arising_from_mental_factors",
"Special_symptoms_or_syndromes_not_elsewhere_classified",
"Acute_reaction_to_stress",
"Adjustment_reaction",
"Specific_nonpsychotic_mental_disorders_following_brain_damage",
"Depressive_disorder_not_elsewhere_classified",
"Mental_disorders_childhood",
"Disturbance_of_conduct_not_elsewhere_classified",
"Disturbance_of_emotions_specific_to_childhood_and_adolescence",
"Hyperkinetic_syndrome_of_childhood",
"Specific_delays_in_development",
"Psychic_factors",
"Mental_retardation")
################################################################
ggplot(coefficients_for_plot, aes(x = factor(reorder(coefficients_for_plot$Diagnoses, -coefficients_for_plot$Sun_coefficient)), y = coefficients_for_plot$Sun_coefficient, ymin = (coefficients_for_plot$Sun_coefficient-coefficients_for_plot$confidence), ymax = (coefficients_for_plot$Sun_coefficient+coefficients_for_plot$confidence), colour = coefficients_for_plot$Sun_coefficient > 0))+
scale_colour_manual(name = 'PC1 > 0', values = setNames(c('skyblue','grey'),c(T, F)))+
geom_errorbar()+
geom_point()+
geom_text(aes(label = paste(round(coefficients_for_plot$Sun_coefficient, digits=4))), hjust = -0.2)+
geom_hline(yintercept= 0, linetype="dashed", colour="skyblue")+
scale_x_discrete(name= "")+
scale_y_continuous(name= "Correlation coefficient with light duration")+
theme(panel.background = element_blank(),
axis.line = element_line(colour = "black"),
panel.grid.major = element_line(colour = "grey", size = 0.5),
panel.grid.major.x = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "none")
#Plot the table
formattable::formattable(coefficients.p.values_disease_model_DOC,
align = c("l", "l", rep("r", NCOL(coefficients_disease_model_DOC))),
c(hide, format))
#######trash######
ggplot(data2.short, aes(x=data2.short$Mean_of_Sunlight_Duration_minutes, y= data2.short$DOC))+
geom_point()+
geom_smooth()+
coord_cartesian(ylim= c(3,11))
test.model <- lmer(DOC ~ data2.short$Mean_of_Sunlight_Duration_minutes + data2.short$AGE + (data2.short$Mean_of_Sunlight_Duration_minutes|data2.short$REGION), data= data2.short)
coef(test.model)
tmp <- as.data.frame(confint(glht(test.model))$confint)
tmp$Comparison <- rownames(tmp)
ggplot(tmp, aes(x = Comparison, y = Estimate, ymin = lwr, ymax = upr)) +
geom_errorbar() + geom_point()
stargazer(test.model, type = "text",
digits = 3,
star.cutoffs = c(0.05, 0.01, 0.001),
digit.separator = "")
| /Code for lmer.R | no_license | HugoAstley/Light-project | R | false | false | 13,327 | r | library(multcomp)
##Creating the variables for the models
##Weighted model
##Data2
#DOC
model_data2_DOC <- lmer(data2$DOC ~ scale(data2$Mean_of_Sunlight_Duration_minutes) + scale(data2$AGE) + (1|data2$SEX) + (1|data2$REGION), weights= data2$WEIGHT)
#Number of diagnoses
model_data2_DIAGNOSn <- lmer(data2$DIAGNOSn ~ scale(data2$Mean_of_Sunlight_Duration_minutes) + scale(data2$AGE) + (1|data2$SEX) + (1|data2$REGION), weights= data2$WEIGHT)
#Number of procedures
model_data2_PROCEDUn <- lmer(data2$PROCEDUn ~ scale(data2$Mean_of_Sunlight_Duration_minutes) + scale(data2$AGE) + (1|data2$SEX) + (1|data2$REGION), weights= data2$WEIGHT)
#Proportion discharged home
#Proportion of elective admissions
##Psych patients
#DOC
model_psychiatric_diagnoses_DOC <- lmer(scale(psychiatric_diagnoses$DOC) ~ scale(psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(psychiatric_diagnoses$AGE) + (1|psychiatric_diagnoses$SEX) + (1|psychiatric_diagnoses$REGION), weights= psychiatric_diagnoses$WEIGHT, control = lmerControl(optimizer ="Nelder_Mead"))
#Number of diagnoses
model_psychiatric_diagnoses_DIAGNOSn <- lmer(scale(psychiatric_diagnoses$DIAGNOSn) ~ scale(psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(psychiatric_diagnoses$AGE) + (1|psychiatric_diagnoses$SEX) + (1|psychiatric_diagnoses$REGION), weights= psychiatric_diagnoses$WEIGHT)
#Number of procedures
model_psychiatric_diagnoses_PROCEDUn <- lmer(scale(psychiatric_diagnoses$PROCEDUn) ~ scale(psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(psychiatric_diagnoses$AGE) + (1|psychiatric_diagnoses$SEX) + (1|psychiatric_diagnoses$REGION), weights= psychiatric_diagnoses$WEIGHT)
##Nonpsych patients
#DOC
model_data2.minus.psychiatric_diagnoses_DOC <- lmer(scale(data2.minus.psychiatric_diagnoses$DOC) ~ scale(data2.minus.psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(data2.minus.psychiatric_diagnoses$AGE) + (1|data2.minus.psychiatric_diagnoses$SEX) + (1|data2.minus.psychiatric_diagnoses$REGION), weights= data2.minus.psychiatric_diagnoses$WEIGHT)
#Number of diagnoses
model_data2.minus.psychiatric_diagnoses_DIAGNOSn <- lmer(scale(data2.minus.psychiatric_diagnoses$DIAGNOSn) ~ scale(data2.minus.psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(data2.minus.psychiatric_diagnoses$AGE) + (1|data2.minus.psychiatric_diagnoses$SEX) + (1|data2.minus.psychiatric_diagnoses$REGION), weights= data2.minus.psychiatric_diagnoses$WEIGHT)
#Number of procedures
model_data2.minus.psychiatric_diagnoses_PROCEDUn <- lmer(scale(data2.minus.psychiatric_diagnoses$PROCEDUn) ~ scale(data2.minus.psychiatric_diagnoses$Mean_of_Sunlight_Duration_minutes) + scale(data2.minus.psychiatric_diagnoses$AGE) + (1|data2.minus.psychiatric_diagnoses$SEX) + (1|data2.minus.psychiatric_diagnoses$REGION), weights= data2.minus.psychiatric_diagnoses$WEIGHT)
#Graph the estimate with the confidence interval
tmp1 <- as.data.frame(confint(glht(model_psychiatric_diagnoses_DOC))$confint)
tmp2 <- as.data.frame(confint(glht(model_psychiatric_diagnoses_DIAGNOSn))$confint)
tmp3 <- as.data.frame(confint(glht(model_psychiatric_diagnoses_PROCEDUn))$confint)
tmp4 <- as.data.frame(confint(glht(model_data2.minus.psychiatric_diagnoses_DOC))$confint)
tmp5 <- as.data.frame(confint(glht(model_data2.minus.psychiatric_diagnoses_DIAGNOSn))$confint)
tmp6 <- as.data.frame(confint(glht(model_data2.minus.psychiatric_diagnoses_PROCEDUn))$confint)
tmp_collated <- rbind(tmp1[2,], tmp2[2,], tmp3[2,], tmp4[2,], tmp5[2,], tmp6[2,])
tmp_collated$Comparison <- rownames(tmp_collated)
tmp_collated$Group <- c("Psychiatric","Psychiatric","Psychiatric","Non-Psychiatric","Non-Psychiatric","Non-Psychiatric")
tmp_collated
ggplot(tmp_collated, aes(x = factor(Comparison), y = Estimate, ymin = lwr, ymax = upr))+
geom_errorbar()+
geom_point()+
geom_text(aes(label = paste(round(Estimate, digits=4))), hjust = -0.2) +
geom_hline(yintercept= 0, linetype="dashed", colour="skyblue")+
scale_x_discrete(name= "",
labels= c("DOC", "Diagnoses", "Procedures", "DOC", "Diagnoses", "Procedures"))+
facet_wrap(~Group, strip.position = "bottom", scales = "free_x") +
theme(panel.background = element_blank(),
axis.line = element_line(colour = "black"),
panel.grid.major = element_line(colour = "grey", size = 0.5),
panel.grid.major.x = element_blank(),
strip.background = element_blank(),
strip.placement = "outside")
############ICD 9 diagnoses####################
#Convert the catagorical variables into factors
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$RACE <- factor(ICD_9_code_groups[[i]]$RACE)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$OWNER <- factor(ICD_9_code_groups[[i]]$OWNER)}
for(i in 1:length(ICD_9_code_groups.short)){ICD_9_code_groups.short[[i]]$SEX <- factor(ICD_9_code_groups.short[[i]]$SEX)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$ESOP1 <- factor(ICD_9_code_groups[[i]]$ESOP1)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$MARSTAT <- factor(ICD_9_code_groups[[i]]$MARSTAT)}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups[[i]]$REGION <- factor(ICD_9_code_groups[[i]]$REGION)}
#get a list ready to recieve the outputs from the model (correlation coefficients for each disease)
disease_model_DOC <- list()
#The model. Multiple regression.
for(i in 1:length(ICD_9_code_groups.short)){disease_model_DOC[[i]] <- lmer(scale(ICD_9_code_groups.short[[i]]$DOC) ~ scale(ICD_9_code_groups.short[[i]]$Mean_of_Sunlight_Duration_minutes) + scale(ICD_9_code_groups.short[[i]]$AGE) + (1|ICD_9_code_groups.short[[i]]$SEX) + (1|ICD_9_code_groups.short[[i]]$REGION),
weights= ICD_9_code_groups.short[[i]]$WEIGHT)
}
#################################################################
#Graph the estimate with the confidence interval
tmp.list <- list()
for(i in 1:length(disease_model_DOC)){
tmp.list[[i]] <- as.data.frame(confint(glht(disease_model_DOC[[i]]))$confint)#extract
tmp.list[[i]] <- tmp.list[[i]][,2]#cut down to coloumn 2
}
tmp_collated <- do.call(rbind, tmp.list)#bind the rooms together
tmp_collated <- as.data.frame(tmp_collated)
tmp_collated$Comparison <- rownames(tmp_collated) #name it
tmp_collated
#########Different methods to get the coefficients###############
#get a summary of the list of outputs
summary_disease_model_DOC <- list()
for(i in 1:16){summary_disease_model_DOC[[i]]<- summary(disease_model_DOC[[i]])}
#extract coefficients
coefficients_disease_model_DOC <- list()
for(i in 1:16){coefficients_disease_model_DOC[[i]] <- summary_disease_model_DOC[[i]]$coefficients[,c(1)]}
#extract std. error values
se.values_disease_model_DOC <- list()
for(i in 1:16){se.values_disease_model_DOC[[i]] <- summary_disease_model_DOC[[i]]$coefficients[,c(2)]}
###Get them into one dataframe
#Transpose so that you can bind by ROWS
for(i in 1:16){coefficients_disease_model_DOC[[i]] <- t(coefficients_disease_model_DOC[[i]])}
#As dataframes so you can yse rbind.fill
for(i in 1:16){coefficients_disease_model_DOC[[i]] <- as.data.frame(coefficients_disease_model_DOC[[i]])}
#Put them end to end
coefficients_disease_model_DOC <- rbind.fill(coefficients_disease_model_DOC)
coefficients_disease_model_DOC$Diagnosis <- c(1:16)
#Transpose so that you can bind by ROWS
for(i in 1:16){se.values_disease_model_DOC[[i]] <- t(se.values_disease_model_DOC[[i]])}
#As dataframes so you can yse rbind.fill
for(i in 1:16){se.values_disease_model_DOC[[i]] <- as.data.frame(se.values_disease_model_DOC[[i]])}
#Put them end to end
se.values_disease_model_DOC <- rbind.fill(se.values_disease_model_DOC)
se.values_disease_model_DOC$Diagnosis <- c(1:16)
coefficients.se.values_disease_model_DOC <- merge(se.values_disease_model_DOC, coefficients_disease_model_DOC, by = "Diagnosis")
colnames(coefficients.se.values_disease_model_DOC) <- c("Diagnoses", "Intercept", "SE", "Age_coefficient", "Intercept1", "Sun_coefficient", "Age_coefficient1")
summary_disease_model_DOC[[1]]
coefficients_for_plot <- coefficients.se.values_disease_model_DOC[,c(1,3,6)] #select the columns that have the diagnoses, coefficient and the SE
#Get the confidence interval from the SE
coefficients_for_plot$confidence <- (coefficients_for_plot$SE*1.96)
#Name the rows
coefficients_for_plot[,1] <- c("Organic_psychotic_conditions",
"Senile_and_presenile_organic_psychotic_conditions",
"Alcoholic_psychoses",
"Drug_psychoses",
"Transient_organic_psychotic_conditions",
"Other_organic_psychotic_conditions_chronic",
"Other_psychoses",
"Schizophrenic_psychoses",
"Affective_psychoses",
"Paranoid_states",
"Other_nonorganic_psychoses",
"Psychoses_with_origin_specific_to_childhood",
"Neurotic_disorders_personality_disorders_and_nonpsychotic_mental_disorders",
"Neurotic_disorders",
"Personality_disorders",
"Sexual_deviations",
"Psychoactive_substance",
"Alcohol_dependence_syndrome",
"Drug_dependence",
"Nondependent_aduse_of_drugs",
"Other_primarily_adult_onset",
"Physiological_malfunction_arising_from_mental_factors",
"Special_symptoms_or_syndromes_not_elsewhere_classified",
"Acute_reaction_to_stress",
"Adjustment_reaction",
"Specific_nonpsychotic_mental_disorders_following_brain_damage",
"Depressive_disorder_not_elsewhere_classified",
"Mental_disorders_childhood",
"Disturbance_of_conduct_not_elsewhere_classified",
"Disturbance_of_emotions_specific_to_childhood_and_adolescence",
"Hyperkinetic_syndrome_of_childhood",
"Specific_delays_in_development",
"Psychic_factors",
"Mental_retardation")
################################################################
ggplot(coefficients_for_plot, aes(x = factor(reorder(coefficients_for_plot$Diagnoses, -coefficients_for_plot$Sun_coefficient)), y = coefficients_for_plot$Sun_coefficient, ymin = (coefficients_for_plot$Sun_coefficient-coefficients_for_plot$confidence), ymax = (coefficients_for_plot$Sun_coefficient+coefficients_for_plot$confidence), colour = coefficients_for_plot$Sun_coefficient > 0))+
scale_colour_manual(name = 'PC1 > 0', values = setNames(c('skyblue','grey'),c(T, F)))+
geom_errorbar()+
geom_point()+
geom_text(aes(label = paste(round(coefficients_for_plot$Sun_coefficient, digits=4))), hjust = -0.2)+
geom_hline(yintercept= 0, linetype="dashed", colour="skyblue")+
scale_x_discrete(name= "")+
scale_y_continuous(name= "Correlation coefficient with light duration")+
theme(panel.background = element_blank(),
axis.line = element_line(colour = "black"),
panel.grid.major = element_line(colour = "grey", size = 0.5),
panel.grid.major.x = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "none")
#Plot the table
formattable::formattable(coefficients.p.values_disease_model_DOC,
align = c("l", "l", rep("r", NCOL(coefficients_disease_model_DOC))),
c(hide, format))
#######trash######
ggplot(data2.short, aes(x=data2.short$Mean_of_Sunlight_Duration_minutes, y= data2.short$DOC))+
geom_point()+
geom_smooth()+
coord_cartesian(ylim= c(3,11))
test.model <- lmer(DOC ~ data2.short$Mean_of_Sunlight_Duration_minutes + data2.short$AGE + (data2.short$Mean_of_Sunlight_Duration_minutes|data2.short$REGION), data= data2.short)
coef(test.model)
tmp <- as.data.frame(confint(glht(test.model))$confint)
tmp$Comparison <- rownames(tmp)
ggplot(tmp, aes(x = Comparison, y = Estimate, ymin = lwr, ymax = upr)) +
geom_errorbar() + geom_point()
stargazer(test.model, type = "text",
digits = 3,
star.cutoffs = c(0.05, 0.01, 0.001),
digit.separator = "")
|
bsStat <- function(y, two = NULL, digits = c(2, 2),
use = 'complete.obs', na.rm = TRUE, ...)
{
digits <- rep(digits, 2)[1:2]
ff <- data.frame(matrix(data = 0, nrow = ncol(y), ncol = ncol(y) + 6))
colnames(ff) <- c("name", "mean", "stde", "mini", "maxi", "obno", colnames(y))
ff$name <- colnames(y)
for (i in 1:ncol(y)){
ff[i, "mean"] <- round(mean(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "stde"] <- round( sd(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "mini"] <- round( min(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "maxi"] <- round( max(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "obno"] <- length(na.omit(y[, i]))
for (j in 1:ncol(y)) {
ff[i, j+6] <- round(cor(y[,i], y[,j], use = use), digits=digits[1])
}
}
fstat <- ff[, 1:6]
corr <- ff[, -c(2:6)]
if (is.null(two)) {
if (ncol(y) < 11) { two <- FALSE } else { two <- TRUE }
}
if (two) {
result <- list(fstat = fstat, corr = corr)
} else {
result <- ff
}
return(result)
} | /output/sources/authors/1646/erer/bsStat.r | no_license | Irbis3/crantasticScrapper | R | false | false | 1,049 | r | bsStat <- function(y, two = NULL, digits = c(2, 2),
use = 'complete.obs', na.rm = TRUE, ...)
{
digits <- rep(digits, 2)[1:2]
ff <- data.frame(matrix(data = 0, nrow = ncol(y), ncol = ncol(y) + 6))
colnames(ff) <- c("name", "mean", "stde", "mini", "maxi", "obno", colnames(y))
ff$name <- colnames(y)
for (i in 1:ncol(y)){
ff[i, "mean"] <- round(mean(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "stde"] <- round( sd(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "mini"] <- round( min(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "maxi"] <- round( max(y[, i], na.rm = na.rm), digits=digits[2])
ff[i, "obno"] <- length(na.omit(y[, i]))
for (j in 1:ncol(y)) {
ff[i, j+6] <- round(cor(y[,i], y[,j], use = use), digits=digits[1])
}
}
fstat <- ff[, 1:6]
corr <- ff[, -c(2:6)]
if (is.null(two)) {
if (ncol(y) < 11) { two <- FALSE } else { two <- TRUE }
}
if (two) {
result <- list(fstat = fstat, corr = corr)
} else {
result <- ff
}
return(result)
} |
# ui.R -- shinydashboard user interface: generates random graphs (Erdos-Renyi /
# Barabasi-Albert), renders them with visNetwork, and exposes community- and
# centrality-analysis actions, each with a shinyBS help popover.
library(shinydashboard)
library(shiny)
library(visNetwork)
library(igraph)
library(statnet)
library(shinyBS)
dashboardPage(
  dashboardHeader(title = "GRAFOS"),
  # Dashboard sidebar
  dashboardSidebar(
    sidebarMenu(
      #sidebarPanel(
      # Drop-down list with the available random-graph generation algorithms;
      # the div wrappers keep the help button aligned at the same height.
      div(
        div(
          style="width:80%; display:inline-block; vertical-align: top;",
          selectInput(
            inputId = "algorithm",
            label = "Algoritmo",
            choices = c(
              "Random network" = "erdos",
              "Scale free network" = "albert"
            )
          )
        ),
        div(
          style="display:inline-block; vertical-align: middle;",
          bsButton(
            "q1",
            label = "",
            icon = icon("question"),
            style = "info",
            size = "extra-small"
          ),
          bsPopover(
            id = "q1",
            title = "Información",
            content = paste0("Selección del algoritmo de generación de grafos aleatorio."),
            placement = "right",
            trigger = "hover",
            options = list(container = "body")
          )
        )
      ),
      # Editable field with the number of nodes wanted for the random graph.
      div(
        div(
          style="width:80%; display:inline-block; vertical-align: top;",
          numericInput(
            "nnodes",
            label = "Número de nodos",
            4,
            step = 1,
            min = 1
          )),
        div(
          style="display:inline-block; vertical-align: middle;",
          bsButton(
            "q2",
            label = "",
            icon = icon("question"),
            style = "info",
            size = "extra-small"
          ),
          bsPopover(
            id = "q2",
            title = "Información",
            content = paste0("Selecciona el número de nodos que quieres que contenga tu grafo. ADVERTENCIA: Si se elige un número de nodos muy elevado el grafo tardará un tiempo significativo en generarse."),
            placement = "right",
            trigger = "hover",
            options = list(container = "body")
          ))),
      # Field selecting the probability (as a percentage) of drawing a
      # connection between any two random nodes.
      div(
        div(
          style="width:80%; display:inline-block; vertical-align: top;",
          numericInput(
            "conexion",
            value = 70,
            min = 1,
            max = 100,
            step = 1,
            label ="% de conexiones"
          )
        ),
        div(
          style="display:inline-block; vertical-align: middle;",
          bsButton(
            "q3",
            label = "",
            icon = icon("question"),
            style = "info",
            size = "extra-small"
          ),
          bsPopover(
            id = "q3",
            title = "Información",
            content = paste0("Elige el porcentaje de conexiones que deseas que haya entre nodos."),
            placement = "right",
            trigger = "hover",
            options = list(container = "body")
          ))
      ),
      # Button that must be pressed to (re)generate the graph every time the
      # numeric-input parameters are modified.
      actionButton("generate", label = "Generar grafo"),
      menuItem(div(
        div( style="width:80%; display:inline-block; vertical-align: middle;",
             "Análisis de comunidades"),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q9",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q9",
              title = "Información",
              content = paste0("El análisis de comunidades diferenciará los nodos por colores en función de la comunidad a la que pertenezcan."),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        )),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("comunidad1", label = "Edge Betweeness", width = '100%')
        ),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q5",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q5",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de comunidades en base al algoritmo de Edge Betweeness"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        )
      ),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("comunidad2", label = "Walktrap Method", width = '100%')),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q6",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q6",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de comunidades en base al algoritmo de Walktrap Method"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        ))),
      menuItem(div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            "Análisis de centralidad"),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q10",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q10",
              title = "Información",
              content = paste0("El análisis de centralidad diferenciará los nodos por tamaño en función del nivel de centralidad que posea."),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            ))),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("centralidad1", label = "Betweenness", width = '100%')),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q7",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q7",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de centralidad en base al algoritmo de Betweenness"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        )),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("centralidad2", label = "Kleinberg", width = '100%')),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q8",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q8",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de centralidad en base al algoritmo de Kleinberg"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            ))
      # Below we add the outputs that will be displayed:
      #)
    )))
  ),
  # End of the dashboard sidebar
  # Dashboard body
  dashboardBody(fluidPage(
    # Application title
    titlePanel("GRAFOS"),
    # Sidebar with a slider input for number of bins
    mainPanel(
      # Output holding the rendered graph itself:
      visNetworkOutput("grafica"),
      # Text output showing the id of the selected node
      tableOutput('view_id'),
      # Text outputs showing centrality/community results for the graph.
      tableOutput("close"),
      tableOutput("between"),
      tableOutput("klein"),
      tableOutput("comun1"),
      tableOutput("comun2")
    )
    # End of dashboard tab 1 body
    # Start of dashboard tab 2
  ))
)
| /ui.R | no_license | aprada9/SuperShiny | R | false | false | 8,759 | r | library(shinydashboard)
# ui.R -- shinydashboard user interface: generates random graphs (Erdos-Renyi /
# Barabasi-Albert), renders them with visNetwork, and exposes community- and
# centrality-analysis actions, each with a shinyBS help popover.
library(shiny)
library(visNetwork)
library(igraph)
library(statnet)
library(shinyBS)
dashboardPage(
  dashboardHeader(title = "GRAFOS"),
  # Dashboard sidebar
  dashboardSidebar(
    sidebarMenu(
      #sidebarPanel(
      # Drop-down list with the available random-graph generation algorithms;
      # the div wrappers keep the help button aligned at the same height.
      div(
        div(
          style="width:80%; display:inline-block; vertical-align: top;",
          selectInput(
            inputId = "algorithm",
            label = "Algoritmo",
            choices = c(
              "Random network" = "erdos",
              "Scale free network" = "albert"
            )
          )
        ),
        div(
          style="display:inline-block; vertical-align: middle;",
          bsButton(
            "q1",
            label = "",
            icon = icon("question"),
            style = "info",
            size = "extra-small"
          ),
          bsPopover(
            id = "q1",
            title = "Información",
            content = paste0("Selección del algoritmo de generación de grafos aleatorio."),
            placement = "right",
            trigger = "hover",
            options = list(container = "body")
          )
        )
      ),
      # Editable field with the number of nodes wanted for the random graph.
      div(
        div(
          style="width:80%; display:inline-block; vertical-align: top;",
          numericInput(
            "nnodes",
            label = "Número de nodos",
            4,
            step = 1,
            min = 1
          )),
        div(
          style="display:inline-block; vertical-align: middle;",
          bsButton(
            "q2",
            label = "",
            icon = icon("question"),
            style = "info",
            size = "extra-small"
          ),
          bsPopover(
            id = "q2",
            title = "Información",
            content = paste0("Selecciona el número de nodos que quieres que contenga tu grafo. ADVERTENCIA: Si se elige un número de nodos muy elevado el grafo tardará un tiempo significativo en generarse."),
            placement = "right",
            trigger = "hover",
            options = list(container = "body")
          ))),
      # Field selecting the probability (as a percentage) of drawing a
      # connection between any two random nodes.
      div(
        div(
          style="width:80%; display:inline-block; vertical-align: top;",
          numericInput(
            "conexion",
            value = 70,
            min = 1,
            max = 100,
            step = 1,
            label ="% de conexiones"
          )
        ),
        div(
          style="display:inline-block; vertical-align: middle;",
          bsButton(
            "q3",
            label = "",
            icon = icon("question"),
            style = "info",
            size = "extra-small"
          ),
          bsPopover(
            id = "q3",
            title = "Información",
            content = paste0("Elige el porcentaje de conexiones que deseas que haya entre nodos."),
            placement = "right",
            trigger = "hover",
            options = list(container = "body")
          ))
      ),
      # Button that must be pressed to (re)generate the graph every time the
      # numeric-input parameters are modified.
      actionButton("generate", label = "Generar grafo"),
      menuItem(div(
        div( style="width:80%; display:inline-block; vertical-align: middle;",
             "Análisis de comunidades"),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q9",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q9",
              title = "Información",
              content = paste0("El análisis de comunidades diferenciará los nodos por colores en función de la comunidad a la que pertenezcan."),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        )),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("comunidad1", label = "Edge Betweeness", width = '100%')
        ),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q5",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q5",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de comunidades en base al algoritmo de Edge Betweeness"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        )
      ),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("comunidad2", label = "Walktrap Method", width = '100%')),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q6",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q6",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de comunidades en base al algoritmo de Walktrap Method"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        ))),
      menuItem(div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            "Análisis de centralidad"),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q10",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q10",
              title = "Información",
              content = paste0("El análisis de centralidad diferenciará los nodos por tamaño en función del nivel de centralidad que posea."),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            ))),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("centralidad1", label = "Betweenness", width = '100%')),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q7",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q7",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de centralidad en base al algoritmo de Betweenness"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            )
        )),
      div(
        div(style="width:80%; display:inline-block; vertical-align: middle;",
            actionButton("centralidad2", label = "Kleinberg", width = '100%')),
        div(style="display:inline-block; vertical-align: middle;",
            bsButton(
              "q8",
              label = "",
              icon = icon("question"),
              style = "info",
              size = "extra-small"
            ),
            bsPopover(
              id = "q8",
              title = "Información",
              content = paste0("Se llevará a cabo el análisis de centralidad en base al algoritmo de Kleinberg"),
              placement = "right",
              trigger = "hover",
              options = list(container = "body")
            ))
      # Below we add the outputs that will be displayed:
      #)
    )))
  ),
  # End of the dashboard sidebar
  # Dashboard body
  dashboardBody(fluidPage(
    # Application title
    titlePanel("GRAFOS"),
    # Sidebar with a slider input for number of bins
    mainPanel(
      # Output holding the rendered graph itself:
      visNetworkOutput("grafica"),
      # Text output showing the id of the selected node
      tableOutput('view_id'),
      # Text outputs showing centrality/community results for the graph.
      tableOutput("close"),
      tableOutput("between"),
      tableOutput("klein"),
      tableOutput("comun1"),
      tableOutput("comun2")
    )
    # End of dashboard tab 1 body
    # Start of dashboard tab 2
  ))
)
|
#' Object size in MB
#'
#' Report the memory footprint of an R object, measured with
#' \code{utils::object.size()}, formatted in megabytes.
#'
#' @param obj Any R object whose size should be measured.
#' @return A character scalar such as \code{"1.23 MB"}
#'   (1 MB = 1,000,000 bytes, rounded to two decimal places).
#' @export
objectsize = function(obj) {
  # object.size() reports bytes; scale to MB and round for display.
  paste(round(object.size(obj) / 1000000, 2), "MB")
} | /R/objectsize.R | no_license | philippstats/RRmisc | R | false | false | 138 | r | #' Object size in MB
#'
#' @param obj An R object whose memory footprint should be reported.
#' @return A character scalar such as "1.23 MB" (1 MB = 1,000,000 bytes).
#' @export
objectsize = function(obj) {
  # Measure in bytes, scale to megabytes, and round for display.
  size_mb <- round(object.size(obj) / 1e6, 2)
  sprintf("%s MB", size_mb)
}
# Author: Fall-From-Grace
# Date: 10 May 2014
# Exploratory Data Analysis
# Plot 3: the three energy sub-metering series for 1-2 Feb 2007, written
# to plot3.png (480x480 px).
#read and subset data
powerData <- read.csv("household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
powerDataFeb <- subset(powerData, powerData$Date == "1/2/2007" | powerData$Date == "2/2/2007")
# Parse date+time once instead of re-parsing the same columns for every series.
dateTime <- strptime(paste(powerDataFeb$Date, powerDataFeb$Time), "%d/%m/%Y %H:%M:%S")
#make the graph
png(filename = "plot3.png", height=480, width=480)
plot(dateTime, as.numeric(powerDataFeb$Sub_metering_1), type="l", ylab = "Energy sub metering", xlab = "")
lines(dateTime, as.numeric(powerDataFeb$Sub_metering_2), type="l", col = "red")
lines(dateTime, as.numeric(powerDataFeb$Sub_metering_3), type="l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"))
dev.off()
~
| /plot3.R | no_license | Fall-From-Grace/ExData_Plotting1 | R | false | false | 1,002 | r | # Author: Fall-From-Grace
# Date: 10 May 2014
# Exploratory Data Analysis
# Plot 3: the three energy sub-metering series for 1-2 Feb 2007, written
# to plot3.png (480x480 px).
#read and subset data
powerData <- read.csv("household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
powerDataFeb <- subset(powerData, powerData$Date == "1/2/2007" | powerData$Date == "2/2/2007")
# Parse date+time once instead of re-parsing the same columns for every series.
dateTime <- strptime(paste(powerDataFeb$Date, powerDataFeb$Time), "%d/%m/%Y %H:%M:%S")
#make the graph
png(filename = "plot3.png", height=480, width=480)
plot(dateTime, as.numeric(powerDataFeb$Sub_metering_1), type="l", ylab = "Energy sub metering", xlab = "")
lines(dateTime, as.numeric(powerDataFeb$Sub_metering_2), type="l", col = "red")
lines(dateTime, as.numeric(powerDataFeb$Sub_metering_3), type="l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"))
dev.off()
~
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bothStations.R
\docType{data}
\name{bothStations}
\alias{bothStations}
\title{Historical weather observations from Norfolk, Virginia}
\format{A data frame with 52,213 observations of 6 variables.
\describe{
\item{Date}{the date of the observation}
  \item{MaxTemperature}{(integer) the high temperature (in °F)}
  \item{MinTemperature}{(integer) the low temperature (in °F)}
  \item{AvgTemperature}{(double) the average temperature (in °F)}
\item{CsvPrecipitation}{(character) the day's precipitation (in inches),
as expressed in the original comma separated value (CSV) file.
\strong{NOTE:} Either the letter \code{T} (for trace) or a number,
expressed to the hundredth of an inch}
\item{CsvSnowfall}{(character) the day's snowfall (in inches),
as expressed in the original comma separated value (CSV) file.
\strong{NOTE:} Either the letter \code{T} (for trace) or a number,
expressed to the tenth of an inch}
}}
\source{
\url{http://climodtest.nrcc.cornell.edu/}
}
\usage{
bothStations
}
\description{
A dataset containing observations of weather since 1874 from Norfolk, VA.
Since 1946, those observations are taken at ORF, Norfolk International
Airport. Prior to that, they were taken at the Weather Bureau offices in
the city.
}
\keyword{datasets}
| /orfwx/man/bothStations.Rd | no_license | verumsolum/orf_weather | R | false | true | 1,328 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bothStations.R
\docType{data}
\name{bothStations}
\alias{bothStations}
\title{Historical weather observations from Norfolk, Virginia}
\format{A data frame with 52,213 observations of 6 variables.
\describe{
\item{Date}{the date of the observation}
  \item{MaxTemperature}{(integer) the high temperature (in °F)}
  \item{MinTemperature}{(integer) the low temperature (in °F)}
  \item{AvgTemperature}{(double) the average temperature (in °F)}
\item{CsvPrecipitation}{(character) the day's precipitation (in inches),
as expressed in the original comma separated value (CSV) file.
\strong{NOTE:} Either the letter \code{T} (for trace) or a number,
expressed to the hundredth of an inch}
\item{CsvSnowfall}{(character) the day's snowfall (in inches),
as expressed in the original comma separated value (CSV) file.
\strong{NOTE:} Either the letter \code{T} (for trace) or a number,
expressed to the tenth of an inch}
}}
\source{
\url{http://climodtest.nrcc.cornell.edu/}
}
\usage{
bothStations
}
\description{
A dataset containing observations of weather since 1874 from Norfolk, VA.
Since 1946, those observations are taken at ORF, Norfolk International
Airport. Prior to that, they were taken at the Weather Bureau offices in
the city.
}
\keyword{datasets}
|
# Appears to be an auto-generated fuzzing regression case (AFL harness):
# a canned argument list for the unexported CNull sampling routine.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(1.00574128561883e-231, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 2L)))
# Invoke the routine with the canned arguments via do.call.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615773060-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 232 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(1.00574128561883e-231, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 2L)))
# Invoke the unexported CNull sampling routine with the canned argument list
# built above, then print the structure of whatever it returns.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
# Cross-validate an xgboost linear booster (binary outcome, AUC metric) on a
# merged activity/people table.
#
# Expects `data` to contain an integer 0/1 column `outcome` plus the
# integer-coded categorical columns activity_category, people_group_1,
# char_1..char_10 and people_char_1..people_char_9 (one-hot encoded below via
# sparseMatrix), and the numeric/flag columns people_char_10..people_char_38
# (appended as-is).
#
# NOTE(review): the parameter `K` is accepted but never used in this body --
# presumably intended as a fold count; confirm before relying on it.
# Returns the result of xgb.cv (5-fold CV over 100 rounds).
xgb<- function(data, K){
  library(data.table)
  library(FeatureHashing)
  library(xgboost)
  library(dplyr)
  library(Matrix)
  # Split off the label; everything that remains is a feature.
  Y<- data$outcome
  data$outcome<- NULL
  # A prior (removed) exploration recorded here that people_char_10..38 are the
  # only columns containing zeros (binary flags), while the remaining
  # categorical columns hold strictly positive integer codes -- which is why
  # the two cBind stages below treat them differently.
  # Row index used as the row coordinate for the sparse one-hot encoding.
  data$i<- 1:dim(data)[1]
  D<- data
  # One-hot encode each strictly positive categorical column:
  # sparseMatrix(i, code) places a 1 at (row, category-code).
  # NOTE(review): Matrix::cBind is deprecated in newer Matrix releases;
  # plain cbind should work -- confirm before upgrading.
  data.sparse=
    cBind(sparseMatrix(D$i,D$activity_category),
          sparseMatrix(D$i,D$people_group_1),
          sparseMatrix(D$i,D$char_1),
          sparseMatrix(D$i,D$char_2),
          sparseMatrix(D$i,D$char_3),
          sparseMatrix(D$i,D$char_4),
          sparseMatrix(D$i,D$char_5),
          sparseMatrix(D$i,D$char_6),
          sparseMatrix(D$i,D$char_7),
          sparseMatrix(D$i,D$char_8),
          sparseMatrix(D$i,D$char_9),
          sparseMatrix(D$i,D$char_10),
          sparseMatrix(D$i,D$people_char_1),
          sparseMatrix(D$i,D$people_char_2),
          sparseMatrix(D$i,D$people_char_3),
          sparseMatrix(D$i,D$people_char_4),
          sparseMatrix(D$i,D$people_char_5),
          sparseMatrix(D$i,D$people_char_6),
          sparseMatrix(D$i,D$people_char_7),
          sparseMatrix(D$i,D$people_char_8),
          sparseMatrix(D$i,D$people_char_9)
    )
  # Append the flag/numeric columns (may contain zeros) as dense columns.
  data.sparse=
    cBind(data.sparse,
          D$people_char_10,
          D$people_char_11,
          D$people_char_12,
          D$people_char_13,
          D$people_char_14,
          D$people_char_15,
          D$people_char_16,
          D$people_char_17,
          D$people_char_18,
          D$people_char_19,
          D$people_char_20,
          D$people_char_21,
          D$people_char_22,
          D$people_char_23,
          D$people_char_24,
          D$people_char_25,
          D$people_char_26,
          D$people_char_27,
          D$people_char_28,
          D$people_char_29,
          D$people_char_30,
          D$people_char_31,
          D$people_char_32,
          D$people_char_33,
          D$people_char_34,
          D$people_char_35,
          D$people_char_36,
          D$people_char_37,
          D$people_char_38)
  # Linear booster hyper-parameters for binary classification scored by AUC.
  p <- list(objective = "binary:logistic",
            eval_metric = "auc",
            booster = "gblinear",
            eta = 0.02,
            subsample = 0.7,
            colsample_bytree = 0.7,
            min_child_weight = 0,
            max_depth = 10)
  # Fixed seed so the CV fold assignment is reproducible.
  set.seed(120)
  dtrain1 <- xgb.DMatrix(data.sparse , label = Y)
  xgb.cv(data=dtrain1, nrounds = 100, nfold = 5, params = p)
} | /lib/xgboost.R | no_license | TZstatsADS/Fall2017-project5-grp3 | R | false | false | 4,712 | r | xgb<- function(data, K){
library(data.table)
library(FeatureHashing)
library(xgboost)
library(dplyr)
library(Matrix)
Y<- data$outcome
data$outcome<- NULL
# nm1 <- names(data)[which(sapply(data, function(x) sum(x==0)>0))]
# # Results:
# # [1] "people_char_10" "people_char_11" "people_char_12" "people_char_13" "people_char_14" "people_char_15"
# # [7] "people_char_16" "people_char_17" "people_char_18" "people_char_19" "people_char_20" "people_char_21"
# # [13] "people_char_22" "people_char_23" "people_char_24" "people_char_25" "people_char_26" "people_char_27"
# # [19] "people_char_28" "people_char_29" "people_char_30" "people_char_31" "people_char_32" "people_char_33"
# # [25] "people_char_34" "people_char_35" "people_char_36" "people_char_37" "people_char_38"
# snm1<- names(data)[which(sapply(data, function(x) sum(x==0)==0))]
# # Results:
# # [1] "V1" "people_id" "activity_id" "date" "activity_category"
# # [6] "char_1" "char_2" "char_3" "char_4" "char_5"
# # [11] "char_6" "char_7" "char_8" "char_9" "char_10"
# # [16] "people_char_1" "people_group_1" "people_char_2" "people_date" "people_char_3"
# # [21] "people_char_4" "people_char_5" "people_char_6" "people_char_7" "people_char_8"
# # [26] "people_char_9"
# snm1<- snm1[-(1:4)]
# # [1] "activity_category" "char_1" "char_2" "char_3" "char_4"
# # [6] "char_5" "char_6" "char_7" "char_8" "char_9"
# # [11] "char_10" "people_char_1" "people_group_1" "people_char_2" "people_date"
# # [16] "people_char_3" "people_char_4" "people_char_5" "people_char_6" "people_char_7"
# # [21] "people_char_8" "people_char_9"
# snm1<- snm1[-15]
# # [1] "activity_category" "char_1" "char_2" "char_3" "char_4"
# # [6] "char_5" "char_6" "char_7" "char_8" "char_9"
# # [11] "char_10" "people_char_1" "people_group_1" "people_char_2" "people_char_3"
# # [16] "people_char_4" "people_char_5" "people_char_6" "people_char_7" "people_char_8"
# # [21] "people_char_9"
data$i<- 1:dim(data)[1]
D<- data
data.sparse=
cBind(sparseMatrix(D$i,D$activity_category),
sparseMatrix(D$i,D$people_group_1),
sparseMatrix(D$i,D$char_1),
sparseMatrix(D$i,D$char_2),
sparseMatrix(D$i,D$char_3),
sparseMatrix(D$i,D$char_4),
sparseMatrix(D$i,D$char_5),
sparseMatrix(D$i,D$char_6),
sparseMatrix(D$i,D$char_7),
sparseMatrix(D$i,D$char_8),
sparseMatrix(D$i,D$char_9),
sparseMatrix(D$i,D$char_10),
sparseMatrix(D$i,D$people_char_1),
sparseMatrix(D$i,D$people_char_2),
sparseMatrix(D$i,D$people_char_3),
sparseMatrix(D$i,D$people_char_4),
sparseMatrix(D$i,D$people_char_5),
sparseMatrix(D$i,D$people_char_6),
sparseMatrix(D$i,D$people_char_7),
sparseMatrix(D$i,D$people_char_8),
sparseMatrix(D$i,D$people_char_9)
)
data.sparse=
cBind(data.sparse,
D$people_char_10,
D$people_char_11,
D$people_char_12,
D$people_char_13,
D$people_char_14,
D$people_char_15,
D$people_char_16,
D$people_char_17,
D$people_char_18,
D$people_char_19,
D$people_char_20,
D$people_char_21,
D$people_char_22,
D$people_char_23,
D$people_char_24,
D$people_char_25,
D$people_char_26,
D$people_char_27,
D$people_char_28,
D$people_char_29,
D$people_char_30,
D$people_char_31,
D$people_char_32,
D$people_char_33,
D$people_char_34,
D$people_char_35,
D$people_char_36,
D$people_char_37,
D$people_char_38)
p <- list(objective = "binary:logistic",
eval_metric = "auc",
booster = "gblinear",
eta = 0.02,
subsample = 0.7,
colsample_bytree = 0.7,
min_child_weight = 0,
max_depth = 10)
set.seed(120)
dtrain1 <- xgb.DMatrix(data.sparse , label = Y)
xgb.cv(data=dtrain1, nrounds = 100, nfold = 5, params = p)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-doc.R
\docType{data}
\name{prg_nuts2}
\alias{prg_nuts2}
\title{Prague NUTS2 code}
\format{
character vector of length 1
\describe{
Prague NUTS2 code
}
}
\usage{
prg_nuts2
}
\description{
Prague NUTS2 code
}
\seealso{
Other Codes and metadata:
\code{\link{prg_bbox_krovak}},
\code{\link{prg_bbox_wgs84}},
\code{\link{prg_fua_oecd}},
\code{\link{prg_ico}},
\code{\link{prg_kod}},
\code{\link{prg_kraj}},
\code{\link{prg_lau1}},
\code{\link{prg_metro_oecd}},
\code{\link{prg_nuts3}},
\code{\link{prg_okres_nuts}},
\code{\link{prg_okres}}
}
\concept{Codes and metadata}
\keyword{datasets}
| /man/prg_nuts2.Rd | permissive | petrbouchal/pragr | R | false | true | 669 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-doc.R
\docType{data}
\name{prg_nuts2}
\alias{prg_nuts2}
\title{Prague NUTS2 code}
\format{
character vector of length 1
\describe{
Prague NUTS2 code
}
}
\usage{
prg_nuts2
}
\description{
Prague NUTS2 code
}
\seealso{
Other Codes and metadata:
\code{\link{prg_bbox_krovak}},
\code{\link{prg_bbox_wgs84}},
\code{\link{prg_fua_oecd}},
\code{\link{prg_ico}},
\code{\link{prg_kod}},
\code{\link{prg_kraj}},
\code{\link{prg_lau1}},
\code{\link{prg_metro_oecd}},
\code{\link{prg_nuts3}},
\code{\link{prg_okres_nuts}},
\code{\link{prg_okres}}
}
\concept{Codes and metadata}
\keyword{datasets}
|
getdisc <- function(X, thresholds){# must start at zero for sirt::polychorc
z <- X
my.data <- sapply(1:ncol(X), function(i) arules::discretize(z[,i], method = "fixed", labels=F, breaks=thresholds[[i]]))
colnames(my.data ) <- paste("d", 1:ncol(X), sep="")
sapply(data.frame(my.data), function(col) col-min(col))
} | /R/getdisc.R | no_license | njaalf/discnorm | R | false | false | 323 | r |
getdisc <- function(X, thresholds){# must start at zero for sirt::polychorc
z <- X
my.data <- sapply(1:ncol(X), function(i) arules::discretize(z[,i], method = "fixed", labels=F, breaks=thresholds[[i]]))
colnames(my.data ) <- paste("d", 1:ncol(X), sep="")
sapply(data.frame(my.data), function(col) col-min(col))
} |
#' nv barplot
#'
#' @param height either a data.frame, vector or matrix of values describing the bars which
#'        make up the plot. If \code{height} is a vector, the plot consists
#'        of a sequence of rectangular bars with heights given by the
#'        values in the vector. If \code{height} is a matrix and \code{beside}
#'        is \code{FALSE} then each bar of the plot corresponds to a column
#'        of \code{height}, with the values in the column giving the heights
#'        of stacked sub-bars making up the bar. If \code{height} is a
#'        matrix and \code{beside} is \code{TRUE}, then the values in each column
#'        are juxtaposed rather than stacked. If \code{height} is a data.frame
#'        then an attempt to coerce it into matrix form is made using the
#'        \code{dftab} function.
#' @param space bar group spacing (scalar value)
#' @param names.arg a vector of names to be plotted below each bar or group of
#'        bars. If this argument is omitted, then the names are taken
#'        from the \code{names} attribute of \code{height} if this is a vector,
#'        or the column names if it is a matrix.
#' @param beside a logical value. If \code{FALSE}, the columns of \code{height} are
#'        portrayed as stacked bars, and if \code{TRUE} the columns are
#'        portrayed as juxtaposed bars.
#' @param horiz a logical value. If \code{FALSE}, the bars are drawn vertically
#'        with the first bar to the left. If \code{TRUE}, the bars are
#'        drawn horizontally with the first at the bottom.
#' @param col optional vector of bar component colors
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @param xaxt set to 'n' to suppress drawing x axis, otherwise plot the x axis.
#' @param yaxt set to 'n' to suppress drawing y axis, otherwise plot the y axis.
#' @param axes if \code{TRUE} display the bar axis and its values.
#'        \code{axes=FALSE} is equivalent to \code{yaxt='n'} when \code{horiz=FALSE}.
#' @param axisnames if \code{TRUE} display the group axis and its values.
#'        \code{axisnames=FALSE} is equivalent to \code{xaxt='n'} when \code{horiz=FALSE}.
#' @param rotateLabels axis text rotation in degrees
#' @param showControls set to \code{TRUE} to display interactive controls
#' @param tickNumFormat format numeric tick labels using a d3.format string (d3.js)
#' @param ... optional additional named plot options passed directly to nvd3.js (see examples)
#' @note NOTE(review): \code{horiz}, \code{xlab} and \code{ylab} are accepted
#'       but not referenced anywhere in the implementation below -- confirm
#'       whether they are intended to be honoured.
#' @return
#' An htmlwidget object that is displayed using the object's show or print method.
#' (If you don't see your widget plot, try printing it with the \code{print} function.)
#' @importFrom jsonlite toJSON
#' @export
nvbarplot = function(height, space = 0.1, names.arg = NULL, beside = FALSE,
                     horiz = FALSE, col = NULL, xlab = NULL, ylab = NULL,
                     axes = TRUE, axisnames = TRUE, xaxt = 's', yaxt = 's',
                     rotateLabels=0, showControls=TRUE, tickNumFormat=",.1f", ...)
{
  # Normalize the input to a matrix: data frames go through dftab(), bare
  # vectors become a single unnamed row (no stacking controls needed).
  if(is.data.frame(height)) height = dftab(height)
  if(!is.matrix(height)) {
    height = rbind(height)
    rownames(height) = NULL
    showControls = FALSE
  }
  if(NROW(height) == 1) showControls = FALSE
  height = t(height) # XXX fix this sillyness -- transposed so rows index the bar groups serialized below
  # Pad names.arg with empty strings if too short, then use it as group labels.
  if(!is.null(names.arg)) {
    if(length(names.arg) < nrow(height)) names.arg = c(names.arg, rep("", length.out=nrow(height) - length(names.arg)))
    rownames(height) = names.arg
  }
  # Map the base-R-style axis switches onto nvd3's show/hide booleans.
  showXAxis = ifelse(xaxt == 'n' || (!axisnames), FALSE, TRUE)
  showYAxis = ifelse(yaxt == 'n' || (!axes), FALSE, TRUE)
  if(is.null(rownames(height))) rownames(height) = seq(nrow(height))
  # JavaScript data payload: the series data plus the group labels.
  data = sprintf("var rdata=%s;\nvar xlabels=%s;\n", multibar_matrix2json(height), toJSON(rownames(height)))
  barColor = ""
  if(!is.null(col)) barColor = sprintf(".barColor(%s)\n", toJSON(col))
  # Any extra named arguments become chart.<name>(<json>) configuration calls.
  options = list(...)
  if(length(options) > 0)
  {
    no = names(options)
    names(options) = NULL
    options = paste(lapply(seq_along(no),
      function(i) sprintf("chart.%s(%s);", no[i], jsonlite::toJSON(options[[i]], auto_unbox=TRUE))), collapse="\n")
  } else options = ""
  # Assemble the nvd3 multiBarChart configuration snippet.
  chart = sprintf("
var chart=nv.models.multiBarChart()%s
   .rotateLabels(%s)
   .showControls(%s)
   .groupSpacing(%s)
   .showXAxis(%s)
   .showYAxis(%s)
   .stacked(%s);\n",
   barColor,
   toJSON(rotateLabels, auto_unbox=TRUE),
   toJSON(showControls, auto_unbox=TRUE),
   toJSON(space, auto_unbox=TRUE),
   toJSON(showXAxis, auto_unbox=TRUE),
   toJSON(showYAxis, auto_unbox=TRUE),
   toJSON(!beside, auto_unbox=TRUE))
  # Final JavaScript program: data + chart config + the nv.addGraph render call.
  sprintf("
%s
nv.addGraph(function() {
  %s%s
  chart.xAxis.tickFormat(function (d) { return xlabels[d];});
  chart.yAxis.tickFormat(d3.format('%s'));
  d3.select(_this.svg)
    .datum(rdata)
    .call(chart);
  nv.utils.windowResize(chart.update);
  return chart;
});", data, chart, options, tickNumFormat)
}
| /R/nvbarplot.r | no_license | bwlewis/nvd3 | R | false | false | 4,989 | r | #' nv barplot
#'
#' @param height either a data.frame, vector or matrix of values describing the bars which
#' make up the plot. If \code{height} is a vector, the plot consists
#' of a sequence of rectangular bars with heights given by the
#' values in the vector. If \code{height} is a matrix and \code{beside}
#' is \code{FALSE} then each bar of the plot corresponds to a column
#' of \code{height}, with the values in the column giving the heights
#' of stacked sub-bars making up the bar. If \code{height} is a
#' matrix and \code{beside} is \code{TRUE}, then the values in each column
#' are juxtaposed rather than stacked. If \code{height} is a data.frame
#' then an attempt to coerce it into matrix form is made using the
#' \code{dftab} function.
#' @param space bar group spacing (scalar value)
#' @param names.arg a vector of names to be plotted below each bar or group of
#' bars. If this argument is omitted, then the names are taken
#' from the \code{names} attribute of \code{height} if this is a vector,
#' or the column names if it is a matrix.
#' @param beside a logical value. If \code{FALSE}, the columns of \code{height} are
#' portrayed as stacked bars, and if \code{TRUE} the columns are
#' portrayed as juxtaposed bars.
#' @param horiz a logical value. If \code{FALSE}, the bars are drawn vertically
#' with the first bar to the left. If \code{TRUE}, the bars are
#' drawn horizontally with the first at the bottom.
#' @param col optional vector of bar component colors
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @param xaxt set to 'n' to suppress drawing x axis, otherwise plot the x axis.
#' @param yaxt set to 'n' to suppress drawing x axis, otherwise plot the x axis.
#' @param axes if \code{TRUE} display the bar axis and its values.
#' \code{axes=FALSE} is equivalent to \code{yaxt='n'} when \code{horiz=FALSE}.
#' @param axisnames if \code{TRUE} display the group axis and its values.
#' \code{axisnames=FALSE} is equivalent to \code{xaxt='n'} when \code{horiz=FALSE}.
#' @param rotateLabels axis text rotation in degrees
#' @param showControls set to \code{TRUE} to display interactive controls
#' @param tickNumFormat format numeric tick labels using a d3.format string (d3.js)
#' @param ... optional additional named plot options passed directly to nvd3.js (see examples)
#' @return
#' An htmlwidget object that is displayed using the object's show or print method.
#' (If you don't see your widget plot, try printing it with the \code{print} function.)
#' @importFrom jsonlite toJSON
#' @export
# Build the nvd3 multiBarChart JavaScript for a bar plot. The roxygen
# block above documents the exported interface; the return value is a JS
# fragment evaluated by the widget scaffolding, which supplies `_this.svg`.
# NOTE(review): horiz, xlab and ylab are accepted and documented but not
# used anywhere in this body -- confirm whether they should be wired up.
nvbarplot = function(height, space = 0.1, names.arg = NULL, beside = FALSE,
horiz = FALSE, col = NULL, xlab = NULL, ylab = NULL,
axes = TRUE, axisnames = TRUE, xaxt = 's', yaxt = 's',
rotateLabels=0, showControls=TRUE, tickNumFormat=",.1f", ...)
{
# Coerce data.frame input to a matrix via the package helper dftab().
if(is.data.frame(height)) height = dftab(height)
if(!is.matrix(height)) {
# A bare vector is a single series; hide the grouped/stacked controls,
# which are meaningless for one series.
height = rbind(height)
rownames(height) = NULL
showControls = FALSE
}
if(NROW(height) == 1) showControls = FALSE
height = t(height) # XXX fix this sillyness
if(!is.null(names.arg)) {
# Pad short label vectors with "" so each bar group has a name slot.
if(length(names.arg) < nrow(height)) names.arg = c(names.arg, rep("", length.out=nrow(height) - length(names.arg)))
rownames(height) = names.arg
}
# Axis visibility flags derived from the base-graphics style arguments
# (xaxt/yaxt = 'n' or axes/axisnames = FALSE hide the respective axis).
showXAxis = ifelse(xaxt == 'n' || (!axisnames), FALSE, TRUE)
showYAxis = ifelse(yaxt == 'n' || (!axes), FALSE, TRUE)
if(is.null(rownames(height))) rownames(height) = seq(nrow(height))
# JS preamble: series data plus the x-axis label lookup table.
data = sprintf("var rdata=%s;\nvar xlabels=%s;\n", multibar_matrix2json(height), toJSON(rownames(height)))
barColor = ""
if(!is.null(col)) barColor = sprintf(".barColor(%s)\n", toJSON(col))
# Any extra named arguments are forwarded verbatim as chart.<name>(<value>); calls.
options = list(...)
if(length(options) > 0)
{
no = names(options)
names(options) = NULL
options = paste(lapply(seq_along(no),
function(i) sprintf("chart.%s(%s);", no[i], jsonlite::toJSON(options[[i]], auto_unbox=TRUE))), collapse="\n")
} else options = ""
chart = sprintf("
var chart=nv.models.multiBarChart()%s
.rotateLabels(%s)
.showControls(%s)
.groupSpacing(%s)
.showXAxis(%s)
.showYAxis(%s)
.stacked(%s);\n",
barColor,
toJSON(rotateLabels, auto_unbox=TRUE),
toJSON(showControls, auto_unbox=TRUE),
toJSON(space, auto_unbox=TRUE),
toJSON(showXAxis, auto_unbox=TRUE),
toJSON(showYAxis, auto_unbox=TRUE),
toJSON(!beside, auto_unbox=TRUE))
# Final script: data, chart construction, user options, tick formatting,
# then bind the data to the SVG and register the window-resize hook.
sprintf("
%s
nv.addGraph(function() {
%s%s
chart.xAxis.tickFormat(function (d) { return xlabels[d];});
chart.yAxis.tickFormat(d3.format('%s'));
d3.select(_this.svg)
.datum(rdata)
.call(chart);
nv.utils.windowResize(chart.update);
return chart;
});", data, chart, options, tickNumFormat)
}
|
\name{FWEminP}
\alias{FWEminP}
\title{FWE Adjustment Using Permutation}
\usage{
FWEminP(Pmat)
}
\arguments{
\item{Pmat}{\code{matrix} of \emph{p-}values where
comparisons are on the columns}
}
\value{
\code{numeric} vector of corrected \emph{p}-values
}
\description{
FWE Adjustment Using Permutation and NPC
}
\details{
Multiplicity correction controlling the Family-Wise Error
using the permutation \emph{p}-values and NonParametric
Combination with \emph{minP} as combining function.
}
\examples{
set.seed(123)
P <- matrix(runif(1010), nrow = 101, ncol = 10,
dimnames = list(c("p-obs", paste("p-*", 1L:100)), LETTERS[1L:10]))
P[1L, 1L:4] <- 1/100
FWEminP(P)
}
\author{
Dario Basso and Federico Mattiello
<federico.mattiello@gmail.com>
}
\references{
Pesarin, F. and Salmaso, L. (2010) \emph{Permutation
Tests for Complex Data}. Wiley: United Kingdom \cr
Finos, L. and Pesarin, F. and Salmaso, L. (2003) Test
combinati per il controllo della {molteplicit\`a}
mediante procedure di closed testing, \emph{Statistica
Applicata}, \bold{15}, 301--329.
}
\seealso{
\code{\link{p.adjust}}, \code{\link{p.adjust.methods}}
}
| /man/FWEminP.Rd | no_license | cran/SOUP | R | false | false | 1,195 | rd | \name{FWEminP}
\alias{FWEminP}
\title{FWE Adjustment Using Permutation}
\usage{
FWEminP(Pmat)
}
\arguments{
\item{Pmat}{\code{matrix} of \emph{p-}values where
comparisons are on the columns}
}
\value{
\code{numeric} vector of corrected p.values
}
\description{
FWE Adjustment Using Permutation and NPC
}
\details{
Multiplicity correction controlling the Family-Wise Error
using the permutation \emph{p}-values and NonParametric
Combination with \emph{minP} as combining function.
}
\examples{
set.seed(123)
P <- matrix(runif(1010), nrow = 101, ncol = 10,
dimnames = list(c("p-obs", paste("p-*", 1L:100)), LETTERS[1L:10]))
P[1L, 1L:4] <- 1/100
FWEminP(P)
}
\author{
Dario Basso and Federico Mattiello
<federico.mattiello@gmail.com>
}
\references{
Pesarin, F. and Salmaso, L. (2010) \emph{Permutation
Tests for Complex Data}. Wiley: United Kingdom \cr
Finos, L. and Pesarin, F. and Salmaso, L. (2003) Test
combinati per il controllo della {molteplicit\`a}
mediante procedure di closed testing, \emph{Statistica
Applicata}, \bold{15}, 301--329.
}
\seealso{
\code{\link{p.adjust}}, \code{\link{p.adjust.methods}}
}
|
# Quantile regression of BTC closing price ("Close") on the "SUM" index for
# the USA series: fit OLS plus quantile regressions at the quartiles, print a
# comparison table, and plot the full quantile process.
# NOTE(review): the absolute Windows path below ties this script to one
# machine -- consider a relative path or file.choose().
data <- read.csv("D:/Research Paper Finance/Finance Data/quantile-regression/USA.csv")
head(data)
library(forecast)
library(smooth)
library(graphics)
library(datasets)
library(tseries)
library(ggplot2)
library(fpp2)
library(imputeTS)
library(xts)
# Daily time series of the closing price (column 3), starting Jan 2020.
datats <- ts(data[, 3], start = c(2020, 1), frequency = 365.25)
# Install packages only when they are missing, instead of unconditionally
# reinstalling (and hitting the network) on every run.
if (!requireNamespace("quantreg", quietly = TRUE)) install.packages("quantreg")
library(quantreg)
summary(data)
# We can see that at different quantiles the price of BTC rises.
# Scatter plot with the OLS line (red) and quantile-regression lines for
# taus 0.05, 0.10, ..., 0.95.
ggplot(data, aes(SUM, Close)) +
  geom_point() +
  geom_smooth(method = "lm", color = "red") +
  geom_quantile(quantiles = seq(0.05, 0.95, by = 0.05))
head(datats)
# ggplot2 is already attached above; install only if somehow absent.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)
library(lmtest)
if (!requireNamespace("olsrr", quietly = TRUE)) install.packages("olsrr")
# Quantile regression at the quartiles, plus OLS for comparison.
quantreg25 <- rq(Close ~ SUM, tau = 0.25, data = data)
quantreg50 <- rq(Close ~ SUM, tau = 0.50, data = data)
quantreg75 <- rq(Close ~ SUM, tau = 0.75, data = data)
ols <- lm(Close ~ SUM, data = data)
if (!requireNamespace("stargazer", quietly = TRUE)) install.packages("stargazer")
library(stargazer)
stargazer(ols, quantreg25, quantreg50, quantreg75, type = "text")
# Visualize the coefficient estimates across all quantiles.
quantreg.all <- rq(Close ~ SUM, tau = seq(0.05, 0.95, by = 0.05), data = data)
quantreg.plot <- summary(quantreg.all)
plot(quantreg.plot)
| /quantile-regression.R | no_license | Arun97-creator/Surge-of-Crypto-during-the-Pandemic | R | false | false | 1,270 | r | data <- read.csv("D:/Research Paper Finance/Finance Data/quantile-regression/USA.csv")
# (Continuation of the quantile-regression script: `data` was read from the
# project CSV on the preceding line.)
head(data)
library(forecast)
library(smooth)
library(graphics)
library(datasets)
library(tseries)
library(ggplot2)
library(fpp2)
library(imputeTS)
library(xts)
# Daily time series of the closing price (column 3), starting Jan 2020.
datats = ts(data[,3], start = c(2020,1), frequency = 365.25)
# NOTE(review): this reinstalls quantreg on every run; consider guarding
# with requireNamespace().
install.packages("quantreg")
library(quantreg)
summary(data)
# We can see that at different quantiles the price of BTC rises.
# Scatter plot with the OLS line (red) plus quantile-regression lines
# for taus 0.05, 0.10, ..., 0.95.
ggplot(data,aes(SUM,Close))+
geom_point() +
geom_smooth(method="lm",color="red")+
geom_quantile(quantiles = seq(0.05,0.95,by = 0.05))
head(datats)
# NOTE(review): ggplot2 is already attached above; this reinstall is redundant.
install.packages("ggplot2")
library(ggplot2)
library(lmtest)
install.packages("olsrr")
# Quantile regression at the quartiles, plus OLS for comparison.
quantreg25 <- rq(Close ~ SUM,tau = 0.25,data=data)
quantreg50 <- rq(Close ~ SUM,tau = 0.50,data=data)
quantreg75 <- rq(Close ~ SUM,tau = 0.75,data=data)
ols <- lm(Close ~ SUM,data=data)
install.packages("stargazer")
library(stargazer)
stargazer(ols,quantreg25,quantreg50,quantreg75,type = "text")
# Graph to visualize coefficient estimates across all quantiles.
quantreg.all <- rq(Close ~ SUM,tau = seq(0.05,0.95,by=0.05),data=data)
quantreg.plot <- summary(quantreg.all)
plot(quantreg.plot)
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.8156752168637e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835559-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,047 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.8156752168637e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
## Ivan's Functions 27/03/2016
## makeCacheMatrix builds a special "matrix" object that can cache its
## inverse; cacheSolve computes the inverse, returning the cached copy on
## repeated calls instead of recomputing it. (The previous versions were
## empty stubs.)

## Create a list of accessors around matrix `x` plus a cached inverse.
## set()/get() write and read the matrix (set() invalidates the cache);
## setinverse()/getinverse() write and read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}

## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse was already computed (and the matrix not changed since),
## the cached value is returned; otherwise it is computed with solve(),
## cached, and returned. Additional arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | iphilips/ProgrammingAssignment2 | R | false | false | 355 | r | ## Ivan's Functions 27/03/2016
## Pair of functions implementing a matrix with a memoized inverse.
## The previous versions were unimplemented stubs.

## Wrap matrix `x` in a closure exposing set/get for the matrix itself and
## setinverse/getinverse for the cached inverse; set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  list(
    set = function(value) {
      # New matrix contents make the cached inverse stale.
      x <<- value
      cachedInverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cachedInverse <<- inverse,
    getinverse = function() cachedInverse
  )
}

## Compute (or fetch from cache) the inverse of the special "matrix" made
## by makeCacheMatrix. Extra arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
\docType{methods}
\name{summary,BMAdata-method}
\alias{summary,BMAdata-method}
\title{Summarize BMAdata Objects}
\usage{
\S4method{summary}{BMAdata}(object, ...)
}
\arguments{
\item{BMAdata}{An object of class BMAdata}
\item{object}{an object for which a summary is desired.}
\item{...}{additional arguments affecting the summary
produced.}
}
\description{
Modifies the "summary" function to take objects of class
\code{BMAdata} (or any of its subclasses). It summarizes
the posterior expected values and probabilities (or being
non-zero) of the coefficients.
}
\author{
Thomas Carroll: \email{thomasscarroll89}
}
| /BMA/man/summary.Rd | no_license | thomasscarroll89/Midterm | R | false | false | 624 | rd | \docType{methods}
\name{summary,BMAdata-method}
\alias{summary,BMAdata-method}
\title{Summarize BMAdata Objects}
\usage{
\S4method{summary}{BMAdata}(object, ...)
}
\arguments{
\item{BMAdata}{An object of class BMAdata}
\item{object}{an object for which a summary is desired.}
\item{...}{additional arguments affecting the summary
produced.}
}
\description{
Modifies the "summary" function to take objects of class
\code{BMAdata} (or any of its subclasses). It summarizes
the posterior expected values and probabilities (or being
non-zero) of the coefficients.
}
\author{
Thomas Carroll: \email{thomasscarroll89
}
|
# RDD in R implemented in S4 OO system.
# Register the S3 class "jobj" (a reference to a JVM-side object produced by
# the SparkR backend) so it can be used as an S4 slot type below.
setOldClass("jobj")
#' @title S4 class that represents an RDD
#' @description RDD can be created using functions like
#' \code{parallelize}, \code{textFile} etc.
#' @rdname RDD
#' @seealso parallelize, textFile
#'
#' @slot env An R environment that stores bookkeeping states of the RDD
#' @slot jrdd Java object reference to the backing JavaRDD
#' @export
setClass("RDD",
slots = list(env = "environment",
jrdd = "jobj"))
# Internal subclass of RDD representing a chain of fused transformations:
# `prev` is the parent RDD, `func(split, iterator)` is the (possibly
# composed) per-partition transformation, and `prev_jrdd` is the JavaRDD at
# the head of the pipeline. Creation of the backing JavaRDD is deferred
# until getJRDD() is called on this object.
setClass("PipelinedRDD",
slots = list(prev = "RDD",
func = "function",
prev_jrdd = "jobj"),
contains = "RDD")
setMethod("initialize", "RDD", function(.Object, jrdd, serialized,
isCached, isCheckpointed) {
# We use an environment to store mutable states inside an RDD object.
# Note that R's call-by-value semantics makes modifying slots inside an
# object (passed as an argument into a function, such as cache()) difficult:
# i.e. one needs to make a copy of the RDD object and sets the new slot value
# there.
# The slots are inheritable from superclass. Here, both `env' and `jrdd' are
# inherited from RDD, but only the former is used.
.Object@env <- new.env()
.Object@env$isCached <- isCached
.Object@env$isCheckpointed <- isCheckpointed
.Object@env$serialized <- serialized
.Object@jrdd <- jrdd
.Object
})
setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val) {
.Object@env <- new.env()
.Object@env$isCached <- FALSE
.Object@env$isCheckpointed <- FALSE
.Object@env$jrdd_val <- jrdd_val
# This tracks if jrdd_val is serialized
.Object@env$serialized <- prev@env$serialized
# NOTE: We use prev_serialized to track if prev_jrdd is serialized
# prev_serialized is used during the delayed computation of JRDD in getJRDD
.Object@prev <- prev
isPipelinable <- function(rdd) {
e <- rdd@env
!(e$isCached || e$isCheckpointed)
}
if (!inherits(prev, "PipelinedRDD") || !isPipelinable(prev)) {
# This transformation is the first in its stage:
.Object@func <- func
.Object@prev_jrdd <- getJRDD(prev)
# Since this is the first step in the pipeline, the prev_serialized
# is same as serialized here.
.Object@env$prev_serialized <- .Object@env$serialized
} else {
pipelinedFunc <- function(split, iterator) {
func(split, prev@func(split, iterator))
}
.Object@func <- pipelinedFunc
.Object@prev_jrdd <- prev@prev_jrdd # maintain the pipeline
# Get if the prev_jrdd was serialized from the parent RDD
.Object@env$prev_serialized <- prev@env$prev_serialized
}
.Object
})
#' @rdname RDD
#' @export
#'
#' @param jrdd Java object reference to the backing JavaRDD
#' @param serialized TRUE if the RDD stores data serialized in R
#' @param isCached TRUE if the RDD is cached
#' @param isCheckpointed TRUE if the RDD has been checkpointed
RDD <- function(jrdd, serialized = TRUE, isCached = FALSE,
isCheckpointed = FALSE) {
new("RDD", jrdd, serialized, isCached, isCheckpointed)
}
# Internal constructor: wrap transformation `func` around parent RDD `prev`.
# The backing JavaRDD slot is left NULL and created lazily by getJRDD().
PipelinedRDD <- function(prev, func) {
new("PipelinedRDD", prev, func, NULL)
}
# The jrdd accessor function.
# For a plain RDD the backing JavaRDD already exists and is returned as-is;
# PipelinedRDD overrides this below to materialize the JavaRDD lazily.
setGeneric("getJRDD", function(rdd, ...) { standardGeneric("getJRDD") })
setMethod("getJRDD", signature(rdd = "RDD"), function(rdd) rdd@jrdd )
setMethod("getJRDD", signature(rdd = "PipelinedRDD"),
function(rdd, dataSerialization = TRUE) {
if (!is.null(rdd@env$jrdd_val)) {
return(rdd@env$jrdd_val)
}
# TODO: This is to handle anonymous functions. Find out a
# better way to do this.
computeFunc <- function(split, part) {
rdd@func(split, part)
}
serializedFuncArr <- serialize("computeFunc", connection = NULL)
packageNamesArr <- serialize(.sparkREnv[[".packages"]],
connection = NULL)
broadcastArr <- lapply(ls(.broadcastNames),
function(name) { get(name, .broadcastNames) })
depsBin <- getDependencies(computeFunc)
prev_jrdd <- rdd@prev_jrdd
if (dataSerialization) {
rddRef <- newJObject("edu.berkeley.cs.amplab.sparkr.RRDD",
callJMethod(prev_jrdd, "rdd"),
serializedFuncArr,
rdd@env$prev_serialized,
depsBin,
packageNamesArr,
as.character(.sparkREnv[["libname"]]),
broadcastArr,
callJMethod(prev_jrdd, "classTag"))
} else {
rddRef <- newJObject("edu.berkeley.cs.amplab.sparkr.StringRRDD",
callJMethod(prev_jrdd, "rdd"),
serializedFuncArr,
rdd@env$prev_serialized,
depsBin,
packageNamesArr,
as.character(.sparkREnv[["libname"]]),
broadcastArr,
callJMethod(prev_jrdd, "classTag"))
}
# Save the serialization flag after we create a RRDD
rdd@env$serialized <- dataSerialization
rdd@env$jrdd_val <- callJMethod(rddRef, "asJavaRDD") # rddRef$asJavaRDD()
rdd@env$jrdd_val
})
setValidity("RDD",
function(object) {
jrdd <- getJRDD(object)
cls <- callJMethod(jrdd, "getClass")
className <- callJMethod(cls, "getName")
if (grep("spark.api.java.*RDD*", className) == 1) {
TRUE
} else {
paste("Invalid RDD class ", className)
}
})
############ Actions and Transformations ############

#' Persist an RDD
#'
#' Persist this RDD with the default storage level (MEMORY_ONLY).
#'
#' @param x The RDD to cache
#' @rdname cache-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' cache(rdd)
#'}
setGeneric("cache", function(x) { standardGeneric("cache") })

#' @rdname cache-methods
#' @aliases cache,RDD-method
setMethod("cache",
          signature(x = "RDD"),
          function(x) {
            # Cache on the JVM side, then mirror the state in the R-side
            # bookkeeping environment; returning x allows chained calls.
            backingRDD <- getJRDD(x)
            callJMethod(backingRDD, "cache")
            x@env$isCached <- TRUE
            x
          })
#' Persist an RDD
#'
#' Persist this RDD with the specified storage level. For details of the
#' supported storage levels, refer to
#' http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence.
#'
#' @param x The RDD to persist
#' @param newLevel The new storage level to be assigned
#' @rdname persist
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' persist(rdd, "MEMORY_AND_DISK")
#'}
setGeneric("persist", function(x, newLevel) { standardGeneric("persist") })

#' @rdname persist
#' @aliases persist,RDD-method
setMethod("persist",
          signature(x = "RDD", newLevel = "character"),
          function(x, newLevel = c("DISK_ONLY",
                                   "DISK_ONLY_2",
                                   "MEMORY_AND_DISK",
                                   "MEMORY_AND_DISK_2",
                                   "MEMORY_AND_DISK_SER",
                                   "MEMORY_AND_DISK_SER_2",
                                   "MEMORY_ONLY",
                                   "MEMORY_ONLY_2",
                                   "MEMORY_ONLY_SER",
                                   "MEMORY_ONLY_SER_2",
                                   "OFF_HEAP")) {
            # Normalize first: the previous code discarded match.arg()'s
            # return value, so an abbreviated level (e.g. "OFF") passed
            # validation but then fell through switch() and produced a NULL
            # storage level.
            newLevel <- match.arg(newLevel)
            # Every storage level is exposed under the same name as a static
            # accessor on org.apache.spark.storage.StorageLevel, so a single
            # callJStatic replaces the former 11-branch switch().
            storageLevel <- callJStatic("org.apache.spark.storage.StorageLevel",
                                        newLevel)
            callJMethod(getJRDD(x), "persist", storageLevel)
            x@env$isCached <- TRUE
            x
          })
#' Unpersist an RDD
#'
#' Mark the RDD as non-persistent, and remove all blocks for it from memory and
#' disk.
#'
#' @param x The RDD to unpersist
#' @rdname unpersist-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' cache(rdd) # rdd@@env$isCached == TRUE
#' unpersist(rdd) # rdd@@env$isCached == FALSE
#'}
setGeneric("unpersist", function(x) { standardGeneric("unpersist") })

#' @rdname unpersist-methods
#' @aliases unpersist,RDD-method
setMethod("unpersist",
          signature(x = "RDD"),
          function(x) {
            # Drop the JVM-side blocks for this RDD, then clear the local
            # cached flag and return the RDD for chaining.
            backingRDD <- getJRDD(x)
            callJMethod(backingRDD, "unpersist")
            x@env$isCached <- FALSE
            x
          })
#' Checkpoint an RDD
#'
#' Mark this RDD for checkpointing. It will be saved to a file inside the
#' checkpoint directory set with setCheckpointDir() and all references to its
#' parent RDDs will be removed. This function must be called before any job has
#' been executed on this RDD. It is strongly recommended that this RDD is
#' persisted in memory, otherwise saving it on a file will require recomputation.
#'
#' @param x The RDD to checkpoint
#' @rdname checkpoint-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' setCheckpointDir(sc, "checkpoints")
#' rdd <- parallelize(sc, 1:10, 2L)
#' checkpoint(rdd)
#'}
setGeneric("checkpoint", function(x) { standardGeneric("checkpoint") })

#' @rdname checkpoint-methods
#' @aliases checkpoint,RDD-method
setMethod("checkpoint",
          signature(x = "RDD"),
          function(x) {
            # Mark the backing JavaRDD for checkpointing and record the fact
            # in the R-side bookkeeping environment.
            callJMethod(getJRDD(x), "checkpoint")
            x@env$isCheckpointed <- TRUE
            x
          })
#' Gets the number of partitions of an RDD
#'
#' @param x A RDD.
#' @return the number of partitions of rdd as an integer.
#' @rdname numPartitions
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' numPartitions(rdd) # 2L
#'}
setGeneric("numPartitions", function(x) { standardGeneric("numPartitions") })

#' @rdname numPartitions
#' @aliases numPartitions,RDD-method
setMethod("numPartitions",
          signature(x = "RDD"),
          function(x) {
            # Ask the JVM for the split list of the backing RDD and report
            # its size.
            splits <- callJMethod(getJRDD(x), "splits")
            callJMethod(splits, "size")
          })
#' Collect elements of an RDD
#'
#' @description
#' \code{collect} returns a list that contains all of the elements in this RDD.
#'
#' @param x The RDD to collect
#' @param ... Other optional arguments to collect
#' @param flatten FALSE if the list should not be flattened
#' @return a list containing elements in the RDD
#' @rdname collect-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' collect(rdd) # list from 1 to 10
#' collectPartition(rdd, 0L) # list from 1 to 5
#'}
setGeneric("collect", function(x, ...) { standardGeneric("collect") })

#' @rdname collect-methods
#' @aliases collect,RDD-method
setMethod("collect",
          signature(x = "RDD"),
          function(x, flatten = TRUE) {
            # Fetch every element from the JVM and convert the resulting
            # Java list to an R list. A pairwise RDD is assumed to be backed
            # by a JavaPairRDD.
            convertJListToRList(callJMethod(getJRDD(x), "collect"), flatten)
          })
#' @rdname collect-methods
#' @export
#' @description
#' \code{collectPartition} returns a list that contains all of the elements
#' in the specified partition of the RDD.
#' @param partitionId the partition to collect (starts from 0)
setGeneric("collectPartition",
           function(x, partitionId) {
             standardGeneric("collectPartition")
           })

#' @rdname collect-methods
#' @aliases collectPartition,integer,RDD-method
setMethod("collectPartition",
          signature(x = "RDD", partitionId = "integer"),
          function(x, partitionId) {
            # collectPartitions takes a list of partition indices; request
            # just this one and unwrap the single Java list that comes back.
            partIndices <- as.list(as.integer(partitionId))
            jPartitionsList <- callJMethod(getJRDD(x),
                                           "collectPartitions",
                                           partIndices)
            convertJListToRList(jPartitionsList[[1]], flatten = TRUE)
          })
#' @rdname collect-methods
#' @export
#' @description
#' \code{collectAsMap} returns a named list as a map that contains all of the elements
#' in a key-value pair RDD.
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)), 2L)
#' collectAsMap(rdd) # list(`1` = 2, `3` = 4)
#'}
setGeneric("collectAsMap", function(x) { standardGeneric("collectAsMap") })

#' @rdname collect-methods
#' @aliases collectAsMap,RDD-method
setMethod("collectAsMap",
          signature(x = "RDD"),
          function(x) {
            # Insert every pair into an environment keyed by the stringified
            # key (later duplicates overwrite earlier ones), then convert the
            # environment into a named list.
            kvPairs <- collect(x)
            acc <- new.env()
            for (pair in kvPairs) {
              assign(as.character(pair[[1]]), pair[[2]], envir = acc)
            }
            as.list(acc)
          })
#' Return the number of elements in the RDD.
#'
#' @param x The RDD to count
#' @return number of elements in the RDD.
#' @rdname count
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' count(rdd) # 10
#' length(rdd) # Same as count
#'}
setGeneric("count", function(x) { standardGeneric("count") })

#' @rdname count
#' @aliases count,RDD-method
setMethod("count",
          signature(x = "RDD"),
          function(x) {
            # Count each partition on the executors, then add the
            # per-partition totals together on the driver.
            perPartition <- collect(lapplyPartition(x, function(part) {
              as.integer(length(part))
            }))
            sum(as.integer(perPartition))
          })
#' Return the number of elements in the RDD
#' @export
#' @rdname count
setMethod("length",
signature(x = "RDD"),
function(x) {
# S4 method on base length(): simply delegates to count() above.
count(x)
})
#' Return the count of each unique value in this RDD as a list of
#' (value, count) pairs.
#'
#' Same as countByValue in Spark.
#'
#' @param x The RDD to count
#' @return list of (value, count) pairs, where count is number of each unique
#' value in rdd.
#' @rdname countByValue
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, c(1,2,3,2,1))
#' countByValue(rdd) # (1,2L), (2,2L), (3,1L)
#'}
setGeneric("countByValue", function(x) { standardGeneric("countByValue") })

#' @rdname countByValue
#' @aliases countByValue,RDD-method
setMethod("countByValue",
          signature(x = "RDD"),
          function(x) {
            # Classic map-reduce count: pair each element with 1L, then sum
            # per key, keeping the same number of partitions as the input.
            pairs <- lapply(x, function(item) {
              list(item, 1L)
            })
            collect(reduceByKey(pairs, `+`, numPartitions(x)))
          })
#' Apply a function to all elements
#'
#' This function creates a new RDD by applying the given transformation to all
#' elements of the given RDD
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each element
#' @return a new RDD created by the transformation.
#' @rdname lapply
#' @aliases lapply
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' multiplyByTwo <- lapply(rdd, function(x) { x * 2 })
#' collect(multiplyByTwo) # 2,4,6...
#'}
setMethod("lapply",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Lift the element-wise FUN to a partition-wise function; the
            # split index is not needed and is ignored.
            applyToPartition <- function(split, iterator) {
              lapply(iterator, FUN)
            }
            lapplyPartitionsWithIndex(X, applyToPartition)
          })
#' @rdname lapply
#' @export
setGeneric("map", function(X, FUN) {
standardGeneric("map") })
#' @rdname lapply
#' @aliases map,RDD,function-method
setMethod("map",
signature(X = "RDD", FUN = "function"),
function(X, FUN) {
lapply(X, FUN)
})
#' Flatten results after apply a function to all elements
#'
#' This function return a new RDD by first applying a function to all
#' elements of this RDD, and then flattening the results.
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each element; should return a list.
#' @return a new RDD created by the transformation.
#' @rdname flatMap
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' multiplyByTwo <- flatMap(rdd, function(x) { list(x*2, x*10) })
#' collect(multiplyByTwo) # 2,20,4,40,6,60...
#'}
setGeneric("flatMap", function(X, FUN) {
  standardGeneric("flatMap") })
#' @rdname flatMap
#' @aliases flatMap,RDD,function-method
setMethod("flatMap",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            partitionFunc <- function(part) {
              # Flatten only one level so that list-valued elements returned
              # by FUN are preserved. Use FALSE, not the reassignable alias F.
              unlist(
                lapply(part, FUN),
                recursive = FALSE
              )
            }
            lapplyPartition(X, partitionFunc)
          })
#' Apply a function to each partition of an RDD
#'
#' Return a new RDD by applying a function to each partition of this RDD.
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each partition.
#' @return a new RDD created by the transformation.
#' @rdname lapplyPartition
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' partitionSum <- lapplyPartition(rdd, function(part) { Reduce("+", part) })
#' collect(partitionSum) # 15, 40
#'}
setGeneric("lapplyPartition", function(X, FUN) {
  standardGeneric("lapplyPartition") })
#' @rdname lapplyPartition
#' @aliases lapplyPartition,RDD,function-method
setMethod("lapplyPartition",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Delegate to the indexed variant, discarding the partition index.
            lapplyPartitionsWithIndex(X, function(splitIndex, part) {
              FUN(part)
            })
          })
#' mapPartitions is the same as lapplyPartition.
#'
#' @rdname lapplyPartition
#' @export
setGeneric("mapPartitions", function(X, FUN) {
  standardGeneric("mapPartitions") })
#' @rdname lapplyPartition
#' @aliases mapPartitions,RDD,function-method
setMethod("mapPartitions",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Spark-style alias for lapplyPartition.
            lapplyPartition(X, FUN)
          })
#' Return a new RDD by applying a function to each partition of this RDD, while
#' tracking the index of the original partition.
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each partition; takes the partition
#'        index and a list of elements in the particular partition.
#' @return a new RDD created by the transformation.
#' @rdname lapplyPartitionsWithIndex
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 5L)
#' prod <- lapplyPartitionsWithIndex(rdd, function(split, part) {
#'                                  split * Reduce("+", part) })
#' collect(prod, flatten = FALSE) # 0, 7, 22, 45, 76
#'}
setGeneric("lapplyPartitionsWithIndex", function(X, FUN) {
  standardGeneric("lapplyPartitionsWithIndex") })
#' @rdname lapplyPartitionsWithIndex
#' @aliases lapplyPartitionsWithIndex,RDD,function-method
setMethod("lapplyPartitionsWithIndex",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Wrap FUN in a fresh closure so it is captured (and later
            # serialized) with the pipelined RDD.
            wrappedFunc <- function(split, part) {
              FUN(split, part)
            }
            PipelinedRDD(X, wrappedFunc)
          })
#' @rdname lapplyPartitionsWithIndex
#' @export
setGeneric("mapPartitionsWithIndex", function(X, FUN) {
  standardGeneric("mapPartitionsWithIndex") })
#' @rdname lapplyPartitionsWithIndex
#' @aliases mapPartitionsWithIndex,RDD,function-method
setMethod("mapPartitionsWithIndex",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Spark-style alias for lapplyPartitionsWithIndex.
            lapplyPartitionsWithIndex(X, FUN)
          })
#' This function returns a new RDD containing only the elements that satisfy
#' a predicate (i.e. returning TRUE in a given logical function).
#' The same as `filter()' in Spark.
#'
#' @param x The RDD to be filtered.
#' @param f A unary predicate function.
#' @rdname filterRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' unlist(collect(filterRDD(rdd, function (x) { x < 3 }))) # c(1, 2)
#'}
setGeneric("filterRDD",
           function(x, f) { standardGeneric("filterRDD") })
#' @rdname filterRDD
#' @aliases filterRDD,RDD,function-method
setMethod("filterRDD",
          signature(x = "RDD", f = "function"),
          function(x, f) {
            # Apply base R's Filter to each partition independently.
            lapplyPartition(x, function(part) { Filter(f, part) })
          })
#' @rdname filterRDD
#' @export
#' @aliases Filter
setMethod("Filter",
          signature(f = "function", x = "RDD"),
          function(f, x) {
            # Make base R's Filter(f, x) work on RDDs as well.
            filterRDD(x, f)
          })
#' Reduce across elements of an RDD.
#'
#' This function reduces the elements of this RDD using the
#' specified commutative and associative binary operator.
#'
#' @param x The RDD to reduce
#' @param func Commutative and associative function to apply on elements
#'             of the RDD.
#' @export
#' @rdname reduce
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' reduce(rdd, "+") # 55
#'}
setGeneric("reduce", function(x, func) { standardGeneric("reduce") })
#' @rdname reduce
#' @aliases reduce,RDD,ANY-method
setMethod("reduce",
          signature(x = "RDD", func = "ANY"),
          function(x, func) {
            # Reduce within each partition first ...
            reducePartition <- function(part) {
              Reduce(func, part)
            }
            partitionList <- collect(lapplyPartition(x, reducePartition),
                                     flatten = FALSE)
            # ... then reduce the per-partition results on the driver.
            Reduce(func, partitionList)
          })
#' Get the maximum element of an RDD.
#'
#' @param x The RDD to get the maximum element from
#' @return The maximum element of the RDD.
#' @export
#' @rdname maximum
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' maximum(rdd) # 10
#'}
setGeneric("maximum", function(x) { standardGeneric("maximum") })
#' @rdname maximum
#' @aliases maximum,RDD
setMethod("maximum",
          signature(x = "RDD"),
          function(x) {
            # max is commutative and associative, so a plain reduce suffices.
            reduce(x, max)
          })
#' Get the minimum element of an RDD.
#'
#' @param x The RDD to get the minimum element from
#' @return The minimum element of the RDD.
#' @export
#' @rdname minimum
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' minimum(rdd) # 1
#'}
setGeneric("minimum", function(x) { standardGeneric("minimum") })
#' @rdname minimum
#' @aliases minimum,RDD
setMethod("minimum",
          signature(x = "RDD"),
          function(x) {
            # min is commutative and associative, so a plain reduce suffices.
            reduce(x, min)
          })
#' Applies a function to all elements in an RDD, and force evaluation.
#'
#' @param x The RDD to apply the function
#' @param func The function to be applied.
#' @return invisible NULL.
#' @export
#' @rdname foreach
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' foreach(rdd, function(x) { save(x, file=...) })
#'}
setGeneric("foreach", function(x, func) { standardGeneric("foreach") })
#' @rdname foreach
#' @aliases foreach,RDD,function-method
setMethod("foreach",
          signature(x = "RDD", func = "function"),
          function(x, func) {
            # Run func over every element for its side effects only; each
            # partition yields NULL so nothing meaningful is collected.
            invisible(collect(mapPartitions(x, function(part) {
              lapply(part, func)
              NULL
            })))
          })
#' Applies a function to each partition in an RDD, and force evaluation.
#'
#' @export
#' @rdname foreach
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' foreachPartition(rdd, function(part) { save(part, file=...); NULL })
#'}
setGeneric("foreachPartition",
           function(x, func) { standardGeneric("foreachPartition") })
#' @rdname foreach
#' @aliases foreachPartition,RDD,function-method
setMethod("foreachPartition",
          signature(x = "RDD", func = "function"),
          function(x, func) {
            # collect() forces evaluation; the result is discarded.
            invisible(collect(mapPartitions(x, func)))
          })
#' Take elements from an RDD.
#'
#' This function takes the first NUM elements in the RDD and
#' returns them in a list.
#'
#' @param x The RDD to take elements from
#' @param num Number of elements to take
#' @return A list of at most num elements from the RDD.
#' @rdname take
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' take(rdd, 2L) # list(1, 2)
#'}
setGeneric("take", function(x, num) { standardGeneric("take") })
#' @rdname take
#' @aliases take,RDD,numeric-method
setMethod("take",
          signature(x = "RDD", num = "numeric"),
          function(x, num) {
            resList <- list()
            index <- -1
            jrdd <- getJRDD(x)
            numPartitions <- numPartitions(x)
            # TODO(shivaram): Collect more than one partition based on size
            # estimates similar to the scala version of `take`.
            # Fetch one partition at a time from the JVM until we have
            # accumulated num elements or run out of partitions.
            while (TRUE) {
              index <- index + 1
              if (length(resList) >= num || index >= numPartitions)
                break
              # a JList of byte arrays
              partitionArr <- callJMethod(jrdd, "collectPartitions", as.list(as.integer(index)))
              partition <- partitionArr[[1]]
              # Only take as many elements as are still needed.
              size <- num - length(resList)
              # elems is capped to have at most `size` elements
              elems <- convertJListToRList(partition,
                                           flatten = TRUE,
                                           logicalUpperBound = size,
                                           serialized = x@env$serialized)
              # TODO: Check if this append is O(n^2)?
              resList <- append(resList, elems)
            }
            resList
          })
#' Removes the duplicates from RDD.
#'
#' This function returns a new RDD containing the distinct elements in the
#' given RDD. The same as `distinct()' in Spark.
#'
#' @param x The RDD to remove duplicates from.
#' @param numPartitions Number of partitions to create.
#' @rdname distinct
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, c(1,2,2,3,3,3))
#' sort(unlist(collect(distinct(rdd)))) # c(1, 2, 3)
#'}
setGeneric("distinct",
           function(x, numPartitions) { standardGeneric("distinct") })
# Virtual class so the numPartitions argument may be omitted or an integer.
setClassUnion("missingOrInteger", c("missing", "integer"))
#' @rdname distinct
#' @aliases distinct,RDD,missingOrInteger-method
setMethod("distinct",
          signature(x = "RDD", numPartitions = "missingOrInteger"),
          function(x, numPartitions) {
            if (missing(numPartitions)) {
              numPartitions <- SparkR::numPartitions(x)
            }
            # Classic dedup-by-shuffle: map each element to (element, NULL),
            # keep one value per key, then drop the dummy values.
            identical.mapped <- lapply(x, function(x) { list(x, NULL) })
            reduced <- reduceByKey(identical.mapped,
                                   function(x, y) { x },
                                   numPartitions)
            resRDD <- lapply(reduced, function(x) { x[[1]] })
            resRDD
          })
#' Return an RDD that is a sampled subset of the given RDD.
#'
#' The same as `sample()' in Spark. (We rename it due to signature
#' inconsistencies with the `sample()' function in R's base package.)
#'
#' @param x The RDD to sample elements from
#' @param withReplacement Sampling with replacement or not
#' @param fraction The (rough) sample target fraction
#' @param seed Randomness seed value
#' @rdname sampleRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10) # ensure each num is in its own split
#' collect(sampleRDD(rdd, FALSE, 0.5, 1618L)) # ~5 distinct elements
#' collect(sampleRDD(rdd, TRUE, 0.5, 9L)) # ~5 elements possibly with duplicates
#'}
setGeneric("sampleRDD",
           function(x, withReplacement, fraction, seed) {
             standardGeneric("sampleRDD")
           })
#' @rdname sampleRDD
#' @aliases sampleRDD,RDD
setMethod("sampleRDD",
          signature(x = "RDD", withReplacement = "logical",
                    fraction = "numeric", seed = "integer"),
          function(x, withReplacement, fraction, seed) {
            # The sampler: takes a partition and returns its sampled version.
            samplingFunc <- function(split, part) {
              # Seed every partition identically, then advance the RNG stream
              # by `split` draws so each partition samples differently.
              set.seed(seed)
              res <- vector("list", length(part))
              len <- 0
              # Discards some random values to ensure each partition has a
              # different random seed.
              runif(split)
              for (elem in part) {
                if (withReplacement) {
                  # With replacement: each element appears Poisson(fraction)
                  # times, mirroring Spark's sampler.
                  count <- rpois(1, fraction)
                  if (count > 0) {
                    res[(len + 1):(len + count)] <- rep(list(elem), count)
                    len <- len + count
                  }
                } else {
                  # Without replacement: Bernoulli trial per element.
                  if (runif(1) < fraction) {
                    len <- len + 1
                    res[[len]] <- elem
                  }
                }
              }
              # TODO(zongheng): look into the performance of the current
              # implementation. Look into some iterator package? Note that
              # Scala avoids many calls to creating an empty list and PySpark
              # similarly achieves this using `yield'.
              if (len > 0)
                res[1:len]
              else
                list()
            }
            lapplyPartitionsWithIndex(x, samplingFunc)
          })
#' Return a list of the elements that are a sampled subset of the given RDD.
#'
#' @param x The RDD to sample elements from
#' @param withReplacement Sampling with replacement or not
#' @param num Number of elements to return
#' @param seed Randomness seed value
#' @return A list of exactly num sampled elements (or all elements, when
#'         sampling without replacement and num exceeds the RDD size).
#' @rdname takeSample
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:100)
#' # exactly 5 elements sampled, which may not be distinct
#' takeSample(rdd, TRUE, 5L, 1618L)
#' # exactly 5 distinct elements sampled
#' takeSample(rdd, FALSE, 5L, 16181618L)
#'}
setGeneric("takeSample",
           function(x, withReplacement, num, seed) {
             standardGeneric("takeSample")
           })
#' @rdname takeSample
#' @aliases takeSample,RDD
setMethod("takeSample", signature(x = "RDD", withReplacement = "logical",
                                  num = "integer", seed = "integer"),
          function(x, withReplacement, num, seed) {
            # This function is ported from RDD.scala.
            fraction <- 0.0
            total <- 0
            # Oversampling factor so a single pass usually yields enough.
            multiplier <- 3.0
            initialCount <- count(x)
            maxSelected <- 0
            MAXINT <- .Machine$integer.max
            if (num < 0) {
              stop("Negative number of elements requested")
            }
            # Cap the request at what fits in an R integer.
            if (initialCount > MAXINT - 1) {
              maxSelected <- MAXINT - 1
            } else {
              maxSelected <- initialCount
            }
            if (num > initialCount && !withReplacement) {
              # Without replacement we can return at most the whole RDD.
              total <- maxSelected
              fraction <- multiplier * (maxSelected + 1) / initialCount
            } else {
              total <- num
              fraction <- multiplier * (num + 1) / initialCount
            }
            set.seed(seed)
            samples <- collect(sampleRDD(x, withReplacement, fraction,
                                         as.integer(ceiling(runif(1,
                                                                  -MAXINT,
                                                                  MAXINT)))))
            # If the first sample didn't turn out large enough, keep trying to
            # take samples; this shouldn't happen often because we use a big
            # multiplier for the initial size.
            while (length(samples) < total) {
              samples <- collect(sampleRDD(x, withReplacement, fraction,
                                           as.integer(ceiling(runif(1,
                                                                    -MAXINT,
                                                                    MAXINT)))))
            }
            # Shuffle the oversampled result and keep exactly `total` elements.
            # TODO(zongheng): investigate if this call is an in-place shuffle?
            sample(samples)[1:total]
          })
#' Creates tuples of the elements in this RDD by applying a function.
#'
#' @param x The RDD.
#' @param func The function to be applied.
#' @rdname keyBy
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1, 2, 3))
#' collect(keyBy(rdd, function(x) { x*x })) # list(list(1, 1), list(4, 2), list(9, 3))
#'}
setGeneric("keyBy", function(x, func) { standardGeneric("keyBy") })
#' @rdname keyBy
#' @aliases keyBy,RDD
setMethod("keyBy",
          signature(x = "RDD", func = "function"),
          function(x, func) {
            # Turn each element into a (key, element) pair, key = func(element).
            lapply(x, function(elem) {
              list(func(elem), elem)
            })
          })
#' Save this RDD as a SequenceFile of serialized objects.
#'
#' @param x The RDD to save
#' @param path The directory where the file is saved
#' @rdname saveAsObjectFile
#' @seealso objectFile
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:3)
#' saveAsObjectFile(rdd, "/tmp/sparkR-tmp")
#'}
setGeneric("saveAsObjectFile", function(x, path) { standardGeneric("saveAsObjectFile") })
#' @rdname saveAsObjectFile
#' @aliases saveAsObjectFile,RDD
setMethod("saveAsObjectFile",
          signature(x = "RDD", path = "character"),
          function(x, path) {
            # If the RDD is in string format, need to serialize it before saving it because when
            # objectFile() is invoked to load the saved file, only serialized format is assumed.
            if (!x@env$serialized) {
              x <- reserialize(x)
            }
            # Return nothing
            invisible(callJMethod(getJRDD(x), "saveAsObjectFile", path))
          })
#' Save this RDD as a text file, using string representations of elements.
#'
#' @param x The RDD to save
#' @param path The directory where the splits of the text file are saved
#' @rdname saveAsTextFile
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:3)
#' saveAsTextFile(rdd, "/tmp/sparkR-tmp")
#'}
setGeneric("saveAsTextFile", function(x, path) { standardGeneric("saveAsTextFile") })
#' @rdname saveAsTextFile
#' @aliases saveAsTextFile,RDD
setMethod("saveAsTextFile",
          signature(x = "RDD", path = "character"),
          function(x, path) {
            # Convert every element to its string representation first.
            stringRdd <- lapply(x, function(elem) {
              toString(elem)
            })
            # saveAsTextFile is a side effect on the JVM; return nothing.
            invisible(
              callJMethod(getJRDD(stringRdd, dataSerialization = FALSE), "saveAsTextFile", path))
          })
#' Sort an RDD by the given key function.
#'
#' @param x An RDD to be sorted.
#' @param func A function used to compute the sort key for each element.
#' @param ascending A flag to indicate whether the sorting is ascending or descending.
#' @param numPartitions Number of partitions to create.
#' @return An RDD where all elements are sorted.
#' @rdname sortBy
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(3, 2, 1))
#' collect(sortBy(rdd, function(x) { x })) # list (1, 2, 3)
#'}
setGeneric("sortBy", function(x,
                              func,
                              ascending = TRUE,
                              numPartitions = 1L) {
  standardGeneric("sortBy")
})
#' @rdname sortBy
#' @aliases sortBy,RDD,RDD-method
setMethod("sortBy",
          signature(x = "RDD", func = "function"),
          # NOTE(review): the generic declares numPartitions = 1L while the
          # method defaults to the RDD's partition count — confirm which
          # default actually applies for callers that omit numPartitions.
          function(x, func, ascending = TRUE, numPartitions = SparkR::numPartitions(x)) {
            # Key each element by func, sort by key, then drop the keys.
            values(sortByKey(keyBy(x, func), ascending, numPartitions))
          })
# Helper function to get first N elements from an RDD in the specified order.
# Param:
#   x An RDD.
#   num Number of elements to return.
#   ascending A flag to indicate whether the sorting is ascending or descending.
# Return:
#   A list of the first N elements from the RDD in the specified order
#   (all elements when the RDD has fewer than num elements).
#
takeOrderedElem <- function(x, num, ascending = TRUE) {
  if (num <= 0L) {
    return(list())
  }
  # Within a partition, keep only the best `num` elements.
  partitionFunc <- function(part) {
    if (num < length(part)) {
      # R limitation: order works only on primitive types!
      ord <- order(unlist(part, recursive = FALSE), decreasing = !ascending)
      list(part[ord[seq_len(num)]])
    } else {
      list(part)
    }
  }
  # Merge two partitions' candidates, keeping the best `num` overall.
  reduceFunc <- function(elems, part) {
    newElems <- append(elems, part)
    # R limitation: order works only on primitive types!
    ord <- order(unlist(newElems, recursive = FALSE), decreasing = !ascending)
    # Bound the index range by the number of available elements; indexing
    # with ord[1:num] when num > length(newElems) would pad with NULLs.
    newElems[ord[seq_len(min(num, length(newElems)))]]
  }
  newRdd <- mapPartitions(x, partitionFunc)
  reduce(newRdd, reduceFunc)
}
#' Returns the first N elements from an RDD in ascending order.
#'
#' @param x An RDD.
#' @param num Number of elements to return.
#' @return The first N elements from the RDD in ascending order.
#' @rdname takeOrdered
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
#' takeOrdered(rdd, 6L) # list(1, 2, 3, 4, 5, 6)
#'}
setGeneric("takeOrdered", function(x, num) { standardGeneric("takeOrdered") })
#' @rdname takeOrdered
#' @aliases takeOrdered,RDD,RDD-method
setMethod("takeOrdered",
          signature(x = "RDD", num = "integer"),
          function(x, num) {
            # ascending defaults to TRUE: smallest elements first.
            takeOrderedElem(x, num)
          })
#' Returns the top N elements from an RDD.
#'
#' @param x An RDD.
#' @param num Number of elements to return.
#' @return The top N elements from the RDD.
#' @rdname top
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
#' top(rdd, 6L) # list(10, 9, 7, 6, 5, 4)
#'}
setGeneric("top", function(x, num) { standardGeneric("top") })
#' @rdname top
#' @aliases top,RDD,RDD-method
setMethod("top",
          signature(x = "RDD", num = "integer"),
          function(x, num) {
            # Descending order: largest elements first.
            takeOrderedElem(x, num, FALSE)
          })
#' Fold an RDD using a given associative function and a neutral "zero value".
#'
#' Aggregate the elements of each partition, and then the results for all the
#' partitions, using a given associative function and a neutral "zero value".
#'
#' @param x An RDD.
#' @param zeroValue A neutral "zero value".
#' @param op An associative function for the folding operation.
#' @return The folding result.
#' @rdname fold
#' @seealso reduce
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1, 2, 3, 4, 5))
#' fold(rdd, 0, "+") # 15
#'}
setGeneric("fold", function(x, zeroValue, op) { standardGeneric("fold") })
#' @rdname fold
#' @aliases fold,RDD,RDD-method
setMethod("fold",
          signature(x = "RDD", zeroValue = "ANY", op = "ANY"),
          function(x, zeroValue, op) {
            # fold is aggregate with the same operator for both phases.
            aggregateRDD(x, zeroValue, op, op)
          })
#' Aggregate an RDD using the given combine functions and a neutral "zero value".
#'
#' Aggregate the elements of each partition, and then the results for all the
#' partitions, using given combine functions and a neutral "zero value".
#'
#' @param x An RDD.
#' @param zeroValue A neutral "zero value".
#' @param seqOp A function to aggregate the RDD elements. It may return a different
#'              result type from the type of the RDD elements.
#' @param combOp A function to aggregate results of seqOp.
#' @return The aggregation result.
#' @rdname aggregateRDD
#' @seealso reduce
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1, 2, 3, 4))
#' zeroValue <- list(0, 0)
#' seqOp <- function(x, y) { list(x[[1]] + y, x[[2]] + 1) }
#' combOp <- function(x, y) { list(x[[1]] + y[[1]], x[[2]] + y[[2]]) }
#' aggregateRDD(rdd, zeroValue, seqOp, combOp) # list(10, 4)
#'}
setGeneric("aggregateRDD", function(x, zeroValue, seqOp, combOp) { standardGeneric("aggregateRDD") })
#' @rdname aggregateRDD
#' @aliases aggregateRDD,RDD,RDD-method
setMethod("aggregateRDD",
          signature(x = "RDD", zeroValue = "ANY", seqOp = "ANY", combOp = "ANY"),
          function(x, zeroValue, seqOp, combOp) {
            # Fold each partition with seqOp starting from zeroValue ...
            perPartition <- function(part) {
              Reduce(seqOp, part, zeroValue)
            }
            partResults <- collect(lapplyPartition(x, perPartition),
                                   flatten = FALSE)
            # ... then combine the per-partition results with combOp.
            Reduce(combOp, partResults, zeroValue)
          })
#' Pipes elements to a forked external process.
#'
#' The same as 'pipe()' in Spark.
#'
#' @param x The RDD whose elements are piped to the forked external process.
#' @param command The command to fork an external process.
#' @param env A named list to set environment variables of the external process.
#' @return A new RDD created by piping all elements to a forked external process.
#' @rdname pipeRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' collect(pipeRDD(rdd, "more"))
#' # Output: c("1", "2", ..., "10")
#'}
setGeneric("pipeRDD", function(x, command, env = list()) {
  standardGeneric("pipeRDD")
})
#' @rdname pipeRDD
#' @aliases pipeRDD,RDD,character-method
setMethod("pipeRDD",
          signature(x = "RDD", command = "character"),
          function(x, command, env = list()) {
            func <- function(part) {
              # Strip trailing newlines so each element maps to one input line.
              trim.trailing.func <- function(x) {
                sub("[\r\n]*$", "", toString(x))
              }
              input <- unlist(lapply(part, trim.trailing.func))
              # Feed the partition to the external command via stdin and
              # capture its stdout lines as the new partition.
              res <- system2(command, stdout = TRUE, input = input, env = env)
              lapply(res, trim.trailing.func)
            }
            lapplyPartition(x, func)
          })
# TODO: Consider caching the name in the RDD's environment
#' Return an RDD's name.
#'
#' @param x The RDD whose name is returned.
#' @return The name of the RDD, or NULL if no name has been set.
#' @rdname name
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1,2,3))
#' name(rdd) # NULL (if not set before)
#'}
setGeneric("name", function(x) { standardGeneric("name") })
#' @rdname name
#' @aliases name,RDD
setMethod("name",
          signature(x = "RDD"),
          function(x) {
            # The name is stored on the JVM-side RDD, so ask it directly.
            callJMethod(getJRDD(x), "name")
          })
#' Set an RDD's name.
#'
#' @param x The RDD whose name is to be set.
#' @param name The RDD name to be set.
#' @return a new RDD renamed.
#' @rdname setName
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1,2,3))
#' setName(rdd, "myRDD")
#' name(rdd) # "myRDD"
#'}
setGeneric("setName", function(x, name) { standardGeneric("setName") })
#' @rdname setName
#' @aliases setName,RDD
setMethod("setName",
          signature(x = "RDD", name = "character"),
          function(x, name) {
            # The name lives on the JVM-side RDD; set it there.
            callJMethod(getJRDD(x), "setName", name)
            # Return the RDD so calls can be chained.
            x
          })
############ Binary Functions #############
#' Return the union RDD of two RDDs.
#' The same as union() in Spark.
#'
#' @param x An RDD.
#' @param y An RDD.
#' @return a new RDD created by performing the simple union (without removing
#' duplicates) of two input RDDs.
#' @rdname unionRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:3)
#' unionRDD(rdd, rdd) # 1, 2, 3, 1, 2, 3
#'}
setGeneric("unionRDD", function(x, y) { standardGeneric("unionRDD") })
#' @rdname unionRDD
#' @aliases unionRDD,RDD,RDD-method
setMethod("unionRDD",
          signature(x = "RDD", y = "RDD"),
          function(x, y) {
            # The JVM union requires both sides to have the same serialization
            # format.
            if (x@env$serialized == y@env$serialized) {
              jrdd <- callJMethod(getJRDD(x), "union", getJRDD(y))
              union.rdd <- RDD(jrdd, x@env$serialized)
            } else {
              # One of the RDDs is not serialized, we need to serialize it first.
              if (!x@env$serialized) {
                x <- reserialize(x)
              } else {
                y <- reserialize(y)
              }
              jrdd <- callJMethod(getJRDD(x), "union", getJRDD(y))
              union.rdd <- RDD(jrdd, TRUE)
            }
            union.rdd
          })
| /pkg/R/RDD.R | permissive | lfjover/SparkR-pkg | R | false | false | 45,265 | r | # RDD in R implemented in S4 OO system.
# Register the S3 "jobj" (Java object reference) class so it can be used as
# an S4 slot type below.
setOldClass("jobj")
#' @title S4 class that represents an RDD
#' @description RDD can be created using functions like
#'              \code{parallelize}, \code{textFile} etc.
#' @rdname RDD
#' @seealso parallelize, textFile
#'
#' @slot env An R environment that stores bookkeeping states of the RDD
#' @slot jrdd Java object reference to the backing JavaRDD
#' @export
setClass("RDD",
         slots = list(env = "environment",
                      jrdd = "jobj"))
# An RDD with a pending (not yet materialized) transformation, allowing
# consecutive narrow transformations to be fused into one JVM pipeline stage.
#   prev      - the parent RDD this transformation applies to
#   func      - function(split, iterator) implementing the transformation
#   prev_jrdd - JVM reference to the RDD at the start of the pipeline
setClass("PipelinedRDD",
         slots = list(prev = "RDD",
                      func = "function",
                      prev_jrdd = "jobj"),
         contains = "RDD")
# Initializer for the base RDD class: records the backing JavaRDD reference
# and sets up the mutable bookkeeping environment.
setMethod("initialize", "RDD", function(.Object, jrdd, serialized,
                                        isCached, isCheckpointed) {
  # We use an environment to store mutable states inside an RDD object.
  # Note that R's call-by-value semantics makes modifying slots inside an
  # object (passed as an argument into a function, such as cache()) difficult:
  # i.e. one needs to make a copy of the RDD object and sets the new slot value
  # there.
  # The slots are inheritable from superclass. Here, both `env' and `jrdd' are
  # inherited from RDD, but only the former is used.
  .Object@env <- new.env()
  .Object@env$isCached <- isCached
  .Object@env$isCheckpointed <- isCheckpointed
  .Object@env$serialized <- serialized
  .Object@jrdd <- jrdd
  .Object
})
# Initializer for PipelinedRDD: either starts a new pipeline stage or fuses
# this transformation with the parent's pending one.
setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val) {
  .Object@env <- new.env()
  .Object@env$isCached <- FALSE
  .Object@env$isCheckpointed <- FALSE
  .Object@env$jrdd_val <- jrdd_val
  # This tracks if jrdd_val is serialized
  .Object@env$serialized <- prev@env$serialized
  # NOTE: We use prev_serialized to track if prev_jrdd is serialized
  # prev_serialized is used during the delayed computation of JRDD in getJRDD
  .Object@prev <- prev
  # A parent can be fused into this pipeline only if it has not been
  # materialized (cached or checkpointed) already.
  isPipelinable <- function(rdd) {
    e <- rdd@env
    !(e$isCached || e$isCheckpointed)
  }
  if (!inherits(prev, "PipelinedRDD") || !isPipelinable(prev)) {
    # This transformation is the first in its stage:
    .Object@func <- func
    .Object@prev_jrdd <- getJRDD(prev)
    # Since this is the first step in the pipeline, the prev_serialized
    # is same as serialized here.
    .Object@env$prev_serialized <- .Object@env$serialized
  } else {
    # Fuse: compose this func with the parent's pending func so both run
    # in a single pass over the partition.
    pipelinedFunc <- function(split, iterator) {
      func(split, prev@func(split, iterator))
    }
    .Object@func <- pipelinedFunc
    .Object@prev_jrdd <- prev@prev_jrdd # maintain the pipeline
    # Get if the prev_jrdd was serialized from the parent RDD
    .Object@env$prev_serialized <- prev@env$prev_serialized
  }
  .Object
})
#' @rdname RDD
#' @export
#'
#' @param jrdd Java object reference to the backing JavaRDD
#' @param serialized TRUE if the RDD stores data serialized in R
#' @param isCached TRUE if the RDD is cached
#' @param isCheckpointed TRUE if the RDD has been checkpointed
#' @return A new RDD object wrapping the given JavaRDD reference.
RDD <- function(jrdd, serialized = TRUE, isCached = FALSE,
                isCheckpointed = FALSE) {
  new("RDD", jrdd, serialized, isCached, isCheckpointed)
}
# Internal constructor: a lazily-evaluated RDD whose transformation `func`
# has not been sent to the JVM yet (see getJRDD for the delayed computation).
PipelinedRDD <- function(prev, func) {
  new("PipelinedRDD", prev, func, NULL)
}
# The jrdd accessor function.
setGeneric("getJRDD", function(rdd, ...) { standardGeneric("getJRDD") })
# For a plain RDD the JavaRDD reference already exists.
setMethod("getJRDD", signature(rdd = "RDD"), function(rdd) rdd@jrdd )
# For a PipelinedRDD, lazily build the JVM-side RRDD/StringRRDD the first
# time it is needed, serializing the pipelined function, its dependencies,
# loaded package names, and broadcast variables; the result is memoized in
# rdd@env$jrdd_val.
setMethod("getJRDD", signature(rdd = "PipelinedRDD"),
          function(rdd, dataSerialization = TRUE) {
            if (!is.null(rdd@env$jrdd_val)) {
              return(rdd@env$jrdd_val)
            }
            # TODO: This is to handle anonymous functions. Find out a
            # better way to do this.
            computeFunc <- function(split, part) {
              rdd@func(split, part)
            }
            serializedFuncArr <- serialize("computeFunc", connection = NULL)
            packageNamesArr <- serialize(.sparkREnv[[".packages"]],
                                         connection = NULL)
            broadcastArr <- lapply(ls(.broadcastNames),
                                   function(name) { get(name, .broadcastNames) })
            depsBin <- getDependencies(computeFunc)
            prev_jrdd <- rdd@prev_jrdd
            # RRDD produces R-serialized output; StringRRDD produces plain
            # text (used e.g. by saveAsTextFile).
            if (dataSerialization) {
              rddRef <- newJObject("edu.berkeley.cs.amplab.sparkr.RRDD",
                                   callJMethod(prev_jrdd, "rdd"),
                                   serializedFuncArr,
                                   rdd@env$prev_serialized,
                                   depsBin,
                                   packageNamesArr,
                                   as.character(.sparkREnv[["libname"]]),
                                   broadcastArr,
                                   callJMethod(prev_jrdd, "classTag"))
            } else {
              rddRef <- newJObject("edu.berkeley.cs.amplab.sparkr.StringRRDD",
                                   callJMethod(prev_jrdd, "rdd"),
                                   serializedFuncArr,
                                   rdd@env$prev_serialized,
                                   depsBin,
                                   packageNamesArr,
                                   as.character(.sparkREnv[["libname"]]),
                                   broadcastArr,
                                   callJMethod(prev_jrdd, "classTag"))
            }
            # Save the serialization flag after we create a RRDD
            rdd@env$serialized <- dataSerialization
            rdd@env$jrdd_val <- callJMethod(rddRef, "asJavaRDD") # rddRef$asJavaRDD()
            rdd@env$jrdd_val
          })
# Validity check: the backing JVM object must be a spark.api.java RDD.
# grepl is used instead of `grep(...) == 1` because grep returns integer(0)
# when there is no match, which made the `if` fail with
# "argument is of length zero" instead of reporting the invalid class.
setValidity("RDD",
            function(object) {
              jrdd <- getJRDD(object)
              cls <- callJMethod(jrdd, "getClass")
              className <- callJMethod(cls, "getName")
              if (grepl("spark.api.java.*RDD*", className)) {
                TRUE
              } else {
                paste("Invalid RDD class ", className)
              }
            })
############ Actions and Transformations ############
#' Persist an RDD
#'
#' Persist this RDD with the default storage level (MEMORY_ONLY).
#'
#' @param x The RDD to cache
#' @return The same RDD, marked as cached.
#' @rdname cache-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' cache(rdd)
#'}
setGeneric("cache", function(x) { standardGeneric("cache") })
#' @rdname cache-methods
#' @aliases cache,RDD-method
setMethod("cache",
          signature(x = "RDD"),
          function(x) {
            callJMethod(getJRDD(x), "cache")
            # Mirror the caching state on the R side.
            x@env$isCached <- TRUE
            x
          })
#' Persist an RDD
#'
#' Persist this RDD with the specified storage level. For details of the
#' supported storage levels, refer to
#' http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence.
#'
#' @param x The RDD to persist
#' @param newLevel The new storage level to be assigned
#' @return The same RDD, marked as cached.
#' @rdname persist
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' persist(rdd, "MEMORY_AND_DISK")
#'}
setGeneric("persist", function(x, newLevel) { standardGeneric("persist") })
#' @rdname persist
#' @aliases persist,RDD-method
setMethod("persist",
          signature(x = "RDD", newLevel = "character"),
          function(x, newLevel = c("DISK_ONLY",
                                   "DISK_ONLY_2",
                                   "MEMORY_AND_DISK",
                                   "MEMORY_AND_DISK_2",
                                   "MEMORY_AND_DISK_SER",
                                   "MEMORY_AND_DISK_SER_2",
                                   "MEMORY_ONLY",
                                   "MEMORY_ONLY_2",
                                   "MEMORY_ONLY_SER",
                                   "MEMORY_ONLY_SER_2",
                                   "OFF_HEAP")) {
            # Keep match.arg's result: the original discarded it, so a valid
            # abbreviation passed validation but then fell through the level
            # lookup, yielding a NULL storage level.
            newLevel <- match.arg(newLevel)
            # Each JVM StorageLevel constant shares its name with the
            # validated string, so a single static lookup replaces the
            # previous 11-branch switch.
            storageLevel <- callJStatic("org.apache.spark.storage.StorageLevel",
                                        newLevel)
            callJMethod(getJRDD(x), "persist", storageLevel)
            # Mirror the caching state on the R side.
            x@env$isCached <- TRUE
            x
          })
#' Unpersist an RDD
#'
#' Mark the RDD as non-persistent, and remove all blocks for it from memory and
#' disk.
#'
#' @param x The RDD to unpersist
#' @return The same RDD, marked as not cached.
#' @rdname unpersist-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' cache(rdd) # rdd@@env$isCached == TRUE
#' unpersist(rdd) # rdd@@env$isCached == FALSE
#'}
setGeneric("unpersist", function(x) { standardGeneric("unpersist") })
#' @rdname unpersist-methods
#' @aliases unpersist,RDD-method
setMethod("unpersist",
          signature(x = "RDD"),
          function(x) {
            callJMethod(getJRDD(x), "unpersist")
            # Mirror the caching state on the R side.
            x@env$isCached <- FALSE
            x
          })
#' Checkpoint an RDD
#'
#' Mark this RDD for checkpointing. It will be saved to a file inside the
#' checkpoint directory set with setCheckpointDir() and all references to its
#' parent RDDs will be removed. This function must be called before any job has
#' been executed on this RDD. It is strongly recommended that this RDD is
#' persisted in memory, otherwise saving it on a file will require recomputation.
#'
#' @param rdd The RDD to checkpoint
#' @rdname checkpoint-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' setCheckpointDir(sc, "checkpoints")
#' rdd <- parallelize(sc, 1:10, 2L)
#' checkpoint(rdd)
#'}
setGeneric("checkpoint", function(x) { standardGeneric("checkpoint") })
#' @rdname checkpoint-methods
#' @aliases checkpoint,RDD-method
setMethod("checkpoint",
          signature(x = "RDD"),
          function(x) {
            # Request checkpointing on the JVM-side RDD and record the
            # checkpointed state locally.
            callJMethod(getJRDD(x), "checkpoint")
            x@env$isCheckpointed <- TRUE
            x
          })
#' Gets the number of partitions of an RDD
#'
#' @param x A RDD.
#' @return the number of partitions of rdd as an integer.
#' @rdname numPartitions
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' numPartitions(rdd) # 2L
#'}
setGeneric("numPartitions", function(x) { standardGeneric("numPartitions") })
#' @rdname numPartitions
#' @aliases numPartitions,RDD-method
setMethod("numPartitions",
          signature(x = "RDD"),
          function(x) {
            # Ask the JVM-side RDD for its split list and return its size.
            splits <- callJMethod(getJRDD(x), "splits")
            callJMethod(splits, "size")
          })
#' Collect elements of an RDD
#'
#' @description
#' \code{collect} returns a list that contains all of the elements in this RDD.
#'
#' @param x The RDD to collect
#' @param ... Other optional arguments to collect
#' @param flatten FALSE if the list should not flattened
#' @return a list containing elements in the RDD
#' @rdname collect-methods
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2L)
#' collect(rdd) # list from 1 to 10
#' collectPartition(rdd, 0L) # list from 1 to 5
#'}
setGeneric("collect", function(x, ...) { standardGeneric("collect") })
#' @rdname collect-methods
#' @aliases collect,RDD-method
setMethod("collect",
          signature(x = "RDD"),
          function(x, flatten = TRUE) {
            # Assumes a pairwise RDD is backed by a JavaPairRDD.
            # Pull every element to the driver as a JList, then convert it
            # to an R list (optionally flattened).
            jList <- callJMethod(getJRDD(x), "collect")
            convertJListToRList(jList, flatten)
          })
#' @rdname collect-methods
#' @export
#' @description
#' \code{collectPartition} returns a list that contains all of the elements
#' in the specified partition of the RDD.
#' @param partitionId the partition to collect (starts from 0)
setGeneric("collectPartition",
           function(x, partitionId) {
             standardGeneric("collectPartition")
           })
#' @rdname collect-methods
#' @aliases collectPartition,integer,RDD-method
setMethod("collectPartition",
          signature(x = "RDD", partitionId = "integer"),
          function(x, partitionId) {
            # collectPartitions takes a JList of partition ids; we request
            # exactly one, so the result holds a single partition's data.
            partList <- callJMethod(getJRDD(x),
                                    "collectPartitions",
                                    as.list(as.integer(partitionId)))
            convertJListToRList(partList[[1]], flatten = TRUE)
          })
#' @rdname collect-methods
#' @export
#' @description
#' \code{collectAsMap} returns a named list as a map that contains all of the elements
#' in a key-value pair RDD.
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)), 2L)
#' collectAsMap(rdd) # list(`1` = 2, `3` = 4)
#'}
setGeneric("collectAsMap", function(x) { standardGeneric("collectAsMap") })
#' @rdname collect-methods
#' @aliases collectAsMap,RDD-method
setMethod("collectAsMap",
          signature(x = "RDD"),
          function(x) {
            # Collect all (key, value) pairs to the driver, then insert them
            # into an environment keyed by as.character(key); a later pair
            # with a duplicate key overwrites the earlier one.
            pairList <- collect(x)
            map <- new.env()
            lapply(pairList, function(i) { assign(as.character(i[[1]]), i[[2]], envir = map) })
            # NOTE(review): as.list() on an environment does not guarantee
            # any particular element order.
            as.list(map)
          })
#' Return the number of elements in the RDD.
#'
#' @param x The RDD to count
#' @return number of elements in the RDD.
#' @rdname count
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' count(rdd) # 10
#' length(rdd) # Same as count
#'}
setGeneric("count", function(x) { standardGeneric("count") })
#' @rdname count
#' @aliases count,RDD-method
setMethod("count",
          signature(x = "RDD"),
          function(x) {
            # Count each partition locally on the workers, then add the
            # per-partition counts up on the driver.
            partCounts <- collect(lapplyPartition(x, function(part) {
              as.integer(length(part))
            }))
            sum(as.integer(partCounts))
          })
#' Return the number of elements in the RDD
#' @export
#' @rdname count
setMethod("length",
          signature(x = "RDD"),
          function(x) {
            # Alias for count().
            count(x)
          })
#' Return the count of each unique value in this RDD as a list of
#' (value, count) pairs.
#'
#' Same as countByValue in Spark.
#'
#' @param x The RDD to count
#' @return list of (value, count) pairs, where count is number of each unique
#'         value in rdd.
#' @rdname countByValue
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, c(1,2,3,2,1))
#' countByValue(rdd) # (1,2L), (2,2L), (3,1L)
#'}
setGeneric("countByValue", function(x) { standardGeneric("countByValue") })
#' @rdname countByValue
#' @aliases countByValue,RDD-method
setMethod("countByValue",
          signature(x = "RDD"),
          function(x) {
            # Map each element to (element, 1L), then sum the counts per
            # distinct value and collect the result to the driver.
            pairs <- lapply(x, function(elem) list(elem, 1L))
            collect(reduceByKey(pairs, `+`, numPartitions(x)))
          })
#' Apply a function to all elements
#'
#' This function creates a new RDD by applying the given transformation to all
#' elements of the given RDD
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each element
#' @return a new RDD created by the transformation.
#' @rdname lapply
#' @aliases lapply
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' multiplyByTwo <- lapply(rdd, function(x) { x * 2 })
#' collect(multiplyByTwo) # 2,4,6...
#'}
setMethod("lapply",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Per-partition worker: ignore the partition index and apply
            # FUN to every element of the partition.
            lapplyPartitionsWithIndex(X, function(partIndex, elems) {
              lapply(elems, FUN)
            })
          })
#' @rdname lapply
#' @export
setGeneric("map", function(X, FUN) {
             standardGeneric("map") })
#' @rdname lapply
#' @aliases map,RDD,function-method
setMethod("map",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # map() is Spark's name for lapply().
            lapply(X, FUN)
          })
#' Flatten results after applying a function to all elements
#'
#' This function returns a new RDD by first applying a function to all
#' elements of this RDD, and then flattening the results.
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each element
#' @return a new RDD created by the transformation.
#' @rdname flatMap
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' multiplyByTwo <- flatMap(rdd, function(x) { list(x*2, x*10) })
#' collect(multiplyByTwo) # 2,20,4,40,6,60...
#'}
setGeneric("flatMap", function(X, FUN) {
             standardGeneric("flatMap") })
#' @rdname flatMap
#' @aliases flatMap,RDD,function-method
setMethod("flatMap",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            partitionFunc <- function(part) {
              # Apply FUN to every element, then flatten one level of the
              # resulting lists. Use FALSE rather than the reassignable
              # alias F, which can be shadowed by user code.
              unlist(
                lapply(part, FUN),
                recursive = FALSE
              )
            }
            lapplyPartition(X, partitionFunc)
          })
#' Apply a function to each partition of an RDD
#'
#' Return a new RDD by applying a function to each partition of this RDD.
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each partition.
#' @return a new RDD created by the transformation.
#' @rdname lapplyPartition
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' partitionSum <- lapplyPartition(rdd, function(part) { Reduce("+", part) })
#' collect(partitionSum) # 15, 40
#'}
setGeneric("lapplyPartition", function(X, FUN) {
             standardGeneric("lapplyPartition") })
#' @rdname lapplyPartition
#' @aliases lapplyPartition,RDD,function-method
setMethod("lapplyPartition",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # Delegate to the indexed variant, discarding the partition index.
            lapplyPartitionsWithIndex(X, function(partIndex, part) {
              FUN(part)
            })
          })
#' mapPartitions is the same as lapplyPartition.
#'
#' @rdname lapplyPartition
#' @export
setGeneric("mapPartitions", function(X, FUN) {
             standardGeneric("mapPartitions") })
#' @rdname lapplyPartition
#' @aliases mapPartitions,RDD,function-method
setMethod("mapPartitions",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # mapPartitions() is Spark's name for lapplyPartition().
            lapplyPartition(X, FUN)
          })
#' Return a new RDD by applying a function to each partition of this RDD, while
#' tracking the index of the original partition.
#'
#' @param X The RDD to apply the transformation.
#' @param FUN the transformation to apply on each partition; takes the partition
#'        index and a list of elements in the particular partition.
#' @return a new RDD created by the transformation.
#' @rdname lapplyPartitionsWithIndex
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 5L)
#' prod <- lapplyPartitionsWithIndex(rdd, function(split, part) {
#'                                          split * Reduce("+", part) })
#' collect(prod, flatten = FALSE) # 0, 7, 22, 45, 76
#'}
setGeneric("lapplyPartitionsWithIndex", function(X, FUN) {
             standardGeneric("lapplyPartitionsWithIndex") })
#' @rdname lapplyPartitionsWithIndex
#' @aliases lapplyPartitionsWithIndex,RDD,function-method
setMethod("lapplyPartitionsWithIndex",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # FUN is re-wrapped in a closure defined here before being
            # handed to PipelinedRDD; presumably this forces FUN to be
            # captured in a fresh environment for serialization to the
            # workers -- TODO confirm against PipelinedRDD's implementation.
            closureCapturingFunc <- function(split, part) {
              FUN(split, part)
            }
            PipelinedRDD(X, closureCapturingFunc)
          })
#' @rdname lapplyPartitionsWithIndex
#' @export
setGeneric("mapPartitionsWithIndex", function(X, FUN) {
             standardGeneric("mapPartitionsWithIndex") })
#' @rdname lapplyPartitionsWithIndex
#' @aliases mapPartitionsWithIndex,RDD,function-method
setMethod("mapPartitionsWithIndex",
          signature(X = "RDD", FUN = "function"),
          function(X, FUN) {
            # mapPartitionsWithIndex() is Spark's name for this operation.
            lapplyPartitionsWithIndex(X, FUN)
          })
#' This function returns a new RDD containing only the elements that satisfy
#' a predicate (i.e. returning TRUE in a given logical function).
#' The same as `filter()' in Spark.
#'
#' @param x The RDD to be filtered.
#' @param f A unary predicate function.
#' @rdname filterRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' unlist(collect(filterRDD(rdd, function (x) { x < 3 }))) # c(1, 2)
#'}
setGeneric("filterRDD",
           function(x, f) { standardGeneric("filterRDD") })
#' @rdname filterRDD
#' @aliases filterRDD,RDD,function-method
setMethod("filterRDD",
          signature(x = "RDD", f = "function"),
          function(x, f) {
            # Filter each partition independently with base::Filter.
            lapplyPartition(x, function(part) Filter(f, part))
          })
#' @rdname filterRDD
#' @export
#' @aliases Filter
setMethod("Filter",
          signature(f = "function", x = "RDD"),
          function(f, x) {
            # Makes base::Filter's argument order work on RDDs.
            filterRDD(x, f)
          })
#' Reduce across elements of an RDD.
#'
#' This function reduces the elements of this RDD using the
#' specified commutative and associative binary operator.
#'
#' @param rdd The RDD to reduce
#' @param func Commutative and associative function to apply on elements
#'             of the RDD.
#' @export
#' @rdname reduce
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' reduce(rdd, "+") # 55
#'}
setGeneric("reduce", function(x, func) { standardGeneric("reduce") })
#' @rdname reduce
#' @aliases reduce,RDD,ANY-method
setMethod("reduce",
          signature(x = "RDD", func = "ANY"),
          function(x, func) {
            # Reduce inside each partition on the workers, collect the
            # per-partition results unflattened, then reduce those partial
            # results on the driver.
            partials <- collect(lapplyPartition(x, function(part) {
                                  Reduce(func, part)
                                }),
                                flatten = FALSE)
            Reduce(func, partials)
          })
#' Get the maximum element of an RDD.
#'
#' @param x The RDD to get the maximum element from
#' @export
#' @rdname maximum
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' maximum(rdd) # 10
#'}
setGeneric("maximum", function(x) { standardGeneric("maximum") })
#' @rdname maximum
#' @aliases maximum,RDD
setMethod("maximum",
          signature(x = "RDD"),
          function(x) {
            # max is commutative and associative, so a plain reduce works.
            reduce(x, max)
          })
#' Get the minimum element of an RDD.
#'
#' @param x The RDD to get the minimum element from
#' @export
#' @rdname minimum
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' minimum(rdd) # 1
#'}
setGeneric("minimum", function(x) { standardGeneric("minimum") })
#' @rdname minimum
#' @aliases minimum,RDD
setMethod("minimum",
          signature(x = "RDD"),
          function(x) {
            # min is commutative and associative, so a plain reduce works.
            reduce(x, min)
          })
#' Applies a function to all elements in an RDD, and force evaluation.
#'
#' @param x The RDD to apply the function
#' @param func The function to be applied.
#' @return invisible NULL.
#' @export
#' @rdname foreach
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' foreach(rdd, function(x) { save(x, file=...) })
#'}
setGeneric("foreach", function(x, func) { standardGeneric("foreach") })
#' @rdname foreach
#' @aliases foreach,RDD,function-method
setMethod("foreach",
          signature(x = "RDD", func = "function"),
          function(x, func) {
            # Run func on each element for its side effects only; return
            # NULL per partition so nothing meaningful is collected.
            applyAndDiscard <- function(part) {
              lapply(part, func)
              NULL
            }
            invisible(collect(mapPartitions(x, applyAndDiscard)))
          })
#' Applies a function to each partition in an RDD, and force evaluation.
#'
#' @export
#' @rdname foreach
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' foreachPartition(rdd, function(part) { save(part, file=...); NULL })
#'}
setGeneric("foreachPartition",
           function(x, func) { standardGeneric("foreachPartition") })
#' @rdname foreach
#' @aliases foreachPartition,RDD,function-method
setMethod("foreachPartition",
          signature(x = "RDD", func = "function"),
          function(x, func) {
            # collect() here forces the lazy transformation to execute.
            invisible(collect(mapPartitions(x, func)))
          })
#' Take elements from an RDD.
#'
#' This function takes the first NUM elements in the RDD and
#' returns them in a list.
#'
#' @param x The RDD to take elements from
#' @param num Number of elements to take
#' @rdname take
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' take(rdd, 2L) # list(1, 2)
#'}
setGeneric("take", function(x, num) { standardGeneric("take") })
#' @rdname take
#' @aliases take,RDD,numeric-method
setMethod("take",
          signature(x = "RDD", num = "numeric"),
          function(x, num) {
            resList <- list()
            index <- -1
            jrdd <- getJRDD(x)
            numPartitions <- numPartitions(x)
            # TODO(shivaram): Collect more than one partition based on size
            # estimates similar to the scala version of `take`.
            # Fetch one partition at a time from the JVM until we have
            # `num` elements or run out of partitions.
            while (TRUE) {
              index <- index + 1
              if (length(resList) >= num || index >= numPartitions)
                break
              # a JList of byte arrays
              partitionArr <- callJMethod(jrdd, "collectPartitions", as.list(as.integer(index)))
              partition <- partitionArr[[1]]
              # Only `size` more elements are needed to reach `num`.
              size <- num - length(resList)
              # elems is capped to have at most `size` elements
              elems <- convertJListToRList(partition,
                                           flatten = TRUE,
                                           logicalUpperBound = size,
                                           serialized = x@env$serialized)
              # TODO: Check if this append is O(n^2)?
              resList <- append(resList, elems)
            }
            resList
          })
#' Removes the duplicates from RDD.
#'
#' This function returns a new RDD containing the distinct elements in the
#' given RDD. The same as `distinct()' in Spark.
#'
#' @param x The RDD to remove duplicates from.
#' @param numPartitions Number of partitions to create.
#' @rdname distinct
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, c(1,2,2,3,3,3))
#' sort(unlist(collect(distinct(rdd)))) # c(1, 2, 3)
#'}
setGeneric("distinct",
           function(x, numPartitions) { standardGeneric("distinct") })
setClassUnion("missingOrInteger", c("missing", "integer"))
#' @rdname distinct
#' @aliases distinct,RDD,missingOrInteger-method
setMethod("distinct",
          signature(x = "RDD", numPartitions = "missingOrInteger"),
          function(x, numPartitions) {
            if (missing(numPartitions)) {
              numPartitions <- SparkR::numPartitions(x)
            }
            # Key every element by itself with a dummy NULL value, collapse
            # duplicate keys with reduceByKey, then strip the dummy values.
            keyed <- lapply(x, function(elem) list(elem, NULL))
            deduped <- reduceByKey(keyed,
                                   function(v1, v2) v1,
                                   numPartitions)
            lapply(deduped, function(pair) pair[[1]])
          })
#' Return an RDD that is a sampled subset of the given RDD.
#'
#' The same as `sample()' in Spark. (We rename it due to signature
#' inconsistencies with the `sample()' function in R's base package.)
#'
#' @param x The RDD to sample elements from
#' @param withReplacement Sampling with replacement or not
#' @param fraction The (rough) sample target fraction
#' @param seed Randomness seed value
#' @rdname sampleRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10) # ensure each num is in its own split
#' collect(sampleRDD(rdd, FALSE, 0.5, 1618L)) # ~5 distinct elements
#' collect(sampleRDD(rdd, TRUE, 0.5, 9L)) # ~5 elements possibly with duplicates
#'}
setGeneric("sampleRDD",
           function(x, withReplacement, fraction, seed) {
             standardGeneric("sampleRDD")
           })
#' @rdname sampleRDD
#' @aliases sampleRDD,RDD
setMethod("sampleRDD",
          signature(x = "RDD", withReplacement = "logical",
                    fraction = "numeric", seed = "integer"),
          function(x, withReplacement, fraction, seed) {
            # The sampler: takes a partition and returns its sampled version.
            samplingFunc <- function(split, part) {
              # Every partition seeds with the same value ...
              set.seed(seed)
              res <- vector("list", length(part))
              len <- 0
              # Discards some random values to ensure each partition has a
              # different random seed.
              # (... then advances the stream by `split` draws so partitions
              # do not all produce identical random sequences.)
              runif(split)
              for (elem in part) {
                if (withReplacement) {
                  # With replacement: the number of copies of each element
                  # is drawn from a Poisson(fraction) distribution.
                  count <- rpois(1, fraction)
                  if (count > 0) {
                    res[(len + 1):(len + count)] <- rep(list(elem), count)
                    len <- len + count
                  }
                } else {
                  # Without replacement: a Bernoulli(fraction) keep/drop
                  # decision per element.
                  if (runif(1) < fraction) {
                    len <- len + 1
                    res[[len]] <- elem
                  }
                }
              }
              # TODO(zongheng): look into the performance of the current
              # implementation. Look into some iterator package? Note that
              # Scala avoids many calls to creating an empty list and PySpark
              # similarly achieves this using `yield'.
              # Trim the preallocated list down to the elements actually kept.
              if (len > 0)
                res[1:len]
              else
                list()
            }
            lapplyPartitionsWithIndex(x, samplingFunc)
          })
#' Return a list of the elements that are a sampled subset of the given RDD.
#'
#' @param x The RDD to sample elements from
#' @param withReplacement Sampling with replacement or not
#' @param num Number of elements to return
#' @param seed Randomness seed value
#' @rdname takeSample
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:100)
#' # exactly 5 elements sampled, which may not be distinct
#' takeSample(rdd, TRUE, 5L, 1618L)
#' # exactly 5 distinct elements sampled
#' takeSample(rdd, FALSE, 5L, 16181618L)
#'}
setGeneric("takeSample",
           function(x, withReplacement, num, seed) {
             standardGeneric("takeSample")
           })
#' @rdname takeSample
#' @aliases takeSample,RDD
setMethod("takeSample", signature(x = "RDD", withReplacement = "logical",
                                  num = "integer", seed = "integer"),
          function(x, withReplacement, num, seed) {
            # This function is ported from RDD.scala.
            fraction <- 0.0
            total <- 0
            multiplier <- 3.0
            initialCount <- count(x)
            maxSelected <- 0
            MAXINT <- .Machine$integer.max
            if (num < 0)
              stop(paste("Negative number of elements requested"))
            # NOTE(review): initialCount == 0 makes the fraction below a
            # division by zero (NaN/Inf); presumably callers never sample
            # from an empty RDD -- confirm.
            if (initialCount > MAXINT - 1) {
              maxSelected <- MAXINT - 1
            } else {
              maxSelected <- initialCount
            }
            # Oversample (by `multiplier`) so one pass usually yields at
            # least `total` elements; cap at the RDD size when sampling
            # without replacement.
            if (num > initialCount && !withReplacement) {
              total <- maxSelected
              fraction <- multiplier * (maxSelected + 1) / initialCount
            } else {
              total <- num
              fraction <- multiplier * (num + 1) / initialCount
            }
            set.seed(seed)
            samples <- collect(sampleRDD(x, withReplacement, fraction,
                                         as.integer(ceiling(runif(1,
                                                                  -MAXINT,
                                                                  MAXINT)))))
            # If the first sample didn't turn out large enough, keep trying to
            # take samples; this shouldn't happen often because we use a big
            # multiplier for the initial size
            while (length(samples) < total)
              samples <- collect(sampleRDD(x, withReplacement, fraction,
                                           as.integer(ceiling(runif(1,
                                                                    -MAXINT,
                                                                    MAXINT)))))
            # TODO(zongheng): investigate if this call is an in-place shuffle?
            # Shuffle the oversampled list, then keep exactly `total` elements.
            sample(samples)[1:total]
          })
#' Creates tuples of the elements in this RDD by applying a function.
#'
#' @param x The RDD.
#' @param func The function to be applied.
#' @rdname keyBy
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1, 2, 3))
#' collect(keyBy(rdd, function(x) { x*x })) # list(list(1, 1), list(4, 2), list(9, 3))
#'}
setGeneric("keyBy", function(x, func) { standardGeneric("keyBy") })
#' @rdname keyBy
#' @aliases keyBy,RDD
setMethod("keyBy",
          signature(x = "RDD", func = "function"),
          function(x, func) {
            # Pair each element with its computed key: (func(elem), elem).
            lapply(x, function(elem) {
              list(func(elem), elem)
            })
          })
#' Save this RDD as a SequenceFile of serialized objects.
#'
#' @param x The RDD to save
#' @param path The directory where the file is saved
#' @rdname saveAsObjectFile
#' @seealso objectFile
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:3)
#' saveAsObjectFile(rdd, "/tmp/sparkR-tmp")
#'}
setGeneric("saveAsObjectFile", function(x, path) { standardGeneric("saveAsObjectFile") })
#' @rdname saveAsObjectFile
#' @aliases saveAsObjectFile,RDD
setMethod("saveAsObjectFile",
          signature(x = "RDD", path = "character"),
          function(x, path) {
            # objectFile() assumes serialized data when it loads the saved
            # file back, so string-format RDDs must be reserialized first.
            if (!x@env$serialized) {
              x <- reserialize(x)
            }
            # invisible(): this is a side-effecting call with no useful
            # return value.
            invisible(callJMethod(getJRDD(x), "saveAsObjectFile", path))
          })
#' Save this RDD as a text file, using string representations of elements.
#'
#' @param x The RDD to save
#' @param path The directory where the splits of the text file are saved
#' @rdname saveAsTextFile
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:3)
#' saveAsTextFile(rdd, "/tmp/sparkR-tmp")
#'}
setGeneric("saveAsTextFile", function(x, path) { standardGeneric("saveAsTextFile") })
#' @rdname saveAsTextFile
#' @aliases saveAsTextFile,RDD
setMethod("saveAsTextFile",
          signature(x = "RDD", path = "character"),
          function(x, path) {
            # Convert every element to its string representation before
            # handing the RDD to the JVM for writing.
            stringRdd <- lapply(x, function(elem) toString(elem))
            invisible(
              callJMethod(getJRDD(stringRdd, dataSerialization = FALSE),
                          "saveAsTextFile", path))
          })
#' Sort an RDD by the given key function.
#'
#' @param x An RDD to be sorted.
#' @param func A function used to compute the sort key for each element.
#' @param ascending A flag to indicate whether the sorting is ascending or descending.
#' @param numPartitions Number of partitions to create.
#' @return An RDD where all elements are sorted.
#' @rdname sortBy
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(3, 2, 1))
#' collect(sortBy(rdd, function(x) { x })) # list (1, 2, 3)
#'}
setGeneric("sortBy", function(x,
                              func,
                              ascending = TRUE,
                              numPartitions = 1L) {
  standardGeneric("sortBy")
})
#' @rdname sortBy
#' @aliases sortBy,RDD,RDD-method
setMethod("sortBy",
          signature(x = "RDD", func = "function"),
          function(x, func, ascending = TRUE, numPartitions = SparkR::numPartitions(x)) {
            # Key each element by func, sort by key, then drop the keys.
            keyed <- keyBy(x, func)
            values(sortByKey(keyed, ascending, numPartitions))
          })
# Helper function to get first N elements from an RDD in the specified order.
# Param:
#   x An RDD.
#   num Number of elements to return.
#   ascending A flag to indicate whether the sorting is ascending or descending.
# Return:
#   A list of the first N elements from the RDD in the specified order.
#
takeOrderedElem <- function(x, num, ascending = TRUE) {
  if (num <= 0L) {
    return(list())
  }
  # Keep only the best `num` elements of each partition (as a single-element
  # list so mapPartitions yields one candidate list per partition).
  partitionFunc <- function(part) {
    if (num < length(part)) {
      # R limitation: order works only on primitive types!
      ord <- order(unlist(part, recursive = FALSE), decreasing = !ascending)
      list(part[ord[1:num]])
    } else {
      list(part)
    }
  }
  # Merge two candidate lists, keeping the best (at most) `num` elements.
  reduceFunc <- function(elems, part) {
    newElems <- append(elems, part)
    # R limitation: order works only on primitive types!
    ord <- order(unlist(newElems, recursive = FALSE), decreasing = !ascending)
    # Cap the selection at the number of available elements. The previous
    # ord[1:num] indexed past the end when the RDD held fewer than `num`
    # elements overall, injecting NULL entries into the result.
    newElems[ord[seq_len(min(num, length(newElems)))]]
  }
  newRdd <- mapPartitions(x, partitionFunc)
  reduce(newRdd, reduceFunc)
}
#' Returns the first N elements from an RDD in ascending order.
#'
#' @param x An RDD.
#' @param num Number of elements to return.
#' @return The first N elements from the RDD in ascending order.
#' @rdname takeOrdered
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
#' takeOrdered(rdd, 6L) # list(1, 2, 3, 4, 5, 6)
#'}
setGeneric("takeOrdered", function(x, num) { standardGeneric("takeOrdered") })
#' @rdname takeOrdered
#' @aliases takeOrdered,RDD,RDD-method
setMethod("takeOrdered",
          signature(x = "RDD", num = "integer"),
          function(x, num) {
            # Ascending order is the helper's default.
            takeOrderedElem(x, num)
          })
#' Returns the top N elements from an RDD.
#'
#' @param x An RDD.
#' @param num Number of elements to return.
#' @return The top N elements from the RDD.
#' @rdname top
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
#' top(rdd, 6L) # list(10, 9, 7, 6, 5, 4)
#'}
setGeneric("top", function(x, num) { standardGeneric("top") })
#' @rdname top
#' @aliases top,RDD,RDD-method
setMethod("top",
          signature(x = "RDD", num = "integer"),
          function(x, num) {
            # Descending order: largest elements first.
            takeOrderedElem(x, num, FALSE)
          })
#' Fold an RDD using a given associative function and a neutral "zero value".
#'
#' Aggregate the elements of each partition, and then the results for all the
#' partitions, using a given associative function and a neutral "zero value".
#'
#' @param x An RDD.
#' @param zeroValue A neutral "zero value".
#' @param op An associative function for the folding operation.
#' @return The folding result.
#' @rdname fold
#' @seealso reduce
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1, 2, 3, 4, 5))
#' fold(rdd, 0, "+") # 15
#'}
setGeneric("fold", function(x, zeroValue, op) { standardGeneric("fold") })
#' @rdname fold
#' @aliases fold,RDD,RDD-method
setMethod("fold",
          signature(x = "RDD", zeroValue = "ANY", op = "ANY"),
          function(x, zeroValue, op) {
            # fold is aggregate with the same op for both the sequential
            # and the combining step.
            aggregateRDD(x, zeroValue, op, op)
          })
#' Aggregate an RDD using the given combine functions and a neutral "zero value".
#'
#' Aggregate the elements of each partition, and then the results for all the
#' partitions, using given combine functions and a neutral "zero value".
#'
#' @param x An RDD.
#' @param zeroValue A neutral "zero value".
#' @param seqOp A function to aggregate the RDD elements. It may return a different
#'              result type from the type of the RDD elements.
#' @param combOp A function to aggregate results of seqOp.
#' @return The aggregation result.
#' @rdname aggregateRDD
#' @seealso reduce
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1, 2, 3, 4))
#' zeroValue <- list(0, 0)
#' seqOp <- function(x, y) { list(x[[1]] + y, x[[2]] + 1) }
#' combOp <- function(x, y) { list(x[[1]] + y[[1]], x[[2]] + y[[2]]) }
#' aggregateRDD(rdd, zeroValue, seqOp, combOp) # list(10, 4)
#'}
setGeneric("aggregateRDD", function(x, zeroValue, seqOp, combOp) { standardGeneric("aggregateRDD") })
#' @rdname aggregateRDD
#' @aliases aggregateRDD,RDD,RDD-method
setMethod("aggregateRDD",
          signature(x = "RDD", zeroValue = "ANY", seqOp = "ANY", combOp = "ANY"),
          function(x, zeroValue, seqOp, combOp) {
            # Fold each partition with seqOp (starting from zeroValue),
            # collect the partial results unflattened, then combine them
            # with combOp on the driver (again seeded with zeroValue).
            partials <- collect(lapplyPartition(x, function(part) {
                                  Reduce(seqOp, part, zeroValue)
                                }),
                                flatten = FALSE)
            Reduce(combOp, partials, zeroValue)
          })
#' Pipes elements to a forked external process.
#'
#' The same as 'pipe()' in Spark.
#'
#' @param x The RDD whose elements are piped to the forked external process.
#' @param command The command to fork an external process.
#' @param env A named list to set environment variables of the external process.
#' @return A new RDD created by piping all elements to a forked external process.
#' @rdname pipeRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10)
#' collect(pipeRDD(rdd, "more"))
#' Output: c("1", "2", ..., "10")
#'}
setGeneric("pipeRDD", function(x, command, env = list()) {
  standardGeneric("pipeRDD")
})
#' @rdname pipeRDD
#' @aliases pipeRDD,RDD,character-method
setMethod("pipeRDD",
          signature(x = "RDD", command = "character"),
          function(x, command, env = list()) {
            func <- function(part) {
              # Strip trailing CR/LF so each element becomes exactly one
              # input line for the external process.
              trim.trailing.func <- function(x) {
                sub("[\r\n]*$", "", toString(x))
              }
              input <- unlist(lapply(part, trim.trailing.func))
              # Feed the partition's lines to the command's stdin and
              # capture its stdout as the new partition contents.
              res <- system2(command, stdout = TRUE, input = input, env = env)
              lapply(res, trim.trailing.func)
            }
            lapplyPartition(x, func)
          })
# TODO: Consider caching the name in the RDD's environment
#' Return an RDD's name.
#'
#' @param x The RDD whose name is returned.
#' @rdname name
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1,2,3))
#' name(rdd) # NULL (if not set before)
#'}
setGeneric("name", function(x) { standardGeneric("name") })
#' @rdname name
#' @aliases name,RDD
setMethod("name",
          signature(x = "RDD"),
          function(x) {
            # Delegate to the JVM-side RDD's name() accessor.
            jrdd <- getJRDD(x)
            callJMethod(jrdd, "name")
          })
#' Set an RDD's name.
#'
#' @param x The RDD whose name is to be set.
#' @param name The RDD name to be set.
#' @return a new RDD renamed.
#' @rdname setName
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, list(1,2,3))
#' setName(rdd, "myRDD")
#' name(rdd) # "myRDD"
#'}
setGeneric("setName", function(x, name) { standardGeneric("setName") })
#' @rdname setName
#' @aliases setName,RDD
setMethod("setName",
          signature(x = "RDD", name = "character"),
          function(x, name) {
            # Set the name on the JVM-side RDD, then return the R handle.
            jrdd <- getJRDD(x)
            callJMethod(jrdd, "setName", name)
            x
          })
############ Binary Functions #############
#' Return the union RDD of two RDDs.
#' The same as union() in Spark.
#'
#' @param x An RDD.
#' @param y An RDD.
#' @return a new RDD created by performing the simple union (without removing
#' duplicates) of two input RDDs.
#' @rdname unionRDD
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:3)
#' unionRDD(rdd, rdd) # 1, 2, 3, 1, 2, 3
#'}
setGeneric("unionRDD", function(x, y) { standardGeneric("unionRDD") })
#' @rdname unionRDD
#' @aliases unionRDD,RDD,RDD-method
setMethod("unionRDD",
          signature(x = "RDD", y = "RDD"),
          function(x, y) {
            # Both sides must use the same serialization format before the
            # JVM union; otherwise elements could not be read back uniformly.
            if (x@env$serialized == y@env$serialized) {
              jrdd <- callJMethod(getJRDD(x), "union", getJRDD(y))
              union.rdd <- RDD(jrdd, x@env$serialized)
            } else {
              # One of the RDDs is not serialized, we need to serialize it first.
              if (!x@env$serialized) {
                x <- reserialize(x)
              } else {
                y <- reserialize(y)
              }
              jrdd <- callJMethod(getJRDD(x), "union", getJRDD(y))
              # After reserialization both inputs are serialized, so the
              # result is marked serialized (TRUE).
              union.rdd <- RDD(jrdd, TRUE)
            }
            union.rdd
          })
|
# Room-occupancy classification: fit logistic-regression, decision-tree and
# random-forest models on the UCI occupancy-detection data, compare them with
# ROC/AUC on a validation split, and score two held-out test files.
library(dplyr)
library(ggplot2)
library(gridExtra)
library(ROCR)
library(rpart)
library(randomForest)
library(caret)
# NOTE(review): machine-specific paths; assumes the UCI occupancy CSV exports
# exist under this Windows user's Downloads folder.
dat=read.csv("C:/Users/LG/Downloads/datatest.txt",strip.white = TRUE)
data2=read.csv("C:/Users/LG/Downloads/datatest2.txt",strip.white = TRUE)
train=read.csv("C:/Users/LG/Downloads/datatraining.txt",strip.white = TRUE)
# Drop the first column (record identifier) from every data set.
dat=dat[,-1]
data2=data2[,-1]
train=train[,-1]
# Visualization: correlation of each predictor (columns other than 6) with the
# Occupancy label, plotted as a sorted dot chart.
tmp=as.data.frame(cor(train[,-6], as.numeric(train$Occupancy)))
tmp=tmp%>%rename(cor=V1)
tmp$var=rownames(tmp)
tmp%>%ggplot(aes(reorder(var,cor), cor))+geom_point()+coord_flip()
# Split the training file into training (75%) and validation (25%) subsets.
set.seed(1606)
n=nrow(train)
index=1:n
training_index=sample(index,n*0.75)
validation_index=setdiff(index,training_index)
training=train[training_index,]
validation=train[validation_index,]
####### Logistic regression model
# Model fitting, i.e. the training step.
logistic=glm(Occupancy~.,data=training,family=binomial)
# Inspect the fitted model.
summary(logistic)
# Prediction step for model evaluation (on the validation set).
y_obs=as.numeric(as.character(validation$Occupancy))
y_hat=predict(logistic,newdata=validation,type='response')
pred_glm=prediction(y_hat,y_obs)
performance(pred_glm,'auc')@y.values[[1]]
####### Decision tree model
# Model fitting, i.e. the training step.
tree=rpart(Occupancy~.,data=training)
# Inspect the fitted model and plot the tree.
printcp(tree)
summary(tree)
opar=par(mfrow=c(1,1),xpd=NA)
plot(tree)
text(tree,use.n=TRUE)
par(opar)
# Prediction step for model evaluation (on the validation set).
y_hat=predict(tree,newdata=validation)
# Keep only the predicted probability of class "1" (occupied).
y_hat=y_hat[,'1']
pred_tree=prediction(y_hat,y_obs)
performance(pred_tree,'auc')@y.values[[1]]
# Draw the ROC curves of both models on one plot.
p_glm=performance(pred_glm,'tpr','fpr')
p_tree=performance(pred_tree,'tpr','fpr')
plot(p_glm,col='green', lty='solid', main='ROC 곡선')
plot(p_tree,add=TRUE, col='red',lty='dotted')
abline(0,1)
legend('bottomright',inset=0.1,legend=c('glm','tree'),col=c('green','red'),lty=c('solid','dotted'),lwd=2)
###### Validation-set evaluation showed one model performing best (the
###### original note left the model name blank); it is selected as the final
###### model and evaluated on the held-out test sets below.
# First test file (dat): AUC, accuracy-by-cutoff and ROC curves.
y_obs_test=as.numeric(as.character(dat$Occupancy))
y_hat=predict(logistic,newdata=dat,type='response')
pred=prediction(y_hat,y_obs_test)
performance(pred,'auc')@y.values[[1]]
acc=performance(pred,'acc')
plot(acc,main="분계점(Cutoff)에 따른 정확률 곡선")
roc=performance(pred,'tpr','fpr')
plot(roc,main="ROC 곡선")
# Second test file (data2): same evaluation.
y_obs_test2=as.numeric(as.character(data2$Occupancy))
y_hat2=predict(logistic,newdata=data2,type='response')
pred2=prediction(y_hat2,y_obs_test2)
performance(pred2,'auc')@y.values[[1]]
acc2=performance(pred2,'acc')
plot(acc2,main="분계점(Cutoff)에 따른 정확률 곡선")
roc2=performance(pred2,'tpr','fpr')
plot(roc2,main="ROC 곡선")
####### Random forest model
set.seed(1607)
forest=randomForest(Occupancy~.,training)
# Inspect the fitted model, its error curves and variable importance.
forest
summary(forest)
opar=par(mfrow=c(1,2))
plot(forest)
varImpPlot(forest)
par(opar)
# Prediction step for model evaluation (on the validation set).
y_hat=predict(forest,newdata=validation,type='prob')[,'1']
pred_rf=prediction(y_hat,y_obs)
performance(pred_rf,'auc')@y.values[[1]]
| /Occupancy.R | no_license | miny2627/Data_Science | R | false | false | 3,341 | r | library(dplyr)
library(ggplot2)
library(gridExtra)
library(ROCR)
library(rpart)
library(randomForest)
library(caret)
dat=read.csv("C:/Users/LG/Downloads/datatest.txt",strip.white = TRUE)
data2=read.csv("C:/Users/LG/Downloads/datatest2.txt",strip.white = TRUE)
train=read.csv("C:/Users/LG/Downloads/datatraining.txt",strip.white = TRUE)
dat=dat[,-1]
data2=data2[,-1]
train=train[,-1]
# 시각화
tmp=as.data.frame(cor(train[,-6], as.numeric(train$Occupancy)))
tmp=tmp%>%rename(cor=V1)
tmp$var=rownames(tmp)
tmp%>%ggplot(aes(reorder(var,cor), cor))+geom_point()+coord_flip()
#훈련, 검증, 테스트 데이터의 구분
set.seed(1606)
n=nrow(train)
index=1:n
training_index=sample(index,n*0.75)
validation_index=setdiff(index,training_index)
training=train[training_index,]
validation=train[validation_index,]
####### 로지스틱 회귀 분석 모형
# 모형 적합 단계, 즉 학습 단계
logistic=glm(Occupancy~.,data=training,family=binomial)
# 학습 결과 확인하기
summary(logistic)
# 모형평가를 위한 예측 단계 (검증 집합, 즉 validation set 사용)
y_obs=as.numeric(as.character(validation$Occupancy))
y_hat=predict(logistic,newdata=validation,type='response')
pred_glm=prediction(y_hat,y_obs)
performance(pred_glm,'auc')@y.values[[1]]
####### 나무 모형
# 모형 적합 단계, 즉 학습 단계
tree=rpart(Occupancy~.,data=training)
# 학습 결과 확인하기
printcp(tree)
summary(tree)
opar=par(mfrow=c(1,1),xpd=NA)
plot(tree)
text(tree,use.n=TRUE)
par(opar)
# 모형평가를 위한 예측 단계 (검증 집합, 즉 validation set 사용)
y_hat=predict(tree,newdata=validation)
y_hat=y_hat[,'1']
pred_tree=prediction(y_hat,y_obs)
performance(pred_tree,'auc')@y.values[[1]]
# ROC 곡선 그리기
p_glm=performance(pred_glm,'tpr','fpr')
p_tree=performance(pred_tree,'tpr','fpr')
plot(p_glm,col='green', lty='solid', main='ROC 곡선')
plot(p_tree,add=TRUE, col='red',lty='dotted')
abline(0,1)
legend('bottomright',inset=0.1,legend=c('glm','tree'),col=c('green','red'),lty=c('solid','dotted'),lwd=2)
###### 검증 집합으로 모형 형가를 실시한 결과, 모형이 가장 좋은 성능을 보임
###### 따라서 를 최종 모형으로 선정함
###### 테스트집합을 가지 모형을 평가함
# dat
y_obs_test=as.numeric(as.character(dat$Occupancy))
y_hat=predict(logistic,newdata=dat,type='response')
pred=prediction(y_hat,y_obs_test)
performance(pred,'auc')@y.values[[1]]
acc=performance(pred,'acc')
plot(acc,main="분계점(Cutoff)에 따른 정확률 곡선")
roc=performance(pred,'tpr','fpr')
plot(roc,main="ROC 곡선")
# data2
y_obs_test2=as.numeric(as.character(data2$Occupancy))
y_hat2=predict(logistic,newdata=data2,type='response')
pred2=prediction(y_hat2,y_obs_test2)
performance(pred2,'auc')@y.values[[1]]
acc2=performance(pred2,'acc')
plot(acc2,main="분계점(Cutoff)에 따른 정확률 곡선")
roc2=performance(pred2,'tpr','fpr')
plot(roc2,main="ROC 곡선")
set.seed(1607)
forest=randomForest(Occupancy~.,training)
# 학습 결과 확인하기
forest
summary(forest)
opar=par(mfrow=c(1,2))
plot(forest)
varImpPlot(forest)
par(opar)
# 모형평가를 위한 예측 단계 (검증 집합, 즉 validation set 사용)
y_hat=predict(forest,newdata=validation,type='prob')[,'1']
pred_rf=prediction(y_hat,y_obs)
performance(pred_rf,'auc')@y.values[[1]]
|
library(feather)

# Convert the yearly "bd.df.30firms.<year>.Rds" data frames (2005-2014) into
# Feather files named "<year>.feather" in the working directory.
for (year in 2005:2014) {
  print(year)
  rds_file <- paste0("bd.df.30firms.", year, ".Rds")
  feather_file <- paste0(year, ".feather")
  write_feather(readRDS(rds_file), feather_file)
}
# Tidy up the loop's working variables.
rm(year, rds_file, feather_file)
| /Text Analytics/Group Assignment-1/ConvertRdsToFeather.r | no_license | mithileshmohanty/ISBAnalytics | R | false | false | 211 | r | library(feather)
for (i in 2005 :2014 ) {
print(i)
df <- readRDS(paste('bd.df.30firms.',i,'.Rds',sep = ""))
path <- paste(i,".feather",sep = "")
write_feather(df, path)
}
rm(i)
rm(path)
rm(df)
|
\name{handler}
\alias{handler}
\alias{is.handler}
\title{Create handler S3 class.}
\usage{
handler(subclass, listener, control_args = list(), value = NULL,
map = identity, id = rand_id())
is.handler(x)
}
\arguments{
\item{subclass}{name of the subclass. \code{handler} is
an abstract base class so this must always be provided.}
\item{listener}{name of the js listener (with
corresponding R event broker) that this handler is
associated with}
\item{control_args}{a list of arguments passed to
\code{control_f}}
\item{value}{the default value of the input}
  \item{map}{a function with a single argument that takes
  the value returned from the input control and converts it
  to an argument useful for ggvis. Defaults to
  \code{identity}, leaving the output unchanged.}
\item{id}{a unique identifier for this interactive input
- used to de-duplicate the controls when the same input
is used in multiple places in a visualisation}
\item{x}{object to test for "input"-ness}
}
\description{
This is currently a subclass on input, but it should
probably be the other way around since inputs are handlers
that have controls.
}
\examples{
p <- ggvis(mtcars, props(x = ~mpg, y = ~wt, size = left_right(1, 100)),
mark_symbol())
p$props$size.update$dr
# Handlers are extracted with the internal handlers() function
# ggvis:::handlers(p)
}
| /man/handler.Rd | no_license | wch/ggvis | R | false | false | 1,368 | rd | \name{handler}
\alias{handler}
\alias{is.handler}
\title{Create handler S3 class.}
\usage{
handler(subclass, listener, control_args = list(), value = NULL,
map = identity, id = rand_id())
is.handler(x)
}
\arguments{
\item{subclass}{name of the subclass. \code{handler} is
an abstract base class so this must always be provided.}
\item{listener}{name of the js listener (with
corresponding R event broker) that this handler is
associated with}
\item{control_args}{a list of arguments passed to
\code{control_f}}
\item{value}{the default value of the input}
  \item{map}{a function with a single argument that takes
  the value returned from the input control and converts it
  to an argument useful for ggvis. Defaults to
  \code{identity}, leaving the output unchanged.}
\item{id}{a unique identifier for this interactive input
- used to de-duplicate the controls when the same input
is used in multiple places in a visualisation}
\item{x}{object to test for "input"-ness}
}
\description{
This is currently a subclass on input, but it should
probably be the other way around since inputs are handlers
that have controls.
}
\examples{
p <- ggvis(mtcars, props(x = ~mpg, y = ~wt, size = left_right(1, 100)),
mark_symbol())
p$props$size.update$dr
# Handlers are extracted with the internal handlers() function
# ggvis:::handlers(p)
}
|
\name{AnPhenoMetrics}
\alias{AnPhenoMetrics}
\title{
Extracting annual phenological metrics
}
\description{
This function extracts annual phenological metrics
}
\usage{
AnPhenoMetrics(TS, outfile, outgraph, Ystart, period, SOSth = 0.5, EOSth = 0.5)
}
\arguments{
\item{TS}{matrix or data.frame containing the time series (one row per time series).}
\item{outfile}{name of the file where the metrics are saved (extension ".txt"). }
\item{outgraph}{name of the file where the graphs are saved (extension ".pdf").\cr}
\item{Ystart}{starting year of the analysis (in four-digit format).}
\item{period}{number of observations per year. \cr
For VGT's dataset : \var{period}=36, GIMMS's dataset: \var{period}=24.}
\item{SOSth}{threshold for the detection of the start of the season. \cr By default SOSth=0.5}
\item{EOSth}{threshold for the detection of the start of the season. \cr By default SOSth=0.5}
}
\details{
We proceed in 3 main steps:
\enumerate{
\item Detecting the number of minimums and maximums with the mean signal.\cr
The mean signal over the year is computed. Then we detect the minimums and the maximums.
A maximum is detected if it is a maximum over a window of 6 measures and if its value is higher than the mean of the mean signal.
Similarly, a minimum is detected if it is a minimum over a window of 6 measures and if its value is lower than the mean of the mean signal.
Then, a routine check if minimums and maximums are one after another and if there are the same number of maximums and minimums.
If the global mean of the time series is higher than 0.7 or smaller than 0.2, further calculations are stopped.
The signal is the one of the bare soil or forest and metrics became very difficult to compute, no more reliable.
\item Focusing on individual year time series to detect minimums and maximums.\cr
The main issue is to handle the full season whenever its starts and its stopped (very often up to the next year).
Our solution is to start the time series two months before the global minimum.
If two minimums (two seasons), we take into account the minimum of them.
For each year, we consider a time series with a length of 16 months to be sure to capture the full season.
The yearly minimums/maximums are detected within a two months windows around the minimums/maximums of the mean signal (detected during the first step).
\item Calculating the phenological metrics. \cr
Every year, four metrics are extracted from the time series :
\itemize{
\item SOS : Start Of the growing Season \cr
We use a threshold method. The day of the start of the season is detected when the NDVI value has increase by $50\%$ of the distance between the "left" minimum and the maximum.
The value of the threshold can be customized by the user.
\item EOS : End Of the growing Season \cr
Similarly than SOS, the day of the end of the season is detected when the NDVI value has decrease by $50\%$ of the difference between the "right" minimum and the maximum.
The value of the threshold can be customized by the user.
\item LOS : Length of the growing Season, $LOS = EOS - SOS$.\cr
LOS a number of period.
\item cumNDVI : cumulative NDVI.\cr
integral under the NDVI curve during the growing season.
There isn't a consensus on the way to compute the cumulative NDVI.
The minimum value has to be set.
For the moment, we compute the full integral under the curves.
Hopefully, soon there will be an option to consider either 0.1 as minimum (to delete bare soil effect) or
the mean of the SOS and EOS NDVI value (to take into account only the curve of the growing season).
}
}
}
\value{
A text file is saved with phenological metrics \verb{outfile} and a list is returned with :
\itemize{
\item names : vector with the names of the sites (rownames of the following matrix).
\item year : vector with the year (colnames of the following matrix).
\item mld : date of the left minimum (number of the period).
\item M : maximum NDVI value over the considered period.
\item SOS : Start Of the Season (number of the period)
\item EOS : End Of the Season (number of the period)
\item LOS : Length Of the Season (number of the period)
\item cumNDVI : integral under the NDVI curve during the growing season.
}
}
\references{
Brown, M. E. and de Beurs K. and Vrieling A. 2010 \emph{Remote Sensing of Environment}, The response of African land surface phenology to large scale climate oscillations, \bold{114}, 2286--2296
}
\author{
Romain Frelat and Bruno Gerard
}
\examples{
#loading the data
data(SLPSAs_ts)
#defining local variables
outfile = "SLPSAs-Metrics.txt"
outgraph = "SLPSAs-Metrics.pdf"
#extracting metrics
metrics = AnPhenoMetrics(SLPSAs_ts$ts, outfile, outgraph,
Ystart=SLPSAs_ts$Ystart, period=SLPSAs_ts$period)
}
\keyword{metrics}
| /pkg/man/.svn/text-base/AnPhenoMetrics.Rd.svn-base | no_license | rfrelat/ndvits-R | R | false | false | 4,860 | \name{AnPhenoMetrics}
\alias{AnPhenoMetrics}
\title{
Extracting annual phenological metrics
}
\description{
This function extracts annual phenological metrics
}
\usage{
AnPhenoMetrics(TS, outfile, outgraph, Ystart, period, SOSth = 0.5, EOSth = 0.5)
}
\arguments{
\item{TS}{matrix or data.frame containing the time series (one row per time series).}
\item{outfile}{name of the file where the metrics are saved (extension ".txt"). }
\item{outgraph}{name of the file where the graphs are saved (extension ".pdf").\cr}
\item{Ystart}{starting year of the analysis (in four-digit format).}
\item{period}{number of observations per year. \cr
For VGT's dataset : \var{period}=36, GIMMS's dataset: \var{period}=24.}
\item{SOSth}{threshold for the detection of the start of the season. \cr By default SOSth=0.5}
\item{EOSth}{threshold for the detection of the start of the season. \cr By default SOSth=0.5}
}
\details{
We proceed in 3 mean steps :
\enumerate{
\item Detecting the number of minimums and maximums with the mean signal.\cr
The mean signal over the year is computed. Then we detect the minimums and the maximums.
A maximum is detected if it is a maximum over a window of 6 measures and if its value is higher than the mean of the mean signal.
Similarly, a minimum is detected if it is a minimum over a window of 6 measures and if its value is lower than the mean of the mean signal.
Then, a routine check if minimums and maximums are one after another and if there are the same number of maximums and minimums.
If the global mean of the time series is higher than 0.7 or smaller than 0.2, further calculations are stopped.
The signal is the one of the bare soil or forest and metrics became very difficult to compute, no more reliable.
\item Focusing on individual year time series to detect minimums and maximums.\cr
The main issue is to handle the full season whenever its starts and its stopped (very often up to the next year).
Our solution is to start the time series two months before the global minimum.
If two minimums (two seasons), we take into account the minimum of them.
For each year, we consider a time series with a length of 16 months to be sure to capture the full season.
The yearly minimums/maximums are detected within a two months windows around the minimums/maximums of the mean signal (detected during the first step).
\item Calculating the phenological metrics. \cr
Every year, four metrics are extracted from the time series :
\itemize{
\item SOS : Start Of the growing Season \cr
We use a threshold method. The day of the start of the season is detected when the NDVI value has increase by $50\%$ of the distance between the "left" minimum and the maximum.
The value of the threshold can be customized by the user.
\item EOS : End Of the growing Season \cr
Similarly than SOS, the day of the end of the season is detected when the NDVI value has decrease by $50\%$ of the difference between the "right" minimum and the maximum.
The value of the threshold can be customized by the user.
\item LOS : Length of the growing Season, $LOS = EOS - SOS$.\cr
LOS a number of period.
\item cumNDVI : cumulative NDVI.\cr
integral under the NDVI curve during the growing season.
There isn't a consensus on the way to compute the cumulative NDVI.
The minimum value has to be set.
For the moment, we compute the full integral under the curves.
Hopefully, soon there will be an option to consider either 0.1 as minimum (to delete bare soil effect) or
the mean of the SOS and EOS NDVI value (to take into account only the curve of the growing season).
}
}
}
\value{
A text file is saved with phenological metrics \verb{outfile} and a list is returned with :
\itemize{
\item names : vector with the names of the sites (rownames of the following matrix).
\item year : vector with the year (colnames of the following matrix).
\item mld : date of the left minimum (number of the period).
\item M : maximum NDVI value over the considered period.
\item SOS : Start Of the Season (number of the period)
\item EOS : End Of the Season (number of the period)
\item LOS : Length Of the Season (number of the period)
\item cumNDVI : integral under the NDVI curve during the growing season.
}
}
\references{
Brown, M. E. and de Beurs K. and Vrieling A. 2010 \emph{Remote Sensing of Environment}, The response of African land surface phenology to large scale climate oscillations, \bold{114}, 2286--2296
}
\author{
Romain Frelat and Bruno Gerard
}
\examples{
#loading the data
data(SLPSAs_ts)
#defining local variables
outfile = "SLPSAs-Metrics.txt"
outgraph = "SLPSAs-Metrics.pdf"
#extracting metrics
metrics = AnPhenoMetrics(SLPSAs_ts$ts, outfile, outgraph,
Ystart=SLPSAs_ts$Ystart, period=SLPSAs_ts$period)
}
\keyword{metrics}
| |
# Hospital cost analysis (HHS): descriptive statistics, linear/ANOVA models of
# discharge charges, and a random-forest classifier for patient race.
# Variables: AGE (0-17), FEMALE (1 = female), LOS (length of stay),
# RACE, TOTCHG (discharge cost), APRDRG (all-patient refined DRG).
library(ggplot2)
library(dplyr)
library(readr)
library(tidyr)
library(randomForest)
library(caret)
library(MLmetrics)
# NOTE(review): machine-specific working directory; the script expects
# HospitalCosts.csv in this folder.
setwd("~/zrc/HHS_CostAnalysis/data")
# READ IN DATA
hosp_cost <- read.csv("HospitalCosts.csv")
# DESCRIPTIVE STATS
str(hosp_cost)
head(hosp_cost)
tail(hosp_cost)
summary(hosp_cost)
# Age distribution and the age category with the most admissions.
head(hosp_cost$AGE)
summary(hosp_cost$AGE)
table(hosp_cost$AGE)
hist(hosp_cost$AGE)
summary(as.factor(hosp_cost$AGE))
max(table(hosp_cost$AGE))
max(summary(as.factor(hosp_cost$AGE)))
which.max(table(hosp_cost$AGE))
# Age group with the highest total expenditure.
# Fixed: max(age) scanned every column of the aggregate (including AGE itself);
# report the row with the maximum total charge instead.
age <- aggregate(TOTCHG ~ AGE, data = hosp_cost, sum)
age[which.max(age$TOTCHG), ]
# What is the race distribution?
str(hosp_cost)
head(hosp_cost$RACE)
tail(hosp_cost$RACE)
summary(hosp_cost$RACE)
plot(hosp_cost$RACE)
summary(as.factor(hosp_cost$RACE))
max(summary(as.factor(hosp_cost$RACE)))
# What is the gender distribution?
str(hosp_cost$FEMALE)
head(hosp_cost$FEMALE)
summary(hosp_cost$FEMALE)
tail(hosp_cost$FEMALE)
hist(hosp_cost$FEMALE)
# Find the diagnosis-related group with maximum hospitalization and expenditure.
t <- table(hosp_cost$APRDRG)
d <- as.data.frame(t)
names(d)[1] <- 'Diagnosis Group'
d
which.max(table(hosp_cost$APRDRG))
which.max(t)
# Fixed: which.max(d) on a whole data.frame is invalid; report the most
# frequent diagnosis group instead.
d[which.max(d$Freq), ]
res <- aggregate(TOTCHG ~ APRDRG, data = hosp_cost, sum)
res
which.max(res$TOTCHG)
res[which.max(res$TOTCHG), ]
# Is the patient's race related to hospitalization cost? (malpractice check)
table(hosp_cost$RACE)
hosp_cost$RACE <- as.factor(hosp_cost$RACE)
fit <- lm(TOTCHG ~ RACE, data = hosp_cost)
fit
summary(fit)
fit1 <- aov(TOTCHG ~ RACE, data = hosp_cost)
summary(fit1)
hosp_cost <- na.omit(hosp_cost)
# Severity of hospital costs by age and gender (resource allocation).
table(hosp_cost$FEMALE)
a <- aov(TOTCHG ~ AGE + FEMALE, data = hosp_cost)
summary(a)
b <- lm(TOTCHG ~ AGE + FEMALE, data = hosp_cost)
summary(b)
# Can length of stay be predicted from age, gender and race?
table(hosp_cost$LOS)
cat <- aov(LOS ~ AGE + FEMALE + RACE, data = hosp_cost)
summary(cat)
cat <- lm(LOS ~ AGE + FEMALE + RACE, data = hosp_cost)
summary(cat)
# Which variable mainly affects the hospital costs?
aov(TOTCHG ~ ., data = hosp_cost)
mod <- lm(TOTCHG ~ ., data = hosp_cost)
summary(mod)
# ALTERNATIVE summaries and plots
str(hosp_cost)
summary(hosp_cost)
hist(hosp_cost$AGE, col = "red", main = "Age Distributions")
hist(hosp_cost$FEMALE, col = "blue", main = "Transaction Volume")
plot(hosp_cost$RACE, hosp_cost$TOTCHG, main = "Cost Distribution by Race")
# RANDOM FOREST: classify RACE from the remaining variables.
set.seed(1900)
train_ind <- sample(nrow(hosp_cost), round(0.75 * nrow(hosp_cost)))
train <- hosp_cost[train_ind, ]
test <- hosp_cost[-train_ind, ]
str(hosp_cost)
# Fixed: train on the training split only. The original fit on the full data
# set and then scored a test split drawn from it -- data leakage that inflates
# the confusion matrix and F1 score.
rfModel <- randomForest(RACE ~ ., data = train)
test$predicted <- predict(rfModel, test)
confusionMatrix(test$RACE, test$predicted)
F1_all <- F1_Score(test$RACE, test$predicted)
F1_all
options(repr.plot.width = 5, repr.plot.height = 4)
varImpPlot(rfModel,
           sort = TRUE,
           n.var = 10,
           main = "Most Important Variables")
| /index.R | no_license | vjnathan0307/Hospital-Cost-Analysis | R | false | false | 3,657 | r | library(ggplot2)
library(dplyr)
library(readr)
library(tidyr)
library(randomForest)
setwd("~/zrc/HHS_CostAnalysis/data")
#READ IN DATA
hosp_cost <- read.csv("HospitalCosts.csv")
#DESCRIPTIVE STATS
# str describes the types of variables we are working with in the wisconsin hospital dataset
#The age range is between 0-17 years old and the gender identifying 1 or 0 refers to isFemale = 1, isNotFemale=0
#LOS refers to the length of stay. TOTCHG is the hospital discharge costs, and APRDRG stands for all patient refined diagnosis related groups
str(hosp_cost)
head(hosp_cost)
tail(hosp_cost)
summary(hosp_cost)
head(hosp_cost$AGE)
summary(hosp_cost$AGE)
table(hosp_cost$AGE)
hist(hosp_cost$AGE)
summary(as.factor(hosp_cost$AGE))
max(table(hosp_cost$AGE))
max(summary(as.factor(hosp_cost$AGE)))
which.max(table(hosp_cost$AGE))
age <- aggregate(TOTCHG ~ AGE, data = hosp_cost, sum)
max(age)
#whats the race distribution?
str(hosp_cost)
head(hosp_cost$RACE)
tail(hosp_cost$RACE)
summary(hosp_cost$RACE)
plot(hosp_cost$RACE)
summary(as.factor(hosp_cost$RACE))
max(summary(as.factor(hosp_cost$RACE)))
#whats the gender distirbution?
str(hosp_cost$FEMALE)
head(hosp_cost$FEMALE)
summary(hosp_cost$FEMALE)
tail(hosp_cost$FEMALE)
hist(hosp_cost$FEMALE)
#In order of severity of the diagnosis and treatments and to find out the expensive treatments, the agency wants to find the diagnosis related group that has maximum hospitalization and expenditure.
t <- table(hosp_cost$APRDRG)
d <- as.data.frame(t)
names(d)[1] = 'Diagnosis Group'
d
which.max(table(hosp_cost$APRDRG))
which.max(t)
which.max(d)
res <- aggregate(TOTCHG ~ APRDRG, data = hosp_cost, sum)
res
which.max(res$TOTCHG)
res[which.max(res$TOTCHG),]
#To make sure that there is no malpractice, the agency needs to analyze if the race of the patient is related to the hospitalization costs
table(hosp_cost$RACE)
hosp_cost$RACE <- as.factor(hosp_cost$RACE)
fit <- lm(TOTCHG ~ RACE,data=hosp_cost)
fit
summary(fit)
fit1 <- aov(TOTCHG ~ RACE,data=hosp_cost)
summary(fit1)
hosp_cost <- na.omit(hosp_cost)
#To properly utilize the costs, the agency has to analyze the severity of the hospital costs by age and gender for proper allocation of resources.
table(hosp_cost$FEMALE)
a <- aov(TOTCHG ~ AGE+FEMALE,data=hosp_cost)
summary(a)
b <- lm(TOTCHG ~ AGE+FEMALE,data=hosp_cost)
summary(b)
#Since the length of stay is the crucial factor for inpatients, the agency wants to find if the length of stay can be predicted from age, gender, and race.
table(hosp_cost$LOS)
cat <- aov(LOS ~ AGE+FEMALE+RACE,data=hosp_cost)
summary(cat)
cat <- lm(LOS ~ AGE+FEMALE+RACE,data=hosp_cost)
summary(cat)
#To perform a complete analysis, the agency wants to find the variable that mainly affects the hospital costs.
aov(TOTCHG ~.,data=hosp_cost)
mod <- lm(TOTCHG ~ .,data=hosp_cost)
summary(mod)
#ALTERNATIVE
str(hosp_cost)
summary(hosp_cost)
hist(hosp_cost$AGE, col="red", main="Age Distributions")
hist(hosp_cost$FEMALE, col="blue", main="Transaction Volume")
plot(hosp_cost$RACE, hosp_cost$TOTCHG, main="Cost Distribution by Race")
#RANDOM FOREST
set.seed(1900)
train_ind <- sample(nrow(hosp_cost),round(0.75*nrow(hosp_cost)))
train <- hosp_cost[train_ind,]
test <- hosp_cost[-train_ind,]
str(hosp_cost)
rfModel <- randomForest(RACE ~ . , data = hosp_cost)
test$predicted <- predict(rfModel, test)
library(caret)
confusionMatrix(test$RACE, test$predicted)
library(MLmetrics)
F1_all <- F1_Score(test$RACE, test$predicted)
F1_all
options(repr.plot.width=5, repr.plot.height=4)
varImpPlot(rfModel,
sort=T,
n.var=10,
main="Most Important Variables")
|
library(bit64)
### Name: xor.integer64
### Title: Binary operators for integer64 vectors
### Aliases: &.integer64 |.integer64 xor.integer64 !=.integer64
###   ==.integer64 <.integer64 <=.integer64 >.integer64 >=.integer64
###   +.integer64 -.integer64 *.integer64 ^.integer64 /.integer64
###   %/%.integer64 %%.integer64 binattr
### Keywords: classes manip

### ** Examples

# Arithmetic between an integer64 vector and an ordinary number dispatches to
# the integer64 method; the example subtracts 1 from each of 1..12.
as.integer64(1:12) - 1
| /data/genthat_extracted_code/bit64/examples/xor.integer64.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 405 | r | library(bit64)
### Name: xor.integer64
### Title: Binary operators for integer64 vectors
### Aliases: &.integer64 |.integer64 xor.integer64 !=.integer64
### ==.integer64 <.integer64 <=.integer64 >.integer64 >=.integer64
### +.integer64 -.integer64 *.integer64 ^.integer64 /.integer64
### %/%.integer64 %%.integer64 binattr
### Keywords: classes manip
### ** Examples
as.integer64(1:12) - 1
|
## Load IPD kinetics from a legacy PacBio cmp.h5 alignment file and assemble
## per-genome-position features ("genomeF"), processing the alignment index in
## n.chunk pieces to bound memory use. Each chunk's genomeF is saved to
## out.dir as genomeF.chunk.<i>.Rdata, then all chunks are reloaded, merged
## with mergeGenomeF() and saved as genomeF.Rdata.
##
## @param cmpH5.file path to the .cmp.h5 alignment file
## @param out.dir directory receiving per-chunk and merged .Rdata files
## @param n.chunk number of roughly equal subread chunks to process
## @param normalization.method 'bySubread' applies normalizeBySubread per chunk
## @param is.use.CCS passed through to getFeaturesAlongGenome
## @param is.return if TRUE, also return the merged genomeF object
## @param mapQV.cutoff mapping-quality threshold used to filter subreads
loadCmpH5toGenomeF <- function(cmpH5.file, out.dir, n.chunk=10, normalization.method='bySubread', is.use.CCS=FALSE, is.return=FALSE, mapQV.cutoff=255)
{
	# NOTE(review): the cutoff is decremented once and then applied with >=,
	# so subreads with mapQV >= (mapQV.cutoff - 1) are kept -- confirm this
	# off-by-one is intentional.
	mapQV.cutoff <- mapQV.cutoff - 1
	cmpH5 <- PacBioCmpH5(cmpH5.file)
	alnsIdx <- alnIndex(cmpH5)
	### split the subread indices into n.chunk contiguous ranges; the last
	### chunk absorbs the remainder
	n.subread <- nrow(alnsIdx)
	chunk.size <- floor(n.subread/ n.chunk)
	idx.list <- list()
	for (i in 1:(n.chunk-1)){
		idx.list[[i]] <- ((i-1)*chunk.size+1):(i*chunk.size)
	}
	idx.list[[n.chunk]] <- ((n.chunk-1)*chunk.size+1):n.subread
	### load data from each chunk, compute its genomeF and save it to disk
	for (i in 1:n.chunk ){
		alnsF.cur <- getAlignmentsWithFeatures(cmpH5, idx=idx.list[[i]], fxs=list(IPD=getIPD))
		alnsIdx.cur <- alnsIdx[idx.list[[i]], ]
		# remove low mapQV subreads
		idx.sel <- which (alnsIdx.cur$mapQV>=mapQV.cutoff)
		alnsF.cur <- alnsF.cur[idx.sel]
		alnsIdx.cur <- alnsIdx.cur[idx.sel,]
		alnsF.cur <- transformIPD(alnsF.cur)
		if (normalization.method=='bySubread')
			alnsF.cur <- normalizeBySubread(alnsF.cur)
		genomeF.cur <- getFeaturesAlongGenome(alnsF.cur, alnsIdx.cur, is.use.CCS)
		file.out <- paste(out.dir,'/genomeF.chunk.',i,'.Rdata',sep='')
		save(genomeF.cur, file=file.out)
		# free the per-chunk alignment data before the next iteration
		rm(alnsF.cur, alnsIdx.cur);gc()
		cat('load chunk ',i,'\n')
	}
	cat('load chunk', n.chunk,'\n')
	### merge chunks: reload every per-chunk genomeF file and combine them
	print('start to merge chunks')
	rm(genomeF.cur)
	genomeF.list <- list()
	for (i in 1:n.chunk){
		file.in <- paste(out.dir,'/genomeF.chunk.',i,'.Rdata',sep='')
		# load() restores the genomeF.cur object saved above
		load (file.in)
		genomeF.list[[i]] <- genomeF.cur
		rm(genomeF.cur)
		cat('load chunk ',i,'\r')
	}
	cat('finished loading ', n.chunk, ' chunks\n')
	genomeF <- mergeGenomeF(genomeF.list)
	save(genomeF, file= paste(out.dir,'/genomeF.Rdata',sep=''))
	print('finished')
	rm (genomeF.list);gc()
	# value is returned only on request; otherwise the function is used for
	# its file-writing side effects
	if (is.return==TRUE)
		genomeF
}
| /R/loadCmpH5toGenomeF.R | no_license | xuqe/seqPatch | R | false | false | 1,772 | r | loadCmpH5toGenomeF <- function(cmpH5.file, out.dir, n.chunk=10, normalization.method='bySubread', is.use.CCS=FALSE, is.return=FALSE, mapQV.cutoff=255)
{
mapQV.cutoff <- mapQV.cutoff - 1
cmpH5 <- PacBioCmpH5(cmpH5.file)
alnsIdx <- alnIndex(cmpH5)
### split chunks
n.subread <- nrow(alnsIdx)
chunk.size <- floor(n.subread/ n.chunk)
idx.list <- list()
for (i in 1:(n.chunk-1)){
idx.list[[i]] <- ((i-1)*chunk.size+1):(i*chunk.size)
}
idx.list[[n.chunk]] <- ((n.chunk-1)*chunk.size+1):n.subread
### load data form each chunk
for (i in 1:n.chunk ){
alnsF.cur <- getAlignmentsWithFeatures(cmpH5, idx=idx.list[[i]], fxs=list(IPD=getIPD))
alnsIdx.cur <- alnsIdx[idx.list[[i]], ]
# remove low mapQV subreads
idx.sel <- which (alnsIdx.cur$mapQV>=mapQV.cutoff)
alnsF.cur <- alnsF.cur[idx.sel]
alnsIdx.cur <- alnsIdx.cur[idx.sel,]
alnsF.cur <- transformIPD(alnsF.cur)
if (normalization.method=='bySubread')
alnsF.cur <- normalizeBySubread(alnsF.cur)
genomeF.cur <- getFeaturesAlongGenome(alnsF.cur, alnsIdx.cur, is.use.CCS)
file.out <- paste(out.dir,'/genomeF.chunk.',i,'.Rdata',sep='')
save(genomeF.cur, file=file.out)
rm(alnsF.cur, alnsIdx.cur);gc()
cat('load chunk ',i,'\n')
}
cat('load chunk', n.chunk,'\n')
### merge chunks
print('start to merge chunks')
rm(genomeF.cur)
genomeF.list <- list()
for (i in 1:n.chunk){
file.in <- paste(out.dir,'/genomeF.chunk.',i,'.Rdata',sep='')
load (file.in)
genomeF.list[[i]] <- genomeF.cur
rm(genomeF.cur)
cat('load chunk ',i,'\r')
}
cat('finished loading ', n.chunk, ' chunks\n')
genomeF <- mergeGenomeF(genomeF.list)
save(genomeF, file= paste(out.dir,'/genomeF.Rdata',sep=''))
print('finished')
rm (genomeF.list);gc()
if (is.return==TRUE)
genomeF
}
|
library(EMVS)
### Name: EMVS
### Title: Bayesian Variable Selection using EM Algorithm
### Aliases: EMVS
### Keywords: Bayesian variable selection Spike and slab

### ** Examples

# Linear regression with p>n variables: only the first three of 1000
# predictors carry signal.
library(EMVS)
n = 100
p = 1000
X = matrix(rnorm(n * p), n, p)
beta = c(1.5, 2, 2.5, rep(0, p-3))
Y = X[,1] * beta[1] + X[,2] * beta[2] + X[,3] * beta[3] + rnorm(n)

# conjugate prior on regression coefficients and variance:
# v0 is a grid of spike variances, v1 the slab variance
v0 = seq(0.1, 2, length.out = 20)
v1 = 1000
beta_init = rep(1, p)
sigma_init = 1
a = b = 1
epsilon = 10^{-5}

result = EMVS(Y, X, v0 = v0, v1 = v1, type = "betabinomial",
independent = FALSE, beta_init = beta_init, sigma_init = sigma_init,
epsilon = epsilon, a = a, b = b)

EMVSplot(result, "both", FALSE)

EMVSbest(result)

# independent prior on regression coefficients and variance:
# spike variances on a log-spaced grid (note log_v0 = TRUE below)
v0 = exp(seq(-10, -1, length.out = 20))
v1 = 1
beta_init = rep(1,p)
sigma_init = 1
a = b = 1
epsilon = 10^{-5}

result = EMVS(Y, X, v0 = v0, v1 = v1, type = "betabinomial",
independent = TRUE, beta_init = beta_init, sigma_init = sigma_init,
epsilon = epsilon, a = a, b = b, log_v0 = TRUE)

EMVSplot(result, "both", FALSE)

EMVSbest(result)
| /data/genthat_extracted_code/EMVS/examples/EMVS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,175 | r | library(EMVS)
### Name: EMVS
### Title: Bayesian Variable Selection using EM Algorithm
### Aliases: EMVS
### Keywords: Bayesian variable selection Spike and slab
### ** Examples
# Linear regression with p>n variables
library(EMVS)
n = 100
p = 1000
X = matrix(rnorm(n * p), n, p)
beta = c(1.5, 2, 2.5, rep(0, p-3))
Y = X[,1] * beta[1] + X[,2] * beta[2] + X[,3] * beta[3] + rnorm(n)
# conjugate prior on regression coefficients and variance
v0 = seq(0.1, 2, length.out = 20)
v1 = 1000
beta_init = rep(1, p)
sigma_init = 1
a = b = 1
epsilon = 10^{-5}
result = EMVS(Y, X, v0 = v0, v1 = v1, type = "betabinomial",
independent = FALSE, beta_init = beta_init, sigma_init = sigma_init,
epsilon = epsilon, a = a, b = b)
EMVSplot(result, "both", FALSE)
EMVSbest(result)
# independent prior on regression coefficients and variance
v0 = exp(seq(-10, -1, length.out = 20))
v1 = 1
beta_init = rep(1,p)
sigma_init = 1
a = b = 1
epsilon = 10^{-5}
result = EMVS(Y, X, v0 = v0, v1 = v1, type = "betabinomial",
independent = TRUE, beta_init = beta_init, sigma_init = sigma_init,
epsilon = epsilon, a = a, b = b, log_v0 = TRUE)
EMVSplot(result, "both", FALSE)
EMVSbest(result)
|
requireNamespace("uuid", quietly = T)
## Create emuDB database schema object from EMU template (.tpl) file
##
## @param tplPath EMU template file path
## @param dbUUID optional database UUID
## @param encoding encoding of the template file
## @return object of class emuDB.schema.db
## @import stringr uuid wrassp
## @keywords emuDB database schema Emu
##
load_dbConfigFromEmuTemplate=function(tplPath,dbUUID=NULL,encoding=NULL){
LEVEL_CMD='level'
LABFILE_CMD='labfile'
LABEL_CMD='label'
SET_CMD='set'
TRACK_CMD='track'
PATH_CMD='path'
LEGAL_CMD='legal'
if(is.null(tplPath)) {
stop("Argument tplPath (path to Emu template file) must not be NULL\n")
}
tplBasename = basename(tplPath)
dbName=gsub("[.][tT][pP][lL]$","",tplBasename)
# read
if(is.null(encoding)){
tpl = try(readLines(tplPath))
}else{
tpl = try(readLines(tplPath,encoding=encoding))
}
if(class(tpl) == "try-error") {
stop("read tpl: cannot read from file ", tplPath)
}
# check if file (not directory)
tplFInfo = try(file.info(tplPath))
if(class(tplFInfo) == "try-error" | is.null(tplFInfo)) {
stop("check template file: cannot get file info: ", tplPath)
}
if(tplFInfo[['isdir']]){
stop(tplPath," is a directory. Expected a legacy EMU template file path.")
}
tracks=list()
flags=list()
levelDefinitions=list()
linkDefinitions=list()
pathDescriptors=list()
annotationDescriptors=list()
hlbTierDescriptors=list()
hlbAnnotationDescriptor=NULL;
lineNr=0
for(line in tpl){
lineNr=lineNr+1L
trimmedLine=stringr::str_trim(line)
if(trimmedLine!=''){
firstChar=substr(trimmedLine,1,1)
if(firstChar!='!'){
lineTokensLst=strsplit(trimmedLine,'[[:space:]]+')
lineTokens=lineTokensLst[[1]]
lineTokenCount=length(lineTokens)
if(lineTokenCount>=1){
command=lineTokens[1]
if(command==LABFILE_CMD){
tierName=lineTokens[2]
# TODO are there any default values for this properties?
extension=NULL
type=NULL
timeFactor=NULL
for(tki in 3:length(lineTokens)){
tk=lineTokens[[tki]]
if(substr(tk,1,1)==':'){
# property key
key=substring(tk,2)
}else{
#property value
if(is.null(key)){
stop("Emu template parser key/value error in ",lineNr,"\n")
}
val=tk
if(key=='extension'){
extension=val
}else if(key=='type'){
type=val
}else if(key=='time-factor'){
timeFactor=val
}
# reset key
key=NULL
}
}
ad=list(name=tierName,extension=extension,type=type,timeFactor=timeFactor)
annotationDescriptors[[length(annotationDescriptors)+1L]] <- ad
# lab file can reference hlb level
replaced=FALSE
tdLen=length(levelDefinitions)
for(i in 1:tdLen){
td=levelDefinitions[[i]]
if(td[['name']]==tierName){
# replace
levelDefinitions[[i]]=list(name=td[['name']],type=type,attributeDefinitions=td[['attributeDefinitions']]);
replaced=TRUE
break;
}
}
if(!replaced){
# append
levelDefinitions[[length(levelDefinitions)+1L]]=list(name=tierName);
}
}else if(command==TRACK_CMD){
name=lineTokens[2]
extension=lineTokens[3]
track=list(name=name, columnName=name, fileExtension=extension)
tracks[[length(tracks)+1L]] <- track
}else if(command==SET_CMD){
key=lineTokens[2]
value=lineTokens[3]
flags[[key]]=value
}else if(command==PATH_CMD){
annoKeysStr=lineTokens[2]
annoBasePath=lineTokens[3]
annoKeysLst=strsplit(annoKeysStr,',')[[1]]
for(aki in 1:length(annoKeysLst)){
annoKey=annoKeysLst[[aki]]
pathDescr=list(basePath=annoBasePath,key=annoKey)
pathDescriptors[[length(pathDescriptors)+1L]] <- pathDescr
if(annoKey=='hlb'){
# special meaning
# hlb files are neither declared by tracks nor by labfile directive
# add as annotationDescriptor
ad=list(name=NULL,extension=annoKey,type='HLB')
annotationDescriptors[[length(annotationDescriptors)+1L]] <- ad
}
}
}else if(command==LEVEL_CMD){
levelTierName=lineTokens[2]
if(lineTokenCount>=3){
linkType="ONE_TO_MANY"
if(lineTokenCount>=4){
relationshipType=lineTokens[4]
if(relationshipType=='many-to-many'){
linkType="MANY_TO_MANY"
}
}
linkDefinition=list(type=linkType,superlevelName=lineTokens[3],sublevelName=levelTierName)
linkDefinitions[[length(linkDefinitions)+1L]]=linkDefinition
}
tierDescr=list(name=levelTierName,type='ITEM', attributeDefinitions=list(list(name = levelTierName, type = "STRING")))
exists=FALSE
for(lDef in levelDefinitions){
if(lDef[['name']]==levelTierName){
exists=TRUE
break
}
}
if(!exists){
levelDefinitions[[length(levelDefinitions)+1L]]=tierDescr
}
# TODO constraints
}else if(command==LABEL_CMD){
levelTierName=lineTokens[2]
labelNames=list(levelTierName)
if(lineTokenCount!=3){
stop("Expected label directive \"label levelName labelName\"")
}
for(i in 1:length(levelDefinitions)){
td=levelDefinitions[[i]]
if(td[['name']]==levelTierName){
# replace
attrDefs=levelDefinitions[[i]][['attributeDefinitions']]
attrDefs[[length(attrDefs)+1L]]=list(name=lineTokens[3],type='STRING')
levelDefinitions[[i]]=list(name=levelTierName,type=td[['type']],attributeDefinitions=attrDefs);
break
}
}
}else if(command==LEGAL_CMD){
if(lineTokenCount<=3){
stop("Expected legal directive \"legal levelName groupName label1 label2 ... labeln\"")
}
attrName=lineTokens[2]
labelGroupName=lineTokens[3]
groupLabels=list()
for(i in 4:lineTokenCount){
groupLabels[[length(groupLabels)+1]]=lineTokens[i]
}
set=FALSE
for(i in 1:length(levelDefinitions)){
td=levelDefinitions[[i]]
ads=td[['attributeDefinitions']]
for(j in 1:length(ads)){
ad=ads[[j]]
if(ad[['name']]==attrName){
lblGrIdx=length(ad[['labelGroups']])+1
levelDefinitions[[i]][['attributeDefinitions']][[j]][['labelGroups']][[lblGrIdx]]=list(name=labelGroupName,values=groupLabels)
set=TRUE
break
}
}
if(set){
break
}
}
}
}
}
}
}
#pef=flags$PrimaryExtension
tl=length(tracks)
al=length(annotationDescriptors)
# apply pathes to tracks
tss2=1:tl
for(ti2 in tss2){
for(pd in pathDescriptors){
if(tracks[[ti2]][['fileExtension']] == pd[['key']]){
tracks[[ti2]][['basePath']]=pd[['basePath']]
break
}
}
}
# apply pathes to annotations
as=1:al
for(ai in as){
for(pd in pathDescriptors){
if(annotationDescriptors[[ai]][['extension']] == pd[['key']]){
annotationDescriptors[[ai]][['basePath']]=pd[['basePath']]
break
}
}
}
ssffTrackDefinitions=list()
assign=list()
mediafileBasePathPattern=NULL
mediafileExtension=NULL
for(tr in tracks){
n=tr[['name']]
e=tr[['fileExtension']]
if(e==flags[['PrimaryExtension']]){
primaryBasePath=tr[['basePath']]
}
if(n=='samples'){
if(e!='wav'){
cat("WARNING! Media file type with extension ",e," not supported by EMU-Webapp.\n")
}
mediafileExtension=e
mediafileBasePathPattern=tr[['basePath']]
}else{
#array !
ssffTrackDefinitions[[length(ssffTrackDefinitions)+1L]]=tr
# default assign all to spectrum TODO
}
}
if(is.null(dbUUID)){
# Generate UUID
# problem: the UUID will change on every reload
dbUUID=uuid::UUIDgenerate()
}
# default perspective
# assign all SSFF tracks to sonagram
assign=list()
for(ssffTrack in ssffTrackDefinitions){
# TODO dirty workaround
# detect formant tracks by number of channels
if(ssffTrack[['name']] == 'fm'){
#ssffTrack$name='FORMANTS'
#assign[[length(assign)+1]]=list(signalCanvasName='SPEC',ssffTrackName='FORMANTS')
}
}
contourLims=list()
sc=list(order=c("OSCI","SPEC"), assign=assign, contourLims=contourLims)
defaultLvlOrder=list()
for(ld in levelDefinitions){
if(ld[['type']]=='SEGMENT' || ld[['type']]=='EVENT'){
defaultLvlOrder[[length(defaultLvlOrder)+1L]]=ld[['name']]
}
}
defPersp=list(name='default',signalCanvases=sc,levelCanvases=list(order=defaultLvlOrder),twoDimCanvases=list(order=list()))
waCfg=list(perspectives=list(defPersp))
dbSchema=list(name=dbName,UUID=dbUUID,mediafileBasePathPattern=mediafileBasePathPattern,mediafileExtension=mediafileExtension,ssffTrackDefinitions=ssffTrackDefinitions,levelDefinitions=levelDefinitions,linkDefinitions=linkDefinitions,EMUwebAppConfig=waCfg,annotationDescriptors=annotationDescriptors,tracks=tracks,flags=flags);
# get max label array size
maxLbls=0
for(lvlDef in levelDefinitions){
attrCnt=length(lvlDef[['attributeDefinitions']])
if(attrCnt > maxLbls){
maxLbls=attrCnt
}
}
dbSchema[['maxNumberOfLabels']]=maxLbls
return(dbSchema)
} | /R/emuR-legacy.template.R | no_license | thomaskisler/emuR | R | false | false | 10,464 | r | requireNamespace("uuid", quietly = T)
## Create emuDB database schema object from a legacy EMU template (.tpl) file
##
## Parses the line oriented template directives ('level', 'labfile', 'label',
## 'set', 'track', 'path', 'legal') into the list structure used as an emuDB
## configuration: level/link definitions, SSFF track definitions, annotation
## descriptors, media file information and a default EMU-webApp perspective.
## Blank lines and lines starting with '!' (template comments) are ignored.
##
## @param tplPath EMU template file path (must be a file, not a directory)
## @param dbUUID optional database UUID; a fresh one is generated when NULL
## @param encoding encoding of the template file (passed to readLines)
## @return object of class emuDB.schema.db (a plain named list)
## @import uuid wrassp
## @keywords emuDB database schema Emu
##
load_dbConfigFromEmuTemplate=function(tplPath,dbUUID=NULL,encoding=NULL){
  # Directive keywords of the legacy template format
  LEVEL_CMD='level'
  LABFILE_CMD='labfile'
  LABEL_CMD='label'
  SET_CMD='set'
  TRACK_CMD='track'
  PATH_CMD='path'
  LEGAL_CMD='legal'
  if(is.null(tplPath)) {
    stop("Argument tplPath (path to Emu template file) must not be NULL\n")
  }
  # Database name = template basename without its (case-insensitive) .tpl suffix
  tplBasename = basename(tplPath)
  dbName=gsub("[.][tT][pP][lL]$","",tplBasename)
  # read (honoring an explicit file encoding if one was supplied)
  if(is.null(encoding)){
    tpl = try(readLines(tplPath))
  }else{
    tpl = try(readLines(tplPath,encoding=encoding))
  }
  # inherits() instead of class(.) == "try-error": objects may carry more
  # than one class, in which case the == comparison misbehaves
  if(inherits(tpl,"try-error")) {
    stop("read tpl: cannot read from file ", tplPath)
  }
  # check if file (not directory); scalar '||' instead of elementwise '|'
  tplFInfo = try(file.info(tplPath))
  if(inherits(tplFInfo,"try-error") || is.null(tplFInfo)) {
    stop("check template file: cannot get file info: ", tplPath)
  }
  if(tplFInfo[['isdir']]){
    stop(tplPath," is a directory. Expected a legacy EMU template file path.")
  }
  # Accumulators filled while scanning the directives
  tracks=list()
  flags=list()
  levelDefinitions=list()
  linkDefinitions=list()
  pathDescriptors=list()
  annotationDescriptors=list()
  lineNr=0
  for(line in tpl){
    lineNr=lineNr+1L
    trimmedLine=trimws(line)  # base replacement for stringr::str_trim
    if(trimmedLine==''){
      next                    # skip blank lines
    }
    if(substr(trimmedLine,1,1)=='!'){
      next                    # skip template comment lines
    }
    lineTokens=strsplit(trimmedLine,'[[:space:]]+')[[1]]
    lineTokenCount=length(lineTokens)
    if(lineTokenCount<1){
      next
    }
    command=lineTokens[1]
    if(command==LABFILE_CMD){
      # 'labfile <tier> [:extension e] [:type t] [:time-factor f]'
      tierName=lineTokens[2]
      # TODO are there any default values for these properties?
      extension=NULL
      type=NULL
      timeFactor=NULL
      # fix: 'key' must exist before the first token is inspected, otherwise a
      # malformed line (value before any ':key') raised "object 'key' not
      # found" instead of the intended parser error below
      key=NULL
      # fix: the original 3:length(lineTokens) looped backwards (3:2) for
      # lines with fewer than three tokens and crashed on out-of-bounds access
      if(lineTokenCount>=3){
        for(tki in 3:lineTokenCount){
          tk=lineTokens[[tki]]
          if(substr(tk,1,1)==':'){
            # property key
            key=substring(tk,2)
          }else{
            # property value
            if(is.null(key)){
              stop("Emu template parser key/value error in ",lineNr,"\n")
            }
            val=tk
            if(key=='extension'){
              extension=val
            }else if(key=='type'){
              type=val
            }else if(key=='time-factor'){
              timeFactor=val
            }
            # reset key: each key consumes exactly one value
            key=NULL
          }
        }
      }
      ad=list(name=tierName,extension=extension,type=type,timeFactor=timeFactor)
      annotationDescriptors[[length(annotationDescriptors)+1L]] <- ad
      # a lab file can reference an already declared level: adopt its type
      replaced=FALSE
      for(i in seq_along(levelDefinitions)){  # seq_along: safe when no level exists yet
        td=levelDefinitions[[i]]
        if(td[['name']]==tierName){
          # replace, keeping the attribute definitions already collected
          levelDefinitions[[i]]=list(name=td[['name']],type=type,
                                     attributeDefinitions=td[['attributeDefinitions']])
          replaced=TRUE
          break
        }
      }
      if(!replaced){
        # append a minimal level entry known only by name
        levelDefinitions[[length(levelDefinitions)+1L]]=list(name=tierName)
      }
    }else if(command==TRACK_CMD){
      # 'track <name> <fileExtension>' declares a signal track
      name=lineTokens[2]
      track=list(name=name, columnName=name, fileExtension=lineTokens[3])
      tracks[[length(tracks)+1L]] <- track
    }else if(command==SET_CMD){
      # 'set <key> <value>' stores a global flag (e.g. PrimaryExtension)
      flags[[lineTokens[2]]]=lineTokens[3]
    }else if(command==PATH_CMD){
      # 'path <key1,key2,...> <basePath>' maps file keys/extensions to directories
      annoBasePath=lineTokens[3]
      annoKeys=strsplit(lineTokens[2],',')[[1]]
      for(annoKey in annoKeys){
        pathDescriptors[[length(pathDescriptors)+1L]] <- list(basePath=annoBasePath,key=annoKey)
        if(annoKey=='hlb'){
          # special meaning: hlb files are declared neither by 'track' nor by
          # 'labfile', so register them as an annotation descriptor here
          annotationDescriptors[[length(annotationDescriptors)+1L]] <-
            list(name=NULL,extension=annoKey,type='HLB')
        }
      }
    }else if(command==LEVEL_CMD){
      # 'level <name> [<superlevel> [<relationship>]]' declares a hierarchy level
      levelTierName=lineTokens[2]
      if(lineTokenCount>=3){
        linkType="ONE_TO_MANY"
        if(lineTokenCount>=4 && lineTokens[4]=='many-to-many'){
          linkType="MANY_TO_MANY"
        }
        linkDefinitions[[length(linkDefinitions)+1L]]=
          list(type=linkType,superlevelName=lineTokens[3],sublevelName=levelTierName)
      }
      # only add the level once; a later 'labfile' directive may change its type
      exists=FALSE
      for(lDef in levelDefinitions){
        if(lDef[['name']]==levelTierName){
          exists=TRUE
          break
        }
      }
      if(!exists){
        levelDefinitions[[length(levelDefinitions)+1L]]=
          list(name=levelTierName,type='ITEM',
               attributeDefinitions=list(list(name = levelTierName, type = "STRING")))
      }
      # TODO constraints
    }else if(command==LABEL_CMD){
      # 'label <levelName> <labelName>' adds a parallel attribute to a level
      if(lineTokenCount!=3){
        stop("Expected label directive \"label levelName labelName\"")
      }
      levelTierName=lineTokens[2]
      for(i in seq_along(levelDefinitions)){
        td=levelDefinitions[[i]]
        if(td[['name']]==levelTierName){
          # replace the level with one carrying the additional attribute
          attrDefs=td[['attributeDefinitions']]
          attrDefs[[length(attrDefs)+1L]]=list(name=lineTokens[3],type='STRING')
          levelDefinitions[[i]]=list(name=levelTierName,type=td[['type']],
                                     attributeDefinitions=attrDefs)
          break
        }
      }
    }else if(command==LEGAL_CMD){
      # 'legal <attrName> <groupName> <lbl1> ... <lbln>' defines a label group
      if(lineTokenCount<=3){
        stop("Expected legal directive \"legal levelName groupName label1 label2 ... labeln\"")
      }
      attrName=lineTokens[2]
      labelGroupName=lineTokens[3]
      groupLabels=as.list(lineTokens[4:lineTokenCount])
      # attach the group to the first attribute definition with a matching name
      set=FALSE
      for(i in seq_along(levelDefinitions)){
        ads=levelDefinitions[[i]][['attributeDefinitions']]
        for(j in seq_along(ads)){  # seq_along: a level may have no attributes yet
          if(ads[[j]][['name']]==attrName){
            lblGrIdx=length(ads[[j]][['labelGroups']])+1L
            levelDefinitions[[i]][['attributeDefinitions']][[j]][['labelGroups']][[lblGrIdx]]=
              list(name=labelGroupName,values=groupLabels)
            set=TRUE
            break
          }
        }
        if(set){
          break
        }
      }
    }
  }
  # apply the configured base paths to tracks ...
  for(ti in seq_along(tracks)){    # seq_along: safe for zero tracks
    for(pd in pathDescriptors){
      # isTRUE() guards against a missing file extension (NA/NULL comparison)
      if(isTRUE(tracks[[ti]][['fileExtension']] == pd[['key']])){
        tracks[[ti]][['basePath']]=pd[['basePath']]
        break
      }
    }
  }
  # ... and to annotation descriptors
  for(ai in seq_along(annotationDescriptors)){
    for(pd in pathDescriptors){
      if(isTRUE(annotationDescriptors[[ai]][['extension']] == pd[['key']])){
        annotationDescriptors[[ai]][['basePath']]=pd[['basePath']]
        break
      }
    }
  }
  # split tracks into the media ('samples') track and SSFF track definitions
  ssffTrackDefinitions=list()
  mediafileBasePathPattern=NULL
  mediafileExtension=NULL
  for(tr in tracks){
    if(tr[['name']]=='samples'){
      e=tr[['fileExtension']]
      if(!isTRUE(e=='wav')){
        cat("WARNING! Media file type with extension ",e," not supported by EMU-Webapp.\n")
      }
      mediafileExtension=e
      mediafileBasePathPattern=tr[['basePath']]
    }else{
      ssffTrackDefinitions[[length(ssffTrackDefinitions)+1L]]=tr
      # default assign all to spectrum TODO
    }
  }
  if(is.null(dbUUID)){
    # Generate UUID
    # problem: the UUID will change on every reload
    dbUUID=uuid::UUIDgenerate()
  }
  # default perspective: oscillogram + spectrogram, no track assignments
  # (the historic 'fm' formant-track detection loop was a no-op and was removed)
  assign=list()
  contourLims=list()
  sc=list(order=c("OSCI","SPEC"), assign=assign, contourLims=contourLims)
  # time-aligned (SEGMENT/EVENT) levels are displayed by default;
  # %in% + isTRUE() tolerates levels that never received a type
  defaultLvlOrder=list()
  for(ld in levelDefinitions){
    if(isTRUE(ld[['type']] %in% c('SEGMENT','EVENT'))){
      defaultLvlOrder[[length(defaultLvlOrder)+1L]]=ld[['name']]
    }
  }
  defPersp=list(name='default',signalCanvases=sc,
                levelCanvases=list(order=defaultLvlOrder),
                twoDimCanvases=list(order=list()))
  waCfg=list(perspectives=list(defPersp))
  dbSchema=list(name=dbName,UUID=dbUUID,
                mediafileBasePathPattern=mediafileBasePathPattern,
                mediafileExtension=mediafileExtension,
                ssffTrackDefinitions=ssffTrackDefinitions,
                levelDefinitions=levelDefinitions,
                linkDefinitions=linkDefinitions,
                EMUwebAppConfig=waCfg,
                annotationDescriptors=annotationDescriptors,
                tracks=tracks,flags=flags)
  # maximum number of parallel labels over all levels (= max attribute count)
  maxLbls=0
  for(lvlDef in levelDefinitions){
    attrCnt=length(lvlDef[['attributeDefinitions']])
    if(attrCnt > maxLbls){
      maxLbls=attrCnt
    }
  }
  dbSchema[['maxNumberOfLabels']]=maxLbls
  return(dbSchema)
}
/gnome1/third_party_lib/qt libraries/RIncludes/AppleEvents.r | permissive | NOAA-ORR-ERD/PyGnome | R | false | false | 4,016 | r | ||
#' Noise-contrastive estimate of a multivariate normal box probability
#'
#' Estimates the probability mass of the box lb <= A x <= ub under
#' x ~ N(mu, Sigma) by logistic (noise-contrastive) classification of
#' truncated-normal "data" samples against samples from an EP
#' (expectation propagation) Gaussian approximation of the truncated
#' distribution.
#'
#' @param mu mean
#' @param Sigma covariance (assumed positive definite; Cholesky is taken)
#' @param lb lower bounds of the box constraints
#' @param ub upper bounds of the box constraints
#' @param A constraint matrix (defaults to the identity)
#' @param data_samples optional pre-drawn samples from the truncated normal;
#'   drawn with TruncatedNormal::rtmvnorm when NULL
#' @param n_noise number of noise samples (also the number of data samples
#'   when they are drawn here)
#' @return scalar estimate of the box probability
pmvn_nce <- function(mu, Sigma, lb, ub, A = diag(length(mu)), data_samples = NULL,
                     n_noise) {
  # precision matrix via Cholesky factorization
  Prec <- chol2inv(chol(Sigma))
  # samples are stored column-wise (one draw per column) from here on
  if (is.null(data_samples)) {
    data_samples <- t(TruncatedNormal::rtmvnorm(n_noise, mu, Sigma, lb, ub))
  }
  else {
    data_samples <- t(data_samples)
  }
  n_data <- ncol(data_samples)
  # log of the noise/data sample-size ratio used in the NCE logistic weights
  log_nu <- log(n_noise / n_data)
  # EP moments define the Gaussian noise distribution
  ep_approx <- epmgp::moments2(mu, Sigma, lb, ub, A)
  noise_samples <- t(mvtnorm::rmvnorm(n_noise, ep_approx$mu, ep_approx$Sigma))
  # unnormalized truncated-normal log density at the data samples
  X_mu <- t(data_samples - mu)
  tmvn_lpdf_data <- -.5 * rowSums( (X_mu %*% Prec) * X_mu )
  mvn_lpdf_data <- mvtnorm::dmvnorm(t(data_samples), ep_approx$mu,
                                    ep_approx$Sigma, log = TRUE)
  log_ratio_data <- tmvn_lpdf_data - mvn_lpdf_data
  # discard noise draws outside the box (target density is zero there)
  X_A <- A %*% noise_samples
  indicators <- apply((X_A >= lb) & (X_A <= ub), 2, all)
  noise_samples <- noise_samples[, indicators]
  X_mu <- t(noise_samples - mu)
  tmvn_lpdf_noise <- -.5 * rowSums( (X_mu %*% Prec) * X_mu)
  mvn_lpdf_noise <- mvtnorm::dmvnorm(t(noise_samples), ep_approx$mu,
                                     ep_approx$Sigma, log = TRUE)
  log_ratio_noise <- tmvn_lpdf_noise - mvn_lpdf_noise
  # negative NCE log-likelihood as a function of the log normalizing constant c
  f <- function(c) {
    llik_data <- sum(plogis(log_ratio_data + c - log_nu, log.p = TRUE))
    llik_noise <- sum(plogis(log_ratio_noise + c - log_nu,
                             lower.tail = FALSE, log.p = TRUE))
    return (- (llik_data + llik_noise) / n_data)
  }
  # result$minimum is the argmin c, i.e. the estimated log constant;
  # NOTE(review): the search interval (-10, 10) is hard-coded -- assumed wide
  # enough for the problems this is used on
  result <- optimize(f, c(-10, 10))
  d <- length(mu)
  # convert to the normalized probability by subtracting the Gaussian
  # log normalizing constant
  logprob <- - (result$minimum + .5 * (d * log(2 * pi) + determinant(Sigma)$modulus))
  prob <- exp(logprob)
  # prob <- (1 / exp(result$minimum)) /
  #   ( (2 * pi)^(d / 2) * exp(determinant(Sigma)$modulus)^.5 )
  return(prob)
}
| /R/nce.R | no_license | delimited0/rcpp-epmgp | R | false | false | 1,834 | r | #' @param mu mean
#' @param Sigma covariance
pmvn_nce <- function(mu, Sigma, lb, ub, A = diag(length(mu)), data_samples = NULL,
n_noise) {
Prec <- chol2inv(chol(Sigma))
if (is.null(data_samples)) {
data_samples <- t(TruncatedNormal::rtmvnorm(n_noise, mu, Sigma, lb, ub))
}
else {
data_samples <- t(data_samples)
}
n_data <- ncol(data_samples)
log_nu <- log(n_noise / n_data)
ep_approx <- epmgp::moments2(mu, Sigma, lb, ub, A)
noise_samples <- t(mvtnorm::rmvnorm(n_noise, ep_approx$mu, ep_approx$Sigma))
X_mu <- t(data_samples - mu)
tmvn_lpdf_data <- -.5 * rowSums( (X_mu %*% Prec) * X_mu )
mvn_lpdf_data <- mvtnorm::dmvnorm(t(data_samples), ep_approx$mu,
ep_approx$Sigma, log = TRUE)
log_ratio_data <- tmvn_lpdf_data - mvn_lpdf_data
X_A <- A %*% noise_samples
indicators <- apply((X_A >= lb) & (X_A <= ub), 2, all)
noise_samples <- noise_samples[, indicators]
X_mu <- t(noise_samples - mu)
tmvn_lpdf_noise <- -.5 * rowSums( (X_mu %*% Prec) * X_mu)
mvn_lpdf_noise <- mvtnorm::dmvnorm(t(noise_samples), ep_approx$mu,
ep_approx$Sigma, log = TRUE)
log_ratio_noise <- tmvn_lpdf_noise - mvn_lpdf_noise
f <- function(c) {
llik_data <- sum(plogis(log_ratio_data + c - log_nu, log.p = TRUE))
llik_noise <- sum(plogis(log_ratio_noise + c - log_nu,
lower.tail = FALSE, log.p = TRUE))
return (- (llik_data + llik_noise) / n_data)
}
result <- optimize(f, c(-10, 10))
d <- length(mu)
logprob <- - (result$minimum + .5 * (d * log(2 * pi) + determinant(Sigma)$modulus))
prob <- exp(logprob)
# prob <- (1 / exp(result$minimum)) /
# ( (2 * pi)^(d / 2) * exp(determinant(Sigma)$modulus)^.5 )
return(prob)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{SAGMMFit}
\alias{SAGMMFit}
\title{Clustering via Stochastic Approximation and Gaussian Mixture Models (GMM)}
\usage{
SAGMMFit(X, Y = NULL, Burnin = 5, ngroups = 5, kstart = 10,
plot = FALSE)
}
\arguments{
\item{X}{numeric matrix of the data.}
\item{Y}{Group membership (if known), where groups are integers in 1:ngroups. If provided, ngroups can be inferred from Y.}
\item{Burnin}{Ratio of observations to use as a burn in before algorithm begins.}
\item{ngroups}{Number of mixture components. If Y is provided, this value is overridden by the number of groups in Y.}
\item{kstart}{number of kmeans starts to initialise.}
\item{plot}{If TRUE generates a plot of the clustering.}
}
\value{
A list containing
\item{Cluster}{The clustering of each observation.}
\item{plot}{A plot of the clustering (if requested).}
\item{l2}{Estimate of Lambda^2}
\item{ARI1}{Adjusted Rand Index 1 - using k-means}
\item{ARI2}{Adjusted Rand Index 2 - using GMM Clusters}
\item{ARI3}{Adjusted Rand Index 3 - using initialisation k-means}
\item{KM}{Initial K-means clustering of the data.}
\item{pi}{The cluster proportions (vector of length ngroups)}
\item{tau}{tau matrix of conditional probabilities.}
\item{fit}{Full output details from inner C++ loop.}
}
\description{
Fit a GMM via Stochastic Approximation. See Reference.
}
\examples{
sims<-generateSimData(ngroups=10, Dimensions=10, Number=10^4)
res1<-SAGMMFit(sims$X, sims$Y)
res2<-SAGMMFit(sims$X, ngroups=5)
}
\references{
Nguyen & Jones (2018). Big Data-Appropriate Clustering via Stochastic Approximation and Gaussian Mixture Models. In Data Analytics (pp. 79-96). CRC Press.
}
\author{
Andrew T. Jones and Hien D. Nguyen
}
| /fuzzedpackages/SAGMM/man/SAGMMFit.Rd | no_license | akhikolla/testpackages | R | false | true | 1,729 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{SAGMMFit}
\alias{SAGMMFit}
\title{Clustering via Stochastic Approximation and Gaussian Mixture Models (GMM)}
\usage{
SAGMMFit(X, Y = NULL, Burnin = 5, ngroups = 5, kstart = 10,
plot = FALSE)
}
\arguments{
\item{X}{numeric matrix of the data.}
\item{Y}{Group membership (if known). Where groups are integers in 1:ngroups. If provided ngroups can}
\item{Burnin}{Ratio of observations to use as a burn in before algorithm begins.}
\item{ngroups}{Number of mixture components. If Y is provided, and groups is not then is overridden by Y.}
\item{kstart}{number of kmeans starts to initialise.}
\item{plot}{If TRUE generates a plot of the clustering.}
}
\value{
A list containing
\item{Cluster}{The clustering of each observation.}
\item{plot}{A plot of the clustering (if requested).}
\item{l2}{Estimate of Lambda^2}
\item{ARI1}{Adjusted Rand Index 1 - using k-means}
\item{ARI2}{Adjusted Rand Index 2 - using GMM Clusters}
\item{ARI3}{Adjusted Rand Index 3 - using intialiation k-means}
\item{KM}{Initial K-means clustering of the data.}
\item{pi}{The cluster proportions (vector of length ngroups)}
\item{tau}{tau matrix of conditional probabilities.}
\item{fit}{Full output details from inner C++ loop.}
}
\description{
Fit a GMM via Stochastic Approximation. See Reference.
}
\examples{
sims<-generateSimData(ngroups=10, Dimensions=10, Number=10^4)
res1<-SAGMMFit(sims$X, sims$Y)
res2<-SAGMMFit(sims$X, ngroups=5)
}
\references{
Nguyen & Jones (2018). Big Data-Appropriate Clustering via Stochastic Approximation and Gaussian Mixture Models. In Data Analytics (pp. 79-96). CRC Press.
}
\author{
Andrew T. Jones and Hien D. Nguyen
}
|
library(shiny)
library(ggplot2)
library(tidyr)
# Define UI
ui <- fluidPage(
# Application title
titlePanel("Quality Control app"),
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
),
sidebarLayout(
# Sidebar with a slider input
sidebarPanel(
fileInput(inputId = "data_file",
label = "Choose CSV File",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
selectInput(inputId = "data_columns",
label = "Select columns for analysis",
choices = "Please Upload a file first",
multiple = TRUE
),
tags$hr(),
uiOutput("finalresult")
),
# Show a plot of the generated distribution
mainPanel(
fluidRow(
column(8,
div(class = "plotresult", tags$label("Boxplot"),plotOutput("boxplot"))
),
column(4,
div(class = "tableresult", tags$label("Summary Table"), tableOutput("summary"))
)
),
fluidRow(
column(8,
div(class = "ftest", tags$label("F Statistic"), verbatimTextOutput("FTest"))
),
column(4,
div(class = "ftestresult", tags$label("Quality Control for F-Statistic"), uiOutput("FTestString"))
)
),
fluidRow(
column(8,
NULL
),
column(4,
div(class = "meanresult", tags$label("Quality Control for Mean"), uiOutput("MeanTest"))
)
)
)
)
)
# Server logic
server <- function(input, output, session) {
input_data <- reactive({
validate(need(length(input$data_file$datapath) == 1, "Please upload a file"))
df <- read.csv(input$data_file$datapath,
header = TRUE)
validate(need(length(names(df) > 1), "The data set needs to contain at least two columns"))
updateSelectInput(session, "data_columns", choices = names(df), selected = names(df)[c(1,2)])
df
})
data_filtered_long <- reactive({
df <- input_data()
validate(need(length(input$data_columns) > 1, "Please select at least two columns from the data"))
df <- df[, input$data_columns]
tidyr::gather(df, samples, measurement, factor_key=TRUE)
})
output$boxplot <- renderPlot({
ggplot(data_filtered_long()) +
geom_boxplot(aes(y = measurement, fill = samples))
})
output$summary <- renderTable({
dplyr::summarize(dplyr::group_by(data_filtered_long(), samples),
Mean=mean(measurement),
SD=sd(measurement),
Median = median(measurement),
Q25 = quantile(measurement, probs = 0.25),
Q75 = quantile(measurement, probs = 0.75)
)
}, escape=FALSE)
f_statistics <- reactive({
validate(need(length(input$data_columns) > 1, "Please select at least two columns from the data"))
lapply(2:length(input$data_columns), function(x){
append(
var.test(
x = data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[1]), "measurement"],
y = data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[x]), "measurement"],
alternative = "two.sided"),
list(
x = input$data_columns[1],
y = input$data_columns[x]
)
)
})
})
mean_test <- reactive({
validate(need(length(input$data_columns) > 1, "Please select at least two columns from the data"))
lapply(2:length(input$data_columns), function(x){
append(
list(mean_compare = mean(data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[x]), "measurement"], na.rm = T) <
1.2 * mean(data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[1]), "measurement"], na.rm = T) &&
mean(data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[x]), "measurement"], na.rm = T) >
0.8 * mean(data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[1]), "measurement"], na.rm = T)
),
list(
x = input$data_columns[1],
y = input$data_columns[x]
)
)
})
})
output$FTest <- renderText({
paste(unlist(lapply(f_statistics(), function(x){
paste("X, Y = ", x$x,",", x$y, "\n", "F = ", unname(x$statistic), ", p value = ", unname(x$p.value))
})), collapse = "\n")
})
output$FTestString <- renderUI({
lapply(f_statistics(), function(x){
p(paste("X, Y = ", x$x,",", x$y, "\n", ifelse(unname(x$statistic) > 3 || unname(x$statistic) < 1/3, "F-Test failed", "F-Test ok")))
})
})
output$MeanTest <- renderUI({
lapply(mean_test(), function(x){
p(paste("X, Y = ", x$x,",", x$y, "\n", ifelse(unname(x$mean_compare), "Mean Test ok", "Mean Test failed")))
})
})
output$finalresult <- renderUI({
do.call("div",
lapply( seq_along(mean_test()), function(x){
p(
if(
mean_test()[[x]]$mean_compare &&
f_statistics()[[x]]$statistic < 3 &&
f_statistics()[[x]]$statistic > 1/3) {
tags$img(src = "check.png", width = 20)
}else{
tags$img(src = "cross.png", width = 20)
},
tags$label(paste(f_statistics()[[x]]$x,",", f_statistics()[[x]]$y))
)
}
)
)
})
}
# Complete app with UI and server components
shinyApp(ui, server) | /shinytest/app.R | no_license | zappingseb/EARL2019 | R | false | false | 5,877 | r | library(shiny)
library(ggplot2)
library(tidyr)
# Define UI
# Layout: a sidebar (file upload, column picker, overall pass/fail summary)
# and a main panel with three rows of plot/table/test outputs. The stylesheet
# style.css is linked in via tags$head().
ui <- fluidPage(
  # Application title
  titlePanel("Quality Control app"),
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
  ),
  sidebarLayout(
    # Sidebar with a slider input
    sidebarPanel(
      # CSV upload; the server-side input_data() reads a single file
      fileInput(inputId = "data_file",
                label = "Choose CSV File",
                multiple = TRUE,
                accept = c("text/csv",
                           "text/comma-separated-values,text/plain",
                           ".csv")),
      # placeholder choices are replaced with real column names after upload
      selectInput(inputId = "data_columns",
                  label = "Select columns for analysis",
                  choices = "Please Upload a file first",
                  multiple = TRUE
      ),
      tags$hr(),
      # per-comparison pass/fail icons rendered by the server
      uiOutput("finalresult")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      # row 1: boxplot + summary statistics table
      fluidRow(
        column(8,
               div(class = "plotresult", tags$label("Boxplot"),plotOutput("boxplot"))
        ),
        column(4,
               div(class = "tableresult", tags$label("Summary Table"), tableOutput("summary"))
        )
      ),
      # row 2: raw F statistics + their quality-control verdicts
      fluidRow(
        column(8,
               div(class = "ftest", tags$label("F Statistic"), verbatimTextOutput("FTest"))
        ),
        column(4,
               div(class = "ftestresult", tags$label("Quality Control for F-Statistic"), uiOutput("FTestString"))
        )
      ),
      # row 3: mean-comparison quality-control verdicts
      fluidRow(
        column(8,
               NULL
        ),
        column(4,
               div(class = "meanresult", tags$label("Quality Control for Mean"), uiOutput("MeanTest"))
        )
      )
    )
  )
)
# Server logic
# Reactive pipeline: input_data() reads the uploaded CSV -> data_filtered_long()
# reshapes the selected columns into long format -> plot/summary outputs and
# the F-test / mean quality-control checks are all derived from that table.
server <- function(input, output, session) {
  # Read the uploaded CSV and push its column names into the column selector.
  input_data <- reactive({
    # NOTE(review): fileInput() allows multiple = TRUE but only a single
    # uploaded file is accepted here -- confirm intended behavior.
    validate(need(length(input$data_file$datapath) == 1, "Please upload a file"))
    df <- read.csv(input$data_file$datapath,
                   header = TRUE)
    # Fix: the original condition was length(names(df) > 1) -- the length of a
    # logical vector, i.e. the column count, which is truthy even for a
    # one-column file. The intended check is "more than one column".
    validate(need(length(names(df)) > 1, "The data set needs to contain at least two columns"))
    updateSelectInput(session, "data_columns", choices = names(df), selected = names(df)[c(1,2)])
    df
  })
  # Long-format view (samples / measurement) of the user-selected columns.
  data_filtered_long <- reactive({
    df <- input_data()
    validate(need(length(input$data_columns) > 1, "Please select at least two columns from the data"))
    df <- df[, input$data_columns]
    tidyr::gather(df, samples, measurement, factor_key=TRUE)
  })
  # One boxplot per selected column.
  output$boxplot <- renderPlot({
    ggplot(data_filtered_long()) +
      geom_boxplot(aes(y = measurement, fill = samples))
  })
  # Per-column summary statistics table.
  output$summary <- renderTable({
    dplyr::summarize(dplyr::group_by(data_filtered_long(), samples),
                     Mean=mean(measurement),
                     SD=sd(measurement),
                     Median = median(measurement),
                     Q25 = quantile(measurement, probs = 0.25),
                     Q75 = quantile(measurement, probs = 0.75)
    )
  }, escape=FALSE)
  # Two-sided F tests of the first selected column against every other one.
  f_statistics <- reactive({
    validate(need(length(input$data_columns) > 1, "Please select at least two columns from the data"))
    lapply(2:length(input$data_columns), function(x){
      append(
        var.test(
          x = data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[1]), "measurement"],
          y = data_filtered_long()[which(data_filtered_long()$samples == input$data_columns[x]), "measurement"],
          alternative = "two.sided"),
        list(
          x = input$data_columns[1],
          y = input$data_columns[x]
        )
      )
    })
  })
  # Mean check: every other column's mean must lie within +/-20% of the
  # first column's mean.
  mean_test <- reactive({
    validate(need(length(input$data_columns) > 1, "Please select at least two columns from the data"))
    lapply(2:length(input$data_columns), function(x){
      # compute each group mean once instead of repeating the subsetting
      # expression four times (pure refactor, identical values)
      long <- data_filtered_long()
      ref_mean <- mean(long[which(long$samples == input$data_columns[1]), "measurement"], na.rm = TRUE)
      cmp_mean <- mean(long[which(long$samples == input$data_columns[x]), "measurement"], na.rm = TRUE)
      append(
        list(mean_compare = cmp_mean < 1.2 * ref_mean && cmp_mean > 0.8 * ref_mean),
        list(
          x = input$data_columns[1],
          y = input$data_columns[x]
        )
      )
    })
  })
  # Raw F-test results as preformatted text.
  output$FTest <- renderText({
    paste(unlist(lapply(f_statistics(), function(x){
      paste("X, Y = ", x$x,",", x$y, "\n", "F = ", unname(x$statistic), ", p value = ", unname(x$p.value))
    })), collapse = "\n")
  })
  # Pass/fail verdict per F test (accepted range: 1/3 < F < 3).
  output$FTestString <- renderUI({
    lapply(f_statistics(), function(x){
      p(paste("X, Y = ", x$x,",", x$y, "\n", ifelse(unname(x$statistic) > 3 || unname(x$statistic) < 1/3, "F-Test failed", "F-Test ok")))
    })
  })
  # Pass/fail verdict per mean comparison.
  output$MeanTest <- renderUI({
    lapply(mean_test(), function(x){
      p(paste("X, Y = ", x$x,",", x$y, "\n", ifelse(unname(x$mean_compare), "Mean Test ok", "Mean Test failed")))
    })
  })
  # Combined verdict: green check only when both the mean check and the
  # F statistic range check pass for a comparison.
  output$finalresult <- renderUI({
    do.call("div",
            lapply( seq_along(mean_test()), function(x){
              p(
                if(
                  mean_test()[[x]]$mean_compare &&
                  f_statistics()[[x]]$statistic < 3 &&
                  f_statistics()[[x]]$statistic > 1/3) {
                  tags$img(src = "check.png", width = 20)
                }else{
                  tags$img(src = "cross.png", width = 20)
                },
                tags$label(paste(f_statistics()[[x]]$x,",", f_statistics()[[x]]$y))
              )
            }
            )
    )
  })
}
# Complete app with UI and server components
# (sourcing/running this file starts the application)
shinyApp(ui, server)
# Auto-generated fuzzer regression input: 'data' is a 1x1 zero matrix, 'x' a
# 10x1 matrix of extreme / denormal doubles that previously exercised the
# compiled routine.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(3.00384971227444e-307, 6.45861169576221e-198, 1.41240040169298e-310, 1.24620568844188e-308, 6.16111399153334e-270, 6.45861474821585e-198, 1.24620568811809e-308, 4.1632112772405e-256, 1.39069239013775e-308, 0), .Dim = c(10L, 1L)))
# Replay the crafted arguments against the internal compiled routine.
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610035470-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 371 | r | testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(3.00384971227444e-307, 6.45861169576221e-198, 1.41240040169298e-310, 1.24620568844188e-308, 6.16111399153334e-270, 6.45861474821585e-198, 1.24620568811809e-308, 4.1632112772405e-256, 1.39069239013775e-308, 0), .Dim = c(10L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) |
#' @importFrom tools file_path_sans_ext
#' @importFrom utils package.skeleton
NULL | /R/zzz.R | no_license | elferdo/rustinr | R | false | false | 82 | r | #' @importFrom tools file_path_sans_ext
#' @importFrom utils package.skeleton
NULL |
## Setting Working Directory
# NOTE(review): hard-coded absolute Windows path; adjust when running elsewhere.
setwd(dir = "D:\\Books\\Exploratory-Data-Analysis-with-R\\2.Exploratory Data Analysis Checklist")
list.files()
# Creating Directory For datasets
# Fix: the original condition was inverted -- dir.create() was only called
# when the directory already existed (a warning no-op) and the directory was
# never created when it was actually missing.
if(!dir.exists("dataset")){dir.create("dataset")}
#5.2# Read in Data
##Load Library
library(readr)
library(dplyr)
# read_csv: the col_types string pins one column class per character
# (23 columns total)
ozone <-read_csv(".\\Dataset\\hourly_44201_2014\\hourly_44201_2014.csv",
                 col_types = "ccccinnccccccncnncccccc")
# c : character
# i : integer
# n : Numeric
# Inspect the raw column names, then rewrite them as syntactically valid R
# names (spaces etc. become dots) so they can be used with $ and formulas.
base::names(ozone)
base::names(ozone) <- base::make.names(base::names(ozone))
#5.3# Check The Package
# Sanity-check the dimensions of the loaded data set
base::nrow(ozone) # 9060694
base::ncol(ozone) # 23
#5.4# Run Str()
utils::str(ozone)
#5.5# Look at the top and the bottom of data
head(ozone[, c(6:7, 10)]) # take columns 6, 7 and 10
tail(ozone[, c(6:7, 10)])
#5.6# Check your "n"s
table(ozone$Time.Local) ## Result seems already fixed, different from the book
# Fix: the original used '&&' inside filter(); '&&' is the scalar AND, so the
# condition was not applied row-wise and the query returned no rows (the
# "# No output" note below was a symptom of that bug, not of the data).
dplyr::filter(ozone, Time.Local == "13:00" & County.Name == "Franklin") %>%
  dplyr::select(State.Name, County.Name, Date.Local,
                Time.Local, Sample.Measurement) # No output
dplyr::filter(ozone, State.Code == "36" &
                County.Code == "033", Date.Local == "2014-09-30") %>%
  dplyr::select(Date.Local, Time.Local, Sample.Measurement) %>% as.data.frame
## Data is tidy, there's nothing false
## Checking how many states there are
dplyr::select(ozone, State.Name) %>% base::unique() %>% nrow() # 53
## Seems odd because the US only has 50 states
base::unique(ozone$State.Name)
## Washington D.C. (District of Columbia), Puerto Rico and the country of
## Mexico are the extra "states" -- but that's okay.
## Validate with at least one external data source
# External reference: ozone should not exceed 0.075 ppm
base::summary(ozone$Sample.Measurement)
# Reality: mean is 0.03027 and median 0.0300 <-- lower than 0.075
stats::quantile(ozone$Sample.Measurement, probs = seq(0, 1, 0.1))
?stats::quantile
## Data is in the right order of magnitude; high values are probably outliers.
## Roughly < 10% of data are above 0.075
dplyr::filter(ozone, Sample.Measurement > 0.075) %>%
  select(County.Code, Sample.Measurement) %>% print
# Roughly 30,102 obs, quite high actually
dplyr::filter(ozone, Sample.Measurement > 0.075) %>%
  select(County.Code, Sample.Measurement) %>% as.data.frame %>%
  dplyr::arrange(Sample.Measurement) %>%
  tail(n = 100)
# There are a lot of observations > 0.100 ppm
#5.8# Try the easy solution first
# Example Question:
# Which counties in the United States have the highest levels of ambient ozone
# pollution?
# (variable name 'rangking' is kept as-is: later parts of the script may use it;
# the right-assign '->' form was replaced with a conventional '<-' assignment)
rangking <- ozone %>%
  dplyr::group_by(State.Name, County.Name) %>%
  dplyr::summarize(ozone = mean(Sample.Measurement)) %>%
  as.data.frame() %>%
  dplyr::arrange(desc(ozone))
## Checking the highest-ranked county
ozone %>% dplyr::filter(State.Name == "Colorado" &
                          County.Name == "Clear Creek") %>% nrow() # 6447
ozone <- dplyr::mutate(ozone, Date.Local = as.Date(Date.Local))
ozone %>% dplyr::filter(State.Name == "Colorado" & County.Name == "Clear Creek") %>%
dplyr::mutate(dmonth = factor(base::months(Date.Local) , levels = month.name)) %>%
dplyr::group_by(dmonth) %>%
dplyr::summarize(ozone = mean(Sample.Measurement))
# March Missing
## Cheking the Lowest County
ozone %>% dplyr::filter(State.Name == "Puerto Rico",County.Name == "Catano") %>%
dplyr::mutate(dmonth = base::factor(base::months(Date.Local) , levels = month.name)) %>%
dplyr::group_by(dmonth) %>%
dplyr::summarize(ozone = mean(Sample.Measurement))
0.007 > 0.003
# Probably The low ozone means there are in certain Season in fact its summer season,well in 2014 maybe people in
## puerto rico dont use their car / maybe lazy ,i dont know ...
#5.9# Challenge your Solution
## Even if you get that ez thngs ,it should not stop you
# the easy answer seemed to work okay in that it gave us a listing of counties that had
#the highest average levels of ozone for 2014.
## We're going to Shuffle the data ,we will approximate is our ranking is stable from year to year
base::set.seed(10234)
N <- base::nrow(ozone)
idx <- base::sample(x = N , size = N ,replace = T)
ozone2 <- ozone[idx ,]
ozone2 %>% dplyr::group_by(State.Name , County.Name) %>%
dplyr::summarize(ozone = mean(Sample.Measurement)) %>%
as.data.frame() %>% dplyr::arrange(dplyr::desc(ozone)) -> rangking2
base::cbind(head(rangking , 10),
head(rangking2 , 10) )
base::cbind(tail(rangking , 10),
tail(rangking2 , 10) )
## It's nearly identical,it show that roughly next year will be stable , but that is a hypothesis.
| /2.Exploratory Data Analysis Checklist/Explorartory_ChecklistAddition.R | no_license | Xwyzworms/Exploratory-Data-Analysis-with-R | R | false | false | 4,558 | r | ## Setting Working Directory
setwd(dir = "D:\\Books\\Exploratory-Data-Analysis-with-R\\2.Exploratory Data Analysis Checklist")
list.files()
# Creating Directory For datasets
if(dir.exists("dataset")){dir.create("dataset")}
#5.2# Read in Data
##Load Library
library(readr)
library(dplyr)
ozone <-read_csv(".\\Dataset\\hourly_44201_2014\\hourly_44201_2014.csv",
col_types = "ccccinnccccccncnncccccc")
# c : character
# i : integer
# n : Numeric
base::names(ozone)
base::names(ozone) <- base::make.names(base::names(ozone))
#5.3# Check The Package
base::nrow(ozone) # 9060694
base::ncol(ozone) # 23
#5.4# Run Str()
utils::str(ozone)
#5.5# Look at the top and the bottom of data
head(ozone[, c(6:7 , 10)]) # take 6,7,and 10 col
tail(ozone[, c(6:7,10)])
#5.6# Check your "n"s
table(ozone$Time.Local) ## Result seems already fixed,different from the books
dplyr::filter(ozone , Time.Local == "13:00" && County.Name == "Franklin") %>%
dplyr::select(State.Name , County.Name , Date.Local,
Time.Local , Sample.Measurement)# No output
dplyr::filter(ozone , State.Code =="36" &
County.Code == "033", Date.Local =="2014-09-30") %>%
dplyr::select(Date.Local , Time.Local ,Sample.Measurement) %>% as.data.frame
## Data Is Tidy ,there's nothing false
## Checking How many State
dplyr::select(ozone, State.Name) %>% base::unique() %>% nrow() #53
## Seems odd because US only has 50 States
base::unique(ozone$State.Name)
## Washington , D.C (District of Columbia ,puerto Rico ,country of mexico) Are tje extra states.
# BUt its okay .
## Validate with At least one external data source
# So from the link We get that in 2015 should not exceed 0.075 ppm
base::summary(ozone$Sample.Measurement)
#Reality Mean is (0.03027 , and median 0.0300) <-- lower than 0.075
stats::quantile(ozone$Sample.Measurement , probs = seq(0, 1, 0.1))
?stats::quantile
## Means that data in right magnitude probably an outlier.
##Roughly < 10% of data are above 0.075
dplyr::filter(ozone , Sample.Measurement > 0.075) %>%
select(County.Code ,Sample.Measurement) %>% print
# Roughly 30,102 obs ,quite high actually ,
dplyr::filter(ozone , Sample.Measurement > 0.075) %>%
select(County.Code ,Sample.Measurement) %>% as.data.frame %>%
dplyr::arrange(Sample.Measurement) %>%
tail(n=100)
# Wow there's alot observation > 0.100 ppm
#5.8# Try the easy solution first
# Example Question
# Which counties in the United States have the highest levels of ambient ozone
# pollution?
ozone %>% dplyr::group_by(State.Name,County.Name) %>%
dplyr::summarize(ozone = mean(Sample.Measurement)) %>%
as.data.frame() %>%
dplyr::arrange(desc(ozone)) -> rangking
## Checking The Highest County
ozone %>% dplyr::filter(State.Name == "Colorado" &
County.Name == "Clear Creek") %>% nrow() # 6447 <<-- Well if
ozone <- dplyr::mutate(ozone , Date.Local = as.Date(Date.Local))
ozone %>% dplyr::filter(State.Name == "Colorado" & County.Name == "Clear Creek") %>%
dplyr::mutate(dmonth = factor(base::months(Date.Local) , levels = month.name)) %>%
dplyr::group_by(dmonth) %>%
dplyr::summarize(ozone = mean(Sample.Measurement))
# March Missing
## Cheking the Lowest County
ozone %>% dplyr::filter(State.Name == "Puerto Rico",County.Name == "Catano") %>%
dplyr::mutate(dmonth = base::factor(base::months(Date.Local) , levels = month.name)) %>%
dplyr::group_by(dmonth) %>%
dplyr::summarize(ozone = mean(Sample.Measurement))
0.007 > 0.003
# Probably The low ozone means there are in certain Season in fact its summer season,well in 2014 maybe people in
## puerto rico dont use their car / maybe lazy ,i dont know ...
#5.9# Challenge your Solution
## Even if you get that ez thngs ,it should not stop you
# the easy answer seemed to work okay in that it gave us a listing of counties that had
#the highest average levels of ozone for 2014.
## We're going to Shuffle the data ,we will approximate is our ranking is stable from year to year
base::set.seed(10234)
N <- base::nrow(ozone)
idx <- base::sample(x = N , size = N ,replace = T)
ozone2 <- ozone[idx ,]
ozone2 %>% dplyr::group_by(State.Name , County.Name) %>%
dplyr::summarize(ozone = mean(Sample.Measurement)) %>%
as.data.frame() %>% dplyr::arrange(dplyr::desc(ozone)) -> rangking2
base::cbind(head(rangking , 10),
head(rangking2 , 10) )
base::cbind(tail(rangking , 10),
tail(rangking2 , 10) )
## It's nearly identical,it show that roughly next year will be stable , but that is a hypothesis.
|
\name{texreg}
\alias{texreg}
\alias{htmlreg}
\alias{screenreg}
\title{Convert regression output to LaTeX or HTML tables}
\description{Conversion of R regression output to LaTeX or HTML tables.}
\usage{
texreg(l, file = NULL, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.coef.map = NULL,
custom.gof.names = NULL, custom.note = NULL, digits = 2,
leading.zero = TRUE, symbol = "\\\\cdot", override.coef = 0,
override.se = 0, override.pvalues = 0, override.ci.low = 0,
override.ci.up = 0, omit.coef = NULL, reorder.coef = NULL,
reorder.gof = NULL, ci.force = FALSE, ci.force.level = 0.95,
ci.test = 0, groups = NULL, custom.columns = NULL,
custom.col.pos = NULL, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = FALSE,
label = "table:coefficients", booktabs = FALSE,
siunitx = FALSE, lyx = FALSE,sideways = FALSE,
longtable = FALSE, use.packages = TRUE, table = TRUE,
no.margin = FALSE, fontsize = NULL, scalebox = NULL,
float.pos = "", col.groups = NULL, col.groups.2 = NULL,
no.table.format = FALSE, add.lines = NULL, add.lines.sep = FALSE,
center.gof = TRUE, ...)
htmlreg(l, file = NULL, single.row = FALSE, stars = c(0.001,
0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.coef.map = NULL,
custom.gof.names = NULL, custom.note = NULL, digits = 2,
leading.zero = TRUE, symbol = "·", override.coef = 0,
override.se = 0, override.pvalues = 0, override.ci.low = 0,
override.ci.up = 0, omit.coef = NULL, reorder.coef = NULL,
reorder.gof = NULL, ci.force = FALSE, ci.force.level = 0.95,
ci.test = 0, groups = NULL, custom.columns = NULL,
custom.col.pos = NULL, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = FALSE,
star.symbol = "*", inline.css = TRUE, doctype = TRUE,
html.tag = FALSE, head.tag = FALSE, body.tag = FALSE,
indentation = "", vertical.align.px = 0, ...)
screenreg(l, file = NULL, single.row = FALSE, stars = c(0.001,
0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.coef.map = NULL,
custom.gof.names = NULL, custom.note = NULL, digits = 2,
leading.zero = TRUE, symbol = ".", override.coef = 0,
override.se = 0, override.pvalues = 0, override.ci.low = 0,
override.ci.up = 0, omit.coef = NULL, reorder.coef = NULL,
reorder.gof = NULL, ci.force = FALSE, ci.force.level = 0.95,
ci.test = 0, groups = NULL, custom.columns = NULL,
custom.col.pos = NULL, column.spacing = 2, outer.rule = "=",
inner.rule = "-", ...)
}
\details{
texreg converts coefficients, standard errors, significance stars,
and goodness-of-fit statistics of statistical models into LaTeX
tables or HTML tables or into nicely formatted screen output for
the R console. A list of several models can be combined in a
single table. The output is customizable. New model types can be
easily implemented. Confidence intervals can be used instead of
standard errors and p values.
The \code{texreg()} function creates LaTeX code for inclusion
in a LaTeX document or for usage with \pkg{Sweave} or \pkg{knitr}.
The \code{htmlreg()} function creates HTML code. Tables in HTML
format can be saved with a ".html" extension and displayed in
a web browser. Alternatively, they can be saved with a ".doc"
extension and opened in MS Word for inclusion in office
documents. \code{htmlreg()} also works with \pkg{knitr} and HTML
or Markdown. Note that the \code{inline.css}, \code{doctype},
\code{html.tag}, \code{head.tag}, and \code{body.tag} arguments
must be adjusted for the different purposes (see the description
of the arguments).
The \code{screenreg()} function creates text representations of
tables and prints them to the R console. This is an alternative
to the \code{summary} method and serves easy model comparison.
Moreover, once a table has been prepared in the R console, it
can be later exported to LaTeX or HTML with little extra effort
because the majority of arguments of the three functions is
identical.
}
\arguments{
\item{l}{A statistical model or a list of statistical models. Lists of models can be specified as \code{l = list(model.1, model.2, ...)}. Different object types can also be mixed.}
\item{file}{Using this argument, the resulting table is written to a file rather than to the R prompt. The file name can be specified as a character string. Writing a table to a file can be useful for working with MS Office or LibreOffice. For example, using the \code{htmlreg} function, an HTML table can be written to a file with the extension \code{.doc} and opened with MS Word. The table can then be simply copied into any Word document, retaining the formatting of the table. Note that LibreOffice can import only plain HTML; CSS decorations are not supported; the resulting tables do not retain the full formatting in LibreOffice.}
\item{single.row}{By default, a model parameter takes up two lines of the table: the standard error is listed in parentheses under the coefficient. This saves a lot of horizontal space on the page and is the default table format in most academic journals. If \code{single.row = TRUE} is activated, however, both coefficient and standard error are placed in a single table cell in the same line.}
\item{stars}{The significance levels to be used to draw stars. Between 0 and 4 threshold values can be provided as a numeric vector. For example, \code{stars = numeric(0)} will not print any stars and will not print any note about significance levels below the table. \code{stars = 0.05} will attach one single star to all coefficients where the p value is below 0.05. \code{stars = c(0.001, 0.01, 0.05, 0.1)} will print one, two, or three stars, or a symbol as specified by the \code{symbol} argument depending on the p values.}
\item{custom.model.names}{A character vector of labels for the models. By default, the models are named Model 1, Model 2, etc. Specifying \code{model.names = c("My name 1", "My name 2")} etc. overrides the default behavior.}
\item{custom.coef.names}{By default, \pkg{texreg} uses the coefficient names which are stored in the models. The \code{custom.coef.names} argument can be used to replace them by other character strings in the order of appearance. For example, if a table shows a total of three different coefficients (including the intercept), the argument \code{custom.coef.names = c("Intercept", "variable 1", "variable 2")} will replace their names in this order.
Sometimes it happens that the same variable has a different name in different models. In this case, the user can use this function to assign identical names. If possible, the rows will then be merged into a single row unless both rows contain values in the same column.
Where the argument contains an \code{NA} value, the original name of the coefficient is kept. For example, \code{custom.coef.names = c(NA, "age", NA)} will only replace the second coef name and leave the first and third name as they are in the original model.}
\item{custom.coef.map}{The \code{custom.coef.map} argument can be used to select, omit, rename, and reorder coefficients.
Users must supply a named list of this form: \code{list('x' = 'First variable', 'y' = NA, 'z' = 'Third variable')}. With that particular example of \code{custom.coef.map},
1. coefficients will presented in order: x, y, z.
2. variable x will appear as "First variable", variable y will appear as "y", and variable "z" will appear as "Third variable".
3. all variables not named "x", "y", or "z" will be omitted from the table.}
\item{custom.gof.names}{A character vector which is used to replace the names of the goodness-of-fit statistics at the bottom of the table. The vector must have the same length as the number of GOF statistics in the final table. The argument works like the \code{custom.coef.names} argument, but for the GOF values. \code{NA} values can be included where the original GOF name should be kept.}
\item{custom.note}{With this argument, a replacement text for the significance note below the table can be provided. If an empty character object is provided (\code{custom.note = ""}), the note will be omitted completely. If some character string is provided (e.g., \code{custom.note = "My note"}), the significance legend is replaced by \code{My note}. The original significance legend can be included by inserting the \code{\%stars} wildcard. For example, a custom note can be added right after the significance legend by providing \code{custom.note = "\%stars. My note"}.}
\item{digits}{Set the number of decimal places for coefficients, standard errors and goodness-of-fit statistics. Do not use negative values! The argument works like the \code{digits} argument in the \code{round} function of the \pkg{base} package.}
\item{leading.zero}{Most journals require leading zeros of coefficients and standard errors (for example, \code{0.35}). This is also the default texreg behavior. Some journals, however, require omission of leading zeros (for example, \code{.35}). This can be achieved by setting \code{leading.zero = FALSE}.}
\item{symbol}{If four threshold values are handed over to the \code{stars} argument, p values smaller than the largest threshold value but larger than the second-largest threshold value are denoted by this symbol. The default symbol is \code{"\\\\cdot"} for the LaTeX dot, \code{"·"} for the HTML dot, or simply \code{"."} for the ASCII dot. If the \code{texreg} function is used, any other mathematical LaTeX symbol or plain text symbol can be used, for example \code{symbol = "\\\\circ"} for a small circle (note that backslashes must be escaped). If the \code{htmlreg} function is used, any other HTML character or symbol can be used. For the \code{screenreg} function, only plain text characters can be used.}
\item{override.coef}{Set custom values for the coefficients. New coefficients are provided as a list of numeric vectors. The list contains vectors of coefficients for each model. There must be as many vectors of coefficients as there are models. For example, if there are two models with three model terms each, the argument could be specified as \code{override.coef = list(c(0.1, 0.2, 0.3), c(0.05, 0.06, 0.07))}. If there is only one model, custom values can be provided as a plain vector (not embedded in a list). For example: \code{override.coef = c(0.05, 0.06, 0.07)}.}
\item{override.se}{Set custom values for the standard errors. New standard errors are provided as a list of numeric vectors. The list contains vectors of standard errors for each model. There must be as many vectors of standard errors as there are models. For example, if there are two models with three coefficients each, the argument could be specified as \code{override.se = list(c(0.1, 0.2, 0.3), c(0.05, 0.06, 0.07))}. If there is only one model, custom values can be provided as a plain vector (not embedded in a list). For example: \code{override.se = c(0.05, 0.06, 0.07)}. Overriding standard errors can be useful for the implementation of robust SEs, for example.}
\item{override.pvalues}{Set custom values for the p values. New p values are provided as a list of numeric vectors. The list contains vectors of p values for each model. There must be as many vectors of p values as there are models. For example, if there are two models with three coefficients each, the argument could be specified as \code{override.pvalues = list(c(0.1, 0.2, 0.3), c(0.05, 0.06, 0.07))}. If there is only one model, custom values can be provided as a plain vector (not embedded in a list). For example: \code{override.pvalues = c(0.05, 0.06, 0.07)}. Overriding p values can be useful for the implementation of robust SEs and p values, for example.}
\item{override.ci.low}{Set custom lower confidence interval bounds. This works like the other override arguments, with one exception: if confidence intervals are provided here and in the \code{override.ci.up} argument, the standard errors and p values as well as the \code{ci.force} argument are ignored.}
\item{override.ci.up}{Set custom upper confidence interval bounds. This works like the other override arguments, with one exception: if confidence intervals are provided here and in the \code{override.ci.low} argument, the standard errors and p values as well as the \code{ci.force} argument are ignored.}
\item{omit.coef}{A character string which is used as a regular expression to remove coefficient rows from the table. For example, \code{omit.coef = "group"} deletes all coefficient rows from the table where the name of the coefficient contains the character sequence "group". More complex regular expressions can be used to filter out several kinds of model terms, for example \code{omit.coef = "(thresh)|(ranef)"} to remove all model terms matching either "thresh" or "ranef".The \code{omit.coef} argument is processed after the \code{custom.coef.names} argument, so the regular expression should refer to the custom coefficient names. To omit GOF entries instead of coefficient entries, use the custom arguments of the extract functions instead (see the help entry of the \link{extract} function or \link{extract-methods}.}
\item{reorder.coef}{Reorder the rows of the coefficient block of the resulting table in a custom way. The argument takes a vector of the same length as the number of coefficients. For example, if there are three coefficients, \code{reorder.coef = c(3, 2, 1)} will put the third coefficient in the first row and the first coefficient in the third row. Reordering can be sensible because interaction effects are often added to the end of the model output although they were specified earlier in the model formula. Note: Reordering takes place after processing custom coefficient names and after omitting coefficients, so the \code{custom.coef.names} and \code{omit.coef} arguments should follow the original order.}
\item{reorder.gof}{Reorder the rows of the goodness-of-fit block of the resulting table in a custom way. The argument takes a vector of the same length as the number of GOF statistics. For example, if there are three goodness-of-fit rows, \code{reorder.gof = c(3, 2, 1)} will exchange the first and the third row. Note: Reordering takes place after processing custom GOF names, so the \code{custom.gof.names} argument should follow the original order.}
\item{ci.force}{Should confidence intervals be used instead of the default standard errors and p values? Most models implemented in the \pkg{texreg} package report standard errors and p values by default while few models report confidence intervals. However, the functions in the \pkg{texreg} package can convert standard errors and into confidence intervals if desired. To enforce confidence intervals instead of standard errors, the \code{ci.force} argument accepts either a logical value indicating whether all models or none of the models should be forced to report confidence intervals (\code{ci.force = TRUE} for all and \code{ci.force = FALSE} for none) or a vector of logical values indicating for each model separately whether the model should be forced to report confidence intervals (e.g., \code{ci.force = c(FALSE, TRUE, FALSE)}). Confidence intervals are computed using the standard normal distribution (z values based on the \code{qnorm} function).}
\item{ci.force.level}{If the \code{ci.force} argument is used to convert standard errors to confidence intervals, what confidence level should be used? By default, \code{0.95} is used (i.e., an alpha value of 0.05).}
\item{ci.test}{If confidence intervals are reported, the \code{ci.test} argument specifies the reference value to establish whether a coefficient/CI is significant. The default value \code{ci.test = 0}, for example, will attach a significance star to coefficients if the confidence interval does not contain \code{0}. If no star should be printed at all, \code{ci.test = NULL} can be used. The \code{ci.test} argument works both for models with native support for confidence intervals and in cases where the \code{ci.force} argument is used.}
\item{groups}{This argument can be used to group the rows of the table into blocks. For example, there could be one block for hypotheses and another block for control variables. Each group has a heading, and the row labels within a group are indented. The partitions must be handed over as a list of named numeric vectors, where each number is a row index and each name is the heading of the group. Example: \code{groups = list("first group" = 1:4, "second group" = 7:8)}.}
\item{custom.columns}{An optional list of additional text columns to be inserted into the table, for example coefficient types. The list should contain one or more character vectors with as many character or numeric elements as there are rows. If the vectors in the list are named, the names are used as labels in the table header. For example, \code{custom.columns = list(type = c("a", "b", "c"), 1:3)} will add two columns; the first one is labeled while the second one is not. Note that the numeric elements of the second column will be converted to character objects in this example. The consequence is that decimal alignment with the \pkg{siunitx} package is switched off in these columns. Note that this argument is processed after any arguments that affect the number of rows.}
\item{custom.col.pos}{An optional integer vector of positions for the columns given in the \code{custom columns} argument. For example, if there are three custom columns, \code{custom.col.pos = c(1, 3, 3)} will insert the first custom column before the first column of the original table and the remaining two custom columns after the second column of the original table. By default, all custom columns are placed after the first column, which usually contains the coefficient names.}
\item{bold}{[only in the \code{texreg} and \code{htmlreg} functions] The p value threshold below which the coefficient shall be formatted in a bold font. For example, \code{bold = 0.05} will cause all coefficients which are significant at the 95\% level to be formatted in bold. Note that this is not compatible with the siunitx argument in the \code{texreg} function. If both are \code{TRUE}, siunitx is switched off and a warning message appears. Note also that it is advisable to use \code{stars = FALSE} together with the \code{bold} argument because having both bolded coefficients and significance stars usually does not make any sense.}
\item{center}{[only in the \code{texreg} and \code{htmlreg} functions] Should the table be horizontally aligned at the center of the page?}
\item{caption}{[only in the \code{texreg} and \code{htmlreg} functions] Set the caption of the table. }
\item{caption.above}{[only in the \code{texreg} and \code{htmlreg} functions] Should the caption of the table be placed above the table? By default, it is placed below the table.}
\item{label}{[only in the \code{texreg} function] Set the label of the \code{table} environment.}
\item{booktabs}{[only in the \code{texreg} function] Use the \code{booktabs} LaTeX package to get thick horizontal rules in the output table (recommended).}
\item{siunitx}{[only in the \code{texreg} function] Use the \code{siunitx} LaTeX package to get a nice alignment of the coefficients (recommended).}
\item{lyx}{[only in the \code{texreg} function] \code{logical}; if \code{TRUE}, each newline in the output is doubled, which facilitates transferring the output into the LyX document processor. }
\item{sideways}{[only in the \code{texreg} function] If \code{sideways = TRUE} is set, the \code{table} floating environment is replaced by a \code{sidewaystable} float, and the \code{rotating} package is loaded in the preamble. The argument only has an effect if \code{table = TRUE} is also set.}
\item{longtable}{ [only in the \code{texreg} function] If \code{longtable = TRUE} is set, the \code{longtable} environment from the \code{longtable} LaTeX package is used to set tables across multiple pages. Note that this argument is not compatible with the \code{sideways} and \code{scalebox} arguments. These arguments will be automatically switched off when \code{longtable = TRUE} is set. }
\item{use.packages}{[only in the \code{texreg} function] If this argument is set to \code{TRUE} (= the default behavior), the required LaTeX packages are loaded in the beginning. If set to \code{FALSE}, the use package statements are omitted from the output.}
\item{table}{[only in the \code{texreg} function] By default, texreg puts the actual \code{tabular} object in a \code{table} floating environment. To get only the \code{tabular} object without the whole table header, set \code{table = FALSE}.}
\item{no.margin}{[only in the \code{texreg} function] In order to save space, inner margins of tables can be switched off.}
\item{fontsize}{[only in the \code{texreg} function] The \code{fontsize} argument serves to change the font size used in the table. Valid values are \code{"tiny"}, \code{"scriptsize"}, \code{"footnotesize"}, \code{"small"}, \code{"normalsize"}, \code{"large"}, \code{"Large"}, \code{"LARGE"}, \code{"huge"}, and \code{"Huge"}. Note that the \code{scalebox} argument often achieves better results when the goal is to change the size of the table. }
\item{scalebox}{[only in the \code{texreg} function] The \code{scalebox} argument serves to resize the table. For example, \code{scalebox = 1.0} is equivalent to the normal size, \code{scalebox = 0.5} decreases the size of the table by one half, and \code{scalebox = 2.0} doubles the space occupied by the table. Note that the \code{scalebox} argument does not work when the \code{longtable} argument is used. }
\item{float.pos}{[only in the \code{texreg} function] This argument specifies where the table should be located on the page or in the document. By default, no floating position is specified, and LaTeX takes care of the position automatically. Possible values include \code{h} (here), \code{p} (page), \code{t} (top), \code{b} (bottom), any combination thereof, e.g. \code{tb}, or any of these values followed by an exclamation mark, e.g. \code{t!}, in order to enforce this position. The square brackets do not have to be specified.}
\item{col.groups}{[only in the \code{texreg} function] This argument allows for grouping models. Must be specified as a list of the form \code{list("Group name" = m:n)}, where m and n are the first and last column contained in the group. Several groups can be specified.}
\item{col.groups}{[only in the \code{texreg} function] Add a second column group.}
\item{no.table.format}{[only in the \code{texreg} function] This argument modifies how the \code{siunitx} argument sets column formats; if \code{no.table.format = TRUE}, the no-frills "S" column is used. If \code{no.table.format = FALSE}, the column format is set according to the max number of integers and decimals in a given column, e.g. S[table-format = 3.2]. Defaults to \code{FALSE}.}
\item{add.lines}{[only in the \code{texreg} function] Adds lines to the bottom part of the table. Input must be a list, with vector elements of length equal to the number of columns in the table.}
\item{center.gof}{[only in the \code{texreg} function] If \code{siunitx = TRUE}, setting center.gof = TRUE will put the GOF rows in \\multicolumn's, exempting them from being interpreted by siunitx, and instead centering cells.}
\item{star.symbol}{[only in the \code{htmlreg} function] Alternative characters for the significance stars can be specified. This is useful if \pkg{knitr} and Markdown are used for HTML report generation. In Markdown, asterisks or stars are interpreted as special characters, so they have to be escaped. To make \code{htmlreg} compatible with Markdown, specify \code{star.symbol = "\\*"}. Note that some other modifications are recommended for usage with \pkg{knitr} in combination with Markdown or HTML (see the \code{inline.css}, \code{doctype}, \code{html.tag}, \code{head.tag}, and \code{body.tag} arguments).}
\item{inline.css}{[only in the \code{htmlreg} function] Should the CSS stylesheets be embedded directly in the code of the table (\code{inline.css = TRUE}), or should the CSS stylesheets be enclosed in the <head> tag, that is, separated from the table code (\code{inline.css = FALSE})? Having inline CSS code makes the code of the table more complex, but sometimes it may be helpful when only the table shall be printed, without the head of the HTML file (for example when the table is embedded in a \pkg{knitr} report). As a rule of thumb: use inline CSS if the table is not saved to a file.}
\item{doctype}{[only in the \code{htmlreg} function] Should the first line of the HTML code contain the DOCTYPE definition? If \code{TRUE}, the HTML 4 TRANSITIONAL version is used. If \code{FALSE}, no DOCTYPE will be included. Omitting the DOCTYPE can be helpful when the \pkg{knitr} package is used to generate HTML code because \pkg{knitr} requires only the plain table, not the whole HTML document including the document type declaration. Including the DOCTYPE can be helpful when the code is saved to a file, for example as an MS Word document.}
\item{html.tag}{[only in the \code{htmlreg} function] Should the table code (and possibly the <body> and <head> tags) be enclosed in an <html> tag? Suppressing this tag is recommended when \pkg{knitr} is used for dynamic HTML or Markdown report generation. Including this tag is recommended when the code is saved to a file, for example as an MS Word document.}
\item{head.tag}{[only in the \code{htmlreg} function] Should the <head> tag (including CSS definitions and title/caption) be included in the HTML code? Suppressing this tag is recommended when \pkg{knitr} is used for dynamic HTML or Markdown report generation. Including this tag is recommended when the code is saved to a file, for example as an MS Word document.}
\item{body.tag}{[only in the \code{htmlreg} function] Should the table code be enclosed in a <body> HTML tag? Suppressing this tag is recommended when \pkg{knitr} is used for dynamic HTML or Markdown report generation. Including this tag is recommended when the code is saved to a file, for example as an MS Word document.}
\item{indentation}{[only in the \code{htmlreg} function] Characters used for indentation of the HTML code. By default, \code{indentation = ""} uses no indentation. Any number of spaces or characters can be used instead. For example, \code{indentation = " "} uses two spaces of (additional) indentation for each subelement.}
\item{vertical.align.px}{[only in the \code{htmlreg} function] Vertical alignment of significance stars. Browsers differ in their ways of displaying superscripted significance stars; in some browsers the stars are elevated by default, and in other browsers the stars are aligned vertically with the text, without any actual superscripting. This argument controls by how many additional pixels the stars are elevated. The default setting of 0 uses the defaults of the browser. In RStudio's internal browser, this looks OK, but in Firefox, this looks too low. A value of 4 looks OK in Firefox, for example, but is above the line in RStudio's internal browser. }
\item{column.spacing}{[only in the \code{screenreg} function] The amount of space between any two columns of a table. By default, two spaces are used. If the tables do not fit on a single page horizontally, the value can be set to \code{1} or \code{0}.}
\item{outer.rule}{[only in the \code{screenreg} function] The character which is used to draw the outer horizontal line above and below a table. If an empty character object is provided (i.e., \code{outer.rule = ""}), there will be no outer horizontal lines. Recommended values are \code{""}, \code{"="}, \code{"-"}, \code{"_"}, or \code{"#"}.}
\item{inner.rule}{[only in the \code{screenreg} function] The character which is used to draw the inner horizontal line above and below a table. If an empty character object is provided (i.e., \code{inner.rule = ""}), there will be no inner horizontal lines. Recommended values are \code{""}, \code{"-"}, or \code{"_"}.}
\item{...}{Custom options to be passed on to the extract function. For example, most extract methods provide custom options for the inclusion or exclusion of specific goodness-of-fit statistics. See the help entries of \link{extract} and \link{extract-methods} for more information.}
}
\references{
Leifeld, Philip (2013). texreg: Conversion of Statistical Model Output in R to
LaTeX and HTML Tables. Journal of Statistical Software, 55(8), 1-24.
\url{http://www.jstatsoft.org/v55/i08/}.
}
\seealso{
\code{\link{texreg-package} \link{extract} \link{extract-methods} \link{plotreg}}
}
\author{
Philip Leifeld (\url{http://www.philipleifeld.com})
}
\examples{
#Linear mixed-effects models
library(nlme)
model.1 <- lme(distance ~ age, data = Orthodont, random = ~ 1)
model.2 <- lme(distance ~ age + Sex, data = Orthodont, random = ~ 1)
texreg(list(model.1, model.2), booktabs = TRUE, siunitx = TRUE)
#Ordinary least squares model (example from the 'lm' help file)
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2,10,20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
table.string <- texreg(lm.D9, return.string = TRUE)
cat(table.string)
#Create a 'fake' Office document containing a regression table
htmlreg(list(model.1, model.2), file = "texreg.doc",
inline.css = FALSE, doctype = TRUE, html.tag = TRUE,
head.tag = TRUE, body.tag = TRUE)
unlink("texreg.doc")
}
\keyword{print}
\keyword{misc}
\keyword{utilities}
\keyword{IO}
\keyword{programming}
\keyword{interface}
| /man/texreg.Rd | no_license | eivindhammers/texreg | R | false | false | 29,968 | rd | \name{texreg}
\alias{texreg}
\alias{htmlreg}
\alias{screenreg}
\title{Convert regression output to LaTeX or HTML tables}
\description{Conversion of R regression output to LaTeX or HTML tables.}
\usage{
texreg(l, file = NULL, single.row = FALSE,
stars = c(0.001, 0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.coef.map = NULL,
custom.gof.names = NULL, custom.note = NULL, digits = 2,
leading.zero = TRUE, symbol = "\\\\cdot", override.coef = 0,
override.se = 0, override.pvalues = 0, override.ci.low = 0,
override.ci.up = 0, omit.coef = NULL, reorder.coef = NULL,
reorder.gof = NULL, ci.force = FALSE, ci.force.level = 0.95,
ci.test = 0, groups = NULL, custom.columns = NULL,
custom.col.pos = NULL, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = FALSE,
label = "table:coefficients", booktabs = FALSE,
  siunitx = FALSE, lyx = FALSE, sideways = FALSE,
longtable = FALSE, use.packages = TRUE, table = TRUE,
no.margin = FALSE, fontsize = NULL, scalebox = NULL,
float.pos = "", col.groups = NULL, col.groups.2 = NULL,
no.table.format = FALSE, add.lines = NULL, add.lines.sep = FALSE,
center.gof = TRUE, ...)
htmlreg(l, file = NULL, single.row = FALSE, stars = c(0.001,
0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.coef.map = NULL,
custom.gof.names = NULL, custom.note = NULL, digits = 2,
leading.zero = TRUE, symbol = "·", override.coef = 0,
override.se = 0, override.pvalues = 0, override.ci.low = 0,
override.ci.up = 0, omit.coef = NULL, reorder.coef = NULL,
reorder.gof = NULL, ci.force = FALSE, ci.force.level = 0.95,
ci.test = 0, groups = NULL, custom.columns = NULL,
custom.col.pos = NULL, bold = 0.00, center = TRUE,
caption = "Statistical models", caption.above = FALSE,
star.symbol = "*", inline.css = TRUE, doctype = TRUE,
html.tag = FALSE, head.tag = FALSE, body.tag = FALSE,
indentation = "", vertical.align.px = 0, ...)
screenreg(l, file = NULL, single.row = FALSE, stars = c(0.001,
0.01, 0.05), custom.model.names = NULL,
custom.coef.names = NULL, custom.coef.map = NULL,
custom.gof.names = NULL, custom.note = NULL, digits = 2,
leading.zero = TRUE, symbol = ".", override.coef = 0,
override.se = 0, override.pvalues = 0, override.ci.low = 0,
override.ci.up = 0, omit.coef = NULL, reorder.coef = NULL,
reorder.gof = NULL, ci.force = FALSE, ci.force.level = 0.95,
ci.test = 0, groups = NULL, custom.columns = NULL,
custom.col.pos = NULL, column.spacing = 2, outer.rule = "=",
inner.rule = "-", ...)
}
\details{
texreg converts coefficients, standard errors, significance stars,
and goodness-of-fit statistics of statistical models into LaTeX
tables or HTML tables or into nicely formatted screen output for
the R console. A list of several models can be combined in a
single table. The output is customizable. New model types can be
easily implemented. Confidence intervals can be used instead of
standard errors and p values.
The \code{texreg()} function creates LaTeX code for inclusion
in a LaTeX document or for usage with \pkg{Sweave} or \pkg{knitr}.
The \code{htmlreg()} function creates HTML code. Tables in HTML
format can be saved with a ".html" extension and displayed in
a web browser. Alternatively, they can be saved with a ".doc"
extension and opened in MS Word for inclusion in office
documents. \code{htmlreg()} also works with \pkg{knitr} and HTML
or Markdown. Note that the \code{inline.css}, \code{doctype},
\code{html.tag}, \code{head.tag}, and \code{body.tag} arguments
must be adjusted for the different purposes (see the description
of the arguments).
The \code{screenreg()} function creates text representations of
tables and prints them to the R console. This is an alternative
to the \code{summary} method and serves easy model comparison.
Moreover, once a table has been prepared in the R console, it
can be later exported to LaTeX or HTML with little extra effort
because the majority of arguments of the three functions is
identical.
}
\arguments{
\item{l}{A statistical model or a list of statistical models. Lists of models can be specified as \code{l = list(model.1, model.2, ...)}. Different object types can also be mixed.}
\item{file}{Using this argument, the resulting table is written to a file rather than to the R prompt. The file name can be specified as a character string. Writing a table to a file can be useful for working with MS Office or LibreOffice. For example, using the \code{htmlreg} function, an HTML table can be written to a file with the extension \code{.doc} and opened with MS Word. The table can then be simply copied into any Word document, retaining the formatting of the table. Note that LibreOffice can import only plain HTML; CSS decorations are not supported; the resulting tables do not retain the full formatting in LibreOffice.}
\item{single.row}{By default, a model parameter takes up two lines of the table: the standard error is listed in parentheses under the coefficient. This saves a lot of horizontal space on the page and is the default table format in most academic journals. If \code{single.row = TRUE} is activated, however, both coefficient and standard error are placed in a single table cell in the same line.}
\item{stars}{The significance levels to be used to draw stars. Between 0 and 4 threshold values can be provided as a numeric vector. For example, \code{stars = numeric(0)} will not print any stars and will not print any note about significance levels below the table. \code{stars = 0.05} will attach one single star to all coefficients where the p value is below 0.05. \code{stars = c(0.001, 0.01, 0.05, 0.1)} will print one, two, or three stars, or a symbol as specified by the \code{symbol} argument depending on the p values.}
\item{custom.model.names}{A character vector of labels for the models. By default, the models are named Model 1, Model 2, etc. Specifying \code{model.names = c("My name 1", "My name 2")} etc. overrides the default behavior.}
\item{custom.coef.names}{By default, \pkg{texreg} uses the coefficient names which are stored in the models. The \code{custom.coef.names} argument can be used to replace them by other character strings in the order of appearance. For example, if a table shows a total of three different coefficients (including the intercept), the argument \code{custom.coef.names = c("Intercept", "variable 1", "variable 2")} will replace their names in this order.
Sometimes it happens that the same variable has a different name in different models. In this case, the user can use this function to assign identical names. If possible, the rows will then be merged into a single row unless both rows contain values in the same column.
Where the argument contains an \code{NA} value, the original name of the coefficient is kept. For example, \code{custom.coef.names = c(NA, "age", NA)} will only replace the second coef name and leave the first and third name as they are in the original model.}
\item{custom.coef.map}{The \code{custom.coef.map} argument can be used to select, omit, rename, and reorder coefficients.
Users must supply a named list of this form: \code{list('x' = 'First variable', 'y' = NA, 'z' = 'Third variable')}. With that particular example of \code{custom.coef.map},
1. coefficients will presented in order: x, y, z.
2. variable x will appear as "First variable", variable y will appear as "y", and variable "z" will appear as "Third variable".
3. all variables not named "x", "y", or "z" will be omitted from the table.}
\item{custom.gof.names}{A character vector which is used to replace the names of the goodness-of-fit statistics at the bottom of the table. The vector must have the same length as the number of GOF statistics in the final table. The argument works like the \code{custom.coef.names} argument, but for the GOF values. \code{NA} values can be included where the original GOF name should be kept.}
\item{custom.note}{With this argument, a replacement text for the significance note below the table can be provided. If an empty character object is provided (\code{custom.note = ""}), the note will be omitted completely. If some character string is provided (e.g., \code{custom.note = "My note"}), the significance legend is replaced by \code{My note}. The original significance legend can be included by inserting the \code{\%stars} wildcard. For example, a custom note can be added right after the significance legend by providing \code{custom.note = "\%stars. My note"}.}
\item{digits}{Set the number of decimal places for coefficients, standard errors and goodness-of-fit statistics. Do not use negative values! The argument works like the \code{digits} argument in the \code{round} function of the \pkg{base} package.}
\item{leading.zero}{Most journals require leading zeros of coefficients and standard errors (for example, \code{0.35}). This is also the default texreg behavior. Some journals, however, require omission of leading zeros (for example, \code{.35}). This can be achieved by setting \code{leading.zero = FALSE}.}
\item{symbol}{If four threshold values are handed over to the \code{stars} argument, p values smaller than the largest threshold value but larger than the second-largest threshold value are denoted by this symbol. The default symbol is \code{"\\\\cdot"} for the LaTeX dot, \code{"·"} for the HTML dot, or simply \code{"."} for the ASCII dot. If the \code{texreg} function is used, any other mathematical LaTeX symbol or plain text symbol can be used, for example \code{symbol = "\\\\circ"} for a small circle (note that backslashes must be escaped). If the \code{htmlreg} function is used, any other HTML character or symbol can be used. For the \code{screenreg} function, only plain text characters can be used.}
\item{override.coef}{Set custom values for the coefficients. New coefficients are provided as a list of numeric vectors. The list contains vectors of coefficients for each model. There must be as many vectors of coefficients as there are models. For example, if there are two models with three model terms each, the argument could be specified as \code{override.coef = list(c(0.1, 0.2, 0.3), c(0.05, 0.06, 0.07))}. If there is only one model, custom values can be provided as a plain vector (not embedded in a list). For example: \code{override.coef = c(0.05, 0.06, 0.07)}.}
\item{override.se}{Set custom values for the standard errors. New standard errors are provided as a list of numeric vectors. The list contains vectors of standard errors for each model. There must be as many vectors of standard errors as there are models. For example, if there are two models with three coefficients each, the argument could be specified as \code{override.se = list(c(0.1, 0.2, 0.3), c(0.05, 0.06, 0.07))}. If there is only one model, custom values can be provided as a plain vector (not embedded in a list). For example: \code{override.se = c(0.05, 0.06, 0.07)}. Overriding standard errors can be useful for the implementation of robust SEs, for example.}
\item{override.pvalues}{Set custom values for the p values. New p values are provided as a list of numeric vectors. The list contains vectors of p values for each model. There must be as many vectors of p values as there are models. For example, if there are two models with three coefficients each, the argument could be specified as \code{override.pvalues = list(c(0.1, 0.2, 0.3), c(0.05, 0.06, 0.07))}. If there is only one model, custom values can be provided as a plain vector (not embedded in a list). For example: \code{override.pvalues = c(0.05, 0.06, 0.07)}. Overriding p values can be useful for the implementation of robust SEs and p values, for example.}
\item{override.ci.low}{Set custom lower confidence interval bounds. This works like the other override arguments, with one exception: if confidence intervals are provided here and in the \code{override.ci.up} argument, the standard errors and p values as well as the \code{ci.force} argument are ignored.}
\item{override.ci.up}{Set custom upper confidence interval bounds. This works like the other override arguments, with one exception: if confidence intervals are provided here and in the \code{override.ci.low} argument, the standard errors and p values as well as the \code{ci.force} argument are ignored.}
\item{omit.coef}{A character string which is used as a regular expression to remove coefficient rows from the table. For example, \code{omit.coef = "group"} deletes all coefficient rows from the table where the name of the coefficient contains the character sequence "group". More complex regular expressions can be used to filter out several kinds of model terms, for example \code{omit.coef = "(thresh)|(ranef)"} to remove all model terms matching either "thresh" or "ranef". The \code{omit.coef} argument is processed after the \code{custom.coef.names} argument, so the regular expression should refer to the custom coefficient names. To omit GOF entries instead of coefficient entries, use the custom arguments of the extract functions instead (see the help entry of the \link{extract} function or \link{extract-methods}).}
\item{reorder.coef}{Reorder the rows of the coefficient block of the resulting table in a custom way. The argument takes a vector of the same length as the number of coefficients. For example, if there are three coefficients, \code{reorder.coef = c(3, 2, 1)} will put the third coefficient in the first row and the first coefficient in the third row. Reordering can be sensible because interaction effects are often added to the end of the model output although they were specified earlier in the model formula. Note: Reordering takes place after processing custom coefficient names and after omitting coefficients, so the \code{custom.coef.names} and \code{omit.coef} arguments should follow the original order.}
\item{reorder.gof}{Reorder the rows of the goodness-of-fit block of the resulting table in a custom way. The argument takes a vector of the same length as the number of GOF statistics. For example, if there are three goodness-of-fit rows, \code{reorder.gof = c(3, 2, 1)} will exchange the first and the third row. Note: Reordering takes place after processing custom GOF names, so the \code{custom.gof.names} argument should follow the original order.}
\item{ci.force}{Should confidence intervals be used instead of the default standard errors and p values? Most models implemented in the \pkg{texreg} package report standard errors and p values by default while few models report confidence intervals. However, the functions in the \pkg{texreg} package can convert standard errors and into confidence intervals if desired. To enforce confidence intervals instead of standard errors, the \code{ci.force} argument accepts either a logical value indicating whether all models or none of the models should be forced to report confidence intervals (\code{ci.force = TRUE} for all and \code{ci.force = FALSE} for none) or a vector of logical values indicating for each model separately whether the model should be forced to report confidence intervals (e.g., \code{ci.force = c(FALSE, TRUE, FALSE)}). Confidence intervals are computed using the standard normal distribution (z values based on the \code{qnorm} function).}
\item{ci.force.level}{If the \code{ci.force} argument is used to convert standard errors to confidence intervals, what confidence level should be used? By default, \code{0.95} is used (i.e., an alpha value of 0.05).}
\item{ci.test}{If confidence intervals are reported, the \code{ci.test} argument specifies the reference value to establish whether a coefficient/CI is significant. The default value \code{ci.test = 0}, for example, will attach a significance star to coefficients if the confidence interval does not contain \code{0}. If no star should be printed at all, \code{ci.test = NULL} can be used. The \code{ci.test} argument works both for models with native support for confidence intervals and in cases where the \code{ci.force} argument is used.}
\item{groups}{This argument can be used to group the rows of the table into blocks. For example, there could be one block for hypotheses and another block for control variables. Each group has a heading, and the row labels within a group are indented. The partitions must be handed over as a list of named numeric vectors, where each number is a row index and each name is the heading of the group. Example: \code{groups = list("first group" = 1:4, "second group" = 7:8)}.}
\item{custom.columns}{An optional list of additional text columns to be inserted into the table, for example coefficient types. The list should contain one or more character vectors with as many character or numeric elements as there are rows. If the vectors in the list are named, the names are used as labels in the table header. For example, \code{custom.columns = list(type = c("a", "b", "c"), 1:3)} will add two columns; the first one is labeled while the second one is not. Note that the numeric elements of the second column will be converted to character objects in this example. The consequence is that decimal alignment with the \pkg{siunitx} package is switched off in these columns. Note that this argument is processed after any arguments that affect the number of rows.}
\item{custom.col.pos}{An optional integer vector of positions for the columns given in the \code{custom columns} argument. For example, if there are three custom columns, \code{custom.col.pos = c(1, 3, 3)} will insert the first custom column before the first column of the original table and the remaining two custom columns after the second column of the original table. By default, all custom columns are placed after the first column, which usually contains the coefficient names.}
\item{bold}{[only in the \code{texreg} and \code{htmlreg} functions] The p value threshold below which the coefficient shall be formatted in a bold font. For example, \code{bold = 0.05} will cause all coefficients which are significant at the 95\% level to be formatted in bold. Note that this is not compatible with the siunitx argument in the \code{texreg} function. If both are \code{TRUE}, siunitx is switched off and a warning message appears. Note also that it is advisable to use \code{stars = FALSE} together with the \code{bold} argument because having both bolded coefficients and significance stars usually does not make any sense.}
\item{center}{[only in the \code{texreg} and \code{htmlreg} functions] Should the table be horizontally aligned at the center of the page?}
\item{caption}{[only in the \code{texreg} and \code{htmlreg} functions] Set the caption of the table. }
\item{caption.above}{[only in the \code{texreg} and \code{htmlreg} functions] Should the caption of the table be placed above the table? By default, it is placed below the table.}
\item{label}{[only in the \code{texreg} function] Set the label of the \code{table} environment.}
\item{booktabs}{[only in the \code{texreg} function] Use the \code{booktabs} LaTeX package to get thick horizontal rules in the output table (recommended).}
\item{siunitx}{[only in the \code{texreg} function] Use the \code{siunitx} LaTeX package to get a nice alignment of the coefficients (recommended).}
\item{lyx}{[only in the \code{texreg} function] \code{logical}; if \code{TRUE}, each newline in the output is doubled, which facilitates transferring the output into the LyX document processor. }
\item{sideways}{[only in the \code{texreg} function] If \code{sideways = TRUE} is set, the \code{table} floating environment is replaced by a \code{sidewaystable} float, and the \code{rotating} package is loaded in the preamble. The argument only has an effect if \code{table = TRUE} is also set.}
\item{longtable}{ [only in the \code{texreg} function] If \code{longtable = TRUE} is set, the \code{longtable} environment from the \code{longtable} LaTeX package is used to set tables across multiple pages. Note that this argument is not compatible with the \code{sideways} and \code{scalebox} arguments. These arguments will be automatically switched off when \code{longtable = TRUE} is set. }
\item{use.packages}{[only in the \code{texreg} function] If this argument is set to \code{TRUE} (= the default behavior), the required LaTeX packages are loaded in the beginning. If set to \code{FALSE}, the use package statements are omitted from the output.}
\item{table}{[only in the \code{texreg} function] By default, texreg puts the actual \code{tabular} object in a \code{table} floating environment. To get only the \code{tabular} object without the whole table header, set \code{table = FALSE}.}
\item{no.margin}{[only in the \code{texreg} function] In order to save space, inner margins of tables can be switched off.}
\item{fontsize}{[only in the \code{texreg} function] The \code{fontsize} argument serves to change the font size used in the table. Valid values are \code{"tiny"}, \code{"scriptsize"}, \code{"footnotesize"}, \code{"small"}, \code{"normalsize"}, \code{"large"}, \code{"Large"}, \code{"LARGE"}, \code{"huge"}, and \code{"Huge"}. Note that the \code{scalebox} argument often achieves better results when the goal is to change the size of the table. }
\item{scalebox}{[only in the \code{texreg} function] The \code{scalebox} argument serves to resize the table. For example, \code{scalebox = 1.0} is equivalent to the normal size, \code{scalebox = 0.5} decreases the size of the table by one half, and \code{scalebox = 2.0} doubles the space occupied by the table. Note that the \code{scalebox} argument does not work when the \code{longtable} argument is used. }
\item{float.pos}{[only in the \code{texreg} function] This argument specifies where the table should be located on the page or in the document. By default, no floating position is specified, and LaTeX takes care of the position automatically. Possible values include \code{h} (here), \code{p} (page), \code{t} (top), \code{b} (bottom), any combination thereof, e.g. \code{tb}, or any of these values followed by an exclamation mark, e.g. \code{t!}, in order to enforce this position. The square brackets do not have to be specified.}
\item{col.groups}{[only in the \code{texreg} function] This argument allows for grouping models. Must be specified as a list of the form \code{list("Group name" = m:n)}, where m and n are the first and last column contained in the group. Several groups can be specified.}
\item{col.groups.2}{[only in the \code{texreg} function] Adds a second column group.}
\item{no.table.format}{[only in the \code{texreg} function] This argument modifies how the \code{siunitx} argument sets column formats; if \code{no.table.format = TRUE}, the no-frills "S" column is used. If \code{no.table.format = FALSE}, the column format is set according to the max number of integers and decimals in a given column, e.g. S[table-format = 3.2]. Defaults to \code{FALSE}.}
\item{add.lines}{[only in the \code{texreg} function] Adds lines to the bottom part of the table. Input must be a list, with vector elements of length equal to the number of columns in the table.}
\item{center.gof}{[only in the \code{texreg} function] If \code{siunitx = TRUE}, setting center.gof = TRUE will put the GOF rows in \\multicolumn's, exempting them from being interpreted by siunitx, and instead centering cells.}
\item{star.symbol}{[only in the \code{htmlreg} function] Alternative characters for the significance stars can be specified. This is useful if \pkg{knitr} and Markdown are used for HTML report generation. In Markdown, asterisks or stars are interpreted as special characters, so they have to be escaped. To make \code{htmlreg} compatible with Markdown, specify \code{star.symbol = "\\*"}. Note that some other modifications are recommended for usage with \pkg{knitr} in combination with Markdown or HTML (see the \code{inline.css}, \code{doctype}, \code{html.tag}, \code{head.tag}, and \code{body.tag} arguments).}
\item{inline.css}{[only in the \code{htmlreg} function] Should the CSS stylesheets be embedded directly in the code of the table (\code{inline.css = TRUE}), or should the CSS stylesheets be enclosed in the <head> tag, that is, separated from the table code (\code{inline.css = FALSE})? Having inline CSS code makes the code of the table more complex, but sometimes it may be helpful when only the table shall be printed, without the head of the HTML file (for example when the table is embedded in a \pkg{knitr} report). As a rule of thumb: use inline CSS if the table is not saved to a file.}
\item{doctype}{[only in the \code{htmlreg} function] Should the first line of the HTML code contain the DOCTYPE definition? If \code{TRUE}, the HTML 4 TRANSITIONAL version is used. If \code{FALSE}, no DOCTYPE will be included. Omitting the DOCTYPE can be helpful when the \pkg{knitr} package is used to generate HTML code because \pkg{knitr} requires only the plain table, not the whole HTML document including the document type declaration. Including the DOCTYPE can be helpful when the code is saved to a file, for example as an MS Word document.}
\item{html.tag}{[only in the \code{htmlreg} function] Should the table code (and possibly the <body> and <head> tags) be enclosed in an <html> tag? Suppressing this tag is recommended when \pkg{knitr} is used for dynamic HTML or Markdown report generation. Including this tag is recommended when the code is saved to a file, for example as an MS Word document.}
\item{head.tag}{[only in the \code{htmlreg} function] Should the <head> tag (including CSS definitions and title/caption) be included in the HTML code? Suppressing this tag is recommended when \pkg{knitr} is used for dynamic HTML or Markdown report generation. Including this tag is recommended when the code is saved to a file, for example as an MS Word document.}
\item{body.tag}{[only in the \code{htmlreg} function] Should the table code be enclosed in a <body> HTML tag? Suppressing this tag is recommended when \pkg{knitr} is used for dynamic HTML or Markdown report generation. Including this tag is recommended when the code is saved to a file, for example as an MS Word document.}
\item{indentation}{[only in the \code{htmlreg} function] Characters used for indentation of the HTML code. By default, \code{indentation = ""} uses no indentation. Any number of spaces or characters can be used instead. For example, \code{indentation = " "} uses two spaces of (additional) indentation for each subelement.}
\item{vertical.align.px}{[only in the \code{htmlreg} function] Vertical alignment of significance stars. Browsers differ in their ways of displaying superscripted significance stars; in some browsers the stars are elevated by default, and in other browsers the stars are aligned vertically with the text, without any actual superscripting. This argument controls by how many additional pixels the stars are elevated. The default setting of 0 uses the defaults of the browser. In RStudio's internal browser, this looks OK, but in Firefox, this looks too low. A value of 4 looks OK in Firefox, for example, but is above the line in RStudio's internal browser. }
\item{column.spacing}{[only in the \code{screenreg} function] The amount of space between any two columns of a table. By default, two spaces are used. If the tables do not fit on a single page horizontally, the value can be set to \code{1} or \code{0}.}
\item{outer.rule}{[only in the \code{screenreg} function] The character which is used to draw the outer horizontal line above and below a table. If an empty character object is provided (i.e., \code{outer.rule = ""}), there will be no outer horizontal lines. Recommended values are \code{""}, \code{"="}, \code{"-"}, \code{"_"}, or \code{"#"}.}
\item{inner.rule}{[only in the \code{screenreg} function] The character which is used to draw the inner horizontal line above and below a table. If an empty character object is provided (i.e., \code{inner.rule = ""}), there will be no inner horizontal lines. Recommended values are \code{""}, \code{"-"}, or \code{"_"}.}
\item{...}{Custom options to be passed on to the extract function. For example, most extract methods provide custom options for the inclusion or exclusion of specific goodness-of-fit statistics. See the help entries of \link{extract} and \link{extract-methods} for more information.}
}
\references{
Leifeld, Philip (2013). texreg: Conversion of Statistical Model Output in R to
LaTeX and HTML Tables. Journal of Statistical Software, 55(8), 1-24.
\url{http://www.jstatsoft.org/v55/i08/}.
}
\seealso{
\code{\link{texreg-package} \link{extract} \link{extract-methods} \link{plotreg}}
}
\author{
Philip Leifeld (\url{http://www.philipleifeld.com})
}
\examples{
#Linear mixed-effects models
library(nlme)
model.1 <- lme(distance ~ age, data = Orthodont, random = ~ 1)
model.2 <- lme(distance ~ age + Sex, data = Orthodont, random = ~ 1)
texreg(list(model.1, model.2), booktabs = TRUE, siunitx = TRUE)
#Ordinary least squares model (example from the 'lm' help file)
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2,10,20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
table.string <- texreg(lm.D9, return.string = TRUE)
cat(table.string)
#Create a 'fake' Office document containing a regression table
htmlreg(list(model.1, model.2), file = "texreg.doc",
inline.css = FALSE, doctype = TRUE, html.tag = TRUE,
head.tag = TRUE, body.tag = TRUE)
unlink("texreg.doc")
}
\keyword{print}
\keyword{misc}
\keyword{utilities}
\keyword{IO}
\keyword{programming|interface}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSG-package.R
\docType{data}
\name{quake6}
\alias{quake6}
\title{Earthquakes from 1973 to 2010}
\description{
The time, location and magnitude of all earthquakes with magnitude
greater than 6, from 1973 onwards.
}
\examples{
data(quake6)
library(ggplot2)
qplot(year, month, data = quake6) + stat_sum(aes(size = ..n..)) +
scale_size(range = c(1, 10))
}
\references{
\url{http://cos.name/cn/topic/101510}
}
| /man/quake6.Rd | no_license | mlinking/MSG | R | false | true | 492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSG-package.R
\docType{data}
\name{quake6}
\alias{quake6}
\title{Earthquakes from 1973 to 2010}
\description{
The time, location and magnitude of all earthquakes with magnitude
greater than 6, from 1973 onwards.
}
\examples{
data(quake6)
library(ggplot2)
qplot(year, month, data = quake6) + stat_sum(aes(size = ..n..)) +
scale_size(range = c(1, 10))
}
\references{
\url{http://cos.name/cn/topic/101510}
}
|
## ---- echo = FALSE------------------------------------------------------------
map_dpi <- 72 # dpi res for all maps
fetch_osm <- FALSE
## ----GMFuncs, message=FALSE, eval = fetch_osm---------------------------------
# library (osmplotr)
# library (osmdata)
# library (magrittr)
#
# bbox <- osmdata::getbb ("greater melbourne, australia")
# coast <- opq (bbox = bbox) %>%
# add_osm_feature (key = "natural", value = "coastline") %>%
# osmdata_sf (quiet = FALSE)
## ---- eval = FALSE------------------------------------------------------------
# coast
## ---- echo = FALSE------------------------------------------------------------
message (paste0 ("Object of class 'osmdata' with:\n",
" $bbox : -38.49937,144.44405,-37.40175,146.1925\n",
" $overpass_call : The call submitted to the overpass API\n",
" $timestamp : [ Thurs 5 Oct 2017 10:23:18 ]\n",
" $osm_points : 'sf' Simple Features Collection with 13635 points\n",
" $osm_lines : 'sf' Simple Features Collection with 73 linestrings\n",
" $osm_polygons : 'sf' Simple Features Collection with 12 polygons\n",
" $osm_multilines : 'sf' Simple Features Collection with 0 multilinestrings\n",
" $osm_multipolygons : 'sf' Simple Features Collection with 0 multipolygons"))
## ---- eval = fetch_osm--------------------------------------------------------
# coast_poly <- osm_line2poly (coast$osm_lines, bbox)
# names(coast_poly)
## ---- echo = FALSE------------------------------------------------------------
c ("sea", "land", "islands")
## ---- eval = FALSE------------------------------------------------------------
# class (coast_poly$sea)
## ---- echo = FALSE------------------------------------------------------------
c ("sf", "data.frame")
## ---- eval = fetch_osm--------------------------------------------------------
# map <- osm_basemap (bbox = bbox, bg = "cadetblue2") %>%
# add_osm_objects (coast_poly$land, col = "lightyellow1") %>%
# add_osm_objects (coast_poly$islands, col="orange") %>%
# add_osm_objects (coast$osm_polygons, col="purple", border = "black") %>%
# add_osm_objects (coast$osm_lines, col="black") %>%
# print_osm_map ()
## ---- echo=FALSE, eval = fetch_osm--------------------------------------------
# print_osm_map (map, filename = 'melb_a1.png', width = 600,
# units = 'px', dpi = map_dpi)
## ---- echo=FALSE--------------------------------------------------------------
# Query OpenStreetMap for all "natural=coastline" features inside `bbox`
# and return the result as an osmdata sf object.
getCoast <- function(bbox)
{
  coastline_query <- add_osm_feature(opq(bbox),
                                     key = "natural", value = "coastline")
  osmdata_sf(coastline_query)
}
# Quick visual check: draw the raw coastline linestrings from an osmdata
# result (`coast`) on a basemap covering `bbox`.
testPlot <- function(coast, bbox)
{
  # NOTE(review): dev.cur() returns a device id >= 1, so !dev.cur() is only
  # TRUE for id 0 -- this guard looks like it can never fire; confirm intent.
  if (!dev.cur()) dev.off()
  map <- osm_basemap(bbox=bbox)
  map <- add_osm_objects(map, coast$osm_lines)
  print_osm_map(map)
}
# Convert coastline linestrings to sea/land polygons and write a small PNG
# (`fname`): sea in blue, land in brown.  Warns and plots nothing when the
# OSM query returned no linestrings.  Uses the file-level `map_dpi` constant.
testPlotPoly <- function(coast, bbox, fname)
{
    ## trouble doing this check properly on Travis
    if (nrow(coast$osm_lines) == 0) {
        warning("osm query probably failed - not plotting")
        return(invisible(NULL))
    }
    polys <- osm_line2poly(coast$osm_lines, bbox=bbox)
    m <- osm_basemap(bbox=bbox)
    m <- add_osm_objects(m, polys$sea, col='cadetblue2')
    m <- add_osm_objects(m, polys$land, col='sienna2')
    print_osm_map(m, filename = fname, width = 200,
                  units = 'px', dpi = map_dpi)
}
## ---- eval = fetch_osm--------------------------------------------------------
# test_plot <- function (bbox)
# {
# dat <- opq (bbox) %>%
# add_osm_feature (key = "natural", value = "coastline") %>%
# osmdata_sf (quiet = FALSE)
# coast <- osm_line2poly (dat$osm_lines, bbox)
# osm_basemap (bbox = bbox) %>%
# add_osm_objects(coast$sea, col = 'cadetblue2') %>%
# add_osm_objects(coast$land, col = 'sienna2')
# }
## ---- eval = fetch_osm, echo = FALSE------------------------------------------
# test_plot <- function (bbox, filename, map_dpi)
# {
# dat <- opq (bbox) %>%
# add_osm_feature (key = "natural", value = "coastline") %>%
# osmdata_sf (quiet = FALSE)
# coast <- osm_line2poly (dat$osm_lines, bbox)
# osm_basemap (bbox = bbox) %>%
# add_osm_objects(coast$sea, col = 'cadetblue2') %>%
# add_osm_objects(coast$land, col = 'sienna2') %>%
# print_osm_map (file = filename, width = 200,
# units = "px", dpi = map_dpi)
# }
## ---- eval = fetch_osm--------------------------------------------------------
# bbWE <- get_bbox (c(142.116906, -38.352713, 142.205162, -38.409661))
# coastWE <- getCoast(bbWE)
#
# bbEW <- get_bbox(c(144.603127, -38.104003, 144.685557, -38.135596))
# coastEW <- getCoast(bbEW)
#
# bbNS <- get_bbox(c(143.807998, -39.770986, 143.906494, -39.918643))
# coastNS <- getCoast(bbNS)
#
# bbSN <- get_bbox(c(144.073544, -39.854586, 144.149318, -39.960047))
# coastSN <- getCoast(bbSN)
#
# bbWW <- get_bbox(c(144.904865, -37.858295, 144.923679, -37.874367))
# coastWW <- getCoast(bbWW)
#
# bbEE <- get_bbox(c(144.643383, -38.294671, 144.692197, -38.336022))
# coastEE <- getCoast(bbEE)
#
# bbNN <- get_bbox(c(145.856321, -38.831642, 146.050920, -38.914031))
# coastNN <- getCoast(bbNN)
#
# bbSS <- get_bbox(c(146.363768, -38.770345, 146.486389, -38.837287))
# coastSS <- getCoast(bbSS)
#
# bbEN <- get_bbox(c(144.738212, -38.337690, 144.758053, -38.346966))
# coastEN <- getCoast(bbEN)
#
# bbEWWS <- get_bbox(c(144.693077, -38.307526, 144.729113, -38.343997 ))
# coastEWWS <- getCoast(bbEWWS)
#
# bbWS <- get_bbox(c(143.164906 ,-38.704885, 143.2075563, -38.7462058 ))
# coastWS <- getCoast(bbWS)
#
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastWE, bbWE, "testWE.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEW, bbEW, "testEW.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastNS, bbNS, "testNS.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastSN, bbSN, "testSN.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastWW, bbWW, "testWW.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEE, bbEE, "testEE.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastNN, bbNN, "testNN.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastSS, bbSS, "testSS.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEN, bbEN, "testEN.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEWWS, bbEWWS, "testEWWS.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastWS, bbWS, "testWS.png")
| /inst/doc/maps-with-ocean.R | no_license | cran/osmplotr | R | false | false | 7,201 | r | ## ---- echo = FALSE------------------------------------------------------------
map_dpi <- 72 # dpi res for all maps
fetch_osm <- FALSE
## ----GMFuncs, message=FALSE, eval = fetch_osm---------------------------------
# library (osmplotr)
# library (osmdata)
# library (magrittr)
#
# bbox <- osmdata::getbb ("greater melbourne, australia")
# coast <- opq (bbox = bbox) %>%
# add_osm_feature (key = "natural", value = "coastline") %>%
# osmdata_sf (quiet = FALSE)
## ---- eval = FALSE------------------------------------------------------------
# coast
## ---- echo = FALSE------------------------------------------------------------
message (paste0 ("Object of class 'osmdata' with:\n",
" $bbox : -38.49937,144.44405,-37.40175,146.1925\n",
" $overpass_call : The call submitted to the overpass API\n",
" $timestamp : [ Thurs 5 Oct 2017 10:23:18 ]\n",
" $osm_points : 'sf' Simple Features Collection with 13635 points\n",
" $osm_lines : 'sf' Simple Features Collection with 73 linestrings\n",
" $osm_polygons : 'sf' Simple Features Collection with 12 polygons\n",
" $osm_multilines : 'sf' Simple Features Collection with 0 multilinestrings\n",
" $osm_multipolygons : 'sf' Simple Features Collection with 0 multipolygons"))
## ---- eval = fetch_osm--------------------------------------------------------
# coast_poly <- osm_line2poly (coast$osm_lines, bbox)
# names(coast_poly)
## ---- echo = FALSE------------------------------------------------------------
c ("sea", "land", "islands")
## ---- eval = FALSE------------------------------------------------------------
# class (coast_poly$sea)
## ---- echo = FALSE------------------------------------------------------------
c ("sf", "data.frame")
## ---- eval = fetch_osm--------------------------------------------------------
# map <- osm_basemap (bbox = bbox, bg = "cadetblue2") %>%
# add_osm_objects (coast_poly$land, col = "lightyellow1") %>%
# add_osm_objects (coast_poly$islands, col="orange") %>%
# add_osm_objects (coast$osm_polygons, col="purple", border = "black") %>%
# add_osm_objects (coast$osm_lines, col="black") %>%
# print_osm_map ()
## ---- echo=FALSE, eval = fetch_osm--------------------------------------------
# print_osm_map (map, filename = 'melb_a1.png', width = 600,
# units = 'px', dpi = map_dpi)
## ---- echo=FALSE--------------------------------------------------------------
# Query OpenStreetMap for all "natural=coastline" features inside `bbox`
# and return the result as an osmdata sf object.
getCoast <- function(bbox)
{
  qry <- opq(bbox)
  qry <- add_osm_feature(qry, key = "natural", value = "coastline")
  return(osmdata_sf(qry))
}
# Quick visual check: draw the raw coastline linestrings from an osmdata
# result (`coast`) on a basemap covering `bbox`.
testPlot <- function(coast, bbox)
{
  # NOTE(review): dev.cur() returns a device id >= 1, so !dev.cur() is only
  # TRUE for id 0 -- this guard looks like it can never fire; confirm intent.
  if (!dev.cur()) dev.off()
  map <- osm_basemap(bbox=bbox)
  map <- add_osm_objects(map, coast$osm_lines)
  print_osm_map(map)
}
# Convert coastline linestrings to sea/land polygons and write a small PNG
# (`fname`): sea in blue, land in brown.  Warns and plots nothing when the
# OSM query returned no linestrings.  Uses the file-level `map_dpi` constant.
testPlotPoly <- function(coast, bbox, fname)
{
    ## trouble doing this check properly on Travis
    if (nrow(coast$osm_lines) > 0) {
        coastp <- osm_line2poly(coast$osm_lines, bbox=bbox)
        map <- osm_basemap(bbox=bbox)
        map <- add_osm_objects(map, coastp$sea, col='cadetblue2')
        map <- add_osm_objects(map, coastp$land, col='sienna2')
        print_osm_map(map,filename = fname, width = 200,
                      units = 'px', dpi = map_dpi)
    } else {
        warning("osm query probably failed - not plotting")
        invisible(NULL)
    }
}
## ---- eval = fetch_osm--------------------------------------------------------
# test_plot <- function (bbox)
# {
# dat <- opq (bbox) %>%
# add_osm_feature (key = "natural", value = "coastline") %>%
# osmdata_sf (quiet = FALSE)
# coast <- osm_line2poly (dat$osm_lines, bbox)
# osm_basemap (bbox = bbox) %>%
# add_osm_objects(coast$sea, col = 'cadetblue2') %>%
# add_osm_objects(coast$land, col = 'sienna2')
# }
## ---- eval = fetch_osm, echo = FALSE------------------------------------------
# test_plot <- function (bbox, filename, map_dpi)
# {
# dat <- opq (bbox) %>%
# add_osm_feature (key = "natural", value = "coastline") %>%
# osmdata_sf (quiet = FALSE)
# coast <- osm_line2poly (dat$osm_lines, bbox)
# osm_basemap (bbox = bbox) %>%
# add_osm_objects(coast$sea, col = 'cadetblue2') %>%
# add_osm_objects(coast$land, col = 'sienna2') %>%
# print_osm_map (file = filename, width = 200,
# units = "px", dpi = map_dpi)
# }
## ---- eval = fetch_osm--------------------------------------------------------
# bbWE <- get_bbox (c(142.116906, -38.352713, 142.205162, -38.409661))
# coastWE <- getCoast(bbWE)
#
# bbEW <- get_bbox(c(144.603127, -38.104003, 144.685557, -38.135596))
# coastEW <- getCoast(bbEW)
#
# bbNS <- get_bbox(c(143.807998, -39.770986, 143.906494, -39.918643))
# coastNS <- getCoast(bbNS)
#
# bbSN <- get_bbox(c(144.073544, -39.854586, 144.149318, -39.960047))
# coastSN <- getCoast(bbSN)
#
# bbWW <- get_bbox(c(144.904865, -37.858295, 144.923679, -37.874367))
# coastWW <- getCoast(bbWW)
#
# bbEE <- get_bbox(c(144.643383, -38.294671, 144.692197, -38.336022))
# coastEE <- getCoast(bbEE)
#
# bbNN <- get_bbox(c(145.856321, -38.831642, 146.050920, -38.914031))
# coastNN <- getCoast(bbNN)
#
# bbSS <- get_bbox(c(146.363768, -38.770345, 146.486389, -38.837287))
# coastSS <- getCoast(bbSS)
#
# bbEN <- get_bbox(c(144.738212, -38.337690, 144.758053, -38.346966))
# coastEN <- getCoast(bbEN)
#
# bbEWWS <- get_bbox(c(144.693077, -38.307526, 144.729113, -38.343997 ))
# coastEWWS <- getCoast(bbEWWS)
#
# bbWS <- get_bbox(c(143.164906 ,-38.704885, 143.2075563, -38.7462058 ))
# coastWS <- getCoast(bbWS)
#
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastWE, bbWE, "testWE.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEW, bbEW, "testEW.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastNS, bbNS, "testNS.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastSN, bbSN, "testSN.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastWW, bbWW, "testWW.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEE, bbEE, "testEE.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastNN, bbNN, "testNN.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastSS, bbSS, "testSS.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEN, bbEN, "testEN.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastEWWS, bbEWWS, "testEWWS.png")
## ---- eval = fetch_osm--------------------------------------------------------
# testPlotPoly(coastWS, bbWS, "testWS.png")
|
# Simple linear regression: Salary ~ YearsExperience.
# Fits on a 2/3 training split and visualises the fit on both splits.
#d:\GITHUB\Machine_learning\02_Simple_Linear_Regression\Salary_Data.csv
salaries <- read.csv('d:\\GITHUB\\Machine_learning\\02_Simple_Linear_Regression\\Salary_Data.csv')

library(caTools)

# Reproducible 2/3 train / 1/3 test split, stratified on the response.
set.seed(123)
in_train <- sample.split(salaries$Salary, SplitRatio = 2/3)
train_df <- subset(salaries, in_train == TRUE)
test_df <- subset(salaries, in_train == FALSE)

# Fit OLS on the training data and predict the held-out set.
fit <- lm(formula = Salary ~ YearsExperience, data = train_df)
y_pred <- predict(fit, newdata = test_df)

#install.packages('ggplot2')
library(ggplot2)

# Training data with the fitted regression line.
ggplot() +
  geom_point(aes(x = train_df$YearsExperience, y = train_df$Salary),
             colour = 'red') +
  geom_line(aes(x = train_df$YearsExperience,
                y = predict(fit, newdata = train_df)),
            colour = 'green') +
  ggtitle('Salary vs Experience (Training set)') +
  xlab('Years of Experience') +
  ylab('Salary')

# Test data plotted against the same (training-fitted) line.
ggplot() +
  geom_point(aes(x = test_df$YearsExperience, y = test_df$Salary),
             colour = 'red') +
  geom_line(aes(x = train_df$YearsExperience,
                y = predict(fit, newdata = train_df)),
            colour = 'green') +
  ggtitle('Salary vs Experience (Test set)') +
  xlab('Years of Experience') +
  ylab('Salary')
| /02_Simple_Linear_Regression/R/simple_linear_regression.R | no_license | ritusca00/Machine_learning | R | false | false | 1,228 | r | #d:\GITHUB\Machine_learning\02_Simple_Linear_Regression\Salary_Data.csv
# Load the salary dataset (hard-coded Windows path).
dataset = read.csv('d:\\GITHUB\\Machine_learning\\02_Simple_Linear_Regression\\Salary_Data.csv')
library(caTools)
# Reproducible 2/3 train / 1/3 test split, stratified on the response.
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Fit OLS on the training data and predict the held-out set.
regressor = lm(formula = Salary ~ YearsExperience,
               data = training_set)
y_pred = predict(regressor, newdata = test_set)
#install.packages('ggplot2')
library(ggplot2)
# Training data with the fitted regression line.
ggplot() +
  geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
             colour = 'red') +
  geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'green') +
  ggtitle('Salary vs Experience (Training set)') +
  xlab('Years of Experience') +
  ylab('Salary')
# Test data plotted against the same (training-fitted) line.
ggplot() +
  geom_point(aes(x = test_set$YearsExperience, y = test_set$Salary),
             colour = 'red') +
  geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'green') +
  ggtitle('Salary vs Experience (Test set)') +
  xlab('Years of Experience') +
  ylab('Salary')
|
# Mapa con aeropuertos de destino -----------------------------------------
hcmap("countries/us/us-all", showInLegend = F) %>%
hc_add_series(data = aux_data,
type = "mapbubble",# si se quiere utilizar mapbubble se debe incluir variable z en dataset para dar el tamaño
name = "Aeropuerto",
dataLabels = list(enabled = F),
# minSize = 5, maxSize = 30,
tooltip = list(pointFormat = "{point.name}<br>Vuelos:{point.n_vuelos}<br>Retraseo medio (arr):{point.value}</b><br/>")
) %>%
hc_mapNavigation(enabled = T) %>%
hc_plotOptions(series = list(showInLegend = F)) %>%
hc_title(text = "Distribución de retrasos por aeropuerto de destino")
| /Ejercicio reporte shiny/App/hcmap.R | no_license | majorquev/explorando_highcharter | R | false | false | 792 | r |
# Mapa con aeropuertos de destino -----------------------------------------
hcmap("countries/us/us-all", showInLegend = F) %>%
hc_add_series(data = aux_data,
type = "mapbubble",# si se quiere utilizar mapbubble se debe incluir variable z en dataset para dar el tamaño
name = "Aeropuerto",
dataLabels = list(enabled = F),
# minSize = 5, maxSize = 30,
tooltip = list(pointFormat = "{point.name}<br>Vuelos:{point.n_vuelos}<br>Retraseo medio (arr):{point.value}</b><br/>")
) %>%
hc_mapNavigation(enabled = T) %>%
hc_plotOptions(series = list(showInLegend = F)) %>%
hc_title(text = "Distribución de retrasos por aeropuerto de destino")
|
a23f6bb772cb120c759d632c61f3abc9 horn_400CNF2040_2aQBF_57.qdimacs 400 2040 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Letombe/horn/horn_400CNF2040_2aQBF_57/horn_400CNF2040_2aQBF_57.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 74 | r | a23f6bb772cb120c759d632c61f3abc9 horn_400CNF2040_2aQBF_57.qdimacs 400 2040 |
#' Output format for letter
#'
#' Each function is a wrapper for \code{\link[rmarkdown]{html_document}} to
#' produce documents in ConwayLetterTemplate style.
#' @rdname pmcc
#' @param \dots Arguments passed to \code{\link[rmarkdown]{html_document}}.
#'
#' @return An R Markdown output format object.
#'
#'
#' @export
# Build the PMCC letter output format: rmarkdown::html_document configured
# with the package's HTML template and stylesheet.  All arguments are
# forwarded unchanged to html_document().
pmcc <- function(...) {
  # (Earlier revisions resolved these via system.file() on the installed
  # package; relative "Extras/" paths are used instead.)
  letter_template <- "Extras/template.html"
  letter_css <- "Extras/styles.css"
  rmarkdown::html_document(...,
                           template = letter_template,
                           css = letter_css)
}
| /R/pmcc.R | no_license | awconway/myrmdtemplate | R | false | false | 1,046 | r | #' Output format for letter
#'
#' Each function is a wrapper for \code{\link[rmarkdown]{html_document}} to
#' produce documents in ConwayLetterTemplate style.
#' @rdname pmcc
#' @param \dots Arguments passed to \code{\link[rmarkdown]{html_document}}.
#'
#' @return An R Markdown output format object.
#'
#'
#' @export
# Build the PMCC letter output format: rmarkdown::html_document configured
# with the package's HTML template and stylesheet; `...` is forwarded
# unchanged to html_document().
pmcc <- function(...) {
  # locations of resource files in the package
  # pkg_resource = function(...) {
  #   system.file(..., package = "myrmdtemplate")
  # }
  # template <- pkg_resource("rmarkdown/templates/myrmdtemplate/resources/template.html")
  # template <- "https://raw.githubusercontent.com/rstudio/rmarkdown/master/inst/rmd/h/default.html"
  # css = pkg_resource("rmarkdown/templates/myrmdtemplate/resources/styles.css")
  # header = pkg_resource("rmarkdown/templates/myrmdtemplate/resources/header.html")
  # NOTE(review): trailing comma below creates an empty argument in `...`;
  # harmless here but worth removing.
  rmarkdown::html_document(...,
 template = "Extras/template.html",
 css = "Extras/styles.css",
 )
}
|
`%>%` = magrittr::`%>%`
#' Creates a table of different identifiers and caches it
#'
#' @param dset Ensembl organism, e.g.: 'hsapiens_gene_ensembl', 'mmusculus_gene_ensembl'
#' @param force Re-generate table if it already exists
#' @return A data.frame with the following columns:
#' external_gene_name, affy, illumina, genbank, entrezgene, ensembl_gene_id
# Builds (or loads from cache) per-vendor probe-id -> gene-id mapping tables
# from Ensembl biomart.  Returns a named list (affy/illumina/agilent) of
# tibbles with columns external_gene_name, entrezgene_id, ensembl_gene_id,
# probe_id, with ensembl/dataset versions attached as attributes.
probeset_table = function(dset="hsapiens_gene_ensembl", version="latest", force=FALSE) {
    printv = function(dset) message(sprintf("Using Ensembl %s (%s)",
        attr(dset, "ensembl_version"), attr(dset, "dataset_version")))

    if (version == "latest")
        version = 103 #TODO: get this + be robust offline

    # serve from cache unless regeneration is forced
    fname = sprintf("probeset-%s-ens%s.rds", dset, version)
    cache = file.path(module_file(), "cache", fname)
    if (file.exists(cache) && !force) {
        mapping = readRDS(cache)
        printv(mapping)
        return(mapping)
    }

    # workaround: useMart error: SSL certificate problem: unable to get local issuer certificate
    httr::set_config(httr::config(ssl_verifypeer = FALSE), override = FALSE)

    mart = biomaRt::useMart(biomart="ensembl", dataset=dset)
    marts = biomaRt::listMarts(mart)
    vstring = marts$version[marts$biomart == "ENSEMBL_MART_ENSEMBL"]
    version = as.integer(sub(".* ([0-9]+)$", "\\1", vstring))
    datasets = biomaRt::listDatasets(mart, version)
    dataset_version = datasets$version[datasets$dataset == dset]

    # if biomart has newer ensembl update cache file name
    fname = sprintf("probeset-%s-ens%s.rds", dset, version)
    cache = file.path(module_file(), "cache", fname)
    message("Generating cache file ", sQuote(fname))

    # probe-set attribute names available for each platform vendor
    probes = list(
        affy = grep("^affy_", biomaRt::listAttributes(mart)$name, value=TRUE),
        illumina = grep("^illumina_", biomaRt::listAttributes(mart)$name, value=TRUE),
        agilent = grep("agilent_", biomaRt::listAttributes(mart)$name, value=TRUE)
    )
    tablecols = c("external_gene_name", "entrezgene_id", "ensembl_gene_id")

    # one biomart query per probe-set attribute; last column renamed probe_id
    getPS = function(p) {
        message(p)
        df = biomaRt::getBM(attributes=c(tablecols, p), mart=mart)
        colnames(df)[ncol(df)] = "probe_id"
        # fix: the filtered subset used to be computed and discarded
        # (missing assignment)
        df = df[!is.na(df$probe_id) & df$probe_id != "",]
        as.data.frame(sapply(df, as.character, simplify=FALSE, USE.NAMES=TRUE))
    }
    # bind all attributes of one vendor, drop empty probes, empty string -> NA
    assemblePS = function(p) {
        re = sapply(p, getPS, simplify=FALSE, USE.NAMES=TRUE) %>%
            dplyr::bind_rows() %>%
            dplyr::filter(probe_id != "" & !is.na(probe_id)) %>%
            dplyr::distinct()
        re$external_gene_name[re$external_gene_name == ""] = NA
        # fix: 're$entrezgene' partial-matched 'entrezgene_id' on read but
        # '$<-' created a new 'entrezgene' column, leaving entrezgene_id
        # uncleaned and adding a duplicate column
        re$entrezgene_id[re$entrezgene_id == ""] = NA
        re$ensembl_gene_id[re$ensembl_gene_id == ""] = NA
        tibble::as_tibble(re)
    }
    mapping = sapply(probes, assemblePS, simplify=FALSE, USE.NAMES=TRUE)
    attr(mapping, "ensembl_version") = version
    attr(mapping, "dataset_version") = dataset_version

    dir.create(dirname(cache), showWarnings=FALSE)
    saveRDS(mapping, file=cache)
    printv(mapping)
    mapping
}
# Warm the cache for the two standard organisms when run as a script
# (module_name() is NULL) rather than imported as a module.
if (is.null(module_name())) {
    probeset_table("hsapiens_gene_ensembl")
    probeset_table("mmusculus_gene_ensembl")
}
| /seq/probeset_table.r | permissive | mschubert/ebits | R | false | false | 3,171 | r | `%>%` = magrittr::`%>%`
#' Creates a table of different identifiers and caches it
#'
#' @param dset Ensembl organism, e.g.: 'hsapiens_gene_ensembl', 'mmusculus_gene_ensembl'
#' @param force Re-generate table if it already exists
#' @return A data.frame with the following columns:
#' external_gene_name, affy, illumina, genbank, entrezgene, ensembl_gene_id
# Builds (or loads from cache) per-vendor probe-id -> gene-id mapping tables
# from Ensembl biomart.  Returns a named list (affy/illumina/agilent) of
# tibbles with columns external_gene_name, entrezgene_id, ensembl_gene_id,
# probe_id, with ensembl/dataset versions attached as attributes.
probeset_table = function(dset="hsapiens_gene_ensembl", version="latest", force=FALSE) {
    printv = function(dset) message(sprintf("Using Ensembl %s (%s)",
        attr(dset, "ensembl_version"), attr(dset, "dataset_version")))

    if (version == "latest")
        version = 103 #TODO: get this + be robust offline

    # serve from cache unless regeneration is forced
    fname = sprintf("probeset-%s-ens%s.rds", dset, version)
    cache = file.path(module_file(), "cache", fname)
    if (file.exists(cache) && !force) {
        mapping = readRDS(cache)
        printv(mapping)
        return(mapping)
    }

    # workaround: useMart error: SSL certificate problem: unable to get local issuer certificate
    httr::set_config(httr::config(ssl_verifypeer = FALSE), override = FALSE)

    mart = biomaRt::useMart(biomart="ensembl", dataset=dset)
    marts = biomaRt::listMarts(mart)
    vstring = marts$version[marts$biomart == "ENSEMBL_MART_ENSEMBL"]
    version = as.integer(sub(".* ([0-9]+)$", "\\1", vstring))
    datasets = biomaRt::listDatasets(mart, version)
    dataset_version = datasets$version[datasets$dataset == dset]

    # if biomart has newer ensembl update cache file name
    fname = sprintf("probeset-%s-ens%s.rds", dset, version)
    cache = file.path(module_file(), "cache", fname)
    message("Generating cache file ", sQuote(fname))

    # probe-set attribute names available for each platform vendor
    probes = list(
        affy = grep("^affy_", biomaRt::listAttributes(mart)$name, value=TRUE),
        illumina = grep("^illumina_", biomaRt::listAttributes(mart)$name, value=TRUE),
        agilent = grep("agilent_", biomaRt::listAttributes(mart)$name, value=TRUE)
    )
    tablecols = c("external_gene_name", "entrezgene_id", "ensembl_gene_id")

    # one biomart query per probe-set attribute; last column renamed probe_id
    getPS = function(p) {
        message(p)
        df = biomaRt::getBM(attributes=c(tablecols, p), mart=mart)
        colnames(df)[ncol(df)] = "probe_id"
        # fix: the filtered subset used to be computed and discarded
        # (missing assignment)
        df = df[!is.na(df$probe_id) & df$probe_id != "",]
        as.data.frame(sapply(df, as.character, simplify=FALSE, USE.NAMES=TRUE))
    }
    # bind all attributes of one vendor, drop empty probes, empty string -> NA
    assemblePS = function(p) {
        re = sapply(p, getPS, simplify=FALSE, USE.NAMES=TRUE) %>%
            dplyr::bind_rows() %>%
            dplyr::filter(probe_id != "" & !is.na(probe_id)) %>%
            dplyr::distinct()
        re$external_gene_name[re$external_gene_name == ""] = NA
        # fix: 're$entrezgene' partial-matched 'entrezgene_id' on read but
        # '$<-' created a new 'entrezgene' column, leaving entrezgene_id
        # uncleaned and adding a duplicate column
        re$entrezgene_id[re$entrezgene_id == ""] = NA
        re$ensembl_gene_id[re$ensembl_gene_id == ""] = NA
        tibble::as_tibble(re)
    }
    mapping = sapply(probes, assemblePS, simplify=FALSE, USE.NAMES=TRUE)
    attr(mapping, "ensembl_version") = version
    attr(mapping, "dataset_version") = dataset_version

    dir.create(dirname(cache), showWarnings=FALSE)
    saveRDS(mapping, file=cache)
    printv(mapping)
    mapping
}
# Warm the cache for the two standard organisms when run as a script
# (module_name() is NULL) rather than imported as a module.
if (is.null(module_name())) {
    probeset_table("hsapiens_gene_ensembl")
    probeset_table("mmusculus_gene_ensembl")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.r
\name{calcRiffPool}
\alias{calcRiffPool}
\title{Calculate Whalley 'Riffle/Pool' habitat-specific BMWP indices}
\usage{
calcRiffPool(df, type = "num")
}
\arguments{
\item{df}{A dataframe containing list of taxa in first column, followed by
columns of abundances with sample names in header row.}
\item{type}{Indicates format of data. Options are "num" for numeric data,
"log" for integer log abundance categories (1-5) or "alpha" for alphabetic
abundance categories (A-E). Default value is "num".}
}
\value{
A data frame consisting of columns of index values with samples
in rows.
}
\description{
Calculates Whalley riffle/pool-specific BMWP, ASPT and N-taxa
indices for invertebrate samples.
}
\examples{
# calculate the Whalley Riffle/Pool BMWP indices for the Braid Burn dataset
# data are alphabetic log abundance categories, so type is "alpha"
calcRiffPool(braidburn, "alpha")
}
| /man/calcRiffPool.Rd | no_license | robbriers/biotic | R | false | true | 976 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.r
\name{calcRiffPool}
\alias{calcRiffPool}
\title{Calculate Whalley 'Riffle/Pool' habitat-specific BMWP indices}
\usage{
calcRiffPool(df, type = "num")
}
\arguments{
\item{df}{A dataframe containing list of taxa in first column, followed by
columns of abundances with sample names in header row.}
\item{type}{Indicates format of data. Options are "num" for numeric data,
"log" for integer log abundance categories (1-5) or "alpha" for alphabetic
abundance categories (A-E). Default value is "num".}
}
\value{
A data frame consisting of columns of index values with samples
in rows.
}
\description{
Calculates Whalley riffle/pool-specific BMWP, ASPT and N-taxa
indices for invertebrate samples.
}
\examples{
# calculate the Whalley Riffle/Pool BMWP indices for the Braid Burn dataset
# data are alphabetic log abundance categories, so type is "alpha"
calcRiffPool(braidburn, "alpha")
}
|
# Auto-generated libFuzzer/valgrind regression input for grattan:::anyOutside;
# do not hand-edit the literal values.
testlist <- list(a = 0L, b = 0L, x = c(1869963884L, 1986355048L, 1634624620L, 1697196115L, 1163415584L, 1668247155L, 1949061957L, 1481646179L, 1869509492L, 754974719L, -58854L, 16714495L, -16327717L, -1L, 1560281088L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130450-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 440 | r | testlist <- list(a = 0L, b = 0L, x = c(1869963884L, 1986355048L, 1634624620L, 1697196115L, 1163415584L, 1668247155L, 1949061957L, 1481646179L, 1869509492L, 754974719L, -58854L, 16714495L, -16327717L, -1L, 1560281088L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
# set working directory
# NOTE(review): setwd() in a script is fragile -- prefer project-relative paths.
setwd("~/Dropbox/coursera/data science/getting and cleaning data - 4/UCI HAR Dataset")
library(tidyverse)
# read in training dataset
activities <- read.table(file = "activity_labels.txt", col.names = c("code", "Activity"))
features <- read.table(file = "features.txt", col.names = c("n", "functions"))
subject_train <- read.table(file = "./train/subject_train.txt", col.names = "Subject")
y_train <- read.table(file = "./train/Y_train.txt", col.names ="code")
x_train <- read.table(file = "./train/X_train.txt", col.names = features$functions)
subject_test <- read.table(file = "./test/subject_test.txt", col.names = "Subject")
y_test <- read.table(file = "./test/Y_test.txt", col.names = "code")
x_test <- read.table(file = "./test/X_test.txt", col.names = features$functions)
# combine and tidy: stack train/test, then keep subject, activity code,
# and every mean/std feature column
subject_all <- rbind(subject_train, subject_test)
y_all <- rbind(y_train, y_test)
x_all <- rbind(x_train, x_test)
data_all <- cbind(subject_all, y_all, x_all)
tidy_up <- data_all %>%
  select(Subject, code, contains("mean"), contains("std"))
# recode activities into tidy_up (code is used as a row index into activities)
tidy_up$code <- activities[tidy_up$code, 2]
# applying descriptive labels
# names(tidy_up)
names(tidy_up)[2] = "Activity"
names(tidy_up) <- gsub("Acc", "Accelerometer", names(tidy_up))
names(tidy_up) <- gsub("Gyro", "Gyroscope", names(tidy_up))
names(tidy_up) <- gsub("BodyBody", "Body", names(tidy_up))
names(tidy_up) <- gsub("Mag", "Magnitude", names(tidy_up))
names(tidy_up) <- gsub("^t", "Time", names(tidy_up))
names(tidy_up) <- gsub("^f", "Frequency", names(tidy_up))
# NOTE(review): the "^t" substitution above already rewrote "tBody" to
# "TimeBody", so the next line can never match -- dead code; confirm intent.
names(tidy_up) <- gsub("tBody", "Body_time", names(tidy_up))
# NOTE(review): "()" in the patterns below are empty regex groups, so these
# effectively match "-mean"/"-std"/"-freq"; works, but escaping would be clearer.
names(tidy_up) <- gsub("-mean()", "Mean", names(tidy_up), ignore.case = TRUE)
names(tidy_up) <- gsub("-std()", "Std_deviation", names(tidy_up), ignore.case = TRUE)
names(tidy_up) <- gsub("-freq()", "Frequency", names(tidy_up), ignore.case = TRUE)
names(tidy_up) <- gsub("angle", "Angle", names(tidy_up))
names(tidy_up) <- gsub("gravity", "Gravity", names(tidy_up))
# create new dataset, with average for each activity and each subject
tidy_summary <- tidy_up %>%
  group_by(Subject, Activity) %>%
  summarise_all(list(mean = mean))
write.table(tidy_summary, "SummaryData.txt", row.name=FALSE)
| /run_analysis.R | no_license | mc729345/Coursera---Getting-and-Cleaning-Data | R | false | false | 2,268 | r | # set working directory
# Set working directory (fragile; prefer project-relative paths).
setwd("~/Dropbox/coursera/data science/getting and cleaning data - 4/UCI HAR Dataset")
library(tidyverse)
# read in training dataset
activities <- read.table(file = "activity_labels.txt", col.names = c("code", "Activity"))
features <- read.table(file = "features.txt", col.names = c("n", "functions"))
subject_train <- read.table(file = "./train/subject_train.txt", col.names = "Subject")
y_train <- read.table(file = "./train/Y_train.txt", col.names ="code")
x_train <- read.table(file = "./train/X_train.txt", col.names = features$functions)
subject_test <- read.table(file = "./test/subject_test.txt", col.names = "Subject")
y_test <- read.table(file = "./test/Y_test.txt", col.names = "code")
x_test <- read.table(file = "./test/X_test.txt", col.names = features$functions)
# combine and tidy: stack train/test, keep subject, activity, mean/std features
subject_all <- rbind(subject_train, subject_test)
y_all <- rbind(y_train, y_test)
x_all <- rbind(x_train, x_test)
data_all <- cbind(subject_all, y_all, x_all)
tidy_up <- data_all %>%
  select(Subject, code, contains("mean"), contains("std"))
# recode activities into tidy_up
tidy_up$code <- activities[tidy_up$code, 2]
# applying descriptive labels
# names(tidy_up)
names(tidy_up)[2] = "Activity"
names(tidy_up) <- gsub("Acc", "Accelerometer", names(tidy_up))
names(tidy_up) <- gsub("Gyro", "Gyroscope", names(tidy_up))
names(tidy_up) <- gsub("BodyBody", "Body", names(tidy_up))
names(tidy_up) <- gsub("Mag", "Magnitude", names(tidy_up))
names(tidy_up) <- gsub("^t", "Time", names(tidy_up))
names(tidy_up) <- gsub("^f", "Frequency", names(tidy_up))
# NOTE(review): "^t" above already rewrote "tBody" -> "TimeBody"; next line is dead.
names(tidy_up) <- gsub("tBody", "Body_time", names(tidy_up))
names(tidy_up) <- gsub("-mean()", "Mean", names(tidy_up), ignore.case = TRUE)
names(tidy_up) <- gsub("-std()", "Std_deviation", names(tidy_up), ignore.case = TRUE)
names(tidy_up) <- gsub("-freq()", "Frequency", names(tidy_up), ignore.case = TRUE)
names(tidy_up) <- gsub("angle", "Angle", names(tidy_up))
names(tidy_up) <- gsub("gravity", "Gravity", names(tidy_up))
# create new dataset, with average for each activity and each subject
tidy_summary <- tidy_up %>%
  group_by(Subject, Activity) %>%
  summarise_all(list(mean = mean))
write.table(tidy_summary, "SummaryData.txt", row.name=FALSE)
|
#if "<-" it's lines of code from precept; if "=" it's lines I made
# Load NYSE TAQ quote (Q) and trade (T) data, restrict both to regular
# trading hours (09:30-16:00) and to four exchanges, in preparation for
# Lee-Ready trade-direction classification below.
# NOTE(review): rm(list=ls()) and a hard-coded setwd() make this script
# non-portable; prefer relative paths or an RStudio project.
closeAllConnections()
rm(list=ls())
library(data.table)
library(chron)
setwd("C:\\Users\\benny\\OneDrive\\Desktop\\github\\lee-ready_algorithm")
# Quotes: parse dates (YYYYMMDD) and intraday times, then keep only
# regular-hours quotes from exchanges N, T, P and Z.
Q <- fread("TAQquotes.gz", header=TRUE)
Q$DATE <- as.Date(as.character(Q$DATE),format='%Y%m%d')
Q$TIME_M <- chron(times=Q$TIME_M)
Q <- Q[Q$TIME_M >= chron(times='09:30:00') & Q$TIME_M <= chron(times='16:00:00')]
Q <- Q[Q$EX == 'N' | Q$EX == 'T' | Q$EX == 'P' | Q$EX == 'Z']
# Trades: same cleaning as quotes.
# NOTE(review): the name T masks base R's shorthand for TRUE; a name such
# as `trades` would be safer.
T <- fread("TAQtrades.gz", header=TRUE)
T$DATE <- as.Date(as.character(T$DATE),format='%Y%m%d')
T$TIME_M <- chron(times=T$TIME_M)
T <- T[T$TIME_M >= chron(times='09:30:00') & T$TIME_M <= chron(times='16:00:00')]
T <- T[T$EX == 'N' | T$EX == 'T' | T$EX == 'P' | T$EX == 'Z']
Q$EX <- factor(Q$EX) # make exchanges a categorical variable so levels() works in get_nbbo()
# Compute the national best bid or offer (NBBO) prevailing at `time`.
#
# For each exchange (each level of df$EX) take its most recent quote at or
# before `time`, then aggregate across exchanges: the highest bid or the
# lowest offer wins.
#
# Args:
#   type: "bid" or "offer" -- which side of the quote to aggregate.
#   df:   quote table with columns EX (factor), TIME_M, BID, ASK.
#         Rows are selected with `df[cond, ]`, so both data.table and
#         plain data.frame inputs work (the original `df[cond]` was
#         data.table-only).
#   time: cutoff, comparable to df$TIME_M with `<=` (chron or numeric).
#
# Returns: a length-1 numeric; an empty result (length 0) when no exchange
#   has quoted at or before `time`, matching the original behaviour.
#
# NOTE(review): each call scans the whole quote table once per exchange,
# so attaching NBBOs to n trades is O(n * nrow(df)) -- works, but slow.
get_nbbo <- function(type, df, time) {
  if (!type %in% c("bid", "offer")) {
    stop("type must be 'bid' or 'offer'", call. = FALSE)
  }
  quotes <- c()
  for (ex in levels(df$EX)) {
    # Most recent quote from this exchange at or before `time`
    # (tail of an empty subset contributes nothing to `quotes`).
    last_quote <- tail(df[df$EX == ex & df$TIME_M <= time, ], 1)
    side <- if (type == "bid") last_quote$BID else last_quote$ASK
    quotes <- c(quotes, side)
  }
  if (length(quotes) > 1) {
    if (type == "bid") max(quotes) else min(quotes)
  } else {
    quotes
  }
}
# Attach the prevailing NBBO to every trade.
# NOTE(review): lapply() yields *list* columns (hence the as.numeric()
# coercions below) and re-scans Q once per trade -- correct but slow.
T$NBBO_OFFER <- lapply(T$TIME_M, {function (x) get_nbbo('offer', Q, x)})
T$NBBO_BID <- lapply(T$TIME_M, {function (x) get_nbbo('bid', Q, x)})
tcopy = T # keep an untouched copy before the NA recoding below (big data)
# Treat a zero bid/offer as "no quote available".
T$NBBO_OFFER[T$NBBO_OFFER == 0] <- NA
T$NBBO_BID[T$NBBO_BID == 0] <- NA
# Quote midpoint; NA whenever either side of the NBBO is missing.
T$MIDPOINT = (as.numeric(T$NBBO_BID)+as.numeric(T$NBBO_OFFER))/2
# Classify each trade price relative to the NBBO:
# a: below the bid, b: at the bid, c: strictly inside the spread,
# d: at the offer, e: above the offer.
T$a = T$NBBO_BID > T$PRICE
T$b = T$NBBO_BID == T$PRICE
T$c = T$NBBO_BID < T$PRICE & T$NBBO_OFFER > T$PRICE
T$d = T$NBBO_OFFER == T$PRICE
T$e = T$NBBO_OFFER < T$PRICE
# Fraction of trades in each of the five regions (NAs are dropped from the
# numerators only, so the five fractions need not sum to 1).
problem4b = c(sum(T$a, na.rm = TRUE)/length(T$TIME_M),
              sum(T$b, na.rm = TRUE)/length(T$TIME_M),
              sum(T$c, na.rm = TRUE)/length(T$TIME_M),
              sum(T$d, na.rm = TRUE)/length(T$TIME_M),
              sum(T$e, na.rm = TRUE)/length(T$TIME_M))
# Classify one trade as buyer- or seller-initiated by the quote-midpoint
# test (the midpoint half of the Lee-Ready algorithm).
#
# Args:
#   trueID:  value matched against dfTrade$TR_ID (assumed unique; with a
#            duplicated ID the comparisons become vectors -- TODO confirm).
#   dfTrade: trade table with columns TR_ID, PRICE, MIDPOINT.
#
# Returns: 1 if the trade printed above the midpoint (buy), 0 if below
#   (sell), NA if exactly at the midpoint or if the midpoint is NA.
#
# Fixes vs. the original: the matching row is located once instead of six
# times, and an NA midpoint (created upstream when a bid/offer is missing)
# returns NA instead of crashing `if` with
# "missing value where TRUE/FALSE needed".
priceMatchCheck <- function(trueID, dfTrade) {
  row <- dfTrade$TR_ID == trueID
  price <- dfTrade$PRICE[row]
  midpoint <- dfTrade$MIDPOINT[row]
  if (is.na(midpoint)) {
    NA  # no NBBO midpoint available: direction unknown
  } else if (price > midpoint) {
    1   # buy
  } else if (price < midpoint) {
    0   # sell
  } else {
    NA  # trade printed exactly at the midpoint
  }
}
# Sign every trade with the midpoint test (1 = buy, 0 = sell, NA = at the
# midpoint), then carry the previous sign forward for midpoint trades.
T$MATCH_INT = lapply(as.numeric(T$TR_ID), {function (x) priceMatchCheck(x, T)})
Tcopy = T
library(zoo)
# NOTE(review): if the very first trade is NA, na.locf() has no earlier
# value to carry and will shorten the vector -- verify on real data.
T$MATCH_INT = na.locf(T$MATCH_INT) #if on midpoint; I just labeled it as previous direction
# Change in trade direction between consecutive trades:
# 0 = same direction, +1 = sell -> buy, -1 = buy -> sell.
T = T[ , DIFFERENCE := as.numeric(MATCH_INT) - shift(as.numeric(MATCH_INT))]
T$SameDirection = T$DIFFERENCE == 0
T$OppositeDirection = abs(T$DIFFERENCE) == 1
# Fractions of consecutive trade pairs with same / opposite direction.
problem4d = c(sum(T$SameDirection, na.rm = TRUE), sum(T$OppositeDirection
                                                      ,na.rm = TRUE ))/length(T$DIFFERENCE)
#turns out I didn't need to calculate unique same-direction events but I'll keep it
# Encode each consecutive pair as 2*sign(t) - sign(t-1):
T$LEE_READY = as.numeric(T$MATCH_INT) + T$DIFFERENCE
#  0 = sell after sell (ss)
#  1 = buy after buy (bb)
#  2 = buy after sell (sb)
# -1 = sell after buy (bs)
| /lee-ready.R | no_license | bj-liang/lee-ready_algorithm_TAQ | R | false | false | 3,282 | r | #if "<-" it's lines of code from precept; if "=" it's lines I made
# Load NYSE TAQ quote (Q) and trade (T) data, restrict both to regular
# trading hours (09:30-16:00) and to four exchanges, in preparation for
# Lee-Ready trade-direction classification below.
# NOTE(review): rm(list=ls()) and a hard-coded setwd() make this script
# non-portable; prefer relative paths or an RStudio project.
closeAllConnections()
rm(list=ls())
library(data.table)
library(chron)
setwd("C:\\Users\\benny\\OneDrive\\Desktop\\github\\lee-ready_algorithm")
# Quotes: parse dates (YYYYMMDD) and intraday times, then keep only
# regular-hours quotes from exchanges N, T, P and Z.
Q <- fread("TAQquotes.gz", header=TRUE)
Q$DATE <- as.Date(as.character(Q$DATE),format='%Y%m%d')
Q$TIME_M <- chron(times=Q$TIME_M)
Q <- Q[Q$TIME_M >= chron(times='09:30:00') & Q$TIME_M <= chron(times='16:00:00')]
Q <- Q[Q$EX == 'N' | Q$EX == 'T' | Q$EX == 'P' | Q$EX == 'Z']
# Trades: same cleaning as quotes.
# NOTE(review): the name T masks base R's shorthand for TRUE; a name such
# as `trades` would be safer.
T <- fread("TAQtrades.gz", header=TRUE)
T$DATE <- as.Date(as.character(T$DATE),format='%Y%m%d')
T$TIME_M <- chron(times=T$TIME_M)
T <- T[T$TIME_M >= chron(times='09:30:00') & T$TIME_M <= chron(times='16:00:00')]
T <- T[T$EX == 'N' | T$EX == 'T' | T$EX == 'P' | T$EX == 'Z']
Q$EX <- factor(Q$EX) # make exchanges a categorical variable so levels() works in get_nbbo()
# Compute the national best bid or offer (NBBO) prevailing at `time`.
#
# For each exchange (each level of df$EX) take its most recent quote at or
# before `time`, then aggregate across exchanges: the highest bid or the
# lowest offer wins.
#
# Args:
#   type: "bid" or "offer" -- which side of the quote to aggregate.
#   df:   quote table with columns EX (factor), TIME_M, BID, ASK.
#         Rows are selected with `df[cond, ]`, so both data.table and
#         plain data.frame inputs work (the original `df[cond]` was
#         data.table-only).
#   time: cutoff, comparable to df$TIME_M with `<=` (chron or numeric).
#
# Returns: a length-1 numeric; an empty result (length 0) when no exchange
#   has quoted at or before `time`, matching the original behaviour.
#
# NOTE(review): each call scans the whole quote table once per exchange,
# so attaching NBBOs to n trades is O(n * nrow(df)) -- works, but slow.
get_nbbo <- function(type, df, time) {
  if (!type %in% c("bid", "offer")) {
    stop("type must be 'bid' or 'offer'", call. = FALSE)
  }
  quotes <- c()
  for (ex in levels(df$EX)) {
    # Most recent quote from this exchange at or before `time`
    # (tail of an empty subset contributes nothing to `quotes`).
    last_quote <- tail(df[df$EX == ex & df$TIME_M <= time, ], 1)
    side <- if (type == "bid") last_quote$BID else last_quote$ASK
    quotes <- c(quotes, side)
  }
  if (length(quotes) > 1) {
    if (type == "bid") max(quotes) else min(quotes)
  } else {
    quotes
  }
}
# Attach the prevailing NBBO to every trade.
# NOTE(review): lapply() yields *list* columns (hence the as.numeric()
# coercions below) and re-scans Q once per trade -- correct but slow.
T$NBBO_OFFER <- lapply(T$TIME_M, {function (x) get_nbbo('offer', Q, x)})
T$NBBO_BID <- lapply(T$TIME_M, {function (x) get_nbbo('bid', Q, x)})
tcopy = T # keep an untouched copy before the NA recoding below (big data)
# Treat a zero bid/offer as "no quote available".
T$NBBO_OFFER[T$NBBO_OFFER == 0] <- NA
T$NBBO_BID[T$NBBO_BID == 0] <- NA
# Quote midpoint; NA whenever either side of the NBBO is missing.
T$MIDPOINT = (as.numeric(T$NBBO_BID)+as.numeric(T$NBBO_OFFER))/2
# Classify each trade price relative to the NBBO:
# a: below the bid, b: at the bid, c: strictly inside the spread,
# d: at the offer, e: above the offer.
T$a = T$NBBO_BID > T$PRICE
T$b = T$NBBO_BID == T$PRICE
T$c = T$NBBO_BID < T$PRICE & T$NBBO_OFFER > T$PRICE
T$d = T$NBBO_OFFER == T$PRICE
T$e = T$NBBO_OFFER < T$PRICE
# Fraction of trades in each of the five regions (NAs are dropped from the
# numerators only, so the five fractions need not sum to 1).
problem4b = c(sum(T$a, na.rm = TRUE)/length(T$TIME_M),
              sum(T$b, na.rm = TRUE)/length(T$TIME_M),
              sum(T$c, na.rm = TRUE)/length(T$TIME_M),
              sum(T$d, na.rm = TRUE)/length(T$TIME_M),
              sum(T$e, na.rm = TRUE)/length(T$TIME_M))
# Classify one trade as buyer- or seller-initiated by the quote-midpoint
# test (the midpoint half of the Lee-Ready algorithm).
#
# Args:
#   trueID:  value matched against dfTrade$TR_ID (assumed unique; with a
#            duplicated ID the comparisons become vectors -- TODO confirm).
#   dfTrade: trade table with columns TR_ID, PRICE, MIDPOINT.
#
# Returns: 1 if the trade printed above the midpoint (buy), 0 if below
#   (sell), NA if exactly at the midpoint or if the midpoint is NA.
#
# Fixes vs. the original: the matching row is located once instead of six
# times, and an NA midpoint (created upstream when a bid/offer is missing)
# returns NA instead of crashing `if` with
# "missing value where TRUE/FALSE needed".
priceMatchCheck <- function(trueID, dfTrade) {
  row <- dfTrade$TR_ID == trueID
  price <- dfTrade$PRICE[row]
  midpoint <- dfTrade$MIDPOINT[row]
  if (is.na(midpoint)) {
    NA  # no NBBO midpoint available: direction unknown
  } else if (price > midpoint) {
    1   # buy
  } else if (price < midpoint) {
    0   # sell
  } else {
    NA  # trade printed exactly at the midpoint
  }
}
# Sign every trade with the midpoint test (1 = buy, 0 = sell, NA = at the
# midpoint), then carry the previous sign forward for midpoint trades.
T$MATCH_INT = lapply(as.numeric(T$TR_ID), {function (x) priceMatchCheck(x, T)})
Tcopy = T
library(zoo)
# NOTE(review): if the very first trade is NA, na.locf() has no earlier
# value to carry and will shorten the vector -- verify on real data.
T$MATCH_INT = na.locf(T$MATCH_INT) #if on midpoint; I just labeled it as previous direction
# Change in trade direction between consecutive trades:
# 0 = same direction, +1 = sell -> buy, -1 = buy -> sell.
T = T[ , DIFFERENCE := as.numeric(MATCH_INT) - shift(as.numeric(MATCH_INT))]
T$SameDirection = T$DIFFERENCE == 0
T$OppositeDirection = abs(T$DIFFERENCE) == 1
# Fractions of consecutive trade pairs with same / opposite direction.
problem4d = c(sum(T$SameDirection, na.rm = TRUE), sum(T$OppositeDirection
                                                      ,na.rm = TRUE ))/length(T$DIFFERENCE)
#turns out I didn't need to calculate unique same-direction events but I'll keep it
# Encode each consecutive pair as 2*sign(t) - sign(t-1):
T$LEE_READY = as.numeric(T$MATCH_INT) + T$DIFFERENCE
#  0 = sell after sell (ss)
#  1 = buy after buy (bb)
#  2 = buy after sell (sb)
# -1 = sell after buy (bs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.