content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/growthcurve.R
\name{plot.enve.GrowthCurve}
\alias{plot.enve.GrowthCurve}
\title{Enveomics: Plot of Growth Curve}
\usage{
\method{plot}{enve.GrowthCurve}(x, col, pt.alpha = 0.9, ln.alpha = 1,
ln.lwd = 1, ln.lty = 1, band.alpha = 0.4, band.density = NULL,
band.angle = 45, xp.alpha = 0.5, xp.lwd = 1, xp.lty = 1,
pch = 19, new = TRUE, legend = new, add.params = FALSE, ...)
}
\arguments{
\item{x}{An \code{\link{enve.GrowthCurve}} object to plot.}
\item{col}{Base colors to use for the different samples. Can be recycled.
By default, grey for one sample or rainbow colors for more than one.}
\item{pt.alpha}{Color alpha for the observed data points, using \code{col}
as a base.}
\item{ln.alpha}{Color alpha for the fitted growth curve, using \code{col}
as a base.}
\item{ln.lwd}{Line width for the fitted curve.}
\item{ln.lty}{Line type for the fitted curve.}
\item{band.alpha}{Color alpha for the confidence interval band of the
fitted growth curve, using \code{col} as a base.}
\item{band.density}{Density of the filling pattern in the interval band.
If \code{NULL}, a solid color is used.}
\item{band.angle}{Angle of the density filling pattern in the interval
band. Ignored if \code{band.density} is \code{NULL}.}
\item{xp.alpha}{Color alpha for the line connecting individual experiments,
using \code{col} as a base.}
\item{xp.lwd}{Width of line for the experiments.}
\item{xp.lty}{Type of line for the experiments.}
\item{pch}{Point character for observed data points.}
\item{new}{Should a new plot be generated? If \code{FALSE}, the existing
canvas is used.}
\item{legend}{Should the plot include a legend? If \code{FALSE}, no legend
is added. If \code{TRUE}, a legend is added in the bottom-right corner.
Otherwise, a legend is added in the position specified as \code{xy.coords}.}
\item{add.params}{Should the legend include the parameters of the fitted
model?}
\item{...}{Any other graphic parameters.}
}
\description{
Plots an \code{\link{enve.GrowthCurve}} object.
}
\author{
Luis M. Rodriguez-R [aut, cre]
}
| /enveomics.R/man/plot.enve.GrowthCurve.Rd | permissive | fengyq/enveomics | R | false | true | 2,122 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/growthcurve.R
\name{plot.enve.GrowthCurve}
\alias{plot.enve.GrowthCurve}
\title{Enveomics: Plot of Growth Curve}
\usage{
\method{plot}{enve.GrowthCurve}(x, col, pt.alpha = 0.9, ln.alpha = 1,
ln.lwd = 1, ln.lty = 1, band.alpha = 0.4, band.density = NULL,
band.angle = 45, xp.alpha = 0.5, xp.lwd = 1, xp.lty = 1,
pch = 19, new = TRUE, legend = new, add.params = FALSE, ...)
}
\arguments{
\item{x}{An \code{\link{enve.GrowthCurve}} object to plot.}
\item{col}{Base colors to use for the different samples. Can be recycled.
By default, grey for one sample or rainbow colors for more than one.}
\item{pt.alpha}{Color alpha for the observed data points, using \code{col}
as a base.}
\item{ln.alpha}{Color alpha for the fitted growth curve, using \code{col}
as a base.}
\item{ln.lwd}{Line width for the fitted curve.}
\item{ln.lty}{Line type for the fitted curve.}
\item{band.alpha}{Color alpha for the confidence interval band of the
fitted growth curve, using \code{col} as a base.}
\item{band.density}{Density of the filling pattern in the interval band.
If \code{NULL}, a solid color is used.}
\item{band.angle}{Angle of the density filling pattern in the interval
band. Ignored if \code{band.density} is \code{NULL}.}
\item{xp.alpha}{Color alpha for the line connecting individual experiments,
using \code{col} as a base.}
\item{xp.lwd}{Width of line for the experiments.}
\item{xp.lty}{Type of line for the experiments.}
\item{pch}{Point character for observed data points.}
\item{new}{Should a new plot be generated? If \code{FALSE}, the existing
canvas is used.}
\item{legend}{Should the plot include a legend? If \code{FALSE}, no legend
is added. If \code{TRUE}, a legend is added in the bottom-right corner.
Otherwise, a legend is added in the position specified as \code{xy.coords}.}
\item{add.params}{Should the legend include the parameters of the fitted
model?}
\item{...}{Any other graphic parameters.}
}
\description{
Plots an \code{\link{enve.GrowthCurve}} object.
}
\author{
Luis M. Rodriguez-R [aut, cre]
}
|
# installing/loading the package:
if(!require(installr)) { install.packages("installr"); require(installr)} #load / install+load installr
# step by step functions:
check.for.updates.R() # tells you if there is a new version of R or not.
install.R() # download and run the latest R installer
copy.packages.between.libraries() # copy your packages to the newest R installation from the one version before it (if ask=T, it will ask you between which two versions to perform the copying) | /CourseWork/updateR.R | no_license | umeshjn/DataAnalysiswithR | R | false | false | 484 | r | # installing/loading the package:
# NOTE(review): stand-alone upgrade script (dataset row from updateR.R).
# Every call below has side effects (network access, package installation,
# running an installer) - do not source blindly.
# Load installr, installing it first when absent; require() returns FALSE
# instead of erroring when the package is missing, hence the if() guard.
if(!require(installr)) { install.packages("installr"); require(installr)} #load / install+load installr
# step by step functions:
check.for.updates.R() # tells you if there is a new version of R or not.
install.R() # download and run the latest R installer
copy.packages.between.libraries() # copy your packages to the newest R installation from the one version before it (if ask=T, it will ask you between which two versions to perform the copying)
#primary key field
#functionName: reportTotalCount
#Description: generate the total number of unique values for the primary key field of a given table
#Input:an R dataframe containing a given database table
#Output: the total number of records in the table, or the total number of unique values of the primary key identifier
#reportTotalCount<-function(df_table)
#{
# assuming the first field is the primary key field
#return(length(unique(df_table[,1])))
#}
#functionName: describeIdentifier
#Description: count the distinct values of an identifier (primary-key) field
#Inputs: an R dataframe containing a given database table, and the name of
#  the identifier field
#Output: the number of unique values in that field, or 0 when the field's
#  only distinct value is NA (i.e. the column is entirely missing)
describeIdentifier<-function(df_table, field_name)
{
  id_column <- df_table[, which(colnames(df_table) == field_name)]
  distinct_values <- unique(id_column)
  # a single distinct value that is NA means the identifier carries no data
  if (length(distinct_values) == 1 && is.na(distinct_values))
    return(0)
  length(distinct_values)
}
#functionName: reportMissingCount
#Description: count the number of records with no value for a given field
#Inputs:an R dataframe containing a given database table, the name of the field
#Output: number of rows with NA (missing) values for the input field
#functionName: reportMissingCount
#Description: report the percentage of records with a missing (NA) value for
#  a given field
#Inputs: a dataframe (the full table when big_data_flag=FALSE, or a
#  value/frequency pair produced query-wise when TRUE), the table name, the
#  field name, and the big-data flag
#Output: a character string with the percentage of rows whose value is NA
reportMissingCount<-function(df_table,table_name,field_name, big_data_flag)
{
  if(big_data_flag==FALSE)
  {
    #retrieve the index of the field in the dataframe
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      # frequency of every value, keeping NA as its own bucket
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      if(nrow(dfTab)>0)
      {
        # percentage frequency rounded to 2 digits (e.g. "50 %")
        dfTab$label <- as.character(
          paste(
            round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
            ,'%') # add percentage
        )
        # BUGFIX: the original returned the LAST row of dfTab, assuming it was
        # the NA bucket; when the field has no NA the last real category's
        # share was misreported as missing. Locate the NA row explicitly
        # (as.character() yields NA for the NA bucket whether it is stored as
        # an NA value or as an NA factor level).
        na_rows <- dfTab[is.na(as.character(dfTab$Var1)), , drop = FALSE]
        if(nrow(na_rows)>0)
          return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is ",na_rows[1,3]));
        # no NA bucket: nothing is missing (mirrors the big-data branch below)
        return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 0%"));
      }
      else
        return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 100%"));
    }
    else
      return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 100%"));
  } else # big data with dplyr or query wise that captures frequency
  {
    # query-wise input: first column holds the value, second its frequency
    colnames(df_table)[2] <- "Freq"
    # identify row with null value
    new_df_table<-subset(df_table, is.na(df_table[1]))
    if(nrow(new_df_table)>0) # if there is a null value
    {
      #add a new column containing the percentage frequency rounded to 2 digits
      df_table$label <- as.character(
        paste(
          round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
          ,'%') # add percentage
      )
      # find the row that contains the frequency for the NA value
      na_df_table<-subset(df_table, is.na(df_table[1]))
      return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is ",na_df_table[1,3]));
    }
    else
      return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 0%"));
  }
}
#functionName: reportNoMatchingCount
#Description: count the number of records with mo matching concepts (concept_id=0)
#Inputs:an R dataframe containing a given database table, the name of the field
#Output: number of rows with 0 value for the input field
#functionName: reportNoMatchingCount
#Description: report the percentage of records whose value for the given
#  field is 0, i.e. records mapped to no matching concept (concept_id = 0)
#Inputs: a dataframe (the full table when big_data_flag=FALSE, or a
#  value/frequency pair when TRUE), the table name, the field name, and the
#  big-data flag
#Output: a character string with the percentage of rows whose value is 0
reportNoMatchingCount<-function(df_table,table_name,field_name, big_data_flag)
{
  if(big_data_flag==FALSE)
  {
    #retrieve the index of the field in the dataframe
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      # frequency of every value, keeping NA as its own bucket
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      if(nrow(dfTab)>0)
      {
        # percentage frequency rounded to 2 digits
        dfTab$label <- as.character(
          paste(
            round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
            ,'%') # add percentage
        )
        # BUGFIX: this branch was a copy-paste of reportMissingCount - it
        # reported the last (NA) bucket with a "missing values" message.
        # Find the 0 bucket instead, mirroring the big-data branch below.
        # which() drops NA comparisons, so an NA bucket cannot match.
        zero_idx <- which(as.character(dfTab$Var1)=="0")
        if(length(zero_idx)>0)
          return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is ",dfTab[zero_idx[1],3]));
        return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
      }
      else
        return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
    }
    else
      return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
  }
  else # big data with dplyr or query wise that captures frequency
  {
    # query-wise input: first column holds the value, second its frequency
    colnames(df_table)[2] <- "Freq"
    #add a new column containing the percentage frequency rounded to 2 digits
    df_table$label <- as.character(
      paste(
        round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
        ,'%') # add percentage
    )
    # identify row with no matching concept
    new_df_table<-subset(df_table,df_table[1]==0)
    if(nrow(new_df_table)>0) # if there is a 0 value
    {
      # the first matching row carries the percentage label (column 3)
      return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is ",new_df_table[1,3]));
    }
    if(nrow(new_df_table)==0)
      return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
  }
}
#functionName: reportNullFlavors
#Description: summarize how often the null-flavor codes appear in a field:
#  UN (unknown), OT (other), NI (no information) - presumably OMOP null-flavor
#  concept ids, TODO confirm - plus the share of outright missing (NA) values
#Inputs: a dataframe (full table when big_data_flag=FALSE, or a
#  value/frequency pair when TRUE), the table and field names, and the three
#  null-flavor codes to look for
#Output: a character string reporting the four percentages
#NOTE(review): when big_data_flag=FALSE and the table is empty, no return()
#  is reached and the function returns NULL implicitly - confirm callers cope.
reportNullFlavors<-function(df_table,table_name,field_name,UN_code,OT_code,NI_code,big_data_flag)
{
if(big_data_flag==FALSE)
{
#retrieve the index of the field in the dataframe
column_index <- which(colnames(df_table)==field_name)
if(nrow(df_table)>0)
{
# saving the frequencies in a separate dataframe (including NA values)
dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
#add a new column to this new dataframe containing the percentage frequency information rounded to 2 digits
dfTab$label <- as.character(
paste(
round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
,'%') # add percentage
)
# flog.info(dfTab)
# look up the percentage label for each null-flavor code; [1] gives NA
# when the code is absent, which the is.na() checks turn into "0%"
count_ni<-subset(dfTab,Var1==NI_code)$label[1]
if(is.na(count_ni)) count_ni<-"0%";
count_un<-subset(dfTab,Var1==UN_code)$label[1]
if(is.na(count_un)) count_un<-"0%";
count_ot<-subset(dfTab,Var1==OT_code)$label[1]
if(is.na(count_ot)) count_ot<-"0%";
# flog.info(count_ni[1])
# the last row contains the frequency for the NA value
# NOTE(review): this assumes table(..., exclude=NULL) sorts the NA bucket
# last; when the field has no NA at all, the last real category's share is
# reported as "missing" - confirm against reportMissingCount's behavior.
count_missing_values<-dfTab[nrow(dfTab),3];
return(paste(
"\nPercentage of",table_name,"with unknown value for ",field_name," is ",count_un,"\n",
"\nPercentage of",table_name,"with other value for ",field_name," is ",count_ot,"\n",
"\nPercentage of",table_name,"with no information for ",field_name," is ",count_ni,"\n",
"\nPercentage of",table_name,"with missing values for ",field_name," is ",count_missing_values,"\n"
));
}
} else # using dplyr or query wise - when big data flag is true
{
# query-wise input: first column holds the value, second its frequency
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
df_table$Var1 <- as.factor(df_table$Var1)
df_table$label <- as.character(
paste(
round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
,'%') # add percentage
)
# flog.info(dfTab)
#print(df_table)
count_ni<-subset(df_table,Var1==NI_code)$label[1]
#print(count_ni)
if(is.na(count_ni)) count_ni<-"0%";
count_un<-subset(df_table,Var1==UN_code)$label[1]
if(is.na(count_un)) count_un<-"0%";
count_ot<-subset(df_table,Var1==OT_code)$label[1]
if(is.na(count_ot)) count_ot<-"0%";
# flog.info(count_ni[1])
# NOTE(review): NA rows were filtered out above (subset !is.na(Var1)), so
# this subset can never match and count_missing_values is always "0%" -
# looks like the NA percentage should be computed BEFORE filtering; confirm.
count_missing_values<-subset(df_table,is.na(Var1))$label[1]
if(is.na(count_missing_values)) count_missing_values<-"0%";
return(paste(
"\nPercentage of",table_name,"with unknown value for ",field_name," is ",count_un,"\n",
"\nPercentage of",table_name,"with other value for ",field_name," is ",count_ot,"\n",
"\nPercentage of",table_name,"with no information for ",field_name," is ",count_ni,"\n",
"\nPercentage of",table_name,"with missing values for ",field_name," is ",count_missing_values,"\n"
));
} # end of if
else
{
# empty input: report everything absent / fully missing
return(paste(
"\nPercentage of",table_name,"with unknown value for ",field_name," is 0%\n",
"\nPercentage of",table_name,"with other value for ",field_name," 0%\n",
"\nPercentage of",table_name,"with no information for ",field_name," is 0%\n",
"\nPercentage of",table_name,"with missing values for ",field_name," is 100%\n"
));
}
} # end else
}
#functionName: reportUnexpected
#Description: list the values of a field that fall outside the set of
#  permissible values (NA values are skipped; missingness is reported by
#  reportMissingCount)
#Inputs: a dataframe (the full table when big_data_flag=FALSE, or a
#  value/frequency pair when TRUE), the table and field names, and the
#  vector of permissible values
#Output: a character string naming every invalid value found ("" when none)
reportUnexpected<-function(df_table,table_name,field_name,permissible_values,big_data_flag)
{
  return_message<-""
  if(big_data_flag==FALSE)
  {
    # distinct values of the named column
    column_index <- which(colnames(df_table)==field_name)
    current_values<-unique(df_table[,column_index])
  }
  else # with dplyr package or query wise scripts: first column holds values
  {
    # deliberately NOT unique(): the original reported one message per row
    current_values<-df_table[,1]
  }
  # BUGFIX: the big-data branch looped over 1:nrow(df_table), which iterates
  # c(1, 0) on an empty frame and then fails on a zero-length `&&` condition;
  # iterating the value vector directly is safe for zero rows. The !is.na()
  # guard is now applied in BOTH branches for consistency (previously the
  # small-data branch reported NA itself as an invalid value).
  for(value in current_values)
  {
    if(!is.na(value) && !is.element(value,permissible_values))
      return_message<-paste(return_message, "invalid value found: ",value,";")
  }
  return(return_message)
}
# Nominal Fields
#functionName: describeNominalField
#Description: generate a barplot for a nominal field
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the nominal field
#3. label_bin: pre-defined labels for various bins
#4. order_bins: a fixed order for various bins on the plot
#5: color_bins: colors assigned to each bin
#Output: write the barplot to a file
describeNominalField<-function(df_table, table_name,field_name, label_bins, order_bins, color_bins, big_data_flag)
{
# Relies on names defined elsewhere in the project (not visible in this
# chunk): flog.info (futile.logger), ggplot2, normalize_directory_path(),
# get_image_name(), and the g_config configuration object.
flog.info(paste("Plotting for Field: ", field_name))
# flog.info()
if(big_data_flag==FALSE)
{
# retrieve the column index for the field
#column_index<-(grep(field_name, colnames(df_table))
column_index <- which(colnames(df_table)==field_name)
# flog.info(c("columns index is ",column_index))
# saving the frequencies and percentage in a separate dataframe including NA values
if(nrow(df_table)>0)
{
dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
# percentage frequency rounded to 2 digits, used as bar labels
dfTab$label <- as.character(
paste(
round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
,'%') # add percentage
)
# flog.info(dfTab)
#creating barplot from dfTab dataframe
p<-ggplot(dfTab, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
# flog.info(p)
# add axis labels
p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
# specify the order of categories and also the labels for x-axis
p<-p+scale_x_discrete(labels=label_bins, limits= order_bins)
# specify the color for each category (grey for the NA bucket)
p<-p+ scale_fill_manual(values=color_bins,na.value="grey64")
#remove legend and set size and orientation of tick labels
p<-p+theme(legend.position="none", text = element_text(size=10),
axis.text.x = element_text(angle=90, vjust=1))
# add the label to each bar (from the dfTab dataframe)
p<-p+geom_text(data=dfTab, aes(x=Var1,y=Freq,label=label), size=3)
# flog.info(p)
#save the barplot image (will be referenced by the final report)
ggsave(file=paste(normalize_directory_path(g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
}
}
else #using dplyr
{
# query-wise input: first column holds the value, second its frequency
if(nrow(df_table)>0)
{
#dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
#adding new columns
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
df_table$Var1 <- as.factor(df_table$Var1)
df_table$label <- as.character(
paste(
round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
,'%') # add percentage
)
# flog.info(df_table)
#creating barplot from df_table dataframe (same layout as the branch above)
p<-ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
# add axis labels
p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
# specify the order of catgories and also the labels for x-axis
p<-p+scale_x_discrete(labels=label_bins, limits= order_bins)
# specify the color for each category
p<-p+ scale_fill_manual(values=color_bins,na.value="grey64")
#remove legend and set size and orientation of tick labels
p<-p+theme(legend.position="none", text = element_text(size=10),
axis.text.x = element_text(angle=90, vjust=1))
# add the label to each bar (from the df_table dataframe)
p<-p+geom_text(data=df_table, aes(x=Var1,y=Freq,label=label), size=3)
#save the barplot image (will be referenced by the final report)
# flog.info(p)
#if(!is.null(p$layers$position_stack))
# {
ggsave(file=paste(normalize_directory_path(g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
# flog.info(s)
#}
}
}
}
#updated nominal field
#functionName: describeNominalField_basic
#Description: generate a barplot for a nominal field without pre-defined
#  bins/labels/colors (unlike describeNominalField); writes the image to the
#  site directory configured in g_config
#Inputs: a dataframe (full table when big_data_flag=FALSE, or a
#  value/frequency pair when TRUE), the table name, and the field name
#Output: a saved barplot image; returns "" when the field is entirely NA
describeNominalField_basic<-function(df_table, table_name,field_name,big_data_flag)
{
flog.info(paste("Plotting for Field: ", field_name))
if(big_data_flag==FALSE)
{
# retrieve the column index for the field
#column_index<-(grep(field_name, colnames(df_table))
column_index <- which(colnames(df_table)==field_name)
# flog.info(column_index)
# saving the frequencies and percentage in a separate dataframe including NA values
#df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
# commenting the next line as we do want to include the NA values in this field
#dfTab<-subset(dfTab,!is.na(Var1))
if(nrow(dfTab)>0)
{
dfTab$label <- as.character(
paste(
round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
,'%') # add percentage
)
# skip plotting when the only bucket is the NA bucket
# NOTE(review): dfTab[1] is the whole Var1 column, not a scalar; with one
# row this works, but is.na(dfTab$Var1[1]) would be clearer - confirm.
if(nrow(dfTab)==1 && is.na(dfTab[1]))
return("");
#creating barplot from dfTab dataframe
p<-ggplot(dfTab, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
# add axis labels
p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
#remove legend and set size and orientation of tick labels
p<-p+theme(legend.position="none", text = element_text(size=10),
axis.text.x = element_text(angle=90, vjust=1))
# add the label to each bar (from the dfTab dataframe)
p<-p+geom_text(data=dfTab, aes(x=Var1,y=Freq,label=label), size=3)
# flog.info(p)
#save the barplot image (will be referenced by the final report)
ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
}
}
}
else
{
# query-wise input: first column holds the value, second its frequency
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
#df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
#dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
#adding new columns
df_table$Var1 <- as.factor(df_table$Var1)
df_table$label <- as.character(
paste(
round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
,'%') # add percentage
)
# flog.info(df_table)
#creating barplot from df_table dataframe
p<-ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
# add axis labels
p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
#remove legend and set size and orientation of tick labels
p<-p+theme(legend.position="none", text = element_text(size=10),
axis.text.x = element_text(angle=90, vjust=1))
# add the label to each bar (from the df_table dataframe)
p<-p+geom_text(data=df_table, aes(x=Var1,y=Freq,label=label), size=3)
# flog.info(df_table)
#save the barplot image (will be referenced by the final report)
# flog.info(p)
ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
}
}
}
# Ordinal Fields
#functionName: describeOrdinalField
#Description: generate a barplot for an ordinal field
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the ordinal field
#Output: write the barplot to a file
describeOrdinalField<-function(df_table, table_name,field_name,big_data_flag)
{
# Saves a ggplot barplot of the field's value frequencies to the configured
# site directory. Unlike the nominal variants, NA values are excluded here
# (table() default in the small-data branch, explicit subset in the other).
flog.info(paste("Plotting for Field: ", field_name))
if(big_data_flag==FALSE)
{
column_index <- which(colnames(df_table)==field_name)
if(nrow(df_table)>0)
{
# saving the frequencies in a separate dataframe (NA excluded by default)
dfTab <-as.data.frame(table(df_table[,column_index]))
if(nrow(dfTab)>0)
{
#create a bar plot
p<-ggplot(dfTab, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
# add axis labels
p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
#remove legend and set size and orientation of tick labels
p<-p+theme(legend.position="none", text = element_text(size=6),axis.text.x = element_text(angle=90, vjust=1), plot.background = element_blank() ,panel.grid.major = element_blank() ,panel.grid.minor = element_blank() ,panel.border = element_blank())
ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
}
}
}
else #if TRUE using dplyr
{
# query-wise input: first column holds the value, second its frequency
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
df_table$Var1 <- as.factor(df_table$Var1)
#adding new columns
#creating barplot from dfTab dataframe
#p<-ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
p<-ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
# add axis labels
p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
#remove legend and set size and orientation of tick labels
p<-p+theme(legend.position="none", text = element_text(size=6),axis.text.x = element_text(angle=90, vjust=1), plot.background = element_blank() ,panel.grid.major = element_blank() ,panel.grid.minor = element_blank() ,panel.border = element_blank())
ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
# flog.info(p)
}
}
}
# specialized date function
#functionName: describeDateField
#Description: plot the distribution of a date (or datetime) field with base
#  barplot (cheaper than ggplot for many distinct values), save it as a PNG,
#  and return a message with the top-5 most frequent dates, the date range,
#  and a warning when future dates are present
#Inputs: a dataframe (full table when big_data_flag=FALSE, or a
#  value/frequency pair when TRUE), the table name, and the field name
#Output: a character vector with the summary message
describeDateField<-function(df_table, table_name,field_name,big_data_flag)
{
# flog.info(paste("Plotting for Field: ", field_name))
if(big_data_flag==FALSE)
{
flog.info(paste("Plotting for Field: ", field_name))
column_index <- which(colnames(df_table)==field_name)
# flog.info(nrow(df_table))
if(nrow(df_table)>0)
{
# reduce to the date column only; as.Date drops any time portion
df_table<-df_table[,column_index]
#df_table<-substring(df_table,nchar(df_table)-7) #extracting the date portion of the datetime field
df_table<-as.Date(df_table)
#df_table<-as.Date(format(df_table)," "), "[[", 1)
dfTab <- table(df_table)
if(nrow(dfTab)>0)
{
# (comment says "locations" but this is the per-date frequency table)
# flog.info(colnames(dfTab))
newdata <- df <- as.data.frame(dfTab)
# flog.info(newdata)
ordered_data<-newdata[order(newdata[,2], decreasing = TRUE),]
# flog.info(ordered_data)
# counting the total number of unique values
total_locations <- nrow(dfTab)
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
# not using ggplot here as it is very expensive for a large number of values
# barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_locations,")"), ylab = paste(table_name,"Count"), xaxt='n')
barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_locations,")"), ylab = paste(table_name,"Count"))
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
# NOTE(review): unlike the big-data branch, this loop has no
# nrow(ordered_data) bound, so with fewer than 5 distinct dates it
# appends NA entries to the message - confirm whether intended.
while (index<=5)
{
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
return_message<-c(return_message, paste("\n Date range: ",min(df_table,na.rm=TRUE),"-",max(df_table,na.rm=TRUE)))
# check if future dates are included
if(Sys.Date()<max(df_table,na.rm=TRUE))
return_message<-c(return_message,"\nWARNING: includes future dates")
dev.off()
return(return_message)
}
}
}
else #using dplyr or query wise
{
# drop rows whose value is NA before plotting
df_table<-subset(df_table,!is.na(df_table[,1]))
if(nrow(df_table)>0)
{
# query-wise input: first column holds the value, second its frequency
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
# remove NA values - we dont want to plot missing values
#df_table<-subset(df_table,!is.na(Var1))
df_table$Var1<-as.Date(df_table[,1])
#df_table$Var1 <- as.factor(df_table$Var1)
df_table$Var1 <- as.character(df_table$Var1)
# aggregate df_table again by summing frequency for all equivalent dates
df_table<-aggregate(df_table$Freq,FUN=sum, by = list(df_table$Var1))
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
# expand values by their frequency so base table()/barplot() can be used
# NOTE(review): rep.int materializes one element per source row; for very
# large row counts this can be memory-heavy - confirm acceptable.
new_vector<-rep.int(df_table$Var1,df_table$Freq)
# creating a table out of it so that we can use in the barplot function
dfTab<-table(new_vector)
ordered_data<-df_table[order(-df_table[,2]), ]
total_values<- length(unique(df_table$Var1))
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
# not using ggplot here as it is very expensive for a large number of values
#vector_table <- as.vector(as.matrix(df_table$Freq))
# flog.info("here")
#barplot(df_table$Freq, df_table$Var1, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"), xaxt='n')
barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
while (index<=5 && nrow(ordered_data)>=index)
{
#if(!is.na(ordered_data[index,1])) # if NA then dont include as the top concept
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
return_message<-c(return_message, paste("\n Date range: ",min(df_table$Var1)," - ",max(df_table$Var1)))
# NOTE(review): Var1 is character ("YYYY-MM-DD") at this point, so this
# compares a Date against a character max - relies on implicit coercion;
# confirm it behaves as intended across R versions.
if(Sys.Date()<max(df_table$Var1))
return_message<-c(return_message,"\nWARNING: includes future dates")
dev.off()
return(return_message)
}
}
}
# Ordinal Fields (with large number of values)
#functionName: describeOrdinalField_large
#Description: generate a barplot for an ordinal field
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the ordinal field
#Output: write the barplot to a file
describeOrdinalField_large<-function(df_table, table_name,field_name,big_data_flag)
{
# Variant of describeOrdinalField for fields with many distinct values:
# uses base barplot (cheaper than ggplot) and returns a message listing the
# top-5 most frequent values.
flog.info(paste("Plotting for Field: ", field_name))
if(big_data_flag==FALSE)
{
column_index <- which(colnames(df_table)==field_name)
# flog.info(nrow(df_table))
if(nrow(df_table)>0)
{
dfTab <- table(df_table[,column_index])
if(nrow(dfTab)>0)
{
# (comment says "locations" but this is the per-value frequency table)
# flog.info(colnames(dfTab))
newdata <- df <- as.data.frame(dfTab)
# flog.info(newdata)
ordered_data<-newdata[order(newdata[,2], decreasing = TRUE),]
# flog.info(ordered_data)
# counting the total number of unique values
total_locations <- nrow(dfTab)
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
# not using ggplot here as it is very expensive for a large number of values
barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_locations,")"), ylab = paste(table_name,"Count")) #, xaxt='n')
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
# NOTE(review): no nrow(ordered_data) bound here (unlike describeTimeField),
# so with fewer than 5 distinct values NA entries are appended - confirm.
while (index<=5)
{
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
#return_message<-c(return_message, paste("Range: ",min(df_table)," - ",max(df_table)))
dev.off()
return(return_message)
}
}
}
else # for handling bigdata with dplyr
{
# query-wise input: first column holds the value, second its frequency
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
# create a raw vector (one element per source row) and then create a table
new_vector<-rep.int(df_table$Var1,df_table$Freq)
# creating a table out of it so that we can use in the barplot function
dfTab<-table(new_vector)
ordered_data<-df_table[order(df_table[,2], decreasing = TRUE),]
# flog.info(ordered_data)
# counting the total number of unique values
total_values <- nrow(df_table)
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
# not using ggplot here as it is very expensive for a large number of values
barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
#barplot(df_table$Freq, df_table$Var1, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count")) #, xaxt='y')
return_message<-paste("The most frequent values for",field_name,"are:","\n")
index<-1;
while (index<=5)
{
#return_message<-paste(return_message,ordered_data[index,1]);
# adding information on total counts also
return_message<-paste(return_message,ordered_data[index,1],"|count=",ordered_data[index,2]);
if(index<5)
return_message<-paste(return_message,",\n")
index<-index+1;
}
#return_message<-c(return_message, paste("Range: ",min(df_table)," - ",max(df_table)))
dev.off()
return(return_message)
}
}
}
#functionName: describeTimeField
#Description: plot the distribution of the time-of-day portion of a datetime
#  field (base barplot, saved as "<field>_time" PNG) and return a message
#  with the top-5 most frequent times and the observed time range
#Inputs: a dataframe (full table when big_data_flag=FALSE, or a
#  value/frequency pair when TRUE), the table name, and the field name
#Output: a character vector with the summary message
describeTimeField<-function(df_table, table_name,field_name,big_data_flag)
{
flog.info(paste("Plotting for Field: ", field_name))
if(big_data_flag==FALSE)
{
column_index <- which(colnames(df_table)==field_name)
# flog.info(nrow(df_table))
if(nrow(df_table)>0)
{
# all time fields are now datetime fields
#handling datetime fields in Oracle
df_table<-df_table[,column_index]
#df_table<-lapply(strsplit(format(df_table)," "), "[[", 2)
# check if contains timesize information
# if contains timezone information
#if(length(strsplit(df_table[1],"-")[[1]])==4)
# {
# remove timezone information
# NOTE(review): substr(.,12,19) assumes the "YYYY-MM-DD HH:MM:SS" layout;
# other datetime formats would yield garbage here - confirm input format.
df_table<-substr(df_table,12,19)
# }
#df_table<- paste((as.POSIXlt(df_table,format="%H:%M:%S"))$hour,":",(as.POSIXlt(df_table,format="%H:%M:%S"))$min,":",(as.POSIXlt(df_table,format="%H:%M:%S"))$sec,sep="")
dfTab <- table(df_table)
if(nrow(dfTab)>0)
{
# (comment says "locations" but this is the per-time frequency table)
# flog.info(colnames(dfTab))
newdata <- as.data.frame(dfTab)
# flog.info(newdata)
ordered_data<-newdata[order(newdata[,2], decreasing = TRUE),]
# flog.info(ordered_data)
# counting the total number of unique values
total_values <- nrow(dfTab)
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,paste(field_name,"_time",sep="")),sep=""))
# not using ggplot here as it is very expensive for a large number of values
barplot(dfTab, main = paste(field_name,": Time Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
#, xaxt='n')
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
while (index<=5 && nrow(ordered_data)>=index)
{
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
return_message<-c(return_message, paste("\n Time range: ",min(df_table)," - ",max(df_table)))
dev.off()
return(return_message)
}
}
}
else # for handling bigdata with dplyr and query wise
{
# query-wise input: first column holds the value, second its frequency
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
#if(length(strsplit(df_table[,1],"-")[[1]])==4)
#{
# remove timezone information and extract the HH:MM:SS portion
df_table$Var1<-substr(df_table$Var1,12,19)
#}
# remove NA values
#df_table$Var1 <- as.factor(df_table$Var1)
# re-aggregate since distinct datetimes can map to the same time of day
df_table<-aggregate(df_table$Freq,FUN=sum, by = list(df_table$Var1))
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
# expand values by frequency so base table()/barplot() can be used
new_vector<-rep.int(df_table$Var1,df_table$Freq)
# creating a table out of it so that we can use in the barplot function
dfTab<-table(new_vector)
ordered_data<-df_table[order(-df_table[,2]), ]
total_values<- length(unique(df_table$Var1))
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,paste(field_name,"_time",sep="")),sep=""))
# not using ggplot here as it is very expensive for a large number of values
#vector_table <- as.vector(as.matrix(df_table$Freq))
#vector_table_y <- as.vector(as.matrix(df_table$Freq))
# flog.info("here")
#barplot(df_table$Freq, df_table$Var1, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"), xaxt='n')
barplot(dfTab, main = paste(field_name,": Time Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
while (index<=5 && nrow(ordered_data)>=index)
{
#if(!is.na(ordered_data[index,1])) # if NA then dont include as the top concept
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
return_message<-c(return_message, paste(" \n Time range: ",min(df_table$Var1)," - ",max(df_table$Var1)))
dev.off()
return(return_message)
}
}
}
# Ratio Fields
#functionName: describeRatioField
#Description: generate a histogram for a ratio field
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the ratio field
#3. the unit associated with the field
#Output: write the histogram to a file
describeRatioField<-function(df_table,table_name,field_name, unit,big_data_flag)
{
  # Generate a histogram for a ratio (continuous numeric) field and return a
  # summary string (max, min, mean, std deviation, median).
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 two-column (value, frequency) frame (big_data_flag == TRUE)
  # table_name    : table the field belongs to (used in plot labels/file names)
  # field_name    : name of the ratio field being described
  # unit          : measurement unit, used only in the x-axis label
  # big_data_flag : selects between the raw-row and aggregated code paths
  #
  # Side effect: saves the histogram image under the configured site directory.
  # Returns NULL implicitly when the input is empty.
  flog.info(paste("Plotting for Field: ", field_name))
  if(big_data_flag==FALSE)
  {
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      # saving the frequencies in a separate dataframe (NA excluded by default)
      dfTab <-as.data.frame(table(df_table[,column_index]))
      if(nrow(dfTab)>0) # empty if all values are NULL
      {
        # calculating mean, standard deviation and other summary statistics
        mean_Var1<-round(mean(df_table[,column_index],trim=0,na.rm=TRUE), digits=2)
        sd_Var1<-round(sd(df_table[,column_index],na.rm=TRUE), digits=2)
        max_Var1<-df_table[which.max(df_table[,column_index]),column_index]
        min_Var1<-df_table[which.min(df_table[,column_index]),column_index]
        median_Var1<-round(median(df_table[,column_index],na.rm=TRUE), digits=2)
        # compute extreme outliers with the 3*IQR rule.
        # quantile() returns c(0%,25%,50%,75%,100%), so the quartiles sit at
        # indices 2 and 4. (The original used index 3 - the median - as the
        # upper quartile, which understated both thresholds.)
        lowerq <- quantile(df_table[,column_index],na.rm=TRUE)[2]
        upperq <- quantile(df_table[,column_index],na.rm=TRUE)[4]
        iqr <- upperq - lowerq # interquartile range
        extreme.threshold.upper <-(iqr * 3) + upperq
        extreme.threshold.lower <- lowerq - (iqr * 3)
        # cases outside the upper and lower threshold.
        total_data_points_upper_threshold<-nrow(subset(df_table,df_table[,column_index]>extreme.threshold.upper[[1]]) )
        total_data_points_below_lower_threshold<-nrow(subset(df_table,df_table[,column_index]<extreme.threshold.lower[[1]]) )
        # NOTE(review): outlier_message is composed but (as in the original)
        # is not part of the returned summary string.
        outlier_message<-paste("Investigate ",total_data_points_upper_threshold," data points above the upper threshold (",extreme.threshold.upper, ") and ",total_data_points_below_lower_threshold,
                               " data points below the lower threshold (", extreme.threshold.lower,")",sep="")
        # create histogram
        p<-ggplot(data=dfTab, aes(x=Var1, y=Freq, width=1, fill=Freq)) +
          geom_bar(stat="identity", position="identity") + ggtitle(paste(field_name,": Distribution\n(Mean=",mean_Var1," Std. Deviation=",sd_Var1,")"))
        p<-p+ylab(paste(table_name,"Count"))+xlab(paste(field_name,"(in",unit,")"))
        # remove legend and set size and orientation of tick labels
        p<-p+theme(legend.position="none", text = element_text(size=6),axis.text.x = element_text(angle=90, vjust=1), plot.background = element_blank() ,panel.grid.major = element_blank() ,panel.grid.minor = element_blank() ,panel.border = element_blank())
        ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
        return(paste("\n\nMaximum:",max_Var1 ,"Minimum:", min_Var1, "Mean:",mean_Var1,"Std Deviation:",sd_Var1,"Median:",median_Var1,"\n\n" ))
      }
    }
  }
  else # pre-aggregated (value, frequency) input from dplyr / query-wise scripts
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    df_table<-subset(df_table,!is.na(Var1))
    if(nrow(df_table)>0)
    {
      # expand the (value, frequency) pairs back into a raw vector so the
      # standard summary functions can be reused
      raw_vector<-rep.int(df_table$Var1, df_table$Freq)
      mean_Var1<-round(mean(raw_vector, na.rm=TRUE),2)
      sd_Var1<-round(sd(raw_vector, na.rm=TRUE),2)
      # the original computed median(mean(...), 2) - i.e. the median of one
      # number - report the actual median of the data instead
      median_Var1<-round(median(raw_vector, na.rm=TRUE),2)
      max_Var1<-df_table[which.max(df_table[,1]),1]
      min_Var1<-df_table[which.min(df_table[,1]),1]
      # compute extreme outliers with the 3*IQR rule (quartiles at indices 2 and 4)
      lowerq <- quantile(raw_vector,na.rm=TRUE)[2]
      upperq <- quantile(raw_vector,na.rm=TRUE)[4]
      iqr <- upperq - lowerq # interquartile range
      extreme.threshold.upper <-(iqr * 3) + upperq
      extreme.threshold.lower <- lowerq - (iqr * 3)
      # cases outside the upper and lower threshold.
      total_data_points_upper_threshold<-length(raw_vector[raw_vector>extreme.threshold.upper[[1]]])
      outlier_message<-""
      if(total_data_points_upper_threshold>0)
      {
        outlier_message<-paste("Investigate ",total_data_points_upper_threshold," data points above the upper threshold (",extreme.threshold.upper[[1]], ")"
                               ,sep="")
      }
      # clamp the lower threshold at zero (presumably because ratio fields are
      # non-negative - TODO confirm)
      if(extreme.threshold.lower[[1]]<0)
      {
        extreme.threshold.lower[[1]]<-0
      }
      total_data_points_below_lower_threshold<-length(raw_vector[raw_vector<extreme.threshold.lower[[1]]])
      if(total_data_points_below_lower_threshold>0)
      {
        outlier_message<-paste(outlier_message," Investigate ",total_data_points_below_lower_threshold,
                               " data points below the lower threshold (", extreme.threshold.lower,")",sep="")
      }
      # create histogram
      p<-ggplot(data=df_table, aes(x=Var1, y=Freq, width=1, fill=Freq)) +
        geom_bar(stat="identity", position="identity") + ggtitle(paste(field_name,": Distribution\n(Mean=",mean_Var1," Std. Deviation=",sd_Var1,")"))
      p<-p+ylab(paste(table_name,"Count"))+xlab(paste(field_name,"(in",unit,")"))
      # remove legend and set size and orientation of tick labels
      p<-p+theme(legend.position="none", text = element_text(size=6),axis.text.x = element_text(angle=90, vjust=1), plot.background = element_blank() ,panel.grid.major = element_blank() ,panel.grid.minor = element_blank() ,panel.border = element_blank())
      ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
      return(paste("\n\nMaximum:",max_Var1 ,"Minimum:", min_Var1, "Mean:",mean_Var1,"Std Deviation:",sd_Var1,"Median:",median_Var1,"\n\n" ))
    }
  }
}
# Foreign Key Fields
#functionName: describeForeignKeyIdentifiers
#Description: generate a barplot for a foreignkey field in a table
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the foreign key field
#Output: write the barplot to a file
describeForeignKeyIdentifiers<-function(df_table, table_name, field_name,big_data_flag)
{
  # Generate two frequency barplots for a foreign-key field (natural order and
  # sorted by decreasing frequency) and return a message listing up to the
  # five most frequent values.
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (value, frequency) frame (big_data_flag == TRUE)
  # table_name    : table the field belongs to (used in labels / file names)
  # field_name    : name of the foreign-key field
  # big_data_flag : selects between the raw-row and aggregated code paths
  #
  # Side effect: writes two png files into the configured site directory.
  # Returns NULL implicitly when the input is empty.
  flog.info(paste("Plotting for Field: ", field_name))
  if(big_data_flag==FALSE)
  {
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      dfTab <- table(df_table[,column_index])
      if(nrow(dfTab)>0)
      {
        # total number of distinct key values
        total_locations <- nrow(dfTab)
        png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
        # not using ggplot here as it is very expensive for a large number of values
        barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_locations,")"), ylab = paste(table_name,"Count"), xaxt='n')
        # also plot in decreasing order of frequency (to compare distribution with source data)
        df_dfTab<-data.frame(dfTab)
        ordered_dfTab<-df_dfTab[order(-df_dfTab[,2]),]
        png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name_sorted(table_name,field_name),sep=""))
        barplot(ordered_dfTab$Freq, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_locations,")"), ylab = paste(table_name,"Count"), xaxt='n')
        return_message<-paste("The most frequent values for",field_name,"are:")
        index<-1
        # list up to five values; the nrow() bound prevents reading past the
        # end of the frame when there are fewer than five distinct keys
        while (index<=5 && nrow(ordered_dfTab)>=index)
        {
          if(is.na(ordered_dfTab[index,1]))
          {
            # close both png devices before the early exit
            # (the original returned here without dev.off(), leaking devices)
            dev.off()
            dev.off()
            return(return_message)
          }
          return_message<-paste(return_message,ordered_dfTab[index,1])
          if(index<5)
            return_message<-paste(return_message,",")
          index<-index+1
        }
        # turn off both png devices
        dev.off()
        dev.off()
        return(return_message)
      }
    }
  }
  else # handling big data using dplyr / query-wise aggregation
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    # remove NA values
    df_table<-subset(df_table,!is.na(Var1))
    if(nrow(df_table)>0)
    {
      ordered_data<-df_table[order(-df_table[,2]), ]
      total_values<- length(unique(df_table$Var1))
      png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
      # not using ggplot here as it is very expensive for a large number of values
      barplot(df_table$Freq, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"), xaxt='n')
      # also plot in decreasing order of frequency (to compare distribution with source data)
      png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name_sorted(table_name,field_name),sep=""))
      barplot(ordered_data$Freq, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"), xaxt='n')
      return_message<-paste("The most frequent values for",field_name,"are:")
      index<-1
      # bound added so fewer than five distinct values no longer appends "NA"
      while (index<=5 && nrow(ordered_data)>=index)
      {
        return_message<-paste(return_message,ordered_data[index,1])
        if(index<5)
          return_message<-paste(return_message,",")
        index<-index+1
      }
      dev.off()
      dev.off()
      return(return_message)
    }
  }
}
#update values of concept_id fields to include concept names - to improve readability for plotting purposes
EnhanceFieldValues<-function(df_table,field_name,df_ref)
{
  # Replace raw concept ids in df_table[[field_name]] with the readable form
  # "<concept_name> (<concept_id>)" using the lookup frame df_ref, whose first
  # column holds the ids and second column the names.
  #
  # Vectorized with match() instead of the original per-reference-row loop;
  # values with no match - including NA, which made the original's logical
  # subscript assignment error out - are left untouched.
  #
  # Returns a copy of df_table with the enhanced column.
  df_table_enhanced<-df_table
  column_index <- which(colnames(df_table_enhanced)==field_name)
  # index of each cell value in the reference ids (NA where no match)
  match_index <- match(df_table_enhanced[,column_index], df_ref[,1])
  matched <- !is.na(match_index)
  df_table_enhanced[matched, column_index] <- paste(df_ref[match_index[matched],2]," (",df_ref[match_index[matched],1],")",sep="")
  return(df_table_enhanced)
}
# /Level1/library/CreatePlots.R | no_license | janetzahner/Data-Quality-Analysis | R | false | false | 44,303 | r |
#functionName: reportTotalCount
#Description: generate the total number of unique values for the primary key field of a given table
#Input:an R dataframe containing a given database table
#Output: the total number of records in the table, or the total number of unique values of the primary key identifier
#reportTotalCount<-function(df_table)
#{
# assuming the first field is the primary key field
#return(length(unique(df_table[,1])))
#}
describeIdentifier<-function(df_table, field_name)
{
  # Number of distinct values taken by an identifier column.
  # A column containing nothing but NA counts as 0 distinct values.
  column_index <- which(colnames(df_table)==field_name)
  distinct_values <- unique(df_table[,column_index])
  if (length(distinct_values)==1 && is.na(distinct_values))
  {
    return(0)
  }
  return(length(distinct_values))
}
#functionName: reportMissingCount
#Description: count the number of records with no value for a given field
#Inputs:an R dataframe containing a given database table, the name of the field
#Output: number of rows with NA (missing) values for the input field
reportMissingCount<-function(df_table,table_name,field_name, big_data_flag)
{
  # Report the percentage of rows with a missing (NA) value for field_name.
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (value, frequency) frame (big_data_flag == TRUE)
  # Returns a human-readable message string.
  if(big_data_flag==FALSE)
  {
    # retrieve the index of the field in the dataframe
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      # saving the frequencies in a separate dataframe (including NA values)
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      if(nrow(dfTab)>0)
      {
        # percentage label per value, rounded to 2 digits
        dfTab$label <- as.character(
          paste(
            round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
            ,'%') # add percentage
        )
        # table(..., exclude=NULL) places the NA bucket last, but that bucket
        # only exists when NAs are present; the original unconditionally read
        # the last row and so reported an arbitrary value's percentage as
        # "missing" on NA-free columns.
        if(any(is.na(df_table[,column_index])))
          return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is ",dfTab[nrow(dfTab),3]));
        return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 0%"));
      }
      else
        return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 100%"));
    }
    else
      return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 100%"));
  } else # big data with dplyr or query wise that captures frequency
  {
    colnames(df_table)[2] <- "Freq"
    # identify row with null value
    new_df_table<-subset(df_table, is.na(df_table[1]))
    if(nrow(new_df_table)>0) # if there is a null value
    {
      # percentage label per value, rounded to 2 digits
      df_table$label <- as.character(
        paste(
          round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
          ,'%') # add percentage
      )
      # find the row that contains the frequency for the NA value
      na_df_table<-subset(df_table, is.na(df_table[1]))
      return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is ",na_df_table[1,3]));
    }
    else
      return(paste("\n\nPercentage of",table_name,"with missing values for ",field_name," is 0%"));
  }
}
#functionName: reportNoMatchingCount
#Description: count the number of records with mo matching concepts (concept_id=0)
#Inputs:an R dataframe containing a given database table, the name of the field
#Output: number of rows with 0 value for the input field
reportNoMatchingCount<-function(df_table,table_name,field_name, big_data_flag)
{
  # Report the percentage of rows whose concept field has no matching concept
  # (i.e. value == 0, the OMOP "unmapped" sentinel).
  #
  # The original non-big-data branch was a copy-paste of reportMissingCount():
  # it reported the NA bucket and said "missing values". It now counts the
  # value 0 and uses the same wording as the big-data branch.
  if(big_data_flag==FALSE)
  {
    # retrieve the index of the field in the dataframe
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      # saving the frequencies in a separate dataframe (including NA values)
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      if(nrow(dfTab)>0)
      {
        # percentage label per value, rounded to 2 digits
        dfTab$label <- as.character(
          paste(
            round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
            ,'%') # add percentage
        )
        # locate the bucket for value 0 (as.character() keeps the NA level
        # out of the comparison; which() drops NAs)
        zero_index <- which(as.character(dfTab$Var1)=="0")
        if(length(zero_index)>0)
          return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is ",dfTab[zero_index[1],3]));
        return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
      }
      else
        return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
    }
    else
      return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
  }
  else # big data with dplyr or query wise that captures frequency
  {
    colnames(df_table)[2] <- "Freq"
    # percentage label per value, rounded to 2 digits
    df_table$label <- as.character(
      paste(
        round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
        ,'%') # add percentage
    )
    # identify row with no matching concept
    new_df_table<-subset(df_table,df_table[1]==0)
    if(nrow(new_df_table)>0) # if there is a 0 value
    {
      return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is ",new_df_table[1,3]));
    }
    if(nrow(new_df_table)==0)
      return(paste("\n\nPercentage of",table_name,"with no matching concepts in ",field_name," is 0%"));
  }
}
reportNullFlavors<-function(df_table,table_name,field_name,UN_code,OT_code,NI_code,big_data_flag)
{
  # Report the percentage of rows holding the null-flavor codes UN (unknown),
  # OT (other) and NI (no information), plus the percentage of missing (NA)
  # rows, for field_name.
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (value, frequency) frame (big_data_flag == TRUE)
  # Returns a single human-readable message string.
  if(big_data_flag==FALSE)
  {
    # retrieve the index of the field in the dataframe
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      # saving the frequencies in a separate dataframe (including NA values)
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      # percentage label per value, rounded to 2 digits
      dfTab$label <- as.character(
        paste(
          round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
          ,'%') # add percentage
      )
      count_ni<-subset(dfTab,Var1==NI_code)$label[1]
      if(is.na(count_ni)) count_ni<-"0%";
      count_un<-subset(dfTab,Var1==UN_code)$label[1]
      if(is.na(count_un)) count_un<-"0%";
      count_ot<-subset(dfTab,Var1==OT_code)$label[1]
      if(is.na(count_ot)) count_ot<-"0%";
      # the NA bucket is last in dfTab, but only exists when NAs are present;
      # the original read the last row unconditionally and therefore reported
      # a regular value's percentage as "missing" on NA-free columns.
      if(any(is.na(df_table[,column_index])))
        count_missing_values<-dfTab[nrow(dfTab),3]
      else
        count_missing_values<-"0%";
      return(paste(
        "\nPercentage of",table_name,"with unknown value for ",field_name," is ",count_un,"\n",
        "\nPercentage of",table_name,"with other value for ",field_name," is ",count_ot,"\n",
        "\nPercentage of",table_name,"with no information for ",field_name," is ",count_ni,"\n",
        "\nPercentage of",table_name,"with missing values for ",field_name," is ",count_missing_values,"\n"
      ));
    }
    else # no rows at all (the original fell through and returned NULL here)
    {
      return(paste(
        "\nPercentage of",table_name,"with unknown value for ",field_name," is 0%\n",
        "\nPercentage of",table_name,"with other value for ",field_name," is 0%\n",
        "\nPercentage of",table_name,"with no information for ",field_name," is 0%\n",
        "\nPercentage of",table_name,"with missing values for ",field_name," is 100%\n"
      ));
    }
  } else # using dplyr or query wise - when big data flag is true
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    df_table<-subset(df_table,!is.na(Var1))
    if(nrow(df_table)>0)
    {
      df_table$Var1 <- as.factor(df_table$Var1)
      # percentage label per value, rounded to 2 digits
      df_table$label <- as.character(
        paste(
          round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
          ,'%') # add percentage
      )
      count_ni<-subset(df_table,Var1==NI_code)$label[1]
      if(is.na(count_ni)) count_ni<-"0%";
      count_un<-subset(df_table,Var1==UN_code)$label[1]
      if(is.na(count_un)) count_un<-"0%";
      count_ot<-subset(df_table,Var1==OT_code)$label[1]
      if(is.na(count_ot)) count_ot<-"0%";
      # NA rows were filtered out above, so this resolves to "0%" here
      count_missing_values<-subset(df_table,is.na(Var1))$label[1]
      if(is.na(count_missing_values)) count_missing_values<-"0%";
      return(paste(
        "\nPercentage of",table_name,"with unknown value for ",field_name," is ",count_un,"\n",
        "\nPercentage of",table_name,"with other value for ",field_name," is ",count_ot,"\n",
        "\nPercentage of",table_name,"with no information for ",field_name," is ",count_ni,"\n",
        "\nPercentage of",table_name,"with missing values for ",field_name," is ",count_missing_values,"\n"
      ));
    } # end of if
    else
    {
      # empty aggregate: every row is missing
      # (the "other value" line was missing the word "is" in the original)
      return(paste(
        "\nPercentage of",table_name,"with unknown value for ",field_name," is 0%\n",
        "\nPercentage of",table_name,"with other value for ",field_name," is 0%\n",
        "\nPercentage of",table_name,"with no information for ",field_name," is 0%\n",
        "\nPercentage of",table_name,"with missing values for ",field_name," is 100%\n"
      ));
    }
  } # end else
}
reportUnexpected<-function(df_table,table_name,field_name,permissible_values,big_data_flag)
{
  # List values of field_name that are not in the set of permissible values.
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (value, frequency) frame (big_data_flag == TRUE)
  # Returns "" when every observed value is permissible.
  #
  # NAs are now skipped in both branches (the original only skipped them in
  # the big-data branch, so raw-row input reported "invalid value found: NA");
  # missing values are reported separately by reportMissingCount().
  return_message<-""
  if(big_data_flag==FALSE)
  {
    # retrieve the index of the field in the dataframe
    column_index <- which(colnames(df_table)==field_name)
    current_values<-unique(df_table[,column_index])
    for( value in current_values)
    {
      if(!is.element(value,permissible_values) && !is.na(value))
        return_message<-paste(return_message, "invalid value found: ",value,";")
    }
  }
  else # with dplyr package or query wise scripts
  {
    # seq_len() keeps the loop empty on a zero-row frame; the original's
    # 1:nrow() produced c(1, 0) and errored on empty input
    for(i in seq_len(nrow(df_table)))
    {
      value <-df_table[i,1]
      if(!is.element(value,permissible_values) && !is.na(value))
        return_message<-paste(return_message, "invalid value found: ",value,";")
    }
  }
  return(return_message)
}
# Nominal Fields
#functionName: describeNominalField
#Description: generate a barplot for a nominal field
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the nominal field
#3. label_bin: pre-defined labels for various bins
#4. order_bins: a fixed order for various bins on the plot
#5: color_bins: colors assigned to each bin
#Output: write the barplot to a file
describeNominalField<-function(df_table, table_name,field_name, label_bins, order_bins, color_bins, big_data_flag)
{
  # Generate a percentage-labelled bar chart for a nominal (categorical)
  # field using caller-supplied bin labels, ordering and colors, and save it
  # under the configured reporting site directory.
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (value, frequency) frame (big_data_flag == TRUE)
  # table_name    : table the field belongs to (used in the y-axis label and
  #                 the image file name)
  # field_name    : name of the nominal field
  # label_bins    : pre-defined labels for the bins on the x-axis
  # order_bins    : fixed display order of the bins on the plot
  # color_bins    : fill color assigned to each bin
  # big_data_flag : selects between the raw-row and aggregated code paths
  #
  # Called for the ggsave() side effect; no meaningful return value.
  flog.info(paste("Plotting for Field: ", field_name))
  if(big_data_flag==FALSE)
  {
    # retrieve the column index for the field
    column_index <- which(colnames(df_table)==field_name)
    # saving the frequencies and percentage in a separate dataframe including NA values
    if(nrow(df_table)>0)
    {
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      # percentage label per bin, rounded to 2 digits
      dfTab$label <- as.character(
        paste(
          round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
          ,'%') # add percentage
      )
      # creating barplot from dfTab dataframe
      p<-ggplot(dfTab, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
      # add axis labels
      p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
      # specify the order of categories and also the labels for the x-axis
      p<-p+scale_x_discrete(labels=label_bins, limits= order_bins)
      # specify the color for each category; NA bins fall back to grey
      p<-p+ scale_fill_manual(values=color_bins,na.value="grey64")
      # remove legend and set size and orientation of tick labels
      p<-p+theme(legend.position="none", text = element_text(size=10),
                 axis.text.x = element_text(angle=90, vjust=1))
      # add the percentage label to each bar (from the dfTab dataframe)
      p<-p+geom_text(data=dfTab, aes(x=Var1,y=Freq,label=label), size=3)
      # save the barplot image (will be referenced by the final report)
      ggsave(file=paste(normalize_directory_path(g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
    }
  }
  else # using dplyr: df_table already holds (value, frequency) pairs
  {
    if(nrow(df_table)>0)
    {
      # adopt the same column names the raw-row branch produces
      colnames(df_table)[1] <- "Var1"
      colnames(df_table)[2] <- "Freq"
      df_table$Var1 <- as.factor(df_table$Var1)
      # percentage label per bin, rounded to 2 digits
      df_table$label <- as.character(
        paste(
          round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
          ,'%') # add percentage
      )
      # creating barplot from the aggregated dataframe
      p<-ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
      # add axis labels
      p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
      # specify the order of categories and also the labels for the x-axis
      p<-p+scale_x_discrete(labels=label_bins, limits= order_bins)
      # specify the color for each category; NA bins fall back to grey
      p<-p+ scale_fill_manual(values=color_bins,na.value="grey64")
      # remove legend and set size and orientation of tick labels
      p<-p+theme(legend.position="none", text = element_text(size=10),
                 axis.text.x = element_text(angle=90, vjust=1))
      # add the percentage label to each bar
      p<-p+geom_text(data=df_table, aes(x=Var1,y=Freq,label=label), size=3)
      # save the barplot image (will be referenced by the final report)
      ggsave(file=paste(normalize_directory_path(g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
    }
  }
}
#updated nominal field
describeNominalField_basic<-function(df_table, table_name,field_name,big_data_flag)
{
  # Generate a percentage-labelled bar chart for a nominal field without the
  # predefined bin labels / ordering / colors used by describeNominalField().
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (value, frequency) frame (big_data_flag == TRUE)
  # table_name    : table the field belongs to (used in labels / file names)
  # field_name    : name of the nominal field
  # big_data_flag : selects between the raw-row and aggregated code paths
  #
  # Called for the ggsave() side effect; returns "" (raw-row path) when the
  # column contains nothing but NA.
  flog.info(paste("Plotting for Field: ", field_name))
  if(big_data_flag==FALSE)
  {
    # retrieve the column index for the field
    column_index <- which(colnames(df_table)==field_name)
    # saving the frequencies and percentage in a separate dataframe;
    # NA is deliberately kept as its own bin for this field
    if(nrow(df_table)>0)
    {
      dfTab <-as.data.frame(table(df_table[,column_index], exclude=NULL))
      if(nrow(dfTab)>0)
      {
        # percentage label per bin, rounded to 2 digits
        dfTab$label <- as.character(
          paste(
            round(100 * dfTab$Freq / sum(dfTab$Freq),digits=2)
            ,'%') # add percentage
        )
        # nothing worth plotting when the only bin is NA
        if(nrow(dfTab)==1 && is.na(dfTab[1]))
          return("");
        # creating barplot from dfTab dataframe
        p<-ggplot(dfTab, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
        # add axis labels
        p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
        # remove legend and set size and orientation of tick labels
        p<-p+theme(legend.position="none", text = element_text(size=10),
                   axis.text.x = element_text(angle=90, vjust=1))
        # add the percentage label to each bar (from the dfTab dataframe)
        p<-p+geom_text(data=dfTab, aes(x=Var1,y=Freq,label=label), size=3)
        # save the barplot image (will be referenced by the final report)
        ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
      }
    }
  }
  else # pre-aggregated (value, frequency) input from dplyr / SQL
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    if(nrow(df_table)>0)
    {
      df_table$Var1 <- as.factor(df_table$Var1)
      # percentage label per bin, rounded to 2 digits
      df_table$label <- as.character(
        paste(
          round(100 * df_table$Freq / sum(df_table$Freq),digits=2)
          ,'%') # add percentage
      )
      # creating barplot from the aggregated dataframe
      p<-ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) + geom_bar(stat = "identity") + ggtitle(paste(field_name,": Distribution"))
      # add axis labels
      p<-p+ylab(paste(table_name,"Count"))+xlab(field_name)
      # remove legend and set size and orientation of tick labels
      p<-p+theme(legend.position="none", text = element_text(size=10),
                 axis.text.x = element_text(angle=90, vjust=1))
      # add the percentage label to each bar
      p<-p+geom_text(data=df_table, aes(x=Var1,y=Freq,label=label), size=3)
      # save the barplot image (will be referenced by the final report)
      ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
    }
  }
}
# Ordinal Fields
#functionName: describeOrdinalField
#Description: generate a barplot for an ordinal field
#Inputs:
#1. an R dataframe containing a given database table
#2. the name of the ordinal field
#Output: write the barplot to a file
describeOrdinalField<-function(df_table, table_name,field_name,big_data_flag)
{
  # Draw a bar chart of value frequencies for an ordinal field and save it
  # under the configured reporting site directory. Called for the ggsave()
  # side effect; no meaningful return value.
  flog.info(paste("Plotting for Field: ", field_name))
  if(big_data_flag==FALSE)
  {
    # raw rows: build the frequency table ourselves (NA excluded by default)
    field_column <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      freq_frame <- as.data.frame(table(df_table[,field_column]))
      if(nrow(freq_frame)>0)
      {
        # bar chart with axis labels; legend removed, tick labels rotated
        bar_chart <- ggplot(freq_frame, aes(x = Var1, y = Freq, fill = Var1)) +
          geom_bar(stat = "identity") +
          ggtitle(paste(field_name,": Distribution")) +
          ylab(paste(table_name,"Count")) +
          xlab(field_name) +
          theme(legend.position="none", text = element_text(size=6),axis.text.x = element_text(angle=90, vjust=1), plot.background = element_blank() ,panel.grid.major = element_blank() ,panel.grid.minor = element_blank() ,panel.border = element_blank())
        # save the image (will be referenced by the final report)
        ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
      }
    }
  }
  else # pre-aggregated (value, frequency) input from dplyr / SQL
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    df_table<-subset(df_table,!is.na(Var1))
    if(nrow(df_table)>0)
    {
      df_table$Var1 <- as.factor(df_table$Var1)
      # same chart as the raw-row branch, built from the aggregated frame
      bar_chart <- ggplot(df_table, aes(x = Var1, y = Freq, fill = Var1)) +
        geom_bar(stat = "identity") +
        ggtitle(paste(field_name,": Distribution")) +
        ylab(paste(table_name,"Count")) +
        xlab(field_name) +
        theme(legend.position="none", text = element_text(size=6),axis.text.x = element_text(angle=90, vjust=1), plot.background = element_blank() ,panel.grid.major = element_blank() ,panel.grid.minor = element_blank() ,panel.border = element_blank())
      # save the image (will be referenced by the final report)
      ggsave(file=paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
    }
  }
}
# specialized date function
describeDateField<-function(df_table, table_name,field_name,big_data_flag)
{
  # Plot the distribution of a date field and return a message listing the
  # five most frequent dates plus the overall date range; warns when the data
  # contains dates in the future.
  #
  # df_table      : raw table rows (big_data_flag == FALSE) or a pre-aggregated
  #                 (datetime, frequency) frame (big_data_flag == TRUE)
  # table_name    : table the field belongs to (used in labels / file names)
  # field_name    : name of the date field
  # big_data_flag : selects between the raw-row and aggregated code paths
  #
  # Side effect: writes a barplot png into the configured site directory.
  # Returns NULL implicitly when the input is empty.
  if(big_data_flag==FALSE)
  {
    flog.info(paste("Plotting for Field: ", field_name))
    column_index <- which(colnames(df_table)==field_name)
    if(nrow(df_table)>0)
    {
      df_table<-df_table[,column_index]
      # as.Date() drops the time-of-day portion of the datetime values
      df_table<-as.Date(df_table)
      dfTab <- table(df_table)
      if(nrow(dfTab)>0)
      {
        # per-date frequency frame used to rank the dates
        # (the chained assignment also creates an unused variable `df`)
        newdata <- df <- as.data.frame(dfTab)
        ordered_data<-newdata[order(newdata[,2], decreasing = TRUE),]
        # counting the total number of unique values
        total_locations <- nrow(dfTab)
        png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
        # not using ggplot here as it is very expensive for a large number of values
        barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_locations,")"), ylab = paste(table_name,"Count"))
        return_message<-paste("The most frequent values for",field_name,"are:")
        index<-1;
        # NOTE(review): no nrow() bound here - with fewer than five distinct
        # dates this pastes "NA" into the message (the big-data branch below
        # guards against this).
        while (index<=5)
        {
          return_message<-paste(return_message,ordered_data[index,1]);
          if(index<5)
            return_message<-paste(return_message,",")
          index<-index+1;
        }
        return_message<-c(return_message, paste("\n Date range: ",min(df_table,na.rm=TRUE),"-",max(df_table,na.rm=TRUE)))
        # check if future dates are included
        if(Sys.Date()<max(df_table,na.rm=TRUE))
          return_message<-c(return_message,"\nWARNING: includes future dates")
        dev.off()
        return(return_message)
      }
    }
  }
  else # pre-aggregated (datetime, frequency) input from dplyr / query-wise scripts
  {
    df_table<-subset(df_table,!is.na(df_table[,1]))
    if(nrow(df_table)>0)
    {
      # df_table arrives as a two-column (value, frequency) frame
      colnames(df_table)[1] <- "Var1"
      colnames(df_table)[2] <- "Freq"
      df_table$Var1<-as.Date(df_table[,1])
      df_table$Var1 <- as.character(df_table$Var1)
      # aggregate again: distinct datetimes can collapse to the same date
      df_table<-aggregate(df_table$Freq,FUN=sum, by = list(df_table$Var1))
      colnames(df_table)[1] <- "Var1"
      colnames(df_table)[2] <- "Freq"
      # expand the pairs back into a raw vector so table()/barplot() can be reused
      new_vector<-rep.int(df_table$Var1,df_table$Freq)
      # creating a table out of it so that we can use in the barplot function
      dfTab<-table(new_vector)
      ordered_data<-df_table[order(-df_table[,2]), ]
      total_values<- length(unique(df_table$Var1))
      png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,field_name),sep=""))
      # not using ggplot here as it is very expensive for a large number of values
      barplot(dfTab, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
      return_message<-paste("The most frequent values for",field_name,"are:")
      index<-1;
      while (index<=5 && nrow(ordered_data)>=index)
      {
        return_message<-paste(return_message,ordered_data[index,1]);
        if(index<5)
          return_message<-paste(return_message,",")
        index<-index+1;
      }
      # NOTE(review): Var1 is character here, so min()/max() are string
      # comparisons - correct only for ISO yyyy-mm-dd dates; confirm the
      # upstream datetime format.
      return_message<-c(return_message, paste("\n Date range: ",min(df_table$Var1)," - ",max(df_table$Var1)))
      if(Sys.Date()<max(df_table$Var1))
        return_message<-c(return_message,"\nWARNING: includes future dates")
      dev.off()
      return(return_message)
    }
  }
}
# Ordinal Fields (with a large number of values)
# functionName: describeOrdinalField_large
# Description: generate a barplot for an ordinal field and return a summary
#              message listing the most frequent values.
# Inputs:
# 1. df_table      - an R dataframe containing a given database table, or an
#                    aggregated (value, frequency) table when big_data_flag is TRUE
# 2. table_name    - name of the database table (used in labels/filenames)
# 3. field_name    - the name of the ordinal field
# 4. big_data_flag - TRUE when df_table is already a value/frequency table
# Output: writes the barplot to a png file; returns the summary message
#         (or NULL implicitly when there is nothing to plot).
describeOrdinalField_large <- function(df_table, table_name, field_name, big_data_flag)
{
  flog.info(paste("Plotting for Field: ", field_name))
  if (big_data_flag == FALSE)
  {
    column_index <- which(colnames(df_table) == field_name)
    if (nrow(df_table) > 0)
    {
      dfTab <- table(df_table[, column_index])
      if (nrow(dfTab) > 0)
      {
        newdata <- as.data.frame(dfTab)
        ordered_data <- newdata[order(newdata[, 2], decreasing = TRUE), ]
        # total number of unique values
        total_locations <- nrow(dfTab)
        png(paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name(table_name, field_name), sep = ""))
        # not using ggplot here as it is very expensive for a large number of values
        barplot(dfTab, main = paste(field_name, ": Distribution"), xlab = paste(field_name, "(Total: ", total_locations, ")"), ylab = paste(table_name, "Count"))
        return_message <- paste("The most frequent values for", field_name, "are:")
        index <- 1
        # guard on nrow() so fewer than 5 distinct values do not append "NA"
        # entries (consistent with the other describe* helpers in this file)
        while (index <= 5 && nrow(ordered_data) >= index)
        {
          return_message <- paste(return_message, ordered_data[index, 1])
          if (index < 5)
            return_message <- paste(return_message, ",")
          index <- index + 1
        }
        dev.off()
        return(return_message)
      }
    }
  }
  else # aggregated (dplyr/query-wise) input: two columns, value and frequency
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    # remove NA values - we don't want to plot missing values
    df_table <- subset(df_table, !is.na(Var1))
    if (nrow(df_table) > 0)
    {
      # create a raw vector and tabulate it so it can be fed to barplot()
      new_vector <- rep.int(df_table$Var1, df_table$Freq)
      dfTab <- table(new_vector)
      ordered_data <- df_table[order(df_table[, 2], decreasing = TRUE), ]
      # total number of unique values
      total_values <- nrow(df_table)
      png(paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name(table_name, field_name), sep = ""))
      # not using ggplot here as it is very expensive for a large number of values
      barplot(dfTab, main = paste(field_name, ": Distribution"), xlab = paste(field_name, "(Total: ", total_values, ")"), ylab = paste(table_name, "Count"))
      return_message <- paste("The most frequent values for", field_name, "are:", "\n")
      index <- 1
      # guard on nrow() so fewer than 5 distinct values do not append "NA" entries
      while (index <= 5 && nrow(ordered_data) >= index)
      {
        # include total counts as well
        return_message <- paste(return_message, ordered_data[index, 1], "|count=", ordered_data[index, 2])
        if (index < 5)
          return_message <- paste(return_message, ",\n")
        index <- index + 1
      }
      dev.off()
      return(return_message)
    }
  }
}
# Time fields:
# generate a barplot of the time-of-day distribution for a datetime field and
# return a summary message listing the most frequent values and the time range.
#
# Inputs:
#   df_table      - raw table (big_data_flag == FALSE) or an aggregated
#                   (value, frequency) table (big_data_flag == TRUE)
#   table_name    - table name used in labels and the output file name
#   field_name    - name of the datetime field to describe
#   big_data_flag - TRUE when df_table is already aggregated
# Output: writes the barplot to a png file (name suffixed with "_time");
#         returns the summary message, or NULL implicitly when empty.
describeTimeField<-function(df_table, table_name,field_name,big_data_flag)
{
flog.info(paste("Plotting for Field: ", field_name))
if(big_data_flag==FALSE)
{
column_index <- which(colnames(df_table)==field_name)
# flog.info(nrow(df_table))
if(nrow(df_table)>0)
{
# all time fields are now datetime fields
#handling datetime fields in Oracle
df_table<-df_table[,column_index]
#df_table<-lapply(strsplit(format(df_table)," "), "[[", 2)
# check if it contains timezone information
#if(length(strsplit(df_table[1],"-")[[1]])==4)
# {
# extract the HH:MM:SS portion, dropping date and timezone information;
# assumes values are formatted "YYYY-MM-DD HH:MM:SS..." -- TODO confirm
df_table<-substr(df_table,12,19)
# }
#df_table<- paste((as.POSIXlt(df_table,format="%H:%M:%S"))$hour,":",(as.POSIXlt(df_table,format="%H:%M:%S"))$min,":",(as.POSIXlt(df_table,format="%H:%M:%S"))$sec,sep="")
dfTab <- table(df_table)
if(nrow(dfTab)>0)
{
# frequency table as a data frame, sorted by descending count
# flog.info(colnames(dfTab))
newdata <- as.data.frame(dfTab)
# flog.info(newdata)
ordered_data<-newdata[order(newdata[,2], decreasing = TRUE),]
# flog.info(ordered_data)
# counting the total number of unique values
total_values <- nrow(dfTab)
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,paste(field_name,"_time",sep="")),sep=""))
# not using ggplot here as it is very expensive for a large number of values
barplot(dfTab, main = paste(field_name,": Time Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
#, xaxt='n')
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
# report at most the top 5 values; nrow() guard avoids "NA" entries
while (index<=5 && nrow(ordered_data)>=index)
{
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
# min/max compare the "HH:MM:SS" strings lexicographically, which matches
# chronological order for this fixed-width format
return_message<-c(return_message, paste("\n Time range: ",min(df_table)," - ",max(df_table)))
dev.off()
return(return_message)
}
}
}
else # for handling big data with dplyr / query-wise (pre-aggregated input)
{
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
df_table<-subset(df_table,!is.na(Var1))
if(nrow(df_table)>0)
{
#if(length(strsplit(df_table[,1],"-")[[1]])==4)
#{
# remove date/timezone information and extract the HH:MM:SS portion
df_table$Var1<-substr(df_table$Var1,12,19)
#}
# remove NA values
#df_table$Var1 <- as.factor(df_table$Var1)
# several datetimes can map to the same time of day, so re-aggregate
df_table<-aggregate(df_table$Freq,FUN=sum, by = list(df_table$Var1))
colnames(df_table)[1] <- "Var1"
colnames(df_table)[2] <- "Freq"
new_vector<-rep.int(df_table$Var1,df_table$Freq)
# creating a table out of it so that we can use it in the barplot function
dfTab<-table(new_vector)
ordered_data<-df_table[order(-df_table[,2]), ]
total_values<- length(unique(df_table$Var1))
png(paste(normalize_directory_path( g_config$reporting$site_directory),get_image_name(table_name,paste(field_name,"_time",sep="")),sep=""))
# not using ggplot here as it is very expensive for a large number of values
#vector_table <- as.vector(as.matrix(df_table$Freq))
#vector_table_y <- as.vector(as.matrix(df_table$Freq))
# flog.info("here")
#barplot(df_table$Freq, df_table$Var1, main = paste(field_name,": Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"), xaxt='n')
barplot(dfTab, main = paste(field_name,": Time Distribution"), xlab = paste(field_name,"(Total: ",total_values,")"), ylab = paste(table_name,"Count"))
return_message<-paste("The most frequent values for",field_name,"are:")
index<-1;
# report at most the top 5 values; nrow() guard avoids "NA" entries
while (index<=5 && nrow(ordered_data)>=index)
{
#if(!is.na(ordered_data[index,1])) # if NA then dont include as the top concept
return_message<-paste(return_message,ordered_data[index,1]);
if(index<5)
return_message<-paste(return_message,",")
index<-index+1;
}
return_message<-c(return_message, paste(" \n Time range: ",min(df_table$Var1)," - ",max(df_table$Var1)))
dev.off()
return(return_message)
}
}
}
# Ratio Fields
# functionName: describeRatioField
# Description: generate a histogram for a ratio (numeric) field and return
#              a message with summary statistics.
# Inputs:
# 1. df_table      - an R dataframe containing a given database table, or an
#                    aggregated (value, frequency) table when big_data_flag is TRUE
# 2. table_name    - table name used in labels and the output file name
# 3. field_name    - the name of the ratio field
# 4. unit          - the unit associated with the field (x-axis label)
# 5. big_data_flag - TRUE when df_table is already aggregated
# Output: writes the histogram to a file; returns a message with
#         max/min/mean/sd/median, or NULL implicitly when empty.
describeRatioField <- function(df_table, table_name, field_name, unit, big_data_flag)
{
  flog.info(paste("Plotting for Field: ", field_name))
  if (big_data_flag == FALSE)
  {
    column_index <- which(colnames(df_table) == field_name)
    if (nrow(df_table) > 0)
    {
      # saving the frequencies in a separate dataframe
      dfTab <- as.data.frame(table(df_table[, column_index]))
      if (nrow(dfTab) > 0) # skip when all values are NULL
      {
        # calculating mean, standard deviation and other summary statistics
        mean_Var1 <- round(mean(df_table[, column_index], trim = 0, na.rm = TRUE), digits = 2)
        sd_Var1 <- round(sd(df_table[, column_index], na.rm = TRUE), digits = 2)
        max_Var1 <- df_table[which.max(df_table[, column_index]), column_index]
        min_Var1 <- df_table[which.min(df_table[, column_index]), column_index]
        median_Var1 <- round(median(df_table[, column_index], na.rm = TRUE), digits = 2)
        # compute extreme outliers; quantile() returns the 0/25/50/75/100%
        # quantiles, so [2] is Q1 and [4] is Q3 (the old code used [3], the
        # median, which made "iqr" not the interquartile range)
        lowerq <- quantile(df_table[, column_index], na.rm = TRUE)[2]
        upperq <- quantile(df_table[, column_index], na.rm = TRUE)[4]
        iqr <- upperq - lowerq # interquartile range
        extreme.threshold.upper <- (iqr * 3) + upperq
        extreme.threshold.lower <- lowerq - (iqr * 3)
        # cases outside the upper and lower threshold (informational only;
        # currently not included in the returned message)
        total_data_points_upper_threshold <- nrow(subset(df_table, df_table[, column_index] > extreme.threshold.upper[[1]]))
        total_data_points_below_lower_threshold <- nrow(subset(df_table, df_table[, column_index] < extreme.threshold.lower[[1]]))
        outlier_message <- paste("Investigate ", total_data_points_upper_threshold, " data points above the upper threshold (", extreme.threshold.upper, ") and ", total_data_points_below_lower_threshold,
                                 " data points below the lower threshold (", extreme.threshold.lower, ")", sep = "")
        # create histogram
        p <- ggplot(data = dfTab, aes(x = Var1, y = Freq, width = 1, fill = Freq)) +
          geom_bar(stat = "identity", position = "identity") + ggtitle(paste(field_name, ": Distribution\n(Mean=", mean_Var1, " Std. Deviation=", sd_Var1, ")"))
        p <- p + ylab(paste(table_name, "Count")) + xlab(paste(field_name, "(in", unit, ")"))
        # remove legend and set size and orientation of tick labels
        p <- p + theme(legend.position = "none", text = element_text(size = 6), axis.text.x = element_text(angle = 90, vjust = 1), plot.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank())
        ggsave(file = paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name(table_name, field_name), sep = ""))
        return(paste("\n\nMaximum:", max_Var1, "Minimum:", min_Var1, "Mean:", mean_Var1, "Std Deviation:", sd_Var1, "Median:", median_Var1, "\n\n"))
      }
    }
  }
  else # aggregated (dplyr) input: two columns, value and frequency
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    df_table <- subset(df_table, !is.na(Var1))
    if (nrow(df_table) > 0)
    {
      # expand the value/frequency pairs back into a raw vector for statistics
      raw_vector <- rep.int(df_table$Var1, df_table$Freq)
      mean_Var1 <- round(mean(raw_vector, na.rm = TRUE), 2)
      sd_Var1 <- round(sd(raw_vector, na.rm = TRUE), 2)
      # fixed: the old code computed median(mean(...), 2) -- the median of a
      # single scalar -- instead of the rounded median of the data
      median_Var1 <- round(median(raw_vector, na.rm = TRUE), 2)
      max_Var1 <- df_table[which.max(df_table[, 1]), 1]
      min_Var1 <- df_table[which.min(df_table[, 1]), 1]
      # compute extreme outliers; [2] is Q1 and [4] is Q3 (see note above)
      lowerq <- quantile(raw_vector, na.rm = TRUE)[2]
      upperq <- quantile(raw_vector, na.rm = TRUE)[4]
      iqr <- upperq - lowerq # interquartile range
      extreme.threshold.upper <- (iqr * 3) + upperq
      extreme.threshold.lower <- lowerq - (iqr * 3)
      # cases outside the upper and lower threshold
      total_data_points_upper_threshold <- length(raw_vector[raw_vector > extreme.threshold.upper[[1]]])
      outlier_message <- ""
      if (total_data_points_upper_threshold > 0)
      {
        outlier_message <- paste("Investigate ", total_data_points_upper_threshold, " data points above the upper threshold (", extreme.threshold.upper[[1]], ")"
                                 , sep = "")
      }
      # negative lower thresholds make no sense for ratio fields; clamp at zero
      if (extreme.threshold.lower[[1]] < 0)
      {
        extreme.threshold.lower[[1]] <- 0
      }
      total_data_points_below_lower_threshold <- length(raw_vector[raw_vector < extreme.threshold.lower[[1]]])
      if (total_data_points_below_lower_threshold > 0)
      {
        outlier_message <- paste(outlier_message, " Investigate ", total_data_points_below_lower_threshold,
                                 " data points below the lower threshold (", extreme.threshold.lower, ")", sep = "")
      }
      # create histogram
      p <- ggplot(data = df_table, aes(x = Var1, y = Freq, width = 1, fill = Freq)) +
        geom_bar(stat = "identity", position = "identity") + ggtitle(paste(field_name, ": Distribution\n(Mean=", mean_Var1, " Std. Deviation=", sd_Var1, ")"))
      p <- p + ylab(paste(table_name, "Count")) + xlab(paste(field_name, "(in", unit, ")"))
      # remove legend and set size and orientation of tick labels
      p <- p + theme(legend.position = "none", text = element_text(size = 6), axis.text.x = element_text(angle = 90, vjust = 1), plot.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank())
      ggsave(file = paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name(table_name, field_name), sep = ""))
      return(paste("\n\nMaximum:", max_Var1, "Minimum:", min_Var1, "Mean:", mean_Var1, "Std Deviation:", sd_Var1, "Median:", median_Var1, "\n\n"))
    }
  }
}
# Foreign Key Fields
# functionName: describeForeignKeyIdentifiers
# Description: generate two barplots (natural order and sorted by frequency)
#              for a foreign key field in a table.
# Inputs:
# 1. df_table      - an R dataframe containing a given database table, or an
#                    aggregated (value, frequency) table when big_data_flag is TRUE
# 2. table_name    - table name used in labels and the output file names
# 3. field_name    - the name of the foreign key field
# 4. big_data_flag - TRUE when df_table is already aggregated
# Output: writes the barplots to png files; returns a message listing the
#         most frequent values, or NULL implicitly when empty.
describeForeignKeyIdentifiers <- function(df_table, table_name, field_name, big_data_flag)
{
  flog.info(paste("Plotting for Field: ", field_name))
  if (big_data_flag == FALSE)
  {
    column_index <- which(colnames(df_table) == field_name)
    if (nrow(df_table) > 0)
    {
      dfTab <- table(df_table[, column_index])
      if (nrow(dfTab) > 0)
      {
        # total number of distinct key values
        total_locations <- nrow(dfTab)
        png(paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name(table_name, field_name), sep = ""))
        # not using ggplot here as it is very expensive for a large number of values
        barplot(dfTab, main = paste(field_name, ": Distribution"), xlab = paste(field_name, "(Total: ", total_locations, ")"), ylab = paste(table_name, "Count"), xaxt = 'n')
        # also plot in decreasing order of frequency (to compare distribution with source data)
        df_dfTab <- data.frame(dfTab)
        ordered_dfTab <- df_dfTab[order(-df_dfTab[, 2]), ]
        png(paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name_sorted(table_name, field_name), sep = ""))
        barplot(ordered_dfTab$Freq, main = paste(field_name, ": Distribution"), xlab = paste(field_name, "(Total: ", total_locations, ")"), ylab = paste(table_name, "Count"), xaxt = 'n')
        return_message <- paste("The most frequent values for", field_name, "are:")
        index <- 1
        # stop on NA / short tables via the loop condition instead of the old
        # early return, which leaked both open png devices
        while (index <= 5 && nrow(ordered_dfTab) >= index && !is.na(ordered_dfTab[index, 1]))
        {
          return_message <- paste(return_message, ordered_dfTab[index, 1])
          if (index < 5)
            return_message <- paste(return_message, ",")
          index <- index + 1
        }
        # turn off both png devices
        dev.off()
        dev.off()
        return(return_message)
      }
    }
  }
  # handling big data using dplyr: pre-aggregated (value, frequency) input
  else
  {
    colnames(df_table)[1] <- "Var1"
    colnames(df_table)[2] <- "Freq"
    # remove NA values
    df_table <- subset(df_table, !is.na(Var1))
    if (nrow(df_table) > 0)
    {
      ordered_data <- df_table[order(-df_table[, 2]), ]
      total_values <- length(unique(df_table$Var1))
      png(paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name(table_name, field_name), sep = ""))
      # not using ggplot here as it is very expensive for a large number of values
      barplot(df_table$Freq, main = paste(field_name, ": Distribution"), xlab = paste(field_name, "(Total: ", total_values, ")"), ylab = paste(table_name, "Count"), xaxt = 'n')
      # also plot in decreasing order of frequency (to compare distribution with source data)
      png(paste(normalize_directory_path(g_config$reporting$site_directory), get_image_name_sorted(table_name, field_name), sep = ""))
      barplot(ordered_data$Freq, main = paste(field_name, ": Distribution"), xlab = paste(field_name, "(Total: ", total_values, ")"), ylab = paste(table_name, "Count"), xaxt = 'n')
      return_message <- paste("The most frequent values for", field_name, "are:")
      index <- 1
      # guard on nrow() so fewer than 5 distinct values do not append "NA" entries
      while (index <= 5 && nrow(ordered_data) >= index)
      {
        return_message <- paste(return_message, ordered_data[index, 1])
        if (index < 5)
          return_message <- paste(return_message, ",")
        index <- index + 1
      }
      dev.off()
      dev.off()
      return(return_message)
    }
  }
}
# Update values of concept_id fields to include concept names, to improve
# readability for plotting purposes.
#
# Inputs:
#   df_table   - data frame containing the field to enhance
#   field_name - name of the column whose values are concept ids
#   df_ref     - reference data frame; column 1 = concept id, column 2 = concept name
# Returns: a copy of df_table in which every value matching a reference id is
#          replaced by "name (id)"; unmatched values are left untouched (the
#          column is coerced to character when any replacement occurs, as before).
EnhanceFieldValues <- function(df_table, field_name, df_ref)
{
  df_table_enhanced <- df_table
  column_index <- which(colnames(df_table_enhanced) == field_name)
  # vectorized lookup (first match per value) instead of one full pass over
  # the column per reference row; also NA-safe, unlike the == comparison
  ref_index <- match(df_table_enhanced[, column_index], df_ref[, 1])
  matched <- !is.na(ref_index)
  df_table_enhanced[, column_index][matched] <-
    paste(df_ref[ref_index[matched], 2], " (", df_ref[ref_index[matched], 1], ")", sep = "")
  return(df_table_enhanced)
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/FLVIF.R
\name{FLVIF}
\alias{FLVIF}
\title{Variance Inflation Factor}
\usage{
FLVIF(table, primary_key, response, exclude = c(), class_spec = list(),
where_clause = "", note = "From RWrapper For DBLytix")
}
\arguments{
\item{table}{an object of class \code{FLTable}}
\item{primary_key}{name of primary key column of the table mapped to \code{table}}
\item{response}{name of the dependent variable column}
\item{exclude}{vector of names of the columns which are to be excluded}
\item{class_spec}{list that identifies the value of the categorical variable
which is to be used as reference when converting to dummy binary variables}
\item{where_clause}{condition to filter out data from the table}
\item{note}{free form string that will be stored with the results, typically
used to document the purpose of the analysis}
}
\value{
an object of class \code{FLVIF}
}
\description{
Performs variance inflation factor analysis on data
}
\details{
Variance Inflation Factor is used to identify redundant variables in
a dataset. The square root of the variance inflation factor tells you how
much larger the standard error is, compared with what it would be if that
variable were uncorrelated with the other predictor variables in the model.
}
\examples{
\dontrun{
connection <- odbcConnect("Gandalf")
table <- FLTable(connection, "FL_R_WRAP", "tblAutoMpg")
result <- FLVIF( table, primary_key = "ObsID", response = "MPG", exclude = c("CarNum","CarNumber"), class_spec = list(CarName = "Audi"))
vifResult <- FLFetch(result)
}
}
| /man/FLVIF.Rd | no_license | mcetraro/AdapteR | R | false | false | 1,613 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/FLVIF.R
\name{FLVIF}
\alias{FLVIF}
\title{Variance Inflation Factor}
\usage{
FLVIF(table, primary_key, response, exclude = c(), class_spec = list(),
where_clause = "", note = "From RWrapper For DBLytix")
}
\arguments{
\item{table}{an object of class \code{FLTable}}
\item{primary_key}{name of primary key column of the table mapped to \code{table}}
\item{response}{name of the dependent variable column}
\item{exclude}{vector of names of the columns which are to be excluded}
\item{class_spec}{list that identifies the value of the categorical variable
which is to be used as reference when converting to dummy binary variables}
\item{where_clause}{condition to filter out data from the table}
\item{note}{free form string that will be stored with the results, typically
used to document the purpose of the analysis}
}
\value{
an object of class \code{FLVIF}
}
\description{
Performs variance inflation factor analysis on data
}
\details{
Variance Inflation Factor is used to identify redundant variables in
a dataset. The square root of the variance inflation factor tells you how
much larger the standard error is, compared with what it would be if that
variable were uncorrelated with the other predictor variables in the model.
}
\examples{
\dontrun{
connection <- odbcConnect("Gandalf")
table <- FLTable(connection, "FL_R_WRAP", "tblAutoMpg")
result <- FLVIF( table, primary_key = "ObsID", response = "MPG", exclude = c("CarNum","CarNumber"), class_spec = list(CarName = "Audi"))
vifResult <- FLFetch(result)
}
}
|
#' Elastic Net
#'
#' A function that fits an elastic net to the given data using the coordinate
#' descent algorithm. The response and covariates are standardized (centered
#' and scaled) before fitting.
#'
#'@param y The response
#'@param X The covariate matrix
#'@param beta The initialization of the coefficients. The default value is 0.
#'@param alpha Elastic net parameter, 0<=alpha<=1
#'@param lambda Regularization parameter
#'@param maxit Maximum Number of Iterations. The default value is 10000.
#'@param tol stop criterion. If the difference between the current result
#' and result from the last iteration is less than the Tolerance, then we say
#' the algorithm converges. The default value is 1e-6
#'@return The coefficient vector that fits the elastic net to the given data
#'using coordinate descent.
#'
#'@author Yixiao Lin
#'
#'@export
elnet_coord <- function(y, X, beta = NA, alpha, lambda, maxit = 10000, tol = 1e-6) {
  X <- as.matrix(X)
  n <- dim(X)[1]
  p <- dim(X)[2]
  # input validation (kept as warnings, matching the original behavior)
  if (length(y) != n) {
    warning("variable lengths differ")
  }
  if (alpha < 0 || alpha > 1 || is.na(alpha)) {
    warning("alpha is invalid")
  }
  if (maxit %% 1 != 0 || maxit < 0 || is.na(maxit)) {
    warning("niter is invalid")
  }
  if (lambda < 0 || is.na(lambda)) {
    warning("lambda is invalid")
  }
  if (tol < 0 || is.na(tol)) {
    warning("tol is invalid")
  }
  # standardize the response and the covariates
  # (fixed: the previous code referenced an undefined uppercase `Y`)
  ymean <- mean(y); ysd <- sd(y); y <- (y - ymean) / ysd
  Xmean <- apply(X, 2, mean); Xsd <- apply(X, 2, sd)
  # nrow = p keeps diag() from building an identity matrix when p == 1
  X <- (X - matrix(Xmean, nrow(X), ncol(X), byrow = TRUE)) %*% diag(1 / Xsd, nrow = p)
  # default the coefficient vector to zero when not supplied
  if (any(is.na(beta))) {
    beta <- rep(0, p)
  }
  res <- CD(X, y, lambda, alpha, beta, n, p, maxit, tol)
  return(res)
}
#' Coordinate descent for the elastic net
#'
#' Runs cyclic coordinate descent updates on the elastic net objective until
#' the coefficient vector changes by less than `tol` (Euclidean norm) between
#' two consecutive sweeps.
#'
#'@param X The covariate matrix
#'@param Y The response
#'@param lambda Regularization parameter
#'@param alpha Elastic net hyperparameter that controls how much of each of
#' the L1 and L2 penalties are involved. 0<=alpha<=1
#'@param beta The initialization of the coefficients.
#'@param n numbers of rows of X
#'@param p numbers of columns of X
#'@param maxit Maximum Number of Iterations.
#'@param tol stop criterion. If the difference between the current result
#' and result from the last iteration is less than the Tolerance, then we say
#' the algorithm converges
#'@return the fitted coefficient vector
#'@export
CD <- function(X, Y, lambda, alpha, beta, n, p, maxit, tol) {
  tol.met <- FALSE     # convergence flag (spelled out; `F` can be reassigned)
  beta1 <- beta
  iter <- 0
  while (!tol.met) {
    beta0 <- beta1     # coefficients from the previous sweep
    iter <- iter + 1
    for (j in seq_len(p)) {
      # partial residual excluding predictor j
      ej <- Y - X[, -j] %*% beta1[-j]
      xj <- X[, j]
      z <- mean(xj * ej)
      # soft-thresholding update for the elastic net penalty
      num <- sign(z) * max(0, abs(z) - lambda * alpha)
      denom <- mean(xj^2) + lambda * (1 - alpha)
      beta1[j] <- num / denom
    }
    # catch NaN as well as Inf (the previous `== Inf` check missed NaN,
    # which would spin until maxit)
    if (any(!is.finite(beta1)))
      stop("The algorithm diverges")
    if (iter == maxit) {
      stop("Max iteration reached")
    }
    if (norm(beta0 - beta1, '2') < tol) {
      tol.met <- TRUE
    }
  }
  return(beta1)
}
| /R/elnet_coord.R | permissive | yl2883/yixiaopackage | R | false | false | 2,699 | r | #' Elastic Net
#'
#' A function that fits an elastic net to the given data using the coordinate
#' descent algorithm. The response and covariates are standardized (centered
#' and scaled) before fitting.
#'
#'@param y The response
#'@param X The covariate matrix
#'@param beta The initialization of the coefficients. The default value is 0.
#'@param alpha Elastic net parameter, 0<=alpha<=1
#'@param lambda Regularization parameter
#'@param maxit Maximum Number of Iterations. The default value is 10000.
#'@param tol stop criterion. If the difference between the current result
#' and result from the last iteration is less than the Tolerance, then we say
#' the algorithm converges. The default value is 1e-6
#'@return The coefficient vector that fits the elastic net to the given data
#'using coordinate descent.
#'
#'@author Yixiao Lin
#'
#'@export
elnet_coord <- function(y, X, beta = NA, alpha, lambda, maxit = 10000, tol = 1e-6) {
  X <- as.matrix(X)
  n <- dim(X)[1]
  p <- dim(X)[2]
  # input validation (kept as warnings, matching the original behavior)
  if (length(y) != n) {
    warning("variable lengths differ")
  }
  if (alpha < 0 || alpha > 1 || is.na(alpha)) {
    warning("alpha is invalid")
  }
  if (maxit %% 1 != 0 || maxit < 0 || is.na(maxit)) {
    warning("niter is invalid")
  }
  if (lambda < 0 || is.na(lambda)) {
    warning("lambda is invalid")
  }
  if (tol < 0 || is.na(tol)) {
    warning("tol is invalid")
  }
  # standardize the response and the covariates
  # (fixed: the previous code referenced an undefined uppercase `Y`)
  ymean <- mean(y); ysd <- sd(y); y <- (y - ymean) / ysd
  Xmean <- apply(X, 2, mean); Xsd <- apply(X, 2, sd)
  # nrow = p keeps diag() from building an identity matrix when p == 1
  X <- (X - matrix(Xmean, nrow(X), ncol(X), byrow = TRUE)) %*% diag(1 / Xsd, nrow = p)
  # default the coefficient vector to zero when not supplied
  if (any(is.na(beta))) {
    beta <- rep(0, p)
  }
  res <- CD(X, y, lambda, alpha, beta, n, p, maxit, tol)
  return(res)
}
#' coordinate descent elastic net
#'
#' Cyclic coordinate descent on the elastic net objective: predictors are
#' updated one at a time via soft-thresholding until the coefficient vector
#' stabilizes (Euclidean change below `tol`) or `maxit` sweeps have run.
#'
#'@param X The covariate matrix
#'@param Y The response
#'@param lambda Regularization parameter
#'@param alpha Elastic net hyperparameter that controls how much of each of
#' the L1 and L2 penalties are involved. 0<=alpha<=1
#'@param beta The initialization of the coefficients.
#'@param n numbers of rows of X
#'@param p numbers of rows of Y
#'@param maxit Maximum Number of Iteration.
#'@param tol stop criterion. If the difference between the current result
#' and result from the last iteration is less than the Tolerance, then we say
#' the algorithm converges
#'@export
CD <- function(X, Y, lambda, alpha, beta, n, p, maxit, tol) {
  coef_new <- beta
  sweep_count <- 0
  repeat {
    coef_old <- coef_new        # snapshot before this sweep
    sweep_count <- sweep_count + 1
    for (col_idx in 1:p) {
      # partial residual with predictor col_idx removed from the fit
      partial_resid <- Y - X[, -col_idx] %*% coef_new[-col_idx]
      predictor <- X[, col_idx]
      corr <- mean(predictor * partial_resid)
      # soft-threshold by the L1 part, shrink by the L2 part
      shrunk <- sign(corr) * max(0, abs(corr) - lambda * alpha)
      coef_new[col_idx] <- shrunk / (mean(predictor^2) + lambda * (1 - alpha))
    }
    if (any(abs(coef_new) == Inf))
      stop("The algorithm diverges")
    if (sweep_count == maxit) {
      stop("Max iteration reached")
    }
    if (norm(coef_old - coef_new, '2') < tol)
      break
  }
  return(coef_new)
}
|
### =========================================================================
### bedtools shift command
### -------------------------------------------------------------------------
###
# Command-line style entry point: parses `cmd` (e.g. "--help" or a flag
# string) against BEDTOOLS_SHIFT_DOC and dispatches to R_bedtools_shift.
bedtools_shift <- function(cmd = "--help") {
do_R_call(R_bedtools_shift, BEDTOOLS_SHIFT_DOC, cmd)
}
# R implementation of the `bedtools shift` command.
#
# Arguments mirror the command-line flags documented in BEDTOOLS_SHIFT_DOC:
#   i      - BAM/BED/GFF/VCF input (path or object with ranges)
#   s      - shift every entry by this many base pairs
#   m / p  - shift only minus- / plus-strand entries; exactly one of `s` or
#            the (m, p) pair may be supplied (enforced by the xor() below)
#   pct    - interpret s/m/p as a fraction of each feature's width
#   g      - genome file or identifier defining sequence order and sizes
#   header - print the input header prior to results
#
# NOTE(review): `.R()` and `R()` appear to record steps of an unevaluated
# pipeline (quoting helpers); `ans` is assembled inside that recorded
# pipeline rather than computed eagerly here -- confirm against the
# package's do_R_call machinery.
R_bedtools_shift <- function(i, s = 0, m = 0, p = 0,
pct = FALSE, g = NULL, header = FALSE)
{
# validate argument types and the s-vs-(m, p) exclusivity up front
stopifnot(isSingleString(i) || hasRanges(i),
isSingleNumber(s),
isSingleNumber(m),
isSingleNumber(p),
xor(!(missing(m) && missing(p)), !missing(s)),
isTRUEorFALSE(pct),
isGenome(g),
isTRUEorFALSE(header))
importGenome(g)
i <- normA(i)
.gr_i <- importA(i)
.gr_i_o <- prepOverlapRanges(i, FALSE)
# uniform shift of all features by `s` (optionally a fraction of width)
if (s != 0) {
if (pct) {
s <- .R(width(.gr_i_o) * s)
}
R(ans <- shift(.gr_i_o, s))
} else {
R(ans <- .gr_i_o)
}
# plus-strand-only shift by `p`
if (p != 0) {
.plus <- .R(strand(ans) == "+")
if (pct) {
p <- .R(width(ans) * p)
}
R(ans[.plus] <- shift(ans[.plus], p))
}
# minus-strand-only shift by `m`
if (m != 0) {
.minus <- .R(strand(ans) == "-")
if (pct) {
m <- .R(width(ans) * m)
}
R(ans[.minus] <- shift(ans[.minus], m))
}
# final recorded expression: the shifted ranges
R(ans)
}
# docopt-style usage string; do_R_call() parses command-line flags against it
# to map them onto the arguments of R_bedtools_shift.
BEDTOOLS_SHIFT_DOC <-
"Usage:
bedtools_shift [options]
Options:
-i <FILE,...> BAM/BED/GFF/VCF files.
-s <bp> Shift the BED/GFF/VCF entry -s base pairs.
Integer or Float (e.g. 0.1) if used with -pct.
-m <bp> Shift entries on the - strand -m base pairs.
Integer or Float (e.g. 0.1) if used with -pct.
-p <bp> Shift entries on the + strand -p base pairs.
Integer or Float (e.g. 0.1) if used with -pct.
--pct Define -l and -r as a fraction of the feature's length.
E.g. if used on a 1000bp feature, -l 0.50, will add 500 bp
\"upstream\". Default = false.
-g <path> Specify a genome file or identifier that defines the
order and size of the sequences.
--header Print the header from the input file prior to results."
# convenience wrapper generated from the implementation function
do_bedtools_shift <- make_do(R_bedtools_shift)
| /R/shift.R | no_license | lawremi/HelloRanges | R | false | false | 2,278 | r | ### =========================================================================
### bedtools shift command
### -------------------------------------------------------------------------
###
# Command-line style entry point: parses `cmd` (e.g. "--help" or a flag
# string) against BEDTOOLS_SHIFT_DOC and dispatches to R_bedtools_shift.
bedtools_shift <- function(cmd = "--help") {
do_R_call(R_bedtools_shift, BEDTOOLS_SHIFT_DOC, cmd)
}
# R implementation of the `bedtools shift` command.
#
# Arguments mirror the command-line flags documented in BEDTOOLS_SHIFT_DOC:
#   i      - BAM/BED/GFF/VCF input (path or object with ranges)
#   s      - shift every entry by this many base pairs
#   m / p  - shift only minus- / plus-strand entries; exactly one of `s` or
#            the (m, p) pair may be supplied (enforced by the xor() below)
#   pct    - interpret s/m/p as a fraction of each feature's width
#   g      - genome file or identifier defining sequence order and sizes
#   header - print the input header prior to results
#
# NOTE(review): `.R()` and `R()` appear to record steps of an unevaluated
# pipeline (quoting helpers); `ans` is assembled inside that recorded
# pipeline rather than computed eagerly here -- confirm against the
# package's do_R_call machinery.
R_bedtools_shift <- function(i, s = 0, m = 0, p = 0,
pct = FALSE, g = NULL, header = FALSE)
{
# validate argument types and the s-vs-(m, p) exclusivity up front
stopifnot(isSingleString(i) || hasRanges(i),
isSingleNumber(s),
isSingleNumber(m),
isSingleNumber(p),
xor(!(missing(m) && missing(p)), !missing(s)),
isTRUEorFALSE(pct),
isGenome(g),
isTRUEorFALSE(header))
importGenome(g)
i <- normA(i)
.gr_i <- importA(i)
.gr_i_o <- prepOverlapRanges(i, FALSE)
# uniform shift of all features by `s` (optionally a fraction of width)
if (s != 0) {
if (pct) {
s <- .R(width(.gr_i_o) * s)
}
R(ans <- shift(.gr_i_o, s))
} else {
R(ans <- .gr_i_o)
}
# plus-strand-only shift by `p`
if (p != 0) {
.plus <- .R(strand(ans) == "+")
if (pct) {
p <- .R(width(ans) * p)
}
R(ans[.plus] <- shift(ans[.plus], p))
}
# minus-strand-only shift by `m`
if (m != 0) {
.minus <- .R(strand(ans) == "-")
if (pct) {
m <- .R(width(ans) * m)
}
R(ans[.minus] <- shift(ans[.minus], m))
}
# final recorded expression: the shifted ranges
R(ans)
}
# docopt-style usage string; do_R_call() parses command-line flags against it
# to map them onto the arguments of R_bedtools_shift.
BEDTOOLS_SHIFT_DOC <-
"Usage:
bedtools_shift [options]
Options:
-i <FILE,...> BAM/BED/GFF/VCF files.
-s <bp> Shift the BED/GFF/VCF entry -s base pairs.
Integer or Float (e.g. 0.1) if used with -pct.
-m <bp> Shift entries on the - strand -m base pairs.
Integer or Float (e.g. 0.1) if used with -pct.
-p <bp> Shift entries on the + strand -p base pairs.
Integer or Float (e.g. 0.1) if used with -pct.
--pct Define -l and -r as a fraction of the feature's length.
E.g. if used on a 1000bp feature, -l 0.50, will add 500 bp
\"upstream\". Default = false.
-g <path> Specify a genome file or identifier that defines the
order and size of the sequences.
--header Print the header from the input file prior to results."
# convenience wrapper generated from the implementation function
do_bedtools_shift <- make_do(R_bedtools_shift)
|
\name{Zdate}
\alias{Zdate}
\alias{dateList}
\alias{dateStamp}
\title{Date functions}
\description{
Make character vector from dates
}
\usage{
Zdate(info, sel=1, t1=0, sep=':')
dateList(datevec)
dateStamp(datelist, sep=':')
}
\arguments{
\item{info}{info structure from trace structure}
\item{sel}{selection of which ones to extract,
default=1:length(info$jd) }
\item{t1}{ time offset, seconds, default=0 }
\item{sep}{ character for separating the components in the string, default=":" }
\item{datevec}{ vector with yr, jd, mo, day, hr, mi, sec }
\item{ datelist}{ output of dateList }
}
\details{
Format date stamp for plotting and identification. Used for STAMP.
}
\value{
character strings
}
\note{
If using Zdate to create a file name, be careful about the separator. A colon
in the file name on PC and MAC systems can be confusing for the OS.
}
\author{Jonathan M. Lees<jonathan.lees.edu>}
\seealso{swig, dateStamp, ghstamp, filedatetime}
\examples{
data("GH")
sel <- which(GH$COMPS == "V")
ftime <- Zdate(GH$info, sel[1:5], 1)
dvec <- c(2009, 134, 5, 14, 10, 32, 24.5, 0)
A <- dateList(dvec)
dateStamp(A)
dateStamp(A, sep="_")
}
\keyword{misc}
| /man/Zdate.Rd | no_license | cran/RSEIS | R | false | false | 1,193 | rd | \name{Zdate}
\alias{Zdate}
\alias{dateList}
\alias{dateStamp}
\title{Date functions}
\description{
Make character vector from dates
}
\usage{
Zdate(info, sel=1, t1=0, sep=':')
dateList(datevec)
dateStamp(datelist, sep=':')
}
\arguments{
\item{info}{info structure from trace structure}
\item{sel}{selection of which ones to extract,
default=1:length(info$jd) }
\item{t1}{ time offset, seconds, default=0 }
\item{sep}{ character for separating the components in the string, default=":" }
\item{datevec}{ vector with yr, jd, mo, day, hr, mi, sec }
\item{ datelist}{ output of dateList }
}
\details{
Format date stamp for plotting and identification. Used for STAMP.
}
\value{
character strings
}
\note{
If using Zdate to create a file name, be careful about the separator. A colon
in the file name on PC and MAC systems can be confusing for the OS.
}
\author{Jonathan M. Lees<jonathan.lees.edu>}
\seealso{swig, dateStamp, ghstamp, filedatetime}
\examples{
data("GH")
sel <- which(GH$COMPS == "V")
ftime <- Zdate(GH$info, sel[1:5], 1)
dvec <- c(2009, 134, 5, 14, 10, 32, 24.5, 0)
A <- dateList(dvec)
dateStamp(A)
dateStamp(A, sep="_")
}
\keyword{misc}
|
# Pivot and unpivot data (short or long format in R) ----------------------
# Demonstrates reshaping the UCI "Wholesale customers" dataset from wide to
# long format with tidyr::pivot_longer().
# https://python-bloggers.com/2022/07/how-to-unpivot-a-dataset-in-excel-power-query-vs-r-vs-python/
library(tidyverse)
# Read in dataset as csv (fetched over HTTP from the UCI repository)
wholesale <- read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/00292/Wholesale%20customers%20data.csv')
head(wholesale)
# it is still a small-size table; sample output:
# Channel Region Fresh Milk Grocery Frozen Detergents_Paper
# <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 2 3 12669 9656 7561 214 2674
# 2 2 3 7057 9810 9568 1762 3293
# 3 2 3 6353 8808 7684 2405 3516
# 4 1 3 13265 1196 4221 6404 507
# 5 2 3 22615 5410 7198 3915 1777
# 6 2 3 9413 8259 5126 666 1795
# pivot to long format: one (Category, Sales) row per original product column
# ('Fresh' through 'Delicassen' are the product-spend columns)
wholesale_pivot <- wholesale %>%
pivot_longer(cols=c('Fresh':'Delicassen'),
values_to = 'Sales',
names_to = 'Category')
head(wholesale_pivot)
# Channel Region Category Sales
# <dbl> <dbl> <chr> <dbl>
# 1 2 3 Fresh 12669
# 2 2 3 Milk 9656
# 3 2 3 Grocery 7561
# 4 2 3 Frozen 214
# 5 2 3 Detergents_Paper 2674
# 6 2 3 Delicassen 1338 | /ProbStatsR/template_pivot_unpivot_table.R | no_license | PyRPy/stats_r | R | false | false | 1,467 | r |
# Pivot and unpivot data (short or long format in R) ----------------------
# Demonstrates reshaping the UCI "Wholesale customers" dataset from wide to
# long format with tidyr::pivot_longer().
# https://python-bloggers.com/2022/07/how-to-unpivot-a-dataset-in-excel-power-query-vs-r-vs-python/
library(tidyverse)
# Read in dataset as csv (fetched over HTTP from the UCI repository)
wholesale <- read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/00292/Wholesale%20customers%20data.csv')
head(wholesale)
# it is still a small-size table; sample output:
# Channel Region Fresh Milk Grocery Frozen Detergents_Paper
# <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 2 3 12669 9656 7561 214 2674
# 2 2 3 7057 9810 9568 1762 3293
# 3 2 3 6353 8808 7684 2405 3516
# 4 1 3 13265 1196 4221 6404 507
# 5 2 3 22615 5410 7198 3915 1777
# 6 2 3 9413 8259 5126 666 1795
# pivot to long format: one (Category, Sales) row per original product column
# ('Fresh' through 'Delicassen' are the product-spend columns)
wholesale_pivot <- wholesale %>%
pivot_longer(cols=c('Fresh':'Delicassen'),
values_to = 'Sales',
names_to = 'Category')
head(wholesale_pivot)
# Channel Region Category Sales
# <dbl> <dbl> <chr> <dbl>
# 1 2 3 Fresh 12669
# 2 2 3 Milk 9656
# 3 2 3 Grocery 7561
# 4 2 3 Frozen 214
# 5 2 3 Detergents_Paper 2674
# 6 2 3 Delicassen 1338
# Demotivation survey analysis: load the spreadsheet and preprocess it.
library(readxl)
data <- read_excel("Downloads/demotivation.xlsx")
View(data)
# Preprocessing
# 1 - type conversion: recode categorical columns as factors and make sure
#     the numeric columns really are numeric
data$Sexe <- as.factor(data$Sexe)
levels(data$Sexe) = c("M", "F")
data$Sexe
data$Fonction <- as.factor(data$Fonction)
levels(data$Fonction) = c("Responsable","Cadre","Opérateur")
data$Fonction
data$Age <- as.numeric(data$Age)
data$Age
data$Revenu <- as.numeric(data$Revenu)
data$Revenu
summary(data)
# Data cleaning
# There are no missing values, so we focus only on outliers.
# 1 - start with Age
boxplot(data$Age)
# According to the boxplot there are no outliers in Age
age_aberrante <- boxplot(data$Age)$out
age_aberrante
# The list of outlier values is empty as well.
# 2 - Scolarite (years of schooling)
boxplot(data$Scolarite)
scolarite_aberrante <- boxplot(data$Scolarite)$out
scolarite_aberrante
# boxplot()$out reveals a single outlier value: 2
# We will compute the mean without it and substitute that value.
# Helper function to compute the mean while excluding the outlier
# Mean of the values of `list` that are strictly above `threshold`.
# NOTE: the sum is divided by length(list) - 1, i.e. the function assumes
# exactly ONE outlier is being excluded (here the aberrant value 2 in
# data$Scolarite). The accumulator is now local, so the function no longer
# depends on a global `count` being reset to 0 before each call.
#
# @param list      numeric vector to average (name kept for backward
#                  compatibility even though it shadows base::list here)
# @param threshold values <= threshold are excluded (default 3, as before)
# @return the trimmed mean (also printed as a side effect)
function.moyen <- function(list, threshold = 3){
  count <- 0
  for(i in list){
    if(i > threshold){
      count <- count + i
    }
  }
  result <- count/(length(list)-1)
  print(result)
}
scolarite_moyen <- function.moyen(data$Scolarite)
# Take the integer part (16) and substitute it for the outlier in the data.
data$Scolarite <- replace(data$Scolarite , data$Scolarite == 2,16)
data$Scolarite
# 3 - the Revenu variable
boxplot(data$Revenu)
# No outliers here.
# Step 2: data analysis
# 1 - univariate analysis: plot() for the qualitative variables, hist() for
# the quantitative ones, plus summary() for each.
plot(data$Sexe)
hist(data$Age)
hist(data$Scolarite)
plot(data$Fonction)
hist(data$Revenu)
summary(data$Sexe)
summary(data$Age)
summary(data$Scolarite)
summary(data$Fonction)
summary(data$Revenu)
# Normality tests (quantitative variables only)
# 1 - Age
shapiro.test(data$Age)
# p-value < 0.5% (likely meant 5%): no normality; check quasi-normality next.
library(moments)
skewness(data$Age)
kurtosis(data$Age)
# The value is not between -2.5 and 2.5, so quasi-normality fails too.
# 2 - Scolarite
shapiro.test(data$Scolarite)
# p-value > 5%: normality holds; no difference from the normal distribution.
# 3 - Revenu
shapiro.test(data$Revenu)
# p-value > 5%: normality holds; no difference from the normal distribution.
# So the variables following the normal law are Scolarite and Revenu.
# 2 - bivariate analysis: between two variables
# 1 - Age vs Revenu: two quantitative variables -> correlation test
cor.test(data$Revenu,data$Age)
# p-value = 0.04517 < 5%: there is a relation between Age and Revenu.
# 2 - Sexe vs Revenu: qualitative vs normally distributed quantitative -> ANOVA
summary(aov(data$Revenu~data$Sexe))[[1]][["Pr(>F)"]]
# p-value < 5%: there is an association between Sexe and Revenu.
# 3 - years of study vs Revenu: no such variable available.
# 4 - Fonction vs Revenu: normal quantitative vs qualitative -> ANOVA
summary(aov(data$Revenu~data$Fonction))[[1]][["Pr(>F)"]]
# p-value < 5%: Revenu and Fonction are associated, which is expected --
# a manager's income differs from an operator's, etc.
# 5 - Sexe vs Scolarite: qualitative vs normal quantitative -> ANOVA
summary(aov(data$Scolarite~data$Sexe))[[1]][["Pr(>F)"]]
# p-value > 5%: no association between Sexe and Scolarite.
# 5 - Sexe vs Fonction: two qualitative variables -> chi-squared test
chisq.test(data$Fonction,data$Sexe)
# p-value > 5%: no relation between Sexe and Fonction.
# 4-1 So, based on the tests performed, men and women following the same
# career path have comparable incomes: no difference detected.
# 4-2 Nothing can be concluded here for lack of sufficient data.
# Linear regression between Revenu and Age
plot(data$Age, data$Revenu)
plot(data$Age, data$Revenu, pch = 16, cex = 1.3, col = "blue", main = "Relation entre Revnue et Age", xlab = "Age", ylab = "Revenue")
lm(data$Revenu ~ data$Age)
abline(32951.2, 592.3 ,col="red")
# Linear regression between Revenu and Scolarite
plot(data$Scolarite, data$Revenu)
plot(data$Scolarite, data$Revenu, pch = 16, cex = 1.3, col = "blue", main = "Relation entre Revenue et Scolarite", xlab = "Scolarite", ylab = "Revenue")
lm(data$Revenu ~ data$Scolarite)
abline(13948, 2293,col="red")
| /Main.R | no_license | Ayoubkassi/Data_Analysis | R | false | false | 4,986 | r | library(readxl)
data <- read_excel("Downloads/demotivation.xlsx")
View(data)
#preprocessing
#1-convertion
data$Sexe <- as.factor(data$Sexe)
levels(data$Sexe) = c("M", "F")
data$Sexe
data$Fonction <- as.factor(data$Fonction)
levels(data$Fonction) = c("Responsable","Cadre","Opérateur")
data$Fonction
data$Age <- as.numeric(data$Age)
data$Age
data$Revenu <- as.numeric(data$Revenu)
data$Revenu
summary(data)
#Nettoyage de donne
#1-on a pas de valeurs manquantes alors on va juste se focaliser sur les valeurs aberrantes
#1-commençons par l'Age
boxplot(data$Age)
#d'apres le boxplot il n'ya pas de valeurs aberrante on Age
age_aberrante <- boxplot(data$Age)$out
age_aberrante
#aussi la liste des valeurs aberrantes est nule.
#2-scolarite
boxplot(data$Scolarite)
scolarite_aberrante <- boxplot(data$Scolarite)$out
scolarite_aberrante
#alors d'apres la fonction boxplot out c'est facile de voir qu'il ya une seul valeur aberrante c'est 2
#alors on va calculer la moyene et la remplacer par sa valeur
#fonction pour calculer la moyenne sans valeur aberrante
# Mean of the values of `list` that are strictly above `threshold`.
# NOTE: the sum is divided by length(list) - 1, i.e. the function assumes
# exactly ONE outlier is being excluded (here the aberrant value 2 in
# data$Scolarite). The accumulator is now local, so the function no longer
# depends on a global `count` being reset to 0 before each call.
#
# @param list      numeric vector to average (name kept for backward
#                  compatibility even though it shadows base::list here)
# @param threshold values <= threshold are excluded (default 3, as before)
# @return the trimmed mean (also printed as a side effect)
function.moyen <- function(list, threshold = 3){
  count <- 0
  for(i in list){
    if(i > threshold){
      count <- count + i
    }
  }
  result <- count/(length(list)-1)
  print(result)
}
scolarite_moyen <- function.moyen(data$Scolarite)
#prenons la partie entiere c'est 16 alors on vat juste la remplcer dans notre donnes
data$Scolarite <- replace(data$Scolarite , data$Scolarite == 2,16)
data$Scolarite
#3pour la variable revenue
boxplot(data$Revenu)
#alors il n'ya pas de valeurs aberrante .
#Etape 2 L'analyse de donnes
#1-analyse univarie : consiste a faire un plot pour les variables qualitatives et un hist pour
#les variables quantitatives et faire un summary .
plot(data$Sexe)
hist(data$Age)
hist(data$Scolarite)
plot(data$Fonction)
hist(data$Revenu)
summary(data$Sexe)
summary(data$Age)
summary(data$Scolarite)
summary(data$Fonction)
summary(data$Revenu)
#test de normalite c'est juste pour les variables quantitatives
#1 variable Age
shapiro.test(data$Age)
# alors on n'a la valeurs de p-value < 0.5% alors on a pas de normalite passons a la casinormalite
library(moments)
skewness(data$Age)
kurtosis(data$Age)
#alors on 'a notre valeur n'est pas entre 2.5 et -2.5 alors ne respecte pas la casinormalite
#2 variable Scolarite
shapiro.test(data$Scolarite)
#On a la valeurs de p-value >5% alors il respecte la normalite , il ny'a pas de difference entre notre distrbution et la loi normale
#3 variable Revenue
shapiro.test(data$Revenu)
#On a la valeurs de p-value >5% alors il respecte la normalite , il ny'a pas de difference entre notre distrbution et la loi normale
#alors on a comme variables suit la loi normale : Scolarite et Revenue .
#2 analyse bivarie : entre 2 varible
#1 entre Age et Revenue sont deux variable quantitative alors effectuons test de correlation
cor.test(data$Revenu,data$Age)
#alors on a p-value = 0.04517 inferieur a 5% alors il y'a une relation entre l'age et le Revenue
#2entre genre et revenue : alors on'a genre variable qualitative et revenue variables quanti suit la loi normale
#alors effectuons un test de nova
summary(aov(data$Revenu~data$Sexe))[[1]][["Pr(>F)"]]
#alors on'a une valeur inferieur a 5% alors ily'a une association entre Sexe et Revnue
#3annee d'etude et revenue : on'a pas annee d'etude
#4entre fonction et revenue : de meme quanti suit la loi normale avec variable quali -> la nova
summary(aov(data$Revenu~data$Fonction))[[1]][["Pr(>F)"]]
#on 'a une valeur inferieur a 5% alors il y'a une association entre Revenue et Fonction ce qui est normale
# le revenue d'un Responsable c'est pas d'un cadre ect .
#5 entre genre et Scolarite : aussi genre variable quali et Scolarite quanti suit la loi normale -> la nova
summary(aov(data$Scolarite~data$Sexe))[[1]][["Pr(>F)"]]
#ON a la valeur de p-value superieur a 5% alors il ny'a pas d'asscociation entre sexe et Scolarite
#5 entre genre et focntion : 2 variables qualitatives alors chisq test
chisq.test(data$Fonction,data$Sexe)
#on 'a la valeur de p-value > a 5£ alors il n'ya pas une relation entre Sexe et Fonction
#4-1 ALORS D'APRES Les test effectuer , on peut dire qu'il y'a une certain association entre le revenue d'un homme et femme
#qui suit le meme parcours , alors il n'ya pas de diffrence .
#4-2 on peut rien dire parce qu'on a pas de donnes suffisantes .
#linear regression between Revnue et Age
plot(data$Age, data$Revenu)
plot(data$Age, data$Revenu, pch = 16, cex = 1.3, col = "blue", main = "Relation entre Revnue et Age", xlab = "Age", ylab = "Revenue")
lm(data$Revenu ~ data$Age)
abline(32951.2, 592.3 ,col="red")
#linear regression between Revenue et Scolarite
plot(data$Scolarite, data$Revenu)
plot(data$Scolarite, data$Revenu, pch = 16, cex = 1.3, col = "blue", main = "Relation entre Revenue et Scolarite", xlab = "Scolarite", ylab = "Revenue")
lm(data$Revenu ~ data$Scolarite)
abline(13948, 2293,col="red")
|
# Cluster US states by the time-series shape of (a) gun-law counts and
# (b) mass-shooting counts, using Dynamic Time Warping (DTW) distances.
library(dtw)
library(readr)
library(TSclust)
library(reshape2)
library(ggplot2)  # FIX: was library(ggplot); ggplot()/geom_line() live in ggplot2
library(directlabels)
# load the data and cast it in cross tab (state x year) form
numGunLawsByState <- read_csv("num_gun_laws_by_state_per_year.csv",
                              col_types = cols_only(lawtotal = col_guess(),
                                                    state = col_guess(), year = col_character()))
gunLawsTabs <- acast(numGunLawsByState, state~year, value.var="lawtotal")
massShootings14to17 <- read_csv("mass_shootings_2014-2017.csv",
                                col_types = cols(Year = col_character()))
massShootingsTabs <- acast(massShootings14to17, State~Year, value.var="Num of Mass Shootings")
# rename AFTER acast: the acast() call above relies on the original names
colnames(massShootings14to17) <- c("year","state","MassShootings")
# plot a time series of all the states by gun laws, with state labels
# drawn directly at both ends of each line (directlabels)
ggplot(numGunLawsByState, aes(x = year, y = lawtotal, group = state, colour = state)) +
  geom_line() +
  scale_colour_discrete(guide = 'none') +
  scale_x_discrete(expand=c(0, 1)) +
  geom_dl(aes(label = state), method = list(dl.trans(x = x - 1.4), "last.points", cex = 0.8)) +
  geom_dl(aes(label = state), method = list(dl.trans(x = x + 1.4), "first.points", cex = 0.8))
# plot a time series of all the states by mass shootings
ggplot(massShootings14to17, aes(x = year, y = MassShootings, group = state, colour = state)) +
  geom_line() +
  scale_colour_discrete(guide = 'none') +
  scale_x_discrete(expand=c(0, 1)) +
  geom_dl(aes(label = state), method = list(dl.trans(x = x + .5), "last.points", cex = 0.8)) +
  geom_dl(aes(label = state), method = list(dl.trans(x = x - .5), "first.points", cex = 0.8))
# calculate the dissimilarity matrix based on DTW
dGunLaws <- diss(gunLawsTabs, "DTWARP")
# build a hierarchical cluster and plot
cGunLaws <- hclust(dGunLaws)
plot(cGunLaws, main = "Gun Laws Time Series Clusters 1991-2017", xlab = "States")
# calculate dissimilarity matrix based on DTW
dMassShootings <- diss(massShootingsTabs, "DTWARP")
# create a hierarchical cluster and plot
cMassShootings <- hclust(dMassShootings)
plot(cMassShootings, main="Mass Shootings Time Series Clusters 2014-2017", xlab="States")
| /TimeSeries/time_series_clusters.R | no_license | isavannahr/CS235Project | R | false | false | 2,197 | r | library(dtw)
library(readr)
library(TSclust)
library(reshape2)
library(ggplot2)  # FIX: was library(ggplot); ggplot()/geom_line() live in ggplot2
library(directlabels)
# load the data and cast it in cross tab form
numGunLawsByState <- read_csv("num_gun_laws_by_state_per_year.csv",
col_types = cols_only(lawtotal = col_guess(),
state = col_guess(), year = col_character()))
gunLawsTabs <- acast(numGunLawsByState, state~year, value.var="lawtotal")
massShootings14to17 <- read_csv("mass_shootings_2014-2017.csv",
col_types = cols(Year = col_character()))
massShootingsTabs <- acast(massShootings14to17, State~Year, value.var="Num of Mass Shootings")
colnames(massShootings14to17) <- c("year","state","MassShootings")
# plot a time series of all the states by gun laws
ggplot(numGunLawsByState, aes(x = year, y = lawtotal, group = state, colour = state)) +
geom_line() +
scale_colour_discrete(guide = 'none') +
scale_x_discrete(expand=c(0, 1)) +
geom_dl(aes(label = state), method = list(dl.trans(x = x - 1.4), "last.points", cex = 0.8)) +
geom_dl(aes(label = state), method = list(dl.trans(x = x + 1.4), "first.points", cex = 0.8))
# plot a time series of all the states by mass shootings
ggplot(massShootings14to17, aes(x = year, y = MassShootings, group = state, colour = state)) +
geom_line() +
scale_colour_discrete(guide = 'none') +
scale_x_discrete(expand=c(0, 1)) +
geom_dl(aes(label = state), method = list(dl.trans(x = x + .5), "last.points", cex = 0.8)) +
geom_dl(aes(label = state), method = list(dl.trans(x = x - .5), "first.points", cex = 0.8))
# calculate the dissimilarity matrix based on DTW
dGunLaws <- diss(gunLawsTabs, "DTWARP")
# build a hierarchical cluster and plot
cGunLaws <- hclust(dGunLaws)
plot(cGunLaws, main = "Gun Laws Time Series Clusters 1991-2017", xlab = "States")
# calculate dissimilarity matrix based on DTW
dMassShootings <- diss(massShootingsTabs, "DTWARP")
# create a hierarchical cluster and plot
cMassShootings <- hclust(dMassShootings)
plot(cMassShootings, main="Mass Shootings Time Series Clusters 2014-2017", xlab="States")
|
# Ride on/off counts for Mapo bus route 09 -- exploratory plotting script.
# NOTE(review): this script appears unfinished / broken as written:
#   * the data is read into `data1`, but the ggplot call below references
#     `bus09`, which is never defined here -- presumably `data1` was meant;
#   * the bare `labels = c(...)` lines follow a trailing `+`, turning the
#     chain into an invalid assignment (`... + labels = c(...)`), so
#     evaluation stops with an error -- they look like leftover edits;
#   * `number` is used as an aesthetic but no such column is shown to exist.
getwd()
setwd("D:/workspace/R_Data_Analysis/part2/stage1 word cloud/190613도전미션")
data1 <- read.csv('마포09번이용현황.csv')
str(data1)
ggplot(bus09, aes(x=정류소명)) +
  geom_point(color='orange', aes(x=number, y=승차인원)) +
  geom_line(color='orange', aes(x=number, y=승차인원)) +
  labels = c(승차인원)
  geom_point(color='blue', aes(x=number, y=하차인원)) +
  geom_line(color='blue', aes(x=number, y=하차인원)) +
  labels = c(하차인원)
# geom_bar(stat='identity')+
#   geom_text(aes(x=승차인원, y='',label=v1*10, group=표시과목,family = 'NanumGothic'))+   #familly는 데이터안에 들어가는 글씨
#   theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1,
#                                  colour="black", size=8,family = 'NanumGothic')) +    #angle-제목 기울기,familly-x.y축글씨
#   ggtitle('강남구 병원 수') +
#   theme(plot.title = element_text(face = "bold", hjust = 0.7, vjust=1,   #hjust-제목좌우위치,vjust-제목상하위치
#                  size = 15, color = "darkgreen",family = 'NanumGothic')) +
#   geom_hline(yintercept=a1, color='purple', linetype = 'dashed')
| /part2/stage1 word cloud/190613도전미션/마포09번이용현황.R | no_license | hiyoung93/R_Lecture | R | false | false | 1,181 | r | getwd()
setwd("D:/workspace/R_Data_Analysis/part2/stage1 word cloud/190613도전미션")
data1 <- read.csv('마포09번이용현황.csv')
str(data1)
ggplot(bus09, aes(x=정류소명)) +
geom_point(color='orange', aes(x=number, y=승차인원)) +
geom_line(color='orange', aes(x=number, y=승차인원)) +
labels = c(승차인원)
geom_point(color='blue', aes(x=number, y=하차인원)) +
geom_line(color='blue', aes(x=number, y=하차인원)) +
labels = c(하차인원)
# geom_bar(stat='identity')+
# geom_text(aes(x=승차인원, y='',label=v1*10, group=표시과목,family = 'NanumGothic'))+ #familly는 데이터안에 들어가는 글씨
# theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1,
# colour="black", size=8,family = 'NanumGothic')) + #angle-제목 기울기,familly-x.y축글씨
# ggtitle('강남구 병원 수') +
# theme(plot.title = element_text(face = "bold", hjust = 0.7, vjust=1, #hjust-제목좌우위치,vjust-제목상하위치
# size = 15, color = "darkgreen",family = 'NanumGothic')) +
# geom_hline(yintercept=a1, color='purple', linetype = 'dashed')
|
library(shiny)
library(shinydashboard)
library(shinycssloaders)
# Shiny UI: grade-simulation and grade-statistics app with a Slovenian /
# English language toggle ("jezik"); the statistics content is shown only
# when the language is Slovenian, via conditionalPanel(input.jezik == 'sl').
# Define UI for application that draws a histogram
shinyUI(fluidPage(#theme = "bootstrap.css",
  radioButtons(inputId = "jezik", label = "",
               choices = c("Slovensko" = "sl", "English" = "en"),
               selected = "sl"),
  tabsetPanel(
  tabPanel(
    title="Simulacija ocene/Grade simulation",
    fluid = TRUE,
    #titlePanel("Simulacija ocene velja samo za SPROTNO delo v tekočem kvartalu!"),
  # Sidebar with a slider input for the number of bins
  sidebarLayout(
    sidebarPanel(
      #helpText("Z drsnikom nastavite število točk v posamezni kategoriji!"),
      # The three score sliders are rendered server-side (uiOutput) and
      # wrapped in loading spinners.
      withSpinner(uiOutput("TV")),
      withSpinner(uiOutput("TP")),
      withSpinner(uiOutput("TK"))
      # sliderInput(
      #   "animation",
      #   "Avtomatska simulacija",
      #   1,
      #   10,
      #   1,
      #   step = 1,
      #   animate = animationOptions(interval = 3000, loop = F)
      # ),
      # p(
      #   "V primeru lenobe lahko kliknete gumb play in vam bo program sam premaknil drsnike 10x! :)"
      # )
    ),
    # Show values
    mainPanel(tableOutput("values"),
              textOutput("ocena"))
  ),
  conditionalPanel(condition = "input.jezik == 'sl'",
  fluidRow(tabBox(
    tabPanel(
      "Tocke vaje",
      numericInput("mTV", label = "Max tocke pri vajah",
                   value = 70)
    ),
    tabPanel(
      "Tocke IP",
      numericInput("mIP", label = "Max tocke pri IP",
                   value = 20)
    ),
    tabPanel(
      "Tocke kolokvijev",
      numericInput("mKO", label = "Max tocke pri obeh kolokvijih",
                   value = 42)
    )
  ))
  ) #conditional panel
  ), #TabPanelSimulacijaOcene
  tabPanel(
    "Statistika ocen (works only for Slovenian language)",
    fluid = TRUE,
    conditionalPanel(
      condition = "input.jezik == 'sl'",
  fluidRow(
    column(4,offset = 2,
           sliderInput("Obdobje",
                       "Izberite obdobje",
                       min=2006,
                       max=2016,
                       value = c(2009,2013),
                       sep = ""
           )
    ) #column
  ), #fluidrow
  h3(textOutput("naslov")),
  hr(),
  # Here a new fluidRow is added that will hold the tabsets
  fluidPage(
    tabsetPanel(
      tabPanel("Graf porazdelitve",
               fluid = TRUE,
               plotOutput('analiza')
      ), #tabpanel grafporazdelitve
      tabPanel("Osnovna statistika za celoten niz podatkov",
               fluid = TRUE,
               tableOutput("tabelavse")
      ), #Osnovna statistika za celoten niz podatkov
      tabPanel("Grafi porazdelitve po letih in predmetih",
               fluid = TRUE,
               withSpinner(plotOutput('poletih'))
      ), #grafpoletih
      tabPanel("Statistika po letih in predmetih",
               fluid = TRUE,
               mainPanel(
                 h4("VS2005"),
                 withSpinner(tableOutput('tabelapoletihVS2005')),
                 h4("VS2013"),
                 withSpinner(tableOutput('tabelapoletihVS2013')),
                 h4("UN2005"),
                 withSpinner(tableOutput('tabelapoletihUN2005')),
                 h4("UN2013"),
                 withSpinner(tableOutput('tabelapoletihUN2013'))
               )
      ) #statistikapoletih
    ) #tabsetpanel na statistiki ocen
  ) #FluidRow podseta
  ) #TabPanelStatistika ocen
 ) #conditionalPanel
 )#TabSetPanel
))
| /PI.R | no_license | Dino314/Lab | R | false | false | 4,459 | r | library(shiny)
library(shinydashboard)
library(shinycssloaders)
# Define UI for application that draws a histogram
shinyUI(fluidPage(#theme = "bootstrap.css",
radioButtons(inputId = "jezik", label = "",
choices = c("Slovensko" = "sl", "English" = "en"),
selected = "sl"),
tabsetPanel(
tabPanel(
title="Simulacija ocene/Grade simulation",
fluid = TRUE,
#titlePanel("Simulacija ocene velja samo za SPROTNO delo v tekočem kvartalu!"),
# Sidebar with a slider input for the number of bins
sidebarLayout(
sidebarPanel(
#helpText("Z drsnikom nastavite število točk v posamezni kategoriji!"),
withSpinner(uiOutput("TV")),
withSpinner(uiOutput("TP")),
withSpinner(uiOutput("TK"))
# sliderInput(
# "animation",
# "Avtomatska simulacija",
# 1,
# 10,
# 1,
# step = 1,
# animate = animationOptions(interval = 3000, loop = F)
# ),
# p(
# "V primeru lenobe lahko kliknete gumb play in vam bo program sam premaknil drsnike 10x! :)"
# )
),
# Show values
mainPanel(tableOutput("values"),
textOutput("ocena"))
),
conditionalPanel(condition = "input.jezik == 'sl'",
fluidRow(tabBox(
tabPanel(
"Tocke vaje",
numericInput("mTV", label = "Max tocke pri vajah",
value = 70)
),
tabPanel(
"Tocke IP",
numericInput("mIP", label = "Max tocke pri IP",
value = 20)
),
tabPanel(
"Tocke kolokvijev",
numericInput("mKO", label = "Max tocke pri obeh kolokvijih",
value = 42)
)
))
) #conditional panel
), #TabPanelSimulacijaOcene
tabPanel(
"Statistika ocen (works only for Slovenian language)",
fluid = TRUE,
conditionalPanel(
condition = "input.jezik == 'sl'",
fluidRow(
column(4,offset = 2,
sliderInput("Obdobje",
"Izberite obdobje",
min=2006,
max=2016,
value = c(2009,2013),
sep = ""
)
) #column
), #fluidrow
h3(textOutput("naslov")),
hr(),
#Tukaj dodam nov fluidrow, ki bo vsebovala tabsete
fluidPage(
tabsetPanel(
tabPanel("Graf porazdelitve",
fluid = TRUE,
plotOutput('analiza')
), #tabpanel grafporazdelitve
tabPanel("Osnovna statistika za celoten niz podatkov",
fluid = TRUE,
tableOutput("tabelavse")
), #Osnovna statistika za celoten niz podatkov
tabPanel("Grafi porazdelitve po letih in predmetih",
fluid = TRUE,
withSpinner(plotOutput('poletih'))
), #grafpoletih
tabPanel("Statistika po letih in predmetih",
fluid = TRUE,
mainPanel(
h4("VS2005"),
withSpinner(tableOutput('tabelapoletihVS2005')),
h4("VS2013"),
withSpinner(tableOutput('tabelapoletihVS2013')),
h4("UN2005"),
withSpinner(tableOutput('tabelapoletihUN2005')),
h4("UN2013"),
withSpinner(tableOutput('tabelapoletihUN2013'))
)
) #statistikapoletih
) #tabsetpanel na statistiki ocen
) #FluidRow podseta
) #TabPanelStatistika ocen
) #conditionalPanel
)#TabSetPanel
))
|
# Produce QC boxplots from Roary pan-genome output tables (*.Rtab).
cat("\n This R script produces boxplots using Roary output Tab Files (*.Rtab).\n\n")
# Set Working Directory
#setwd("~/projects/mgsa/roary/2016-06-07")
# List the Roary "number_of_*.Rtab" files under analysis/
files <- list.files(path="analysis", pattern="number_of_.*\\.Rtab", full.names=TRUE)
# Loading Data into R (one data frame per Rtab file)
ld <- lapply(files, read.delim, header = FALSE)
# name each list item with the filename (sans full path)
names(ld) <- basename(files)
# Exploring Data Visually: 2x2 grid of boxplots, one per file
par(mfcol=c(2,2), cex=0.6)
# lapply is used for its plotting side effect; its return value (boxplot
# stats) also prints at top level -- wrap in invisible() to silence.
lapply(names(ld), function(x) boxplot(ld[[x]], main=x))
# http://stackoverflow.com/questions/14790331/adding-lists-names-as-plot-titles-in-lapply-call-in-r
# Print R version and packages
sessionInfo()
| /scripts/my_roary_Rtab.R | no_license | tsubasaw/mgsa | R | false | false | 682 | r | cat("\n This R script produces boxplots using Roary output Tab Files (*.Rtab).\n\n")
# Set Working Directory
#setwd("~/projects/mgsa/roary/2016-06-07")
# List files in a directory
files <- list.files(path="analysis", pattern="number_of_.*\\.Rtab", full.names=TRUE)
# Loading Data into R
ld <- lapply(files, read.delim, header = FALSE)
# name each list item with the filename (sans full path)
names(ld) <- basename(files)
# Exploring Data Visually
par(mfcol=c(2,2), cex=0.6)
lapply(names(ld), function(x) boxplot(ld[[x]], main=x))
# http://stackoverflow.com/questions/14790331/adding-lists-names-as-plot-titles-in-lapply-call-in-r
# Print R version and packages
sessionInfo()
|
# data-raw/process.R
# Data import and processing pipeline: read the raw CSV/table files and
# store them as package datasets under data/.
clock_prices <- read.csv("data-raw/clock_prices.csv")
auto <- read.table("data-raw/auto.data.txt",header = TRUE)
# overwrite = TRUE (was the unsafe shorthand T, which is reassignable).
# NOTE(review): devtools::use_data() is deprecated in current devtools in
# favour of usethis::use_data() -- confirm the installed version.
devtools::use_data(clock_prices,auto,overwrite = TRUE)
| /data-raw/process.R | no_license | karthickrajas/Lumos | R | false | false | 229 | r | # data-raw/process.R
# Data import and processing pipeline: read raw inputs, save as package data.
clock_prices <- read.csv("data-raw/clock_prices.csv")
auto <- read.table("data-raw/auto.data.txt",header = TRUE)
# overwrite = TRUE (was T); NOTE(review): devtools::use_data() is deprecated
# in current devtools -- prefer usethis::use_data(); confirm installed version.
devtools::use_data(clock_prices,auto,overwrite = TRUE)
|
# Bayesian graph/model selection via stochastic shotgun search (SSS)
# over G-Wishart graphical models, applied to simulated QMSim data.
library(Matrix)
library(BDgraph)
#setwd("WHATEVER")
#####################################################################################################
#####################################################################################################
########------------------------Program to read data from QMSim-----------------------------#########
#####################################################################################################
#####################################################################################################
#--To read genotypes
# NOTE(review): XXX is a placeholder (replicate number, 1 or 2) that must be
# substituted before this template will run.
#XXX=1,2
QTL1=read.table("Pop1_qtl_00XXX.txt",header=F)
QTL1=as.matrix(QTL1[,-1])
# Convert a raw QMSim genotype matrix (two adjacent columns per locus,
# alleles coded 1/2) into one genotype-dosage column per locus, coded
# -1/0/1 as allele1 + allele2 - 3.
#
# @param QTL numeric matrix with an even number of allele columns
#            (columns 2k-1 and 2k hold the two alleles of locus k)
# @return matrix (individuals x loci) of dosages in {-1, 0, 1}
# Vectorized over loci: produces the same result as the original
# element-wise double loop, without R-level iteration.
QTL.Matrix=function(QTL){
  allele1 <- seq(1, ncol(QTL) - 1, 2)   # odd (first-allele) columns
  QTL[, allele1] + QTL[, allele1 + 1] - 3
}
###---Reading phenotypes and total additive effects
#XXX=1,2
data1=read.table("Pop1_data_00XXX.txt",header=T)
#var(cbind(data1$Res,data1$QTL,data1$Phen))
# Genotype dosage matrix (individuals x loci) and its locus count.
W=QTL.Matrix(QTL=QTL1)
nloci=ncol(W)
#########################################################
###-----Create training and testing population-----######
#########################################################
# Generation 3 is held out for testing; all other generations train.
data1.Training=data1[-which(data1$G==3), ]
W.Train=W[-which(data1$G==3), ]
n.train=nrow(W.Train)
data1.Test=data1[which(data1$G==3),]
####---Reading graph files----#####
# NOTE(review): "GraphXXX YYY .txt" contains literal spaces around the YYY
# placeholder -- confirm the real file names before substituting.
GraphXXX_YYY=read.table("GraphXXX YYY .txt",header=F)
###############################################################
########---Functions to build the adjacency matrices----#######
###############################################################
# Build an upper-triangular (directed) 0/1 adjacency matrix from an edge list.
#
# @param Nodes two-column matrix/data.frame; each row (i, j) marks edge i -> j
# @param nloci total number of vertices (matrix dimension)
# @return nloci x nloci matrix with A[i, j] == 1 for every listed edge
# Vectorized via matrix indexing; this also fixes the original's failure
# mode when Nodes has zero rows (1:nrow(Nodes) iterated over c(1, 0)).
Adj=function(Nodes,nloci){
  A <- matrix(0, nrow = nloci, ncol = nloci)
  A[cbind(Nodes[, 1], Nodes[, 2])] <- 1
  A
}
#---This second version builds a symmetric matrix instead of an upper triangular one
# Build a symmetric 0/1 adjacency matrix with unit diagonal from an edge list.
#
# @param Nodes two-column matrix/data.frame of undirected edges (i, j)
# @param nloci number of vertices
# @return nloci x nloci matrix, symmetric, with diag == 1
Adj2=function(Nodes,nloci){
  A <- diag(nloci)
  idx <- cbind(Nodes[, 1], Nodes[, 2])
  A[idx] <- 1                                # i -> j
  A[idx[, c(2, 1), drop = FALSE]] <- 1       # j -> i (mirror)
  A
}
#########----Function to do full MC integration (i.e.,NO Laplace approximation)-------#########
# Plain Monte Carlo estimate of the marginal likelihood p(y | graph):
# draws (residual variance, G-Wishart precision matrix, marker effects)
# from their priors and averages the resulting Gaussian likelihood of y.
#
# @param y        phenotype vector
# @param W        genotype (incidence) matrix, one column per locus
# @param Tau, V   scale / df hyper-parameters of the inverse-gamma prior
#                 on the residual variance
# @param nsamples number of Monte Carlo draws
# @param Adjac    adjacency matrix defining the G-Wishart prior support
# @return list(estimate, var.estimate, funct): the MC mean, its squared
#         standard error, and the raw per-draw likelihood values
MCint2=function(y,W,Tau=210,V=4.1,nsamples=2000,Adjac){
  nloci=ncol(W)
  library(mvtnorm)
  library(pscl)
  n=length(y)
  # NOTE(review): I, WTW and Wt below are computed but never used.
  I=diag(n)
  WTW=crossprod(W)
  yt=t(y)
  Wt=t(W)
  funct=matrix(0,nrow=nsamples,ncol=1)
  for(i in 1:nsamples){
    # residual variance ~ inverse-gamma(V/2, Tau/2)
    resvar=rigamma(1,alpha=V/2,beta=Tau/2)
    # precision matrix ~ G-Wishart(b = 10, D = I) restricted to Adjac
    Omega=matrix(round(rgwish(n=1,adj.g=Adjac,b=10,D=diag(nloci)),8),ncol=nloci,byrow=TRUE)
    Sigma=as.matrix(forceSymmetric(chol2inv(chol(Omega))))
    # marker effects ~ N(0, Sigma)
    g=matrix(rmvnorm(1,mean=matrix(0,nrow=nloci,ncol=1),sigma=Sigma,method="svd"))
    #funct[i]=dmvnorm(yt,mean=W%*%g,sigma=diag(resvar,nrow=n),log=TRUE)
    # likelihood (NOT log-likelihood) of y under this prior draw
    funct[i]=dmvnorm(yt,mean=W%*%g,sigma=diag(resvar,nrow=n),log=FALSE)
  }
  estimate=mean(funct)
  var.estimate=var(funct)*(nsamples-1)/(nsamples^2)
  return(list(estimate,var.estimate,funct))
}
#####################################################################################################################
#####################################################################################################################
######-----Syed's function to perform the SSS algorithm of Ben-David et al. (2015)--------###########################
#####################################################################################################################
#####################################################################################################################
# Propose N neighbouring graphs of adjacency matrix D by flipping one
# randomly chosen below-diagonal entry in each copy.
#
# @param D square 0/1 adjacency matrix (current state of the search)
# @param N number of neighbour proposals to generate
# @return list of N adjacency matrices, each one bit-flip away from D
getNgraphs <- function(D,N){
  N1 <- list()
  dimD <- dim(D)
  for(i in 1:N){
    N1[[i]] <- D
    # NOTE(review): floor(runif(1, 1, n)) yields integers in 1..n-1, so the
    # last row is never selected; and when x == 1 the second draw returns
    # y == 1, flipping the diagonal entry D[1, 1] -- likely unintended.
    # sample() would be the idiomatic integer sampler; confirm before
    # changing, since that would alter the random stream.
    x <- floor(runif(1,1,dimD[1]))
    y <- floor(runif(1,1,x))
    N1[[i]][x,y] = ifelse(N1[[i]][x,y]==0,1,0)
  }
  N1
}
# Score each candidate graph by its (scaled) Monte Carlo marginal
# likelihood estimate from MCint2.
#
# @param graphs list of adjacency matrices
# @return numeric vector of scores, one per graph
# NOTE(review): relies on the globals data1.Training and W.Train defined
# earlier in this script; the 100x scaling feeds exp(score^gamma) in getDnew.
getscores <- function(graphs){
  scores <- rep(0,length(graphs))
  for(i in 1:length(graphs)){
    scores[i] <-100*MCint2(y=data1.Training$Phen,W=W.Train,Tau=210,V=4.1,nsamples=2000,Adjac=graphs[[i]])[[1]]
    #scores[i] <- gnorm(adj.g=graphs[[i]],b=10,D=diag(nloci),iter=1000) ###This one is for m>n case only
  }
  scores
}
# Pick the next current graph: draw one index from the candidate list with
# probability proportional to exp(score^gamma), then return that graph.
#
# @param gamma   greediness exponent applied to the scores
# @param scores2 numeric scores of the candidates
# @param graphs2 list of candidate adjacency matrices (same length)
# @return the sampled element of graphs2
getDnew <- function(gamma, scores2, graphs2){
  # Boltzmann-type weights: higher scores get exponentially more mass.
  weights <- exp(scores2^gamma)
  probs <- weights / sum(weights)
  # Inverse-CDF draw with a single uniform, capped at the last index.
  u <- runif(1, 0, 1)
  pick <- min(sum(u > cumsum(probs)) + 1, length(scores2))
  graphs2[[pick]]
}
# One run of the stochastic shotgun search (SSS): starting from D0, repeat
# M rounds of "propose N1 neighbours, score them, move to one drawn with
# probability increasing in score", accumulating every graph and score
# visited along the way.
#
# @param gamma greediness parameter passed to getDnew
# @param M     number of search rounds
#              NOTE(review): assumes M >= 2; with M == 1, 2:M counts down
#              (c(2, 1)) and two extra rounds run.
# @param N1    number of neighbour proposals per round
# @param D0    starting adjacency matrix
# @return list(graphs = all visited graphs, scores = matching scores)
getLk <- function(gamma,M,N1,D0){
  graphs <- getNgraphs(D0,N1)
  scores <- getscores(graphs)
  D0 <- getDnew(gamma,scores,graphs)
  for(i in 2:M){
    graphs2 <- getNgraphs(D0,N1)
    scores2 <- getscores(graphs2)
    D0 <- getDnew(gamma,scores2,graphs2)
    graphs <- append(graphs,graphs2)
    scores <- append(scores,scores2)
  }
  return(list(graphs=graphs, scores=scores))
}
# Run the SSS from each starting graph in the list D and return the single
# highest-scoring graph found across all runs.
#
# @param gamma greediness parameter (see getDnew)
# @param D     list of starting adjacency matrices
# @param M     rounds per run
# @param N1    proposals per round
# @return the visited adjacency matrix with the maximum score
Largestscoregraph <- function(gamma,D,M,N1){
  L <- list()
  for(i in 1:length(D)){
    newL <- getLk(gamma,M,N1,D[[i]])
    L$graphs <- append(newL$graphs,L$graphs)
    L$scores <- append(newL$scores,L$scores)
  }
  L$graphs[[which.max(L$scores)]]
}
####---Example to perform the SSS algorithm for a particular k----####
# NOTE(review): XXX and YYY are placeholders; this template is not runnable
# as-is (GraphXXX_YYY / ScoreXXX_YYY / toString(XXX) must be substituted).
Adjac=Adj(Nodes=GraphXXX_YYY,nloci=nloci)
ScoreXXX_YYY=getLk(gamma=0.5,M=3,N1=10,D0=Adjac)
save(ScoreXXX_YYY,file=paste("score",toString(XXX),toString(YYY),".rdata",sep=""))
####--So, this gives L(k) for a given k, k=1,2,...,15. Instead of using Largestscoregraph,
####--we run different batches of graphs in parallel, this will be faster.
| /Carlos-collab/Model_selection_Scenario_1/Model_sel_Bayes_SSS.R | no_license | shr264/Rcode | R | false | false | 5,773 | r |
library(Matrix)
library(BDgraph)
#setwd("WHATEVER")
#####################################################################################################
#####################################################################################################
########------------------------Program to read data from QMSim-----------------------------#########
#####################################################################################################
#####################################################################################################
#--To read genotypes
#XXX=1,2
QTL1=read.table("Pop1_qtl_00XXX.txt",header=F)
QTL1=as.matrix(QTL1[,-1])
# Convert a raw QMSim genotype matrix (two adjacent columns per locus,
# alleles coded 1/2) into one genotype-dosage column per locus, coded
# -1/0/1 as allele1 + allele2 - 3.
#
# @param QTL numeric matrix with an even number of allele columns
#            (columns 2k-1 and 2k hold the two alleles of locus k)
# @return matrix (individuals x loci) of dosages in {-1, 0, 1}
# Vectorized over loci: produces the same result as the original
# element-wise double loop, without R-level iteration.
QTL.Matrix=function(QTL){
  allele1 <- seq(1, ncol(QTL) - 1, 2)   # odd (first-allele) columns
  QTL[, allele1] + QTL[, allele1 + 1] - 3
}
###---Reading phenotypes and total additive effects
#XXX=1,2
data1=read.table("Pop1_data_00XXX.txt",header=T)
#var(cbind(data1$Res,data1$QTL,data1$Phen))
W=QTL.Matrix(QTL=QTL1)
nloci=ncol(W)
#########################################################
###-----Create training and testing population-----######
#########################################################
data1.Training=data1[-which(data1$G==3), ]
W.Train=W[-which(data1$G==3), ]
n.train=nrow(W.Train)
data1.Test=data1[which(data1$G==3),]
####---Reading graph files----#####
GraphXXX_YYY=read.table("GraphXXX YYY .txt",header=F)
###############################################################
########---Functions to build the adjacency matrices----#######
###############################################################
# Build an upper-triangular (directed) 0/1 adjacency matrix from an edge list.
#
# @param Nodes two-column matrix/data.frame; each row (i, j) marks edge i -> j
# @param nloci total number of vertices (matrix dimension)
# @return nloci x nloci matrix with A[i, j] == 1 for every listed edge
# Vectorized via matrix indexing; this also fixes the original's failure
# mode when Nodes has zero rows (1:nrow(Nodes) iterated over c(1, 0)).
Adj=function(Nodes,nloci){
  A <- matrix(0, nrow = nloci, ncol = nloci)
  A[cbind(Nodes[, 1], Nodes[, 2])] <- 1
  A
}
#---This second version builds a symmetric matrix instead of an upper triangular one
# Build a symmetric 0/1 adjacency matrix with unit diagonal from an edge list.
#
# @param Nodes two-column matrix/data.frame of undirected edges (i, j)
# @param nloci number of vertices
# @return nloci x nloci matrix, symmetric, with diag == 1
Adj2=function(Nodes,nloci){
  A <- diag(nloci)
  idx <- cbind(Nodes[, 1], Nodes[, 2])
  A[idx] <- 1                                # i -> j
  A[idx[, c(2, 1), drop = FALSE]] <- 1       # j -> i (mirror)
  A
}
#########----Function to do full MC integration (i.e.,NO Laplace approximation)-------#########
MCint2=function(y,W,Tau=210,V=4.1,nsamples=2000,Adjac){
nloci=ncol(W)
library(mvtnorm)
library(pscl)
n=length(y)
I=diag(n)
WTW=crossprod(W)
yt=t(y)
Wt=t(W)
funct=matrix(0,nrow=nsamples,ncol=1)
for(i in 1:nsamples){
resvar=rigamma(1,alpha=V/2,beta=Tau/2)
Omega=matrix(round(rgwish(n=1,adj.g=Adjac,b=10,D=diag(nloci)),8),ncol=nloci,byrow=TRUE)
Sigma=as.matrix(forceSymmetric(chol2inv(chol(Omega))))
g=matrix(rmvnorm(1,mean=matrix(0,nrow=nloci,ncol=1),sigma=Sigma,method="svd"))
#funct[i]=dmvnorm(yt,mean=W%*%g,sigma=diag(resvar,nrow=n),log=TRUE)
funct[i]=dmvnorm(yt,mean=W%*%g,sigma=diag(resvar,nrow=n),log=FALSE)
}
estimate=mean(funct)
var.estimate=var(funct)*(nsamples-1)/(nsamples^2)
return(list(estimate,var.estimate,funct))
}
#####################################################################################################################
#####################################################################################################################
######-----Syed's function to perform the SSS algorithm of Ben-David et al. (2015)--------###########################
#####################################################################################################################
#####################################################################################################################
getNgraphs <- function(D,N){
N1 <- list()
dimD <- dim(D)
for(i in 1:N){
N1[[i]] <- D
x <- floor(runif(1,1,dimD[1]))
y <- floor(runif(1,1,x))
N1[[i]][x,y] = ifelse(N1[[i]][x,y]==0,1,0)
}
N1
}
# Score each candidate graph with the Monte Carlo marginal-likelihood estimate
# from MCint2(), scaled by 100.
#
# NOTE(review): this depends on the globals data1.Training and W.Train and on
# hard-coded Tau/V/nsamples values -- confirm these match the surrounding
# analysis; consider passing them as arguments.
getscores <- function(graphs){
scores <- rep(0,length(graphs))
# NOTE(review): 1:length(graphs) misbehaves for an empty list; a non-empty
# list is assumed here.
for(i in 1:length(graphs)){
scores[i] <-100*MCint2(y=data1.Training$Phen,W=W.Train,Tau=210,V=4.1,nsamples=2000,Adjac=graphs[[i]])[[1]]
#scores[i] <- gnorm(adj.g=graphs[[i]],b=10,D=diag(nloci),iter=1000) ###This one is for m>n case only
}
scores
}
# Sample one graph from a candidate list, with selection probabilities
# proportional to exp(score^gamma), via a single inverse-CDF draw.
#
# gamma:   concentration parameter applied to the scores.
# scores2: numeric scores, one per candidate graph.
# graphs2: list of candidate graphs (same length as scores2).
#
# Returns the selected graph.
getDnew <- function(gamma, scores2, graphs2) {
  weights <- exp(scores2 ^ gamma)
  probs <- weights / sum(weights)
  cdf <- cumsum(probs)
  draw <- runif(1, 0, 1)
  # Index of the first CDF bin exceeding the draw, capped at the list length.
  chosen <- min(sum(draw > cdf) + 1, length(scores2))
  graphs2[[chosen]]
}
# Run M rounds of the stochastic shotgun search (SSS) starting from graph D0.
# Each round proposes N1 neighbours of the current graph, scores them, and
# resamples the next current graph with getDnew().
#
# gamma: annealing/concentration parameter passed to getDnew().
# M:     number of search rounds.
# N1:    number of neighbour graphs proposed per round.
# D0:    starting adjacency matrix.
#
# Returns list(graphs = all proposed graphs, scores = their scores).
getLk <- function(gamma, M, N1, D0) {
  graphs <- getNgraphs(D0, N1)
  scores <- getscores(graphs)
  D0 <- getDnew(gamma, scores, graphs)
  # Bug fix: the original looped `for (i in 2:M)`, which runs twice (i = 2, 1)
  # when M == 1 instead of zero times; seq_len(M - 1) handles M == 1 correctly.
  for (i in seq_len(M - 1)) {
    graphs2 <- getNgraphs(D0, N1)
    scores2 <- getscores(graphs2)
    D0 <- getDnew(gamma, scores2, graphs2)
    graphs <- append(graphs, graphs2)
    scores <- append(scores, scores2)
  }
  return(list(graphs = graphs, scores = scores))
}
# Run getLk() from every starting graph in D and return the single graph with
# the highest score across all runs.
#
# gamma: passed through to getLk()/getDnew().
# D:     list of starting adjacency matrices.
# M, N1: rounds and neighbours-per-round for each getLk() run.
#
# Returns the highest-scoring graph found.
Largestscoregraph <- function(gamma, D, M, N1) {
  L <- list()
  # seq_along() is safe for an empty starting list, unlike the original
  # 1:length(D), which would iterate c(1, 0).
  for (i in seq_along(D)) {
    newL <- getLk(gamma, M, N1, D[[i]])
    # Newest results are prepended, preserving the original accumulation order.
    L$graphs <- append(newL$graphs, L$graphs)
    L$scores <- append(newL$scores, L$scores)
  }
  L$graphs[[which.max(L$scores)]]
}
####---Example to perform the SSS algorithm for a particular k----####
# Build the adjacency matrix for the current template graph.
# NOTE(review): this calls Adj(), not the Adj2() defined above -- Adj is
# presumably defined elsewhere in the file; confirm which version is intended.
# XXX/YYY are template placeholders substituted per batch before running.
Adjac=Adj(Nodes=GraphXXX_YYY,nloci=nloci)
ScoreXXX_YYY=getLk(gamma=0.5,M=3,N1=10,D0=Adjac)
save(ScoreXXX_YYY,file=paste("score",toString(XXX),toString(YYY),".rdata",sep=""))
####--So, this gives L(k) for a given k, k=1,2,...,15. Instead of using Largestscoregraph,
####--we run different batches of graphs in parallel, this will be faster.
|
# Parse the daily Lake Mendota buoy temperature file into the standard
# (DateTime, Depth, temp, UWID) format, write it as an RDS file, and update
# the scipiper indicator file.
parse_mendota_daily_buoy <- function(inind, outind) {
  in_path <- sc_retrieve(inind, remake_file = '6_temp_coop_fetch_tasks.yml')
  out_path <- as_data_file(outind)
  raw <- data.table::fread(in_path)
  # Flag code definitions are in the EML metadata on the UW limno data site:
  # https://lter.limnology.wisc.edu/data
  bad_flags <- c("A11N", "D", "H")
  cleaned <- raw %>%
    filter(!flag_wtemp %in% bad_flags) %>%
    rename(DateTime = sampledate, Depth = depth, temp = wtemp) %>%
    mutate(DateTime = as.Date(DateTime), UWID = "ME") %>%
    select(DateTime, Depth, temp, UWID)
  saveRDS(object = cleaned, file = out_path)
  sc_indicate(ind_file = outind, data_file = out_path)
}
# Parse the NTL long-term lake temperature file into the standard
# (UWID, DateTime, Depth, temp) format, save it as an RDS file, and update
# the scipiper indicator file.
parse_long_term_ntl <- function(inind, outind) {
  in_path <- sc_retrieve(inind, remake_file = '6_temp_coop_fetch_tasks.yml')
  out_path <- as_data_file(outind)
  wanted_cols <- c("lakeid", "sampledate", "depth", "wtemp")
  raw <- data.table::fread(in_path, select = wanted_cols)
  cleaned <- raw %>%
    rename(UWID = lakeid, DateTime = sampledate, Depth = depth, temp = wtemp) %>%
    mutate(DateTime = as.Date(DateTime))
  saveRDS(object = cleaned, file = out_path)
  scipiper::sc_indicate(outind, data_file = out_path)
}
# Parse the long-format historical Lake Mendota temperature file into the
# standard (DateTime, Depth, temp, UWID) format, dropping sediment ("MUD")
# depth records, then save as RDS and update the scipiper indicator file.
parse_mendota_temps_long <- function(inind, outind) {
  in_path <- sc_retrieve(inind, remake_file = '6_temp_coop_fetch_tasks.yml')
  out_path <- as_data_file(outind)
  raw <- data.table::fread(in_path, select = c("sampledate", "depth", "watertemp"))
  cleaned <- raw %>%
    mutate(UWID = "ME") %>%
    rename(DateTime = sampledate, Depth = depth, temp = watertemp) %>%
    filter(Depth != "MUD") %>%
    mutate(DateTime = as.Date(DateTime), Depth = as.numeric(Depth))
  saveRDS(object = cleaned, file = out_path)
  sc_indicate(outind, data_file = out_path)
}
| /7a_temp_coop_munge/src/data_parsers/parse_wilter_files.R | no_license | wdwatkins/lake-temperature-model-prep | R | false | false | 1,844 | r | parse_mendota_daily_buoy <- function(inind, outind) {
infile <- sc_retrieve(inind, remake_file = '6_temp_coop_fetch_tasks.yml')
outfile <- as_data_file(outind)
raw_file <- data.table::fread(infile)
#flag code definitions are in the EML format on the UW limno data site
#https://lter.limnology.wisc.edu/data
clean <- raw_file %>% filter(!flag_wtemp %in% c("A11N", "D", "H")) %>%
rename(DateTime = sampledate, Depth = depth, temp = wtemp) %>%
mutate(DateTime = as.Date(DateTime), UWID = "ME") %>%
select(DateTime, Depth, temp, UWID)
saveRDS(object = clean, file = outfile)
sc_indicate(ind_file = outind, data_file = outfile)
}
parse_long_term_ntl <- function(inind, outind) {
infile <- sc_retrieve(inind, remake_file = '6_temp_coop_fetch_tasks.yml')
outfile <- as_data_file(outind)
raw_file <- data.table::fread(infile, select = c("lakeid", "sampledate",
"depth", "wtemp"))
clean <- raw_file %>% rename(UWID = lakeid, DateTime = sampledate, Depth = depth,
temp = wtemp) %>% mutate(DateTime = as.Date(DateTime))
saveRDS(object = clean, file = outfile)
scipiper::sc_indicate(outind, data_file = outfile)
}
parse_mendota_temps_long <- function(inind, outind) {
infile <- sc_retrieve(inind, remake_file = '6_temp_coop_fetch_tasks.yml')
outfile <- as_data_file(outind)
raw_file <- data.table::fread(infile, select = c("sampledate", "depth", "watertemp"))
clean <- raw_file %>% mutate(UWID = "ME") %>% rename(DateTime = sampledate,
Depth = depth, temp = watertemp) %>%
filter(Depth != "MUD") %>%
mutate(DateTime = as.Date(DateTime), Depth = as.numeric(Depth))
saveRDS(object = clean, file = outfile)
sc_indicate(outind, data_file = outfile)
}
|
#' Get variable genes
#'
#' This function finds the variable genes in an SCE object.
#' The means and variances of the genes are calculated from the raw counts
#' of the cluster set. Then, the means and variances are log-transformed
#' after adding a constant of 1. A loess regression line is fit between
#' the log mean and log variance, and only the top genes
#' ranked by residual are kept as the variable genes used to initialize the clusters.
#' The number of genes is specified with \code{n_genes}.
#' The span for the loess function is given by \code{lss}
#' (default is 0.3).
#'
#' @param x An SCE object.
#' @param n_genes Number of variable genes to return.
#' @param lss Numeric value of the span parameter of the loess regression.
#' @param droplets.use Vector of droplet IDs to use for getting variable
#' genes. Default is to use the test set.
#' @param threads Number of threads for parallel execution. Default is 1.
#' @param verbose Verbosity.
#'
#' @return An SCE object
#' @importFrom Matrix rowMeans
#' @importFrom stats loess
#' @export
get_var_genes <- function(x,
                          n_genes=2000,
                          lss=0.3,
                          droplets.use = NULL,
                          threads = 1,
                          verbose=FALSE){
    if (verbose) message("getting variable genes")
    if (is.null(droplets.use)){
        droplets.use <- rownames(x@test_data)
    }
    if (sum(x@gene_data$exprsd) == 0) x <- filter_genes(x)
    # Subset the counts matrix once (the original subset it three times, which
    # is expensive for large sparse matrices). drop = FALSE keeps a matrix
    # even when only one droplet is selected.
    cts <- x@counts[x@gene_data$exprsd, droplets.use, drop = FALSE]
    gene_means <- rowMeans(cts)
    gene_names <- rownames(cts)
    log_mean <- log10(gene_means + 1)
    log_var <- log10(fast_varCPP(t(cts), gene_means, threads = threads) + 1)
    # Rank genes by residual from a loess fit of log variance on log mean.
    fit <- loess(log_var ~ log_mean, span=lss)
    rsd <- log_var - fit$fitted
    # Guard against n_genes exceeding the number of expressed genes (the
    # original produced NA indices here and filtered them out afterwards).
    n_top <- min(n_genes, length(rsd))
    topi <- order(rsd, decreasing=TRUE)[seq_len(n_top)]
    vg <- gene_names[topi]
    datf <- data.frame(mean=log_mean, var=log_var,
                       fit=fit$fitted, rsd=rsd)
    rownames(datf) <- gene_names
    x@vg <- vg[!is.na(vg)]
    x@vg_info <- datf
    # Report the number of genes actually kept (may be < n_genes).
    if (verbose) message("found ", length(x@vg), " variable genes")
    return(x)
}
| /R/var.R | no_license | marcalva/diem | R | false | false | 2,206 | r |
#' Get variable genes
#'
#' This function finds the variable genes in an SCE object.
#' The means and variances of the genes are calculated from the raw counts
#' of the cluster set. Then, the means and variances are log-transformed
#' after adding a constant of 1. A loess regression line is fit between
#' the log counts and log variance, and the only top genes
#' ranked by residual are variable genes used to initialize the clusters.
#' The number of genes is specified with \code{n_genes}.
#' The span for the loess function is given by \code{lss}
#' (default is 0.3).
#'
#' @param x An SCE object.
#' @param n_genes Number of variable genes to return.
#' @param lss Numeric value of the span parameter of the loess regression.
#' @param droplets.use Vector of droplet IDs to use for getting variable
#' genes. Default is to use the test set.
#' @param threads Number of threads for parallel execution. Default is 1.
#' @param verbose Verbosity.
#'
#' @return An SCE object
#' @importFrom Matrix rowMeans
#' @importFrom stats loess
#' @export
get_var_genes <- function(x,
n_genes=2000,
lss=0.3,
droplets.use = NULL,
threads = 1,
verbose=FALSE){
if (verbose) message("getting variable genes")
if (is.null(droplets.use)){
droplets.use <- rownames(x@test_data)
}
if (sum(x@gene_data$exprsd) == 0) x <- filter_genes(x)
gene_means <- rowMeans(x@counts[x@gene_data$exprsd, droplets.use])
gene_names <- rownames(x@counts[x@gene_data$exprsd, droplets.use])
log_mean <- log10(gene_means + 1)
log_var <- log10(fast_varCPP(t(x@counts[x@gene_data$exprsd, droplets.use]), gene_means, threads = threads) + 1)
fit <- loess(log_var ~ log_mean, span=lss)
rsd <- log_var - fit$fitted
topi <- order(rsd, decreasing=TRUE)[1:n_genes]
vg <- gene_names[topi]
datf <- data.frame(mean=log_mean, var=log_var,
fit=fit$fitted, rsd=rsd)
rownames(datf) <- gene_names
x@vg <- vg[!is.na(vg)]
x@vg_info <- datf
if (verbose) message("found ", n_genes, " variable genes")
return(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.lakemorpho.R
\name{plot.lakeMorpho}
\alias{plot.lakeMorpho}
\title{Default plotting of a lakeMorpho object}
\usage{
\method{plot}{lakeMorpho}(x, dist = FALSE, length = TRUE, width = TRUE, fetch = FALSE, ...)
}
\arguments{
\item{x}{input lakeMorpho class to plot}
\item{dist}{Boolean to control plotting of in lake distance}
\item{length}{Boolean to control plotting of max lake length line}
\item{width}{Boolean to control plotting of max lake width line}
\item{fetch}{Boolean to control plotting of fetch lines}
\item{...}{allows for passing of other plot parameters}
}
\description{
Plots the lakeMorpho class object showing the lake, surrounding topography, and in-lake distance
}
\examples{
data(lakes)
plot(inputLM)
}
| /man/plot.lakeMorpho.Rd | no_license | cran/lakemorpho | R | false | true | 841 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.lakemorpho.R
\name{plot.lakeMorpho}
\alias{plot.lakeMorpho}
\title{Default plotting of a lakeMorpho object}
\usage{
\method{plot}{lakeMorpho}(x, dist = FALSE, length = TRUE, width = TRUE, fetch = FALSE, ...)
}
\arguments{
\item{x}{input lakeMorpho class to plot}
\item{dist}{Boolean to control plotting of in lake distance}
\item{length}{Boolean to control plotting of max lake length line}
\item{width}{Boolean to control plotting of max lake width line}
\item{fetch}{Boolean to control plotting of fetch lines}
\item{...}{allows for passing of other plot parameters}
}
\description{
Plots the lakeMorpho class object showing the lake, surrounding topography, and in-lake distance
}
\examples{
data(lakes)
plot(inputLM)
}
|
# Auto-extracted example code for psych::partial.r (run interactively;
# requires the psych package and its bundled Tal.Or data set).
library(psych)
### Name: partial.r
### Title: Find the partial correlations for a set (x) of variables with
###   set (y) removed.
### Aliases: partial.r
### Keywords: multivariate
### ** Examples
jen <- make.hierarchical()    #make up a correlation matrix
lowerMat(jen[1:5,1:5])
par.r <- partial.r(jen,c(1,3,5),c(2,4))
lowerMat(par.r)
cp <- corr.p(par.r,n=98)  #assumes the jen data based upon n =100.
print(cp,short=FALSE)  #show the confidence intervals as well
#partial all from all correlations.
lowerMat( partial.r(jen))
#Consider the Tal.Or data set.
lowerCor(Tal.Or)
#partial gender and age from these relations (they hardly change)
partial.r(Tal.Or,1:4,5:6)
#find the partial correlations between the first three variables and the DV (reaction)
round(partial.r(Tal.Or[1:4])[4,1:3],2) #The partial correlations with the criterion
| /data/genthat_extracted_code/psych/examples/partial.r.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 849 | r | library(psych)
### Name: partial.r
### Title: Find the partial correlations for a set (x) of variables with
### set (y) removed.
### Aliases: partial.r
### Keywords: multivariate
### ** Examples
jen <- make.hierarchical() #make up a correlation matrix
lowerMat(jen[1:5,1:5])
par.r <- partial.r(jen,c(1,3,5),c(2,4))
lowerMat(par.r)
cp <- corr.p(par.r,n=98) #assumes the jen data based upon n =100.
print(cp,short=FALSE) #show the confidence intervals as well
#partial all from all correlations.
lowerMat(partial.r(jen))
#Consider the Tal.Or data set.
lowerCor(Tal.Or)
#partial gender and age from these relations (they hardly change)
partial.r(Tal.Or,1:4,5:6)
#find the partial correlations between the first three variables and the DV (reaction)
round(partial.r(Tal.Or[1:4])[4,1:3],2) #The partial correlations with the criterion
|
# Install and load packages
# NOTE(review): installing packages (and install_github) at run time requires
# network access and makes runs non-reproducible; consider pinning versions.
package_names <- c("survey","dplyr","foreign","devtools")
# NOTE(review): installed.packages() returns a matrix; the %in% test scans all
# of its cells, which works here only because package names are distinctive.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# "adjust" centers lonely (single-PSU) strata at the sample mean instead of erroring.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Employment Status
if(year == 1996)
FYC <- FYC %>% mutate(EMPST53 = EMPST96, EMPST42 = EMPST2, EMPST31 = EMPST1)
FYC <- FYC %>%
mutate_at(vars(EMPST53, EMPST42, EMPST31), funs(replace(., .< 0, NA))) %>%
mutate(employ_last = coalesce(EMPST53, EMPST42, EMPST31))
FYC <- FYC %>% mutate(
employed = 1*(employ_last==1) + 2*(employ_last > 1),
employed = replace(employed, is.na(employed) & AGELAST < 16, 9),
employed = recode_factor(employed, .default = "Missing", .missing = "Missing",
"1" = "Employed",
"2" = "Not employed",
"9" = "Inapplicable (age < 16)"))
# Education
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
if(year >= 2012 & year < 2016){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing", .missing = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(education,employed,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
RX <- read_sas('C:/MEPS/.RX..sas7bdat')
DVT <- read_sas('C:/MEPS/.DV..sas7bdat')
IPT <- read_sas('C:/MEPS/.IP..sas7bdat')
ERT <- read_sas('C:/MEPS/.ER..sas7bdat')
OPT <- read_sas('C:/MEPS/.OP..sas7bdat')
OBV <- read_sas('C:/MEPS/.OB..sas7bdat')
HHT <- read_sas('C:/MEPS/.HH..sas7bdat')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
results <- svyby(~XP.yy.X, FUN=svymean, by = ~education + employed, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
| /mepstrends/hc_use/json/code/r/meanEVT__education__employed__.r | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 3,983 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Employment Status
if(year == 1996)
FYC <- FYC %>% mutate(EMPST53 = EMPST96, EMPST42 = EMPST2, EMPST31 = EMPST1)
FYC <- FYC %>%
mutate_at(vars(EMPST53, EMPST42, EMPST31), funs(replace(., .< 0, NA))) %>%
mutate(employ_last = coalesce(EMPST53, EMPST42, EMPST31))
FYC <- FYC %>% mutate(
employed = 1*(employ_last==1) + 2*(employ_last > 1),
employed = replace(employed, is.na(employed) & AGELAST < 16, 9),
employed = recode_factor(employed, .default = "Missing", .missing = "Missing",
"1" = "Employed",
"2" = "Not employed",
"9" = "Inapplicable (age < 16)"))
# Education
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
if(year >= 2012 & year < 2016){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing", .missing = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(education,employed,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
RX <- read_sas('C:/MEPS/.RX..sas7bdat')
DVT <- read_sas('C:/MEPS/.DV..sas7bdat')
IPT <- read_sas('C:/MEPS/.IP..sas7bdat')
ERT <- read_sas('C:/MEPS/.ER..sas7bdat')
OPT <- read_sas('C:/MEPS/.OP..sas7bdat')
OBV <- read_sas('C:/MEPS/.OB..sas7bdat')
HHT <- read_sas('C:/MEPS/.HH..sas7bdat')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
results <- svyby(~XP.yy.X, FUN=svymean, by = ~education + employed, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
|
## These functions save runtime by computing the matrix inverse only when
## necessary.
## Function makeCacheMatrix makes a cached copy of matrix x.
## The returned object is a "rich" matrix, similar to a class with methods.
## Methods are:
### getting the original matrix;
### solving and caching the solved (inverse) matrix;
### getting the solved matrix from the cache.
## I think the "set" method from the makeVector example is unnecessary,
## so I skipped it.
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of accessors:
##   get()        -- the wrapped matrix
##   setsolve(v)  -- store a computed inverse in the cache
##   getsolve()   -- retrieve the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # cache lives in this closure's environment
  list(
    get = function() x,
    setsolve = function(value) cached_inverse <<- value,
    getsolve = function() cached_inverse
  )
}
## Return the inverse of the matrix wrapped by 'x', computing it only once.
## On the first call the inverse is computed with solve() and cached via
## x$setsolve(); subsequent calls return the cached value directly.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
### Trying...
# Ad-hoc smoke test: the cached solve must equal a direct solve().
mx<-round(matrix(abs(rnorm(25)),5)*100)
smx<-solve(mx)
cmx<-makeCacheMatrix(mx)
smx2<-cacheSolve(cmx) # 1st run - computing solution
smx2<-cacheSolve(cmx) # next getting solution from cache
identical(smx,smx2)
# [1] TRUE
| /cachematrix.R | no_license | towije/rprog006 | R | false | false | 1,750 | r | ## These functions saving runtime by computing only when necessary.
## Function makeCacheMatrix makes a copy of x matrix.
## The new matrix is a kind of "rich" type, similar to class with methods.
## Methods are:
### getting oryginal matrix;
### solve and caching solved matrix;
### getting solved matrixh from cache.
## I think the "set" method from makeVector example is unnecessary,
## then I skipped it.
makeCacheMatrix <- function(x = matrix()) {
s <- NULL # clear cache in the global (for this function) env.
get <- function() x # fun. returns oryginal matrix from argument x
setsolve <- function(solve) s <<- solve # fun. stores solution in global s
getsolve <- function() s # fun. returns solved matrix from global s variable
list(get = get,
setsolve = setsolve,
getsolve = getsolve) # defining list (+/- class with methods)
}
## Function checks the existence of solution, computes if necessary, and return it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
s <- x$getsolve() # getting solution from cache of "rich" matrix x
if(!is.null(s)) { # if x is already solved
message("getting cached data") # print message,
return(s) # return cached result, and exit function
} # if not
data <- x$get() # getting oryginal matrix from "get" position of "rich" matrix
s <- solve(data, ...) # run solve function and write solution to s
x$setsolve(s) # cacheing solution from s to global s of "rich" matrix
s # print result
}
### Trying...
mx<-round(matrix(abs(rnorm(25)),5)*100)
smx<-solve(mx)
cmx<-makeCacheMatrix(mx)
smx2<-cacheSolve(cmx) # 1st run - computing solution
smx2<-cacheSolve(cmx) # next getting solution from cache
identical(smx,smx2)
# [1] TRUE
|
#' @name mac-package
#' @aliases mac
#' @aliases macLibrary
#' @docType package
#' @title The mac package
#' @description HCR simulation of mackerel
#' @details Version 0.0.1: 19. June 2014
#' @author Einar Hjorleifsson
#' @seealso link to other documents
NULL | /R/mac.R | no_license | einarhjorleifsson/mac | R | false | false | 264 | r | #' @name mac-package
#' @aliases mac
#' @aliases macLibrary
#' @docType package
#' @title Title mac package
#' @description HCR simulation of mackerel
#' @details Version 0.0.1: 19. June 2014
#' @author Einar Hjorleifsson
#' @seealso link to other documents
NULL |
library(shiny)
library(shinythemes)
library(digest)
library(leaflet)
library(rmarkdown)
library(ggplot2)
library(ggforce)
library(dplyr)
library(tidyr)
library(dygraphs)
library(xts)
library(kwb.pilot)
# By default, the file size limit is 5MB. It can be changed by
# setting this option. Here we'll raise limit to 9MB.
options(shiny.maxRequestSize = 9*1024^2)
# Global settings (for all sessions)
# `<<-` at the top level assigns into the global environment so the value is
# shared by all sessions of this app.
my_theme <<- shinythemes::shinytheme("readable")
#Read user table for deployment from user table
# The presence of tools/userTable.csv switches the app into "login required"
# mode (see the login check inside shinyServer below).
exists_userTable <- file.exists("tools/userTable.csv")
if (exists_userTable) {
userTable <<- read.csv(file = "tools/userTable.csv",
header = TRUE,
sep = ",")
}
#theme <<- "bootstrap.css"
# logo -------------------------------------------------------------------------
# Build the linked AQUANES logo element: an <img> wrapped in an <a> that opens
# the project homepage in a new tab and fires a Google Analytics click event
# named by `label`. When add_div is TRUE the anchor is wrapped in a <div>.
# Extra arguments (...) are passed on to img(), e.g. height = 40.
logo <<- function(src = "aquanes.png",
                  target = "_blank",
                  href = "http://aquanes-h2020.eu",
                  align = "middle",
                  label = "AQUANES_homepage",
                  add_div = TRUE,
                  ...) {
  anchor <- a(
    target = target,
    href = href,
    onclick = sprintf("ga('send', 'event', 'click', 'link', '%s', 1)", label),
    img(src = src, align = align, ...)
  )
  if (add_div) {
    anchor <- div(anchor)
  }
  return(anchor)
}
# siteLogo ---------------------------------------------------------------------
# Site-specific photo/logo for the Berlin-Schoenerlinde page; links to the
# AQUANES site page and is inserted without a wrapping div (add_div = FALSE).
siteLogo <- logo(
src = "berlin_s.jpg",
href = "http://www.aquanes-h2020.eu/Default.aspx?t=1666",
label = "Site12_Berlin_Schoenerlinde",
add_div = FALSE
)
# footer -----------------------------------------------------------------------
# Build the page footer: a small copyright line placed in a fixed-grid row,
# offset by `startCol` columns of the 12-column Bootstrap grid.
footer <- function(startCol = 9,
                   txt = "\u00A9 Kompetenzzentrum Wasser Berlin gGmbH 2017") {
  content <- tags$footer(tags$h6(txt))
  fixedRow(column(width = 12 - startCol, content, offset = startCol))
}
# reference --------------------------------------------------------------------
reference <- tabPanel("Reference", tags$div(siteLogo))
# shinyServer ------------------------------------------------------------------
# Per-session server logic: sources the login helpers and the UI/server
# modules, then renders either the full app or a login form on output$mainPage.
shinyServer(function(input, output, session) {
# Local settings (for each session)
# Tools ----
source("tools/login.R", local = TRUE)
# Modules ----
source("module/timeSeries.R", local = TRUE)
source("module/report.R", local = TRUE)
source("module/site.R", local = TRUE)
source("module/kwb.R", local = TRUE)
# Data ----
#Read user table
# main page ----
output$mainPage <- renderUI({
doLogin()
# NOTE(review): when tools/userTable.csv is absent, exists_userTable is FALSE
# and this condition is TRUE for users whose LoggedIn flag is FALSE, i.e.
# login is effectively bypassed -- confirm this is the intended behaviour for
# deployments without a user table.
if (loginData$LoggedIn == exists_userTable) {
doLogout()
# Wire up the server halves of the modules before building the UI tree.
server_timeSeries(input, output, session)
server_report(input, output, session)
server_site(input, output)
server_kwb(input, output)
div(
class = "",
fluidPage(
fluidRow(column(12, column(4, br(), loginInfo()), br(), br(), logo(align = "right"))),
navbarPage(
title = "Interactive reporting",
windowTitle = "kwb.pilot",
tabPanel(
"Explore", br(),
div(class = " ", ui_timeSeries()),
id = "timeSeries"
),
tabPanel(
"Report", br(),
div(class = " ", ui_report()),
id = "report"),
tabPanel(
"Background", br(),
div(class = " ", reference),
# tags$iframe(src="http://www.aquanes-h2020.eu/Default.aspx?t=1668",
# height = 800,
# width = 1500,
# frameborder = 0,
# seamless = "seamless"),
id = "hintergrund"
),
tabPanel(
"Site", br(),
div(class = " ", ui_site(output)),
id = "site"
),
tabPanel(
"KWB", br(),
div(class = " ", ui_kwb(output)),
id = "kwb"
),
#navbarMenu("More",
# reference,
# ui_kwb(output)),
theme = my_theme,
footer = footer()
)
)
)
} else {
# Not logged in (and a user table exists): show only the login form.
fluidPage(
fluidRow(
column(
1, offset = 5,
br(), br(), br(), br(),
h5("Login"),
loginUI(), br()
)
),
header = tags$style(type = "text/css", "well { width: 100%; }"),
theme = my_theme
)
}
})
})
| /inst/shiny/berlin_s/server.R | permissive | KWB-R/kwb.pilot | R | false | false | 4,467 | r | library(shiny)
library(shinythemes)
library(digest)
library(leaflet)
library(rmarkdown)
library(ggplot2)
library(ggforce)
library(dplyr)
library(tidyr)
library(dygraphs)
library(xts)
library(kwb.pilot)
# By default, the file size limit is 5MB. It can be changed by
# setting this option. Here we'll raise limit to 9MB.
options(shiny.maxRequestSize = 9*1024^2)
# Global settings (for all sessions)
my_theme <<- shinythemes::shinytheme("readable")
#Read user table for deployment from user table
exists_userTable <- file.exists("tools/userTable.csv")
if (exists_userTable) {
userTable <<- read.csv(file = "tools/userTable.csv",
header = TRUE,
sep = ",")
}
#theme <<- "bootstrap.css"
# logo -------------------------------------------------------------------------
logo <<- function
(
src="aquanes.png",
target = "_blank", ### opens new tab/window
href="http://aquanes-h2020.eu",
align="middle",
label = "AQUANES_homepage",
add_div = TRUE,
... ### add. arguments passed to img(), e.g. height=40
)
{
x <- a(
target = target,
href = href,
onclick = sprintf("ga('send', 'event', 'click', 'link', '%s', 1)", label),
img(src = src, align = align, ...)
)
if (add_div) {
x <- div(x)
}
return(x)
}
# siteLogo ---------------------------------------------------------------------
siteLogo <- logo(
src = "berlin_s.jpg",
href = "http://www.aquanes-h2020.eu/Default.aspx?t=1666",
label = "Site12_Berlin_Schoenerlinde",
add_div = FALSE
)
# footer -----------------------------------------------------------------------
footer <- function
(
startCol = 9,
txt = "\u00A9 Kompetenzzentrum Wasser Berlin gGmbH 2017"
)
{
footerTxt <- tags$footer(tags$h6(txt))
x <- fixedRow(column(width = 12-startCol, footerTxt, offset = startCol))
return(x)
}
# reference --------------------------------------------------------------------
reference <- tabPanel("Reference", tags$div(siteLogo))
# shinyServer ------------------------------------------------------------------
# Shiny server entry point. Each session sources the login tool and the UI
# modules locally (so their reactives are per-session), then renders either
# the full reporting UI (when logged in) or a centered login form.
shinyServer(function(input, output, session) {
# Local settings (for each session)
# Tools ----
source("tools/login.R", local = TRUE)
# Modules ----
source("module/timeSeries.R", local = TRUE)
source("module/report.R", local = TRUE)
source("module/site.R", local = TRUE)
source("module/kwb.R", local = TRUE)
# Data ----
#Read user table
# main page ----
# The whole page is rendered reactively so the UI switches between login
# form and application as soon as the login state changes.
output$mainPage <- renderUI({
doLogin()
# NOTE(review): LoggedIn is compared for equality against exists_userTable;
# presumably both are TRUE/FALSE flags set in tools/login.R -- confirm this
# is not meant to be `loginData$LoggedIn && exists_userTable`.
if (loginData$LoggedIn == exists_userTable) {
doLogout()
# Register the server-side logic of each module before building the UI.
server_timeSeries(input, output, session)
server_report(input, output, session)
server_site(input, output)
server_kwb(input, output)
div(
class = "",
fluidPage(
fluidRow(column(12, column(4, br(), loginInfo()), br(), br(), logo(align = "right"))),
navbarPage(
title = "Interactive reporting",
windowTitle = "kwb.pilot",
tabPanel(
"Explore", br(),
div(class = " ", ui_timeSeries()),
id = "timeSeries"
),
tabPanel(
"Report", br(),
div(class = " ", ui_report()),
id = "report"),
tabPanel(
"Background", br(),
div(class = " ", reference),
# tags$iframe(src="http://www.aquanes-h2020.eu/Default.aspx?t=1668",
# height = 800,
# width = 1500,
# frameborder = 0,
# seamless = "seamless"),
id = "hintergrund"
),
tabPanel(
"Site", br(),
div(class = " ", ui_site(output)),
id = "site"
),
tabPanel(
"KWB", br(),
div(class = " ", ui_kwb(output)),
id = "kwb"
),
#navbarMenu("More",
# reference,
# ui_kwb(output)),
theme = my_theme,
footer = footer()
)
)
)
} else {
# Not logged in: show only the login form, roughly centered on the page.
fluidPage(
fluidRow(
column(
1, offset = 5,
br(), br(), br(), br(),
h5("Login"),
loginUI(), br()
)
),
header = tags$style(type = "text/css", "well { width: 100%; }"),
theme = my_theme
)
}
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tm_NN.R
\name{Tm_NN}
\alias{Tm_NN}
\title{Calculate melting temperature using nearest neighbor thermodynamics}
\usage{
Tm_NN(
ntseq,
ambiguous = FALSE,
comSeq = NULL,
shift = 0,
nn_table = c("DNA_NN4", "DNA_NN1", "DNA_NN2", "DNA_NN3", "RNA_NN1", "RNA_NN2",
"RNA_NN3", "R_DNA_NN1"),
tmm_table = "DNA_TMM1",
imm_table = "DNA_IMM1",
de_table = c("DNA_DE1", "RNA_DE1"),
dnac1 = 25,
dnac2 = 25,
selfcomp = FALSE,
Na = 0,
K = 0,
Tris = 0,
Mg = 0,
dNTPs = 0,
saltcorr = c("Schildkraut2010", "Wetmur1991", "SantaLucia1996", "SantaLucia1998-1",
"SantaLucia1998-2", "Owczarzy2004", "Owczarzy2008"),
DMSO = 0,
fmd = 0,
DMSOfactor = 0.75,
fmdfactor = 0.65,
fmdmethod = c("concentration", "molar")
)
}
\arguments{
\item{ntseq}{Sequence (5' to 3') of one strand of the nucleic acid duplex
as string or vector of characters.}
\item{ambiguous}{Ambiguous bases are taken into account to compute the G
and C content when ambiguous is TRUE. Default is FALSE.}
\item{comSeq}{Complementary sequence. The sequence of the template/target in 3'->5' direction}
\item{shift}{Shift of the primer/probe sequence on the template/target sequence, default=0.
for example: when shift=0, the first nucleotide base at 5` end of primer align to first one at 3`
end of template. When shift=-1, the second nucleotide base at 5` end of primer align to first one at 3`
end of template.
When shift=1, the first nucleotide base at 5` end of primer align to second one at 3` end
of template. The shift parameter is necessary to align primer/probe and template/target
if they have different lengths or if they should have dangling ends.}
\item{nn_table}{Thermodynamic NN values, eight tables are implemented.
For DNA/DNA hybridizations:
DNA_NN1,DNA_NN2,DNA_NN3,DNA_NN4
For RNA/RNA hybridizations:
RNA_NN1,RNA_NN2,RNA_NN3
For RNA/DNA hybridizations:
R_DNA_NN1}
\item{tmm_table}{Thermodynamic values for terminal mismatches. Default: DNA_TMM1}
\item{imm_table}{Thermodynamic values for internal mismatches, may include inosine mismatches. Default: DNA_IMM1}
\item{de_table}{Thermodynamic values for dangling ends. DNA_DE1(default) and RNA_DE1}
\item{dnac1}{Concentration of the higher concentrated strand [nM]. Typically this will
be the primer (for PCR) or the probe. Default=25.}
\item{dnac2}{Concentration of the lower concentrated strand [nM].}
\item{selfcomp}{Sequence self-complementary, default=False. If 'True'
the primer is thought binding to itself, thus dnac2 is not considered.}
\item{Na}{Millimolar concentration of Na, default is 0}
\item{K}{Millimolar concentration of K, default is 0}
\item{Tris}{Millimolar concentration of Tris, default is 0}
\item{Mg}{Millimolar concentration of Mg, default is 0}
\item{dNTPs}{Millimolar concentration of dNTPs, default is 0}
\item{saltcorr}{Salt correction method should be chosen when provide 'userset'
Options are "Schildkraut2010", "Wetmur1991","SantaLucia1996","SantaLucia1998-1",
"SantaLucia1998-2","Owczarzy2004","Owczarzy2008". Note that NA means no salt correction.}
\item{DMSO}{Percent DMSO}
\item{fmd}{Formamide concentration in percentage (fmdmethod="concentration") or molar (fmdmethod="molar").}
\item{DMSOfactor}{Coefficient of Tm decrease per percent DMSO. Default=0.75 von Ahsen N (2001) <PMID:11673362>. Other published values are 0.5, 0.6 and 0.675.}
\item{fmdfactor}{Coefficient of Tm decrease per percent formamide. Default=0.65. Several papers report factors between 0.6 and 0.72.}
\item{fmdmethod}{"concentration" method for formamide concentration in percentage and "molar" for formamide concentration in molar.}
}
\description{
Calculate melting temperature using nearest neighbor thermodynamics
}
\details{
DNA_NN1: Breslauer K J (1986) <doi:10.1073/pnas.83.11.3746>
DNA_NN2: Sugimoto N (1996) <doi:10.1093/nar/24.22.4501>
DNA_NN3: Allawi H (1998) <doi:10.1093/nar/26.11.2694>
DNA_NN4: SantaLucia J (2004) <doi:10.1146/annurev.biophys.32.110601.141800>
RNA_NN1: Freier S (1986) <doi:10.1073/pnas.83.24.9373>
RNA_NN2: Xia T (1998) <doi:10.1021/bi9809425>
RNA_NN3: Chen JL (2012) <doi:10.1021/bi3002709>
R_DNA_NN1: Sugimoto N (1995)<doi:10.1016/S0048-9697(98)00088-6>
DNA_TMM1: Bommarito S (2000) <doi:10.1093/nar/28.9.1929>
DNA_IMM1: Peyret N (1999) <doi:10.1021/bi9825091> & Allawi H T (1997) <doi:10.1021/bi962590c> & Santalucia N (2005) <doi:10.1093/nar/gki918>
DNA_DE1: Bommarito S (2000) <doi:10.1093/nar/28.9.1929>
RNA_DE1: Turner D H (2010) <doi:10.1093/nar/gkp892>
}
\examples{
ntseq <- c("AAAATTTTTTTCCCCCCCCCCCCCCGGGGGGGGGGGGTGTGCGCTGC")
out <- Tm_NN(ntseq,Na=50)
out
out$Options
}
\references{
Breslauer K J , Frank R , Blocker H , et al. Predicting DNA duplex stability from the base sequence.[J]. Proceedings of the National Academy of Sciences, 1986, 83(11):3746-3750.
Sugimoto N , Nakano S , Yoneyama M , et al. Improved Thermodynamic Parameters and Helix Initiation Factor to Predict Stability of DNA Duplexes[J]. Nucleic Acids Research, 1996, 24(22):4501-5.
Allawi, H. Thermodynamics of internal C.T mismatches in DNA[J]. Nucleic Acids Research, 1998, 26(11):2694-2701.
Hicks L D , Santalucia J . The thermodynamics of DNA structural motifs.[J]. Annual Review of Biophysics & Biomolecular Structure, 2004, 33(1):415-440.
Freier S M , Kierzek R , Jaeger J A , et al. Improved free-energy parameters for predictions of RNA duplex stability.[J]. Proceedings of the National Academy of Sciences, 1986, 83(24):9373-9377.
Xia T , Santalucia , J , Burkard M E , et al. Thermodynamic Parameters for an Expanded Nearest-Neighbor Model for Formation of RNA Duplexes with Watson-Crick Base Pairs,[J]. Biochemistry, 1998, 37(42):14719-14735.
Chen J L , Dishler A L , Kennedy S D , et al. Testing the Nearest Neighbor Model for Canonical RNA Base Pairs: Revision of GU Parameters[J]. Biochemistry, 2012, 51(16):3508-3522.
Bommarito S, Peyret N, Jr S L. Thermodynamic parameters for DNA sequences with dangling ends[J]. Nucleic Acids Research, 2000, 28(9):1929-1934.
Turner D H , Mathews D H . NNDB: the nearest neighbor parameter database for predicting stability of nucleic acid secondary structure[J]. Nucleic Acids Research, 2010, 38(Database issue):D280-D282.
Sugimoto N , Nakano S I , Katoh M , et al. Thermodynamic Parameters To Predict Stability of RNA/DNA Hybrid Duplexes[J]. Biochemistry, 1995, 34(35):11211-11216.
Allawi H, SantaLucia J: Thermodynamics and NMR of internal G-T mismatches in DNA. Biochemistry 1997, 36:10581-10594.
Santalucia N E W J . Nearest-neighbor thermodynamics of deoxyinosine pairs in DNA duplexes[J]. Nucleic Acids Research, 2005, 33(19):6258-67.
Peyret N , Seneviratne P A , Allawi H T , et al. Nearest-Neighbor Thermodynamics and NMR of DNA Sequences with Internal A-A, C-C, G-G, and T-T Mismatches, [J]. Biochemistry, 1999, 38(12):3468-3477.
}
\author{
Junhui Li
}
| /man/Tm_NN.Rd | no_license | JunhuiLi1017/TmCalculator | R | false | true | 6,958 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tm_NN.R
\name{Tm_NN}
\alias{Tm_NN}
\title{Calculate melting temperature using nearest neighbor thermodynamics}
\usage{
Tm_NN(
ntseq,
ambiguous = FALSE,
comSeq = NULL,
shift = 0,
nn_table = c("DNA_NN4", "DNA_NN1", "DNA_NN2", "DNA_NN3", "RNA_NN1", "RNA_NN2",
"RNA_NN3", "R_DNA_NN1"),
tmm_table = "DNA_TMM1",
imm_table = "DNA_IMM1",
de_table = c("DNA_DE1", "RNA_DE1"),
dnac1 = 25,
dnac2 = 25,
selfcomp = FALSE,
Na = 0,
K = 0,
Tris = 0,
Mg = 0,
dNTPs = 0,
saltcorr = c("Schildkraut2010", "Wetmur1991", "SantaLucia1996", "SantaLucia1998-1",
"SantaLucia1998-2", "Owczarzy2004", "Owczarzy2008"),
DMSO = 0,
fmd = 0,
DMSOfactor = 0.75,
fmdfactor = 0.65,
fmdmethod = c("concentration", "molar")
)
}
\arguments{
\item{ntseq}{Sequence (5' to 3') of one strand of the nucleic acid duplex
as string or vector of characters.}
\item{ambiguous}{Ambiguous bases are taken into account to compute the G
and C content when ambiguous is TRUE.Default is FALSE.}
\item{comSeq}{Complementary sequence. The sequence of the template/target in 3'->5' direction}
\item{shift}{Shift of the primer/probe sequence on the template/target sequence, default=0.
for example: when shift=0, the first nucleotide base at 5` end of primer align to first one at 3`
end of template. When shift=-1, the second nucleotide base at 5` end of primer align to first one at 3`
end of template.
When shift=1, the first nucleotide base at 5` end of primer align to second one at 3` end
of template. The shift parameter is necessary to align primer/probe and template/target
if they have different lengths or if they should have dangling ends.}
\item{nn_table}{Thermodynamic NN values, eight tables are implemented.
For DNA/DNA hybridizations:
DNA_NN1,DNA_NN2,DNA_NN3,DNA_NN4
For RNA/RNA hybridizations:
RNA_NN1,RNA_NN2,RNA_NN3
For RNA/DNA hybridizations:
R_DNA_NN1}
\item{tmm_table}{Thermodynamic values for terminal mismatches. Default: DNA_TMM1}
\item{imm_table}{Thermodynamic values for internal mismatches, may include insosine mismatches. Default: DNA_IMM1}
\item{de_table}{Thermodynamic values for dangling ends. DNA_DE1(default) and RNA_DE1}
\item{dnac1}{Concentration of the higher concentrated strand [nM]. Typically this will
be the primer (for PCR) or the probe. Default=25.}
\item{dnac2}{Concentration of the lower concentrated strand [nM].}
\item{selfcomp}{Sequence self-complementary, default=False. If 'True'
the primer is thought binding to itself, thus dnac2 is not considered.}
\item{Na}{Millimolar concentration of Na, default is 0}
\item{K}{Millimolar concentration of K, default is 0}
\item{Tris}{Millimolar concentration of Tris, default is 0}
\item{Mg}{Millimolar concentration of Mg, default is 0}
\item{dNTPs}{Millimolar concentration of dNTPs, default is 0}
\item{saltcorr}{Salt correction method should be chosen when provide 'userset'
Options are "Schildkraut2010", "Wetmur1991","SantaLucia1996","SantaLucia1998-1",
"SantaLucia1998-2","Owczarzy2004","Owczarzy2008". Note that NA means no salt correction.}
\item{DMSO}{Percent DMSO}
\item{fmd}{Formamide concentration in percentage (fmdmethod="concentration") or molar (fmdmethod="molar").}
\item{DMSOfactor}{Coeffecient of Tm decreases per percent DMSO. Default=0.75 von Ahsen N (2001) <PMID:11673362>. Other published values are 0.5, 0.6 and 0.675.}
\item{fmdfactor}{Coeffecient of Tm decrease per percent formamide. Default=0.65. Several papers report factors between 0.6 and 0.72.}
\item{fmdmethod}{"concentration" method for formamide concentration in percentage and "molar" for formamide concentration in molar.}
}
\description{
Calculate melting temperature using nearest neighbor thermodynamics
}
\details{
DNA_NN1: Breslauer K J (1986) <doi:10.1073/pnas.83.11.3746>
DNA_NN2: Sugimoto N (1996) <doi:10.1093/nar/24.22.4501>
DNA_NN3: Allawi H (1998) <doi:10.1093/nar/26.11.2694>
DNA_NN4: SantaLucia J (2004) <doi:10.1146/annurev.biophys.32.110601.141800>
RNA_NN1: Freier S (1986) <doi:10.1073/pnas.83.24.9373>
RNA_NN2: Xia T (1998) <doi:10.1021/bi9809425>
RNA_NN3: Chen JL (2012) <doi:10.1021/bi3002709>
R_DNA_NN1: Sugimoto N (1995)<doi:10.1016/S0048-9697(98)00088-6>
DNA_TMM1: Bommarito S (2000) <doi:10.1093/nar/28.9.1929>
DNA_IMM1: Peyret N (1999) <doi:10.1021/bi9825091> & Allawi H T (1997) <doi:10.1021/bi962590c> & Santalucia N (2005) <doi:10.1093/nar/gki918>
DNA_DE1: Bommarito S (2000) <doi:10.1093/nar/28.9.1929>
RNA_DE1: Turner D H (2010) <doi:10.1093/nar/gkp892>
}
\examples{
ntseq <- c("AAAATTTTTTTCCCCCCCCCCCCCCGGGGGGGGGGGGTGTGCGCTGC")
out <- Tm_NN(ntseq,Na=50)
out
out$Options
}
\references{
Breslauer K J , Frank R , Blocker H , et al. Predicting DNA duplex stability from the base sequence.[J]. Proceedings of the National Academy of Sciences, 1986, 83(11):3746-3750.
Sugimoto N , Nakano S , Yoneyama M , et al. Improved Thermodynamic Parameters and Helix Initiation Factor to Predict Stability of DNA Duplexes[J]. Nucleic Acids Research, 1996, 24(22):4501-5.
Allawi, H. Thermodynamics of internal C.T mismatches in DNA[J]. Nucleic Acids Research, 1998, 26(11):2694-2701.
Hicks L D , Santalucia J . The thermodynamics of DNA structural motifs.[J]. Annual Review of Biophysics & Biomolecular Structure, 2004, 33(1):415-440.
Freier S M , Kierzek R , Jaeger J A , et al. Improved free-energy parameters for predictions of RNA duplex stability.[J]. Proceedings of the National Academy of Sciences, 1986, 83(24):9373-9377.
Xia T , Santalucia , J , Burkard M E , et al. Thermodynamic Parameters for an Expanded Nearest-Neighbor Model for Formation of RNA Duplexes with Watson-Crick Base Pairs,[J]. Biochemistry, 1998, 37(42):14719-14735.
Chen J L , Dishler A L , Kennedy S D , et al. Testing the Nearest Neighbor Model for Canonical RNA Base Pairs: Revision of GU Parameters[J]. Biochemistry, 2012, 51(16):3508-3522.
Bommarito S, Peyret N, Jr S L. Thermodynamic parameters for DNA sequences with dangling ends[J]. Nucleic Acids Research, 2000, 28(9):1929-1934.
Turner D H , Mathews D H . NNDB: the nearest neighbor parameter database for predicting stability of nucleic acid secondary structure[J]. Nucleic Acids Research, 2010, 38(Database issue):D280-D282.
Sugimoto N , Nakano S I , Katoh M , et al. Thermodynamic Parameters To Predict Stability of RNA/DNA Hybrid Duplexes[J]. Biochemistry, 1995, 34(35):11211-11216.
Allawi H, SantaLucia J: Thermodynamics and NMR of internal G-T mismatches in DNA. Biochemistry 1997, 36:10581-10594.
Santalucia N E W J . Nearest-neighbor thermodynamics of deoxyinosine pairs in DNA duplexes[J]. Nucleic Acids Research, 2005, 33(19):6258-67.
Peyret N , Seneviratne P A , Allawi H T , et al. Nearest-Neighbor Thermodynamics and NMR of DNA Sequences with Internal A-A, C-C, G-G, and T-T Mismatches, [J]. Biochemistry, 1999, 38(12):3468-3477.
}
\author{
Junhui Li
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supervised.R
\name{computeSupervised}
\alias{computeSupervised}
\title{Supervised classification}
\usage{
computeSupervised(
data.sample,
prototypes,
method.name = "K-NN",
model = NULL,
RclusTool.env = initParameters()
)
}
\arguments{
\item{data.sample}{list containing features, profiles and clustering results.}
\item{prototypes}{data.frame containing the features of each prototype associated to a class.}
\item{method.name}{character vector specifying the supervised algorithm to use. Must be 'K-NN' (K-Nearest Neighbor by default), 'MLP' (MultiLayer Perceptron), 'SVM' (Support Vector Machine) or 'RF' (Random Forest).}
\item{model}{option to predict directly from model}
\item{RclusTool.env}{environment in which all global parameters, raw data and results are stored.}
}
\value{
The function returns a list containing:
\item{label}{vector of labels.}
\item{summary}{data.frame containing classes summaries (min, max, sum, average, sd).}
\item{nbItems}{number of observations.}
\item{prototypes}{data.frame containing the features of each prototype associated to a class.}
}
\description{
Perform supervised classification based on the use of a training set.
}
\details{
computeSupervised performs supervised classification based on the use of a training set
}
\examples{
rep <- system.file("extdata", package="RclusTool")
featuresFile <- file.path(rep, "sample_example_features.csv")
features <- read.csv(featuresFile, header = TRUE)
features$ID <- NULL
traindir <- file.path(rep, "train_example")
tf <- tempfile()
write.table(features, tf, sep=",", dec=".")
x <- importSample(file.features=tf, dir.save=dirname(tf))
train <- readTrainSet(traindir)
res <- computeSupervised(x, prototypes=train)
plot(features[,3], features[,4], type = "p", xlab = "x", ylab = "y",
col = res$label, main = "K-Nearest-Neighbor classification")
}
\seealso{
\code{\link{readTrainSet}}
}
| /man/computeSupervised.Rd | no_license | cran/RclusTool | R | false | true | 1,973 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supervised.R
\name{computeSupervised}
\alias{computeSupervised}
\title{Supervised classification}
\usage{
computeSupervised(
data.sample,
prototypes,
method.name = "K-NN",
model = NULL,
RclusTool.env = initParameters()
)
}
\arguments{
\item{data.sample}{list containing features, profiles and clustering results.}
\item{prototypes}{data.frame containing the features of each prototype associated to a class.}
\item{method.name}{character vector specifying the supervised algorithm to use. Must be 'K-NN' (K-Nearest Neighbor by default), 'MLP' (MultiLayer Perceptron), 'SVM' (Support Vector Machine) or 'RF' (Random Forest).}
\item{model}{option to predict directly from model}
\item{RclusTool.env}{environment in which all global parameters, raw data and results are stored.}
}
\value{
The function returns a list containing:
\item{label}{vector of labels.}
\item{summary}{data.frame containing classes summaries (min, max, sum, average, sd).}
\item{nbItems}{number of observations.}
\item{prototypes}{data.frame containing the features of each prototype associated to a class.}
}
\description{
Perform supervised classification based on the use of a training set.
}
\details{
computeSupervised performs supervised classification based on the use of a training set
}
\examples{
rep <- system.file("extdata", package="RclusTool")
featuresFile <- file.path(rep, "sample_example_features.csv")
features <- read.csv(featuresFile, header = TRUE)
features$ID <- NULL
traindir <- file.path(rep, "train_example")
tf <- tempfile()
write.table(features, tf, sep=",", dec=".")
x <- importSample(file.features=tf, dir.save=dirname(tf))
train <- readTrainSet(traindir)
res <- computeSupervised(x, prototypes=train)
plot(features[,3], features[,4], type = "p", xlab = "x", ylab = "y",
col = res$label, main = "K-Nearest-Neighbor classification")
}
\seealso{
\code{\link{readTrainSet}}
}
|
# Data preparation ------------------------------------------------------------
pelanggan <- read.csv("customer_segments.txt", sep = "\t")
# Append integer-coded copies of the categorical columns; data.frame() renames
# the duplicates to "<name>.1", and those coded columns feed kmeans() below.
# NOTE(review): data.matrix() only yields meaningful codes when these columns
# are factors; on character columns (stringsAsFactors = FALSE, the default
# since R 4.0) it produces NAs -- confirm on the target R version.
kolom_kode <- data.matrix(pelanggan[c("Jenis.Kelamin", "Profesi", "Tipe.Residen")])
pelanggan <- data.frame(pelanggan, kolom_kode)
# Lookup tables mapping each original category to its integer code.
Profesi <- unique(pelanggan[c("Profesi", "Profesi.1")])
Jenis.Kelamin <- unique(pelanggan[c("Jenis.Kelamin", "Jenis.Kelamin.1")])
Tipe.Residen <- unique(pelanggan[c("Tipe.Residen", "Tipe.Residen.1")])
# Rescale yearly spending to millions so features are on comparable scales.
pelanggan$NilaiBelanjaSetahun <- pelanggan$NilaiBelanjaSetahun / 1e6
field_yang_digunakan <- c("Jenis.Kelamin.1", "Umur", "Profesi.1",
                          "Tipe.Residen.1", "NilaiBelanjaSetahun")
# K-means clustering -----------------------------------------------------------
set.seed(100)  # fixed seed so cluster assignments are reproducible
segmentasi <- kmeans(x = pelanggan[field_yang_digunakan], centers = 5, nstart = 25)
# Attach cluster labels and inspect membership --------------------------------
segmentasi$cluster
pelanggan$cluster <- segmentasi$cluster
which(pelanggan$cluster == 2)
length(which(pelanggan$cluster == 3)) | /Data Science In Marketing_Customer Segmentation/Analisa Hasil Cluster Size.R | no_license | rhedi/Data_Science | R | false | false | 888 | r | #Bagian Data Preparation
pelanggan <- read.csv("customer_segments.txt", sep = "\t")
pelanggan_matrix <- data.matrix(pelanggan[c("Jenis.Kelamin", "Profesi", "Tipe.Residen")])
pelanggan <- data.frame(pelanggan, pelanggan_matrix)
Profesi <- unique(pelanggan[c("Profesi", "Profesi.1")])
Jenis.Kelamin <- unique(pelanggan[c("Jenis.Kelamin", "Jenis.Kelamin.1")])
Tipe.Residen <- unique(pelanggan[c("Tipe.Residen", "Tipe.Residen.1")])
pelanggan$NilaiBelanjaSetahun <- pelanggan$NilaiBelanjaSetahun/1000000
field_yang_digunakan <- c("Jenis.Kelamin.1", "Umur", "Profesi.1", "Tipe.Residen.1", "NilaiBelanjaSetahun")
#Bagian K-Means
set.seed(100)
segmentasi <- kmeans(x = pelanggan[field_yang_digunakan], centers = 5, nstart = 25)
#Penggabungan hasil cluster
segmentasi$cluster
pelanggan$cluster <- segmentasi$cluster
which(pelanggan$cluster == 2)
length(which(pelanggan$cluster == 3)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/name.R
\name{neuprint_search}
\alias{neuprint_search}
\title{Search for body IDs based on a given name}
\usage{
neuprint_search(search = "MBON.*", meta = TRUE, all_segments = TRUE,
dataset = NULL, conn = NULL, ...)
}
\arguments{
\item{search}{name to search. Defaults to a search for MBONs}
\item{meta}{if TRUE, meta data for found bodyids is also pulled}
\item{all_segments}{if TRUE, all bodies are considered, if FALSE, only 'Neurons', i.e. bodies with a status roughly traced status.}
\item{dataset}{optional, a dataset you want to query. If NULL, the default specified by your R environ file is used. See \code{neuprint_login} for details.}
\item{conn}{optional, a neuprintr connection object, which also specifies the neuPrint server see \code{?neuprint_login}.
If NULL, your defaults set in your R.profile or R.environ are used.}
\item{...}{methods passed to \code{neuprint_login}}
}
\value{
a vector of body ids, or a data frame with their meta information
}
\description{
Search for bodyids corresponding to a given name, regex sensitive
}
| /man/neuprint_search.Rd | no_license | Tomke587/neuprintr | R | false | true | 1,132 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/name.R
\name{neuprint_search}
\alias{neuprint_search}
\title{Search for body IDs based on a given name}
\usage{
neuprint_search(search = "MBON.*", meta = TRUE, all_segments = TRUE,
dataset = NULL, conn = NULL, ...)
}
\arguments{
\item{search}{name to search. Defaults to a search for MBONs}
\item{meta}{if TRUE, meta data for found bodyids is also pulled}
\item{all_segments}{if TRUE, all bodies are considered, if FALSE, only 'Neurons', i.e. bodies with a status roughly traced status.}
\item{dataset}{optional, a dataset you want to query. If NULL, the default specified by your R environ file is used. See \code{neuprint_login} for details.}
\item{conn}{optional, a neuprintr connection object, which also specifies the neuPrint server see \code{?neuprint_login}.
If NULL, your defaults set in your R.profile or R.environ are used.}
\item{...}{methods passed to \code{neuprint_login}}
}
\value{
a vector of body ids, or a data frame with their meta information
}
\description{
Search for bodyids corresponding to a given name, Reex sensitive
}
|
#'
#' @title Genomic Mediation analysis with Adaptive Permutation scheme and
#' Adaptive Confounders
#'
#' @description The gmap.ac function performs genomic mediation analysis with
#' Adaptive Permutation scheme and Adaptive Confounders. It tests for mediation
#' effects for a set of user specified mediation trios(e.g., eQTL, cis- and
#' trans-genes) in the genome with the assumption of the presence of
#' cis-association. The gmap.ac function considers either a user provided pool
#' of potential confounding variables, real or constructed by other methods,
#' or all the PCs based on the feature data as the potential confounder pool.
#'
#'
#' It returns the mediation p-values(nominal and empirical), the coefficient
#' of linear models(e.g, t_stat, std.error, beta, beta.total), and the
#' proportions mediated(e.g., the percentage of reduction in trans-effects
#' after accounting for cis-mediation) based on the mediation tests i)
#' adjusting for known confounders only, and ii) adjusting for known
#' confounders and adaptively selected potential confounders for each
#' mediation trio.
#'
#' @details The function performs genomic mediation analysis with Adaptive
#' Permutation scheme and Adaptive Confunders. \code{Adaptive Permutation
#' scheme}{When using Fixed Permutation scheme, good estimation of
#' insignificant adjusted P-values can be achieved with few permutations while
#' many more are needed to estimate highly significant ones. Therefore, we
#' implemented an alternative permutation scheme that adapts the number of
#' permutations to the significance level of the variant–phenotype pairs}
#' \code{Adaptive Confounding adjustment} {One challenge in mediation test in
#' genomic studies is how to adjust unmeasured confounding variables for the
#' cis- and trans-genes (i.e., mediator-outcome) relationship.The current
#' function adaptively selects the variables to adjust for each mediation trio
#' given a large pool of constructed or real potential confounding variables.
#' The function allows the input of variables known to be potential cis- and
#' trans-genes (mediator-outcome) confounders in all mediation tests
#' (\code{known.conf}), and the input of the pool of candidate confounders
#' from which potential confounders for each mediation test will be adaptively
#' selected (\code{cov.pool}). When no pool is provided (\code{cov.pool =
#' NULL}), all the PCs based on feature profile (\code{fea.dat}) will be
#' constructed as the potential confounder pool.} \code{calculate Empirical
#' P-values using GPD fitting}{The use of a fixed number of permutations to
#' calculate empirical P-values has the disadvantage that the minimum
#' empirical P-value that can be calculated is 1/N. This makes a larger number
#' of permutations needed to calculate a smaller P-value. Therefore, we model
#' the tail of the permutation value as a Generalized Pareto
#' Distribution(GPD), enabling a smaller empirical P-value with fewer
#' permutation times.}
#'
#' @param snp.dat The eQTL genotype matrix. Each row is an eQTL, each column is
#' a sample.
#' @param fea.dat A feature profile matrix. Each row is for one feature, each
#' column is a sample.
#' @param known.conf A known confounders matrix which is adjusted in all
#' mediation tests. Each row is a confounder, each column is a sample.
#' @param trios.idx A matrix of selected trios indexes (row numbers) for
#' mediation tests. Each row consists of the index (i.e., row number) of the
#' eQTL in \code{snp.dat}, the index of cis-gene feature in \code{fea.dat},
#' and the index of trans-gene feature in \code{fea.dat}. The dimension is the
#' number of trios by three.
#' @param cl Parallel backend if it is set up. It is used for parallel
#' computing. We set \code{cl}=NULL as default.
#' @param cov.pool The pool of candidate confounding variables from which
#' potential confounders are adaptively selected to adjust for each trio. Each
#' row is a covariate, each column is a sample. We set \code{cov.pool}=NULL as
#' default, which will calculate PCs of features as cov.pool.
#' @param Minperm The minimum number of permutations. When the number of
#' permutation statistics better than the original statistic is greater than
#' \code{Minperm}, stop permutation and directly calculate the empirical P
#' value. If \code{Minperm}=0, only the nominal P-value is calculated. We set
#' \code{Minperm}=100 as default.
#' @param Maxperm Maximum number of permutation. We set \code{Maxperm}=10000 as
#' default.
#' @param fdr The false discovery rate to select confounders. We set
#' \code{fdr}=0.05 as default.
#' @param fdr_filter The false discovery rate to filter common child and
#' intermediate variables. We set \code{fdr_filter}=0.1 as default.
#'
#' @return The algorithm will return a list of nperm, empirical.p, nominal.p,
#' beta, std.error, t_stat, beta.total, beta.change. \item{nperm}{The actual
#' number of permutations for testing mediation.} \item{empirical.p}{The
#' mediation empirical P-values with nperm times permutation. A matrix with
#' dimension of the number of trios.} \item{nominal.p}{The mediation nominal
#' P-values. A matrix with dimension of the number of trios.}
#' \item{std.error}{The return std.error value of feature1 for fit liner
#' models. A matrix with dimension of the number of trios.} \item{t_stat}{The
#' return t_stat value of feature1 for fit liner models. A matrix with
#' dimension of the number of trios.} \item{beta}{The return beta value of
#' feature2 for fit liner models in the case of feature1. A matrix with
#' dimension of the number of trios.} \item{beta.total}{The return beta value
#' of feature2 for fit liner models without considering feature1. A matrix
#' with dimension of the number of trios.} \item{beta.change}{The proportions
#' mediated. A matrix with dimension of the number of trios.}
#' \item{pc.matrix}{PCs will be returned if the PCs based on expression data
#' are used as the pool of potential confounders. Each column is a PC.}
#' \item{sel.conf.ind}{An indicator matrix with dimension of the number of
#' trios by the number of covariates in \code{cov.pool} or \code{pc.matrix}if
#' the principal components (PCs) based on expression data are used as the
#' pool of potential confounders.}
#'
#' @references Ongen H, Buil A, Brown AA, Dermitzakis ET, Delaneau O. (2016)
#' Fast and efficient QTL mapper for thousands of molecular phenotypes.
#' Bioinformatics. 2016;32:1479–1485. \doi{10.1093/bioinformatics/btv722}
#' @references Yang F, Wang J, Consortium G, Pierce BL, Chen LS. (2017)
#' Identifying cis-mediators for trans-eQTLs across many human tissues using
#' genomic mediation analysis. Genome Research. 2017;27:1859–1871.
#' \doi{10.1101/gr.216754.116}
#'
#' @examples
#'
#' output <- gmap.ac(known.conf = dat$known.conf, fea.dat = dat$fea.dat, snp.dat = dat$snp.dat,
#' trios.idx = dat$trios.idx[1:10,], Minperm = 100, Maxperm = 10000)
#'
#' \dontrun{
#' ## generate a cluster with 2 nodes for parallel computing
#' cl <- makeCluster(2)
#'
#' ## Use the user-specified pool of candidate confounding variables
#' output <- gmap.ac(known.conf = dat$known.conf, fea.dat = dat$fea.dat, snp.dat = dat$snp.dat,
#' trios.idx = dat$trios.idx[1:10,], cl = cl, cov.pool = dat$cov.pool,
#' Minperm = 100, Maxperm = 10000)
#'
#' stopCluster(cl)
#' }
#'
#' @export
#' @importFrom parallel parLapply
#'
gmap.ac <- function(snp.dat, fea.dat, known.conf, trios.idx, cl = NULL, cov.pool = NULL,
                    Minperm = 100, Maxperm = 10000, fdr = 0.05, fdr_filter = 0.1) {
  # Known confounders are adjusted in every mediation test; transpose so rows
  # are samples, matching the per-trio design matrices built below.
  confounders <- t(known.conf)

  # Assemble a (samples x trios x 3) array holding, for trio i:
  # [, i, 1] rounded genotype, [, i, 2] cis-feature, [, i, 3] trans-feature.
  triomatrix <- array(NA, c(dim(fea.dat)[2], dim(trios.idx)[1], 3))
  for (i in seq_len(nrow(trios.idx))) {  # seq_len() is safe for 0 trios (1:0 is not)
    triomatrix[, i, ] <- cbind(round(snp.dat[trios.idx[i, 1], ], digits = 0),
                               fea.dat[trios.idx[i, 2], ],
                               fea.dat[trios.idx[i, 3], ])
  }
  num_trio <- dim(triomatrix)[2]

  # Adaptive confounding adjustment: build the candidate-confounder pool and a
  # per-trio indicator of which candidates to adjust for. With no user pool,
  # principal components of the feature data serve as candidates.
  use.PC <- FALSE
  if (is.null(cov.pool)) { # using PCs
    use.PC <- TRUE
    res <- get.cov(cl, fea.dat = fea.dat, triomatrix = triomatrix,
                   fdr = fdr, fdr_filter = fdr_filter)
    all_pc <- res$pc.all
  } else {                 # using the supplied cov.pool
    res <- get.cov(cl, cov.pool = cov.pool, triomatrix = triomatrix,
                   fdr = fdr, fdr_filter = fdr_filter)
  }
  pool_cov <- res$pool_cov
  est_conf_pool_idx <- res$est_conf_pool_idx

  # Run the mediation tests twice per trio: adjusting for known confounders
  # only, and for known + adaptively selected confounders. Use the parallel
  # backend when one is provided.
  if (!is.null(cl)) {
    known_output <- parLapply(cl, seq_len(num_trio), getp.func,
                              triomatrix = triomatrix, confounders = confounders,
                              Minperm = Minperm, Maxperm = Maxperm)
    known_sel_pool_output <- parLapply(cl, seq_len(num_trio), getp.func,
                                       triomatrix = triomatrix, confounders = confounders,
                                       pool_cov = pool_cov,
                                       est_conf_pool_idx = est_conf_pool_idx,
                                       Minperm = Minperm, Maxperm = Maxperm,
                                       use.PC = use.PC)
  } else {
    known_output <- lapply(seq_len(num_trio), getp.func,
                           triomatrix = triomatrix, confounders = confounders,
                           Minperm = Minperm, Maxperm = Maxperm)
    known_sel_pool_output <- lapply(seq_len(num_trio), getp.func,
                                    triomatrix = triomatrix, confounders = confounders,
                                    pool_cov = pool_cov,
                                    est_conf_pool_idx = est_conf_pool_idx,
                                    Minperm = Minperm, Maxperm = Maxperm,
                                    use.PC = use.PC)
  }

  # Pair one result field across the two analyses into a (num_trio x 2)
  # list-matrix: column 1 = known confounders only, column 2 = known +
  # adaptively selected. (Same list-matrix layout as before.)
  pair_cols <- function(field) {
    matrix(c(lapply(known_output, `[[`, field),
             lapply(known_sel_pool_output, `[[`, field)),
           byrow = FALSE, ncol = 2)
  }

  output <- list(nperm = pair_cols("nperm"),
                 empirical.p = pair_cols("empirical.p"),
                 nominal.p = pair_cols("nominal.p"),
                 std.error = pair_cols("std.error"),
                 t_stat = pair_cols("t_stat"),
                 beta = pair_cols("beta"),
                 beta.total = pair_cols("beta.total"),
                 beta.change = pair_cols("beta.change"))
  # Keep the historical element order: pc.matrix (when PCs were used as the
  # candidate pool) precedes sel.conf.ind.
  if (use.PC) {
    output$pc.matrix <- all_pc
  }
  output$sel.conf.ind <- est_conf_pool_idx
  return(output)
}
| /xQTLMediation/R/gmap.ac.R | no_license | stormlovetao/R_package | R | false | false | 11,439 | r | #'
#' @title Genomic Mediation analysis with Adaptive Petmutation scheme and
#' Adaptive Confunders
#'
#' @description The gmap.ac function performs genomic mediation analysis with
#' Adaptive Permutation scheme and Adaptive Confunders. It tests for mediation
#' effects for a set of user specified mediation trios(e.g., eQTL, cis- and
#' trans-genes) in the genome with the assumption of the presence of
#' cis-association. The gmap.ac function considers either a user provided pool
#' of potential confounding variables, real or constructed by other methods,
#' or all the PCs based on the feature data as the potential confounder pool.
#'
#'
#' It returns the mediation p-values(nominal and empirical), the coefficient
#' of linear models(e.g, t_stat, std.error, beta, beta.total), and the
#' proportions mediated(e.g., the percentage of reduction in trans-effects
#' after accounting for cis-mediation) based on the mediation tests i)
#' adjusting for known confounders only, and ii) adjusting for known
#' confounders and adaptively selected potential confounders for each
#' mediation trio.
#'
#' @details The function performs genomic mediation analysis with Adaptive
#' Permutation scheme and Adaptive Confounders. \code{Adaptive Permutation
#' scheme}{When using a Fixed Permutation scheme, good estimation of
#' insignificant adjusted P-values can be achieved with few permutations while
#' many more are needed to estimate highly significant ones. Therefore, we
#' implemented an alternative permutation scheme that adapts the number of
#' permutations to the significance level of the variant–phenotype pairs}
#' \code{Adaptive Confounding adjustment} {One challenge in mediation test in
#' genomic studies is how to adjust unmeasured confounding variables for the
#' cis- and trans-genes (i.e., mediator-outcome) relationship.The current
#' function adaptively selects the variables to adjust for each mediation trio
#' given a large pool of constructed or real potential confounding variables.
#' The function allows the input of variables known to be potential cis- and
#' trans-genes (mediator-outcome) confounders in all mediation tests
#' (\code{known.conf}), and the input of the pool of candidate confounders
#' from which potential confounders for each mediation test will be adaptively
#' selected (\code{cov.pool}). When no pool is provided (\code{cov.pool =
#' NULL}), all the PCs based on feature profile (\code{fea.dat}) will be
#' constructed as the potential confounder pool.} \code{calculate Empirical
#' P-values using GPD fitting}{The use of a fixed number of permutations to
#' calculate empirical P-values has the disadvantage that the minimum
#' empirical P-value that can be calculated is 1/N. This makes a larger number
#' of permutations needed to calculate a smaller P-value. Therefore, we model
#' the tail of the permutation value as a Generalized Pareto
#' Distribution(GPD), enabling a smaller empirical P-value with fewer
#' permutation times.}
#'
#' @param snp.dat The eQTL genotype matrix. Each row is an eQTL, each column is
#' a sample.
#' @param fea.dat A feature profile matrix. Each row is for one feature, each
#' column is a sample.
#' @param known.conf A known confounders matrix which is adjusted in all
#' mediation tests. Each row is a confounder, each column is a sample.
#' @param trios.idx A matrix of selected trios indexes (row numbers) for
#' mediation tests. Each row consists of the index (i.e., row number) of the
#' eQTL in \code{snp.dat}, the index of cis-gene feature in \code{fea.dat},
#' and the index of trans-gene feature in \code{fea.dat}. The dimension is the
#' number of trios by three.
#' @param cl Parallel backend if it is set up. It is used for parallel
#' computing. We set \code{cl}=NULL as default.
#' @param cov.pool The pool of candidate confounding variables from which
#' potential confounders are adaptively selected to adjust for each trio. Each
#' row is a covariate, each column is a sample. We set \code{cov.pool}=NULL as
#' default, which will calculate PCs of features as cov.pool.
#' @param Minperm The minimum number of permutations. When the number of
#' permutation statistics better than the original statistic is greater than
#' \code{Minperm}, stop permutation and directly calculate the empirical P
#' value. If \code{Minperm}=0, only the nominal P-value is calculated. We set
#' \code{Minperm}=100 as default.
#' @param Maxperm Maximum number of permutation. We set \code{Maxperm}=10000 as
#' default.
#' @param fdr The false discovery rate to select confounders. We set
#' \code{fdr}=0.05 as default.
#' @param fdr_filter The false discovery rate to filter common child and
#' intermediate variables. We set \code{fdr_filter}=0.1 as default.
#'
#' @return The algorithm will return a list of nperm, empirical.p, nominal.p,
#' beta, std.error, t_stat, beta.total, beta.change. \item{nperm}{The actual
#' number of permutations for testing mediation.} \item{empirical.p}{The
#' mediation empirical P-values with nperm times permutation. A matrix with
#' dimension of the number of trios.} \item{nominal.p}{The mediation nominal
#' P-values. A matrix with dimension of the number of trios.}
#' \item{std.error}{The standard error of feature1 from the fitted linear
#' models. A matrix with dimension of the number of trios.} \item{t_stat}{The
#' t statistic of feature1 from the fitted linear models. A matrix with
#' dimension of the number of trios.} \item{beta}{The beta coefficient of
#' feature2 from the fitted linear models adjusting for feature1. A matrix with
#' dimension of the number of trios.} \item{beta.total}{The beta coefficient
#' of feature2 from the fitted linear models without considering feature1. A
#' matrix with dimension of the number of trios.}
#' mediated. A matrix with dimension of the number of trios.}
#' \item{pc.matrix}{PCs will be returned if the PCs based on expression data
#' are used as the pool of potential confounders. Each column is a PC.}
#' \item{sel.conf.ind}{An indicator matrix with dimension of the number of
#' trios by the number of covariates in \code{cov.pool} or \code{pc.matrix}if
#' the principal components (PCs) based on expression data are used as the
#' pool of potential confounders.}
#'
#' @references Ongen H, Buil A, Brown AA, Dermitzakis ET, Delaneau O. (2016)
#' Fast and efficient QTL mapper for thousands of molecular phenotypes.
#' Bioinformatics. 2016;32:1479–1485. \doi{10.1093/bioinformatics/btv722}
#' @references Yang F, Wang J, Consortium G, Pierce BL, Chen LS. (2017)
#' Identifying cis-mediators for trans-eQTLs across many human tissues using
#' genomic mediation analysis. Genome Research. 2017;27:1859–1871.
#' \doi{10.1101/gr.216754.116}
#'
#' @examples
#'
#' output <- gmap.ac(known.conf = dat$known.conf, fea.dat = dat$fea.dat, snp.dat = dat$snp.dat,
#' trios.idx = dat$trios.idx[1:10,], Minperm = 100, Maxperm = 10000)
#'
#' \dontrun{
#' ## generate a cluster with 2 nodes for parallel computing
#' cl <- makeCluster(2)
#'
#' ## Use the specified pool of candidate confounding variables
#' output <- gmap.ac(known.conf = dat$known.conf, fea.dat = dat$fea.dat, snp.dat = dat$snp.dat,
#' trios.idx = dat$trios.idx[1:10,], cl = cl, cov.pool = dat$cov.pool,
#' Minperm = 100, Maxperm = 10000)
#'
#' stopCluster(cl)
#' }
#'
#' @export
#' @importFrom parallel parLapply
#'
gmap.ac <- function(snp.dat, fea.dat, known.conf, trios.idx, cl = NULL, cov.pool = NULL,
Minperm = 100, Maxperm = 10000, fdr = 0.05, fdr_filter = 0.1){
confounders <- t(known.conf)
triomatrix <- array(NA, c(dim(fea.dat)[2], dim(trios.idx)[1], 3))
for (i in 1:dim(trios.idx)[1]) {
triomatrix[,i, ] <- cbind(round(snp.dat[trios.idx[i, 1], ], digits = 0),
fea.dat[trios.idx[i, 2], ], fea.dat[trios.idx[i, 3], ])
}
num_trio <- dim(triomatrix)[2]
# Adaptive Confunding adjustment
use.PC <- FALSE
if(is.null(cov.pool)){ # using PC
use.PC <- TRUE
res <- get.cov(cl, fea.dat = fea.dat, triomatrix = triomatrix, fdr = fdr, fdr_filter = fdr_filter)
pool_cov <- res$pool_cov
est_conf_pool_idx <- res$est_conf_pool_idx
all_pc <- res$pc.all
}else{ # using cov.pool
res <- get.cov(cl, cov.pool = cov.pool, triomatrix = triomatrix, fdr = fdr, fdr_filter = fdr_filter)
pool_cov <- res$pool_cov
est_conf_pool_idx <- res$est_conf_pool_idx
}
if(!is.null(cl)){
known_output <- parLapply(cl, 1:num_trio, getp.func, triomatrix = triomatrix, confounders = confounders,
Minperm = Minperm, Maxperm = Maxperm)
known_sel_pool_output <- parLapply(cl, 1:num_trio, getp.func, triomatrix = triomatrix,
confounders = confounders, pool_cov = pool_cov, est_conf_pool_idx = est_conf_pool_idx,
Minperm = Minperm, Maxperm = Maxperm, use.PC = use.PC)
}else{
known_output <- lapply(1:num_trio, getp.func, triomatrix = triomatrix, confounders = confounders,
Minperm = Minperm, Maxperm = Maxperm)
known_sel_pool_output <- lapply(1:num_trio, getp.func, triomatrix = triomatrix,
confounders = confounders, pool_cov = pool_cov, est_conf_pool_idx = est_conf_pool_idx,
Minperm = Minperm, Maxperm = Maxperm, use.PC = use.PC)
}
nominal.p <- matrix(c(lapply(known_output, function(x) x$nominal.p),
lapply(known_sel_pool_output, function(x) x$nominal.p)), byrow = F, ncol = 2)
t_stat <- matrix(c(lapply(known_output, function(x) x$t_stat),
lapply(known_sel_pool_output, function(x) x$t_stat)), byrow = F, ncol = 2)
std.error <- matrix(c(lapply(known_output, function(x) x$std.error),
lapply(known_sel_pool_output, function(x) x$std.error)), byrow = F, ncol = 2)
beta <- matrix(c(lapply(known_output, function(x) x$beta),
lapply(known_sel_pool_output, function(x) x$beta)), byrow = F, ncol = 2)
beta.total <- matrix(c(lapply(known_output, function(x) x$beta.total),
lapply(known_sel_pool_output, function(x) x$beta.total)), byrow = F, ncol = 2)
beta.change <- matrix(c(lapply(known_output, function(x) x$beta.change),
lapply(known_sel_pool_output, function(x) x$beta.change)), byrow = F, ncol = 2)
empirical.p <- matrix(c(lapply(known_output, function(x) x$empirical.p),
lapply(known_sel_pool_output, function(x) x$empirical.p)), byrow = F, ncol = 2)
nperm <- matrix(c(lapply(known_output, function(x) x$nperm),
lapply(known_sel_pool_output, function(x) x$nperm)), byrow = F, ncol = 2)
if(use.PC){
output <- list(nperm = nperm, empirical.p = empirical.p, nominal.p = nominal.p, std.error = std.error, t_stat = t_stat,
beta = beta, beta.total = beta.total, beta.change = beta.change, pc.matrix = all_pc, sel.conf.ind = est_conf_pool_idx)
}else{
output <- list(nperm = nperm, empirical.p = empirical.p, nominal.p = nominal.p, std.error = std.error, t_stat = t_stat,
beta = beta, beta.total = beta.total, beta.change = beta.change, sel.conf.ind = est_conf_pool_idx)
}
return(output)
}
|
library(gapminder)
library(dplyr)
library(ggplot2)
# Summarize to find the median life expectancy
gapminder %>% summarize(medianLifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy
gapminder %>% filter(year == 1957) %>% summarize(medianLifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy and the maximum GDP per capita
gapminder %>% filter(year == 1957) %>% summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find median life expectancy and maximum GDP per capita in each year
gapminder %>%
group_by(year) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find median life expectancy and maximum GDP per capita in each continent in 1957
gapminder %>%
filter(year == 1957) %>%
group_by(continent) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find median life expectancy and maximum GDP per capita in each year/continent combination
gapminder %>%
group_by(continent, year) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
by_year <- gapminder %>%
group_by(year) %>%
summarize(medianLifeExp = median(lifeExp),
maxGdpPercap = max(gdpPercap))
# Create a scatter plot showing the change in medianLifeExp over time
ggplot(by_year, aes(x=year, y=medianLifeExp)) +
geom_point() +
expand_limits(y = 0)
# Summarize medianGdpPercap within each continent within each year: by_year_continent
by_year_continent <- gapminder %>%
group_by(year, continent) %>%
summarize(medianGdpPercap = median(gdpPercap))
# Plot the change in medianGdpPercap in each continent over time
ggplot(by_year_continent, aes(x= year, y=medianGdpPercap, color=continent)) +
geom_point() +
expand_limits(y=0)
# Summarize the median GDP and median life expectancy per continent in 2007
by_continent_2007 <- gapminder %>%
filter(year == 2007) %>%
group_by(continent) %>%
summarize(medianLifeExp = median(lifeExp), medianGdpPercap = median(gdpPercap))
# Use a scatter plot to compare the median GDP and median life expectancy
ggplot(by_continent_2007, aes(x= medianGdpPercap, y = medianLifeExp, color = continent)) +
geom_point() +
expand_limits(y=0)
| /Grouping and summarizing.R | no_license | Willamar/Introduction-to-the-Tidyverse | R | false | false | 2,264 | r | library(gapminder)
library(dplyr)
library(ggplot2)
# Summarize to find the median life expectancy
gapminder %>% summarize(medianLifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy
gapminder %>% filter(year == 1957) %>% summarize(medianLifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy and the maximum GDP per capita
gapminder %>% filter(year == 1957) %>% summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find median life expectancy and maximum GDP per capita in each year
gapminder %>%
group_by(year) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find median life expectancy and maximum GDP per capita in each continent in 1957
gapminder %>%
filter(year == 1957) %>%
group_by(continent) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find median life expectancy and maximum GDP per capita in each year/continent combination
gapminder %>%
group_by(continent, year) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
by_year <- gapminder %>%
group_by(year) %>%
summarize(medianLifeExp = median(lifeExp),
maxGdpPercap = max(gdpPercap))
# Create a scatter plot showing the change in medianLifeExp over time
ggplot(by_year, aes(x=year, y=medianLifeExp)) +
geom_point() +
expand_limits(y = 0)
# Summarize medianGdpPercap within each continent within each year: by_year_continent
by_year_continent <- gapminder %>%
group_by(year, continent) %>%
summarize(medianGdpPercap = median(gdpPercap))
# Plot the change in medianGdpPercap in each continent over time
ggplot(by_year_continent, aes(x= year, y=medianGdpPercap, color=continent)) +
geom_point() +
expand_limits(y=0)
# Summarize the median GDP and median life expectancy per continent in 2007
by_continent_2007 <- gapminder %>%
filter(year == 2007) %>%
group_by(continent) %>%
summarize(medianLifeExp = median(lifeExp), medianGdpPercap = median(gdpPercap))
# Use a scatter plot to compare the median GDP and median life expectancy
ggplot(by_continent_2007, aes(x= medianGdpPercap, y = medianLifeExp, color = continent)) +
geom_point() +
expand_limits(y=0)
|
library(coin)
library(ez)
library(nparcomp)
library(pgirmess)
library(influence.ME)
library(MASS)
library(car)
library(reshape)
library(plyr)
library(strucchange)
library(segmented)
library(SiZer)
library(gplots)
library(ggplot2)
library(psych)
library(nlme)
library(lme4)
library(MuMIn)
library(ggplot2)
library(lattice)
library(nlme)
library(multcomp)
library(arm)
library(pbkrtest)
library(RColorBrewer)
library(lattice)
install.packages("gridExtra")
library(gridExtra)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
##### Mulitplot - display lots of ggplots at once #####
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
##### Games-Howell ####
tukey <- function( data, # 観察値ベクトル
group, # 群変数ベクトル
method=c("Tukey", "Games-Howell")) # 手法の選択
{
OK <- complete.cases(data, group) # 欠損値を持つケースを除く
data <- data[OK]
group <- factor(group[OK])
n <- tapply(data, group, length) # 各群のケース数
a <- length(n) # 群の数
phi.e <- sum(n)-a # 誤差分散(群内不偏分散)の自由度
Mean <- tapply(data, group, mean) # 各群の平均値
Variance <- tapply(data, group, var) # 各群の不偏分散
result1 <- cbind(n, Mean, Variance) # 各群の統計量
rownames(result1) <- paste("Group", 1:a, sep="")
method <- match.arg(method)
if (method == "Tukey") { # Tukey の方法
v.e <- sum((n-1)*Variance)/phi.e # 誤差分散(群内不偏分散)
t <- combn(a, 2, function(ij) # 対比較
abs(diff(Mean[ij]))/sqrt(v.e*sum(1/n[ij])) )
p <- ptukey(t*sqrt(2), a, phi.e, lower.tail=FALSE) # 有意確率を計算する
Tukey <- cbind(t, p)
rownames(Tukey) <- combn(a, 2, paste, collapse=":")
return(list(result1=result1, Tukey=Tukey, phi=phi.e, v=v.e))
}
else {
t.df <- combn(a, 2, function(ij) {
t <- abs(diff(Mean[ij]))/sqrt(sum(Variance[ij]/n[ij]))
df <- sum(Variance[ij]/n[ij])^2/sum((Variance[ij]/n[ij])^2/(n[ij]-1))
return(c(t, df))} )
t <- t.df[1,]
df <- t.df[2,]
p <- ptukey(t*sqrt(2), a, df, lower.tail=FALSE)
Games.Howell <- cbind(t, df, p)
rownames(Games.Howell) <- combn(a, 2, paste, collapse=":")
return(list(result1=result1, Games.Howell=Games.Howell))
}
}
# Pairwise Pairwise Multiple Multiple Comparison Comparison Procedures Procedures with Unequal Unequal N‘s
# and/orVariances: A Monte Carlo Study
# Journal Journal of Educational Statistics Educational Statistics,Vol.1, ,Vol.1, No. 2, 1976, pp. 113 . 2, 1976, pp. 113-125
# Friedman post-hoc test for non-parametric test
friedman.test.with.post.hoc <- function(formu, data, to.print.friedman = T, to.post.hoc.if.signif = T, to.plot.parallel = T, to.plot.boxplot = T, signif.P = .05, color.blocks.in.cor.plot = T, jitter.Y.in.cor.plot =F)
{
# formu is a formula of the shape: Y ~ X | block
# data is a long data.frame with three columns: [[ Y (numeric), X (factor), block (factor) ]]
# Note: This function doesn't handle NA's! In case of NA in Y in one of the blocks, then that entire block should be removed.
# Loading needed packages
if(!require(coin))
{
print("You are missing the package 'coin', we will now try to install it...")
install.packages("coin")
library(coin)
}
if(!require(multcomp))
{
print("You are missing the package 'multcomp', we will now try to install it...")
install.packages("multcomp")
library(multcomp)
}
if(!require(colorspace))
{
print("You are missing the package 'colorspace', we will now try to install it...")
install.packages("colorspace")
library(colorspace)
}
# get the names out of the formula
formu.names <- all.vars(formu)
Y.name <- formu.names[1]
X.name <- formu.names[2]
block.name <- formu.names[3]
if(dim(data)[2] >3) data <- data[,c(Y.name,X.name,block.name)] # In case we have a "data" data frame with more then the three columns we need. This code will clean it from them...
# Note: the function doesn't handle NA's. In case of NA in one of the block T outcomes, that entire block should be removed.
# stopping in case there is NA in the Y vector
if(sum(is.na(data[,Y.name])) > 0) stop("Function stopped: This function doesn't handle NA's. In case of NA in Y in one of the blocks, then that entire block should be removed.")
# make sure that the number of factors goes with the actual values present in the data:
data[,X.name ] <- factor(data[,X.name ])
data[,block.name ] <- factor(data[,block.name ])
number.of.X.levels <- length(levels(data[,X.name ]))
if(number.of.X.levels == 2) { warning(paste("'",X.name,"'", "has only two levels. Consider using paired wilcox.test instead of friedman test"))}
# making the object that will hold the friedman test and the other.
the.sym.test <- symmetry_test(formu, data = data, ### all pairwise comparisons
teststat = "max",
xtrafo = function(Y.data) { trafo( Y.data, factor_trafo = function(x) { model.matrix(~ x - 1) %*% t(contrMat(table(x), "Tukey")) } ) },
ytrafo = function(Y.data){ trafo(Y.data, numeric_trafo = rank, block = data[,block.name] ) }
)
# if(to.print.friedman) { print(the.sym.test) }
if(to.post.hoc.if.signif)
{
if(pvalue(the.sym.test) < signif.P)
{
# the post hoc test
The.post.hoc.P.values <- pvalue(the.sym.test, method = "single-step") # this is the post hoc of the friedman test
# plotting
if(to.plot.parallel & to.plot.boxplot) par(mfrow = c(1,2)) # if we are plotting two plots, let's make sure we'll be able to see both
if(to.plot.parallel)
{
X.names <- levels(data[, X.name])
X.for.plot <- seq_along(X.names)
plot.xlim <- c(.7 , length(X.for.plot)+.3) # adding some spacing from both sides of the plot
if(color.blocks.in.cor.plot)
{
blocks.col <- rainbow_hcl(length(levels(data[,block.name])))
} else {
blocks.col <- 1 # black
}
data2 <- data
if(jitter.Y.in.cor.plot) {
data2[,Y.name] <- jitter(data2[,Y.name])
par.cor.plot.text <- "Parallel coordinates plot (with Jitter)"
} else {
par.cor.plot.text <- "Parallel coordinates plot"
}
# adding a Parallel coordinates plot
matplot(as.matrix(reshape(data2, idvar=X.name, timevar=block.name,
direction="wide")[,-1]) ,
type = "l", lty = 1, axes = FALSE, ylab = Y.name,
xlim = plot.xlim,
col = blocks.col,
main = par.cor.plot.text)
axis(1, at = X.for.plot , labels = X.names) # plot X axis
axis(2) # plot Y axis
points(tapply(data[,Y.name], data[,X.name], median) ~ X.for.plot, col = "red",pch = 4, cex = 2, lwd = 5)
}
if(to.plot.boxplot)
{
# first we create a function to create a new Y, by substracting different combinations of X levels from each other.
subtract.a.from.b <- function(a.b , the.data)
{
the.data[,a.b[2]] - the.data[,a.b[1]]
}
temp.wide <- reshape(data, idvar=X.name, timevar=block.name,
direction="wide") #[,-1]
wide.data <- as.matrix(t(temp.wide[,-1]))
colnames(wide.data) <- temp.wide[,1]
Y.b.minus.a.combos <- apply(with(data,combn(levels(data[,X.name]), 2)), 2, subtract.a.from.b, the.data =wide.data)
names.b.minus.a.combos <- apply(with(data,combn(levels(data[,X.name]), 2)), 2, function(a.b) {paste(a.b[2],a.b[1],sep=" - ")})
the.ylim <- range(Y.b.minus.a.combos)
the.ylim[2] <- the.ylim[2] + max(sd(Y.b.minus.a.combos)) # adding some space for the labels
is.signif.color <- ifelse(The.post.hoc.P.values < .05 , "green", "grey")
boxplot(Y.b.minus.a.combos,
names = names.b.minus.a.combos ,
col = is.signif.color,
main = "Boxplots (of the differences)",
ylim = the.ylim
)
legend("topright", legend = paste(names.b.minus.a.combos, rep(" ; PostHoc P.value:", number.of.X.levels),round(The.post.hoc.P.values,5)) , fill = is.signif.color )
abline(h = 0, col = "blue")
}
list.to.return <- list(Friedman.Test = the.sym.test, PostHoc.Test = The.post.hoc.P.values)
if(to.print.friedman) {print(list.to.return)}
return(list.to.return)
} else {
print("The results where not significant, There is no need for a post hoc test")
return(the.sym.test)
}
}
}
##
library(lme4)
install.packages("optimx")
require(optimx) ## for optim optimizers
## (optimx-specific optimizers require explicit gradients --
## we could use numDeriv::grad, but this seems to defeat
## the intention)
install.packages("nloptr")
require(nloptr)
install.packages("dfoptim")
require(dfoptim) ## for nmkb
namedList <- function(...) {
L <- list(...)
snm <- sapply(substitute(list(...)),deparse)[-1]
if (is.null(nm <- names(L))) nm <- snm
if (any(nonames <- nm=="")) nm[nonames] <- snm[nonames]
setNames(L,nm)
}
## incorporated in lme4 1.1-7
## originally from https://github.com/lme4/lme4/issues/98 :
## nloptWrap <- function(fn, par, lower, upper, control=list(), ...) {
## defaultControl <- list(xtol_rel = 1e-6, maxeval = 1e5)
## for (n in names(defaultControl))
## if (is.null(control[[n]])) control[[n]] <- defaultControl[[n]]
## res <- nloptr(x0=par, eval_f=fn, lb=lower, ub=upper, opts=control, ...)
## ## ------
## with(res,list(par=solution,
## fval=objective,
## feval=iterations,
## conv=if (status>0) 0 else status,
## message=message))
## }
##' Attempt to re-fit a [g]lmer model with a range of optimizers.
##' The default is to use all known optimizers for R that satisfy the
##' requirements (do not require explicit gradients, allow
##' box constraints), in three categories; (i) built-in
##' (minqa::bobyqa, lme4::Nelder_Mead), (ii) wrapped via optimx
##' (most of optimx's optimizers that allow box constraints require
##' an explicit gradient function to be specified; the two provided
##' here are really base R functions that can be accessed via optimx,
##' (iii) wrapped via nloptr.
##'
##' @param m a fitted model
##' @param meth.tab a matrix (or data.frame) with columns
##' - method the name of a specific optimization method to pass to the optimizer
##' (leave blank for built-in optimizers)
##' - optimizer the \code{optimizer} function to use
##' @param verbose print progress messages?
##' @return a list of fitted \code{merMod} objects
##' @seealso slice, slice2D in the bbmle package
##' @examples
##' library(lme4)
##' gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
##' data = cbpp, family = binomial)
##' gm_all <- allFit(gm1)
##' t(sapply(gm_all,fixef)) ## extract fixed effects
##' sapply(gm_all,logLik) ## log-likelihoods
##' sapply(gm_all,getME,"theta") ## theta parameters
##' !sapply(gm_all,inherits,"try-error") ## was fit OK?
allFit <- function(m, meth.tab = cbind(optimizer=
rep(c("bobyqa","Nelder_Mead", "optimx", "nloptwrap"),
c( 1, 1, 2, 2)),
method= c("", "", "nlminb","L-BFGS-B",
"NLOPT_LN_NELDERMEAD", "NLOPT_LN_BOBYQA")),
verbose=TRUE,
maxfun=1e5)
{
stopifnot(length(dm <- dim(meth.tab)) == 2, dm[1] >= 1, dm[2] >= 2,
is.character(optimizer <- meth.tab[,"optimizer"]),
is.character(method <- meth.tab[,"method"]))
fit.names <- paste(optimizer, method, sep=".")
res <- setNames(as.list(fit.names), fit.names)
for (i in seq_along(fit.names)) {
if (verbose) cat(fit.names[i],": ")
ctrl <- list(optimizer=optimizer[i])
ctrl$optCtrl <- switch(optimizer[i],
optimx = list(method = method[i]),
nloptWrap = list(algorithm= method[i]),
list(maxfun=maxfun))
ctrl <- do.call(if(isGLMM(m)) glmerControl else lmerControl, ctrl)
tt <- system.time(rr <- tryCatch(update(m, control = ctrl), error = function(e) e))
attr(rr, "optCtrl") <- ctrl$optCtrl # contains crucial info here
attr(rr, "time") <- tt # store timing info
res[[i]] <- rr
if (verbose) cat("[OK]\n")
}
##
res
}
summary.allfit <- function(object, ...) {
which.OK <- !sapply(object,is,"error")
msgs <- lapply(object[which.OK],function(x) x@optinfo$conv$lme4$messages)
fixef <- t(sapply(object[which.OK],fixef))
llik <- sapply(object[which.OK],logLik)
times <- t(sapply(object[which.OK],attr,"time"))
feval <- sapply(object[which.OK],function(x) x@optinfo$feval)
sdcor <- t(sapply(object[which.OK],function(x) {
aa <- as.data.frame(VarCorr(x))
setNames(aa[,"sdcor"],c(lme4:::tnames(object[which.OK][[1]]),
if (isLMM(object[[1]])) "sigma" else NULL))
}))
namedList(which.OK,msgs,fixef,llik,sdcor,times,feval)
}
print.summary.allfit <- function(object,...) {
if (!which.OK==seq(length(object))) {
cat("some optimizers failed: ",
paste(names(object)[!which.OK],collapse=","),"\n")
}
}
# deletes NA rows
completeFun <- function(data, desiredCols) {
completeVec <- complete.cases(data[, desiredCols])
return(data[completeVec, ])
}
Theme23<-function (base_size = 12, base_family = "")
{
theme(
line = element_line(colour = "black", size = 0.5, linetype = 1, lineend = "butt"),
rect = element_rect(fill = "white", colour = "black", size = 0.5, linetype = 1),
text = element_text(family = base_family, face = "plain", colour = "black", size = base_size, hjust = 0.5, vjust = 0.5, angle = 0, lineheight = 0.9),
axis.text = element_text(size = rel(0.8), colour = "grey50"),
strip.text = element_text(size = rel(0.8)),
axis.line = element_blank(),
axis.text.x = element_text(vjust = 1),
axis.text.y = element_text(hjust = 1),
axis.ticks = element_line(colour = "grey50"),
axis.title.x = element_text(),
axis.title.y = element_text(angle = 90),
axis.ticks.length = unit(0.15, "cm"),
axis.ticks.margin = unit(0.1, "cm"),
legend.background = element_rect(colour = NA),
legend.margin = unit(0.2, "cm"),
legend.key = element_rect(fill = "grey95", colour = "white"),
legend.key.size = unit(1.2, "lines"),
legend.key.height = NULL,
legend.key.width = NULL,
legend.text = element_text(size = rel(0.8)),
legend.text.align = NULL,
legend.title = element_text(size = rel(0.8), face = "bold", hjust = 0),
legend.title.align = NULL,
legend.position = "right",
legend.direction = NULL,
legend.justification = "center",
legend.box = NULL,
panel.background = element_rect(fill = "grey90", colour = NA),
panel.border = element_blank(),
panel.grid.major = element_line(colour = "white"),
panel.grid.minor = element_line(colour = "grey95", size = 0.25),
panel.margin = unit(0.25, "lines"),
panel.margin.x = NULL,
panel.margin.y = NULL,
strip.background = element_rect(fill = "grey80", colour = NA),
strip.text.x = element_text(),
strip.text.y = element_text(angle = -90),
plot.background = element_rect(colour = "white"),
plot.title = element_text(size = rel(1.2)),
plot.margin = unit(c(1, 1, 0.5, 0.5), "lines"), complete = TRUE)
}
## Order in court
# deletes NA rows
completeFun <- function(data, desiredCols) {
completeVec <- complete.cases(data[, desiredCols])
return(data[completeVec, ])
}
# Inspect data
hist(Gradient$var)
# If proportional data, you can use the below transformation avoiding arcsine and only model with lmer
# first, transform data by dividing by 100
Gradient$var<-Gradient$var/100
# Nudge exact 0/1 proportions off the boundary so the logit transform stays finite
Gradient$var<-ifelse(Gradient$tvar==1,Gradient$var-0.001,Gradient$var)
Gradient$var<-ifelse(Gradient$tvar==0,Gradient$var+0.001,Gradient$var)
Gradient$var2<-qlogis(Gradient$tvar) # logit transform; NOTE(review): later code reads tvar2 -- confirm intended name
# Statistical test for normality
shapiro.test(Gradient$var)
# Shapiro-Wilk normality test per level of Plot (without adjusting for multiple testing)
do.call("rbind", with(Gradient, tapply(var, Plot,
function(x) unlist(shapiro.test(x)[c("statistic", "p.value")]))))
# Homogeneity of variances for model. Use Bartlett's if each stage has high normality
leveneTest(tvar2~Plot,data=Gradient) # Homogeneity of Variance of residuals
# Fixed: the inner lm() had no data argument, so `var` resolved to stats::var
# and `Plot` was undefined; point it at the Gradient columns explicitly
bartlett.test(resid(lm(var~Plot,data=Gradient))~Plot,data=Gradient) # Homogeneity of Variance of residuals
summary(aov(var~Plot,data=db)) # run if two tests are above 0.05. See one.way below, if not. NOTE(review): uses db, not Gradient -- confirm
TukeyHSD(aov(var~Plot,data=db)) # Tukey post-hoc test
oneway.test(var~Plot,data=Gradient)
# Fixed: bare `var` is not a column reference here (it is the stats::var
# function); the tukey() helper takes plain vectors, so pass them explicitly
tukey(Gradient$var,Gradient$Plot,method="Games-Howell")
kruskal.test(var~Plot,data=Gradient) # non-parametric one.way anova equivalent
kruskalmc(var~Plot,data=Gradient) # post-hoc test for the KW test
ezANOVA(data=Gradient, dv=.(var), within=.(Plot), wid=.(Site), detailed=TRUE) # Gives info on assumptions and significance
# Get the confidence intervals and se and plot
# (summarySE is defined earlier in this file; returns N/mean/sd/se/ci per group)
newSE <- summarySE(Gradient, measurevar="var", groupvars=c("Plot"))
# Mean +/- SE per plot, points connected by a single line (group=1)
g<-ggplot(newSE, aes(x=Plot, y=var,group=1)) +
geom_errorbar(aes(ymin=var-se, ymax=var+se), width=0.1,size=1.3) +
geom_line(size=1)+geom_point(size=10,shape=20,col="black")
g
# Progressive theming: axis text, axis labels, background, title sizes, border
g2<-g + theme(axis.text.x=element_text(angle=55, size=30, vjust=0.5)) + theme(axis.text.y=element_text(angle=0, size=30, vjust=0.5))+
labs(x="Stage of collapse", y="var cover (%)")
g3<-g2+theme(axis.text = element_text(size = 50, colour = "black"), panel.background = element_rect(fill = "white", colour = NA))
g4<-g3+theme(axis.title.y = element_text(size = rel(3.5), angle = 90),
axis.title.x = element_text(size = rel(3.5)))
g4
g5<-g4+theme(panel.border = element_rect(color="darkred", size=0.5, linetype="solid",fill=NA))
g5
ggplot(Gradient,aes(x=SBAPC,y=tvar2,colour=Site))+geom_point()+facet_wrap(~Site)+geom_smooth(method="lm") # Visualise how the slope differs at each site
## Random effects modelling
# Intercept-only models with increasingly rich random-effect structures
Modnull<-lm(tvar2~1,data=Gradient)
Modnull1<-lmer(tvar2~ 1 +(1|Site),data=Gradient)
Modnull2<- lmer(tvar2~ 1 +(1|Soil_Type),data=Gradient)
Modnull3<- lmer(tvar2~ 1 +(1|Site)+(1|Soil_Type),data=Gradient)
# Test to see if random effects make a difference - judge by std. dev being higher than 0
print(Modnull1); print(Modnull2); print (Modnull3); print (Modnull)
AICc(Modnull,Modnull1,Modnull2,Modnull3)
# Caterpillar plot of the site-level random intercepts (with conditional variances)
dotplot(ranef(Modnull1,condVar=TRUE),
lattice.options=list(layout=c(1,1))) # test to see if intercepts change
# Linear regression
# NOTE(review): fitted on `db` while the rest of this section uses `Gradient` -- confirm
lr1<-glmer(var~Plot+(1|Site), data=db,family=poisson)
summary(lr1)
r.squaredGLMM(lr1)
confint(lr1)
coefs <- data.frame(coef(summary(lr1)))
# use normal distribution to approximate p-value
# NOTE(review): glmer (Poisson) summaries expose a z value column (z.value),
# not t.value -- confirm this column name against summary(lr1)
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
coefs
plot(lr1)
summary(glht(lr1,linfct=mcp(Plot="Tukey")))
# extract coefficients for lme4 package
# NOTE(review): `mod` is not defined above -- presumably lr1; confirm
coefs <- data.frame(coef(summary(mod)))
# use normal distribution to approximate p-value
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
coefs
# If assumptions not met then you can do the non-parametric Friedman test with post-hoc.
# Read in the function first. The code below will only give post-hoc results if the test is significant
summary(glht(mod,linfct=mcp(Plot="Tukey")))
## Random effects modelling
AICc(Modnull1,Modnull2,Modnull3)
Mod1<- lmer(var~Plot+Dung+(1|Site)+(1|Soil_Type),data=Gradient)
# Fixed: a stray comma after the fixed effect split the formula and pushed the
# random-effect terms into lmer()'s second (data) argument slot
Mod2<- lmer(var~Plot+(1|Site)+(1|Soil_Type),data=Gradient)
Mod3<- lmer(var~Plot*Dung+(1|Site)+(1|Soil_Type),data=Gradient)
Mod4<- lmer(var~Dung+(1|Site)+(1|Soil_Type),data=Gradient) # fixed stray comma, as above
AICc(Mod1, Mod2,Mod3,Mod4,Modnull3)
Modelfun<-list(Mod1,Mod2,Mod3,Mod4,Modnull3)
#summarise these in this table
Model_tab<-model.sel(Modelfun)
Model_tab # Nearly all weight goes to the best model
r.squaredGLMM(Mod1); r.squaredGLMM(Mod2);
r.squaredGLMM(Mod3); r.squaredGLMM(Mod4)
r.squaredGLMM(Modnull3)
# or if glm/lm with no random effects, find out r2 from the code below
# NOTE(review): these use db$var while the models were fitted on Gradient -- confirm
R2 <- cor(db$var,predict(Mod1))^2
R2
R2 <- cor(db$var,predict(Mod2))^2
R2
R2 <- cor(db$var,predict(Mod3))^2
R2
R2 <- cor(db$var,predict(Mod4))^2
R2 # fixed: this last value was computed but never printed
# Continuous SBA for count data
# Null models: compare random-effect structures (random SBAPC slope vs intercept only)
Mod0.1<- glmer(var ~ 1 + (SBAPC| Site), data = Gradient,family=poisson)
Mod0.2<- glmer(var ~ 1 + (1 | Site), data = Gradient,family=poisson)
Mod0.3<- glmer(var ~ 1 + (1 | Soil_Type), data = Gradient,family=poisson )
Mod0.4<-glm(var~1,data=Gradient,family=poisson)
AICc(Mod0.1,Mod0.2,Mod0.3,Mod0.4) # shows that the random effects should include SBAPC change
Mod1<-glmer(var ~ SBAPC +Dung+(SBAPC| Site), data = Gradient,family=poisson)
Mod2<-glmer(var ~ SBAPC*Dung + (SBAPC| Site), data = Gradient,family=poisson)
AICc(Mod0.1,Mod1,Mod2)
anova(Mod0.1,Mod2) # Implies that should keep both. Let's try model selection though
AICc(Mod0.1,Mod1,Mod2) # Mod1 (i.e. the one with the additive term) is better slightly
Mod3<-glmer(var~Dung+ (SBAPC| Site), data = Gradient,family=poisson)
AICc(Mod0.1,Mod1,Mod2,Mod3)
Mod4<-glmer(var~SBAPC+ (SBAPC| Site), data = Gradient,family=poisson)
Mod5<-glmer(var~SBAPC+I(SBAPC^2)+ (SBAPC| Site), data = Gradient,family=poisson) # quadratic SBAPC term
Modelfun<-list(Mod1,Mod2,Mod3,Mod4,Mod5,Mod0.1)
Model_tab<-model.sel(Modelfun)
Model_tab # Nearly all weight goes to the best model
r.squaredGLMM(Mod1); r.squaredGLMM(Mod2);
r.squaredGLMM(Mod3); r.squaredGLMM(Mod4)
r.squaredGLMM(Mod5); r.squaredGLMM(Mod0.1)
# NOTE(review): `Mod` is not defined -- presumably the best model from Model_tab; confirm
plot(Mod) # residuals look visually fine
# Continuous SBA for proportional data
# (trailing empty arguments ", )" removed from the lmer() calls below)
Mod0.1<- lmer(var~ 1 + (SBAPC| Site), data = Gradient)
Mod0.2<- lmer(var ~ 1 + (1 | Site), data = Gradient)
Mod0.3<- lmer(var ~ 1 + (SBAPC| Soil_Type), data = Gradient)
Mod0.4<- lmer(var ~ 1 + (1 | Soil_Type), data = Gradient)
Mod0.5<- lmer(var ~ 1 +(1|Site) +(1 | Soil_Type), data = Gradient)
print(Mod0.1);print(Mod0.2);print(Mod0.3);print(Mod0.4);print(Mod0.5)
AICc(Mod0.1,Mod0.2,Mod0.3,Mod0.4,Mod0.5) # shows that the random effects should include SBAPC change
Mod1<-lmer(var ~ SBAPC +Dung+(1|Site) +(1 | Soil_Type), data = Gradient)
Mod2<-lmer(var ~ SBAPC*Dung +(1|Site) +(1 | Soil_Type), data = Gradient)
Mod3<-lmer(var~Dung+(1|Site) +(1 | Soil_Type), data = Gradient)
Mod4<-lmer(var~SBAPC+(1|Site) +(1 | Soil_Type), data = Gradient)
Mod5<-lmer(var~SBAPC+I(SBAPC^2)+(1|Site) +(1 | Soil_Type), data = Gradient)
AICc(Mod1,Mod2,Mod3,Mod4,Mod5,Mod0.1)
Modelfun<-list(Mod1,Mod2,Mod3,Mod4,Mod5,Mod0.5)
#summarise these in this table
Model_tab<-model.sel(Modelfun)
Model_tab # Nearly all weight goes to the best model
r.squaredGLMM(Mod1); r.squaredGLMM(Mod2);
r.squaredGLMM(Mod3); r.squaredGLMM(Mod4)
r.squaredGLMM(Mod5); r.squaredGLMM(Mod0.1)
summary(Mod1) # best model
confint(Mod1)
# extract coefficients for lme4 package
# Fixed: `Mod` was undefined here; the surrounding lines inspect Mod1 (the best model)
coefs <- data.frame(coef(summary(Mod1)))
# use normal distribution to approximate p-value
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
coefs
# Plot graphs from predictions
# NOTE(review): `mod1` (lower case) is not defined in this file; the fitted
# objects above are Mod1..Mod5 -- confirm which model these predictions use.
Gradient$Pred_R<-predict(mod1)
# Prediction grid over the SBAPC range, holding Prevar at its mean, for every site
new.data<-expand.grid(SBAPC=seq(0,1,0.01),
Prevar=mean(Gradient$Prevar),
Site=levels(Gradient$Site))
new.data$Pred_R<-predict(mod1,newdata=new.data)
new.data$Pred<-predict(mod1,newdata=new.data,re.form=NA) # re.form=NA: population-level prediction (random effects dropped)
theme_set(theme_bw(base_size=12))
# Observed points plus per-site fitted lines (back-transformed with exp)
Grad_plot1<-ggplot(Gradient,aes(x=SBAPC*100,y=exp(var)-1,group=Site,colour=Site))+geom_point()+geom_line(data=new.data,aes(y=exp(Pred_R)))
Grad_plot1
# Overlay the population-level fit as a thick black line
Grad_plot2<-Grad_plot1+geom_line(data=new.data,size=2,colour="black",aes(y=exp(Pred),x=SBAPC*100,group=NULL))+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.border = element_rect(size=1.5,colour="black",fill=NA))
Grad_plot2
Grad_plot2+xlab("Percentage loss of basal area relative to reference")+ylab("Understorey Condition")
| /Run-thru code.R | no_license | pevans13/Grad_plots | R | false | false | 26,560 | r | library(coin)
library(ez)
library(nparcomp)
library(pgirmess)
library(influence.ME)
library(MASS)
library(car)
library(reshape)
library(plyr)
library(strucchange)
library(segmented)
library(SiZer)
library(gplots)
library(ggplot2)
library(psych)
library(nlme)
library(lme4)
library(MuMIn)
library(ggplot2)
library(lattice)
library(nlme)
library(multcomp)
library(arm)
library(pbkrtest)
library(RColorBrewer)
library(lattice)
install.packages("gridExtra")
library(gridExtra)
# summarySE: summarise `measurevar` within the groups defined by `groupvars`.
# Returns a data frame with one row per group and columns:
#   N (count), <measurevar> (mean), sd, se (sd/sqrt(N)), and ci (the half-width
#   of the `conf.interval` confidence interval based on the t distribution).
# Requires the plyr package (ddply, rename).
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
##### Mulitplot - display lots of ggplots at once #####
# multiplot: draw several ggplot objects on one page.
# Plots are supplied via `...` and/or `plotlist`; `cols` sets the number of
# columns when no explicit `layout` matrix is given. `file` is accepted for
# backwards compatibility but never used. Requires the grid package.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
##### Games-Howell ####
# tukey: pairwise comparisons between groups after a one-way design.
# method = "Tukey" assumes equal variances (classic Tukey HSD);
# method = "Games-Howell" allows unequal variances and unequal group sizes
# (Welch-type df per pair). Returns per-group summary statistics plus a
# matrix of pairwise test statistics and p-values.
tukey <- function( data, # vector of observations
group, # grouping variable (vector)
method=c("Tukey", "Games-Howell")) # choice of method
{
OK <- complete.cases(data, group) # drop cases with missing values
data <- data[OK]
group <- factor(group[OK])
n <- tapply(data, group, length) # number of cases per group
a <- length(n) # number of groups
phi.e <- sum(n)-a # degrees of freedom of the error (within-group) variance
Mean <- tapply(data, group, mean) # group means
Variance <- tapply(data, group, var) # group (unbiased) variances
result1 <- cbind(n, Mean, Variance) # per-group summary statistics
rownames(result1) <- paste("Group", 1:a, sep="")
method <- match.arg(method)
if (method == "Tukey") { # Tukey's method
v.e <- sum((n-1)*Variance)/phi.e # pooled error (within-group) variance
t <- combn(a, 2, function(ij) # pairwise comparisons
abs(diff(Mean[ij]))/sqrt(v.e*sum(1/n[ij])) )
p <- ptukey(t*sqrt(2), a, phi.e, lower.tail=FALSE) # compute p-values from the studentized range
Tukey <- cbind(t, p)
rownames(Tukey) <- combn(a, 2, paste, collapse=":")
return(list(result1=result1, Tukey=Tukey, phi=phi.e, v=v.e))
}
else {
# Games-Howell: Welch-corrected statistic and df for each pair
t.df <- combn(a, 2, function(ij) {
t <- abs(diff(Mean[ij]))/sqrt(sum(Variance[ij]/n[ij]))
df <- sum(Variance[ij]/n[ij])^2/sum((Variance[ij]/n[ij])^2/(n[ij]-1))
return(c(t, df))} )
t <- t.df[1,]
df <- t.df[2,]
p <- ptukey(t*sqrt(2), a, df, lower.tail=FALSE)
Games.Howell <- cbind(t, df, p)
rownames(Games.Howell) <- combn(a, 2, paste, collapse=":")
return(list(result1=result1, Games.Howell=Games.Howell))
}
}
# Reference: "Pairwise Multiple Comparison Procedures with Unequal N's
# and/or Variances: A Monte Carlo Study",
# Journal of Educational Statistics, Vol. 1, No. 2, 1976, pp. 113-125
# Friedman post-hoc test for non-parametric test
# friedman.test.with.post.hoc: Friedman test (via coin::symmetry_test) with a
# Tukey-style single-step post-hoc comparison when the omnibus test is
# significant, plus optional parallel-coordinates and difference boxplots.
#   formu : formula of the shape Y ~ X | block
#   data  : long data.frame with columns Y (numeric), X (factor), block (factor)
# Returns the symmetry_test object; when significant, a list with the test and
# the post-hoc p-values. Requires coin, multcomp and colorspace.
friedman.test.with.post.hoc <- function(formu, data, to.print.friedman = T, to.post.hoc.if.signif = T, to.plot.parallel = T, to.plot.boxplot = T, signif.P = .05, color.blocks.in.cor.plot = T, jitter.Y.in.cor.plot =F)
{
# formu is a formula of the shape: Y ~ X | block
# data is a long data.frame with three columns: [[ Y (numeric), X (factor), block (factor) ]]
# Note: This function doesn't handle NA's! In case of NA in Y in one of the blocks, then that entire block should be removed.
# Loading needed packages
# NOTE(review): installing packages from inside a function is a heavy side
# effect; consider requiring the packages up front instead.
if(!require(coin))
{
print("You are missing the package 'coin', we will now try to install it...")
install.packages("coin")
library(coin)
}
if(!require(multcomp))
{
print("You are missing the package 'multcomp', we will now try to install it...")
install.packages("multcomp")
library(multcomp)
}
if(!require(colorspace))
{
print("You are missing the package 'colorspace', we will now try to install it...")
install.packages("colorspace")
library(colorspace)
}
# get the names out of the formula
formu.names <- all.vars(formu)
Y.name <- formu.names[1]
X.name <- formu.names[2]
block.name <- formu.names[3]
if(dim(data)[2] >3) data <- data[,c(Y.name,X.name,block.name)] # In case we have a "data" data frame with more then the three columns we need. This code will clean it from them...
# Note: the function doesn't handle NA's. In case of NA in one of the block T outcomes, that entire block should be removed.
# stopping in case there is NA in the Y vector
if(sum(is.na(data[,Y.name])) > 0) stop("Function stopped: This function doesn't handle NA's. In case of NA in Y in one of the blocks, then that entire block should be removed.")
# make sure that the number of factors goes with the actual values present in the data:
data[,X.name ] <- factor(data[,X.name ])
data[,block.name ] <- factor(data[,block.name ])
number.of.X.levels <- length(levels(data[,X.name ]))
if(number.of.X.levels == 2) { warning(paste("'",X.name,"'", "has only two levels. Consider using paired wilcox.test instead of friedman test"))}
# making the object that will hold the friedman test and the other.
the.sym.test <- symmetry_test(formu, data = data, ### all pairwise comparisons
teststat = "max",
xtrafo = function(Y.data) { trafo( Y.data, factor_trafo = function(x) { model.matrix(~ x - 1) %*% t(contrMat(table(x), "Tukey")) } ) },
ytrafo = function(Y.data){ trafo(Y.data, numeric_trafo = rank, block = data[,block.name] ) }
)
# if(to.print.friedman) { print(the.sym.test) }
if(to.post.hoc.if.signif)
{
if(pvalue(the.sym.test) < signif.P)
{
# the post hoc test
The.post.hoc.P.values <- pvalue(the.sym.test, method = "single-step") # this is the post hoc of the friedman test
# plotting
if(to.plot.parallel & to.plot.boxplot) par(mfrow = c(1,2)) # if we are plotting two plots, let's make sure we'll be able to see both
if(to.plot.parallel)
{
X.names <- levels(data[, X.name])
X.for.plot <- seq_along(X.names)
plot.xlim <- c(.7 , length(X.for.plot)+.3) # adding some spacing from both sides of the plot
if(color.blocks.in.cor.plot)
{
blocks.col <- rainbow_hcl(length(levels(data[,block.name])))
} else {
blocks.col <- 1 # black
}
data2 <- data
if(jitter.Y.in.cor.plot) {
data2[,Y.name] <- jitter(data2[,Y.name])
par.cor.plot.text <- "Parallel coordinates plot (with Jitter)"
} else {
par.cor.plot.text <- "Parallel coordinates plot"
}
# adding a Parallel coordinates plot
matplot(as.matrix(reshape(data2, idvar=X.name, timevar=block.name,
direction="wide")[,-1]) ,
type = "l", lty = 1, axes = FALSE, ylab = Y.name,
xlim = plot.xlim,
col = blocks.col,
main = par.cor.plot.text)
axis(1, at = X.for.plot , labels = X.names) # plot X axis
axis(2) # plot Y axis
points(tapply(data[,Y.name], data[,X.name], median) ~ X.for.plot, col = "red",pch = 4, cex = 2, lwd = 5)
}
if(to.plot.boxplot)
{
# first we create a function to create a new Y, by substracting different combinations of X levels from each other.
subtract.a.from.b <- function(a.b , the.data)
{
the.data[,a.b[2]] - the.data[,a.b[1]]
}
temp.wide <- reshape(data, idvar=X.name, timevar=block.name,
direction="wide") #[,-1]
wide.data <- as.matrix(t(temp.wide[,-1]))
colnames(wide.data) <- temp.wide[,1]
Y.b.minus.a.combos <- apply(with(data,combn(levels(data[,X.name]), 2)), 2, subtract.a.from.b, the.data =wide.data)
names.b.minus.a.combos <- apply(with(data,combn(levels(data[,X.name]), 2)), 2, function(a.b) {paste(a.b[2],a.b[1],sep=" - ")})
the.ylim <- range(Y.b.minus.a.combos)
the.ylim[2] <- the.ylim[2] + max(sd(Y.b.minus.a.combos)) # adding some space for the labels
is.signif.color <- ifelse(The.post.hoc.P.values < .05 , "green", "grey")
boxplot(Y.b.minus.a.combos,
names = names.b.minus.a.combos ,
col = is.signif.color,
main = "Boxplots (of the differences)",
ylim = the.ylim
)
legend("topright", legend = paste(names.b.minus.a.combos, rep(" ; PostHoc P.value:", number.of.X.levels),round(The.post.hoc.P.values,5)) , fill = is.signif.color )
abline(h = 0, col = "blue")
}
list.to.return <- list(Friedman.Test = the.sym.test, PostHoc.Test = The.post.hoc.P.values)
if(to.print.friedman) {print(list.to.return)}
return(list.to.return)
} else {
print("The results where not significant, There is no need for a post hoc test")
return(the.sym.test)
}
}
}
##
library(lme4)
install.packages("optimx")
require(optimx) ## for optim optimizers
## (optimx-specific optimizers require explicit gradients --
## we could use numDeriv::grad, but this seems to defeat
## the intention)
install.packages("nloptr")
require(nloptr)
install.packages("dfoptim")
require(dfoptim) ## for nmkb
# Build a list from `...` in which every unnamed element is named after the
# expression that produced it (so namedList(x, y) == list(x = x, y = y)).
namedList <- function(...) {
  values <- list(...)
  expr.labels <- sapply(substitute(list(...)), deparse)[-1]
  labels <- names(values)
  if (is.null(labels)) {
    labels <- expr.labels
  } else {
    missing.label <- labels == ""
    labels[missing.label] <- expr.labels[missing.label]
  }
  setNames(values, labels)
}
## incorporated in lme4 1.1-7
## originally from https://github.com/lme4/lme4/issues/98 :
## nloptWrap <- function(fn, par, lower, upper, control=list(), ...) {
## defaultControl <- list(xtol_rel = 1e-6, maxeval = 1e5)
## for (n in names(defaultControl))
## if (is.null(control[[n]])) control[[n]] <- defaultControl[[n]]
## res <- nloptr(x0=par, eval_f=fn, lb=lower, ub=upper, opts=control, ...)
## ## ------
## with(res,list(par=solution,
## fval=objective,
## feval=iterations,
## conv=if (status>0) 0 else status,
## message=message))
## }
##' Attempt to re-fit a [g]lmer model with a range of optimizers.
##' The default is to use all known optimizers for R that satisfy the
##' requirements (do not require explicit gradients, allow
##' box constraints), in three categories; (i) built-in
##' (minqa::bobyqa, lme4::Nelder_Mead), (ii) wrapped via optimx
##' (most of optimx's optimizers that allow box constraints require
##' an explicit gradient function to be specified; the two provided
##' here are really base R functions that can be accessed via optimx,
##' (iii) wrapped via nloptr.
##'
##' @param m a fitted model
##' @param meth.tab a matrix (or data.frame) with columns
##' - method the name of a specific optimization method to pass to the optimizer
##' (leave blank for built-in optimizers)
##' - optimizer the \code{optimizer} function to use
##' @param verbose print progress messages?
##' @return a list of fitted \code{merMod} objects
##' @seealso slice, slice2D in the bbmle package
##' @examples
##' library(lme4)
##' gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
##' data = cbpp, family = binomial)
##' gm_all <- allFit(gm1)
##' t(sapply(gm_all,fixef)) ## extract fixed effects
##' sapply(gm_all,logLik) ## log-likelihoods
##' sapply(gm_all,getME,"theta") ## theta parameters
##' !sapply(gm_all,inherits,"try-error") ## was fit OK?
##' Refit a [g]lmer model with every optimizer listed in `meth.tab`.
##'
##' @param m a fitted merMod object
##' @param meth.tab matrix with columns `optimizer` (optimizer function name)
##'   and `method` (optimizer-specific algorithm; "" for built-ins)
##' @param verbose print progress messages?
##' @param maxfun maximum number of function evaluations for built-in optimizers
##' @return a named list, one element per row of `meth.tab`, holding the
##'   refitted model (or the caught error object), each carrying "optCtrl"
##'   and "time" attributes
allFit <- function(m, meth.tab = cbind(optimizer=
                     rep(c("bobyqa","Nelder_Mead", "optimx", "nloptwrap"),
                         c( 1, 1, 2, 2)),
                     method= c("", "", "nlminb","L-BFGS-B",
                       "NLOPT_LN_NELDERMEAD", "NLOPT_LN_BOBYQA")),
                   verbose=TRUE,
                   maxfun=1e5)
{
  stopifnot(length(dm <- dim(meth.tab)) == 2, dm[1] >= 1, dm[2] >= 2,
            is.character(optimizer <- meth.tab[,"optimizer"]),
            is.character(method <- meth.tab[,"method"]))
  fit.names <- paste(optimizer, method, sep=".")
  res <- setNames(as.list(fit.names), fit.names)
  for (i in seq_along(fit.names)) {
    if (verbose) cat(fit.names[i],": ")
    ctrl <- list(optimizer=optimizer[i])
    ## Fixed: the case label was "nloptWrap", which never matched the
    ## "nloptwrap" optimizer name above (switch() is case-sensitive), so the
    ## requested nloptr algorithm was silently dropped and the default
    ## maxfun branch ran instead.
    ctrl$optCtrl <- switch(optimizer[i],
                           optimx    = list(method = method[i]),
                           nloptwrap = list(algorithm = method[i]),
                           list(maxfun=maxfun))
    ctrl <- do.call(if(isGLMM(m)) glmerControl else lmerControl, ctrl)
    ## time the refit; errors are captured so a single failing optimizer
    ## does not abort the whole sweep
    tt <- system.time(rr <- tryCatch(update(m, control = ctrl), error = function(e) e))
    attr(rr, "optCtrl") <- ctrl$optCtrl # contains crucial info here
    attr(rr, "time") <- tt # store timing info
    res[[i]] <- rr
    if (verbose) cat("[OK]\n")
  }
  res
}
# Summarise the list of fits produced by allFit(): collect, for the fits that
# did not error, their convergence messages, fixed effects, log-likelihoods,
# variance-component sd/cor values, timings and function-evaluation counts.
# NOTE(review): relies on the unexported lme4:::tnames and on namedList()
# (defined above in this file).
summary.allfit <- function(object, ...) {
# which fits succeeded (i.e. are not caught error objects)
which.OK <- !sapply(object,is,"error")
msgs <- lapply(object[which.OK],function(x) x@optinfo$conv$lme4$messages)
fixef <- t(sapply(object[which.OK],fixef))
llik <- sapply(object[which.OK],logLik)
times <- t(sapply(object[which.OK],attr,"time"))
feval <- sapply(object[which.OK],function(x) x@optinfo$feval)
sdcor <- t(sapply(object[which.OK],function(x) {
aa <- as.data.frame(VarCorr(x))
setNames(aa[,"sdcor"],c(lme4:::tnames(object[which.OK][[1]]),
if (isLMM(object[[1]])) "sigma" else NULL))
}))
namedList(which.OK,msgs,fixef,llik,sdcor,times,feval)
}
## Print method for summary.allfit objects: report which optimizers failed.
## Fixed: the original tested the bare name `which.OK` (not defined in this
## function's scope) against seq(length(object)) -- a logical-vs-integer
## vector comparison that is never a valid scalar `if` condition. We now read
## the which.OK element of the summary object and reduce it with all().
print.summary.allfit <- function(object, ...) {
  which.OK <- object$which.OK
  if (!all(which.OK)) {
    cat("some optimizers failed: ",
        paste(names(which.OK)[!which.OK], collapse=","), "\n")
  }
  invisible(object) # return the object invisibly, as print methods should
}
# deletes NA rows
# Keep only the rows of `data` that have no NA in any of `desiredCols`.
completeFun <- function(data, desiredCols) {
completeVec <- complete.cases(data[, desiredCols])
return(data[completeVec, ])
}
# Theme23: a complete ggplot2 theme (grey panel, white gridlines), closely
# following the classic theme_grey() defaults.
# NOTE(review): axis.ticks.margin, legend.margin-as-unit and panel.margin*
# were deprecated/renamed in later ggplot2 releases (plot spacing is now
# panel.spacing etc.) -- confirm against the ggplot2 version in use.
Theme23<-function (base_size = 12, base_family = "")
{
theme(
# base element defaults inherited by the more specific elements below
line = element_line(colour = "black", size = 0.5, linetype = 1, lineend = "butt"),
rect = element_rect(fill = "white", colour = "black", size = 0.5, linetype = 1),
text = element_text(family = base_family, face = "plain", colour = "black", size = base_size, hjust = 0.5, vjust = 0.5, angle = 0, lineheight = 0.9),
axis.text = element_text(size = rel(0.8), colour = "grey50"),
strip.text = element_text(size = rel(0.8)),
# axis settings
axis.line = element_blank(),
axis.text.x = element_text(vjust = 1),
axis.text.y = element_text(hjust = 1),
axis.ticks = element_line(colour = "grey50"),
axis.title.x = element_text(),
axis.title.y = element_text(angle = 90),
axis.ticks.length = unit(0.15, "cm"),
axis.ticks.margin = unit(0.1, "cm"),
# legend settings
legend.background = element_rect(colour = NA),
legend.margin = unit(0.2, "cm"),
legend.key = element_rect(fill = "grey95", colour = "white"),
legend.key.size = unit(1.2, "lines"),
legend.key.height = NULL,
legend.key.width = NULL,
legend.text = element_text(size = rel(0.8)),
legend.text.align = NULL,
legend.title = element_text(size = rel(0.8), face = "bold", hjust = 0),
legend.title.align = NULL,
legend.position = "right",
legend.direction = NULL,
legend.justification = "center",
legend.box = NULL,
# panel, facet strip and whole-plot settings
panel.background = element_rect(fill = "grey90", colour = NA),
panel.border = element_blank(),
panel.grid.major = element_line(colour = "white"),
panel.grid.minor = element_line(colour = "grey95", size = 0.25),
panel.margin = unit(0.25, "lines"),
panel.margin.x = NULL,
panel.margin.y = NULL,
strip.background = element_rect(fill = "grey80", colour = NA),
strip.text.x = element_text(),
strip.text.y = element_text(angle = -90),
plot.background = element_rect(colour = "white"),
plot.title = element_text(size = rel(1.2)),
plot.margin = unit(c(1, 1, 0.5, 0.5), "lines"), complete = TRUE)
}
## Order in court
# deletes NA rows
# Drop every row of `data` that contains an NA in any of `desiredCols`.
completeFun <- function(data, desiredCols) {
  keep <- complete.cases(data[, desiredCols])
  data[keep, ]
}
# Inspect data
hist(Gradient$var)
# If proportional data, you can use the below transformation avoiding arcsine and only model with lmer
# first, transform data by dividing by 100
Gradient$var<-Gradient$var/100
# Nudge exact 0/1 proportions off the boundary so the logit transform stays finite
Gradient$var<-ifelse(Gradient$tvar==1,Gradient$var-0.001,Gradient$var)
Gradient$var<-ifelse(Gradient$tvar==0,Gradient$var+0.001,Gradient$var)
Gradient$var2<-qlogis(Gradient$tvar) # logit transform; NOTE(review): later code reads tvar2 -- confirm intended name
# Statistical test for normality
shapiro.test(Gradient$var)
# Shapiro-Wilk normality test per level of Plot (without adjusting for multiple testing)
do.call("rbind", with(Gradient, tapply(var, Plot,
function(x) unlist(shapiro.test(x)[c("statistic", "p.value")]))))
# Homogeneity of variances for model. Use Bartlett's if each stage has high normality
leveneTest(tvar2~Plot,data=Gradient) # Homogeneity of Variance of residuals
# Fixed: the inner lm() had no data argument, so `var` resolved to stats::var
# and `Plot` was undefined; point it at the Gradient columns explicitly
bartlett.test(resid(lm(var~Plot,data=Gradient))~Plot,data=Gradient) # Homogeneity of Variance of residuals
summary(aov(var~Plot,data=db)) # run if two tests are above 0.05. See one.way below, if not. NOTE(review): uses db, not Gradient -- confirm
TukeyHSD(aov(var~Plot,data=db)) # Tukey post-hoc test
oneway.test(var~Plot,data=Gradient)
# Fixed: bare `var` is not a column reference here (it is the stats::var
# function); the tukey() helper takes plain vectors, so pass them explicitly
tukey(Gradient$var,Gradient$Plot,method="Games-Howell")
kruskal.test(var~Plot,data=Gradient) # non-parametric one.way anova equivalent
kruskalmc(var~Plot,data=Gradient) # post-hoc test for the KW test
ezANOVA(data=Gradient, dv=.(var), within=.(Plot), wid=.(Site), detailed=TRUE) # Gives info on assumptions and significance
# Get the confidence intervals and se and plot
# (summarySE is defined earlier in this file; returns N/mean/sd/se/ci per group)
newSE <- summarySE(Gradient, measurevar="var", groupvars=c("Plot"))
# Mean +/- SE per plot, points connected by a single line (group=1)
g<-ggplot(newSE, aes(x=Plot, y=var,group=1)) +
geom_errorbar(aes(ymin=var-se, ymax=var+se), width=0.1,size=1.3) +
geom_line(size=1)+geom_point(size=10,shape=20,col="black")
g
# Progressive theming: axis text, axis labels, background, title sizes, border
g2<-g + theme(axis.text.x=element_text(angle=55, size=30, vjust=0.5)) + theme(axis.text.y=element_text(angle=0, size=30, vjust=0.5))+
labs(x="Stage of collapse", y="var cover (%)")
g3<-g2+theme(axis.text = element_text(size = 50, colour = "black"), panel.background = element_rect(fill = "white", colour = NA))
g4<-g3+theme(axis.title.y = element_text(size = rel(3.5), angle = 90),
axis.title.x = element_text(size = rel(3.5)))
g4
g5<-g4+theme(panel.border = element_rect(color="darkred", size=0.5, linetype="solid",fill=NA))
g5
ggplot(Gradient,aes(x=SBAPC,y=tvar2,colour=Site))+geom_point()+facet_wrap(~Site)+geom_smooth(method="lm") # Visualise how the slope differs at each site
## Random effects modelling
# Intercept-only models with increasingly rich random-effect structures
Modnull<-lm(tvar2~1,data=Gradient)
Modnull1<-lmer(tvar2~ 1 +(1|Site),data=Gradient)
Modnull2<- lmer(tvar2~ 1 +(1|Soil_Type),data=Gradient)
Modnull3<- lmer(tvar2~ 1 +(1|Site)+(1|Soil_Type),data=Gradient)
# Test to see if random effects make a difference - judge by std. dev being higher than 0
print(Modnull1); print(Modnull2); print (Modnull3); print (Modnull)
AICc(Modnull,Modnull1,Modnull2,Modnull3)
# Caterpillar plot of the site-level random intercepts (with conditional variances)
dotplot(ranef(Modnull1,condVar=TRUE),
lattice.options=list(layout=c(1,1))) # test to see if intercepts change
# Linear regression
# NOTE(review): fitted on `db` while the rest of this section uses `Gradient` -- confirm
lr1<-glmer(var~Plot+(1|Site), data=db,family=poisson)
summary(lr1)
r.squaredGLMM(lr1)
confint(lr1)
coefs <- data.frame(coef(summary(lr1)))
# use normal distribution to approximate p-value
# NOTE(review): glmer (Poisson) summaries expose a z value column (z.value),
# not t.value -- confirm this column name against summary(lr1)
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
coefs
plot(lr1)
summary(glht(lr1,linfct=mcp(Plot="Tukey")))
# extract coefficients for lme4 package
# NOTE(review): `mod` is not defined above -- presumably lr1; confirm
coefs <- data.frame(coef(summary(mod)))
# use normal distribution to approximate p-value
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
coefs
# If assumptions not met then you can do the non-parametric Friedman test with post-hoc.
# Read in the function first. The code below will only give post-hoc results if the test is significant
summary(glht(mod,linfct=mcp(Plot="Tukey")))
## Random effects modelling
AICc(Modnull1,Modnull2,Modnull3)
Mod1<- lmer(var~Plot+Dung+(1|Site)+(1|Soil_Type),data=Gradient)
# Fixed: a stray comma after the fixed effect split the formula and pushed the
# random-effect terms into lmer()'s second (data) argument slot
Mod2<- lmer(var~Plot+(1|Site)+(1|Soil_Type),data=Gradient)
Mod3<- lmer(var~Plot*Dung+(1|Site)+(1|Soil_Type),data=Gradient)
Mod4<- lmer(var~Dung+(1|Site)+(1|Soil_Type),data=Gradient) # fixed stray comma, as above
AICc(Mod1, Mod2,Mod3,Mod4,Modnull3)
Modelfun<-list(Mod1,Mod2,Mod3,Mod4,Modnull3)
#summarise these in this table
Model_tab<-model.sel(Modelfun)
Model_tab # Nearly all weight goes to the best model
r.squaredGLMM(Mod1); r.squaredGLMM(Mod2);
r.squaredGLMM(Mod3); r.squaredGLMM(Mod4)
r.squaredGLMM(Modnull3)
# or if glm/lm with no random effects, find out r2 from the code below
# NOTE(review): these use db$var while the models were fitted on Gradient -- confirm
R2 <- cor(db$var,predict(Mod1))^2
R2
R2 <- cor(db$var,predict(Mod2))^2
R2
R2 <- cor(db$var,predict(Mod3))^2
R2
R2 <- cor(db$var,predict(Mod4))^2
R2 # fixed: this last value was computed but never printed
# Continuous SBA for count data
# Null models: compare random-effect structures (random SBAPC slope vs intercept only)
Mod0.1<- glmer(var ~ 1 + (SBAPC| Site), data = Gradient,family=poisson)
Mod0.2<- glmer(var ~ 1 + (1 | Site), data = Gradient,family=poisson)
Mod0.3<- glmer(var ~ 1 + (1 | Soil_Type), data = Gradient,family=poisson )
Mod0.4<-glm(var~1,data=Gradient,family=poisson)
AICc(Mod0.1,Mod0.2,Mod0.3,Mod0.4) # shows that the random effects should include SBAPC change
Mod1<-glmer(var ~ SBAPC +Dung+(SBAPC| Site), data = Gradient,family=poisson)
Mod2<-glmer(var ~ SBAPC*Dung + (SBAPC| Site), data = Gradient,family=poisson)
AICc(Mod0.1,Mod1,Mod2)
anova(Mod0.1,Mod2) # Implies that should keep both. Let's try model selection though
AICc(Mod0.1,Mod1,Mod2) # Mod1 (i.e. the one with the additive term) is better slightly
Mod3<-glmer(var~Dung+ (SBAPC| Site), data = Gradient,family=poisson)
AICc(Mod0.1,Mod1,Mod2,Mod3)
Mod4<-glmer(var~SBAPC+ (SBAPC| Site), data = Gradient,family=poisson)
Mod5<-glmer(var~SBAPC+I(SBAPC^2)+ (SBAPC| Site), data = Gradient,family=poisson) # quadratic SBAPC term
Modelfun<-list(Mod1,Mod2,Mod3,Mod4,Mod5,Mod0.1)
Model_tab<-model.sel(Modelfun)
Model_tab # Nearly all weight goes to the best model
r.squaredGLMM(Mod1); r.squaredGLMM(Mod2);
r.squaredGLMM(Mod3); r.squaredGLMM(Mod4)
r.squaredGLMM(Mod5); r.squaredGLMM(Mod0.1)
# NOTE(review): `Mod` is not defined -- presumably the best model from Model_tab; confirm
plot(Mod) # residuals look visually fine
# Continuous SBA for proportional data
# (trailing empty arguments ", )" removed from the lmer() calls below)
Mod0.1<- lmer(var~ 1 + (SBAPC| Site), data = Gradient)
Mod0.2<- lmer(var ~ 1 + (1 | Site), data = Gradient)
Mod0.3<- lmer(var ~ 1 + (SBAPC| Soil_Type), data = Gradient)
Mod0.4<- lmer(var ~ 1 + (1 | Soil_Type), data = Gradient)
Mod0.5<- lmer(var ~ 1 +(1|Site) +(1 | Soil_Type), data = Gradient)
print(Mod0.1);print(Mod0.2);print(Mod0.3);print(Mod0.4);print(Mod0.5)
AICc(Mod0.1,Mod0.2,Mod0.3,Mod0.4,Mod0.5) # shows that the random effects should include SBAPC change
Mod1<-lmer(var ~ SBAPC +Dung+(1|Site) +(1 | Soil_Type), data = Gradient)
Mod2<-lmer(var ~ SBAPC*Dung +(1|Site) +(1 | Soil_Type), data = Gradient)
Mod3<-lmer(var~Dung+(1|Site) +(1 | Soil_Type), data = Gradient)
Mod4<-lmer(var~SBAPC+(1|Site) +(1 | Soil_Type), data = Gradient)
Mod5<-lmer(var~SBAPC+I(SBAPC^2)+(1|Site) +(1 | Soil_Type), data = Gradient)
AICc(Mod1,Mod2,Mod3,Mod4,Mod5,Mod0.1)
Modelfun<-list(Mod1,Mod2,Mod3,Mod4,Mod5,Mod0.5)
#summarise these in this table
Model_tab<-model.sel(Modelfun)
Model_tab # Nearly all weight goes to the best model
r.squaredGLMM(Mod1); r.squaredGLMM(Mod2);
r.squaredGLMM(Mod3); r.squaredGLMM(Mod4)
r.squaredGLMM(Mod5); r.squaredGLMM(Mod0.1)
summary(Mod1) # best model
confint(Mod1)
# extract coefficients for lme4 package
# Fixed: `Mod` was undefined here; the surrounding lines inspect Mod1 (the best model)
coefs <- data.frame(coef(summary(Mod1)))
# use normal distribution to approximate p-value
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
coefs
# Plot graphs from predictions
# NOTE(review): `mod1` (lower case) is not defined in this file; the fitted
# objects above are Mod1..Mod5 -- confirm which model these predictions use.
Gradient$Pred_R<-predict(mod1)
# Prediction grid over the SBAPC range, holding Prevar at its mean, for every site
new.data<-expand.grid(SBAPC=seq(0,1,0.01),
Prevar=mean(Gradient$Prevar),
Site=levels(Gradient$Site))
new.data$Pred_R<-predict(mod1,newdata=new.data)
new.data$Pred<-predict(mod1,newdata=new.data,re.form=NA) # re.form=NA: population-level prediction (random effects dropped)
theme_set(theme_bw(base_size=12))
# Observed points plus per-site fitted lines (back-transformed with exp)
Grad_plot1<-ggplot(Gradient,aes(x=SBAPC*100,y=exp(var)-1,group=Site,colour=Site))+geom_point()+geom_line(data=new.data,aes(y=exp(Pred_R)))
Grad_plot1
# Overlay the population-level fit as a thick black line
Grad_plot2<-Grad_plot1+geom_line(data=new.data,size=2,colour="black",aes(y=exp(Pred),x=SBAPC*100,group=NULL))+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.border = element_rect(size=1.5,colour="black",fill=NA))
Grad_plot2
Grad_plot2+xlab("Percentage loss of basal area relative to reference")+ylab("Understorey Condition")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{methods}
\alias{methods}
\title{Metadata on the different TI methods}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 48 rows and 32 columns.}
\usage{
methods
}
\description{
Metadata on the different TI methods
}
\keyword{datasets}
| /man/methods.Rd | no_license | flying-sheep/dynmethods | R | false | true | 383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{methods}
\alias{methods}
\title{Metadata on the different TI methods}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 48 rows and 32 columns.}
\usage{
methods
}
\description{
Metadata on the different TI methods
}
\keyword{datasets}
|
# Get the dataset
dataset <- read.csv('/home/felipe/Documentos/Machine Learning A-Z/Part 3 - Classification/Section 14 - Logistic Regression/Social_Network_Ads.csv')
# No missing values
# Check for categorical/hierarchical parameters
# Drop the first column (an ID column), keeping the predictors and Purchased
dataset <- dataset[2:ncol(dataset)]
# Encode Gender as a factor with 0-based numeric labels
dataset$Gender <- factor(dataset$Gender,
levels = unique(dataset$Gender),
labels = seq(0, length(unique(dataset$Gender)) - 1, 1))
# Feature scaling: centre and scale every column except the last (Purchased)
dataset[, 1:(ncol(dataset) - 1)] <- scale(apply(dataset[, 1:(ncol(dataset) - 1)], 2, as.numeric))
# Split the dataset into test set and train set
library(caTools)
# NOTE(review): no set.seed() before sample.split, so the split is not reproducible
datasplit <- sample.split(dataset$Purchased, SplitRatio = 0.75)
train_set <- subset(dataset, datasplit)
test_set <- subset(dataset, !datasplit)
# Fit the model
classifier <- glm(formula = Purchased ~ Age + EstimatedSalary,
family = binomial, # Because it's a logistic regression
data = train_set)
# Seems like the 'Gender' parameter is not a very influential parameter
# Predict the test set (type = 'response' yields probabilities)
prediction_prob <- predict(object = classifier,
type = 'response',
newdata = test_set)
# Threshold: 0.5
prediction_label <- ifelse(prediction_prob >= 0.5, 1, 0)
# Confusion matrix
conf_mat <- table(test_set$Purchased, prediction_label) | /testing-section/Classification-testing/part8-logistic-regression/lr.R | no_license | irekizea/machine-learning-learning | R | false | false | 1,219 | r | # Get the dataset
dataset <- read.csv('/home/felipe/Documentos/Machine Learning A-Z/Part 3 - Classification/Section 14 - Logistic Regression/Social_Network_Ads.csv')
# The file has no missing values. Drop the user-id column and encode the
# categorical 'Gender' column as a 0-based factor.
dataset <- dataset[-1]
gender_levels <- unique(dataset$Gender)
dataset$Gender <- factor(dataset$Gender,
                         levels = gender_levels,
                         labels = seq_along(gender_levels) - 1)
# Standardise every predictor column (all but the response 'Purchased')
feature_cols <- seq_len(ncol(dataset) - 1)
dataset[, feature_cols] <- scale(apply(dataset[, feature_cols], 2, as.numeric))
# 75/25 stratified split into training and test sets
library(caTools)
datasplit <- sample.split(dataset$Purchased, SplitRatio = 0.75)
train_set <- dataset[datasplit, ]
test_set <- dataset[!datasplit, ]
# Fit a logistic regression on the two informative predictors
# ('Gender' contributes little explanatory power here)
classifier <- glm(formula = Purchased ~ Age + EstimatedSalary,
                  family = binomial,  # logistic regression
                  data = train_set)
# Predicted purchase probabilities for the held-out test set
prediction_prob <- predict(object = classifier,
                           type = 'response',
                           newdata = test_set)
# Binarise the probabilities at the 0.5 threshold
prediction_label <- ifelse(prediction_prob >= 0.5, 1, 0)
# Confusion matrix
conf_mat <- table(test_set$Purchased, prediction_label) |
# Differential expression of PB58 vs NF54 at 6, 26 and 38 hours.
# triplicates_data.csv holds 18 samples x 5545 columns; columns 1-5 are
# metadata and 6-5545 are genes. Row layout: rows 1-9 = PB58 triplicates
# (6/26/38 hr), rows 10-18 = NF54 triplicates (6/26/38 hr).
setwd("~/ferdig_rotation/pB_data/my_results/diff_gene_exp/")
library(gplots)  # heatmap.2() and greenred() come from gplots; load it explicitly
data <- read.csv("triplicates_data.csv")
dim(data) # 18 by 5545, first 5 columns are metadata
# Per-gene Welch t-test between two triplicate row groups; returns the
# vector of p-values for gene columns 6:5545.
ttest_pvals <- function(rows1, rows2) {
  vapply(6:5545,
         function(i) t.test(data[rows1, i], data[rows2, i])$p.value,
         numeric(1))
}
# 6 hr compare (PB58 rows 1:3 vs NF54 rows 10:12)
pvals_6hr <- ttest_pvals(1:3, 10:12)
# 26 hr compare (PB58 rows 4:6 vs NF54 rows 13:15)
pvals_26hr <- ttest_pvals(4:6, 13:15)
# 38 hr compare (PB58 rows 7:9 vs NF54 rows 16:18)
pvals_38hr <- ttest_pvals(7:9, 16:18)
# fdr correction (kept for reference; gene selection below still uses the
# raw p-values, as in the original analysis)
fdr_vals_6hr <- p.adjust(pvals_6hr, method = "fdr")
fdr_vals_26hr <- p.adjust(pvals_26hr, method = "fdr")
fdr_vals_38hr <- p.adjust(pvals_38hr, method = "fdr")
# Column indices (within 'data') of genes with raw p < 0.01. The "+ 5"
# shifts positions in the p-value vectors past the 5 metadata columns;
# vectorised, so it no longer assumes a hard-coded number of significant
# genes (the old loop was fixed at 22 for the 6 hr set).
difgenes_6hr <- which(pvals_6hr < 0.01) + 5
difgenes_26hr <- which(pvals_26hr < 0.01) + 5
difgenes_38hr <- which(pvals_38hr < 0.01) + 5
dif_exp_6hr <- data[c(1:3, 10:12), difgenes_6hr]
dif_exp_26hr <- data[c(4:6, 13:15), difgenes_26hr]
dif_exp_38hr <- data[c(7:9, 16:18), difgenes_38hr]
# Find per-line averages, order genes by the NF54 - PB58 difference and plot
avg_6_pb <- colMeans(dif_exp_6hr[1:3, ])
avg_6_nf <- colMeans(dif_exp_6hr[4:6, ])
subtract <- avg_6_nf - avg_6_pb  # vectorised (was a loop hard-coded to 22 genes)
diff_6 <- cbind(avg_6_nf, avg_6_pb, subtract)
ordered_diff_6 <- diff_6[order(-diff_6[, 3]), ]
mat1 <- data.matrix(ordered_diff_6[, -3])
heatmap.2(mat1, Rowv=FALSE, Colv=FALSE, scale="row", dendrogram = "none", distfun=dist, hclustfun=hclust, xlab="Line", ylab="Genes", key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
avg_26_pb <- colMeans(dif_exp_26hr[1:3, ])
avg_26_nf <- colMeans(dif_exp_26hr[4:6, ])
subtract <- avg_26_nf - avg_26_pb  # vectorised (was a loop hard-coded to 126 genes)
diff_26 <- cbind(avg_26_nf, avg_26_pb, subtract)
ordered_diff_26 <- diff_26[order(-diff_26[, 3]), ]
mat2 <- data.matrix(ordered_diff_26[, -3])
heatmap.2(mat2, Rowv=FALSE, Colv=FALSE, scale = "row", dendrogram="none", distfun=dist, hclustfun=hclust, xlab="Line", ylab="Genes", key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
avg_38_pb <- colMeans(dif_exp_38hr[1:3, ])
avg_38_nf <- colMeans(dif_exp_38hr[4:6, ])
subtract <- avg_38_nf - avg_38_pb  # vectorised (was a loop hard-coded to 97 genes)
diff_38 <- cbind(avg_38_nf, avg_38_pb, subtract)
ordered_diff_38 <- diff_38[order(-diff_38[, 3]), ]
mat3 <- data.matrix(ordered_diff_38[, -3])
# NOTE(review): Colv=TRUE here while the 6 hr and 26 hr heatmaps use
# Colv=FALSE -- confirm the column clustering is intentional.
heatmap.2(mat3, Rowv=FALSE, Colv=TRUE, scale = "row",dendrogram="none", distfun=dist, hclustfun=hclust, xlab="Line", ylab="Genes", key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
# Names of the differentially expressed genes (cbind() carried the gene
# names through as row names of the ordered matrices)
diff_6_names <- row.names(ordered_diff_6)
diff_26_names <- row.names(ordered_diff_26)
diff_38_names <- row.names(ordered_diff_38)
# Plot all time points together: map the gene names back to columns of
# 'data'. match() replaces the old O(genes x columns) which() loop and,
# like it, keeps genes that are significant at several time points as
# duplicate entries.
all_genes <- c(diff_6_names, diff_26_names, diff_38_names)
difgeneindex <- match(all_genes, colnames(data))
all_genes_data <- data[, difgeneindex]
PB58_6hr <- colMeans(all_genes_data[1:3, ])
NF54_6hr <- colMeans(all_genes_data[10:12, ])
PB58_26hr <- colMeans(all_genes_data[4:6, ])
NF54_26hr <- colMeans(all_genes_data[13:15, ])
PB58_38hr <- colMeans(all_genes_data[7:9, ])
NF54_38hr <- colMeans(all_genes_data[16:18, ])
diff_all <- rbind(PB58_6hr, NF54_6hr, PB58_26hr, NF54_26hr, PB58_38hr, NF54_38hr)
diff_all_mat <- data.matrix(diff_all)
heatmap.2(diff_all_mat, Rowv=FALSE, Colv=FALSE, scale = "column",dendrogram="none", distfun=dist, hclustfun=hclust, key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
diff_t <- data.matrix(t(diff_all))
heatmap.2(diff_t, Rowv=FALSE, Colv=FALSE, scale = "row",dendrogram="none", distfun=dist, hclustfun=hclust, key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
# Compare differentially expressed genes to the network to see how much overlap
rw_net <- read.csv("../DTWMIC/wgcna/wgcna_hardthreshold/rewired_edgelist_PB58_KBS.csv")
| /pB_data_codes/differential_gene_expression.R | no_license | katiemeis/code_gradlab | R | false | false | 4,754 | r |
# Differential expression of PB58 vs NF54 at 6, 26 and 38 hours.
# triplicates_data.csv holds 18 samples x 5545 columns; columns 1-5 are
# metadata and 6-5545 are genes. Row layout: rows 1-9 = PB58 triplicates
# (6/26/38 hr), rows 10-18 = NF54 triplicates (6/26/38 hr).
setwd("~/ferdig_rotation/pB_data/my_results/diff_gene_exp/")
library(gplots)  # heatmap.2() and greenred() come from gplots; load it explicitly
data <- read.csv("triplicates_data.csv")
dim(data) # 18 by 5545, first 5 columns are metadata
# Per-gene Welch t-test between two triplicate row groups; returns the
# vector of p-values for gene columns 6:5545.
ttest_pvals <- function(rows1, rows2) {
  vapply(6:5545,
         function(i) t.test(data[rows1, i], data[rows2, i])$p.value,
         numeric(1))
}
# 6 hr compare (PB58 rows 1:3 vs NF54 rows 10:12)
pvals_6hr <- ttest_pvals(1:3, 10:12)
# 26 hr compare (PB58 rows 4:6 vs NF54 rows 13:15)
pvals_26hr <- ttest_pvals(4:6, 13:15)
# 38 hr compare (PB58 rows 7:9 vs NF54 rows 16:18)
pvals_38hr <- ttest_pvals(7:9, 16:18)
# fdr correction (kept for reference; gene selection below still uses the
# raw p-values, as in the original analysis)
fdr_vals_6hr <- p.adjust(pvals_6hr, method = "fdr")
fdr_vals_26hr <- p.adjust(pvals_26hr, method = "fdr")
fdr_vals_38hr <- p.adjust(pvals_38hr, method = "fdr")
# Column indices (within 'data') of genes with raw p < 0.01. The "+ 5"
# shifts positions in the p-value vectors past the 5 metadata columns;
# vectorised, so it no longer assumes a hard-coded number of significant
# genes (the old loop was fixed at 22 for the 6 hr set).
difgenes_6hr <- which(pvals_6hr < 0.01) + 5
difgenes_26hr <- which(pvals_26hr < 0.01) + 5
difgenes_38hr <- which(pvals_38hr < 0.01) + 5
dif_exp_6hr <- data[c(1:3, 10:12), difgenes_6hr]
dif_exp_26hr <- data[c(4:6, 13:15), difgenes_26hr]
dif_exp_38hr <- data[c(7:9, 16:18), difgenes_38hr]
# Find per-line averages, order genes by the NF54 - PB58 difference and plot
avg_6_pb <- colMeans(dif_exp_6hr[1:3, ])
avg_6_nf <- colMeans(dif_exp_6hr[4:6, ])
subtract <- avg_6_nf - avg_6_pb  # vectorised (was a loop hard-coded to 22 genes)
diff_6 <- cbind(avg_6_nf, avg_6_pb, subtract)
ordered_diff_6 <- diff_6[order(-diff_6[, 3]), ]
mat1 <- data.matrix(ordered_diff_6[, -3])
heatmap.2(mat1, Rowv=FALSE, Colv=FALSE, scale="row", dendrogram = "none", distfun=dist, hclustfun=hclust, xlab="Line", ylab="Genes", key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
avg_26_pb <- colMeans(dif_exp_26hr[1:3, ])
avg_26_nf <- colMeans(dif_exp_26hr[4:6, ])
subtract <- avg_26_nf - avg_26_pb  # vectorised (was a loop hard-coded to 126 genes)
diff_26 <- cbind(avg_26_nf, avg_26_pb, subtract)
ordered_diff_26 <- diff_26[order(-diff_26[, 3]), ]
mat2 <- data.matrix(ordered_diff_26[, -3])
heatmap.2(mat2, Rowv=FALSE, Colv=FALSE, scale = "row", dendrogram="none", distfun=dist, hclustfun=hclust, xlab="Line", ylab="Genes", key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
avg_38_pb <- colMeans(dif_exp_38hr[1:3, ])
avg_38_nf <- colMeans(dif_exp_38hr[4:6, ])
subtract <- avg_38_nf - avg_38_pb  # vectorised (was a loop hard-coded to 97 genes)
diff_38 <- cbind(avg_38_nf, avg_38_pb, subtract)
ordered_diff_38 <- diff_38[order(-diff_38[, 3]), ]
mat3 <- data.matrix(ordered_diff_38[, -3])
# NOTE(review): Colv=TRUE here while the 6 hr and 26 hr heatmaps use
# Colv=FALSE -- confirm the column clustering is intentional.
heatmap.2(mat3, Rowv=FALSE, Colv=TRUE, scale = "row",dendrogram="none", distfun=dist, hclustfun=hclust, xlab="Line", ylab="Genes", key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
# Names of the differentially expressed genes (cbind() carried the gene
# names through as row names of the ordered matrices)
diff_6_names <- row.names(ordered_diff_6)
diff_26_names <- row.names(ordered_diff_26)
diff_38_names <- row.names(ordered_diff_38)
# Plot all time points together: map the gene names back to columns of
# 'data'. match() replaces the old O(genes x columns) which() loop and,
# like it, keeps genes that are significant at several time points as
# duplicate entries.
all_genes <- c(diff_6_names, diff_26_names, diff_38_names)
difgeneindex <- match(all_genes, colnames(data))
all_genes_data <- data[, difgeneindex]
PB58_6hr <- colMeans(all_genes_data[1:3, ])
NF54_6hr <- colMeans(all_genes_data[10:12, ])
PB58_26hr <- colMeans(all_genes_data[4:6, ])
NF54_26hr <- colMeans(all_genes_data[13:15, ])
PB58_38hr <- colMeans(all_genes_data[7:9, ])
NF54_38hr <- colMeans(all_genes_data[16:18, ])
diff_all <- rbind(PB58_6hr, NF54_6hr, PB58_26hr, NF54_26hr, PB58_38hr, NF54_38hr)
diff_all_mat <- data.matrix(diff_all)
heatmap.2(diff_all_mat, Rowv=FALSE, Colv=FALSE, scale = "column",dendrogram="none", distfun=dist, hclustfun=hclust, key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
diff_t <- data.matrix(t(diff_all))
heatmap.2(diff_t, Rowv=FALSE, Colv=FALSE, scale = "row",dendrogram="none", distfun=dist, hclustfun=hclust, key=TRUE, keysize=1, col=greenred(2000), trace="none", density.info=c("none"), margins=c(10, 8), cexRow=0.5, sepcolor="white")
# Compare differentially expressed genes to the network to see how much overlap
rw_net <- read.csv("../DTWMIC/wgcna/wgcna_hardthreshold/rewired_edgelist_PB58_KBS.csv")
|
##### SCRIPT FOR PLOTTING SE-N-PLOT OF GENOME-WIDE ASSOCIATION RESULTS #####
#
# Adapted from EasyQC by Thomas Winkler and Mathias Gorski.
# website: http://www.uni-regensburg.de/medizin/epidemiologie-praeventivmedizin/genetische-epidemiologie/software/index.html
# citation: http://www.nature.com/nprot/journal/v9/n5/full/nprot.2014.071.html
#
# Description: This script plots the inverse of the median standard error of the
# beta estimates across all SNPs against the square root of the sample
# size for the GWAS study.
#
# Usage: R CMD BATCH --args -CL -input.txt -[PNG/PDF/TIFF] -output.[PNG/PDF/TIFF] plotter.se_n_lambda.R
#
# Input data: required:
# ONLY the output of the se-n-lambda.pl script; this consists of
# 4 columns (Study Median_SE Lambda Mean_N) WITH headers
# Columns present:
# - V1=[Study]
# - V2=[Median_SE]
# - V3=[Lambda]
# - V4=[Mean_N]
# Image styles: PDF (width=10, height=5), PNG (width=1280, height=720),
# TIFF (width=1280, height=720, resolution=150).
# Note on the image styles: PNG can best be used in PowerPoint
# presentations, whereas TIFF produces high-quality, large-sized
# files for publication purposes.
# Output: Name of the output-file with the correct extension [PNG/PDF/TIFF]
#
# Update: 2015-02-26
# Editor: Sander W. van der Laan
# E-mail: s.w.vanderlaan-2@umcutrecht.nl
### READS INPUT OPTIONS ###
# NOTE(review): rm(list=ls()) wipes the session; acceptable only because this
# script is intended to run non-interactively via "R CMD BATCH".
rm(list=ls())
x <- 0
# Walk commandArgs() until the "-CL" flag is found; the three values that
# follow it are (in order) the input file, the image format and the output
# file, each passed with a leading "-" that substr(..., 2, ...) strips off.
# If "-CL" is absent, only a reminder is printed and 'input'/'image_style'/
# 'output' remain undefined, so read.table() below will fail.
repeat {
x <- x+1
if (commandArgs()[x] == "-CL") {
input <- commandArgs()[x+1]; input <- substr(input, 2, nchar(input))
image_style <- commandArgs()[x+2]; image_style <- substr(image_style, 2, nchar(image_style))
output <- commandArgs()[x+3]; output <- substr(output, 2, nchar(output))
break
}
if (x == length(commandArgs())) {
print("remember the -CL command!")
break
}
}
rm(x)
#---------------------------------------------------------------------------------#
### READ IN THE DATA ###
# Expects the se-n-lambda.pl output: whitespace-separated, with header
# columns Study, Median_SE, Lambda, Mean_N.
data <- read.table(input, header=TRUE)
#---------------------------------------------------------------------------------#
# INSTALLATION OF REQUIRED PACKAGE(S)
# Install function for packages
# Installs the (unquoted) package name from CRAN when it is not already
# available, then attaches it. The name is recovered from the call itself
# via match.call(), which is why it is invoked as packages(calibrate)
# rather than packages("calibrate").
packages<-function(x){
x<-as.character(match.call()[[2]])
if (!require(x,character.only=TRUE)){
install.packages(pkgs=x,repos="http://cran.r-project.org")
require(x,character.only=TRUE)
}
}
# Install and load required package(s)
packages(calibrate) #the "calibrate" package is needed to plot text at the datapoints
library(calibrate)
#---------------------------------------------------------------------------------#
# PLOTTING
### DEFINE PLOT COLORS ###
### Define plotting colors for each element/point of the plot.
### UtrechtSciencePark Colours Scheme
### yellow #FBB820 => 1 or 1.0 > INFO
### gold #F59D10 => 2
### salmon #E55738 => 3 or 0.05 < MAF < 0.2 or 0.4 < INFO < 0.6
### darkpink #DB003F => 4
### lightpink #E35493 => 5 or 0.8 < INFO < 1.0
### pink #D5267B => 6
### hardpink #CC0071 => 7
### lightpurple #A8448A => 8
### purple #9A3480 => 9
### lavendel #8D5B9A => 10
### bluepurple #705296 => 11
### purpleblue #686AA9 => 12
### lightpurpleblue #6173AD => 13
### seablue #4C81BF => 14
### skyblue #2F8BC9 => 15
### azurblue #1290D9 => 16 or 0.01 < MAF < 0.05 or 0.2 < INFO < 0.4
### lightazurblue #1396D8 => 17
### greenblue #15A6C1 => 18
### seaweedgreen #5EB17F => 19
### yellowgreen #86B833 => 20
### lightmossgreen #C5D220 => 21
### mossgreen #9FC228 => 22 or MAF > 0.20 or 0.6 < INFO < 0.8
### lightgreen #78B113 => 23/X/x
### green #49A01D => 24/Y/y
### grey #595A5C => 25/XY/xy/Xy/xY or MAF < 0.01 or 0.0 < INFO < 0.2
### lightgrey #A2A3A4 => 26/MT/Mt/mt/mT
## Plots axes and null distribution
print("Determining what type of image should be produced and plotting axes.")
# Open the requested graphics device; the same file receives both panels.
if (image_style == "PNG")
png(output, width=600, height=800)
if (image_style == "TIFF")
tiff(output, width=600, height=800)
if (image_style == "PDF")
pdf(output, width=6, height=8)
# Two panels stacked vertically: par(mfrow=c(2,1)) = 2 rows x 1 column
par(mfrow=c(2,1), mar=c(4,4,4,4))
# plot 1/(median(SE)) vs. sqrt(N)
# NOTE(review): despite the header text and the y-axis label
# "Inverse median SE", the value plotted is data$Median_SE itself, not
# 1/data$Median_SE -- confirm against the EasyQC original.
plot(sqrt(data$Mean_N), (data$Median_SE),
     main="SE-N plot", xlab=expression(sqrt(paste(italic(N)))), ylab="Inverse median SE",
     xlim= c(0, 1.5*max(sqrt(data$Mean_N))), ylim=c(0, 1.5*max(data$Median_SE)),
     col="#E55738", pch=20, bty="n", cex.lab=0.75, cex.axis=0.75,
     xaxs="i", yaxs="i")
# Reference diagonal through the origin, with slope matching the axis limits
abline(a=0, b=((1.5*max(data$Median_SE))/(1.5*max(sqrt(data$Mean_N)))), col="#595A5C", lty=2)
# Label each point with its study name
textxy(sqrt(data$Mean_N), (data$Median_SE), data$Study, cex=0.5)
# plot lambda(p) vs. sqrt(N)
plot(sqrt(data$Mean_N), data$Lambda,
     main="Lambda-N plot", xlab=expression(sqrt(paste(italic(N)))), ylab=expression(lambda),
     xlim=c(0, 1.5*max(sqrt(data$Mean_N))), ylim=c(0.9*min(data$Lambda), 1.2*max(data$Lambda)),
     col="#9FC228", pch=20, bty="n", cex.lab=0.75, cex.axis=0.75,
     xaxs="i", yaxs="i")
# Reference lines: lambda = 1.0 (no inflation, dashed) and lambda = 1.1
# (inflation warning level, dotted)
abline(h=1.0, col="#595A5C", lty=2)
abline(h=1.1, col="#E55738", lty=3)
textxy(sqrt(data$Mean_N), data$Lambda, data$Study, cex=0.5)
dev.off()
| /SCRIPTS/plotter.se_n_lambda.R | permissive | swvanderlaan/MetaGWASToolKit | R | false | false | 5,295 | r | ##### SCRIPT FOR PLOTTING SE-N-PLOT OF GENOME-WIDE ASSOCIATION RESULTS #####
#
# Adapted from EasyQC by Thomas Winkler and Mathias Gorski.
# website: http://www.uni-regensburg.de/medizin/epidemiologie-praeventivmedizin/genetische-epidemiologie/software/index.html
# citation: http://www.nature.com/nprot/journal/v9/n5/full/nprot.2014.071.html
#
# Description: This script plots the inverse of the median standard error of the
# beta estimates across all SNPs against the square root of the sample
# size for the GWAS study.
#
# Usage: R CMD BATCH --args -CL -input.txt -[PNG/PDF/TIFF] -output.[PNG/PDF/TIFF] plotter.se_n_lambda.R
#
# Input data: required:
# ONLY the output of the se-n-lambda.pl script; this consists of
# 4 columns (Study Median_SE Lambda Mean_N) WITH headers
# Columns present:
# - V1=[Study]
# - V2=[Median_SE]
# - V3=[Lambda]
# - V4=[Mean_N]
# Image styles: PDF (width=10, height=5), PNG (width=1280, height=720),
# TIFF (width=1280, height=720, resolution=150).
# Note on the image styles: PNG can best be used in PowerPoint
# presentations, whereas TIFF produces high-quality, large-sized
# files for publication purposes.
# Output: Name of the output-file with the correct extension [PNG/PDF/TIFF]
#
# Update: 2015-02-26
# Editor: Sander W. van der Laan
# E-mail: s.w.vanderlaan-2@umcutrecht.nl
### READS INPUT OPTIONS ###
# NOTE(review): rm(list=ls()) wipes the session; acceptable only because this
# script is intended to run non-interactively via "R CMD BATCH".
rm(list=ls())
x <- 0
# Walk commandArgs() until the "-CL" flag is found; the three values that
# follow it are (in order) the input file, the image format and the output
# file, each passed with a leading "-" that substr(..., 2, ...) strips off.
# If "-CL" is absent, only a reminder is printed and 'input'/'image_style'/
# 'output' remain undefined, so read.table() below will fail.
repeat {
x <- x+1
if (commandArgs()[x] == "-CL") {
input <- commandArgs()[x+1]; input <- substr(input, 2, nchar(input))
image_style <- commandArgs()[x+2]; image_style <- substr(image_style, 2, nchar(image_style))
output <- commandArgs()[x+3]; output <- substr(output, 2, nchar(output))
break
}
if (x == length(commandArgs())) {
print("remember the -CL command!")
break
}
}
rm(x)
#---------------------------------------------------------------------------------#
### READ IN THE DATA ###
# Expects the se-n-lambda.pl output: whitespace-separated, with header
# columns Study, Median_SE, Lambda, Mean_N.
data <- read.table(input, header=TRUE)
#---------------------------------------------------------------------------------#
# INSTALLATION OF REQUIRED PACKAGE(S)
# Install function for packages
# Installs the (unquoted) package name from CRAN when it is not already
# available, then attaches it. The name is recovered from the call itself
# via match.call(), which is why it is invoked as packages(calibrate)
# rather than packages("calibrate").
packages<-function(x){
x<-as.character(match.call()[[2]])
if (!require(x,character.only=TRUE)){
install.packages(pkgs=x,repos="http://cran.r-project.org")
require(x,character.only=TRUE)
}
}
# Install and load required package(s)
packages(calibrate) #the "calibrate" package is needed to plot text at the datapoints
library(calibrate)
#---------------------------------------------------------------------------------#
# PLOTTING
### DEFINE PLOT COLORS ###
### Define plotting colors for each element/point of the plot.
### UtrechtSciencePark Colours Scheme
### yellow #FBB820 => 1 or 1.0 > INFO
### gold #F59D10 => 2
### salmon #E55738 => 3 or 0.05 < MAF < 0.2 or 0.4 < INFO < 0.6
### darkpink #DB003F => 4
### lightpink #E35493 => 5 or 0.8 < INFO < 1.0
### pink #D5267B => 6
### hardpink #CC0071 => 7
### lightpurple #A8448A => 8
### purple #9A3480 => 9
### lavendel #8D5B9A => 10
### bluepurple #705296 => 11
### purpleblue #686AA9 => 12
### lightpurpleblue #6173AD => 13
### seablue #4C81BF => 14
### skyblue #2F8BC9 => 15
### azurblue #1290D9 => 16 or 0.01 < MAF < 0.05 or 0.2 < INFO < 0.4
### lightazurblue #1396D8 => 17
### greenblue #15A6C1 => 18
### seaweedgreen #5EB17F => 19
### yellowgreen #86B833 => 20
### lightmossgreen #C5D220 => 21
### mossgreen #9FC228 => 22 or MAF > 0.20 or 0.6 < INFO < 0.8
### lightgreen #78B113 => 23/X/x
### green #49A01D => 24/Y/y
### grey #595A5C => 25/XY/xy/Xy/xY or MAF < 0.01 or 0.0 < INFO < 0.2
### lightgrey #A2A3A4 => 26/MT/Mt/mt/mT
## Plots axes and null distribution
print("Determining what type of image should be produced and plotting axes.")
# Open the requested graphics device; the same file receives both panels.
if (image_style == "PNG")
png(output, width=600, height=800)
if (image_style == "TIFF")
tiff(output, width=600, height=800)
if (image_style == "PDF")
pdf(output, width=6, height=8)
# Two panels stacked vertically: par(mfrow=c(2,1)) = 2 rows x 1 column
par(mfrow=c(2,1), mar=c(4,4,4,4))
# plot 1/(median(SE)) vs. sqrt(N)
# NOTE(review): despite the header text and the y-axis label
# "Inverse median SE", the value plotted is data$Median_SE itself, not
# 1/data$Median_SE -- confirm against the EasyQC original.
plot(sqrt(data$Mean_N), (data$Median_SE),
     main="SE-N plot", xlab=expression(sqrt(paste(italic(N)))), ylab="Inverse median SE",
     xlim= c(0, 1.5*max(sqrt(data$Mean_N))), ylim=c(0, 1.5*max(data$Median_SE)),
     col="#E55738", pch=20, bty="n", cex.lab=0.75, cex.axis=0.75,
     xaxs="i", yaxs="i")
# Reference diagonal through the origin, with slope matching the axis limits
abline(a=0, b=((1.5*max(data$Median_SE))/(1.5*max(sqrt(data$Mean_N)))), col="#595A5C", lty=2)
# Label each point with its study name
textxy(sqrt(data$Mean_N), (data$Median_SE), data$Study, cex=0.5)
# plot lambda(p) vs. sqrt(N)
plot(sqrt(data$Mean_N), data$Lambda,
     main="Lambda-N plot", xlab=expression(sqrt(paste(italic(N)))), ylab=expression(lambda),
     xlim=c(0, 1.5*max(sqrt(data$Mean_N))), ylim=c(0.9*min(data$Lambda), 1.2*max(data$Lambda)),
     col="#9FC228", pch=20, bty="n", cex.lab=0.75, cex.axis=0.75,
     xaxs="i", yaxs="i")
# Reference lines: lambda = 1.0 (no inflation, dashed) and lambda = 1.1
# (inflation warning level, dotted)
abline(h=1.0, col="#595A5C", lty=2)
abline(h=1.1, col="#E55738", lty=3)
textxy(sqrt(data$Mean_N), data$Lambda, data$Study, cex=0.5)
dev.off()
|
# Write a HAPMIX parameter file for one local-ancestry run.
#
# lambda: average number of generations since admixture; it is also appended
#   to 'outdir' to form the run's output directory, which is created
#   (relative to 'pwd') if missing.
# parfile: name of the parameter file to write, relative to 'pwd'.
# ref1geno/ref2geno, ref1snp/ref2snp, ref1label/ref2label: genotype files,
#   SNP info files and labels for the two reference panels.
# admixsnp/admixgeno/admixind, admixlabel: files and label for the admixed
#   population; rates: recombination-rate file; chr: chromosome number;
#   mode: HAPMIX run mode.
# The working directory is switched to 'pwd' while writing and restored on
# exit -- including on error -- via on.exit(). Returns NULL invisibly.
hapmixPar <- function(lambda=6.0, parfile="test_chr10.par", ref1geno="maizeland94.out",
                      ref2geno="mex120.out", ref1snp="snp_maize.info", ref2snp="snp_mex.info",
                      admixsnp="toton_chr10.snpinfo", admixgeno="toton_chr10.out", admixind="toton_chr10.ind",
                      ref1label="MZ", ref2label="MEX",
                      rates="toton_chr10.rate", admixlabel="TOTON",
                      chr=10, outdir="TestOut", pwd="largedata/path",
                      mode="LOCAL_ANC"){
  # setwd() returns the previous directory; restore it even if dir.create()
  # or cat() fails part-way through (the original only restored on success).
  old_wd <- setwd(pwd)
  on.exit(setwd(old_wd), add = TRUE)
  outdir <- paste0(outdir, lambda)
  dir.create(outdir, showWarnings = FALSE)
  cat("GENOTYPE:1",
      "OUTPUT_SITES:0",
      "SITE_POSITIONS: 1 1000000000",
      "THETA:0.2",
      paste0("LAMBDA:", lambda),
      "RECOMBINATION_VALS:900 900",
      "MUTATION_VALS:0.2 0.2 0.01",
      "MISCOPYING_VALS:0.05 0.05",
      paste0("REFPOP1GENOFILE:", ref1geno),
      paste0("REFPOP2GENOFILE:", ref2geno),
      paste0("REFPOP1SNPFILE:", ref1snp),
      paste0("REFPOP2SNPFILE:", ref2snp),
      paste0("ADMIXSNPFILE:", admixsnp),
      paste0("ADMIXGENOFILE:", admixgeno),
      paste0("ADMIXINDFILE:", admixind),
      paste0("REF1LABEL:", ref1label),
      paste0("REF2LABEL:", ref2label),
      paste0("RATESFILE: ", rates),
      paste0("ADMIXPOP: ", admixlabel),
      paste0("CHR:", chr),
      paste0("OUTDIR:", outdir),
      paste0("HAPMIX_MODE:", mode),
      "OUTPUT_DETAILS:PROB",
      "THRESHOLD:0.0",
      "KEEPINTFILES:0",
      file=parfile, sep="\n", append=FALSE)
  invisible(NULL)
}
# Write a HAPMIX parameter file for one jackknife replicate.
#
# Identical to hapmixPar() except that the replicate index 'jn' is appended
# (after lambda) to 'outdir', giving per-replicate output directories such
# as "TestOut6_1". The output directory is created (relative to 'pwd') if
# missing, and 'parfile' is written relative to 'pwd'.
# The working directory is switched to 'pwd' while writing and restored on
# exit -- including on error -- via on.exit(). Returns NULL invisibly.
jacknifePar <- function(lambda=6.0, jn=1, parfile="test_chr10.par", ref1geno="maizeland94.out",
                        ref2geno="mex120.out", ref1snp="snp_maize.info", ref2snp="snp_mex.info",
                        admixsnp="toton_chr10.snpinfo", admixgeno="toton_chr10.out", admixind="toton_chr10.ind",
                        ref1label="MZ", ref2label="MEX",
                        rates="toton_chr10.rate", admixlabel="TOTON",
                        chr=10, outdir="TestOut", pwd="largedata/path",
                        mode="LOCAL_ANC"){
  # setwd() returns the previous directory; restore it even if dir.create()
  # or cat() fails part-way through (the original only restored on success).
  old_wd <- setwd(pwd)
  on.exit(setwd(old_wd), add = TRUE)
  outdir <- paste0(outdir, lambda, "_", jn)
  dir.create(outdir, showWarnings = FALSE)
  cat("GENOTYPE:1",
      "OUTPUT_SITES:0",
      "SITE_POSITIONS: 1 1000000000",
      "THETA:0.2",
      paste0("LAMBDA:", lambda),
      "RECOMBINATION_VALS:900 900",
      "MUTATION_VALS:0.2 0.2 0.01",
      "MISCOPYING_VALS:0.05 0.05",
      paste0("REFPOP1GENOFILE:", ref1geno),
      paste0("REFPOP2GENOFILE:", ref2geno),
      paste0("REFPOP1SNPFILE:", ref1snp),
      paste0("REFPOP2SNPFILE:", ref2snp),
      paste0("ADMIXSNPFILE:", admixsnp),
      paste0("ADMIXGENOFILE:", admixgeno),
      paste0("ADMIXINDFILE:", admixind),
      paste0("REF1LABEL:", ref1label),
      paste0("REF2LABEL:", ref2label),
      paste0("RATESFILE: ", rates),
      paste0("ADMIXPOP: ", admixlabel),
      paste0("CHR:", chr),
      paste0("OUTDIR:", outdir),
      paste0("HAPMIX_MODE:", mode),
      "OUTPUT_DETAILS:PROB",
      "THRESHOLD:0.0",
      "KEEPINTFILES:0",
      file=parfile, sep="\n", append=FALSE)
  invisible(NULL)
}
| /lib/hapmixPar.R | no_license | yangjl/N2 | R | false | false | 3,362 | r | # lambda average number of generations since admixture
# Write a HAPMIX parameter file for one local-ancestry run.
#
# lambda: average number of generations since admixture; it is also appended
#   to 'outdir' to form the run's output directory, which is created
#   (relative to 'pwd') if missing.
# parfile: name of the parameter file to write, relative to 'pwd'.
# ref1geno/ref2geno, ref1snp/ref2snp, ref1label/ref2label: genotype files,
#   SNP info files and labels for the two reference panels.
# admixsnp/admixgeno/admixind, admixlabel: files and label for the admixed
#   population; rates: recombination-rate file; chr: chromosome number;
#   mode: HAPMIX run mode.
# The working directory is switched to 'pwd' while writing and restored on
# exit -- including on error -- via on.exit(). Returns NULL invisibly.
hapmixPar <- function(lambda=6.0, parfile="test_chr10.par", ref1geno="maizeland94.out",
                      ref2geno="mex120.out", ref1snp="snp_maize.info", ref2snp="snp_mex.info",
                      admixsnp="toton_chr10.snpinfo", admixgeno="toton_chr10.out", admixind="toton_chr10.ind",
                      ref1label="MZ", ref2label="MEX",
                      rates="toton_chr10.rate", admixlabel="TOTON",
                      chr=10, outdir="TestOut", pwd="largedata/path",
                      mode="LOCAL_ANC"){
  # setwd() returns the previous directory; restore it even if dir.create()
  # or cat() fails part-way through (the original only restored on success).
  old_wd <- setwd(pwd)
  on.exit(setwd(old_wd), add = TRUE)
  outdir <- paste0(outdir, lambda)
  dir.create(outdir, showWarnings = FALSE)
  cat("GENOTYPE:1",
      "OUTPUT_SITES:0",
      "SITE_POSITIONS: 1 1000000000",
      "THETA:0.2",
      paste0("LAMBDA:", lambda),
      "RECOMBINATION_VALS:900 900",
      "MUTATION_VALS:0.2 0.2 0.01",
      "MISCOPYING_VALS:0.05 0.05",
      paste0("REFPOP1GENOFILE:", ref1geno),
      paste0("REFPOP2GENOFILE:", ref2geno),
      paste0("REFPOP1SNPFILE:", ref1snp),
      paste0("REFPOP2SNPFILE:", ref2snp),
      paste0("ADMIXSNPFILE:", admixsnp),
      paste0("ADMIXGENOFILE:", admixgeno),
      paste0("ADMIXINDFILE:", admixind),
      paste0("REF1LABEL:", ref1label),
      paste0("REF2LABEL:", ref2label),
      paste0("RATESFILE: ", rates),
      paste0("ADMIXPOP: ", admixlabel),
      paste0("CHR:", chr),
      paste0("OUTDIR:", outdir),
      paste0("HAPMIX_MODE:", mode),
      "OUTPUT_DETAILS:PROB",
      "THRESHOLD:0.0",
      "KEEPINTFILES:0",
      file=parfile, sep="\n", append=FALSE)
  invisible(NULL)
}
# Write a HAPMIX parameter file for one jackknife replicate.
#
# Identical to hapmixPar() except that the replicate index 'jn' is appended
# (after lambda) to 'outdir', giving per-replicate output directories such
# as "TestOut6_1". The output directory is created (relative to 'pwd') if
# missing, and 'parfile' is written relative to 'pwd'.
# The working directory is switched to 'pwd' while writing and restored on
# exit -- including on error -- via on.exit(). Returns NULL invisibly.
jacknifePar <- function(lambda=6.0, jn=1, parfile="test_chr10.par", ref1geno="maizeland94.out",
                        ref2geno="mex120.out", ref1snp="snp_maize.info", ref2snp="snp_mex.info",
                        admixsnp="toton_chr10.snpinfo", admixgeno="toton_chr10.out", admixind="toton_chr10.ind",
                        ref1label="MZ", ref2label="MEX",
                        rates="toton_chr10.rate", admixlabel="TOTON",
                        chr=10, outdir="TestOut", pwd="largedata/path",
                        mode="LOCAL_ANC"){
  # setwd() returns the previous directory; restore it even if dir.create()
  # or cat() fails part-way through (the original only restored on success).
  old_wd <- setwd(pwd)
  on.exit(setwd(old_wd), add = TRUE)
  outdir <- paste0(outdir, lambda, "_", jn)
  dir.create(outdir, showWarnings = FALSE)
  cat("GENOTYPE:1",
      "OUTPUT_SITES:0",
      "SITE_POSITIONS: 1 1000000000",
      "THETA:0.2",
      paste0("LAMBDA:", lambda),
      "RECOMBINATION_VALS:900 900",
      "MUTATION_VALS:0.2 0.2 0.01",
      "MISCOPYING_VALS:0.05 0.05",
      paste0("REFPOP1GENOFILE:", ref1geno),
      paste0("REFPOP2GENOFILE:", ref2geno),
      paste0("REFPOP1SNPFILE:", ref1snp),
      paste0("REFPOP2SNPFILE:", ref2snp),
      paste0("ADMIXSNPFILE:", admixsnp),
      paste0("ADMIXGENOFILE:", admixgeno),
      paste0("ADMIXINDFILE:", admixind),
      paste0("REF1LABEL:", ref1label),
      paste0("REF2LABEL:", ref2label),
      paste0("RATESFILE: ", rates),
      paste0("ADMIXPOP: ", admixlabel),
      paste0("CHR:", chr),
      paste0("OUTDIR:", outdir),
      paste0("HAPMIX_MODE:", mode),
      "OUTPUT_DETAILS:PROB",
      "THRESHOLD:0.0",
      "KEEPINTFILES:0",
      file=parfile, sep="\n", append=FALSE)
  invisible(NULL)
}
|
#' Preprocesses a Cytotrol flowFrame object
#'
#' Our goal here is to swap the marker names and the channel names within a
#' \code{flowFrame} object to ensure that the \code{flowFrame} objects across
#' centers can be merged into a single \code{flowSet}.
#'
#' We also preprocess the marker names to strip out any additional information
#' added to the marker name. For instance, NHLBI uses "IgD V500", which we reduce
#' to "IgD".
#'
#' @param flow_frame the \code{flowFrame} object to preprocess
#' @param markers_keep a character vector containing the markers to keep
#' @return the updated \code{flowFrame} object containing only the markers of
#'   interest
preprocess_flowframe <- function(flow_frame, markers_keep) {
  if (missing(markers_keep)) {
    stop("The marker to keep must be specified.")
  }
  # Parameter row names; with "S"/"N" appended they become the description
  # keywords holding the stain (marker) and channel name -- presumably the
  # FCS "$PnS"/"$PnN" keyword convention, confirm for these files.
  fr_rownames <- rownames(parameters(flow_frame)@data)
  # Preprocesses each of the columns in the flow_frame
  for (j in seq_len(ncol(flow_frame))) {
    marker_idx <- paste0(fr_rownames[j], "S")
    channel_idx <- paste0(fr_rownames[j], "N")
    marker <- flow_frame@description[[marker_idx]]
    channel <- flow_frame@description[[channel_idx]]
    # In the case the marker name is given, we swap the marker and channel
    # names.
    # NOTE(review): this branch compares against "<FITC-A>" (with angle
    # brackets) while the branch below compares against "FITC-A" without
    # them -- confirm both spellings are intentional for these files.
    if (!is.null(marker) && channel != "<FITC-A>") {
      # Converts the marker names to a common name
      marker <- marker_conversion(marker)
      # Updates the channel information in the flow_frame with the marker
      flow_frame@description[[channel_idx]] <- marker
      flow_frame@parameters@data$name[j] <- marker
      # Updates the marker information in the flow_frame with the channel
      flow_frame@description[[marker_idx]] <- channel
      flow_frame@parameters@data$desc[j] <- channel
    } else if (is.null(marker) && channel == "FITC-A") {
      # An unnamed FITC-A channel is labeled as the live/dead stain.
      marker <- "Live"
      channel <- "FITC-A"
      flow_frame@description[[channel_idx]] <- marker
      flow_frame@parameters@data$name[j] <- marker
      # Updates the marker information in the flow_frame with the channel
      flow_frame@description[[marker_idx]] <- channel
      flow_frame@parameters@data$desc[j] <- channel
    }
  }
  # Keep the expression-matrix column names in sync with the renamed
  # parameters before subsetting by marker name.
  colnames(exprs(flow_frame)) <- colnames(flow_frame)
  # Subset to markers of interest
  flow_frame <- flow_frame[, markers_keep]
  # The pData for the parameters are (sometimes?) resulting in class "AsIs"
  # rather than "character", which is causing errors in the gating.
  # We fix this here.
  pData(parameters(flow_frame))$name <- as.character(pData(parameters(flow_frame))$name)
  pData(parameters(flow_frame))$desc <- as.character(pData(parameters(flow_frame))$desc)
  flow_frame
}
#' Converts the Cytotrol marker names to a common name
#'
#' For the following list of marker names, we manually update the names so
#' that they are standard across centers.
marker_conversion <- Vectorize(function(marker) {
  # Keep only the token before the first space, dropping extra information
  # such as the fluorochrome (e.g., "IgD V500" becomes "IgD").
  marker <- strsplit(marker, " ")[[1]][1]
  live_synonyms <- c("LIVE", "LIVE_GREEN", "Live/Dead", "live", "Live/green")
  lineage_synonyms <- c("CD3CD19CD20", "CD3+19+20", "CD3_CD19_CD20",
                        "CD3+CD19+CD20+", "CD3+CD19+CD20", "CD3+19+20",
                        "CD3/19/20")
  if (marker %in% live_synonyms) {
    "Live"
  } else if (marker %in% lineage_synonyms) {
    "Lineage"
  } else {
    # One-to-one renames; anything unmatched falls through unchanged.
    switch(marker,
           "19"     = "CD19",
           "IGD"    = "IgD",
           "HLA"    = ,
           "HLADR"  = ,
           "HLA-DR" = "HLADR",
           "CD197"  = "CCR7",
           "CD194"  = "CCR4",
           "CD11C"  = "CD11c",
           "CD196"  = "CCR6",
           "CD183"  = "CXCR3",
           marker)
  }
})
| /lib/helpers.R | no_license | gfinak/Cytotrol | R | false | false | 3,803 | r | #' Preprocesses a Cytotrol flowFrame object
#'
#' Our goal here is to swap the marker names and the channel names within a
#' \code{flowFrame} object to ensure that the \code{flowFrame} objects across
#' centers can be merged into a single \code{flowSet}.
#'
#' We also preprocess the marker names to strip out any additional information
#' added to the marker name. For instance, NHLBI uses "IgD V500", which we reduce
#' to "IgD".
#'
#' @param flow_frame the \code{flowFrame} object to preprocess
#' @param markers_keep a character vector containing the markers to keep
#' @return the updated \code{flowFrame} object containing only the markers of
#'   interest
preprocess_flowframe <- function(flow_frame, markers_keep) {
  if (missing(markers_keep)) {
    stop("The marker to keep must be specified.")
  }
  # Parameter row names; with "S"/"N" appended they become the description
  # keywords holding the stain (marker) and channel name -- presumably the
  # FCS "$PnS"/"$PnN" keyword convention, confirm for these files.
  fr_rownames <- rownames(parameters(flow_frame)@data)
  # Preprocesses each of the columns in the flow_frame
  for (j in seq_len(ncol(flow_frame))) {
    marker_idx <- paste0(fr_rownames[j], "S")
    channel_idx <- paste0(fr_rownames[j], "N")
    marker <- flow_frame@description[[marker_idx]]
    channel <- flow_frame@description[[channel_idx]]
    # In the case the marker name is given, we swap the marker and channel
    # names.
    # NOTE(review): this branch compares against "<FITC-A>" (with angle
    # brackets) while the branch below compares against "FITC-A" without
    # them -- confirm both spellings are intentional for these files.
    if (!is.null(marker) && channel != "<FITC-A>") {
      # Converts the marker names to a common name
      marker <- marker_conversion(marker)
      # Updates the channel information in the flow_frame with the marker
      flow_frame@description[[channel_idx]] <- marker
      flow_frame@parameters@data$name[j] <- marker
      # Updates the marker information in the flow_frame with the channel
      flow_frame@description[[marker_idx]] <- channel
      flow_frame@parameters@data$desc[j] <- channel
    } else if (is.null(marker) && channel == "FITC-A") {
      # An unnamed FITC-A channel is labeled as the live/dead stain.
      marker <- "Live"
      channel <- "FITC-A"
      flow_frame@description[[channel_idx]] <- marker
      flow_frame@parameters@data$name[j] <- marker
      # Updates the marker information in the flow_frame with the channel
      flow_frame@description[[marker_idx]] <- channel
      flow_frame@parameters@data$desc[j] <- channel
    }
  }
  # Keep the expression-matrix column names in sync with the renamed
  # parameters before subsetting by marker name.
  colnames(exprs(flow_frame)) <- colnames(flow_frame)
  # Subset to markers of interest
  flow_frame <- flow_frame[, markers_keep]
  # The pData for the parameters are (sometimes?) resulting in class "AsIs"
  # rather than "character", which is causing errors in the gating.
  # We fix this here.
  pData(parameters(flow_frame))$name <- as.character(pData(parameters(flow_frame))$name)
  pData(parameters(flow_frame))$desc <- as.character(pData(parameters(flow_frame))$desc)
  flow_frame
}
#' Converts the Cytotrol marker names to a common name
#'
#' For the following list of marker names, we manually update the names so
#' that they are standard across centers.
marker_conversion <- Vectorize(function(marker) {
  # Keep only the token before the first space, dropping extra information
  # such as the fluorochrome (e.g., "IgD V500" becomes "IgD").
  marker <- strsplit(marker, " ")[[1]][1]
  live_synonyms <- c("LIVE", "LIVE_GREEN", "Live/Dead", "live", "Live/green")
  lineage_synonyms <- c("CD3CD19CD20", "CD3+19+20", "CD3_CD19_CD20",
                        "CD3+CD19+CD20+", "CD3+CD19+CD20", "CD3+19+20",
                        "CD3/19/20")
  if (marker %in% live_synonyms) {
    "Live"
  } else if (marker %in% lineage_synonyms) {
    "Lineage"
  } else {
    # One-to-one renames; anything unmatched falls through unchanged.
    switch(marker,
           "19"     = "CD19",
           "IGD"    = "IgD",
           "HLA"    = ,
           "HLADR"  = ,
           "HLA-DR" = "HLADR",
           "CD197"  = "CCR7",
           "CD194"  = "CCR4",
           "CD11C"  = "CD11c",
           "CD196"  = "CCR6",
           "CD183"  = "CXCR3",
           marker)
  }
})
|
#' Make posterior calculations
#'
#' A generic and some methods for creating posterior calculations.
#' The intended use is to prepare data for use with bayesplot ppc plots.
#' The default method works for any object for which `posterior()` can
#' create a data frame of posterior samples.
#'
#' @param object An object from which posterior calculations are made.
#' @param formula A formula describing the quantity to be calculated. The rhs of the
#'   formula is evaluated using one posterior sample of the parameters and `data`.
#'   The lhs, if it exists, is used to name the resulting column when `data.frame = TRUE`.
#' @param data Additional data involved in the computation. This may be the original
#'   data used to fit the model or counter-factual data.
#' @param draws The number of draws to make from the posterior distribution.
#'   Sampling is with replacement if `draws` is larger than the number of
#'   posterior samples in `object`.
#' @param data.frame A logical indicating whether the results should be returned
#'   as a data frame (TRUE) or a matrix (FALSE).
#' @param ... Additional arguments, currently ignored.
#' @return A matrix with `draws` rows or a data frame with three columns.
#'
#'
#' @export
#'
#' @rdname posterior_calc
posterior_calc <- function(object, ...) {
  # S3 generic: dispatch on the class of `object`.
  UseMethod("posterior_calc")
}
#' @rdname posterior_calc
#' @export
posterior_calc.default <-
  function(object, formula, data = NULL, draws = NULL, data.frame = FALSE, ...) {
    # Convert the fitted model into a data frame of posterior samples and
    # delegate the actual work to the data-frame method.
    post_samples <- posterior(object)
    posterior_calc(
      post_samples,
      formula = formula,
      data = data,
      draws = draws,
      data.frame = data.frame,
      ...
    )
  }
#' @rdname posterior_calc
#' @export
posterior_calc.default <- NULL  # placeholder removed
posterior_calc.data.frame <-
  function(object, formula, data = NULL, draws = NULL, data.frame = FALSE, ...) {
    # `object` is a data frame of posterior samples, one row per draw.
    Post <- object
    if (is.null(draws) || draws == nrow(Post)) {
      # use all the rows (in order)
      draws <- nrow(Post)
      sampled_rows <- seq_len(draws)
    } else {
      # sample the number of times requested, with replacement if required
      if (draws < 1) stop("draws must be at least 1")
      draws <- round(draws)
      sampled_rows <-
        sample(seq_len(nrow(Post)), size = draws, replace = draws > nrow(Post))
    }
    # Evaluate the rhs of the formula once per sampled row, with that row's
    # parameter values and `data` in scope; bind the per-draw results into a
    # draws-by-quantity matrix.
    res <-
      do.call(
        rbind,
        lapply(
          sampled_rows,
          function(r) {
            eval(
              formula[[length(formula)]],
              c(as.list(Post[r, , drop = FALSE]), as.list(data)),
              parent.frame()
            )
          }
        )
      )
    if (data.frame) {
      # Reshape the draws-by-quantity matrix into long format. (`data.frame`
      # here is the logical argument; the base constructor is still found
      # because R skips non-function bindings in a call.)
      dm <- dim(res)
      res <-
        data.frame(
          yrep = as.vector(res),
          draw = rep(seq_len(dm[1]), times = dm[2]),
          # BUG FIX: each column index must repeat once per draw (dm[1]);
          # the previous `each = dm[2]` only worked when dm[1] == dm[2] and
          # errored (or mislabeled) otherwise.
          y_ind = rep(seq_len(dm[2]), each = dm[1])
        )
      if (length(formula) == 3) {
        # Use the lhs of the formula (if any) to name the value column.
        names(res)[1] <- deparse(formula[[2]])
      }
    }
    res
  }
| /R/posterior_calc.R | no_license | CalvinData/CalvinBayes | R | false | false | 2,846 | r | #' Make posterior cacluations
#'
#' A generic and some methods for creating posterior calculations.
#' The intended use is to prepare data for use with bayesplot ppc plots.
#' The default method works for any object for which `posterior()` can
#' create a data frame of posterior samples.
#'
#' @param object An object from which posterior calculations are made.
#' @param formula A formula describing the quantity to be calculated. The rhs of the
#'   formula is evaluated using one posterior sample of the parameters and `data`.
#'   The lhs, if it exists, is used to name the resulting column when `data.frame = TRUE`.
#' @param data Additional data involved in the computation. This may be the original
#'   data used to fit the model or counter-factual data.
#' @param draws The number of draws to make from the posterior distribution.
#'   Sampling is with replacement if `draws` is larger than the number of
#'   posterior samples in `object`.
#' @param data.frame A logical indicating whether the results should be returned
#'   as a data frame (TRUE) or a matrix (FALSE).
#' @param ... Additional arguments, currently ignored.
#' @return A matrix with `draws` rows or a data frame with three columns.
#'
#'
#' @export
#'
#' @rdname posterior_calc
posterior_calc <- function(object, ...) {
  # S3 generic: dispatch on the class of `object`.
  UseMethod("posterior_calc")
}
#' @rdname posterior_calc
#' @export
posterior_calc.default <-
  function(object, formula, data = NULL, draws = NULL, data.frame = FALSE, ...) {
    # Convert the fitted model into a data frame of posterior samples and
    # delegate the actual work to the data-frame method.
    post_samples <- posterior(object)
    posterior_calc(
      post_samples,
      formula = formula,
      data = data,
      draws = draws,
      data.frame = data.frame,
      ...
    )
  }
#' @rdname posterior_calc
#' @export
posterior_calc.data.frame <-
  function(object, formula, data = NULL, draws = NULL, data.frame = FALSE, ...) {
    # `object` is a data frame of posterior samples, one row per draw.
    Post <- object
    if (is.null(draws) || draws == nrow(Post)) {
      # use all the rows (in order)
      draws <- nrow(Post)
      sampled_rows <- seq_len(draws)
    } else {
      # sample the number of times requested, with replacement if required
      if (draws < 1) stop("draws must be at least 1")
      draws <- round(draws)
      sampled_rows <-
        sample(seq_len(nrow(Post)), size = draws, replace = draws > nrow(Post))
    }
    # Evaluate the rhs of the formula once per sampled row, with that row's
    # parameter values and `data` in scope; bind the per-draw results into a
    # draws-by-quantity matrix.
    res <-
      do.call(
        rbind,
        lapply(
          sampled_rows,
          function(r) {
            eval(
              formula[[length(formula)]],
              c(as.list(Post[r, , drop = FALSE]), as.list(data)),
              parent.frame()
            )
          }
        )
      )
    if (data.frame) {
      # Reshape the draws-by-quantity matrix into long format. (`data.frame`
      # here is the logical argument; the base constructor is still found
      # because R skips non-function bindings in a call.)
      dm <- dim(res)
      res <-
        data.frame(
          yrep = as.vector(res),
          draw = rep(seq_len(dm[1]), times = dm[2]),
          # BUG FIX: each column index must repeat once per draw (dm[1]);
          # the previous `each = dm[2]` only worked when dm[1] == dm[2] and
          # errored (or mislabeled) otherwise.
          y_ind = rep(seq_len(dm[2]), each = dm[1])
        )
      if (length(formula) == 3) {
        # Use the lhs of the formula (if any) to name the value column.
        names(res)[1] <- deparse(formula[[2]])
      }
    }
    res
  }
|
testlist <- list(n = -486350080L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) | /breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609962942-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 98 | r | testlist <- list(n = -486350080L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
library(VGAM)
### Name: mccullagh89
### Title: McCullagh (1989) Distribution Family Function
### Aliases: mccullagh89
### Keywords: models regression
### ** Examples
mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
fit <- vglm(y ~ 1, mccullagh89, data = mdata, trace = TRUE)
head(fitted(fit))
with(mdata, mean(y))
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
| /data/genthat_extracted_code/VGAM/examples/mccullagh89.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 403 | r | library(VGAM)
### Name: mccullagh89
### Title: McCullagh (1989) Distribution Family Function
### Aliases: mccullagh89
### Keywords: models regression
### ** Examples
mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
fit <- vglm(y ~ 1, mccullagh89, data = mdata, trace = TRUE)
head(fitted(fit))
with(mdata, mean(y))
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
|
# Find the optimal weighted combination of outcome variables via r2weight,
# using methylation (450k) features clustered into CpG islands.
library(r2weight)
library(SummarizedExperiment)
# Methylation matrix (features x samples) and per-sample metadata from the
# SummarizedExperiment `neuroIQse` (defined elsewhere in the project).
data <- data.frame(assays(neuroIQse)$meth450k)
meta <- data.frame(colData(neuroIQse))
# Remove samples with missing outcomes: keep only samples with all eight
# 7-year outcome measures observed.
meta_comp <- meta[(!is.na(meta$vciq_i_7y) & !is.na(meta$priq_i_7y) &
                     !is.na(meta$wmiq_i_7y) & !is.na(meta$psiq_7y) &
                     !is.na(meta$fsiq_i_7y) & !is.na(meta$baattss_7y) &
                     !is.na(meta$pbdss_7y) & !is.na(meta$ftdr_7y)), ]
data_comp <- data[, match(row.names(meta_comp), names(data))]
# Outcome matrix: assumes the eight outcomes are the first 8 metadata
# columns -- TODO confirm column order of colData(neuroIQse).
Y <- data.frame(meta_comp[, 1:8])
# With all covariates...
# out1 <- optWeight(Y = Y, X = data.frame(t(data_comp)), SL.library = c("SL.mean"))
# r2weight based on clusters of CpGs (cluster labels from `clusterCpGs`,
# defined elsewhere):
data_comp_island <- cbind.data.frame(clusterCpGs, data_comp)
# Slow: one optWeight fit per CpG cluster.
data_comp_isl_split <- split(data_comp, as.factor(clusterCpGs))
optWeight_res <- vector("list", length(data_comp_isl_split))
# seq_along() (not 1:length()) so an empty split does not iterate.
for (i in seq_along(data_comp_isl_split)) {
  X <- data.frame(t(data_comp_isl_split[[i]]))
  optWeight_res[[i]] <- optWeight(Y = Y, X = X, SL.library = c("SL.glm", "SL.mean", "SL.step"))
}
| /lib/CombOutcome.R | no_license | nhejazi/conf_acic2017_methynpvi | R | false | false | 1,055 | r | #Find optimal combination of outcome variables
library(r2weight)
library(SummarizedExperiment)
data<-data.frame(assays(neuroIQse)$meth450k)
meta<-data.frame(colData(neuroIQse))
#Remove samples with missing outcomes
meta_comp<-meta[(!is.na(meta$vciq_i_7y) & !is.na(meta$priq_i_7y) & !is.na(meta$wmiq_i_7y) & !is.na(meta$psiq_7y) & !is.na(meta$fsiq_i_7y) & !is.na(meta$baattss_7y) & !is.na(meta$pbdss_7y) & !is.na(meta$ftdr_7y)),]
data_comp<-data[,match(row.names(meta_comp), names(data))]
Y<-data.frame(meta_comp[,1:8])
#With all covariates...
#out1 <- optWeight(Y = Y, X = data.frame(t(data_comp)), SL.library = c("SL.mean"))
#R2Weight based on clusters:
data_comp_island<-cbind.data.frame(clusterCpGs,data_comp)
#Slow
data_comp_isl_split<-split(data_comp, as.factor(clusterCpGs))
optWeight_res <- vector("list", length(data_comp_isl_split))
for(i in 1:length(data_comp_isl_split)){
X<-data.frame(t(data_comp_isl_split[[i]]))
optWeight_res[[i]]<-optWeight(Y = Y, X = X, SL.library = c("SL.glm","SL.mean","SL.step"))
}
|
######################################################################
##
##  -- cachematrix.R --
##  This file has two functions: makeCacheMatrix, which stores a matrix
##  together with a cache of its inverse, and cacheSolve, which computes
##  (or retrieves) that inverse.
##
#######################################################################
##
## makeCacheMatrix function:
## This function creates a special "matrix" object that can cache its
## inverse. It returns a list of four accessor functions closing over the
## matrix `x` and the cached inverse `inv`:
##   set(y)        -- replace the matrix and invalidate the cache
##   get()         -- return the current matrix
##   setinverse(i) -- store a computed inverse
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
##
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}
##
## cacheSolve Function:
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), cacheSolve retrieves the inverse from the
## cache instead of recomputing it. Extra arguments are passed to solve().
##
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    # Cache hit: reuse the stored inverse.
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute, store, and return the inverse.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | felipe84841/ProgrammingAssignment2 | R | false | false | 842 | r | ######################################################################
##
## -- cachematrix.R --
## This file have two functions a makecacheMatrix that stores
## a matrix inverse
## and a inverse function
##
#######################################################################
##
## makeCacheMatrix function:
## This function creates a special "matrix" object
## that can cache its inverse.
##
makeCacheMatrix <- function(x = matrix()) {
}
##
## cacheSolve Function:
## This function computes the inverse of the
## special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated
## (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
##
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
}
|
library(plotly)
library(shiny)
# Build an interactive 3-D PCA score plot (PC1 vs PC2 vs PC3) for one
# gender/BMI subgroup, colored by treatment arm (Aspirin vs Placebo).
# `gender` / `bmi` select which precomputed CSV artifacts to load from
# ./data/: scores ("_x"), explained variance ("_var"), class labels ("_cls").
# Returns a plotly plot object.
create_PCA <- function(gender, bmi) {
  demographics_dataset <- "./data/metaboanalyst_data_with_demographics.csv"
  x_dataset <- paste("./data/pca_", gender, "_", bmi, "_x.csv", sep = "")
  var_dataset <- paste("./data/pca_", gender, "_", bmi, "_var.csv", sep = "")
  cls_dataset <- paste("./data/pca_", gender, "_", bmi, "_cls.csv", sep = "")
  df_demographics <- read.csv(demographics_dataset)
  df_x <- read.csv(x_dataset)
  rownames(df_x) <- 1:nrow(df_x)
  df_var <- read.csv(var_dataset)
  # NOTE(review): df_cls is read and factor-converted but never used below
  # (treatment labels come from the merged demographics) -- possibly a
  # leftover from create_PLSDA.
  df_cls <- read.csv(cls_dataset)
  df_cls$x <- as.factor(df_cls$x)
  # Keep only the demographic columns needed for the merge and hover text.
  columns = c("AGE", "GENDER", "BMI", "Sample", "TREATMENT")
  df_demographics <- df_demographics[columns]
  # NOTE(review): `how=` and `on=` are pandas-style arguments; base merge()
  # silently ignores them and joins on the common "Sample" column by
  # default. Consider `by = "Sample"` for clarity.
  merged_data <- merge(df_x, df_demographics, how="inner", on = "Sample")
  rownames(merged_data) <- merged_data$Sample
  merged_data$Sample <- NULL
  # Per-point hover text with the subject's demographics.
  merged_data$Info <- paste("AGE : ", merged_data$AGE, " GENDER : ", merged_data$GENDER, " BMI : ", merged_data$BMI)
  col <- c("#4A274F", "#CCF381")
  # Recode treatment letters to readable labels, then reverse the level
  # order so colors map as intended.
  # NOTE(review): levels()<- requires TREATMENT to be a factor; under
  # R >= 4.0 read.csv defaults to character -- confirm stringsAsFactors.
  levels(merged_data$TREATMENT)[levels(merged_data$TREATMENT)=='A'] <- "Aspirin"
  levels(merged_data$TREATMENT)[levels(merged_data$TREATMENT)=='B'] <- "Placebo"
  treatment <- factor(merged_data$TREATMENT, levels=rev(levels(merged_data$TREATMENT)))
  # Axis labels include percent variance explained per component
  # (df_var$x presumably holds proportions -- TODO confirm).
  xlabel = paste("PC",1, "(", round(100*df_var$x[1],1), "%)");
  ylabel = paste("PC",2, "(", round(100*df_var$x[2],1), "%)");
  zlabel = paste("PC",3, "(", round(100*df_var$x[3],1), "%)");
  p <- plotly::plot_ly(x = merged_data$PC1, y = merged_data$PC2, z = merged_data$PC3, text = merged_data$Info,
                       color = treatment, colors = col,hovertemplate = paste('C1 Score: %{x:.2f}\nC2 Score: %{y:.2f}\nC3 Score: %{z:.2f}\n%{text}'))
  p <- plotly::add_markers(p, sizes = 5)
  p <- plotly::layout(p, scene = list(xaxis = list(title = xlabel),
                                      yaxis = list(title = ylabel),
                                      zaxis = list(title = zlabel)))
  p
}
# Build an interactive 3-D PLS-DA score plot (components 1-3) for one
# gender/BMI subgroup, colored by treatment class (Aspirin vs Placebo).
# Loads precomputed PLS-DA artifacts from ./data/ analogous to create_PCA.
# Returns a plotly plot object.
create_PLSDA <- function(gender, bmi) {
  demographics_dataset <- "./data/metaboanalyst_data_with_demographics.csv"
  x_dataset <- paste("./data/plsda_", gender, "_", bmi, "_x.csv", sep = "")
  var_dataset <- paste("./data/plsda_", gender, "_", bmi, "_var.csv", sep = "")
  cls_dataset <- paste("./data/plsda_", gender, "_", bmi, "_cls.csv", sep = "")
  df_demographics <- read.csv(demographics_dataset)
  df_x <- read.csv(x_dataset)
  df_var <- read.csv(var_dataset)
  # Class labels (0 = placebo, 1 = aspirin) for point coloring.
  df_cls <- read.csv(cls_dataset)
  df_cls$x <- as.factor(df_cls$x)
  columns = c("AGE", "GENDER", "BMI", "Sample")
  df_demographics <- df_demographics[columns]
  # NOTE(review): `how=` and `on=` are pandas-style arguments; base merge()
  # silently ignores them and joins on the common "Sample" column by
  # default. Consider `by = "Sample"` for clarity.
  df_x <- merge(df_x, df_demographics, how="inner", on = "Sample")
  df_x$Sample <- NULL
  # Per-point hover text with the subject's demographics.
  df_x$Info <- paste("AGE : ", df_x$AGE, " GENDER : ", df_x$GENDER, " BMI : ", df_x$BMI)
  # Label and plot graph
  levels(df_cls$x)[levels(df_cls$x)==0] <- "Placebo"
  levels(df_cls$x)[levels(df_cls$x)==1] <- "Aspirin"
  # Axis labels: percent of variance explained relative to df_var$x[9]
  # (presumably the total over all 9 components -- TODO confirm).
  xlabel <- paste("Component", 1, "(", round(100*df_var$x[1]/df_var$x[9],1), "%)");
  ylabel <- paste("Component", 2, "(", round(100*df_var$x[2]/df_var$x[9],1), "%)");
  zlabel <- paste("Component", 3, "(", round(100*df_var$x[3]/df_var$x[9],1), "%)");
  p <- plotly::plot_ly(x = df_x$Comp.1, y = df_x$Comp.2, z = df_x$Comp.3,
                       color = df_cls$x,text = df_x$Info, colors = c("#4A274F", "#CCF381"),hovertemplate = paste('C1 Score: %{x:.2f}\nC2 Score: %{y:.2f}\nC3 Score: %{z:.2f}\n%{text}'))
  p <- plotly::add_markers(p, sizes = 5)
  p <- plotly::layout(p, scene = list(xaxis = list(title = xlabel),
                                      yaxis = list(title = ylabel),
                                      zaxis = list(title = zlabel)))
  p
}
# Shiny UI: left sidebar holds the subgroup selectors (gender, BMI) and a
# short study summary; the main panel has two tabs -- a per-protein delta
# analysis and PCA/PLS-DA multivariate views.
ui <- fluidPage(
  sidebarPanel(
    # Subgroup selectors shared by both analysis tabs.
    fluidRow(
      selectInput("gender", "Gender", c("both", "male", "female")),
      selectInput("bmi", "BMI", c("both", "normal", "overweight"))
    ),
    # Static study description.
    fluidRow(
      h4("Effect of Aspirin on Plasma Protein Concentration in a Colorectal Cancer Prevention Study"),
      HTML("<ul>
<li>Long term use of aspirin is associated with lower risk of colorectal cancer but the mechanism is not known</li>
<li>This cross-over randomized study of 44 individuals investigates how aspirin may change biological response, specifically plasma protein concentration after taking aspirin</li>
<li>Participants were randomized to take a regular dose of aspirin (325 mg/day) and placebo, each for 60 days, while switching treatment after a 3-month washout period</li>
</ul>")
    )
  ),
  mainPanel(
    tabsetPanel(
      # Tab 1: histogram of concentration changes plus paged bar plots of
      # the proteins above the chosen percentile.
      tabPanel(
        "Delta Analysis",
        sliderInput(
          "deltaPercentile",
          label = "Percentile",
          min = 1, max = 99, value = 95),
        plotOutput("deltaHistogram"),
        uiOutput("plotsTitle"),
        uiOutput("proteinPlots"),
        verbatimTextOutput("ph")),
      # Tab 2: multivariate PCA and PLS-DA score plots with commentary.
      tabPanel(
        "PCA / PLS-DA Analysis",
        fluidRow(
          h4("PCA plot of plasma protein concentration after 60 days of taking aspirin and placebo in a randomized cross-over controlled study of 44 individuals")
        ),
        fluidRow(
          column(8, plotlyOutput("pcaPlot")),
          column(
            3,
            h4("PCA Analysis"),
            HTML("<ul>
<li>Unsupervised analysis shows a fair clustering for 2700 plasma proteins</li>
<li>Better clustering is observed for men with normal BMI and women with overweight BMI</li>
<li>The number of observations for subcategory analysis is small so this result should be interpreted cautiously</li>
</ul>"))
        ),
        fluidRow(
          h4("PLS-DA plot of plasma protein concentration after 60 days of taking aspirin and placebo in a randomized cross-over controlled study of 44 individuals")
        ),
        fluidRow(
          column(8, plotlyOutput("plsdaPlot")),
          column(
            3,
            h4("PLS-DA Analysis"),
            HTML("<ul>
<li>Supervised analysis shows a fair separation for 2700 plasma proteins after taking aspirin compared to placebo</li>
<li>The separation is more prominent for men with normal BMI, women with overweight BMI, and men with overweight BMI</li>
<li>The number of observations for subcategory analysis is small, and this result should be interpreted cautiously</li>
</ul>"))
        )
      )
    )
  )
)
# Shiny server: wires the subgroup selectors to the delta histogram, the
# paged per-protein bar plots, and the PCA / PLS-DA plotly views.
server <- function(input, output, session) {
  # Reactive: per-protein delta table for the selected gender/BMI subgroup,
  # loaded from a precomputed CSV (columns used below: Protein, Delta,
  # DeltaSquared).
  delta <- reactive({
    delta_dataset <- paste("./data/delta_", input$gender, "_", input$bmi, ".csv", sep = "")
    data <- read.csv(delta_dataset, header = TRUE)
    data[,]
  })
  # Proteins whose squared change exceeds the user-selected percentile.
  significantProteins <- reactive({
    delta()[delta()$DeltaSquared > quantile(delta()$DeltaSquared, input$deltaPercentile / 100),]
  })
  # Number of 10-protein "pages" of bar plots (ceiling of n / 10).
  n_plots <- reactive({
    n <- nrow(significantProteins())
    n2 <- n %/% 10
    if (n %% 10 > 0) {
      n2 <- n2 + 1
    }
    n2
  })
  output$deltaHistogram <- renderPlot({
    hist(delta()$DeltaSquared,
         col = 'gray',
         border = 'white',
         ylim = c(0, 2000),
         labels = TRUE,
         xlab = "Squared Difference in Plasma Protein Concentration",
         breaks = seq(0, 3.3, by = 0.05),
         main="Histogram of change in concentration for each plasma protein after taking aspirin vs. placebo")
    # Red line marks the current percentile cutoff.
    abline(v = quantile(delta()$DeltaSquared, input$deltaPercentile / 100), col = "red", lwd = 2)
  })
  output$plotsTitle <- renderUI({
    h2(paste("List of proteins with concentration change above ", input$deltaPercentile, "-th percentile", sep = ""))
  })
  # Placeholders for the per-page bar plots; the matching renderPlot
  # bindings are registered below.
  output$proteinPlots <- renderUI({
    plots <- n_plots()
    plotOutputs <- lapply(seq_len(plots), function(i) {
      plotOutput(paste("plot", i, sep = ""))
    })
    do.call(tagList, plotOutputs)
  })
  # NOTE: renderPrint is used purely as a reactive side effect to
  # (re)register one renderPlot per page whenever the inputs change --
  # a fragile but functional pattern.
  output$ph <- renderPrint({
    for (i in seq_len(n_plots())) {
      local({
        my_i <- i  # freeze the loop variable for this closure
        plotName <- paste("plot", my_i, sep = "")
        output[[plotName]] <- renderPlot({
          # Rows start..end of the |Delta|-ranked table shown on this page.
          end <- my_i * 10
          start <- end - 9
          if (my_i == n_plots()) {
            # The last page may be partial.
            n <- nrow(significantProteins())
            prev <- end - 10
            rem <- n %% 10
            # BUG FIX: when n is an exact multiple of 10 the original
            # `end <- n %% 10 + prev` set end below start; keep the full
            # page of 10 in that case.
            if (rem > 0) {
              end <- rem + prev
            }
          }
          # Ranking by |Delta| matches the DeltaSquared percentile cut, so
          # the first n rows are exactly the significant proteins.
          df <- delta()[order(abs(delta()$Delta), decreasing = TRUE),]
          df <- df[start:end,]
          par(mai=c(1,5,1,1))  # wide left margin for protein names
          barplot(
            df[order(abs(df$Delta), decreasing = FALSE),]$Delta,
            names.arg = df[order(abs(df$Delta), decreasing = FALSE),]$Protein,
            xlim = c(-0.5, 0.5),
            horiz=TRUE,
            las=1)
        })
      })
    }
  })
  # Fall back to "both" while an input is still initializing (NULL); this
  # collapses the original four-branch if/else chains with no behavior
  # change.
  output$pcaPlot <- renderPlotly({
    gender <- if (is.null(input$gender)) "both" else input$gender
    bmi <- if (is.null(input$bmi)) "both" else input$bmi
    create_PCA(gender, bmi)
  })
  output$plsdaPlot <- renderPlotly({
    gender <- if (is.null(input$gender)) "both" else input$gender
    bmi <- if (is.null(input$bmi)) "both" else input$bmi
    create_PLSDA(gender, bmi)
  })
}
shinyApp(ui, server)
| /shiny/src/app.R | no_license | medha-sagar/DATA511A-CancerPrevention | R | false | false | 9,738 | r | library(plotly)
library(shiny)
create_PCA <- function(gender, bmi) {
demographics_dataset <- "./data/metaboanalyst_data_with_demographics.csv"
x_dataset <- paste("./data/pca_", gender, "_", bmi, "_x.csv", sep = "")
var_dataset <- paste("./data/pca_", gender, "_", bmi, "_var.csv", sep = "")
cls_dataset <- paste("./data/pca_", gender, "_", bmi, "_cls.csv", sep = "")
df_demographics <- read.csv(demographics_dataset)
df_x <- read.csv(x_dataset)
rownames(df_x) <- 1:nrow(df_x)
df_var <- read.csv(var_dataset)
df_cls <- read.csv(cls_dataset)
df_cls$x <- as.factor(df_cls$x)
columns = c("AGE", "GENDER", "BMI", "Sample", "TREATMENT")
df_demographics <- df_demographics[columns]
merged_data <- merge(df_x, df_demographics, how="inner", on = "Sample")
rownames(merged_data) <- merged_data$Sample
merged_data$Sample <- NULL
merged_data$Info <- paste("AGE : ", merged_data$AGE, " GENDER : ", merged_data$GENDER, " BMI : ", merged_data$BMI)
col <- c("#4A274F", "#CCF381")
levels(merged_data$TREATMENT)[levels(merged_data$TREATMENT)=='A'] <- "Aspirin"
levels(merged_data$TREATMENT)[levels(merged_data$TREATMENT)=='B'] <- "Placebo"
treatment <- factor(merged_data$TREATMENT, levels=rev(levels(merged_data$TREATMENT)))
xlabel = paste("PC",1, "(", round(100*df_var$x[1],1), "%)");
ylabel = paste("PC",2, "(", round(100*df_var$x[2],1), "%)");
zlabel = paste("PC",3, "(", round(100*df_var$x[3],1), "%)");
p <- plotly::plot_ly(x = merged_data$PC1, y = merged_data$PC2, z = merged_data$PC3, text = merged_data$Info,
color = treatment, colors = col,hovertemplate = paste('C1 Score: %{x:.2f}\nC2 Score: %{y:.2f}\nC3 Score: %{z:.2f}\n%{text}'))
p <- plotly::add_markers(p, sizes = 5)
p <- plotly::layout(p, scene = list(xaxis = list(title = xlabel),
yaxis = list(title = ylabel),
zaxis = list(title = zlabel)))
p
}
create_PLSDA <- function(gender, bmi) {
demographics_dataset <- "./data/metaboanalyst_data_with_demographics.csv"
x_dataset <- paste("./data/plsda_", gender, "_", bmi, "_x.csv", sep = "")
var_dataset <- paste("./data/plsda_", gender, "_", bmi, "_var.csv", sep = "")
cls_dataset <- paste("./data/plsda_", gender, "_", bmi, "_cls.csv", sep = "")
df_demographics <- read.csv(demographics_dataset)
df_x <- read.csv(x_dataset)
df_var <- read.csv(var_dataset)
df_cls <- read.csv(cls_dataset)
df_cls$x <- as.factor(df_cls$x)
columns = c("AGE", "GENDER", "BMI", "Sample")
df_demographics <- df_demographics[columns]
df_x <- merge(df_x, df_demographics, how="inner", on = "Sample")
df_x$Sample <- NULL
df_x$Info <- paste("AGE : ", df_x$AGE, " GENDER : ", df_x$GENDER, " BMI : ", df_x$BMI)
# Label and plot graph
levels(df_cls$x)[levels(df_cls$x)==0] <- "Placebo"
levels(df_cls$x)[levels(df_cls$x)==1] <- "Aspirin"
xlabel <- paste("Component", 1, "(", round(100*df_var$x[1]/df_var$x[9],1), "%)");
ylabel <- paste("Component", 2, "(", round(100*df_var$x[2]/df_var$x[9],1), "%)");
zlabel <- paste("Component", 3, "(", round(100*df_var$x[3]/df_var$x[9],1), "%)");
p <- plotly::plot_ly(x = df_x$Comp.1, y = df_x$Comp.2, z = df_x$Comp.3,
color = df_cls$x,text = df_x$Info, colors = c("#4A274F", "#CCF381"),hovertemplate = paste('C1 Score: %{x:.2f}\nC2 Score: %{y:.2f}\nC3 Score: %{z:.2f}\n%{text}'))
p <- plotly::add_markers(p, sizes = 5)
p <- plotly::layout(p, scene = list(xaxis = list(title = xlabel),
yaxis = list(title = ylabel),
zaxis = list(title = zlabel)))
p
}
ui <- fluidPage(
sidebarPanel(
fluidRow(
selectInput("gender", "Gender", c("both", "male", "female")),
selectInput("bmi", "BMI", c("both", "normal", "overweight"))
),
fluidRow(
h4("Effect of Aspirin on Plasma Protein Concentration in a Colorectal Cancer Prevention Study"),
HTML("<ul>
<li>Long term use of aspirin is associated with lower risk of colorectal cancer but the mechanism is not known</li>
<li>This cross-over randomized study of 44 individuals investigates how aspirin may change biological response, specifically plasma protein concentration after taking aspirin</li>
<li>Participants were randomized to take a regular dose of aspirin (325 mg/day) and placebo, each for 60 days, while switching treatment after a 3-month washout period</li>
</ul>")
)
),
mainPanel(
tabsetPanel(
tabPanel(
"Delta Analysis",
sliderInput(
"deltaPercentile",
label = "Percentile",
min = 1, max = 99, value = 95),
plotOutput("deltaHistogram"),
uiOutput("plotsTitle"),
uiOutput("proteinPlots"),
verbatimTextOutput("ph")),
tabPanel(
"PCA / PLS-DA Analysis",
fluidRow(
h4("PCA plot of plasma protein concentration after 60 days of taking aspirin and placebo in a randomized cross-over controlled study of 44 individuals")
),
fluidRow(
column(8, plotlyOutput("pcaPlot")),
column(
3,
h4("PCA Analysis"),
HTML("<ul>
<li>Unsupervised analysis shows a fair clustering for 2700 plasma proteins</li>
<li>Better clustering is observed for men with normal BMI and women with overweight BMI</li>
<li>The number of observations for subcategory analysis is small so this result should be interpreted cautiously</li>
</ul>"))
),
fluidRow(
h4("PLS-DA plot of plasma protein concentration after 60 days of taking aspirin and placebo in a randomized cross-over controlled study of 44 individuals")
),
fluidRow(
column(8, plotlyOutput("plsdaPlot")),
column(
3,
h4("PLS-DA Analysis"),
HTML("<ul>
<li>Supervised analysis shows a fair separation for 2700 plasma proteins after taking aspirin compared to placebo</li>
<li>The separation is more prominent for men with normal BMI, women with overweight BMI, and men with overweight BMI</li>
<li>The number of observations for subcategory analysis is small, and this result should be interpreted cautiously</li>
</ul>"))
)
)
)
)
)
server <- function(input, output, session) {
delta <- reactive({
delta_dataset <- paste("./data/delta_", input$gender, "_", input$bmi, ".csv", sep = "")
data <- read.csv(delta_dataset, header = TRUE)
data[,]
})
significantProteins <- reactive({
delta()[delta()$DeltaSquared > quantile(delta()$DeltaSquared, input$deltaPercentile / 100),]
})
n_plots <- reactive({
n <- nrow(significantProteins())
n2 <- n %/% 10
if (n %% 10 > 0) {
n2 <- n2 + 1
}
n2
})
output$deltaHistogram <- renderPlot({
hist(delta()$DeltaSquared,
col = 'gray',
border = 'white',
ylim = c(0, 2000),
labels = TRUE,
xlab = "Squared Difference in Plasma Protein Concentration",
breaks = seq(0, 3.3, by = 0.05),
main="Histogram of change in concentration for each plasma protein after taking aspirin vs. placebo")
abline(v = quantile(delta()$DeltaSquared, input$deltaPercentile / 100), col = "red", lwd = 2)
})
output$plotsTitle <- renderUI({
h2(paste("List of proteins with concentration change above ", input$deltaPercentile, "-th percentile", sep = ""))
})
output$proteinPlots <- renderUI({
plots <- n_plots()
plotOutputs <- lapply(1:plots, function(i) {
plotOutput(paste("plot", i, sep = ""))
})
do.call(tagList, plotOutputs)
})
output$ph <- renderPrint({
for (i in 1:n_plots()) {
local({
my_i <- i
plotName <- paste("plot", my_i, sep = "")
output[[plotName]] <- renderPlot({
end <- my_i * 10
start <- end - 9
if (my_i == n_plots()) {
n <- nrow(significantProteins())
prev <- end - 10
end <- n %% 10 + prev
}
df <- delta()[order(abs(delta()$Delta), decreasing = TRUE),]
df <- df[start:end,]
par(mai=c(1,5,1,1))
barplot(
df[order(abs(df$Delta), decreasing = FALSE),]$Delta,
names.arg = df[order(abs(df$Delta), decreasing = FALSE),]$Protein,
xlim = c(-0.5, 0.5),
horiz=TRUE,
las=1)
})
})
}
})
output$pcaPlot <- renderPlotly({
if (is.null(input$gender) & is.null(input$bmi) ){return(create_PCA("both", "both"))}
else if (is.null(input$gender)){
return(create_PCA("both", input$bmi))
}
else if (is.null(input$bmi)){
return(create_PCA(input$gender, "both"))
}
else{
{return(create_PCA(input$gender, input$bmi))}
}
})
output$plsdaPlot <- renderPlotly({
if (is.null(input$gender) & is.null(input$bmi) ){return(create_PLSDA("both", "both"))}
else if (is.null(input$gender)){
return(create_PLSDA("both", input$bmi))
}
else if (is.null(input$bmi)){
return(create_PLSDA(input$gender, "both"))
}
else{
{return(create_PLSDA(input$gender, input$bmi))}
}
})
}
shinyApp(ui, server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seuratFunctions.R
\name{runSeuratJackStraw}
\alias{runSeuratJackStraw}
\title{runSeuratJackStraw
Compute jackstraw plot and store the computations in the input sce object}
\usage{
runSeuratJackStraw(
inSCE,
useAssay,
dims = NULL,
numReplicate = 100,
propFreq = 0.025,
externalReduction = NULL
)
}
\arguments{
\item{inSCE}{(sce) object on which to compute and store jackstraw plot}
\item{useAssay}{Specify name of the assay to use for scaling. Assay name
provided against this parameter is scaled by the function and used
for the computation of JackStraw scores along with the reduced dimensions
specified by the \code{dims} parameter.}
\item{dims}{Number of components to test in Jackstraw. If \code{NULL}, then
all components are used. Default \code{NULL}.}
\item{numReplicate}{Numeric value indicating the number of replicate
samplings to perform.
Default value is \code{100}.}
\item{propFreq}{Numeric value indicating the proportion of data to randomly
permute for each replicate.
Default value is \code{0.025}.}
\item{externalReduction}{Pass DimReduc object if PCA/ICA computed through
other libraries. Default \code{NULL}.}
}
\value{
Updated \code{SingleCellExperiment} object with jackstraw
computations stored in it
}
\description{
runSeuratJackStraw
Compute jackstraw plot and store the computations in the input sce object
}
\examples{
data(scExample, package = "singleCellTK")
\dontrun{
sce <- runSeuratNormalizeData(sce, useAssay = "counts")
sce <- runSeuratFindHVG(sce, useAssay = "counts")
sce <- runSeuratScaleData(sce, useAssay = "counts")
sce <- runSeuratPCA(sce, useAssay = "counts")
sce <- runSeuratJackStraw(sce, useAssay = "counts")
}
}
| /man/runSeuratJackStraw.Rd | permissive | compbiomed/singleCellTK | R | false | true | 1,754 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seuratFunctions.R
\name{runSeuratJackStraw}
\alias{runSeuratJackStraw}
\title{runSeuratJackStraw
Compute jackstraw plot and store the computations in the input sce object}
\usage{
runSeuratJackStraw(
inSCE,
useAssay,
dims = NULL,
numReplicate = 100,
propFreq = 0.025,
externalReduction = NULL
)
}
\arguments{
\item{inSCE}{(sce) object on which to compute and store jackstraw plot}
\item{useAssay}{Specify name of the assay to use for scaling. Assay name
provided against this parameter is scaled by the function and used
for the computation of JackStraw scores along with the reduced dimensions
specified by the \code{dims} parameter.}
\item{dims}{Number of components to test in Jackstraw. If \code{NULL}, then
all components are used. Default \code{NULL}.}
\item{numReplicate}{Numeric value indicating the number of replicate
samplings to perform.
Default value is \code{100}.}
\item{propFreq}{Numeric value indicating the proportion of data to randomly
permute for each replicate.
Default value is \code{0.025}.}
\item{externalReduction}{Pass DimReduc object if PCA/ICA computed through
other libraries. Default \code{NULL}.}
}
\value{
Updated \code{SingleCellExperiment} object with jackstraw
computations stored in it
}
\description{
runSeuratJackStraw
Compute jackstraw plot and store the computations in the input sce object
}
\examples{
data(scExample, package = "singleCellTK")
\dontrun{
sce <- runSeuratNormalizeData(sce, useAssay = "counts")
sce <- runSeuratFindHVG(sce, useAssay = "counts")
sce <- runSeuratScaleData(sce, useAssay = "counts")
sce <- runSeuratPCA(sce, useAssay = "counts")
sce <- runSeuratJackStraw(sce, useAssay = "counts")
}
}
|
# Pruning Functions
find_cvm_pval_asym <- function(n, STAT) {
  # Approximate p-value for a Cramer-von Mises statistic from a sample of
  # size n, using an inverse-Gaussian approximation to the null
  # distribution (mean 1/6, shape 5n / (24n - 18)).
  # NOTE: pinvgauss() comes from the 'statmod' package, loaded by callers.
  mu <- 1 / 6
  lambda <- 5 * n / (24 * n - 18)
  # Return the upper-tail probability directly. (The original ended with
  # `STAT <- 1 - pinvgauss(...)`, which returned the value *invisibly*
  # because a function ending in an assignment returns invisibly.)
  1 - pinvgauss(STAT, mu, lambda)
}
find_ad_pval_asym <- function(STAT) {
  # Approximate p-value for an Anderson-Darling statistic, using the
  # two-branch CDF approximation of Marsaglia & Marsaglia (2004),
  # "Evaluating the Anderson-Darling Distribution".
  #
  # Args:
  #   STAT: the A-squared statistic; must be strictly positive.
  # Returns: the upper-tail probability (p-value).
  #
  # BUG FIX: the original *returned the character string*
  # "ERROR, STAT out of bounds" for STAT <= 0, silently poisoning numeric
  # callers; signal a real error instead.
  if (STAT <= 0) {
    stop("STAT out of bounds: must be > 0")
  }
  if (STAT < 2) {
    # Small-statistic branch of the Marsaglia approximation.
    pval <- 1 - STAT^(-0.5) * exp(-1.2337141 / STAT) *
      (2.00012 + (0.247105 - (0.0649821 - (0.0347962 -
        (0.0116720 - 0.00168691 * STAT) * STAT) * STAT) * STAT) * STAT)
  } else {
    # Large-statistic branch.
    pval <- 1 - exp(-exp(1.0776 - (2.30695 - (0.43424 - (0.082433 -
      (0.008056 - 0.0003146 * STAT) * STAT) * STAT) * STAT) * STAT))
  }
  pval
}
# Simulates one in-control run of a batch-wise CvM control chart that
# monitors an asymptotic p-value, and returns the run length (number of
# batches until the monitored statistic drops below control_limit).
# ic_data:          reference (in-control) sample used to build the ECDF.
# lambda:           EWMA smoothing weight (used when method == "ewma").
# control_limit:    lower control limit for the monitored p-value statistic.
# m:                batch size drawn at each step.
# ICdist:           name of the RNG used to draw in-control batches.
# IC_dist_ops:      extra arguments passed to that RNG.
# track_candidates: diagnostic; candidate limits whose run lengths are all
#                   derived from one long run.
# diag:             diagnostic; also return per-step pruning bookkeeping.
# method:           pruning rule forwarded to prune() (e.g. "partha",
#                   "lookback", "ewma").
# run_until:        diagnostic; forces a run of roughly 300 batches.
# Depends on cvm.res() and prune(), which are defined in
# functions_prune_internal.R and sourced into this function's environment.
EWMA_Find_CL_CVM_Pval <- function(ic_data = rnorm(500),
lambda = .05,
control_limit = .01,
m = 5,
ICdist = "rnorm",
IC_dist_ops = NULL,
track_candidates=NULL,
diag=FALSE,
method="partha",
run_until=FALSE){
# run_until: diagnostic option. It should always be set to false
# diag: diagnostic option
# track_candidates: diagnostic option. Should be true/false, but it isn't. oh well
require(statmod, quietly=TRUE) # needed to find cvm pvalue approximation
constant <- lambda # Makes things more compatible later
# Initializing Variables
data <- NULL
u <- 1 # initialize first value at 1 (p-value approach)
i <- 1
j <- 1:m
# Resolve the RNG name to a function visible from the caller.
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
f_ic <- ecdf(sorted_ic_data)
store_keep <- NULL
store_remove <- NULL
# Pruning methods put here. If outside function won't compile right
source("functions_prune_internal.R", local=TRUE)
if(run_until){
control_limit <- 0
}
# Chart loop: keep sampling batches until the statistic signals.
while (tail(u, 1) > control_limit) {
# Calculate new value of test statisic
# Each column of `data` is one batch of size m still in the active window.
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- cbind(data, new_data) # master data
num_active_batches <- dim(data)[2]
#if(dim(data)[2] > 2000) stop("data has grown very large")
# Pruning Function Here
if(i==1){
STAT <- cvm.res(x=data, y=f_ic)
# find p-value
pval <- find_cvm_pval_asym(n=m, STAT=STAT)
if(method=="ewma"){
ewma_stat <- pval # average p-val?
}
}else{
# on the previous steps data + new data, run a cvm test
if(method=="lookback"){
# "lookback" compares the p-value with and without the oldest batch.
STAT.big <- cvm.res(x=data, y=f_ic)
pval.big <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT.big)
STAT.small <- cvm.res(x=data[,-1], y=f_ic)
pval.small <- find_cvm_pval_asym(n=m*(num_active_batches-1), STAT=STAT.small)
}else {
STAT <- cvm.res(x=data, y=f_ic)
pval <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT)
}
# find amount of previous data to remove
if(method=="lookback"){
num_remove <- prune(pval=pval.big, method=method, pval.small=pval.small)
pval = pval.big #idk, maybe
}else{
num_remove <- prune(pval=pval, method=method)
}
# For an ewma p-value method, what we gon do?
# EWMA of p-values; u_nK here is the previous step's chart value
# (first set at the bottom of the i == 1 iteration).
if(method=="ewma"){
ewma_stat = constant * pval + (1 - constant) * u_nK
}
# for diagnostics
num_keep <- num_active_batches - num_remove
store_keep <- append(store_keep, num_keep)
store_remove <- append(store_remove, num_remove)
# print(paste("On Batch", i))
# print(paste("Number removed equals ", num_remove))
# print(paste("Amount of batches used equals ", num_keep))
# NOTE(review): when pruning leaves a single batch this subsetting drops
# `data` to a vector (drop = TRUE); cbind() restores a matrix on the next
# iteration, but drop = FALSE would be safer -- confirm.
if(num_remove > 0){
data <- data[,-1:-num_remove]
}
}
# update control statistic
if(method=="ewma"){
u_nK = ewma_stat
}else{
u_nK <- pval
}
u <- append(u, u_nK)
i <- i + 1
# Diagnostic mode: force a signal at ~300 batches.
if(run_until){
if(i == 300) u[length(u)] <- -.2
}
}
i <- i - 1 # 10/20/2016 -- this adjust for the time out of control value!
# For the p-value approach, we can calculate for the SMALLEST p-value:
# one long run yields the run length for every smaller candidate limit.
if(!is.null(track_candidates)){
track_ucl <-numeric(length(track_candidates))
for(j in seq_along(track_candidates)){
#print(as.numeric(which.min(track_candidates[j] < u)))
track_ucl[j] <- as.numeric(which.min(track_candidates[j] < u)) - 1 #investigate later
}
names(track_ucl) <- as.character(track_candidates)
return(list("Time OOC"=i, "Lower CLs"=track_ucl))
}
if(diag){
return(list("control_stats" = u, "Time OOC" = i, "Track Removed"=store_remove,
"Track Keep"=store_keep))
}
return(list("control_stats" = u, "Time OOC" = i))
}
# Out-of-control companion to EWMA_Find_CL_CVM_Pval(): the same monitoring
# loop, but batches after changepoint `tau` are drawn from OOCdist instead
# of ICdist, so the returned "Time OOC" measures detection delay.
# tau: batches 1..tau are in-control, batches tau+1.. are out-of-control
#      (tau = 0 means every batch is out-of-control).
# Depends on cvm.res() and prune() from functions_prune_internal.R.
EWMA_Find_CL_CVM_Pval_OOC <- function(ic_data=rnorm(500),
lambda=.05,
control_limit=.01,
m=5,
tau=0,
ICdist="rnorm", IC_dist_ops=NULL,
OOCdist="rnorm", OOC_dist_ops=NULL,
method="partha"){
# 11/1/2016 -- Checked for accuracy
require(statmod, quietly=TRUE) # Needed for asymptotic p-value
constant <- lambda # Makes things more compatible later
# Initializing Variables
data <- NULL
u <- 1 # initialize first value at 1 (p-value approach)
i <- 1
j <- 1:m
# Resolve both RNG names to functions visible from the caller.
rOOC <- get(OOCdist, mode = "function", envir = parent.frame())
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
f_ic <- ecdf(sorted_ic_data)
# For diagnostic functions
store_keep <- NULL
store_remove <- NULL
# pruning function
# Pruning methods put here. If outside function won't compile right
source("functions_prune_internal.R", local=TRUE)
# Chart loop: sample until the monitored p-value statistic signals.
while (tail(u, 1) > control_limit) {
# Draw the next batch from the in-control or out-of-control distribution,
# depending on whether the changepoint tau has passed.
if(i <= tau){
new_data <- do.call(rIC, c(n=list(m), IC_dist_ops))
}else{
# example calls:
# rOOC= standard_chi, list(df=1, center_mean=1, center_sd=1)
new_data <- do.call(rOOC, c(n=list(m), OOC_dist_ops))
}
data <- cbind(data, new_data) # master data
num_active_batches <- dim(data)[2]
if(i==1){
STAT <- cvm.res(x=data, y=f_ic)
# find p-value
pval <- find_cvm_pval_asym(n=m, STAT=STAT)
if(method=="ewma"){
ewma_stat <- pval # Is this right? I feel like this is going to bias results!
}
}else{
# on the previous steps data + new data, run a cvm test
if(method=="lookback"){
# "lookback" compares the p-value with and without the oldest batch.
STAT.big <- cvm.res(x=data, y=f_ic)
pval.big <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT.big)
STAT.small <- cvm.res(x=data[,-1], y=f_ic)
pval.small <- find_cvm_pval_asym(n=m*(num_active_batches-1), STAT=STAT.small)
}else {
STAT <- cvm.res(x=data, y=f_ic)
pval <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT)
}
# find amount of previous data to remove
if(method=="lookback"){
num_remove <- prune(pval=pval.big, method=method, pval.small=pval.small)
pval = pval.big #idk, maybe
}else{
num_remove <- prune(pval=pval, method=method)
}
# For an ewma p-value method, what we gon do?
# u_nK here is the previous step's chart value (set at loop bottom).
if(method=="ewma"){
ewma_stat = constant * pval + (1 - constant) * u_nK
}
# for diagnostics
num_keep <- num_active_batches - num_remove
store_keep <- append(store_keep, num_keep)
store_remove <- append(store_remove, num_remove)
# print(paste("On Batch", i))
# print(paste("Number removed equals ", num_remove))
# print(paste("Amount of batches used equals ", num_keep))
# NOTE(review): drops to a vector when one batch remains (no drop = FALSE);
# cbind() restores a matrix next iteration -- confirm this is acceptable.
if(num_remove > 0){
data <- data[,-1:-num_remove]
}
}
# update control statistic
if(method=="ewma"){
u_nK = ewma_stat
}else{
u_nK <- pval
}
u <- append(u, u_nK)
i <- i + 1
}
i <- i - 1 # 10/20/2016 -- this adjust for the time out of control value!
return(list("control_stats" = u, "Time OOC" = i))
}
EWMA_Find_CL_CVM_Prune <- function(ic_data = rnorm(500),
lambda = .05,
control_limit = .06,
m = 5,
ICdist = "rnorm",
IC_dist_ops = NULL,
bootstrap_samples = 3000,
track_candidates=NULL,
constant=.5,
pval_table){
# EWMA chart on the centered CvM statistic with p-value-driven pruning of
# the oldest batches; returns the run length until the chart rises above
# control_limit.
# ic_data:           reference (in-control) sample used to build the ECDF.
# lambda:            EWMA smoothing weight.
# control_limit:     upper control limit for the EWMA statistic.
# m:                 batch size drawn at each step.
# ICdist/IC_dist_ops: RNG name and extra arguments for in-control batches.
# bootstrap_samples: bootstrap replicates used to estimate the in-control
#                    mean d0 of the CvM statistic.
# track_candidates:  diagnostic; candidate limits evaluated from one run.
# constant:          p-value anchor controlling how aggressively to prune.
# pval_table:        lookup table consumed by find_pval().
# Depends on project helpers Fast_Bootstrap_CVM(), cvm.res(), find_pval().
#
# Bug fixes versus the previous draft:
#   * d0 is now computed (the EWMA update referenced d0, whose computation
#     had been commented out, causing an "object 'd0' not found" error).
#   * the i == 1 find_pval() call now passes pval_table, matching i > 1.
#   * num_remove used dim(data[2]) (dims of a column subset) instead of
#     dim(data)[2].
#   * pruning used data[, -1:num_remove], which mixes negative and positive
#     subscripts and errors in R; now -(1:num_remove) with drop = FALSE.
#   * unused leftovers (p1, j, num_history/tr/weights, how_far_back, bare
#     debug expressions and prints) removed.
d0 <- Fast_Bootstrap_CVM(ic_data=ic_data, m=m, bootstrap_samples=bootstrap_samples)
# Initializing variables
data <- NULL
u <- 0 # chart starts at 0 and signals when it rises above control_limit
i <- 1
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Pre-calculations
sorted_ic_data <- sort(ic_data)
f_ic <- ecdf(sorted_ic_data)
while (tail(u, 1) < control_limit) {
# Draw a new batch of size m and append it to the active window
# (each column of `data` is one batch).
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- cbind(data, new_data)
if(dim(data)[2] > 2000) stop("data has grown very large")
# CvM statistic of the active window against the in-control ECDF,
# and its table-based p-value.
STAT <- cvm.res(x=data, y=f_ic)
pval <- find_pval(N=as.character(m), STAT=STAT, pval_table=pval_table)
if(i > 1){
# Prune a fraction of the oldest batches; the fraction grows with the
# p-value's distance from `constant`, capped at 20% per step.
percent_prune <- min(.2, ((pval-constant)/(1-constant))^2)
num_keep <- ceiling(dim(data)[2] * (1 - percent_prune))
num_remove <- dim(data)[2] - num_keep
if(num_remove > 0){
data <- data[, -(1:num_remove), drop=FALSE]
}
}
# EWMA update on the centered statistic.
u_nK <- lambda * (STAT - d0) + (1 - lambda) * tail(u, 1)
u <- append(u, u_nK)
i <- i + 1
# stop after 20 batches regardless (diagnostic cap)
if(i > 20){
u[length(u)] <- control_limit + 1
}
}
i <- i - 1 # adjust for the step on which the chart signalled
# One long run yields the run length for every smaller candidate limit.
if(!is.null(track_candidates)){
track_ucl <- numeric(length(track_candidates))
for(j in seq_along(track_candidates)){
track_ucl[j] <- as.numeric(which.min(track_candidates[j] > u)) - 1
}
names(track_ucl) <- as.character(track_candidates)
return(list("Time OOC"=i, "Lower CLs"=track_ucl))
}
list("control_stats" = u, "Time OOC" = i)
}
# EWMA chart on a weighted Anderson-Darling statistic over a sliding window
# of the num_history most recent batches; returns the run length until the
# chart rises above control_limit.
# d0 is the bootstrapped in-control mean of the statistic, so the EWMA
# tracks (D_n - d0).
# Depends on project helpers Fast_Bootstrap_AD() and ad.weighted().
EWMA_Find_CL_AD_Weight <- function(ic_data = rnorm(500),
lambda = .05,
control_limit = .06,
m = 5,
ICdist = "rnorm",
IC_dist_ops = NULL,
bootstrap_samples = 3000,
track_candidates=NULL) {
# See below for track_candidates explanation
# Calculate d0 via bootstrapping
d0 <- Fast_Bootstrap_AD(ic_data=ic_data, m=m, bootstrap_samples=bootstrap_samples)
# Initializing Variables
data <- NULL
u <- 0 # initialize first value at 1 (p-value approach)
i <- 1
j <- 1:m
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Will be parameterized later
# Window length and geometric down-weighting of older batches.
num_history <- 15
tr <- 1:num_history
weights <- rep(.9^tr, each=num_history)
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
len_com <- len_ic + m*num_history
ind <- 1:(len_com-1)
lenx_t_leny <- len_ic * m*num_history
f_ic <- ecdf(sorted_ic_data)
while (tail(u, 1) < control_limit) {
# Warm-up: while fewer than num_history batches exist, grow the window
# and rebuild the weights for its current size.
# NOTE(review): this block and the next one have the same condition and
# each draws a batch, so two batches are generated per warm-up iteration
# and the second one displaces the oldest column -- looks like a leftover
# duplicate; confirm intent before relying on the warm-up behavior.
if(i <= num_history){
tr_temp <- 1:i
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- cbind(data, new_data)
if(i==1){
data <- data[, -1]
}
weights <- rep(.9^tr_temp, each=m)
# NOTE(review): the .temp variables below are never used afterwards,
# and ind.temp uses len_com rather than len.com.temp (likely a typo).
len.com.temp <- len_ic + dim(data)[1]*dim(data)[2]
ind.temp <- 1:(len_com-1)
lenx.t.leny.temp <- len_ic * dim(data)[1]*dim(data)[2]
}
if(i <= num_history){
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- data[, -1]
data <- cbind(data, new_data)
}
# data <- do.call(rIC, c(list(m), IC_dist_ops))
# Calculate new value of test statisic
D_n <- ad.weighted(x=data, y=f_ic, w=weights)
# print(D_n)
# EWMA update on the centered statistic.
u_nK <- lambda * (D_n - d0) + (1 - lambda) * tail(u, 1)
# print(paste("u=",u_nK))
u <- append(u, u_nK)
i <- i + 1
# stop after 20 regardless
if(i > 20){
u[length(u)] <- control_limit + 1
}
}
# NOTE(review): unlike the CVM variants, i is NOT decremented before the
# return here, so "Time OOC" is one larger -- confirm whether intentional.
# If we have a large upper bound, we can calculate all our upper bounds below that number
if(!is.null(track_candidates)){
track_ucl <-numeric(length(track_candidates))
for(j in seq_along(track_candidates)){
track_ucl[j] <- as.numeric(which.min(track_candidates[j] > u)) - 1 #investigate later
}
names(track_ucl) <- as.character(track_candidates)
return(list("Time OOC"=i, "Lower CLs"=track_ucl))
}
return(list("control_stats" = u, "Time OOC" = i))
}
# Pruning Functions
| /functions_prune.R | no_license | morndorff/NonParProcessControl | R | false | false | 14,939 | r | # Pruning Functions
find_cvm_pval_asym <- function(n, STAT){
mu <- 1/6
lambda <- 5*n /(24*n-18)
STAT<- 1-pinvgauss(STAT, mu, lambda)
}
find_ad_pval_asym <- function(STAT){
# from Marsalia paper
if(STAT <=0) return("ERROR, STAT out of bounds")
if(STAT<2 & STAT > 0){
pval <- 1- STAT^(-.5)*exp(-1.2337141/STAT)*(2.00012 + (0.247105 -(.0649821 -(.0347962 -(.0116720 -.00168691*STAT)*STAT)*STAT)*STAT)*STAT)
}else{
pval <- 1- exp(-exp(1.0776-(2.30695-(.43424-(.082433-(.008056-.0003146*STAT)*STAT)*STAT)*STAT)*STAT))
}
# mu <- 1/6
# lambda <- 5*n /(24*n-18)
# STAT<- 1-pinvgauss(STAT, mu, lambda)
}
EWMA_Find_CL_CVM_Pval <- function(ic_data = rnorm(500),
lambda = .05,
control_limit = .01,
m = 5,
ICdist = "rnorm",
IC_dist_ops = NULL,
track_candidates=NULL,
diag=FALSE,
method="partha",
run_until=FALSE){
# run_until: diagnostic option. It should always be set to false
# diag: diagnostic option
# track_candidates: diagnostic option. Should be true/false, but it isn't. oh well
require(statmod, quietly=TRUE) # needed to find cvm pvalue approximation
constant <- lambda # Makes things more compatible later
# Initializing Variables
data <- NULL
u <- 1 # initialize first value at 1 (p-value approach)
i <- 1
j <- 1:m
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
f_ic <- ecdf(sorted_ic_data)
store_keep <- NULL
store_remove <- NULL
# Pruning methods put here. If outside function won't compile right
source("functions_prune_internal.R", local=TRUE)
if(run_until){
control_limit <- 0
}
while (tail(u, 1) > control_limit) {
# Calculate new value of test statisic
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- cbind(data, new_data) # master data
num_active_batches <- dim(data)[2]
#if(dim(data)[2] > 2000) stop("data has grown very large")
# Pruning Function Here
if(i==1){
STAT <- cvm.res(x=data, y=f_ic)
# find p-value
pval <- find_cvm_pval_asym(n=m, STAT=STAT)
if(method=="ewma"){
ewma_stat <- pval # average p-val?
}
}else{
# on the previous steps data + new data, run a cvm test
if(method=="lookback"){
STAT.big <- cvm.res(x=data, y=f_ic)
pval.big <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT.big)
STAT.small <- cvm.res(x=data[,-1], y=f_ic)
pval.small <- find_cvm_pval_asym(n=m*(num_active_batches-1), STAT=STAT.small)
}else {
STAT <- cvm.res(x=data, y=f_ic)
pval <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT)
}
# find amount of previous data to remove
if(method=="lookback"){
num_remove <- prune(pval=pval.big, method=method, pval.small=pval.small)
pval = pval.big #idk, maybe
}else{
num_remove <- prune(pval=pval, method=method)
}
# For an ewma p-value method, what we gon do?
if(method=="ewma"){
ewma_stat = constant * pval + (1 - constant) * u_nK
}
# for diagnostics
num_keep <- num_active_batches - num_remove
store_keep <- append(store_keep, num_keep)
store_remove <- append(store_remove, num_remove)
# print(paste("On Batch", i))
# print(paste("Number removed equals ", num_remove))
# print(paste("Amount of batches used equals ", num_keep))
if(num_remove > 0){
data <- data[,-1:-num_remove]
}
}
# update control statistic
if(method=="ewma"){
u_nK = ewma_stat
}else{
u_nK <- pval
}
u <- append(u, u_nK)
i <- i + 1
if(run_until){
if(i == 300) u[length(u)] <- -.2
}
}
i <- i - 1 # 10/20/2016 -- this adjust for the time out of control value!
# For the p-value approach, we can calclulate for the SMALLEST p-value
if(!is.null(track_candidates)){
track_ucl <-numeric(length(track_candidates))
for(j in seq_along(track_candidates)){
#print(as.numeric(which.min(track_candidates[j] < u)))
track_ucl[j] <- as.numeric(which.min(track_candidates[j] < u)) - 1 #investigate later
}
names(track_ucl) <- as.character(track_candidates)
return(list("Time OOC"=i, "Lower CLs"=track_ucl))
}
if(diag){
return(list("control_stats" = u, "Time OOC" = i, "Track Removed"=store_remove,
"Track Keep"=store_keep))
}
return(list("control_stats" = u, "Time OOC" = i))
}
EWMA_Find_CL_CVM_Pval_OOC <- function(ic_data=rnorm(500),
lambda=.05,
control_limit=.01,
m=5,
tau=0,
ICdist="rnorm", IC_dist_ops=NULL,
OOCdist="rnorm", OOC_dist_ops=NULL,
method="partha"){
# 11/1/2016 -- Checked for accuracy
require(statmod, quietly=TRUE) # Needed for asymptotic p-value
constant <- lambda # Makes things more compatible later
# Initializing Variables
data <- NULL
u <- 1 # initialize first value at 1 (p-value approach)
i <- 1
j <- 1:m
rOOC <- get(OOCdist, mode = "function", envir = parent.frame())
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
f_ic <- ecdf(sorted_ic_data)
# For diagnostic functions
store_keep <- NULL
store_remove <- NULL
# pruning function
# Pruning methods put here. If outside function won't compile right
source("functions_prune_internal.R", local=TRUE)
while (tail(u, 1) > control_limit) {
if(i <= tau){
new_data <- do.call(rIC, c(n=list(m), IC_dist_ops))
}else{
# example calls:
# rOOC= standard_chi, list(df=1, center_mean=1, center_sd=1)
new_data <- do.call(rOOC, c(n=list(m), OOC_dist_ops))
}
data <- cbind(data, new_data) # master data
num_active_batches <- dim(data)[2]
if(i==1){
STAT <- cvm.res(x=data, y=f_ic)
# find p-value
pval <- find_cvm_pval_asym(n=m, STAT=STAT)
if(method=="ewma"){
ewma_stat <- pval # Is this right? I feel like this is going to bias results!
}
}else{
# on the previous steps data + new data, run a cvm test
if(method=="lookback"){
STAT.big <- cvm.res(x=data, y=f_ic)
pval.big <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT.big)
STAT.small <- cvm.res(x=data[,-1], y=f_ic)
pval.small <- find_cvm_pval_asym(n=m*(num_active_batches-1), STAT=STAT.small)
}else {
STAT <- cvm.res(x=data, y=f_ic)
pval <- find_cvm_pval_asym(n=m*num_active_batches, STAT=STAT)
}
# find amount of previous data to remove
if(method=="lookback"){
num_remove <- prune(pval=pval.big, method=method, pval.small=pval.small)
pval = pval.big #idk, maybe
}else{
num_remove <- prune(pval=pval, method=method)
}
# For an ewma p-value method, what we gon do?
if(method=="ewma"){
ewma_stat = constant * pval + (1 - constant) * u_nK
}
# for diagnostics
num_keep <- num_active_batches - num_remove
store_keep <- append(store_keep, num_keep)
store_remove <- append(store_remove, num_remove)
# print(paste("On Batch", i))
# print(paste("Number removed equals ", num_remove))
# print(paste("Amount of batches used equals ", num_keep))
if(num_remove > 0){
data <- data[,-1:-num_remove]
}
}
# update control statistic
if(method=="ewma"){
u_nK = ewma_stat
}else{
u_nK <- pval
}
u <- append(u, u_nK)
i <- i + 1
}
i <- i - 1 # 10/20/2016 -- this adjust for the time out of control value!
return(list("control_stats" = u, "Time OOC" = i))
}
EWMA_Find_CL_CVM_Prune <- function(ic_data = rnorm(500),
lambda = .05,
control_limit = .06,
m = 5,
ICdist = "rnorm",
IC_dist_ops = NULL,
bootstrap_samples = 3000,
track_candidates=NULL,
constant=.5,
pval_table){
# See below for track_candidates explanation
# Calculate d0 via bootstrapping
# d0 <- Fast_Bootstrap_CVM(ic_data=ic_data, m=m, bootstrap_samples=bootstrap_samples)
# Initializing Variables
data <- NULL
u <- 0 # initialize first value at 0
i <- 1
j <- 1:m
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Will be parameterized later
num_history <- 15
tr <- 1:num_history
weights <- rep(.9^tr, each=num_history)
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
# len_com <- len_ic + m*num_history
# ind <- 1:(len_com-1)
# lenx_t_leny <- len_ic * m*num_history
f_ic <- ecdf(sorted_ic_data)
how_far_back <- 1
while (tail(u, 1) < control_limit) {
# if(i <= num_history){
# tr_temp <- 1:i
# new_data <- do.call(rIC, c(list(m), IC_dist_ops))
# data <- cbind(data, new_data)
# if(i==1){
# data <- data[, -1]
# }
# weights <- rep(.9^tr_temp, each=m)
# len.com.temp <- len_ic + dim(data)[1]*dim(data)[2]
# ind.temp <- 1:(len_com-1)
# lenx.t.leny.temp <- len_ic * dim(data)[1]*dim(data)[2]
# }
# if(i <= num_history){
# new_data <- do.call(rIC, c(list(m), IC_dist_ops))
# data <- data[, -1]
# data <- cbind(data, new_data)
# }
# data <- do.call(rIC, c(list(m), IC_dist_ops))
# Calculate new value of test statisic
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- cbind(data, new_data) # master data
if(dim(data)[2] > 2000) stop("data has grown very large")
paste("at step ", i, " the dimension of data is", dim(data)[2])
# Pruning Function Here
if(i==1){
p1 <- 1
STAT <- cvm.res(x=data, y=f_ic)
print("i=1")
# lookup p-value from table
pval <- find_pval(N=as.character(m), STAT=STAT)
# Error catch if N is not found in table needs to go HERE
}else{
# on the previous steps data + new data, run a cvm test
STAT <- cvm.res(x=data, y=f_ic)
pval <- find_pval(N=as.character(m), STAT=STAT, pval_table=pval_table)
# re-adjust effective (active) data
# obviously, clean this up later to make it look nicer
percent_prune <- min(.2, ((pval-constant)/(1-constant))^2)
print(((pval-constant)/(1-constant))^2)
percent_prune
percent_keep <- 1 - percent_prune
num_keep <- ceiling(dim(data)[2] * percent_keep)
num_remove <- dim(data[2])-num_keep
if(num_remove>0){
data <- data[,-1:num_remove]
}
}
#D_n <- cvm.weighted(x=data, y=f_ic, w=weights)
# print(D_n)
u_nK <- lambda * (STAT - d0) + (1 - lambda) * tail(u, 1)
# print(paste("u=",u_nK))
u <- append(u, u_nK)
i <- i + 1
# stop after 20 regardless
if(i > 20){
u[length(u)] <- control_limit + 1
}
}
i <- i -1 # this adjust for the time out of control!
# If we have a large upper bound, we can calculate all our upper bounds below that number
if(!is.null(track_candidates)){
track_ucl <-numeric(length(track_candidates))
for(j in seq_along(track_candidates)){
track_ucl[j] <- as.numeric(which.min(track_candidates[j] > u)) - 1 #investigate later
}
names(track_ucl) <- as.character(track_candidates)
return(list("Time OOC"=i, "Lower CLs"=track_ucl))
}
return(list("control_stats" = u, "Time OOC" = i))
}
EWMA_Find_CL_AD_Weight <- function(ic_data = rnorm(500),
lambda = .05,
control_limit = .06,
m = 5,
ICdist = "rnorm",
IC_dist_ops = NULL,
bootstrap_samples = 3000,
track_candidates=NULL) {
# See below for track_candidates explanation
# Calculate d0 via bootstrapping
d0 <- Fast_Bootstrap_AD(ic_data=ic_data, m=m, bootstrap_samples=bootstrap_samples)
# Initializing Variables
data <- NULL
u <- 0 # initialize first value at 1 (p-value approach)
i <- 1
j <- 1:m
rIC <- get(ICdist, mode = "function", envir = parent.frame())
# Will be parameterized later
num_history <- 15
tr <- 1:num_history
weights <- rep(.9^tr, each=num_history)
# Pre Calculations for functions
sorted_ic_data <- sort(ic_data)
len_ic <- length(sorted_ic_data)
len_com <- len_ic + m*num_history
ind <- 1:(len_com-1)
lenx_t_leny <- len_ic * m*num_history
f_ic <- ecdf(sorted_ic_data)
while (tail(u, 1) < control_limit) {
if(i <= num_history){
tr_temp <- 1:i
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- cbind(data, new_data)
if(i==1){
data <- data[, -1]
}
weights <- rep(.9^tr_temp, each=m)
len.com.temp <- len_ic + dim(data)[1]*dim(data)[2]
ind.temp <- 1:(len_com-1)
lenx.t.leny.temp <- len_ic * dim(data)[1]*dim(data)[2]
}
if(i <= num_history){
new_data <- do.call(rIC, c(list(m), IC_dist_ops))
data <- data[, -1]
data <- cbind(data, new_data)
}
# data <- do.call(rIC, c(list(m), IC_dist_ops))
# Calculate new value of test statisic
D_n <- ad.weighted(x=data, y=f_ic, w=weights)
# print(D_n)
u_nK <- lambda * (D_n - d0) + (1 - lambda) * tail(u, 1)
# print(paste("u=",u_nK))
u <- append(u, u_nK)
i <- i + 1
# stop after 20 regardless
if(i > 20){
u[length(u)] <- control_limit + 1
}
}
# If we have a large upper bound, we can calculate all our upper bounds below that number
if(!is.null(track_candidates)){
track_ucl <-numeric(length(track_candidates))
for(j in seq_along(track_candidates)){
track_ucl[j] <- as.numeric(which.min(track_candidates[j] > u)) - 1 #investigate later
}
names(track_ucl) <- as.character(track_candidates)
return(list("Time OOC"=i, "Lower CLs"=track_ucl))
}
return(list("control_stats" = u, "Time OOC" = i))
}
# Pruning Functions
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RAFdata.R
\docType{data}
\name{RAFdata}
\alias{RAFdata}
\title{A sample data.frame as might be constructed by getNetCDF(),
containing a short period of measurements from an NSF/NCAR GV
flight in a project called "IDEAS-4". The data.frame contains
a set of measurements, one row per second, and a "Time"
variable.}
\format{
The data.frame contains 301 rows and 29 variables:
\describe{
\item{Time}{A POSIX time, units: seconds since 2013-10-01 00:00:00 +0000, time zone UTC}
\item{ATTACK}{angle of attack, degrees}
\item{SSLIP}{sideslip angle, degrees}
\item{GGVEW}{ground speed, east component, from GPS, m/s}
\item{GGVNS}{ground speed, north component, from GPS, m/s}
\item{GGVSPD}{vertical speed of aircraft, from GPS, m/s}
\item{VEW}{ground speed, east component, from INS, m/s}
\item{VNS}{ground speed, north component, from INS, m/s}
\item{ATX}{air temperature, primary measurement, deg.C}
\item{DPXC}{dew point, primary measurement, deg.C}
\item{THDG}{heading with respect to true north, degrees}
\item{ROLL}{roll angle, positive for right-wing down, degrees}
\item{PITCH}{pitch angle, positive for nose up, degrees}
\item{PSXC}{ambient pressure, primary measurement, hPa}
\item{QCXC}{dynamic pressure, primary measurement, hPa}
\item{EWX}{water vapor pressure, primary measurement, hPa}
\item{RTH1}{recovery temperature, heated sensor 1, deg.C}
\item{RTH2}{recovery temperature, heated sensor 2, deg.C}
\item{ADIFR}{pressure difference, vertically aligned radome ports, hPa}
\item{BDIFR}{pressure difference, horizontally aligned radome ports, hPa}
\item{TASX}{true airspeed, primary measurement, m/s}
\item{GGALT}{aircraft altitude, from GPS, m}
\item{LATC}{aircraft latitude, from INS/GPS combination, deg.N}
\item{LONC}{aircraft longitude, from INS/GPS combination, deg.E}
\item{PLWC}{power passing through CSIRO/King LWC sensor, Watts}
\item{CONCD}{droplet concentration from a CDP, per cubic cm}
\item{WDC}{wind direction, GPS-corrected, degrees from north}
\item{WSC}{wind speed, GPS-corrected, m/s}
\item{WIC}{vertical wind speed, m/s}
}
}
\source{
\url{http://data.eol.ucar.edu/master_list/?project=IDEAS-4_GV}
}
\usage{
RAFdata
}
\description{
A sample data.frame as might be constructed by getNetCDF(),
containing a short period of measurements from an NSF/NCAR GV
flight in a project called "IDEAS-4". The data.frame contains
a set of measurements, one row per second, and a "Time"
variable.
}
\keyword{datasets}
| /man/RAFdata.Rd | permissive | NCAR/Ranadu | R | false | true | 2,559 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RAFdata.R
\docType{data}
\name{RAFdata}
\alias{RAFdata}
\title{A sample data.frame as might be constructed by getNetCDF(),
containing a short period of measurements from an NSF/NCAR GV
flight in a project called "IDEAS-4". The data.frame contains
a set of measurements, one row per second, and a "Time"
variable.}
\format{
The data.frame contains 301 rows and 29 variables:
\describe{
\item{Time}{A POSIX time, units: seconds since 2013-10-01 00:00:00 +0000, time zone UTC}
\item{ATTACK}{angle of attack, degrees}
\item{SSLIP}{sideslip angle, degrees}
\item{GGVEW}{ground speed, east component, from GPS, m/s}
\item{GGVNS}{ground speed, north component, from GPS, m/s}
\item{GGVSPD}{vertical speed of aircraft, from GPS, m/s}
\item{VEW}{ground speed, east component, from INS, m/s}
\item{VNS}{ground speed, north component, from INS, m/s}
\item{ATX}{air temperature, primary measurement, deg.C}
\item{DPXC}{dew point, primary measurement, deg.C}
\item{THDG}{heading wrt true north, degrees}
\item{ROLL}{roll angle, positive for right-wing down, degrees}
\item{PITCH}{pitch angle, positive for nose up, degrees}
\item{PSXC}{ambient pressure, primary measurement, hPa}
\item{QCXC}{dynamic pressure, primary measurement, hPa}
\item{EWX}{water vapor pressure, primary measurement, hPa}
\item{RTH1}{recovery temperature, heated sensor 1, deg.C}
\item{RTH2}{recovery temperature, heated sensor 2, deg.C}
\item{ADIFR}{pressure difference, vertically aligned radome ports, hPa}
\item{BDIFR}{pressure difference, horizontally aligned radome ports, hPa}
\item{TASX}{true airspeed, primary measurement, m/s}
\item{GGALT}{aircraft altitude, from GPS, m}
\item{LATC}{aircraft latitude, from INS/GPS combination, deg.N}
\item{LONC}{aircraft longitude, from INS/GPS combination, deg.E}
\item{PLWC}{power passing through CSIRO/King LWC sensor, Watts}
\item{CONCD}{droplet concentration from a CDP, per cubic cm}
\item{WDC}{wind direction, GPS-corrected, degrees from north}
\item{WSC}{wind speed, GPS-corrected, m/s}
\item{WIC}{vertical wind speed, m/s}
}
}
\source{
\url{http://data.eol.ucar.edu/master_list/?project=IDEAS-4_GV}
}
\usage{
RAFdata
}
\description{
A sample data.frame as might be constructed by getNetCDF(),
containing a short period of measurements from an NSF/NCAR GV
flight in a project called "IDEAS-4". The data.frame contains
a set of measurements, one row per second, and a "Time"
variable.
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
# Parses StreetEvents conference-call XML transcripts into the PostgreSQL
# table streetevents.speaker_data, processing only calls whose
# (file_name, last_update) pair is not yet in the table.
# Assumes: a reachable PostgreSQL server with the streetevents schema,
# the Perl helper import_speaker_data.pl, and the EDGAR_DIR environment
# variable pointing at the data root.
# Get a list of files that need to be processed ----
library("RPostgreSQL")
pg <- dbConnect(PostgreSQL())
# Create the target table and lookup indexes on first run.
# NOTE(review): dbGetQuery() is used for DDL here; DBI convention is
# dbExecute()/dbSendStatement() for statements that return no rows.
if (!dbExistsTable(pg, c("streetevents", "speaker_data"))) {
dbGetQuery(pg, "
CREATE TABLE streetevents.speaker_data
(
file_name text,
last_update timestamp without time zone,
speaker_name text,
employer text,
role text,
speaker_number integer,
context text,
speaker_text text,
language text
);
SET maintenance_work_mem='3GB';
CREATE INDEX ON streetevents.speaker_data (file_name, last_update);
CREATE INDEX ON streetevents.speaker_data (file_name);")
}
# Note that this assumes that streetevents.calls is up to date.
# For each (file_name, last_update) keep only the file with the latest
# mtime, then select calls not already present in speaker_data.
file_list <- dbGetQuery(pg, "
SET work_mem='2GB';
WITH
latest_mtime AS (
SELECT a.file_name, last_update,
max(DISTINCT mtime) AS mtime
FROM streetevents.calls AS a
INNER JOIN streetevents.call_files
USING (file_path)
GROUP BY a.file_name, last_update),
calls AS (
SELECT file_path, file_name, last_update
FROM streetevents.calls
INNER JOIN latest_mtime
USING (file_name, last_update))
SELECT DISTINCT file_path
FROM calls
WHERE (file_name, last_update) NOT IN
(SELECT file_name, last_update FROM streetevents.speaker_data)")
rs <- dbDisconnect(pg)
# Create function to parse a StreetEvents XML file ----
parseFile <- function(file_path) {
# Resolve the path relative to EDGAR_DIR; the Perl script does the
# actual XML parsing and inserts rows into the database itself.
full_path <- file.path(Sys.getenv("EDGAR_DIR"),
"streetevents_project",
file_path)
# Parse the indicated file using a Perl script
system(paste("streetevents/download_extract/import_speaker_data.pl",
full_path),
intern = TRUE)
}
# Apply parsing function to files ----
# Parallel parse across 8 cores; result is only kept for timing/inspection.
library(parallel)
system.time({
res <- unlist(mclapply(file_list$file_path, parseFile, mc.cores=8))
})
# Add comment to reflect last update ----
# Record the freshness of the table as a Postgres table comment.
library(RPostgreSQL)
pg <- dbConnect(PostgreSQL())
last_update <- dbGetQuery(pg,
"SELECT max(last_update)::text FROM streetevents.calls")
sql <- paste0("COMMENT ON TABLE streetevents.speaker_data IS '",
"Last update on ", last_update , "'")
rs <- dbGetQuery(pg, sql)
dbDisconnect(pg)
| /streetevents/download_extract/import_speaker_data.R | no_license | RichardSaouma/acct_data | R | false | false | 2,420 | r | #!/usr/bin/env Rscript
# Get a list of files that need to be processed ----
library("RPostgreSQL")
pg <- dbConnect(PostgreSQL())
if (!dbExistsTable(pg, c("streetevents", "speaker_data"))) {
dbGetQuery(pg, "
CREATE TABLE streetevents.speaker_data
(
file_name text,
last_update timestamp without time zone,
speaker_name text,
employer text,
role text,
speaker_number integer,
context text,
speaker_text text,
language text
);
SET maintenance_work_mem='3GB';
CREATE INDEX ON streetevents.speaker_data (file_name, last_update);
CREATE INDEX ON streetevents.speaker_data (file_name);")
}
# Note that this assumes that streetevents.calls is up to date.
file_list <- dbGetQuery(pg, "
SET work_mem='2GB';
WITH
latest_mtime AS (
SELECT a.file_name, last_update,
max(DISTINCT mtime) AS mtime
FROM streetevents.calls AS a
INNER JOIN streetevents.call_files
USING (file_path)
GROUP BY a.file_name, last_update),
calls AS (
SELECT file_path, file_name, last_update
FROM streetevents.calls
INNER JOIN latest_mtime
USING (file_name, last_update))
SELECT DISTINCT file_path
FROM calls
WHERE (file_name, last_update) NOT IN
(SELECT file_name, last_update FROM streetevents.speaker_data)")
rs <- dbDisconnect(pg)
# Create function to parse a StreetEvents XML file ----
parseFile <- function(file_path) {
full_path <- file.path(Sys.getenv("EDGAR_DIR"),
"streetevents_project",
file_path)
# Parse the indicated file using a Perl script
system(paste("streetevents/download_extract/import_speaker_data.pl",
full_path),
intern = TRUE)
}
# Apply parsing function to files ----
library(parallel)
system.time({
res <- unlist(mclapply(file_list$file_path, parseFile, mc.cores=8))
})
# Add comment to reflect last update ----
library(RPostgreSQL)
pg <- dbConnect(PostgreSQL())
last_update <- dbGetQuery(pg,
"SELECT max(last_update)::text FROM streetevents.calls")
sql <- paste0("COMMENT ON TABLE streetevents.speaker_data IS '",
"Last update on ", last_update , "'")
rs <- dbGetQuery(pg, sql)
dbDisconnect(pg)
|
predict_word <- function(words){
# Predict the next word for an input phrase using stupid backoff over
# pre-built n-gram tables dfm2_df..dfm5_df found in the calling/global
# environment. Each table is assumed to have underscore-joined n-grams as
# rownames and an `ngram` column holding the full match (as in the
# original hard-coded lookups).
# Returns the first matching n-gram string, or NA when nothing matches.
# Refactor: the four nearly identical table lookups are collapsed into one
# helper plus a backoff loop; behavior is unchanged.
#
# Look up the first (k+1)-gram whose leading k words equal `context`.
# NOTE(review): as in the original, context words are not regex-escaped,
# so tokens containing regex metacharacters may mismatch -- confirm that
# inputs are plain word tokens.
lookup <- function(context) {
tbl <- get(paste0("dfm", length(context) + 1L, "_df"))
re <- paste0("^", paste0(context, collapse = "_"), "_")
tbl[grepl(re, rownames(tbl)), ]$ngram[1]
}
words <- tolower(unlist(strsplit(words, " ")))
n <- length(words)
if (n > 4) words <- words[(n - 3):n] # largest table is the 5-gram one
res <- NA
# Back off: try the longest available context first, dropping the leading
# word until a table produces a match (or every context length fails).
for (k in seq(min(length(words), 4), 1)) {
res <- lookup(tail(words, k))
if (!is.na(res)) break
}
res
}
words <- tolower(unlist(strsplit(words," ")))
n <- length(words)
res <- NA
if(n==1){ #I must read the 2-gram DF
re <- paste0("^",words,"_")
res <- dfm2_df[grepl(re,rownames(dfm2_df)),]$ngram[1]
}
if(n==2) { #I must read the 3-gram DF
gram <- paste0(words,collapse="_")
re <- paste0("^",gram,"_")
res <- dfm3_df[grepl(re,rownames(dfm3_df)),]$ngram[1]
if(is.na(res)){ #check the last word, n = 1
words <- words[2]
re <- paste0("^",words,"_")
res <- dfm2_df[grepl(re,rownames(dfm2_df)),]$ngram[1]
}
}
if(n==3) { #I must read the 4-gram DF
gram <- paste0(words,collapse="_")
re <- paste0("^",gram,"_")
res <- dfm4_df[grepl(re,rownames(dfm4_df)),]$ngram[1]
if(is.na(res)){ #check the last 2 words, n = 2
words <- words[2:3]
gram <- paste0(words,collapse="_")
re <- paste0("^",gram,"_")
res <- dfm3_df[grepl(re,rownames(dfm3_df)),]$ngram[1]
if(is.na(res)){ #check the last word, n = 1
words <- words[2]
re <- paste0("^",words,"_")
res <- dfm2_df[grepl(re,rownames(dfm2_df)),]$ngram[1]
}
}
}
if(n>3) { #I must read the 5-gram DF
if(n>4) words <- words[(n-3):n]
gram <- paste0(words,collapse="_")
re <- paste0("^",gram,"_")
res <- dfm5_df[grepl(re,rownames(dfm5_df)),]$ngram[1]
if(is.na(res)){ #check the last 3 words, n = 3
words <- words[2:4]
gram <- paste0(words,collapse="_")
re <- paste0("^",gram,"_")
res <- dfm4_df[grepl(re,rownames(dfm4_df)),]$ngram[1]
if(is.na(res)){ #check the last 2 words, n = 2
words <- words[2:3]
gram <- paste0(words,collapse="_")
re <- paste0("^",gram,"_")
res <- dfm3_df[grepl(re,rownames(dfm3_df)),]$ngram[1]
if(is.na(res)){ #check the last word, n = 1
words <- words[2]
re <- paste0("^",words,"_")
res <- dfm2_df[grepl(re,rownames(dfm2_df)),]$ngram[1]
}
}
}
}
res
} |
message('|-- server')
Server <- shiny::shinyServer(function(input, output, session) {
# initialize variables
old.nodes = old.edges = NULL
# system time
log.file <- str_c('logs/record_', format(Sys.time(), "%d-%m-%y"), '.txt')
# output
output$network_proxy <- renderVisNetwork({
map <- Crawl(origin, depth, direction, type, private = NULL, up.initial, pop)
PlotGraph(map$objects, map$arrows, map$origin)
})
# observe
build <- observe({
# listener
input$buildGraph
isolate(if(input$direction == 'down') {
direction = 1
} else if(isolate(input$direction) == 'down & up') {
direction = 0
} else {
direction = -1
})
isolate(if(input$private == 'yes' & input$type == 'all') {
private = token
} else {
private = NULL
})
isolate(if(isolate(input$up.initial) == 'yes') {
up.initial = 1
} else {
up.initial = 0
})
depth <- isolate(as.numeric(input$depth))
isolate(if(input$buildGraph) {
map <- isolate(Crawl(input$origin, depth, direction, input$type, private, up.initial, input$pop))
visNetworkProxy('network_proxy') %>%
visUpdateNodes(map$objects %>% unique) %>%
visUpdateEdges(map$arrows %>% unique) %>%
visLayout(improvedLayout = TRUE)
})
visNetworkProxy("network_proxy") %>%
visGetEdges()
visNetworkProxy("network_proxy") %>%
visGetNodes()
isolate(cat(input$origin, '\n', file = log.file, append = TRUE))
})
clear <- observe({
input$clearGraph
isolate(if(input$clearGraph) {
if(!is.null(input$network_proxy_edges) & !is.null(input$network_proxy_nodes)) {
old.edges <-
input$network_proxy_edges %>%
map(~ { dplyr::as_data_frame(rbind(unlist(.x))) }) %>%
bind_rows
old.nodes <-
input$network_proxy_nodes %>%
map(~ { dplyr::as_data_frame(rbind(unlist(.x))) }) %>%
bind_rows
visNetworkProxy('network_proxy') %>%
visRemoveNodes(old.nodes$id) %>%
visRemoveEdges(old.edges$id)
}
})
})
# cleanup
session$onSessionEnded(function() {
build$suspend()
clear$suspend()
unlink(log.file)
})
})
| /_server.R | permissive | desmondhwong/spider | R | false | false | 2,074 | r |
message('|-- server')
# Shiny server: renders an interactive visNetwork graph of crawled objects
# and reacts to the build/clear buttons in the UI.
Server <- shiny::shinyServer(function(input, output, session) {
  # initialize variables
  # Snapshots of the currently drawn graph, refreshed by the "clear" observer.
  old.nodes = old.edges = NULL
  # system time
  # One log file per day; every crawled origin is appended to it below.
  log.file <- str_c('logs/record_', format(Sys.time(), "%d-%m-%y"), '.txt')
  # output
  # Initial render uses `origin`, `depth`, `direction`, `type`, `up.initial`
  # and `pop` from an enclosing scope (defined elsewhere) -- TODO confirm.
  output$network_proxy <- renderVisNetwork({
    map <- Crawl(origin, depth, direction, type, private = NULL, up.initial, pop)
    PlotGraph(map$objects, map$arrows, map$origin)
  })
  # observe
  # Rebuild the graph when the "buildGraph" button is pressed. All other
  # inputs are read inside isolate() so only the button retriggers this.
  build <- observe({
    # listener
    input$buildGraph
    # Map the direction choice onto the numeric code passed to Crawl():
    # 1 = down, 0 = down & up, -1 = up (the fallback branch).
    isolate(if(input$direction == 'down') {
      direction = 1
    } else if(isolate(input$direction) == 'down & up') {
      direction = 0
    } else {
      direction = -1
    })
    # Pass the auth token only when private access is requested for all types.
    isolate(if(input$private == 'yes' & input$type == 'all') {
      private = token
    } else {
      private = NULL
    })
    isolate(if(isolate(input$up.initial) == 'yes') {
      up.initial = 1
    } else {
      up.initial = 0
    })
    depth <- isolate(as.numeric(input$depth))
    isolate(if(input$buildGraph) {
      map <- isolate(Crawl(input$origin, depth, direction, input$type, private, up.initial, input$pop))
      visNetworkProxy('network_proxy') %>%
        visUpdateNodes(map$objects %>% unique) %>%
        visUpdateEdges(map$arrows %>% unique) %>%
        visLayout(improvedLayout = TRUE)
    })
    # Ask the client to push the current nodes/edges back to the server;
    # they arrive later as input$network_proxy_edges / _nodes.
    visNetworkProxy("network_proxy") %>%
      visGetEdges()
    visNetworkProxy("network_proxy") %>%
      visGetNodes()
    # Audit trail: record which origin was crawled.
    isolate(cat(input$origin, '\n', file = log.file, append = TRUE))
  })
  # Remove every node and edge currently drawn when "clearGraph" is pressed.
  clear <- observe({
    input$clearGraph
    isolate(if(input$clearGraph) {
      if(!is.null(input$network_proxy_edges) & !is.null(input$network_proxy_nodes)) {
        # The client returns nested lists; flatten each element into a
        # one-row data frame and stack them.
        old.edges <-
          input$network_proxy_edges %>%
          map(~ { dplyr::as_data_frame(rbind(unlist(.x))) }) %>%
          bind_rows
        old.nodes <-
          input$network_proxy_nodes %>%
          map(~ { dplyr::as_data_frame(rbind(unlist(.x))) }) %>%
          bind_rows
        visNetworkProxy('network_proxy') %>%
          visRemoveNodes(old.nodes$id) %>%
          visRemoveEdges(old.edges$id)
      }
    })
  })
  # cleanup
  # Suspend both observers and delete the day's log when the session ends.
  session$onSessionEnded(function() {
    build$suspend()
    clear$suspend()
    unlink(log.file)
  })
})
|
library("aroma.seq")
# Regression test for aroma.seq::readSeqFile(): summaries must be
# reproducible under full hashing, under fixed seeds, and with sampling
# disabled; the qrqc plot helpers must run on the resulting summary.
fullTest <- isPackageInstalled("qrqc")
if (fullTest) {
  library("qrqc")
  # Disambiguate: both aroma.seq and qrqc export readSeqFile().
  readSeqFile <- aroma.seq::readSeqFile
  # Build a FastqDataSet from qrqc's bundled example files.
  path <- system.file("extdata", package="qrqc", mustWork=TRUE)
  struct <- list(pattern=".*/extdata/([^/]*)", replacement=c(dataset="qrqc", organism="foo", sample="\\1"))
  fqs <- FastqDataSet$byPath(path, struct=struct, paired=FALSE)
  print(fqs)
  fq <- fqs[[indexOf(fqs, "test-contam")]]
  print(fq)
  dataR <- readSeqFile(fq)
  print(dataR)
  # Summarize all reads (hash.prop=1)
  # With full hashing the summary must be identical across repeated calls.
  dataA <- readSeqFile(fq, hash.prop=1, cache=FALSE)
  dataB <- readSeqFile(fq, hash.prop=1, cache=FALSE)
  stopifnot(all.equal(dataB, dataA))
  # Summarize sampled subset of reads (using identical seeds)
  dataA <- readSeqFile(fq, seed=0xBEEF, cache=FALSE)
  dataB <- readSeqFile(fq, seed=0xBEEF, cache=FALSE)
  stopifnot(all.equal(dataB, dataA))
  # Summarize without any sampling (hash=FALSE + kmer=FALSE)
  dataA <- readSeqFile(fq, hash=FALSE, kmer=FALSE, cache=FALSE)
  dataB <- readSeqFile(fq, hash=FALSE, kmer=FALSE, cache=FALSE)
  stopifnot(all.equal(dataB, dataA))
  # Plotting
  # Smoke-test every qrqc plot helper on a seeded summary.
  data <- readSeqFile(fq, seed=0xBEEF)
  gg <- basePlot(data)
  print(gg)
  gg <- gcPlot(data)
  print(gg)
  gg <- qualPlot(data) ## Needs mgcv via ggplot2
  print(gg)
  gg <- seqlenPlot(data)
  print(gg)
  gg <- kmerKLPlot(data)
  print(gg)
} # if (fullTest)
| /tests/readSeqFile.R | no_license | HenrikBengtsson/aroma.seq | R | false | false | 1,374 | r | library("aroma.seq")
# Regression test for aroma.seq::readSeqFile(): summaries must be
# reproducible under full hashing, under fixed seeds, and with sampling
# disabled; the qrqc plot helpers must run on the resulting summary.
fullTest <- isPackageInstalled("qrqc")
if (fullTest) {
  library("qrqc")
  # Disambiguate: both aroma.seq and qrqc export readSeqFile().
  readSeqFile <- aroma.seq::readSeqFile
  # Build a FastqDataSet from qrqc's bundled example files.
  path <- system.file("extdata", package="qrqc", mustWork=TRUE)
  struct <- list(pattern=".*/extdata/([^/]*)", replacement=c(dataset="qrqc", organism="foo", sample="\\1"))
  fqs <- FastqDataSet$byPath(path, struct=struct, paired=FALSE)
  print(fqs)
  fq <- fqs[[indexOf(fqs, "test-contam")]]
  print(fq)
  dataR <- readSeqFile(fq)
  print(dataR)
  # Summarize all reads (hash.prop=1)
  # With full hashing the summary must be identical across repeated calls.
  dataA <- readSeqFile(fq, hash.prop=1, cache=FALSE)
  dataB <- readSeqFile(fq, hash.prop=1, cache=FALSE)
  stopifnot(all.equal(dataB, dataA))
  # Summarize sampled subset of reads (using identical seeds)
  dataA <- readSeqFile(fq, seed=0xBEEF, cache=FALSE)
  dataB <- readSeqFile(fq, seed=0xBEEF, cache=FALSE)
  stopifnot(all.equal(dataB, dataA))
  # Summarize without any sampling (hash=FALSE + kmer=FALSE)
  dataA <- readSeqFile(fq, hash=FALSE, kmer=FALSE, cache=FALSE)
  dataB <- readSeqFile(fq, hash=FALSE, kmer=FALSE, cache=FALSE)
  stopifnot(all.equal(dataB, dataA))
  # Plotting
  # Smoke-test every qrqc plot helper on a seeded summary.
  data <- readSeqFile(fq, seed=0xBEEF)
  gg <- basePlot(data)
  print(gg)
  gg <- gcPlot(data)
  print(gg)
  gg <- qualPlot(data) ## Needs mgcv via ggplot2
  print(gg)
  gg <- seqlenPlot(data)
  print(gg)
  gg <- kmerKLPlot(data)
  print(gg)
} # if (fullTest)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dashboard.R
\name{driversS}
\alias{driversS}
\title{Models fire behaviour across ranged variables using species specific details}
\usage{
driversS(
base.params,
a,
db.path = "out_mc.db",
jitters,
windMin,
windReps,
windStep,
moistureMultiplier,
moistureSD,
moistureRange,
Variation,
leafVar,
updateProgress = NULL
)
}
\arguments{
\item{base.params}{Input parameter file}
\item{a}{A unique identifier for the record being run}
\item{db.path}{Name of the exported database}
\item{jitters}{Number of repetitions for each row in the weather table}
\item{windMin}{Lowest wind velocity tested (km/h)}
\item{windReps}{Number of wind speeds tested}
\item{windStep}{Gap (km/h) between wind steps}
\item{moistureMultiplier}{Multiplies all LFMC values by this number}
\item{moistureSD}{Standard deviation of moisture}
\item{moistureRange}{Truncates variability by +/- mean * range}
\item{Variation}{A database of plant variability in traits, with the fields:
record - a unique, consecutively numbered identifier per site
species - the name of the species, which will call trait data from 'default.species.params'
stratum - numeric value from 1 to 4, counting from lowest stratum
Hs - Standard deviation of plant height variations
Hr - Truncates plant height variability by +/- Hr * height}
\item{leafVar}{Variation around input leaf dimensions, equivalent to l}
\item{updateProgress}{Progress bar for use in the dashboard
Private function under development}
}
\description{
Models fire behaviour across ranged variables using species specific details
}
| /man/driversS.Rd | no_license | S-Homayounpour/frame_r | R | false | true | 1,658 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dashboard.R
\name{driversS}
\alias{driversS}
\title{Models fire behaviour across ranged variables using species specific details}
\usage{
driversS(
base.params,
a,
db.path = "out_mc.db",
jitters,
windMin,
windReps,
windStep,
moistureMultiplier,
moistureSD,
moistureRange,
Variation,
leafVar,
updateProgress = NULL
)
}
\arguments{
\item{base.params}{Input parameter file}
\item{a}{A unique identifier for the record being run}
\item{db.path}{Name of the exported database}
\item{jitters}{Number of repetitions for each row in the weather table}
\item{windMin}{Lowest wind velocity tested (km/h)}
\item{windReps}{Number of wind speeds tested}
\item{windStep}{Gap (km/h) between wind steps}
\item{moistureMultiplier}{Multiplies all LFMC values by this number}
\item{moistureSD}{Standard deviation of moisture}
\item{moistureRange}{Truncates variability by +/- mean * range}
\item{Variation}{A database of plant variability in traits, with the fields:
record - a unique, consecutively numbered identifier per site
species - the name of the species, which will call trait data from 'default.species.params'
stratum - numeric value from 1 to 4, counting from lowest stratum
Hs - Standard deviation of plant height variations
Hr - Truncates plant height variability by +/- Hr * height}
\item{leafVar}{Variation around input leaf dimensions, equivalent to l}
\item{updateProgress}{Progress bar for use in the dashboard
Private function under development}
}
\description{
Models fire behaviour across ranged variables using species specific details
}
|
# MVPA cross-decoding analysis: per participant, train an SVM on the
# gestalt vs. no-gestalt contrast and test whether the classifier
# transfers to the subitizing (<= 4 items) vs. estimation contrast.
rm(list=ls(all=TRUE))
setwd("G:/MVPA")

require(e1071)

filePathGestalt <- "G:/MVPA/betaFilesGestalt"
filesGestalt <- list.files(filePathGestalt)
# Participant IDs are the prefix before the first underscore of the file names.
pids <- unique(sapply(strsplit(filesGestalt, "_"), function(x) x[[1]][1]))
filesGestalt <- list.files(filePathGestalt, full.names = TRUE)
filePathSubitizing <- "G:/MVPA/betaFilesSubitizing"
filesSubitizing <- list.files(filePathSubitizing, full.names = TRUE)

# Read all beta files of one participant/condition, tag the columns with the
# label stored in Var2, sort rows by trial and bind the files column-wise.
ReadBetaSet <- function(pidFiles) {
  readOne <- function(path) {
    df <- read.table(path, sep = "\t", header = TRUE)
    tag <- df$Var2[1]
    colnames(df) <- c(paste0("condition_", tag),
                      paste0("hemisphere_", tag),
                      paste0("trial_", tag),
                      paste0("voxel_", tag, "_", seq_len(ncol(df) - 3)))
    df[order(df[, 3]), ]
  }
  do.call(cbind, lapply(pidFiles, readOne))
}

acc <- NULL         # transfer accuracy (gestalt model on subitizing data)
accGestalt <- NULL  # 5-fold cross-validated training accuracy
predDf <- NULL      # trial-level predictions for the mixed model below

for (pid in pids) {
  print(pid)
  dfGestalt <- ReadBetaSet(filesGestalt[grep(pid, filesGestalt)])
  dfSubitizing <- ReadBetaSet(filesSubitizing[grep(pid, filesSubitizing)])

  # Labels: 1 = gestalt, 0 = no_gestalt.
  yGestalt <- ifelse(dfGestalt[, 1] == "gestalt", 1, 0)
  XGestalt <- dfGestalt[, grep("voxel", colnames(dfGestalt))]
  # Numerosity is encoded after "can" in the condition label;
  # 1 = subitizing range (<= 4 items), 0 = estimation range.
  num <- as.numeric(sapply(dfSubitizing[, 1], function(c) strsplit(as.character(c), "can")[[1]][2]))
  ySubitizing <- ifelse(num <= 4, 1, 0)
  XSubitizing <- dfSubitizing[, grep("voxel", colnames(dfSubitizing))]

  # Keep only voxels that contain data in both sets, in matching order.
  XGestalt <- XGestalt[colSums(!is.na(XGestalt)) > 0]
  XSubitizing <- XSubitizing[colSums(!is.na(XSubitizing)) > 0]
  cols <- intersect(colnames(XSubitizing), colnames(XGestalt))
  XGestalt <- as.matrix(XGestalt[, cols])
  XSubitizing <- as.matrix(XSubitizing[, cols])
  print(all(colnames(XGestalt) == colnames(XSubitizing)))

  yGestalt <- factor(yGestalt)
  ySubitizing <- factor(ySubitizing)

  # Fixed seed so the 5-fold cross-validation split is reproducible.
  set.seed(345)
  svm.model <- svm(yGestalt ~ ., data = XGestalt, probability = TRUE,
                   kernel = "radial", cross = 5, cost = 100)
  accGestalt <- c(accGestalt, mean(svm.model$accuracies))
  pred <- predict(svm.model, newdata = XSubitizing)
  acc <- c(acc, mean(ySubitizing == pred))
  print(mean(ySubitizing == pred))
  predDf <- rbind(predDf,
                  data.frame(pid = rep(pid, length(pred)),
                             pred = (ySubitizing == pred) * 1,
                             cor = ySubitizing, svm = pred))
}

acc
# Is the transfer accuracy above chance (0.5) across participants?
t.test(acc, mu = 0.5)

require(lme4)
# Trial-level test with a random intercept per participant.
glmer1 <- glmer(pred ~ 1 + (1 | pid), predDf, family = binomial)
summary(glmer1)

# Cross-validated training accuracies are in percent, hence mu = 50.
t.test(accGestalt, mu = 50)
accGestalt
| /mvpaAnalysisSVM_cross_subEst.R | no_license | jorennig/MVPA | R | false | false | 4,011 | r | rm(list=ls(all=TRUE))
# MVPA cross-decoding analysis: per participant, train an SVM on the
# gestalt vs. no-gestalt contrast and test whether the classifier
# transfers to the subitizing (<= 4 items) vs. estimation contrast.
setwd("G:/MVPA")

require(e1071)

filePathGestalt <- "G:/MVPA/betaFilesGestalt"
filesGestalt <- list.files(filePathGestalt)
# Participant IDs are the prefix before the first underscore of the file names.
pids <- unique(sapply(strsplit(filesGestalt, "_"), function(x) x[[1]][1]))
filesGestalt <- list.files(filePathGestalt, full.names = TRUE)
filePathSubitizing <- "G:/MVPA/betaFilesSubitizing"
filesSubitizing <- list.files(filePathSubitizing, full.names = TRUE)

# Read all beta files of one participant/condition, tag the columns with the
# label stored in Var2, sort rows by trial and bind the files column-wise.
ReadBetaSet <- function(pidFiles) {
  readOne <- function(path) {
    df <- read.table(path, sep = "\t", header = TRUE)
    tag <- df$Var2[1]
    colnames(df) <- c(paste0("condition_", tag),
                      paste0("hemisphere_", tag),
                      paste0("trial_", tag),
                      paste0("voxel_", tag, "_", seq_len(ncol(df) - 3)))
    df[order(df[, 3]), ]
  }
  do.call(cbind, lapply(pidFiles, readOne))
}

acc <- NULL         # transfer accuracy (gestalt model on subitizing data)
accGestalt <- NULL  # 5-fold cross-validated training accuracy
predDf <- NULL      # trial-level predictions for the mixed model below

for (pid in pids) {
  print(pid)
  dfGestalt <- ReadBetaSet(filesGestalt[grep(pid, filesGestalt)])
  dfSubitizing <- ReadBetaSet(filesSubitizing[grep(pid, filesSubitizing)])

  # Labels: 1 = gestalt, 0 = no_gestalt.
  yGestalt <- ifelse(dfGestalt[, 1] == "gestalt", 1, 0)
  XGestalt <- dfGestalt[, grep("voxel", colnames(dfGestalt))]
  # Numerosity is encoded after "can" in the condition label;
  # 1 = subitizing range (<= 4 items), 0 = estimation range.
  num <- as.numeric(sapply(dfSubitizing[, 1], function(c) strsplit(as.character(c), "can")[[1]][2]))
  ySubitizing <- ifelse(num <= 4, 1, 0)
  XSubitizing <- dfSubitizing[, grep("voxel", colnames(dfSubitizing))]

  # Keep only voxels that contain data in both sets, in matching order.
  XGestalt <- XGestalt[colSums(!is.na(XGestalt)) > 0]
  XSubitizing <- XSubitizing[colSums(!is.na(XSubitizing)) > 0]
  cols <- intersect(colnames(XSubitizing), colnames(XGestalt))
  XGestalt <- as.matrix(XGestalt[, cols])
  XSubitizing <- as.matrix(XSubitizing[, cols])
  print(all(colnames(XGestalt) == colnames(XSubitizing)))

  yGestalt <- factor(yGestalt)
  ySubitizing <- factor(ySubitizing)

  # Fixed seed so the 5-fold cross-validation split is reproducible.
  set.seed(345)
  svm.model <- svm(yGestalt ~ ., data = XGestalt, probability = TRUE,
                   kernel = "radial", cross = 5, cost = 100)
  accGestalt <- c(accGestalt, mean(svm.model$accuracies))
  pred <- predict(svm.model, newdata = XSubitizing)
  acc <- c(acc, mean(ySubitizing == pred))
  print(mean(ySubitizing == pred))
  predDf <- rbind(predDf,
                  data.frame(pid = rep(pid, length(pred)),
                             pred = (ySubitizing == pred) * 1,
                             cor = ySubitizing, svm = pred))
}

acc
# Is the transfer accuracy above chance (0.5) across participants?
t.test(acc, mu = 0.5)

require(lme4)
# Trial-level test with a random intercept per participant.
glmer1 <- glmer(pred ~ 1 + (1 | pid), predDf, family = binomial)
summary(glmer1)

# Cross-validated training accuracies are in percent, hence mu = 50.
t.test(accGestalt, mu = 50)
accGestalt
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{entrez_summary}
\alias{entrez_summary}
\title{Get summaries of objects in NCBI datasets from a unique ID}
\usage{
entrez_summary(db, ...)
}
\arguments{
\item{db}{character Name of the database to search for}
\item{\dots}{character Additional terms to add to the request. Requires either
id (unique id(s) for records in a given database) or WebEnv (a character
containing a cookie created by a previous entrez query).}
}
\value{
A list of esummary records (if multiple IDs are passed) or a single
record.
file XMLInternalDocument xml file resulting from search, parsed with
\code{\link{xmlTreeParse}}
}
\description{
Constructs a query from the given arguments, including a database name and a
list of unique IDs for that database, then downloads the XML document
created by that query. The XML document is parsed with \code{\link{xmlTreeParse}}.
}
\examples{
pop_ids = c("307082412", "307075396", "307075338", "307075274")
pop_summ <- entrez_summary(db="popset", id=pop_ids)
sapply(pop_summ, "[[", "Title")
# clinvar example
res <- entrez_search(db = "clinvar", term = "BRCA1")
cv <- entrez_summary(db="clinvar", id=res$ids)
cv[[1]] # get the names of the list for each result
sapply(cv, "[[", "title") # titles
lapply(cv, "[[", "trait_set")[1:2] # trait_set
sapply(cv, "[[", "gene_sort") # gene_sort
}
| /man/entrez_summary.Rd | no_license | yexiang2046/rentrez | R | false | false | 1,353 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{entrez_summary}
\alias{entrez_summary}
\title{Get summaries of objects in NCBI datasets from a unique ID}
\usage{
entrez_summary(db, ...)
}
\arguments{
\item{db}{character Name of the database to search for}
\item{\dots}{character Additional terms to add to the request. Requires either
id (unique id(s) for records in a given database) or WebEnv (a character
containing a cookie created by a previous entrez query).}
}
\value{
A list of esummary records (if multiple IDs are passed) or a single
record.
file XMLInternalDocument xml file resulting from search, parsed with
\code{\link{xmlTreeParse}}
}
\description{
Constructs a query from the given arguments, including a database name and a
list of unique IDs for that database, then downloads the XML document
created by that query. The XML document is parsed with \code{\link{xmlTreeParse}}.
}
\examples{
pop_ids = c("307082412", "307075396", "307075338", "307075274")
pop_summ <- entrez_summary(db="popset", id=pop_ids)
sapply(pop_summ, "[[", "Title")
# clinvar example
res <- entrez_search(db = "clinvar", term = "BRCA1")
cv <- entrez_summary(db="clinvar", id=res$ids)
cv[[1]] # get the names of the list for each result
sapply(cv, "[[", "title") # titles
lapply(cv, "[[", "trait_set")[1:2] # trait_set
sapply(cv, "[[", "gene_sort") # gene_sort
}
|
####### song genetic, preference imprinted from mother
# Local stability analysis: compute the equilibrium variances and build the
# 2x2 Jacobian of the song/preference dynamics (eigenvalues inspected below).
sigma2=1
# Equilibrium preference variance: sigma_y^2 = (5 + 2*sqrt(6))/3 * sigma^2.
sigmay2 = (5+2*sqrt(6))/3*sigma2+0
# Equilibrium song variance: real part of the second root of
# 3*x^2 + (5*sigma2 - 3*sigmay2)*x + 2*sigma2^2 = 0.
sigmax2 = Re(polyroot(c(2*sigma2^2,5*sigma2-3*sigmay2,3))[2])
# Jacobian entries filled row-wise (byrow=TRUE).
J= matrix(c(1+sigmax2/4*(sigmay2-sigma2-4*sigmax2*sigmay2/(sigma2+sigmax2))/(sigma2+sigmax2)^2,sigmax2/2/(sigma2+sigmax2),1/2*sigma2*sigmay2/(sigma2+sigmax2)^2,1/2),nrow=2,byrow=TRUE)
print(eigen(J)) | /jacobian.R | no_license | ebrusherb/song_learning_evolution | R | false | false | 361 | r | ####### song genetic, preference imprinted from mother
# Song is genetic, preference is imprinted from the mother.
# Stability analysis: eigenvalues of the Jacobian of the variance dynamics.
sigma2 <- 1
# Equilibrium preference variance.
sigmay2 <- (5 + 2 * sqrt(6)) / 3 * sigma2
# Equilibrium song variance: real part of the second root of
# 3*x^2 + (5*sigma2 - 3*sigmay2)*x + 2*sigma2^2 = 0.
sigmax2 <- Re(polyroot(c(2 * sigma2^2, 5 * sigma2 - 3 * sigmay2, 3))[2])
# Jacobian entries, row-wise.
j11 <- 1 + sigmax2 / 4 *
  (sigmay2 - sigma2 - 4 * sigmax2 * sigmay2 / (sigma2 + sigmax2)) /
  (sigma2 + sigmax2)^2
j12 <- sigmax2 / 2 / (sigma2 + sigmax2)
j21 <- 1 / 2 * sigma2 * sigmay2 / (sigma2 + sigmax2)^2
j22 <- 1 / 2
J <- matrix(c(j11, j12, j21, j22), nrow = 2, byrow = TRUE)
print(eigen(J))
utils::globalVariables(c("silentText","silentGraph","silentLegend","initialType","ar.orders","i.orders","ma.orders"));
#' State Space ARIMA
#'
#' Function selects the best State Space ARIMA based on information criteria,
#' using fancy branch and bound mechanism. The resulting model can be not
#' optimal in IC meaning, but it is usually reasonable.
#'
#' The function constructs bunch of ARIMAs in Single Source of Error
#' state space form (see \link[smooth]{ssarima} documentation) and selects the
#' best one based on information criterion. The mechanism is described in
#' Svetunkov & Boylan (2019).
#'
#' Due to the flexibility of the model, multiple seasonalities can be used. For
#' example, something crazy like this can be constructed:
#' SARIMA(1,1,1)(0,1,1)[24](2,0,1)[24*7](0,0,1)[24*30], but the estimation may
#' take a lot of time... It is recommended to use \link[smooth]{auto.msarima} in
#' cases with more than one seasonality and high frequencies.
#'
#' For some more information about the model and its implementation, see the
#' vignette: \code{vignette("ssarima","smooth")}
#'
#' @template ssBasicParam
#' @template ssAdvancedParam
#' @template ssXregParam
#' @template ssIntervals
#' @template ssInitialParam
#' @template ssAuthor
#' @template ssKeywords
#'
#' @template ssGeneralRef
#' @template ssIntermittentRef
#' @template ssARIMARef
#'
#' @param orders List of maximum orders to check, containing vector variables
#' \code{ar}, \code{i} and \code{ma}. If a variable is not provided in the
#' list, then it is assumed to be equal to zero. At least one variable should
#' have the same length as \code{lags}.
#' @param lags Defines lags for the corresponding orders (see examples). The
#' length of \code{lags} must correspond to the length of \code{orders}. There
#' is no restrictions on the length of \code{lags} vector.
#' @param combine If \code{TRUE}, then resulting ARIMA is combined using AIC
#' weights.
#' @param fast If \code{TRUE}, then some of the orders of ARIMA are
#' skipped. This is not advised for models with \code{lags} greater than 12.
#' @param constant If \code{NULL}, then the function will check if constant is
#' needed. if \code{TRUE}, then constant is forced in the model. Otherwise
#' constant is not used.
#' @param ... Other non-documented parameters. For example \code{FI=TRUE} will
#' make the function also produce Fisher Information matrix, which then can be
#' used to calculated variances of parameters of the model. Maximum orders to
#' check can also be specified separately, however \code{orders} variable must
#' be set to \code{NULL}: \code{ar.orders} - Maximum order of AR term. Can be
#' vector, defining max orders of AR, SAR etc. \code{i.orders} - Maximum order
#' of I. Can be vector, defining max orders of I, SI etc. \code{ma.orders} -
#' Maximum order of MA term. Can be vector, defining max orders of MA, SMA etc.
#' @return Object of class "smooth" is returned. See \link[smooth]{ssarima} for
#' details.
#' @seealso \code{\link[smooth]{es}, \link[smooth]{ces},
#' \link[smooth]{sim.es}, \link[smooth]{gum}, \link[smooth]{ssarima}}
#'
#' @examples
#'
#' \donttest{x <- rnorm(118,100,3)}
#'
#' # The best ARIMA for the data
#' \donttest{ourModel <- auto.ssarima(x,orders=list(ar=c(2,1),i=c(1,1),ma=c(2,1)),lags=c(1,12),
#' h=18,holdout=TRUE,interval="np")}
#'
#' # The other one using optimised states
#' \donttest{auto.ssarima(x,orders=list(ar=c(3,2),i=c(2,1),ma=c(3,2)),lags=c(1,12),
#' initial="o",h=18,holdout=TRUE)}
#'
#' # And now combined ARIMA
#' \donttest{auto.ssarima(x,orders=list(ar=c(3,2),i=c(2,1),ma=c(3,2)),lags=c(1,12),
#' combine=TRUE,h=18,holdout=TRUE)}
#'
#' \donttest{summary(ourModel)
#' forecast(ourModel)
#' plot(forecast(ourModel))}
#'
#'
#' @export auto.ssarima
auto.ssarima <- function(y, orders=list(ar=c(3,3),i=c(2,1),ma=c(3,3)), lags=c(1,frequency(y)),
combine=FALSE, fast=TRUE, constant=NULL,
initial=c("backcasting","optimal"), ic=c("AICc","AIC","BIC","BICc"),
loss=c("likelihood","MSE","MAE","HAM","MSEh","TMSE","GTMSE","MSCE"),
h=10, holdout=FALSE, cumulative=FALSE,
interval=c("none","parametric","likelihood","semiparametric","nonparametric"), level=0.95,
bounds=c("admissible","none"),
silent=c("all","graph","legend","output","none"),
xreg=NULL, regressors=c("use","select"), initialX=NULL, ...){
# Function estimates several ssarima models and selects the best one using the selected information criterion.
#
# Copyright (C) 2015 - Inf Ivan Svetunkov
# Start measuring the time of calculations
startTime <- Sys.time();
### Depricate the old parameters
ellipsis <- list(...)
ellipsis <- depricator(ellipsis, "xregDo", "regressors");
updateX <- FALSE;
persistenceX <- transitionX <- NULL;
occurrence <- "none";
oesmodel <- "MNN";
# Add all the variables in ellipsis to current environment
list2env(ellipsis,environment());
if(!is.null(orders)){
arMax <- orders$ar;
iMax <- orders$i;
maMax <- orders$ma;
}
# If orders are provided in ellipsis via arMax, write them down.
if(exists("ar.orders",inherits=FALSE)){
if(is.null(ar.orders)){
arMax <- 0;
}
else{
arMax <- ar.orders;
}
}
else{
if(is.null(orders)){
arMax <- 0;
}
}
if(exists("i.orders",inherits=FALSE)){
if(is.null(i.orders)){
iMax <- 0;
}
else{
iMax <- i.orders;
}
}
else{
if(is.null(orders)){
iMax <- 0;
}
}
if(exists("ma.orders",inherits=FALSE)){
if(is.null(ma.orders)){
maMax <- 0;
}
else{
maMax <- ma.orders
}
}
else{
if(is.null(orders)){
maMax <- 0;
}
}
##### Set environment for ssInput and make all the checks #####
environment(ssAutoInput) <- environment();
ssAutoInput("auto.ssarima",ParentEnvironment=environment());
if(is.null(constant)){
constantCheck <- TRUE;
constantValue <- TRUE;
}
else{
if(is.logical(constant)){
constantCheck <- FALSE;
constantValue <- constant;
}
else{
constant <- NULL;
constantCheck <- TRUE;
constantValue <- TRUE;
warning("Strange value of constant parameter. We changed it to default value.");
}
}
if(any(is.complex(c(arMax,iMax,maMax,lags)))){
stop("Come on! Be serious! This is ARIMA, not CES!",call.=FALSE);
}
if(any(c(arMax,iMax,maMax)<0)){
stop("Funny guy! How am I gonna construct a model with negative order?",call.=FALSE);
}
if(any(c(lags)<0)){
stop("Right! Why don't you try complex lags then, mister smart guy?",call.=FALSE);
}
# If there are zero lags, drop them
if(any(lags==0)){
arMax <- arMax[lags!=0];
iMax <- iMax[lags!=0];
maMax <- maMax[lags!=0];
lags <- lags[lags!=0];
}
# Define maxorder and make all the values look similar (for the polynomials)
maxorder <- max(length(arMax),length(iMax),length(maMax));
if(length(arMax)!=maxorder){
arMax <- c(arMax,rep(0,maxorder-length(arMax)));
}
if(length(iMax)!=maxorder){
iMax <- c(iMax,rep(0,maxorder-length(iMax)));
}
if(length(maMax)!=maxorder){
maMax <- c(maMax,rep(0,maxorder-length(maMax)));
}
# If zeroes are defined as orders for some lags, drop them.
if(any((arMax + iMax + maMax)==0)){
orders2leave <- (arMax + iMax + maMax)!=0;
if(all(!orders2leave)){
orders2leave <- lags==min(lags);
}
arMax <- arMax[orders2leave];
iMax <- iMax[orders2leave];
maMax <- maMax[orders2leave];
lags <- lags[orders2leave];
}
# Get rid of duplicates in lags
if(length(unique(lags))!=length(lags)){
if(dataFreq!=1){
warning(paste0("'lags' variable contains duplicates: (",paste0(lags,collapse=","),"). Getting rid of some of them."),call.=FALSE);
}
lagsNew <- unique(lags);
arMaxNew <- iMaxNew <- maMaxNew <- lagsNew;
for(i in 1:length(lagsNew)){
arMaxNew[i] <- max(arMax[which(lags==lagsNew[i])],na.rm=TRUE);
iMaxNew[i] <- max(iMax[which(lags==lagsNew[i])],na.rm=TRUE);
maMaxNew[i] <- max(maMax[which(lags==lagsNew[i])],na.rm=TRUE);
}
arMax <- arMaxNew;
iMax <- iMaxNew;
maMax <- maMaxNew;
lags <- lagsNew;
}
# Order things, so we would deal with the lowest level of seasonality first
arMax <- arMax[order(lags,decreasing=FALSE)];
iMax <- iMax[order(lags,decreasing=FALSE)];
maMax <- maMax[order(lags,decreasing=FALSE)];
lags <- sort(lags,decreasing=FALSE);
# 1 stands for constant, the other one stands for variance
nParamMax <- (1 + max(arMax %*% lags + iMax %*% lags,maMax %*% lags)
+ sum(arMax) + sum(maMax) + constantCheck);
# Try to figure out if the number of parameters can be tuned in order to fit something smaller on small samples
# Don't try to fix anything if the number of seasonalities is greater than 2
if(length(lags)<=2){
if(obsNonzero <= nParamMax){
armaLength <- length(arMax);
while(obsNonzero <= nParamMax){
if(any(c(arMax[armaLength],maMax[armaLength])>0)){
arMax[armaLength] <- max(0,arMax[armaLength] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
if(obsNonzero <= nParamMax){
maMax[armaLength] <- max(0,maMax[armaLength] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
else{
if(armaLength==2){
arMax[1] <- arMax[1] - 1;
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
if(obsNonzero <= nParamMax){
maMax[1] <- maMax[1] - 1;
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
else{
break;
}
}
if(all(c(arMax,maMax)==0)){
if(iMax[armaLength]>0){
iMax[armaLength] <- max(0,iMax[armaLength] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
else if(iMax[1]>0){
if(obsNonzero <= nParamMax){
iMax[1] <- max(0,iMax[1] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
else{
break;
}
}
}
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
if(obsNonzero <= nParamMax){
message(paste0("Not enough observations for the reasonable fit. Number of possible parameters is ",
nParamMax," while the number of observations is ",obsNonzero,"!"));
stop("Redefine maximum orders and try again.",call.=FALSE)
}
# 1 stands for constant/no constant, another one stands for ARIMA(0,0,0)
if(all(maMax==0)){
nModels <- prod(iMax + 1) * (1 + sum(arMax)) + constantCheck;
}
else{
nModels <- prod(iMax + 1) * (1 + sum(maMax*(1 + sum(arMax)))) + constantCheck;
}
testModel <- list(NA);
# Array with elements x maxorders x horizon x point/lower/upper
if(combine){
testForecasts <- list(NA);
testFitted <- list(NA);
testICs <- list(NA);
testLevels <- list(NA);
testStates <- list(NA);
testTransition <- list(NA);
testPersistence <- list(NA);
}
ICValue <- 1E+100;
m <- 0;
# constant <- TRUE;
lagsTest <- maTest <- arTest <- rep(0,length(lags));
arBest <- maBest <- iBest <- rep(0,length(lags));
arBestLocal <- maBestLocal <- arBest;
#### Function corrects IC taking number of parameters on previous step ####
# Recover the log-likelihood implied by an information criterion that was
# computed with nParam parameters, then return the same criterion
# re-evaluated as if nParamNew parameters had been used. The criterion type
# is taken from `ic` in the enclosing scope ("AIC", "AICc", "BIC" or "BICc").
#
#   icValue    - IC value of the previously estimated model;
#   nParam     - number of parameters when icValue was computed;
#   obsNonzero - number of non-zero observations (sample size n);
#   nParamNew  - number of parameters to correct the criterion to.
# Returns the corrected IC value.
icCorrector <- function(icValue, nParam, obsNonzero, nParamNew){
    if(ic=="AIC"){
        # AIC = 2k - 2*logLik  =>  logLik = (2k - AIC)/2
        llikelihood <- (2*nParam - icValue)/2;
        correction <- 2*nParamNew - 2*llikelihood;
    }
    else if(ic=="AICc"){
        # AICc = 2k*n/(n-k-1) - 2*logLik
        llikelihood <- (2*nParam*obsNonzero/(obsNonzero-nParam-1) - icValue)/2;
        correction <- 2*nParamNew*obsNonzero/(obsNonzero-nParamNew-1) - 2*llikelihood;
    }
    else if(ic=="BIC"){
        # BIC = k*log(n) - 2*logLik
        llikelihood <- (nParam*log(obsNonzero) - icValue)/2;
        correction <- nParamNew*log(obsNonzero) - 2*llikelihood;
    }
    else if(ic=="BICc"){
        # BICc = k*log(n)*n/(n-k-1) - 2*logLik
        llikelihood <- ((nParam*log(obsNonzero)*obsNonzero)/(obsNonzero-nParam-1) - icValue)/2;
        correction <- (nParamNew*log(obsNonzero)*obsNonzero)/(obsNonzero-nParamNew-1) - 2*llikelihood;
    }
    return(correction);
}
if(!silentText){
cat("Estimation progress: ");
}
### If for some reason we have model with zeroes for orders, return it.
if(all(c(arMax,iMax,maMax)==0)){
cat("\b\b\b\bDone!\n");
bestModel <- ssarima(y, orders=list(ar=arBest,i=(iBest),ma=(maBest)), lags=(lags),
constant=constantValue, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
return(bestModel);
}
iOrders <- matrix(0,prod(iMax+1),ncol=length(iMax));
##### Loop for differences #####
if(any(iMax!=0)){
# Prepare table with differences
iOrders[,1] <- rep(c(0:iMax[1]),times=prod(iMax[-1]+1));
if(length(iMax)>1){
for(seasLag in 2:length(iMax)){
iOrders[,seasLag] <- rep(c(0:iMax[seasLag]),each=prod(iMax[1:(seasLag-1)]+1))
}
}
}
# Start the loop with differences
for(d in 1:nrow(iOrders)){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
nParamOriginal <- 1;
if(silent[1]=="d"){
cat("I: ");cat(iOrders[d,]);cat(", ");
}
testModel <- ssarima(y, orders=list(ar=0,i=iOrders[d,],ma=0), lags=lags,
constant=constantValue, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
ICValue <- testModel$ICs[ic];
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 1;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(m==1){
bestIC <- ICValue;
dataMA <- dataI <- testModel$residuals;
iBest <- iOrders[d,];
bestICAR <- bestICI <- bestICMA <- bestIC;
}
else{
if(ICValue < bestICI){
bestICI <- ICValue;
dataMA <- dataI <- testModel$residuals;
if(ICValue < bestIC){
iBest <- iOrders[d,];
bestIC <- ICValue;
maBest <- arBest <- rep(0,length(arTest));
}
}
else{
if(fast){
m <- m + sum(maMax*(1 + sum(arMax)));
next;
}
}
}
##### Loop for MA #####
if(any(maMax!=0)){
bestICMA <- bestICI;
maBestLocal <- maTest <- rep(0,length(maTest));
for(seasSelectMA in 1:length(lags)){
if(maMax[seasSelectMA]!=0){
for(maSelect in 1:maMax[seasSelectMA]){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
maTest[seasSelectMA] <- maMax[seasSelectMA] - maSelect + 1;
nParamMA <- sum(maTest);
nParamNew <- nParamOriginal + nParamMA;
if(silent[1]=="d"){
cat("MA: ");cat(maTest);cat(", ");
}
testModel <- ssarima(dataI, orders=list(ar=0,i=0,ma=maTest), lags=lags,
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=FALSE,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=NULL, regressors="use", initialX=initialX, FI=FI);
ICValue <- icCorrector(testModel$ICs[ic], nParamMA, obsNonzero, nParamNew);
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 2;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(ICValue < bestICMA){
bestICMA <- ICValue;
maBestLocal <- maTest;
if(ICValue < bestIC){
bestIC <- bestICMA;
iBest <- iOrders[d,];
maBest <- maTest;
arBest <- rep(0,length(arTest));
}
dataMA <- testModel$residuals;
}
else{
if(fast){
m <- m + maTest[seasSelectMA] * (1 + sum(arMax)) - 1;
maTest <- maBestLocal;
break;
}
else{
maTest <- maBestLocal;
}
}
##### Loop for AR #####
if(any(arMax!=0)){
bestICAR <- bestICMA;
arBestLocal <- arTest <- rep(0,length(arTest));
for(seasSelectAR in 1:length(lags)){
lagsTest[seasSelectAR] <- lags[seasSelectAR];
if(arMax[seasSelectAR]!=0){
for(arSelect in 1:arMax[seasSelectAR]){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
arTest[seasSelectAR] <- arMax[seasSelectAR] - arSelect + 1;
nParamAR <- sum(arTest);
nParamNew <- nParamOriginal + nParamMA + nParamAR;
if(silent[1]=="d"){
cat("AR: ");cat(arTest);cat(", ");
}
testModel <- ssarima(dataMA, orders=list(ar=arTest,i=0,ma=0), lags=lags,
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=FALSE,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=NULL, regressors="use", initialX=initialX, FI=FI);
ICValue <- icCorrector(testModel$ICs[ic], nParamAR, obsNonzero, nParamNew);
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 3;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(ICValue < bestICAR){
bestICAR <- ICValue;
arBestLocal <- arTest;
if(ICValue < bestIC){
bestIC <- ICValue;
iBest <- iOrders[d,];
arBest <- arTest;
maBest <- maTest;
}
}
else{
if(fast){
m <- m + arTest[seasSelectAR] - 1;
arTest <- arBestLocal;
break;
}
else{
arTest <- arBestLocal;
}
}
}
}
}
}
}
}
}
}
else{
##### Loop for AR #####
if(any(arMax!=0)){
bestICAR <- bestICMA;
arBestLocal <- arTest <- rep(0,length(arTest));
for(seasSelectAR in 1:length(lags)){
lagsTest[seasSelectAR] <- lags[seasSelectAR];
if(arMax[seasSelectAR]!=0){
for(arSelect in 1:arMax[seasSelectAR]){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
arTest[seasSelectAR] <- arMax[seasSelectAR] - arSelect + 1;
nParamAR <- sum(arTest);
nParamNew <- nParamOriginal + nParamAR;
if(silent[1]=="d"){
cat("AR: ");cat(arTest);cat(", ");
}
testModel <- ssarima(dataMA, orders=list(ar=arTest,i=0,ma=0), lags=lags,
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=FALSE,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=NULL, regressors="use", initialX=initialX, FI=FI);
ICValue <- icCorrector(testModel$ICs[ic], nParamAR, obsNonzero, nParamNew);
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 3;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(ICValue < bestICAR){
bestICAR <- ICValue;
arBestLocal <- arTest;
if(ICValue < bestIC){
bestIC <- ICValue;
iBest <- iOrders[d,];
arBest <- arTest;
maBest <- maTest;
}
}
else{
if(fast){
m <- m + arTest[seasSelectAR] - 1;
arTest <- arBestLocal;
break;
}
else{
arTest <- arBestLocal;
}
}
}
}
}
}
}
}
#### Test the constant ####
if(constantCheck){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
if(any(c(arBest,iBest,maBest)!=0)){
testModel <- ssarima(y, orders=list(ar=(arBest),i=(iBest),ma=(maBest)), lags=(lags),
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
ICValue <- testModel$ICs[ic];
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 1;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
# cat("Constant: ");print(ICValue);
if(ICValue < bestIC){
bestModel <- testModel;
constantValue <- FALSE;
}
else{
constantValue <- TRUE;
}
}
}
if(combine){
testICs <- unlist(testICs);
testLevels <- unlist(testLevels);
testForecasts <- array(unlist(testForecasts),c(h,3,length(testICs)));
testFitted <- matrix(unlist(testFitted),ncol=length(testICs));
icWeights <- exp(-0.5*(testICs-min(testICs)))/sum(exp(-0.5*(testICs-min(testICs))));
testForecastsNew <- testForecasts;
testFittedNew <- testFitted;
for(i in 1:length(testLevels)){
if(testLevels[i]==1){
j <- i;
}
else if(testLevels[i]==2){
k <- i;
testForecastsNew[,,i] <- testForecasts[,,j] + testForecasts[,,i];
testFittedNew[,i] <- testFitted[,j] + testFitted[,i];
}
else if(testLevels[i]==3){
testForecastsNew[,,i] <- testForecasts[,,j] + testForecasts[,,k] + testForecasts[,,i];
testFittedNew[,i] <- testFitted[,j] + testFitted[,k] + testFitted[,i];
}
}
yForecast <- ts(testForecastsNew[,1,] %*% icWeights,start=yForecastStart,frequency=dataFreq);
yLower <- ts(testForecastsNew[,2,] %*% icWeights,start=yForecastStart,frequency=dataFreq);
yUpper <- ts(testForecastsNew[,3,] %*% icWeights,start=yForecastStart,frequency=dataFreq);
yFitted <- ts(testFittedNew %*% icWeights,start=dataStart,frequency=dataFreq);
modelname <- "ARIMA combined";
errors <- ts(yInSample-c(yFitted),start=dataStart,frequency=dataFreq);
yHoldout <- ts(y[(obsNonzero+1):obsAll],start=yForecastStart,frequency=dataFreq);
s2 <- mean(errors^2);
errormeasures <- measures(yHoldout,yForecast,yInSample);
ICs <- c(t(testICs) %*% icWeights);
names(ICs) <- ic;
bestModel <- list(model=modelname,timeElapsed=Sys.time()-startTime,
initialType=initialType,
fitted=yFitted,forecast=yForecast,cumulative=cumulative,
lower=yLower,upper=yUpper,residuals=errors,s2=s2,interval=intervalType,level=level,
y=y,holdout=yHoldout,
xreg=xreg, regressors=regressors, initialX=initialX,
ICs=ICs,ICw=icWeights,lossValue=NULL,loss=loss,accuracy=errormeasures);
bestModel <- structure(bestModel,class="smooth");
}
else{
#### Reestimate the best model in order to get rid of bias ####
bestModel <- ssarima(y, orders=list(ar=(arBest),i=(iBest),ma=(maBest)), lags=(lags),
constant=constantValue, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
yFitted <- bestModel$fitted;
yForecast <- bestModel$forecast;
yUpper <- bestModel$upper;
yLower <- bestModel$lower;
modelname <- bestModel$model;
bestModel$timeElapsed <- Sys.time()-startTime;
}
if(!silentText){
cat("... Done! \n");
}
##### Make a plot #####
if(!silentGraph){
yForecastNew <- yForecast;
yUpperNew <- yUpper;
yLowerNew <- yLower;
if(cumulative){
yForecastNew <- ts(rep(yForecast/h,h),start=yForecastStart,frequency=dataFreq)
if(interval){
yUpperNew <- ts(rep(yUpper/h,h),start=yForecastStart,frequency=dataFreq)
yLowerNew <- ts(rep(yLower/h,h),start=yForecastStart,frequency=dataFreq)
}
}
if(interval){
graphmaker(actuals=y,forecast=yForecastNew,fitted=yFitted, lower=yLowerNew,upper=yUpperNew,
level=level,legend=!silentLegend,main=modelname,cumulative=cumulative);
}
else{
graphmaker(actuals=y,forecast=yForecastNew,fitted=yFitted,
legend=!silentLegend,main=modelname,cumulative=cumulative);
}
}
return(bestModel);
}
| /R/autossarima.R | no_license | config-i1/smooth | R | false | false | 34,369 | r | utils::globalVariables(c("silentText","silentGraph","silentLegend","initialType","ar.orders","i.orders","ma.orders"));
#' State Space ARIMA
#'
#' Function selects the best State Space ARIMA based on information criteria,
#' using fancy branch and bound mechanism. The resulting model can be not
#' optimal in IC meaning, but it is usually reasonable.
#'
#' The function constructs a bunch of ARIMAs in the Single Source of Error
#' state space form (see the \link[smooth]{ssarima} documentation) and selects
#' the best one based on the selected information criterion. The mechanism is
#' described in Svetunkov & Boylan (2019).
#'
#' Due to the flexibility of the model, multiple seasonalities can be used. For
#' example, something crazy like this can be constructed:
#' SARIMA(1,1,1)(0,1,1)[24](2,0,1)[24*7](0,0,1)[24*30], but the estimation may
#' take a lot of time... It is recommended to use \link[smooth]{auto.msarima} in
#' cases with more than one seasonality and high frequencies.
#'
#' For some more information about the model and its implementation, see the
#' vignette: \code{vignette("ssarima","smooth")}
#'
#' @template ssBasicParam
#' @template ssAdvancedParam
#' @template ssXregParam
#' @template ssIntervals
#' @template ssInitialParam
#' @template ssAuthor
#' @template ssKeywords
#'
#' @template ssGeneralRef
#' @template ssIntermittentRef
#' @template ssARIMARef
#'
#' @param orders List of maximum orders to check, containing vector variables
#' \code{ar}, \code{i} and \code{ma}. If a variable is not provided in the
#' list, then it is assumed to be equal to zero. At least one variable should
#' have the same length as \code{lags}.
#' @param lags Defines lags for the corresponding orders (see examples). The
#' length of \code{lags} must correspond to the length of \code{orders}. There
#' are no restrictions on the length of the \code{lags} vector.
#' @param combine If \code{TRUE}, then resulting ARIMA is combined using AIC
#' weights.
#' @param fast If \code{TRUE}, then some of the orders of ARIMA are
#' skipped. This is not advised for models with \code{lags} greater than 12.
#' @param constant If \code{NULL}, then the function will check whether the
#' constant is needed. If \code{TRUE}, then the constant is forced into the
#' model. Otherwise the constant is not used.
#' @param ... Other non-documented parameters. For example \code{FI=TRUE} will
#' make the function also produce Fisher Information matrix, which then can be
#' used to calculated variances of parameters of the model. Maximum orders to
#' check can also be specified separately, however \code{orders} variable must
#' be set to \code{NULL}: \code{ar.orders} - Maximum order of AR term. Can be
#' vector, defining max orders of AR, SAR etc. \code{i.orders} - Maximum order
#' of I. Can be vector, defining max orders of I, SI etc. \code{ma.orders} -
#' Maximum order of MA term. Can be vector, defining max orders of MA, SMA etc.
#' @return Object of class "smooth" is returned. See \link[smooth]{ssarima} for
#' details.
#' @seealso \code{\link[smooth]{es}, \link[smooth]{ces},
#' \link[smooth]{sim.es}, \link[smooth]{gum}, \link[smooth]{ssarima}}
#'
#' @examples
#'
#' \donttest{x <- rnorm(118,100,3)}
#'
#' # The best ARIMA for the data
#' \donttest{ourModel <- auto.ssarima(x,orders=list(ar=c(2,1),i=c(1,1),ma=c(2,1)),lags=c(1,12),
#' h=18,holdout=TRUE,interval="np")}
#'
#' # The other one using optimised states
#' \donttest{auto.ssarima(x,orders=list(ar=c(3,2),i=c(2,1),ma=c(3,2)),lags=c(1,12),
#' initial="o",h=18,holdout=TRUE)}
#'
#' # And now combined ARIMA
#' \donttest{auto.ssarima(x,orders=list(ar=c(3,2),i=c(2,1),ma=c(3,2)),lags=c(1,12),
#' combine=TRUE,h=18,holdout=TRUE)}
#'
#' \donttest{summary(ourModel)
#' forecast(ourModel)
#' plot(forecast(ourModel))}
#'
#'
#' @export auto.ssarima
auto.ssarima <- function(y, orders=list(ar=c(3,3),i=c(2,1),ma=c(3,3)), lags=c(1,frequency(y)),
combine=FALSE, fast=TRUE, constant=NULL,
initial=c("backcasting","optimal"), ic=c("AICc","AIC","BIC","BICc"),
loss=c("likelihood","MSE","MAE","HAM","MSEh","TMSE","GTMSE","MSCE"),
h=10, holdout=FALSE, cumulative=FALSE,
interval=c("none","parametric","likelihood","semiparametric","nonparametric"), level=0.95,
bounds=c("admissible","none"),
silent=c("all","graph","legend","output","none"),
xreg=NULL, regressors=c("use","select"), initialX=NULL, ...){
# Function estimates several ssarima models and selects the best one using the selected information criterion.
#
# Copyright (C) 2015 - Inf Ivan Svetunkov
# Start measuring the time of calculations
startTime <- Sys.time();
### Depricate the old parameters
ellipsis <- list(...)
ellipsis <- depricator(ellipsis, "xregDo", "regressors");
updateX <- FALSE;
persistenceX <- transitionX <- NULL;
occurrence <- "none";
oesmodel <- "MNN";
# Add all the variables in ellipsis to current environment
list2env(ellipsis,environment());
if(!is.null(orders)){
arMax <- orders$ar;
iMax <- orders$i;
maMax <- orders$ma;
}
# If orders are provided in ellipsis via arMax, write them down.
if(exists("ar.orders",inherits=FALSE)){
if(is.null(ar.orders)){
arMax <- 0;
}
else{
arMax <- ar.orders;
}
}
else{
if(is.null(orders)){
arMax <- 0;
}
}
if(exists("i.orders",inherits=FALSE)){
if(is.null(i.orders)){
iMax <- 0;
}
else{
iMax <- i.orders;
}
}
else{
if(is.null(orders)){
iMax <- 0;
}
}
if(exists("ma.orders",inherits=FALSE)){
if(is.null(ma.orders)){
maMax <- 0;
}
else{
maMax <- ma.orders
}
}
else{
if(is.null(orders)){
maMax <- 0;
}
}
##### Set environment for ssInput and make all the checks #####
environment(ssAutoInput) <- environment();
ssAutoInput("auto.ssarima",ParentEnvironment=environment());
if(is.null(constant)){
constantCheck <- TRUE;
constantValue <- TRUE;
}
else{
if(is.logical(constant)){
constantCheck <- FALSE;
constantValue <- constant;
}
else{
constant <- NULL;
constantCheck <- TRUE;
constantValue <- TRUE;
warning("Strange value of constant parameter. We changed it to default value.");
}
}
if(any(is.complex(c(arMax,iMax,maMax,lags)))){
stop("Come on! Be serious! This is ARIMA, not CES!",call.=FALSE);
}
if(any(c(arMax,iMax,maMax)<0)){
stop("Funny guy! How am I gonna construct a model with negative order?",call.=FALSE);
}
if(any(c(lags)<0)){
stop("Right! Why don't you try complex lags then, mister smart guy?",call.=FALSE);
}
# If there are zero lags, drop them
if(any(lags==0)){
arMax <- arMax[lags!=0];
iMax <- iMax[lags!=0];
maMax <- maMax[lags!=0];
lags <- lags[lags!=0];
}
# Define maxorder and make all the values look similar (for the polynomials)
maxorder <- max(length(arMax),length(iMax),length(maMax));
if(length(arMax)!=maxorder){
arMax <- c(arMax,rep(0,maxorder-length(arMax)));
}
if(length(iMax)!=maxorder){
iMax <- c(iMax,rep(0,maxorder-length(iMax)));
}
if(length(maMax)!=maxorder){
maMax <- c(maMax,rep(0,maxorder-length(maMax)));
}
# If zeroes are defined as orders for some lags, drop them.
if(any((arMax + iMax + maMax)==0)){
orders2leave <- (arMax + iMax + maMax)!=0;
if(all(!orders2leave)){
orders2leave <- lags==min(lags);
}
arMax <- arMax[orders2leave];
iMax <- iMax[orders2leave];
maMax <- maMax[orders2leave];
lags <- lags[orders2leave];
}
# Get rid of duplicates in lags
if(length(unique(lags))!=length(lags)){
if(dataFreq!=1){
warning(paste0("'lags' variable contains duplicates: (",paste0(lags,collapse=","),"). Getting rid of some of them."),call.=FALSE);
}
lagsNew <- unique(lags);
arMaxNew <- iMaxNew <- maMaxNew <- lagsNew;
for(i in 1:length(lagsNew)){
arMaxNew[i] <- max(arMax[which(lags==lagsNew[i])],na.rm=TRUE);
iMaxNew[i] <- max(iMax[which(lags==lagsNew[i])],na.rm=TRUE);
maMaxNew[i] <- max(maMax[which(lags==lagsNew[i])],na.rm=TRUE);
}
arMax <- arMaxNew;
iMax <- iMaxNew;
maMax <- maMaxNew;
lags <- lagsNew;
}
# Order things, so we would deal with the lowest level of seasonality first
arMax <- arMax[order(lags,decreasing=FALSE)];
iMax <- iMax[order(lags,decreasing=FALSE)];
maMax <- maMax[order(lags,decreasing=FALSE)];
lags <- sort(lags,decreasing=FALSE);
# 1 stands for constant, the other one stands for variance
nParamMax <- (1 + max(arMax %*% lags + iMax %*% lags,maMax %*% lags)
+ sum(arMax) + sum(maMax) + constantCheck);
# Try to figure out if the number of parameters can be tuned in order to fit something smaller on small samples
# Don't try to fix anything if the number of seasonalities is greater than 2
if(length(lags)<=2){
if(obsNonzero <= nParamMax){
armaLength <- length(arMax);
while(obsNonzero <= nParamMax){
if(any(c(arMax[armaLength],maMax[armaLength])>0)){
arMax[armaLength] <- max(0,arMax[armaLength] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
if(obsNonzero <= nParamMax){
maMax[armaLength] <- max(0,maMax[armaLength] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
else{
if(armaLength==2){
arMax[1] <- arMax[1] - 1;
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
if(obsNonzero <= nParamMax){
maMax[1] <- maMax[1] - 1;
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
else{
break;
}
}
if(all(c(arMax,maMax)==0)){
if(iMax[armaLength]>0){
iMax[armaLength] <- max(0,iMax[armaLength] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
else if(iMax[1]>0){
if(obsNonzero <= nParamMax){
iMax[1] <- max(0,iMax[1] - 1);
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
else{
break;
}
}
}
nParamMax <- max(arMax %*% lags + iMax %*% lags,maMax %*% lags) + sum(arMax) + sum(maMax) + 1 + 1;
}
}
if(obsNonzero <= nParamMax){
message(paste0("Not enough observations for the reasonable fit. Number of possible parameters is ",
nParamMax," while the number of observations is ",obsNonzero,"!"));
stop("Redefine maximum orders and try again.",call.=FALSE)
}
# 1 stands for constant/no constant, another one stands for ARIMA(0,0,0)
if(all(maMax==0)){
nModels <- prod(iMax + 1) * (1 + sum(arMax)) + constantCheck;
}
else{
nModels <- prod(iMax + 1) * (1 + sum(maMax*(1 + sum(arMax)))) + constantCheck;
}
testModel <- list(NA);
# Array with elements x maxorders x horizon x point/lower/upper
if(combine){
testForecasts <- list(NA);
testFitted <- list(NA);
testICs <- list(NA);
testLevels <- list(NA);
testStates <- list(NA);
testTransition <- list(NA);
testPersistence <- list(NA);
}
ICValue <- 1E+100;
m <- 0;
# constant <- TRUE;
lagsTest <- maTest <- arTest <- rep(0,length(lags));
arBest <- maBest <- iBest <- rep(0,length(lags));
arBestLocal <- maBestLocal <- arBest;
#### Function corrects IC taking number of parameters on previous step ####
# Recovers the log-likelihood implied by `icValue` under the criterion selected
# in `ic` (taken from the enclosing scope), then recomputes the same criterion
# using the new number of parameters. All four criteria share the structure
# IC = penalty(k) - 2*logLik, so only the penalty term differs per criterion.
icCorrector <- function(icValue, nParam, obsNonzero, nParamNew){
    # Penalty term of the selected information criterion for k parameters
    penalty <- function(k){
        if(ic=="AIC"){
            2*k
        }
        else if(ic=="AICc"){
            2*k*obsNonzero/(obsNonzero-k-1)
        }
        else if(ic=="BIC"){
            k*log(obsNonzero)
        }
        else if(ic=="BICc"){
            (k*log(obsNonzero)*obsNonzero)/(obsNonzero-k-1)
        }
    }
    # Implied log-likelihood from the old IC value and old parameter count
    llikelihood <- (penalty(nParam) - icValue)/2;
    # Same criterion re-evaluated with the new parameter count
    return(penalty(nParamNew) - 2*llikelihood);
}
if(!silentText){
cat("Estimation progress: ");
}
### If for some reason we have model with zeroes for orders, return it.
if(all(c(arMax,iMax,maMax)==0)){
cat("\b\b\b\bDone!\n");
bestModel <- ssarima(y, orders=list(ar=arBest,i=(iBest),ma=(maBest)), lags=(lags),
constant=constantValue, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
return(bestModel);
}
iOrders <- matrix(0,prod(iMax+1),ncol=length(iMax));
##### Loop for differences #####
if(any(iMax!=0)){
# Prepare table with differences
iOrders[,1] <- rep(c(0:iMax[1]),times=prod(iMax[-1]+1));
if(length(iMax)>1){
for(seasLag in 2:length(iMax)){
iOrders[,seasLag] <- rep(c(0:iMax[seasLag]),each=prod(iMax[1:(seasLag-1)]+1))
}
}
}
# Start the loop with differences
for(d in 1:nrow(iOrders)){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
nParamOriginal <- 1;
if(silent[1]=="d"){
cat("I: ");cat(iOrders[d,]);cat(", ");
}
testModel <- ssarima(y, orders=list(ar=0,i=iOrders[d,],ma=0), lags=lags,
constant=constantValue, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
ICValue <- testModel$ICs[ic];
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 1;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(m==1){
bestIC <- ICValue;
dataMA <- dataI <- testModel$residuals;
iBest <- iOrders[d,];
bestICAR <- bestICI <- bestICMA <- bestIC;
}
else{
if(ICValue < bestICI){
bestICI <- ICValue;
dataMA <- dataI <- testModel$residuals;
if(ICValue < bestIC){
iBest <- iOrders[d,];
bestIC <- ICValue;
maBest <- arBest <- rep(0,length(arTest));
}
}
else{
if(fast){
m <- m + sum(maMax*(1 + sum(arMax)));
next;
}
}
}
##### Loop for MA #####
if(any(maMax!=0)){
bestICMA <- bestICI;
maBestLocal <- maTest <- rep(0,length(maTest));
for(seasSelectMA in 1:length(lags)){
if(maMax[seasSelectMA]!=0){
for(maSelect in 1:maMax[seasSelectMA]){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
maTest[seasSelectMA] <- maMax[seasSelectMA] - maSelect + 1;
nParamMA <- sum(maTest);
nParamNew <- nParamOriginal + nParamMA;
if(silent[1]=="d"){
cat("MA: ");cat(maTest);cat(", ");
}
testModel <- ssarima(dataI, orders=list(ar=0,i=0,ma=maTest), lags=lags,
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=FALSE,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=NULL, regressors="use", initialX=initialX, FI=FI);
ICValue <- icCorrector(testModel$ICs[ic], nParamMA, obsNonzero, nParamNew);
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 2;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(ICValue < bestICMA){
bestICMA <- ICValue;
maBestLocal <- maTest;
if(ICValue < bestIC){
bestIC <- bestICMA;
iBest <- iOrders[d,];
maBest <- maTest;
arBest <- rep(0,length(arTest));
}
dataMA <- testModel$residuals;
}
else{
if(fast){
m <- m + maTest[seasSelectMA] * (1 + sum(arMax)) - 1;
maTest <- maBestLocal;
break;
}
else{
maTest <- maBestLocal;
}
}
##### Loop for AR #####
if(any(arMax!=0)){
bestICAR <- bestICMA;
arBestLocal <- arTest <- rep(0,length(arTest));
for(seasSelectAR in 1:length(lags)){
lagsTest[seasSelectAR] <- lags[seasSelectAR];
if(arMax[seasSelectAR]!=0){
for(arSelect in 1:arMax[seasSelectAR]){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
arTest[seasSelectAR] <- arMax[seasSelectAR] - arSelect + 1;
nParamAR <- sum(arTest);
nParamNew <- nParamOriginal + nParamMA + nParamAR;
if(silent[1]=="d"){
cat("AR: ");cat(arTest);cat(", ");
}
testModel <- ssarima(dataMA, orders=list(ar=arTest,i=0,ma=0), lags=lags,
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=FALSE,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=NULL, regressors="use", initialX=initialX, FI=FI);
ICValue <- icCorrector(testModel$ICs[ic], nParamAR, obsNonzero, nParamNew);
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 3;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(ICValue < bestICAR){
bestICAR <- ICValue;
arBestLocal <- arTest;
if(ICValue < bestIC){
bestIC <- ICValue;
iBest <- iOrders[d,];
arBest <- arTest;
maBest <- maTest;
}
}
else{
if(fast){
m <- m + arTest[seasSelectAR] - 1;
arTest <- arBestLocal;
break;
}
else{
arTest <- arBestLocal;
}
}
}
}
}
}
}
}
}
}
else{
##### Loop for AR #####
if(any(arMax!=0)){
bestICAR <- bestICMA;
arBestLocal <- arTest <- rep(0,length(arTest));
for(seasSelectAR in 1:length(lags)){
lagsTest[seasSelectAR] <- lags[seasSelectAR];
if(arMax[seasSelectAR]!=0){
for(arSelect in 1:arMax[seasSelectAR]){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
arTest[seasSelectAR] <- arMax[seasSelectAR] - arSelect + 1;
nParamAR <- sum(arTest);
nParamNew <- nParamOriginal + nParamAR;
if(silent[1]=="d"){
cat("AR: ");cat(arTest);cat(", ");
}
testModel <- ssarima(dataMA, orders=list(ar=arTest,i=0,ma=0), lags=lags,
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=FALSE,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=NULL, regressors="use", initialX=initialX, FI=FI);
ICValue <- icCorrector(testModel$ICs[ic], nParamAR, obsNonzero, nParamNew);
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 3;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
if(silent[1]=="d"){
cat(ICValue); cat("\n");
}
if(ICValue < bestICAR){
bestICAR <- ICValue;
arBestLocal <- arTest;
if(ICValue < bestIC){
bestIC <- ICValue;
iBest <- iOrders[d,];
arBest <- arTest;
maBest <- maTest;
}
}
else{
if(fast){
m <- m + arTest[seasSelectAR] - 1;
arTest <- arBestLocal;
break;
}
else{
arTest <- arBestLocal;
}
}
}
}
}
}
}
}
#### Test the constant ####
if(constantCheck){
m <- m + 1;
if(!silentText){
cat(paste0(rep("\b",nchar(round(m/nModels,2)*100)+1),collapse=""));
cat(paste0(round((m)/nModels,2)*100,"%"));
}
if(any(c(arBest,iBest,maBest)!=0)){
testModel <- ssarima(y, orders=list(ar=(arBest),i=(iBest),ma=(maBest)), lags=(lags),
constant=FALSE, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
ICValue <- testModel$ICs[ic];
if(combine){
testForecasts[[m]] <- matrix(NA,h,3);
testForecasts[[m]][,1] <- testModel$forecast;
testForecasts[[m]][,2] <- testModel$lower;
testForecasts[[m]][,3] <- testModel$upper;
testFitted[[m]] <- testModel$fitted;
testICs[[m]] <- ICValue;
testLevels[[m]] <- 1;
testStates[[m]] <- testModel$states;
testTransition[[m]] <- testModel$transition;
testPersistence[[m]] <- testModel$persistence;
}
# cat("Constant: ");print(ICValue);
if(ICValue < bestIC){
bestModel <- testModel;
constantValue <- FALSE;
}
else{
constantValue <- TRUE;
}
}
}
if(combine){
testICs <- unlist(testICs);
testLevels <- unlist(testLevels);
testForecasts <- array(unlist(testForecasts),c(h,3,length(testICs)));
testFitted <- matrix(unlist(testFitted),ncol=length(testICs));
icWeights <- exp(-0.5*(testICs-min(testICs)))/sum(exp(-0.5*(testICs-min(testICs))));
testForecastsNew <- testForecasts;
testFittedNew <- testFitted;
for(i in 1:length(testLevels)){
if(testLevels[i]==1){
j <- i;
}
else if(testLevels[i]==2){
k <- i;
testForecastsNew[,,i] <- testForecasts[,,j] + testForecasts[,,i];
testFittedNew[,i] <- testFitted[,j] + testFitted[,i];
}
else if(testLevels[i]==3){
testForecastsNew[,,i] <- testForecasts[,,j] + testForecasts[,,k] + testForecasts[,,i];
testFittedNew[,i] <- testFitted[,j] + testFitted[,k] + testFitted[,i];
}
}
yForecast <- ts(testForecastsNew[,1,] %*% icWeights,start=yForecastStart,frequency=dataFreq);
yLower <- ts(testForecastsNew[,2,] %*% icWeights,start=yForecastStart,frequency=dataFreq);
yUpper <- ts(testForecastsNew[,3,] %*% icWeights,start=yForecastStart,frequency=dataFreq);
yFitted <- ts(testFittedNew %*% icWeights,start=dataStart,frequency=dataFreq);
modelname <- "ARIMA combined";
errors <- ts(yInSample-c(yFitted),start=dataStart,frequency=dataFreq);
yHoldout <- ts(y[(obsNonzero+1):obsAll],start=yForecastStart,frequency=dataFreq);
s2 <- mean(errors^2);
errormeasures <- measures(yHoldout,yForecast,yInSample);
ICs <- c(t(testICs) %*% icWeights);
names(ICs) <- ic;
bestModel <- list(model=modelname,timeElapsed=Sys.time()-startTime,
initialType=initialType,
fitted=yFitted,forecast=yForecast,cumulative=cumulative,
lower=yLower,upper=yUpper,residuals=errors,s2=s2,interval=intervalType,level=level,
y=y,holdout=yHoldout,
xreg=xreg, regressors=regressors, initialX=initialX,
ICs=ICs,ICw=icWeights,lossValue=NULL,loss=loss,accuracy=errormeasures);
bestModel <- structure(bestModel,class="smooth");
}
else{
#### Reestimate the best model in order to get rid of bias ####
bestModel <- ssarima(y, orders=list(ar=(arBest),i=(iBest),ma=(maBest)), lags=(lags),
constant=constantValue, initial=initialType, loss=loss,
h=h, holdout=holdout, cumulative=cumulative,
interval=intervalType, level=level,
bounds=bounds, silent=TRUE,
xreg=xreg, regressors=regressors, initialX=initialX, FI=FI);
yFitted <- bestModel$fitted;
yForecast <- bestModel$forecast;
yUpper <- bestModel$upper;
yLower <- bestModel$lower;
modelname <- bestModel$model;
bestModel$timeElapsed <- Sys.time()-startTime;
}
if(!silentText){
cat("... Done! \n");
}
##### Make a plot #####
if(!silentGraph){
yForecastNew <- yForecast;
yUpperNew <- yUpper;
yLowerNew <- yLower;
if(cumulative){
yForecastNew <- ts(rep(yForecast/h,h),start=yForecastStart,frequency=dataFreq)
if(interval){
yUpperNew <- ts(rep(yUpper/h,h),start=yForecastStart,frequency=dataFreq)
yLowerNew <- ts(rep(yLower/h,h),start=yForecastStart,frequency=dataFreq)
}
}
if(interval){
graphmaker(actuals=y,forecast=yForecastNew,fitted=yFitted, lower=yLowerNew,upper=yUpperNew,
level=level,legend=!silentLegend,main=modelname,cumulative=cumulative);
}
else{
graphmaker(actuals=y,forecast=yForecastNew,fitted=yFitted,
legend=!silentLegend,main=modelname,cumulative=cumulative);
}
}
return(bestModel);
}
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Reads the UCI "Individual household electric power consumption" text
# export, subsets the two target days, and writes the plot to plot1.png.

# Missing values are coded as '?' in this data set; declaring them via
# na.strings lets the power column parse as numeric instead of character
# (or, pre-R 4.0, as a factor -- which made as.numeric() return level
# indices rather than kilowatt values).
data <- read.table("household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?")
# Subset only Feb 1 and 2, 2007 (dates are stored as d/m/Y strings)
feb <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# Open the PNG device before drawing; the old par(mfrow = c(1, 1)) call
# targeted whatever device was active *before* png() and had no effect
# on the output file, so it was removed.
png(filename = "plot1.png", width = 480, height = 480, bg = "transparent")
hist(as.numeric(feb$Global_active_power),
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power", col = "red")
dev.off()
| /plot1.R | no_license | cassimahmedattia/ExData_Plotting1 | R | false | false | 451 | r | #test
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Reads the UCI "Individual household electric power consumption" text
# export, subsets the two target days, and writes the plot to plot1.png.

# Missing values are coded as '?' in this data set; declaring them via
# na.strings lets the power column parse as numeric instead of character
# (or, pre-R 4.0, as a factor -- which made as.numeric() return level
# indices rather than kilowatt values).
data <- read.table("household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?")
# Subset only Feb 1 and 2, 2007 (dates are stored as d/m/Y strings)
feb <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# Open the PNG device before drawing; the old par(mfrow = c(1, 1)) call
# targeted whatever device was active *before* png() and had no effect
# on the output file, so it was removed.
png(filename = "plot1.png", width = 480, height = 480, bg = "transparent")
hist(as.numeric(feb$Global_active_power),
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power", col = "red")
dev.off()
|
# Lookup tables mapping human-readable labels to the codes used in
# cancer-rates.info query URLs. The names are matched by grep() in
# usCancer(), so they double as the user-facing search keys.
cancerCodes = list(
  # Cancer site label -> numeric site code used in the query URL
  site = c('All Sites'=0,'Oral Cavity and Pharynx'=20000,'Lip'=20010,'Tongue'=20020,
    'Salivary Gland'=20030,'Floor of Mouth'=20040,'Gum and Other Mouth'=20050,
    'Nasopharynx'=20060,'Tonsil'=20070,'Oropharynx'=20080,'Hypopharynx'=20090,
    'Other Oral Cavity and Pharynx'=20100,'Digestive System'=21000,
    'Esophagus'=21010,'Stomach'=21020,'Small Intestine'=21030,'Colon and Rectum'=1,
    'Early Stage Colon and Rectum'=6,'Late Stage Colon and Rectum'=7,
    'Colon excluding Rectum'=21040,'Cecum'=21041,'Appendix'=21042,'Ascending Colon'=21043,
    'Hepatic Flexure'=21044,'Transverse Colon'=21045,'Splenic Flexure'=21046,
    'Descending Colon'=21047,'Sigmoid Colon'=21048,'Large Intestine, NOS'=21049,
    'Rectum and Rectosigmoid Junction'=21050,'Rectosigmoid Junction'=21051,
    'Rectum'=21052,'Anus, Anal Canal, and Anorectum'=21060,
    'Liver and Intrahepatic Bile Duct'=2,'Liver'=21071,'Intrahepatic Bile Duct'=21072,
    'Gallbladder'=21080,'Other Biliary'=21090,'Pancreas'=21100,'Retroperitoneum'=21110,
    'Peritoneum, Omentum, and Mesentery'=21120,'Other Digestive Organs'=21130,
    'Respiratory System'=22000,'Nose, Nasal Cavity, and Middle Ear'=22010,
    'Larynx'=22020,
    'Lung and Bronchus'=22030,'Pleura'=22050,
    'Trachea, Mediastinum and Other Respiratory Organs'=22060,'Bones and Joints'=23000,
    'Soft Tissue including Heart'=24000,'Skin excluding Basal and Squamous'=25000,
    'Melanoma of the Skin'=25010,'Other NonEpithelial Skin'=25020,'Breast'=26000,
    'In Situ Breast'=3,'Early Stage Breast'=4,'Late Stage Breast'=5,
    'Female Genital System'=27000,'Cervix Uteri'=27010,'Early Stage Cervix Uteri'=8,
    'Late Stage Cervix Uteri'=9,'Corpus Uteri'=27020,'Uterus, NOS'=27030,
    'Ovary'=27040,'Vagina'=27050,'Vulva'=27060,'Other Female Genital Organs'=27070,
    'Male Genital System'=28000,'Prostate'=28010,'Testis'=28020,'Penis'=28030,
    'Other Male Genital Organs'=28040,'Urinary System'=29000,'Urinary Bladder'=29010,
    'Kidney and Renal pelvis'=29020,'Ureter'=29030,'Other Urinary Organs'=29040,
    'Eye and Orbit'=30000,'Brain and Other Nervous System'=31000,'Brain'=31010,
    'Cranial Nerves and Other Nervous System'=31040,'Endocrine System'=32000,
    'Thyroid'=32010,'Other Endocrine including Thymus'=32020,'Lymphoma'=33000,
    'Hodgkin Lymphoma'=33010,'Hodgkin Lymphoma Nodal'=33011,
    'Hodgkin Lymphoma Extranodal'=33012,'NonHodgkin Lymphoma'=33040,
    'NonHodgkin Lymphoma Nodal'=33041,'NonHodgkin Lymphoma Extranodal'=33042,
    'Myeloma'=34000,'Leukemia'=35000,'Lymphocytic Leukemia'=35010,
    'Acute Lymphocytic Leukemia'=35011,'Chronic Lymphocytic Leukemia'=35012,
    'Other Lymphocytic Leukemia'=35013,'Myeloid and Monocytic Leukemia'=35020,
    'Acute Myeloid Leukemia'=35021,'Acute Monocytic Leukemia'=35031,
    'Chronic Myeloid Leukemia'=35022,'Other Myeloid/Monocytic Leukemia'=35023,
    'Other Leukemia'=35040,'Other Acute Leukemia'=35041,
    'Aleukemic, Subleukemic and NOS Leukemia'=35043,
    'Mesothelioma'=36010,'Kaposi Sarcoma'=36020,'Miscellaneous'=37000),
  # State label -> registry code. 'Connecticut' fixes the former
  # 'Conneticut' misspelling, which made searches for the correct
  # spelling match nothing.
  state = c(Texas='tx', Georgia='ga', Kentucky = 'ky',
            Michigan='mi', Arkansas='ar', Mississippi='ms',
            Wisconsin='wi', Iowa='ia', 'New Mexico' ='nm',
            Utah='ut', California='ca', Seattle='se',
            Connecticut = 'ct', 'New Jersey' = 'nj'),
  # Sex code: 0 queries both sexes combined
  sex = c(M=1, F=2, both=0)
)
# Minnesota
# https://apps.health.state.mn.us/mndata/cancer_query?p_auth=Wy7tICix&p_p_id=springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w&p_p_lifecycle=1&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&p_p_col_pos=1&_springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w__facesViewIdRender=%2Fpages%2Findex.xhtml
# https://apps.health.state.mn.us/mndata/cancer_query?p_auth=Wy7tICix&p_p_id=springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w&p_p_lifecycle=1&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&p_p_col_pos=1&_springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w__facesViewIdRender=%2Fpages%2Findex.xhtml
# https://apps.health.state.mn.us/mndata/cancer_query?p_auth=Wy7tICix&p_p_id=springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w&p_p_lifecycle=1&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&p_p_col_pos=1&_springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w__facesViewIdRender=%2Fpages%2Findex.xhtml
usCancer = function(
state='Kentucky',
site='Lung',
year = c(2004,2008),
sex='both'
) {
Ssite = cancerCodes$site[grep(paste(site, collapse='|'), names(cancerCodes$site), ignore.case=TRUE)]
Ssex = cancerCodes$sex[grep(paste(sex, collapse='|'), names(cancerCodes$sex), ignore.case=TRUE)]
Sstate = cancerCodes$state[grep(paste(state, collapse='|'), names(cancerCodes$state), ignore.case=TRUE)]
if(is.matrix(year)) {
SstartYear = year[,1]
SendYear = year[,2]
} else {
SstartYear = min(year)
SendYear = max(year)
}
forUrl = expand.grid(siteCode=Ssite, sexCode=Ssex, stateCode=Sstate, startYear = SstartYear, endYear=SendYear)
for(D in c('sex','state','site')){
forUrl[,D] = names(cancerCodes[[D]])[match(forUrl[,paste(D, 'Code', sep='')], cancerCodes[[D]])]
}
allCases = data.frame()
for(D in 1:nrow(forUrl)) {
if(any(is.na(forUrl[D,]))){
warning('site, sex or state not found')
}
if(forUrl[D, 'stateCode'] %in% c('ar')) {
middleUrl = 'beta/common/v1'
depth=5
} else {
middleUrl = 'common'
depth=5
}
kUrl = paste(
'http://www.cancer-rates.info/',middleUrl, '/index.php?',
'std=us2000m&geography=1&syear=', forUrl[D, 'startYear'],
'&eyear=', forUrl[D, 'endYear'], '&site=', forUrl[D, 'siteCode'],
'&race=0&sex=', forUrl[D, 'sexCode'], '&dataset=I&database=', forUrl[D, 'stateCode'],
'&datasource=inv&m_color=1&c_intrvls=0&title=Stuff&r=370,*&c=538',
sep='')
namesHere = forUrl[D, c('startYear','endYear','sex','state','site')]
myDir = file.path(tempdir(), gsub("[[:space:]]+", "_", paste(c("cfiles",as.character(namesHere)), collapse='_')))
myCommand = paste("httrack --depth=",depth, " --priority=1 -N1 -O ", myDir, " \'", kUrl, "\'", sep='')
cat('\ndownloading', paste(namesHere, collapse=" "))
sRes = try(system(myCommand))
cat(' done\n')
if(class(sRes)=='try-error') {
stop("install httrack from www.httrack.com")
}
fname = system(paste("ls ", myDir, "/web/newalldetails*.html", sep=''), TRUE)
fname = grep('newalldetails[[:alnum:]]+\\.html$', fname, value=TRUE)
if(requireNamespace("XML", quietly=TRUE)) {
datHeader = XML::readHTMLTable(fname[1], isUrl=FALSE, which=2)[1,]
} else {
warning("install the XML package to use usCancer")
}
datText = scan(fname[1], what=character(), quiet=TRUE)
startTable = grep("<TABLE", datText)
startTable = startTable[length(startTable)]
endTable = grep("</TABLE", datText)
endTable = endTable[length(endTable)]
datText = datText[startTable:endTable]
startTr = grep("<TR", datText)
if(min(startTr)>1)
datText = datText[-seq(1,min(startTr)-1)]
startTr = grep("<TR", datText)
startTd = grep("<TD", datText)
earlyTr = max(which(startTr <= startTd[1]))
startTr = startTr[seq(earlyTr, length(startTr))]
datText = paste(datText, collapse='')
datText = unlist(strsplit(datText, "<TR>"))
datText = grep("^[[:space:]]?$|^<TH", datText, invert=TRUE, value=TRUE)
datSplit = strsplit(datText, "<TD")
datLen = unlist(lapply(datSplit, length))
datSplit = unlist(datSplit[which(datLen ==6)])
datSplit = gsub("</A>$", "", datSplit)
datSplit = gsub("^[[:print:]]+>", "", datSplit)
datSplit = gsub("~", "1", datSplit)
dat = as.data.frame(matrix(datSplit, ncol=6, byrow=TRUE),
stringsAsFactors=FALSE)[,-1]
colnames(dat) = as.character(unlist(datHeader))
dat = dat[grep("^$", dat$County, invert=TRUE),]
for(Dcol in grep("County", colnames(dat), invert=TRUE)) {
dat[,Dcol] = as.numeric(as.character(dat[,Dcol]))
}
cases = dat[,c('County','Cases')]
if(nrow(cases)){
cases = cbind(cases, namesHere, stateCode=forUrl[D,'stateCode'])
allCases = rbind(allCases, cases)
}
}
getRid = grep("^[[:space:]]?(Note|Data|~|The population estimates):?[[:space:]]", allCases$County)
getRidState = grep("^[[:space:]]?(STATE|\\*\\*\\*Counts|Unknown)[[:space:]]?$", allCases$County)
getRid = c(getRid, getRidState)
if(length(getRid))
allCases = allCases[-getRid,]
allCases$Cases = as.numeric(gsub("~", "1", allCases$Cases))
allCases
} | /R/cancer-rates.info.R | no_license | cran/diseasemapping | R | false | false | 8,811 | r |
# Lookup tables mapping human-readable labels to the codes used in
# cancer-rates.info query URLs. The names are matched by grep() in
# usCancer(), so they double as the user-facing search keys.
cancerCodes = list(
  # Cancer site label -> numeric site code used in the query URL
  site = c('All Sites'=0,'Oral Cavity and Pharynx'=20000,'Lip'=20010,'Tongue'=20020,
    'Salivary Gland'=20030,'Floor of Mouth'=20040,'Gum and Other Mouth'=20050,
    'Nasopharynx'=20060,'Tonsil'=20070,'Oropharynx'=20080,'Hypopharynx'=20090,
    'Other Oral Cavity and Pharynx'=20100,'Digestive System'=21000,
    'Esophagus'=21010,'Stomach'=21020,'Small Intestine'=21030,'Colon and Rectum'=1,
    'Early Stage Colon and Rectum'=6,'Late Stage Colon and Rectum'=7,
    'Colon excluding Rectum'=21040,'Cecum'=21041,'Appendix'=21042,'Ascending Colon'=21043,
    'Hepatic Flexure'=21044,'Transverse Colon'=21045,'Splenic Flexure'=21046,
    'Descending Colon'=21047,'Sigmoid Colon'=21048,'Large Intestine, NOS'=21049,
    'Rectum and Rectosigmoid Junction'=21050,'Rectosigmoid Junction'=21051,
    'Rectum'=21052,'Anus, Anal Canal, and Anorectum'=21060,
    'Liver and Intrahepatic Bile Duct'=2,'Liver'=21071,'Intrahepatic Bile Duct'=21072,
    'Gallbladder'=21080,'Other Biliary'=21090,'Pancreas'=21100,'Retroperitoneum'=21110,
    'Peritoneum, Omentum, and Mesentery'=21120,'Other Digestive Organs'=21130,
    'Respiratory System'=22000,'Nose, Nasal Cavity, and Middle Ear'=22010,
    'Larynx'=22020,
    'Lung and Bronchus'=22030,'Pleura'=22050,
    'Trachea, Mediastinum and Other Respiratory Organs'=22060,'Bones and Joints'=23000,
    'Soft Tissue including Heart'=24000,'Skin excluding Basal and Squamous'=25000,
    'Melanoma of the Skin'=25010,'Other NonEpithelial Skin'=25020,'Breast'=26000,
    'In Situ Breast'=3,'Early Stage Breast'=4,'Late Stage Breast'=5,
    'Female Genital System'=27000,'Cervix Uteri'=27010,'Early Stage Cervix Uteri'=8,
    'Late Stage Cervix Uteri'=9,'Corpus Uteri'=27020,'Uterus, NOS'=27030,
    'Ovary'=27040,'Vagina'=27050,'Vulva'=27060,'Other Female Genital Organs'=27070,
    'Male Genital System'=28000,'Prostate'=28010,'Testis'=28020,'Penis'=28030,
    'Other Male Genital Organs'=28040,'Urinary System'=29000,'Urinary Bladder'=29010,
    'Kidney and Renal pelvis'=29020,'Ureter'=29030,'Other Urinary Organs'=29040,
    'Eye and Orbit'=30000,'Brain and Other Nervous System'=31000,'Brain'=31010,
    'Cranial Nerves and Other Nervous System'=31040,'Endocrine System'=32000,
    'Thyroid'=32010,'Other Endocrine including Thymus'=32020,'Lymphoma'=33000,
    'Hodgkin Lymphoma'=33010,'Hodgkin Lymphoma Nodal'=33011,
    'Hodgkin Lymphoma Extranodal'=33012,'NonHodgkin Lymphoma'=33040,
    'NonHodgkin Lymphoma Nodal'=33041,'NonHodgkin Lymphoma Extranodal'=33042,
    'Myeloma'=34000,'Leukemia'=35000,'Lymphocytic Leukemia'=35010,
    'Acute Lymphocytic Leukemia'=35011,'Chronic Lymphocytic Leukemia'=35012,
    'Other Lymphocytic Leukemia'=35013,'Myeloid and Monocytic Leukemia'=35020,
    'Acute Myeloid Leukemia'=35021,'Acute Monocytic Leukemia'=35031,
    'Chronic Myeloid Leukemia'=35022,'Other Myeloid/Monocytic Leukemia'=35023,
    'Other Leukemia'=35040,'Other Acute Leukemia'=35041,
    'Aleukemic, Subleukemic and NOS Leukemia'=35043,
    'Mesothelioma'=36010,'Kaposi Sarcoma'=36020,'Miscellaneous'=37000),
  # State label -> registry code. 'Connecticut' fixes the former
  # 'Conneticut' misspelling, which made searches for the correct
  # spelling match nothing.
  state = c(Texas='tx', Georgia='ga', Kentucky = 'ky',
            Michigan='mi', Arkansas='ar', Mississippi='ms',
            Wisconsin='wi', Iowa='ia', 'New Mexico' ='nm',
            Utah='ut', California='ca', Seattle='se',
            Connecticut = 'ct', 'New Jersey' = 'nj'),
  # Sex code: 0 queries both sexes combined
  sex = c(M=1, F=2, both=0)
)
# Minnesota
# https://apps.health.state.mn.us/mndata/cancer_query?p_auth=Wy7tICix&p_p_id=springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w&p_p_lifecycle=1&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&p_p_col_pos=1&_springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w__facesViewIdRender=%2Fpages%2Findex.xhtml
# https://apps.health.state.mn.us/mndata/cancer_query?p_auth=Wy7tICix&p_p_id=springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w&p_p_lifecycle=1&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&p_p_col_pos=1&_springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w__facesViewIdRender=%2Fpages%2Findex.xhtml
# https://apps.health.state.mn.us/mndata/cancer_query?p_auth=Wy7tICix&p_p_id=springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w&p_p_lifecycle=1&p_p_state=normal&p_p_mode=view&p_p_col_id=column-1&p_p_col_count=2&p_p_col_pos=1&_springQueryPortlet_WAR_mndataspringQueryportlet_INSTANCE_TQg4MoZMNm0w__facesViewIdRender=%2Fpages%2Findex.xhtml
# Scrape county-level cancer case counts from cancer-rates.info.
#
# For every combination of the requested site(s), sex(es), state(s) and
# year range(s), build a query URL, mirror the result pages with the
# external 'httrack' tool, parse the last HTML table on the
# "newalldetails" page, and accumulate a data frame of county counts.
#
# Args:
#   state: state name(s); matched case-insensitively (and partially)
#          against names(cancerCodes$state).
#   site:  cancer site name(s); matched against names(cancerCodes$site).
#   year:  a numeric vector (min/max become start/end years) or a
#          two-column matrix of start/end year pairs.
#   sex:   'M', 'F' or 'both'; matched against names(cancerCodes$sex).
# Returns:
#   A data frame with columns County, Cases, the query descriptors
#   (startYear, endYear, sex, state, site) and stateCode.
usCancer = function(
    state='Kentucky',
    site='Lung',
    year = c(2004,2008),
    sex='both'
) {
  # Translate labels into query codes; grep() permits partial matches
  # and multiple selections per argument.
  Ssite = cancerCodes$site[grep(paste(site, collapse='|'), names(cancerCodes$site), ignore.case=TRUE)]
  Ssex = cancerCodes$sex[grep(paste(sex, collapse='|'), names(cancerCodes$sex), ignore.case=TRUE)]
  Sstate = cancerCodes$state[grep(paste(state, collapse='|'), names(cancerCodes$state), ignore.case=TRUE)]
  if(is.matrix(year)) {
    SstartYear = year[,1]
    SendYear = year[,2]
  } else {
    SstartYear = min(year)
    SendYear = max(year)
  }
  # One row per query to run
  forUrl = expand.grid(siteCode=Ssite, sexCode=Ssex, stateCode=Sstate,
                       startYear=SstartYear, endYear=SendYear)
  # Recover the human-readable label for each code column
  for(D in c('sex','state','site')){
    forUrl[,D] = names(cancerCodes[[D]])[match(forUrl[,paste(D, 'Code', sep='')], cancerCodes[[D]])]
  }
  allCases = data.frame()
  # seq_len() rather than 1:nrow() so an empty query grid (no label
  # matched) skips the loop instead of iterating over c(1, 0)
  for(D in seq_len(nrow(forUrl))) {
    if(any(is.na(forUrl[D,]))){
      warning('site, sex or state not found')
    }
    # Arkansas is served from a different URL layout
    if(forUrl[D, 'stateCode'] %in% c('ar')) {
      middleUrl = 'beta/common/v1'
      depth=5
    } else {
      middleUrl = 'common'
      depth=5
    }
    kUrl = paste(
      'http://www.cancer-rates.info/', middleUrl, '/index.php?',
      'std=us2000m&geography=1&syear=', forUrl[D, 'startYear'],
      '&eyear=', forUrl[D, 'endYear'], '&site=', forUrl[D, 'siteCode'],
      '&race=0&sex=', forUrl[D, 'sexCode'], '&dataset=I&database=', forUrl[D, 'stateCode'],
      '&datasource=inv&m_color=1&c_intrvls=0&title=Stuff&r=370,*&c=538',
      sep='')
    namesHere = forUrl[D, c('startYear','endYear','sex','state','site')]
    myDir = file.path(tempdir(), gsub("[[:space:]]+", "_",
                                      paste(c("cfiles", as.character(namesHere)), collapse='_')))
    # Mirror the query results with httrack (an external dependency)
    myCommand = paste("httrack --depth=", depth, " --priority=1 -N1 -O ",
                      myDir, " \'", kUrl, "\'", sep='')
    cat('\ndownloading', paste(namesHere, collapse=" "))
    sRes = try(system(myCommand))
    cat(' done\n')
    # inherits() is the robust way to test the class of a try() result
    if(inherits(sRes, 'try-error')) {
      stop("install httrack from www.httrack.com")
    }
    # Locate the mirrored results page; list.files() is portable,
    # unlike shelling out to `ls`
    fname = list.files(file.path(myDir, 'web'),
                       pattern='^newalldetails.*\\.html$', full.names=TRUE)
    fname = grep('newalldetails[[:alnum:]]+\\.html$', fname, value=TRUE)
    if(requireNamespace("XML", quietly=TRUE)) {
      datHeader = XML::readHTMLTable(fname[1], isUrl=FALSE, which=2)[1,]
    } else {
      # datHeader is required below; stop here rather than failing later
      # with an obscure "object not found" error
      stop("install the XML package to use usCancer")
    }
    # Hand-rolled scrape of the last <TABLE> on the page: keep only the
    # <TR> rows with exactly five data cells and strip the markup
    datText = scan(fname[1], what=character(), quiet=TRUE)
    startTable = grep("<TABLE", datText)
    startTable = startTable[length(startTable)]
    endTable = grep("</TABLE", datText)
    endTable = endTable[length(endTable)]
    datText = datText[startTable:endTable]
    startTr = grep("<TR", datText)
    if(min(startTr)>1)
      datText = datText[-seq(1,min(startTr)-1)]
    startTr = grep("<TR", datText)
    startTd = grep("<TD", datText)
    earlyTr = max(which(startTr <= startTd[1]))
    startTr = startTr[seq(earlyTr, length(startTr))]
    datText = paste(datText, collapse='')
    datText = unlist(strsplit(datText, "<TR>"))
    datText = grep("^[[:space:]]?$|^<TH", datText, invert=TRUE, value=TRUE)
    datSplit = strsplit(datText, "<TD")
    datLen = unlist(lapply(datSplit, length))
    datSplit = unlist(datSplit[which(datLen ==6)])
    datSplit = gsub("</A>$", "", datSplit)
    datSplit = gsub("^[[:print:]]+>", "", datSplit)
    # '~' marks suppressed small counts on the site; treat them as 1
    datSplit = gsub("~", "1", datSplit)
    dat = as.data.frame(matrix(datSplit, ncol=6, byrow=TRUE),
                        stringsAsFactors=FALSE)[,-1]
    colnames(dat) = as.character(unlist(datHeader))
    dat = dat[grep("^$", dat$County, invert=TRUE),]
    # Convert every non-County column to numeric
    for(Dcol in grep("County", colnames(dat), invert=TRUE)) {
      dat[,Dcol] = as.numeric(as.character(dat[,Dcol]))
    }
    cases = dat[,c('County','Cases')]
    if(nrow(cases)){
      cases = cbind(cases, namesHere, stateCode=forUrl[D,'stateCode'])
      allCases = rbind(allCases, cases)
    }
  }
  # Drop footnote/summary rows that were scraped along with the counties
  getRid = grep("^[[:space:]]?(Note|Data|~|The population estimates):?[[:space:]]", allCases$County)
  getRidState = grep("^[[:space:]]?(STATE|\\*\\*\\*Counts|Unknown)[[:space:]]?$", allCases$County)
  getRid = c(getRid, getRidState)
  if(length(getRid))
    allCases = allCases[-getRid,]
  allCases$Cases = as.numeric(gsub("~", "1", allCases$Cases))
  allCases
}
# Overlay the two result series from file.txt on a single line plot
# (black solid = column 2, red dashed = column 3).
setwd(dir = "/home/arthur/Documentos/UFAL/Huffman/Comparation")
results <- read.table("file.txt")
series_a <- results[, 2]
series_b <- results[, 3]
idx <- seq_along(series_a)
# Shared y-range so neither series is clipped
y_range <- range(series_a, series_b)
plot(idx, series_a, type = "l", ylim = y_range)
lines(series_b, col = "red", lty = 2)
| /Comparation/rscript.R | permissive | 4rthurmonteiro/Huffman | R | false | false | 255 | r | setwd(dir = "/home/arthur/Documentos/UFAL/Huffman/Comparation")
# Overlay the two result series from file.txt on a single line plot
# (black solid = column 2, red dashed = column 3).
results <- read.table("file.txt")
series_a <- results[, 2]
series_b <- results[, 3]
idx <- seq_along(series_a)
# Shared y-range so neither series is clipped
y_range <- range(series_a, series_b)
plot(idx, series_a, type = "l", ylim = y_range)
lines(series_b, col = "red", lty = 2)
|
# Small quadratic-programming test case:
# minimize 1/2 x'Gx + g0'x subject to CE'x + ce0 = 0 and CI'x + ci0 >= 0.
# Matrices are written row-wise with rbind() for readability; the values
# are identical to the original column-major matrix() calls.
G <- rbind(c(2.1, 0.0, 1.0),
           c(1.5, 2.2, 0.0),
           c(1.2, 1.3, 3.1))
g0 <- c(6, 1, 1)
# One equality constraint (3x1)
CE <- matrix(c(1, 2, -1), nrow = 3, ncol = 1)
ce0 <- c(-4)
# Four inequality constraints (3x4)
CI <- rbind(c(1, 0, 0, -1),
            c(0, 1, 0, -1),
            c(0, 0, 1, 0))
ci0 <- c(0, 0, 0, 10)
rcppeigen_quadratic_solve(G, g0, CE, ce0, CI, ci0)
| /examples/teste3.R | no_license | PedroBSB/mlRFinance | R | false | false | 342 | r | G = matrix(c(2.1, 1.5, 1.2,
0.0, 2.2, 1.3,
1.0, 0.0, 3.1), 3, 3)
g0 = c(6.0, 1.0, 1.0)
CE = matrix(c(1, 2, -1), 3, 1)
ce0 = c(-4);
CI = matrix(c( 1, 0, 0,
0, 1, 0,
0, 0, 1,
-1, -1, 0), 3, 4)
ci0 = c(0, 0, 0, 10)
rcppeigen_quadratic_solve(G,g0,CE,ce0,CI,ci0)
|
# Exploratory script: build quality-scaled DNA reads with Bioconductor's
# Biostrings / ShortRead packages and write them out as FASTQ.
library(Biostrings)
library(ShortRead)
# Two example DNA sequences
x <- DNAStringSet('ACGTGTGCGCGATGCGAATTTACG')
x1 <- DNAStringSet('ACGTGTGCGCGATCGATTACG')
# Constant quality score of 38 for every base of each read
qual <- as.integer(rep(38, nchar(x)))
qual1 <- as.integer(rep(38, nchar(x1)))
ill_qual <- IlluminaQuality(qual)
ill_qual1 <- IlluminaQuality(qual1)
ugh <- BStringSet(ill_qual)
sr_q <- IntegerQuality(qual) # NOTE(review): unused in the rest of the script
# Pair sequence and quality, then write as FASTQ
y <- QualityScaledDNAStringSet(x, ill_qual)
writeXStringSet(y, '/tmp/x.fastq')
# NOTE(review): argument order looks wrong here -- grep(pattern, x, ...)
# takes the data as its second argument, so `y` lands in a later parameter.
# Possibly gsub('A', '', y) was intended; confirm before relying on `z`.
z <- grep('A', '', y)
# `yay` is assigned twice; only the second assignment (quality = ill_qual)
# survives and is what gets written below.
yay <- ShortReadQ(sread = x, quality = ugh, id = BStringSet('ogggieboogie'))
yay <- ShortReadQ(sread = x, quality = ill_qual, id = BStringSet('ogggieboogie'))
yay1 <- ShortReadQ(sread = c(x, x1), quality = c(ill_qual, ill_qual1),
                   id = BStringSet(c('ogggieboogie', 'x1')))
# Print the sequences of the two-read object
sread(yay1)
writeFastq(yay, '/tmp/yay.fastq', compress=FALSE)
| /inst/quality_test.R | no_license | philliplab/MotifBinner | R | false | false | 798 | r | library(Biostrings)
# Exploratory script: build quality-scaled DNA reads with Bioconductor's
# Biostrings / ShortRead packages and write them out as FASTQ.
# (The preceding library(Biostrings) call also applies here.)
library(ShortRead)
# Two example DNA sequences
x <- DNAStringSet('ACGTGTGCGCGATGCGAATTTACG')
x1 <- DNAStringSet('ACGTGTGCGCGATCGATTACG')
# Constant quality score of 38 for every base of each read
qual <- as.integer(rep(38, nchar(x)))
qual1 <- as.integer(rep(38, nchar(x1)))
ill_qual <- IlluminaQuality(qual)
ill_qual1 <- IlluminaQuality(qual1)
ugh <- BStringSet(ill_qual)
sr_q <- IntegerQuality(qual) # NOTE(review): unused in the rest of the script
# Pair sequence and quality, then write as FASTQ
y <- QualityScaledDNAStringSet(x, ill_qual)
writeXStringSet(y, '/tmp/x.fastq')
# NOTE(review): argument order looks wrong here -- grep(pattern, x, ...)
# takes the data as its second argument, so `y` lands in a later parameter.
# Possibly gsub('A', '', y) was intended; confirm before relying on `z`.
z <- grep('A', '', y)
# `yay` is assigned twice; only the second assignment (quality = ill_qual)
# survives and is what gets written below.
yay <- ShortReadQ(sread = x, quality = ugh, id = BStringSet('ogggieboogie'))
yay <- ShortReadQ(sread = x, quality = ill_qual, id = BStringSet('ogggieboogie'))
yay1 <- ShortReadQ(sread = c(x, x1), quality = c(ill_qual, ill_qual1),
                   id = BStringSet(c('ogggieboogie', 'x1')))
# Print the sequences of the two-read object
sread(yay1)
writeFastq(yay, '/tmp/yay.fastq', compress=FALSE)
\name{matern.cormat}
\alias{matern.cormat}
\title{
Matern Spatial Correlation
}
\description{
Sets a Matern spatial correlation matrix in Gaussian copula marginal regression models.
}
\usage{
matern.cormat(D, alpha = 0.5)
}
\arguments{
\item{D}{
matrix with values of the distances between pairs of data locations.
}
\item{alpha}{
value of the shape parameter of the Matern correlation class. The default \code{alpha = 0.5} corresponds to an exponential correlation model.
}
}
\value{
An object of class \code{\link{cormat.gcmr}} representing a Matern correlation matrix.
}
\details{
The Mat\'ern correlation function is inherited from the \code{geoR} package (Diggle and Ribeiro, 2007).
}
\references{
Diggle, P. and Ribeiro, P.J. (2007). \emph{Model-based Geostatistics}. Springer.
Masarotto, G. and Varin, C. (2012). Gaussian copula marginal regression. \emph{Electronic Journal of Statistics} \bold{6}, 1517--1549.
Masarotto, G. and Varin C. (2017). Gaussian Copula Regression in R. \emph{Journal of Statistical Software}, \bold{77}(8), 1--26.
}
\author{
Guido Masarotto and Cristiano Varin.
}
\seealso{
\code{\link{gcmr}}.
}
\keyword{regression}
\keyword{nonlinear} | /man/matern.cormat.Rd | no_license | cran/gcmr | R | false | false | 1,174 | rd | \name{matern.cormat}
\alias{matern.cormat}
\title{
Matern Spatial Correlation
}
\description{
Sets a Matern spatial correlation matrix in Gaussian copula marginal regression models.
}
\usage{
matern.cormat(D, alpha = 0.5)
}
\arguments{
\item{D}{
matrix with values of the distances between pairs of data locations.
}
\item{alpha}{
value of the shape parameter of the Matern correlation class. The default \code{alpha = 0.5} corresponds to an exponential correlation model.
}
}
\value{
An object of class \code{\link{cormat.gcmr}} representing a Matern correlation matrix.
}
\details{
The Mat\'ern correlation function is inherited from the \code{geoR} package (Diggle and Ribeiro, 2007).
}
\references{
Diggle, P. and Ribeiro, P.J. (2007). \emph{Model-based Geostatistics}. Springer.
Masarotto, G. and Varin, C. (2012). Gaussian copula marginal regression. \emph{Electronic Journal of Statistics} \bold{6}, 1517--1549.
Masarotto, G. and Varin C. (2017). Gaussian Copula Regression in R. \emph{Journal of Statistical Software}, \bold{77}(8), 1--26.
}
\author{
Guido Masarotto and Cristiano Varin.
}
\seealso{
\code{\link{gcmr}}.
}
\keyword{regression}
\keyword{nonlinear} |
#' Select nodes based on a walk distance from a specified node
#'
#' Select those nodes in the neighborhood of nodes connected a specified
#' distance from an initial node.
#'
#' @inheritParams render_graph
#' @param node The node from which the traversal will originate.
#' @param distance The maximum number of steps from the `node` for inclusion in
#' the selection.
#' @param set_op The set operation to perform upon consecutive selections of
#' graph nodes. This can either be as a `union` (the default), as an
#' intersection of selections with `intersect`, or, as a `difference` on the
#' previous selection, if it exists.
#' @return A graph object of class `dgr_graph`.
#' @examples
#' # Create a graph containing
#' # a balanced tree
#' graph <-
#' create_graph() %>%
#' add_balanced_tree(
#' k = 2, h = 2)
#'
#' # Create a graph selection by
#' # selecting nodes in the
#' # neighborhood of node `1`, where
#' # the neighborhood is limited by
#' # nodes that are 1 connection
#' # away from node `1`
#' graph <-
#' graph %>%
#' select_nodes_in_neighborhood(
#' node = 1,
#' distance = 1)
#'
#' # Get the selection of nodes
#' graph %>% get_selection()
#'
#' # Perform another selection
#' # of nodes, this time with a
#' # neighborhood spanning 2 nodes
#' # from node `1`
#' graph <-
#' graph %>%
#' clear_selection() %>%
#' select_nodes_in_neighborhood(
#' node = 1,
#' distance = 2)
#'
#' # Get the selection of nodes
#' graph %>% get_selection()
#'
#' @export
select_nodes_in_neighborhood <- function(graph,
                                         node,
                                         distance,
                                         set_op = "union") {

  # Get the time of function start
  time_function_start <- Sys.time()

  # Get the name of the function
  fcn_name <- get_calling_fcn()

  # Validation: Graph object is valid
  if (!graph_object_valid(graph)) {

    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }

  # Validation: Graph contains nodes
  if (!graph_contains_nodes(graph)) {

    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph contains no nodes")
  }

  # Obtain the input graph's node and edge
  # selection properties
  n_e_select_properties_in <-
    node_edge_selection_properties(graph = graph)

  # Get the graph's edge list once; the previous implementation
  # re-derived it with `get_edges()` for every single lookup inside
  # the traversal loops
  edges_df <- get_edges(graph, return_type = "df")

  # Element `i` of this list will hold the node IDs reachable in
  # exactly `i` steps (plus `node` itself at i = 1)
  nodes <- list()

  # Find nodes belonging to the neighborhood; `seq_len()` (rather than
  # `1:distance`) makes `distance = 0` a no-op instead of iterating
  # over c(1, 0) and selecting the first ring anyway
  for (i in seq_len(distance)) {

    if (i == 1) {

      # The first ring: the origin node plus its direct successors
      # and predecessors
      nodes[[i]] <-
        c(node,
          edges_df[which(edges_df[, 1] == node), 2],
          edges_df[which(edges_df[, 2] == node), 1])
    } else {

      nodes[[i]] <- vector(mode = "integer")

      # Collect the neighbors of every node found in the previous
      # ring; `seq_along()` safely handles an empty previous ring
      # (the old `1:length(...)` indexed with NA in that case)
      for (j in seq_along(nodes[[i - 1]])) {

        nodes[[i]] <-
          c(nodes[[i]],
            edges_df[which(edges_df[, 1] == nodes[[i - 1]][j]), 2],
            edges_df[which(edges_df[, 2] == nodes[[i - 1]][j]), 1])
      }
    }
  }

  # From list of nodes, obtain vector of unique
  # nodes as neighbors
  nodes_selected <- unique(unlist(nodes))

  # If no node ID values in `nodes_selected` return
  # the graph without a changed node selection
  if (length(nodes_selected) == 0) {
    return(graph)
  }

  # Obtain vector with node ID selection of nodes
  # already present
  nodes_prev_selection <- graph$node_selection$node

  # Incorporate selected nodes into graph's selection
  if (set_op == "union") {
    nodes_combined <-
      union(nodes_prev_selection, nodes_selected)
  } else if (set_op == "intersect") {
    nodes_combined <-
      intersect(nodes_prev_selection, nodes_selected)
  } else if (set_op == "difference") {
    nodes_combined <-
      base::setdiff(nodes_prev_selection, nodes_selected)
  }

  # Add the node ID values to the active selection
  # of nodes in `graph$node_selection`
  graph$node_selection <-
    replace_graph_node_selection(
      graph = graph,
      replacement = nodes_combined)

  # Replace `graph$edge_selection` with an empty df
  graph$edge_selection <- create_empty_esdf()

  # Obtain the output graph's node and edge
  # selection properties
  n_e_select_properties_out <-
    node_edge_selection_properties(graph = graph)

  # Update the `graph_log` df with an action
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = fcn_name,
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df))

  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }

  # Emit a message about the modification of a selection
  # if that option is set
  if (!is.null(graph$graph_info$display_msgs) &&
      graph$graph_info$display_msgs) {

    # Construct message body
    if (!n_e_select_properties_in[["node_selection_available"]] &
        !n_e_select_properties_in[["edge_selection_available"]]) {

      msg_body <-
        glue::glue(
          "created a new selection of \\
          {n_e_select_properties_out[['selection_count_str']]}")

    } else if (n_e_select_properties_in[["node_selection_available"]] |
               n_e_select_properties_in[["edge_selection_available"]]) {

      if (n_e_select_properties_in[["edge_selection_available"]]) {
        msg_body <-
          glue::glue(
            "modified an existing selection of\\
            {n_e_select_properties_in[['selection_count_str']]}:
            * {n_e_select_properties_out[['selection_count_str']]}\\
            are now in the active selection
            * used the `{set_op}` set operation")
      }

      if (n_e_select_properties_in[["node_selection_available"]]) {
        msg_body <-
          glue::glue(
            "created a new selection of\\
            {n_e_select_properties_out[['selection_count_str']]}:
            * this replaces\\
            {n_e_select_properties_in[['selection_count_str']]}\\
            in the prior selection")
      }
    }

    # Issue a message to the user
    emit_message(
      fcn_name = fcn_name,
      message_body = msg_body)
  }

  graph
}
| /R/select_nodes_in_neighborhood.R | permissive | wush978/DiagrammeR | R | false | false | 7,004 | r | #' Select nodes based on a walk distance from a specified node
#'
#' Select those nodes in the neighborhood of nodes connected a specified
#' distance from an initial node.
#'
#' @inheritParams render_graph
#' @param node The node from which the traversal will originate.
#' @param distance The maximum number of steps from the `node` for inclusion in
#' the selection.
#' @param set_op The set operation to perform upon consecutive selections of
#' graph nodes. This can either be as a `union` (the default), as an
#' intersection of selections with `intersect`, or, as a `difference` on the
#' previous selection, if it exists.
#' @return A graph object of class `dgr_graph`.
#' @examples
#' # Create a graph containing
#' # a balanced tree
#' graph <-
#' create_graph() %>%
#' add_balanced_tree(
#' k = 2, h = 2)
#'
#' # Create a graph selection by
#' # selecting nodes in the
#' # neighborhood of node `1`, where
#' # the neighborhood is limited by
#' # nodes that are 1 connection
#' # away from node `1`
#' graph <-
#' graph %>%
#' select_nodes_in_neighborhood(
#' node = 1,
#' distance = 1)
#'
#' # Get the selection of nodes
#' graph %>% get_selection()
#'
#' # Perform another selection
#' # of nodes, this time with a
#' # neighborhood spanning 2 nodes
#' # from node `1`
#' graph <-
#' graph %>%
#' clear_selection() %>%
#' select_nodes_in_neighborhood(
#' node = 1,
#' distance = 2)
#'
#' # Get the selection of nodes
#' graph %>% get_selection()
#'
#' @export
select_nodes_in_neighborhood <- function(graph,
                                         node,
                                         distance,
                                         set_op = "union") {

  # Get the time of function start
  time_function_start <- Sys.time()

  # Get the name of the function
  fcn_name <- get_calling_fcn()

  # Validation: Graph object is valid
  if (!graph_object_valid(graph)) {

    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }

  # Validation: Graph contains nodes
  if (!graph_contains_nodes(graph)) {

    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph contains no nodes")
  }

  # Obtain the input graph's node and edge
  # selection properties
  n_e_select_properties_in <-
    node_edge_selection_properties(graph = graph)

  # Get the graph's edge list once; the previous implementation
  # re-derived it with `get_edges()` for every single lookup inside
  # the traversal loops
  edges_df <- get_edges(graph, return_type = "df")

  # Element `i` of this list will hold the node IDs reachable in
  # exactly `i` steps (plus `node` itself at i = 1)
  nodes <- list()

  # Find nodes belonging to the neighborhood; `seq_len()` (rather than
  # `1:distance`) makes `distance = 0` a no-op instead of iterating
  # over c(1, 0) and selecting the first ring anyway
  for (i in seq_len(distance)) {

    if (i == 1) {

      # The first ring: the origin node plus its direct successors
      # and predecessors
      nodes[[i]] <-
        c(node,
          edges_df[which(edges_df[, 1] == node), 2],
          edges_df[which(edges_df[, 2] == node), 1])
    } else {

      nodes[[i]] <- vector(mode = "integer")

      # Collect the neighbors of every node found in the previous
      # ring; `seq_along()` safely handles an empty previous ring
      # (the old `1:length(...)` indexed with NA in that case)
      for (j in seq_along(nodes[[i - 1]])) {

        nodes[[i]] <-
          c(nodes[[i]],
            edges_df[which(edges_df[, 1] == nodes[[i - 1]][j]), 2],
            edges_df[which(edges_df[, 2] == nodes[[i - 1]][j]), 1])
      }
    }
  }

  # From list of nodes, obtain vector of unique
  # nodes as neighbors
  nodes_selected <- unique(unlist(nodes))

  # If no node ID values in `nodes_selected` return
  # the graph without a changed node selection
  if (length(nodes_selected) == 0) {
    return(graph)
  }

  # Obtain vector with node ID selection of nodes
  # already present
  nodes_prev_selection <- graph$node_selection$node

  # Incorporate selected nodes into graph's selection
  if (set_op == "union") {
    nodes_combined <-
      union(nodes_prev_selection, nodes_selected)
  } else if (set_op == "intersect") {
    nodes_combined <-
      intersect(nodes_prev_selection, nodes_selected)
  } else if (set_op == "difference") {
    nodes_combined <-
      base::setdiff(nodes_prev_selection, nodes_selected)
  }

  # Add the node ID values to the active selection
  # of nodes in `graph$node_selection`
  graph$node_selection <-
    replace_graph_node_selection(
      graph = graph,
      replacement = nodes_combined)

  # Replace `graph$edge_selection` with an empty df
  graph$edge_selection <- create_empty_esdf()

  # Obtain the output graph's node and edge
  # selection properties
  n_e_select_properties_out <-
    node_edge_selection_properties(graph = graph)

  # Update the `graph_log` df with an action
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = fcn_name,
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df))

  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }

  # Emit a message about the modification of a selection
  # if that option is set
  if (!is.null(graph$graph_info$display_msgs) &&
      graph$graph_info$display_msgs) {

    # Construct message body
    if (!n_e_select_properties_in[["node_selection_available"]] &
        !n_e_select_properties_in[["edge_selection_available"]]) {

      msg_body <-
        glue::glue(
          "created a new selection of \\
          {n_e_select_properties_out[['selection_count_str']]}")

    } else if (n_e_select_properties_in[["node_selection_available"]] |
               n_e_select_properties_in[["edge_selection_available"]]) {

      if (n_e_select_properties_in[["edge_selection_available"]]) {
        msg_body <-
          glue::glue(
            "modified an existing selection of\\
            {n_e_select_properties_in[['selection_count_str']]}:
            * {n_e_select_properties_out[['selection_count_str']]}\\
            are now in the active selection
            * used the `{set_op}` set operation")
      }

      if (n_e_select_properties_in[["node_selection_available"]]) {
        msg_body <-
          glue::glue(
            "created a new selection of\\
            {n_e_select_properties_out[['selection_count_str']]}:
            * this replaces\\
            {n_e_select_properties_in[['selection_count_str']]}\\
            in the prior selection")
      }
    }

    # Issue a message to the user
    emit_message(
      fcn_name = fcn_name,
      message_body = msg_body)
  }

  graph
}
|
\name{profit_neighbors}
\alias{profit_neighbors}
\title{Find profit for neighbors}
\usage{
profit_neighbors(landscape, neighbors)
}
\arguments{
\item{landscape}{A landscape object (previously
instantiated)}
\item{neighbors}{A matrix of neighboring points}
}
\value{
Returns a vector of profits for each neighboring point
}
\description{
Takes a landscape object and a matrix of neighboring points,
and determines the profit value for each of the neighboring
points
}
| /man/profit_neighbors.Rd | no_license | skysmith14/Programming-Project | R | false | false | 474 | rd | \name{profit_neighbors}
\alias{profit_neighbors}
\title{Find profit for neighbors}
\usage{
profit_neighbors(landscape, neighbors)
}
\arguments{
\item{landscape}{A landscape object (previously
instantiated)}
\item{neighbors}{A matrix of neighboring points}
}
\value{
Returns a vector of profits for each neighboring point
}
\description{
Takes a landscape object and a matrix of neighboring points,
and determines the profit value for each of the neighboring
points
}
|
#' @title Differential abundance (DA) analysis for
#' microbial absolute abundance data.
#'
#' @aliases ancom
#'
#' @description Determine taxa whose absolute abundances, per unit volume, of
#' the ecosystem (e.g. gut) are significantly different with changes in the
#' covariate of interest (e.g. the group effect). The current version of
#' \code{ancombc} function implements Analysis of Compositions of Microbiomes
#' with Bias Correction (ANCOM-BC) in cross-sectional data while allowing
#' the adjustment of covariates.
#'
#' @details The definition of structural zero can be found at
#' \href{https://doi.org/10.3389/fmicb.2017.02114}{ANCOM-II}.
#' Setting \code{neg_lb = TRUE} indicates that you are using both criteria
#' stated in section 3.2 of
#' \href{https://doi.org/10.3389/fmicb.2017.02114}{ANCOM-II}
#' to detect structural zeros; otherwise, the algorithm will only use the
#' equation 1 in section 3.2 for declaring structural zeros. Generally, it is
#' recommended to set \code{neg_lb = TRUE} when the sample size per group is
#' relatively large (e.g. > 30).
#'
#' @param phyloseq a phyloseq-class object, which consists of a feature table
#' (microbial observed abundance table), a sample metadata, a taxonomy table
#' (optional), and a phylogenetic tree (optional). The row names of the
#' metadata must match the sample names of the feature table, and the row names
#' of the taxonomy table must match the taxon (feature) names of the feature
#' table. See \code{\link[phyloseq]{phyloseq}} for more details.
#' @param formula the character string expresses how the microbial absolute
#' abundances for each taxon depend on the variables in metadata.
#' @param p_adj_method method to adjust p-values by. Default is "holm".
#' Options include "holm", "hochberg", "hommel", "bonferroni", "BH", "BY",
#' "fdr", "none". See \code{\link[stats]{p.adjust}} for more details.
#' @param zero_cut a numerical fraction between 0 and 1. Taxa with proportion of
#' zeroes greater than \code{zero_cut} will be excluded in the analysis. Default
#' is 0.90.
#' @param lib_cut a numerical threshold for filtering samples based on library
#' sizes. Samples with library sizes less than \code{lib_cut} will be
#' excluded in the analysis.
#' @param group the name of the group variable in metadata. Specifying
#' \code{group} is required for detecting structural zeros and
#' performing global test.
#' @param struc_zero whether to detect structural zeros. Default is FALSE.
#' @param neg_lb whether to classify a taxon as a structural zero in the
#' corresponding study group using its asymptotic lower bound.
#' Default is FALSE.
#' @param tol the iteration convergence tolerance for the E-M algorithm.
#' Default is 1e-05.
#' @param max_iter the maximum number of iterations for the E-M algorithm.
#' Default is 100.
#' @param conserve whether to use a conservative variance estimate of
#' the test statistic. It is recommended if the sample size is small and/or
#' the number of differentially abundant taxa is believed to be large.
#' Default is FALSE.
#' @param alpha level of significance. Default is 0.05.
#' @param global whether to perform global test. Default is FALSE.
#'
#' @return a \code{list} with components:
#' \itemize{
#' \item{ \code{feature_table}, a \code{data.frame} of pre-processed
#' (based on \code{zero_cut} and \code{lib_cut}) microbial observed
#' abundance table. }
#' \item{ \code{zero_ind}, a logical \code{matrix} with TRUE indicating
#' the taxon is identified as a structural zero for the specified
#' \code{group} variable.}
#' \item{ \code{samp_frac}, a numeric vector of estimated sampling
#' fractions in log scale (natural log). }
#' \item{ \code{resid}, a \code{matrix} of residuals from the ANCOM-BC
#' log-linear (natural log) model.
#' Rows are taxa and columns are samples.}
#' \item{ \code{delta_em}, estimated bias terms through E-M algorithm. }
#' \item{ \code{delta_wls}, estimated bias terms through weighted
#' least squares (WLS) algorithm.}
#' \item{ \code{res}, a \code{list} containing ANCOM-BC primary result,
#' which consists of:}
#' \itemize{
#' \item{ \code{beta}, a \code{data.frame} of coefficients obtained
#' from the ANCOM-BC log-linear (natural log) model. }
#' \item{ \code{se}, a \code{data.frame} of standard errors (SEs) of
#' \code{beta}. }
#' \item{ \code{W}, a \code{data.frame} of test statistics.
#' \code{W = beta/se}. }
#' \item{ \code{p_val}, a \code{data.frame} of p-values. P-values are
#' obtained from two-sided Z-test using the test statistic \code{W}. }
#' \item{ \code{q_val}, a \code{data.frame} of adjusted p-values.
#' Adjusted p-values are obtained by applying \code{p_adj_method}
#' to \code{p_val}.}
#' \item{ \code{diff_abn}, a logical \code{data.frame}. TRUE if the
#' taxon has \code{q_val} less than \code{alpha}.}
#' }
#' \item{ \code{res_global}, a \code{data.frame} containing ANCOM-BC
#' global test result for the variable specified in \code{group},
#' each column is:}
#' \itemize{
#' \item{ \code{W}, test statistics.}
#' \item{ \code{p_val}, p-values, which are obtained from two-sided
#' Chi-square test using \code{W}.}
#' \item{ \code{q_val}, adjusted p-values. Adjusted p-values are
#' obtained by applying \code{p_adj_method} to \code{p_val}.}
#' \item{ \code{diff_abn}, A logical vector. TRUE if the taxon has
#' \code{q_val} less than \code{alpha}.}
#' }
#' }
#'
#' @examples
#' #================Build a Phyloseq-Class Object from Scratch==================
#' library(phyloseq)
#'
#' otu_mat = matrix(sample(1:100, 100, replace = TRUE), nrow = 10, ncol = 10)
#' rownames(otu_mat) = paste0("taxon", 1:nrow(otu_mat))
#' colnames(otu_mat) = paste0("sample", 1:ncol(otu_mat))
#'
#'
#' meta = data.frame(group = sample(LETTERS[1:4], size = 10, replace = TRUE),
#' row.names = paste0("sample", 1:ncol(otu_mat)),
#' stringsAsFactors = FALSE)
#'
#' tax_mat = matrix(sample(letters, 70, replace = TRUE),
#' nrow = nrow(otu_mat), ncol = 7)
#' rownames(tax_mat) = rownames(otu_mat)
#' colnames(tax_mat) = c("Kingdom", "Phylum", "Class", "Order",
#' "Family", "Genus", "Species")
#'
#' OTU = otu_table(otu_mat, taxa_are_rows = TRUE)
#' META = sample_data(meta)
#' TAX = tax_table(tax_mat)
#' physeq = phyloseq(OTU, META, TAX)
#'
#' #========================Run ANCOMBC Using a Real Data=======================
#'
#' library(phyloseq)
#' library(tidyverse)
#' data(GlobalPatterns)
#'
#' # Aggregate to phylum level
#' phylum_data = tax_glom(GlobalPatterns, "Phylum")
#' # The taxonomy table
#' tax_mat = as(tax_table(phylum_data), "matrix")
#'
#' # Run ancombc function
#' out = ancombc(phyloseq = phylum_data, formula = "SampleType",
#' p_adj_method = "holm", zero_cut = 0.90, lib_cut = 1000,
#' group = "SampleType", struc_zero = TRUE, neg_lb = FALSE,
#' tol = 1e-5, max_iter = 100, conserve = TRUE,
#' alpha = 0.05, global = TRUE)
#'
#' res = out$res
#' res_global = out$res_global
#'
#' @author Huang Lin
#'
#
#'
#' @import stats
#' @import phyloseq
#' @importFrom MASS ginv
#' @importFrom nloptr neldermead
#' @importFrom Rdpack reprompt
#'
#' @export
ancombc = function(phyloseq, formula, p_adj_method = "holm", zero_cut = 0.90,
                   lib_cut, group = NULL, struc_zero = FALSE, neg_lb = FALSE,
                   tol = 1e-05, max_iter = 100, conserve = FALSE, alpha = 0.05,
                   global = FALSE){
  # Step 1: pre-process the data (filter taxa/samples, extract metadata)
  prep = data_prep(phyloseq, group, zero_cut, lib_cut, global = global)
  feature_table = prep$feature_table
  meta_data = prep$meta_data
  global = prep$global
  n_taxa = nrow(feature_table)
  # Work on log observed abundances; a pseudocount of 1 avoids log(0)
  y = log(feature_table + 1)
  x = get_x(formula, meta_data)
  covariates = colnames(x)
  n_covariates = length(covariates)
  # Step 2: flag taxa that are structurally absent from some group
  zero_ind = NULL
  if (struc_zero) {
    if (is.null(group)) {
      stop("Please specify the group variable for detecting structural zeros.")
    }
    zero_ind = get_struc_zero(feature_table, meta_data, group, neg_lb)
  }
  # Step 3: iterative least-squares estimation of model parameters
  para = para_est(y, meta_data, formula, tol, max_iter)
  # Step 4: estimate the between-sample bias terms
  bias = bias_est(para$beta, para$var_hat, tol, max_iter, n_taxa)
  # Step 5: bias-corrected coefficients, standard errors, sampling fractions
  fit = fit_summary(y, x, para$beta, para$var_hat, bias$delta_em,
                    bias$var_delta, conserve)
  # Step 6: primary per-covariate results (two-sided z-tests)
  W = fit$beta_hat / fit$se_hat
  p = 2 * pnorm(abs(W), mean = 0, sd = 1, lower.tail = FALSE)
  q = apply(p, 2, function(pv) p.adjust(pv, method = p_adj_method))
  diff_abn = q < alpha & !is.na(q)
  res = list(beta = data.frame(fit$beta_hat, check.names = FALSE),
             se = data.frame(fit$se_hat, check.names = FALSE),
             W = data.frame(W, check.names = FALSE),
             p_val = data.frame(p, check.names = FALSE),
             q_val = data.frame(q, check.names = FALSE),
             diff_abn = data.frame(diff_abn, check.names = FALSE))
  # Step 7: optional multi-group (global) test
  res_global = NULL
  if (global) {
    res_global = global_test(y, x, group, fit$beta_hat, para$var_cov_hat,
                             p_adj_method, alpha)
  }
  # Step 8: overwrite p/q-values for taxa flagged as structural zeros
  combined = res_combine_zero(x, group, struc_zero, zero_ind, alpha,
                              global, res, res_global)
  # Step 9: assemble the output list
  list(feature_table = feature_table, zero_ind = zero_ind,
       samp_frac = fit$d_hat, resid = para$e,
       delta_em = bias$delta_em, delta_wls = bias$delta_wls,
       res = combined$res, res_global = combined$res_global)
}
# Estimate the between-sample bias term for each non-intercept covariate.
#
# For every covariate column of `beta` (the intercept is skipped), the
# per-taxon coefficient estimates are modeled as a three-component normal
# mixture fitted by the E-M algorithm (`em_iter`), yielding an E-M bias
# estimate and a weighted least squares (WLS) bias estimate with its
# variance.
#
# Args:
#   beta: taxa x covariates matrix of regression coefficients.
#   var_hat: taxa x covariates matrix of coefficient variances.
#   tol: E-M convergence tolerance, forwarded to `em_iter`.
#   max_iter: E-M iteration cap, forwarded to `em_iter`.
#   n_taxa: number of taxa; currently unused here, retained so the
#     signature stays compatible with existing callers.
#
# Returns:
#   list(delta_em, delta_wls, var_delta), each of length ncol(beta) - 1.
bias_est = function(beta, var_hat, tol, max_iter, n_taxa) {
  delta_em = rep(NA, ncol(beta) - 1)
  delta_wls = rep(NA, ncol(beta) - 1)
  var_delta = rep(NA, ncol(beta) - 1)
  for (i in seq_along(delta_em)) {
    # Ignore the intercept; drop taxa with missing estimates
    Delta = beta[, i + 1]
    Delta = Delta[!is.na(Delta)]
    nu0 = var_hat[, i + 1]
    nu0 = nu0[!is.na(nu0)]
    # Initial mixing proportions: most taxa unbiased, small down/up fractions
    pi0_0 = 0.75
    pi1_0 = 0.125
    pi2_0 = 0.125
    # Initial center: mean of the interquartile bulk of the coefficients
    delta_0 = mean(Delta[Delta >= quantile(Delta, 0.25, na.rm = TRUE) &
                           Delta <= quantile(Delta, 0.75, na.rm = TRUE)],
                   na.rm = TRUE)
    # Initial location shifts of the lower/upper outlying components
    l1_0 = mean(Delta[Delta < quantile(Delta, 0.125, na.rm = TRUE)],
                na.rm = TRUE)
    l2_0 = mean(Delta[Delta > quantile(Delta, 0.875, na.rm = TRUE)],
                na.rm = TRUE)
    # Initial extra variances of the outlying components; fall back to 1
    # when undefined (< 2 points in the tail) or degenerate
    kappa1_0 = var(Delta[Delta < quantile(Delta, 0.125, na.rm = TRUE)],
                   na.rm = TRUE)
    if (is.na(kappa1_0) || kappa1_0 == 0) kappa1_0 = 1
    kappa2_0 = var(Delta[Delta > quantile(Delta, 0.875, na.rm = TRUE)],
                   na.rm = TRUE)
    if (is.na(kappa2_0) || kappa2_0 == 0) kappa2_0 = 1
    # Apply E-M algorithm
    fiuo_em = em_iter(Delta, nu0, pi0_0, pi1_0, pi2_0, delta_0,
                      l1_0, l2_0, kappa1_0, kappa2_0, tol, max_iter)
    # The EM estimator of bias
    delta_em[i] = fiuo_em$delta
    # The WLS estimator of bias
    pi1 = fiuo_em$pi1
    pi2 = fiuo_em$pi2
    l1 = fiuo_em$l1
    l2 = fiuo_em$l2
    kappa1 = fiuo_em$kappa1
    kappa2 = fiuo_em$kappa2
    # Cluster 0: central (unbiased) taxa
    C0 = which(Delta >= quantile(Delta, pi1, na.rm = TRUE) &
                 Delta < quantile(Delta, 1 - pi2, na.rm = TRUE))
    # Cluster 1: lower outliers
    C1 = which(Delta < quantile(Delta, pi1, na.rm = TRUE))
    # Cluster 2: upper outliers
    C2 = which(Delta >= quantile(Delta, 1 - pi2, na.rm = TRUE))
    # Denominator of the WLS estimator: sum of inverse (inflated) variances.
    # (The original comments labeling numerator/denominator were swapped.)
    nu = nu0
    nu[C1] = nu[C1] + kappa1
    nu[C2] = nu[C2] + kappa2
    wls_deno = sum(1 / nu)
    # Numerator of the WLS estimator: inverse-variance weighted,
    # shift-adjusted coefficients
    wls_nume = 1 / nu
    wls_nume[C0] = (wls_nume * Delta)[C0]
    wls_nume[C1] = (wls_nume * (Delta - l1))[C1]
    wls_nume[C2] = (wls_nume * (Delta - l2))[C2]
    wls_nume = sum(wls_nume)
    delta_wls[i] = wls_nume / wls_deno
    # Estimate the variance of the WLS bias estimate
    var_delta[i] = 1 / wls_deno
    if (is.na(var_delta[i])) var_delta[i] = 0
  }
  fiuo_bias = list(delta_em = delta_em, delta_wls = delta_wls,
                   var_delta = var_delta)
  return(fiuo_bias)
}
# Pre-process the phyloseq input: extract the feature table and metadata,
# validate the group variable, and filter low-information taxa and samples.
data_prep = function(phyloseq, group, zero_cut, lib_cut, global = global) {
  # Pull the abundance table and sample metadata out of the phyloseq object
  feature_table = data.frame(as(otu_table(phyloseq), "matrix"),
                             check.names = FALSE)
  meta_data = as(sample_data(phyloseq), "data.frame")
  # Re-level factors so categories absent from the data are dropped
  meta_data[] = lapply(meta_data, function(col) {
    if (is.factor(col)) factor(col) else col
  })
  # Validate the group variable / decide whether the global test is feasible
  if (is.null(group)) {
    if (global) {
      stop("Please specify the group variable for the global test.")
    }
  } else {
    n_groups = length(unique(meta_data[, group]))
    if (n_groups < 2) {
      stop("The group variable should have >= 2 categories.")
    } else if (n_groups < 3) {
      global = FALSE
      warning("The multi-group comparison will be deactivated as the group variable has < 3 categories.")
    }
  }
  # Remove taxa whose proportion of zero counts is at or above zero_cut
  zero_frac = apply(feature_table, 1, function(counts)
    sum(counts == 0, na.rm = TRUE) / sum(!is.na(counts)))
  taxa_drop = which(zero_frac >= zero_cut)
  if (length(taxa_drop) > 0) {
    feature_table = feature_table[- taxa_drop, ]
  }
  # Remove samples whose library size falls below lib_cut
  lib_size = colSums(feature_table, na.rm = TRUE)
  samp_drop = which(lib_size < lib_cut)
  if (length(samp_drop) > 0) {
    feature_table = feature_table[, - samp_drop]
    meta_data = meta_data[- samp_drop, ]
  }
  list(feature_table = feature_table,
       meta_data = meta_data,
       global = global)
}
# E-M estimation of the bias mixture model for a single covariate.
#
# Models the per-taxon coefficient estimates `Delta` (with sampling
# variances `nu0`) as a three-component normal mixture:
#   component 0: mean delta                         (unbiased taxa),
#   component 1: mean delta + l1, extra var kappa1  (shifted down, l1 <= 0),
#   component 2: mean delta + l2, extra var kappa2  (shifted up,   l2 >= 0).
# The `*_0` arguments supply initial parameter values; iteration stops when
# the parameter change drops below `tol` or after `max_iter` iterations.
# Returns list(pi0, pi1, pi2, delta, l1, l2, kappa1, kappa2).
#
# NOTE(review): if max_iter < 1 the while loop body never executes and the
# `*_new` objects used to build the result are undefined -- confirm callers
# always pass max_iter >= 1.
em_iter = function(Delta, nu0, pi0_0, pi1_0, pi2_0, delta_0,
                   l1_0, l2_0, kappa1_0, kappa2_0, tol, max_iter) {
  # Store all paras in vectors/matrices (full history, one entry/iteration)
  pi0_vec = pi0_0
  pi1_vec = pi1_0
  pi2_vec = pi2_0
  delta_vec = delta_0
  l1_vec = l1_0
  l2_vec = l2_0
  kappa1_vec = kappa1_0
  kappa2_vec = kappa2_0
  n_taxa = length(Delta)
  # E-M iteration
  iterNum = 0
  epsilon = 100
  while (epsilon > tol & iterNum < max_iter) {
    # Current value of paras (last element of each history vector)
    pi0 = pi0_vec[length(pi0_vec)]
    pi1 = pi1_vec[length(pi1_vec)]
    pi2 = pi2_vec[length(pi2_vec)]
    delta = delta_vec[length(delta_vec)]
    l1 = l1_vec[length(l1_vec)]
    l2 = l2_vec[length(l2_vec)]
    kappa1 = kappa1_vec[length(kappa1_vec)]
    kappa2 = kappa2_vec[length(kappa2_vec)]
    # E-step: density of every observation under each mixture component
    pdf0 = vapply(seq(n_taxa), function(i)
      dnorm(Delta[i], delta, sqrt(nu0[i])), FUN.VALUE = double(1))
    pdf1 = vapply(seq(n_taxa), function(i)
      dnorm(Delta[i], delta + l1, sqrt(nu0[i] + kappa1)),
      FUN.VALUE = double(1))
    pdf2 = vapply(seq(n_taxa), function(i)
      dnorm(Delta[i], delta + l2, sqrt(nu0[i] + kappa2)),
      FUN.VALUE = double(1))
    # Posterior membership probabilities (responsibilities); 0/0 -> 0
    r0i = pi0*pdf0/(pi0*pdf0 + pi1*pdf1 + pi2*pdf2)
    r0i[is.na(r0i)] = 0
    r1i = pi1*pdf1/(pi0*pdf0 + pi1*pdf1 + pi2*pdf2)
    r1i[is.na(r1i)] = 0
    r2i = pi2*pdf2/(pi0*pdf0 + pi1*pdf1 + pi2*pdf2)
    r2i[is.na(r2i)] = 0
    # M-step: closed-form responsibility-weighted updates
    pi0_new = mean(r0i, na.rm = TRUE)
    pi1_new = mean(r1i, na.rm = TRUE)
    pi2_new = mean(r2i, na.rm = TRUE)
    delta_new = sum(r0i*Delta/nu0 + r1i*(Delta-l1)/(nu0+kappa1) +
                      r2i*(Delta-l2)/(nu0+kappa2), na.rm = TRUE)/
      sum(r0i/nu0 + r1i/(nu0+kappa1) + r2i/(nu0+kappa2), na.rm = TRUE)
    # Shift updates, constrained: l1 nonpositive, l2 nonnegative
    l1_new = min(sum(r1i*(Delta-delta)/(nu0+kappa1), na.rm = TRUE)/
                   sum(r1i/(nu0+kappa1), na.rm = TRUE), 0)
    l2_new = max(sum(r2i*(Delta-delta)/(nu0+kappa2), na.rm = TRUE)/
                   sum(r2i/(nu0+kappa2), na.rm = TRUE), 0)
    # Nelder-Mead simplex algorithm for kappa1 and kappa2 (no closed form;
    # minimize the negative responsibility-weighted log-likelihood)
    obj_kappa1 = function(x){
      log_pdf = log(vapply(seq(n_taxa), function(i)
        dnorm(Delta[i], delta+l1, sqrt(nu0[i]+x)),
        FUN.VALUE = double(1)))
      log_pdf[is.infinite(log_pdf)] = 0
      -sum(r1i*log_pdf, na.rm = TRUE)
    }
    kappa1_new = nloptr::neldermead(x0 = kappa1,
                                    fn = obj_kappa1, lower = 0)$par
    obj_kappa2 = function(x){
      log_pdf = log(vapply(seq(n_taxa), function(i)
        dnorm(Delta[i], delta+l2, sqrt(nu0[i]+x)),
        FUN.VALUE = double(1)))
      log_pdf[is.infinite(log_pdf)] = 0
      -sum(r2i*log_pdf, na.rm = TRUE)
    }
    kappa2_new = nloptr::neldermead(x0 = kappa2,
                                    fn = obj_kappa2, lower = 0)$par
    # Merge to the paras vectors/matrices
    pi0_vec = c(pi0_vec, pi0_new)
    pi1_vec = c(pi1_vec, pi1_new)
    pi2_vec = c(pi2_vec, pi2_new)
    delta_vec = c(delta_vec, delta_new)
    l1_vec = c(l1_vec, l1_new)
    l2_vec = c(l2_vec, l2_new)
    kappa1_vec = c(kappa1_vec, kappa1_new)
    kappa2_vec = c(kappa2_vec, kappa2_new)
    # Calculate the new epsilon: Euclidean distance between successive
    # parameter vectors
    epsilon = sqrt((pi0_new-pi0)^2 + (pi1_new-pi1)^2 + (pi2_new-pi2)^2 +
                     (delta_new-delta)^2 + (l1_new-l1)^2 + (l2_new-l2)^2 +
                     (kappa1_new-kappa1)^2 + (kappa2_new-kappa2)^2)
    iterNum = iterNum + 1
  }
  fiuo_em = list(pi0 = pi0_new, pi1 = pi1_new, pi2 = pi2_new,
                 delta = delta_new, l1 = l1_new, l2 = l2_new,
                 kappa1 = kappa1_new, kappa2 = kappa2_new)
  return(fiuo_em)
}
# Summarize the fit after bias correction.
#
# Subtracts the estimated bias `delta_em` from every non-intercept
# coefficient column, derives standard errors (optionally inflated by the
# bias variance `var_delta` when `conserve = TRUE`), and estimates the
# per-sample sampling fractions `d_hat` as the mean residual per sample.
# Returns list(beta_hat, se_hat, d_hat).
fit_summary = function(y, x, beta, var_hat, delta_em, var_delta, conserve) {
  n_taxa = nrow(y)
  beta_hat = beta
  # Bias-correct all columns except the intercept (first column)
  beta_hat[, -1] = t(t(beta_hat[, -1]) - delta_em)
  if (conserve) {
    # Account for the variance of delta_hat
    se_hat = sqrt(sweep(var_hat, 2, c(0, var_delta), "+") +
      2 * sqrt(sweep(var_hat, 2, c(0, var_delta), "*")))
  }else{ se_hat = sqrt(var_hat) }
  # Per-sample offsets: average residual of each sample across taxa.
  # NOTE(review): d_hat is grown with rbind() inside the loop (quadratic
  # copying); consider preallocating if n_taxa is large.
  d_hat = vector()
  for (i in seq_len(n_taxa)) {
    d_hat_i = y[i, ] - x %*% beta_hat[i, ]
    d_hat = rbind(d_hat, d_hat_i)
  }
  d_hat = colMeans(d_hat, na.rm = TRUE)
  fiuo_fit = list(beta_hat = beta_hat, se_hat = se_hat, d_hat = d_hat)
  return(fiuo_fit)
}
# Identify structural zeros: taxa that are never detected within a study
# group. With `neg_lb = TRUE`, a taxon is additionally flagged when the 95%
# asymptotic lower bound of its per-group detection proportion is <= 0.
# Returns a taxa x groups logical matrix.
get_struc_zero = function(feature_table, meta_data, group, neg_lb) {
  group_data = factor(meta_data[, group])
  group_levels = levels(group_data)
  n_group = nlevels(group_data)
  abn_mat = as.matrix(feature_table)
  n_taxa = nrow(feature_table)
  # Presence/absence indicator; missing counts are treated as absent
  presence = abn_mat
  presence[is.na(presence)] = 0
  presence[presence != 0] = 1
  # Per-group detection proportion and non-missing sample count per taxon
  p_hat = matrix(NA, nrow = n_taxa, ncol = n_group,
                 dimnames = list(rownames(feature_table), group_levels))
  samp_size = matrix(NA, nrow = n_taxa, ncol = n_group,
                     dimnames = list(rownames(feature_table), group_levels))
  for (i in seq_len(n_taxa)) {
    p_hat[i, ] = tapply(presence[i, ], group_data, mean, na.rm = TRUE)
    samp_size[i, ] = tapply(abn_mat[i, ], group_data,
                            function(v) sum(!is.na(v)))
  }
  # 95% asymptotic lower bound of the detection proportion
  p_hat_lo = p_hat - 1.96 * sqrt(p_hat * (1 - p_hat) / samp_size)
  zero_ind = (p_hat == 0)
  if (neg_lb) zero_ind[p_hat_lo <= 0] = TRUE
  colnames(zero_ind) = paste0("structural_zero (", group,
                              " = ", colnames(zero_ind), ")")
  return(zero_ind)
}
# Build the model (design) matrix for `formula` from `meta_data`, keeping
# rows that contain missing values (na.action = "na.pass"). The previous
# na.action option is restored on exit.
get_x = function(formula, meta_data) {
  old_opts = options(na.action = "na.pass")  # keep NA rows in the design
  on.exit(options(old_opts), add = TRUE)
  model.matrix(formula(paste0("~", formula)), data = meta_data)
}
# Global (multi-group) Wald-type test for the `group` variable.
#
# For each taxon, tests whether all group-level coefficients are jointly
# zero using W = b' (A V A')^- b built from the bias-corrected coefficients
# `beta_hat` and the per-taxon covariance matrices `var_cov_hat`. P-values
# use a chi-square reference with df = number of group coefficients, are
# adjusted by `p_adj_method`, and thresholded at `alpha`.
# Returns a data.frame with columns W, p_val, q_val, diff_abn (rows = taxa).
global_test = function(y, x, group, beta_hat, var_cov_hat, p_adj_method, alpha){
  taxa_id = rownames(y)
  n_taxa = nrow(y)
  covariates = colnames(x)
  res_global = data.frame(matrix(NA, nrow = n_taxa, ncol = 4))
  rownames(res_global) = taxa_id
  colnames(res_global) = c("W", "p_val", "q_val", "diff_abn")
  # Design-matrix columns belonging to the group variable.
  # NOTE(review): grepl() does substring matching, so another covariate
  # whose name contains `group` would also be selected -- confirm callers
  # use distinct variable names.
  group_ind = grepl(group, covariates)
  # Subset the coefficients and covariance matrices to the group columns
  beta_hat_sub = beta_hat[, group_ind]
  var_cov_hat_sub = lapply(var_cov_hat, function(x)
    x = x[group_ind, group_ind])
  for (i in seq_len(n_taxa)) {
    # Loop over taxa
    beta_hat_sub_i = beta_hat_sub[i, ]
    var_cov_hat_sub_i = var_cov_hat_sub[[i]]
    # Wald statistic; ginv() guards against singular covariance matrices
    A = diag(x = 1, nrow = length(beta_hat_sub_i))
    W = t(A %*% beta_hat_sub_i) %*%
      MASS::ginv(A %*% var_cov_hat_sub_i %*% t(A)) %*%
      (A %*% beta_hat_sub_i)
    # Two-sided p-value from the chi-square reference.
    # NOTE(review): a Wald chi-square test is usually one-sided (upper
    # tail); the 2 * min(lower, upper) construction here doubles the
    # smaller tail -- confirm this matches the intended methodology.
    p = 2 * min(pchisq(W, df = length(beta_hat_sub_i), lower.tail = TRUE),
                pchisq(W, df = length(beta_hat_sub_i), lower.tail = FALSE))
    res_global[i, "W"] = W
    res_global[i, "p_val"] = p
  }
  # Model summary; missing p-values are treated as non-significant (q = 1)
  q_global = p.adjust(res_global[, "p_val"], method = p_adj_method)
  q_global[is.na(q_global)] = 1
  diff_global = q_global < alpha & !is.na(q_global)
  res_global$q_val = q_global
  res_global$diff_abn = diff_global
  return(res_global)
}
# Iteratively estimate regression coefficients and sampling fractions.
#
# Per taxon, fits the linear model log-abundance ~ covariates while
# alternating between (1) refitting the taxon-wise coefficients `beta` with
# the current sample offsets `d` subtracted from the response and
# (2) updating `d` as the per-sample mean deviation from the fitted values,
# until the combined change falls below `tol` or `max_iter` is reached.
#
# Returns list(beta, d, e, var_cov_hat, var_hat): coefficients
# (taxa x covariates), per-sample offsets, the residual matrix, per-taxon
# coefficient covariance matrices, and their diagonals.
para_est = function(y, meta_data, formula, tol, max_iter) {
  x = get_x(formula, meta_data)
  taxa_id = rownames(y)
  n_taxa = nrow(y)
  samp_id = colnames(y)
  n_samp = ncol(y)
  covariates = colnames(x)
  # Sampling fractions (log scale), initialized at zero
  d = rep(0, n_samp)
  tformula = formula(paste0("y ~ ", formula))
  fits = lapply(seq_len(n_taxa), function(i) {
    df = data.frame(y = unlist(y[i, ]) - d, meta_data)
    return(lm(tformula, data = df))
  })
  # Regression coefficients, aligned to the full covariate set
  beta = lapply(fits, function(i) {
    beta_i = rep(NA, length(covariates)) # prevent errors of missing values
    coef_i = coef(i)
    beta_i[match(names(coef_i), covariates)] = coef_i
    return(beta_i)
  })
  beta = Reduce('rbind', beta)
  # Iterative least square.
  # NOTE(review): the first loop pass refits with d still all-zero,
  # duplicating the initialization fit above; harmless but redundant.
  iterNum = 0
  epsilon = 100
  while (epsilon > tol & iterNum < max_iter) {
    # Updating beta: refit each taxon with the current offsets removed
    fits = lapply(seq_len(n_taxa), function(i) {
      df = data.frame(y = unlist(y[i, ]) - d, meta_data)
      return(lm(tformula, data = df))
    })
    beta_new = lapply(fits, function(i) {
      beta_i = rep(NA, length(covariates))
      coef_i = coef(i)
      beta_i[match(names(coef_i), covariates)] = coef_i
      return(beta_i)
    })
    beta_new = Reduce('rbind', beta_new)
    # Updating d: per-sample mean deviation from the fitted values
    y_hat = lapply(fits, function(i) {
      y_hat_i = rep(NA, n_samp)
      fit_i = fitted(i)
      y_hat_i[match(names(fit_i), samp_id)] = fit_i
      return(y_hat_i)
    })
    y_hat = Reduce('rbind', y_hat)
    d_new = colMeans(y - y_hat, na.rm = TRUE)
    # Iteration: combined Euclidean change in beta and d
    epsilon = sqrt(sum((beta_new - beta)^2, na.rm = TRUE) +
                     sum((d_new - d)^2, na.rm = TRUE))
    iterNum = iterNum + 1
    beta = beta_new
    d = d_new
  }
  # Regression residuals (offsets removed from each sample's column)
  y_hat = lapply(fits, function(i) {
    y_hat_i = rep(NA, n_samp)
    fit_i = fitted(i)
    y_hat_i[match(names(fit_i), samp_id)] = fit_i
    return(y_hat_i)
  })
  y_hat = Reduce('rbind', y_hat)
  e = t(t(y - y_hat) - d)
  # Variance-covariance matrices of coefficients
  fiuo_var_cov = var_cov_est(x, e, n_taxa)
  var_cov_hat = fiuo_var_cov$var_cov_hat
  var_hat = fiuo_var_cov$var_hat
  # Attach dimension names for downstream lookup
  colnames(beta) = covariates
  rownames(beta) = taxa_id
  names(d) = samp_id
  names(var_cov_hat) = taxa_id
  colnames(var_hat) = covariates
  rownames(var_hat) = taxa_id
  fiuo_para = list(beta = beta, d = d, e = e,
                   var_cov_hat = var_cov_hat, var_hat = var_hat)
  return(fiuo_para)
}
# Fold structural-zero information into the test results: taxa flagged as a
# structural zero in any group get their group-variable p/q-values forced
# to 0 (declared differentially abundant), and diff_abn is recomputed.
# Results pass through unchanged when struc_zero is FALSE.
res_combine_zero = function(x, group, struc_zero, zero_ind, alpha,
                            global, res, res_global) {
  if (struc_zero) {
    design_terms = colnames(x)
    group_cols = grepl(group, design_terms)
    # Mask: 0 for taxa that are a structural zero in any group, 1 otherwise
    keep = 1 - apply(zero_ind, 1, function(row) any(row == 1))
    res$p_val[, group_cols] = res$p_val[, group_cols] * keep
    res$q_val[, group_cols] = res$q_val[, group_cols] * keep
    res$diff_abn = res$q_val < alpha & !is.na(res$q_val)
    # Apply the same masking to the global test results
    if (global) {
      res_global[, "p_val"] = res_global[, "p_val"] * keep
      res_global[, "q_val"] = res_global[, "q_val"] * keep
      res_global[, "diff_abn"] = res_global[, "q_val"] < alpha &
        !is.na(res_global[, "q_val"])
    }
  }
  list(res = res, res_global = res_global)
}
# Taxon-wise covariance matrices of the regression coefficients.
#
# For each taxon i, computes the sandwich-form estimate
#   (X'X)^- * (sum_j e[i,j]^2 * x_j x_j') * (X'X)^-
# where `e` is the residual matrix (taxa x samples) and x_j is the j-th
# row of the design matrix `x`. Sample contributions that evaluate to NA
# are treated as zero. Returns list(var_cov_hat, var_hat): the per-taxon
# covariance matrices and their diagonals (coefficient variances).
var_cov_est = function(x, e, n_taxa) {
  covariates = colnames(x)
  n_covariates = length(covariates)
  n_samp = nrow(x)
  # Generalized inverse guards against a rank-deficient design; only rows
  # of x without missing values contribute to X'X
  XTX_inv = MASS::ginv(t(x[complete.cases(x), ]) %*% x[complete.cases(x), ])
  var_cov_hat = vector(mode = "list", length = n_taxa) # Covariances
  var_hat = matrix(NA, nrow = n_taxa, ncol = n_covariates) # Variances
  for (i in seq_len(n_taxa)) {
    sigma2_xxT = matrix(0, ncol = n_covariates, nrow = n_covariates)
    for (j in seq_len(n_samp)) {
      # Outer-product contribution of sample j, weighted by its squared
      # residual; NA contributions are zeroed out
      sigma2_xxT_j = e[i, j]^2 * x[j, ] %*% t(x[j, ])
      sigma2_xxT_j[is.na(sigma2_xxT_j)] = 0
      sigma2_xxT = sigma2_xxT + sigma2_xxT_j
    }
    var_cov_hat[[i]] = XTX_inv %*% sigma2_xxT %*% XTX_inv
    rownames(var_cov_hat[[i]]) = covariates
    colnames(var_cov_hat[[i]]) = covariates
    var_hat[i, ] = diag(var_cov_hat[[i]])
  }
  fiuo_var_cov = list(var_cov_hat = var_cov_hat, var_hat = var_hat)
  return(fiuo_var_cov)
}
| /R/ancom_bc.R | permissive | rusher321/rmeta | R | false | false | 25,726 | r | #' @title Differential abundance (DA) analysis for
#' microbial absolute abundance data.
#'
#' @aliases ancom
#'
#' @description Determine taxa whose absolute abundances, per unit volume, of
#' the ecosystem (e.g. gut) are significantly different with changes in the
#' covariate of interest (e.g. the group effect). The current version of
#' \code{ancombc} function implements Analysis of Compositions of Microbiomes
#' with Bias Correction (ANCOM-BC) in cross-sectional data while allowing
#' the adjustment of covariates.
#'
#' @details The definition of structural zero can be found at
#' \href{https://doi.org/10.3389/fmicb.2017.02114}{ANCOM-II}.
#' Setting \code{neg_lb = TRUE} indicates that you are using both criteria
#' stated in section 3.2 of
#' \href{https://doi.org/10.3389/fmicb.2017.02114}{ANCOM-II}
#' to detect structural zeros; otherwise, the algorithm will only use the
#' equation 1 in section 3.2 for declaring structural zeros. Generally, it is
#' recommended to set \code{neg_lb = TRUE} when the sample size per group is
#' relatively large (e.g. > 30).
#'
#' @param phyloseq a phyloseq-class object, which consists of a feature table
#' (microbial observed abundance table), a sample metadata, a taxonomy table
#' (optional), and a phylogenetic tree (optional). The row names of the
#' metadata must match the sample names of the feature table, and the row names
#' of the taxonomy table must match the taxon (feature) names of the feature
#' table. See \code{\link[phyloseq]{phyloseq}} for more details.
#' @param formula the character string expresses how the microbial absolute
#' abundances for each taxon depend on the variables in metadata.
#' @param p_adj_method method to adjust p-values by. Default is "holm".
#' Options include "holm", "hochberg", "hommel", "bonferroni", "BH", "BY",
#' "fdr", "none". See \code{\link[stats]{p.adjust}} for more details.
#' @param zero_cut a numerical fraction between 0 and 1. Taxa with proportion of
#' zeroes greater than \code{zero_cut} will be excluded in the analysis. Default
#' is 0.90.
#' @param lib_cut a numerical threshold for filtering samples based on library
#' sizes. Samples with library sizes less than \code{lib_cut} will be
#' excluded in the analysis.
#' @param group the name of the group variable in metadata. Specifying
#' \code{group} is required for detecting structural zeros and
#' performing global test.
#' @param struc_zero whether to detect structural zeros. Default is FALSE.
#' @param neg_lb whether to classify a taxon as a structural zero in the
#' corresponding study group using its asymptotic lower bound.
#' Default is FALSE.
#' @param tol the iteration convergence tolerance for the E-M algorithm.
#' Default is 1e-05.
#' @param max_iter the maximum number of iterations for the E-M algorithm.
#' Default is 100.
#' @param conserve whether to use a conservative variance estimate of
#' the test statistic. It is recommended if the sample size is small and/or
#' the number of differentially abundant taxa is believed to be large.
#' Default is FALSE.
#' @param alpha level of significance. Default is 0.05.
#' @param global whether to perform global test. Default is FALSE.
#'
#' @return a \code{list} with components:
#' \itemize{
#' \item{ \code{feature_table}, a \code{data.frame} of pre-processed
#' (based on \code{zero_cut} and \code{lib_cut}) microbial observed
#' abundance table. }
#' \item{ \code{zero_ind}, a logical \code{matrix} with TRUE indicating
#' the taxon is identified as a structural zero for the specified
#' \code{group} variable.}
#' \item{ \code{samp_frac}, a numeric vector of estimated sampling
#' fractions in log scale (natural log). }
#' \item{ \code{resid}, a \code{matrix} of residuals from the ANCOM-BC
#' log-linear (natural log) model.
#' Rows are taxa and columns are samples.}
#' \item{ \code{delta_em}, estimated bias terms through E-M algorithm. }
#' \item{ \code{delta_wls}, estimated bias terms through weighted
#' least squares (WLS) algorithm.}
#' \item{ \code{res}, a \code{list} containing ANCOM-BC primary result,
#' which consists of:}
#' \itemize{
#' \item{ \code{beta}, a \code{data.frame} of coefficients obtained
#' from the ANCOM-BC log-linear (natural log) model. }
#' \item{ \code{se}, a \code{data.frame} of standard errors (SEs) of
#' \code{beta}. }
#' \item{ \code{W}, a \code{data.frame} of test statistics.
#' \code{W = beta/se}. }
#' \item{ \code{p_val}, a \code{data.frame} of p-values. P-values are
#' obtained from two-sided Z-test using the test statistic \code{W}. }
#' \item{ \code{q_val}, a \code{data.frame} of adjusted p-values.
#' Adjusted p-values are obtained by applying \code{p_adj_method}
#' to \code{p_val}.}
#' \item{ \code{diff_abn}, a logical \code{data.frame}. TRUE if the
#' taxon has \code{q_val} less than \code{alpha}.}
#' }
#' \item{ \code{res_global}, a \code{data.frame} containing ANCOM-BC
#' global test result for the variable specified in \code{group},
#' each column is:}
#' \itemize{
#' \item{ \code{W}, test statistics.}
#' \item{ \code{p_val}, p-values, which are obtained from two-sided
#' Chi-square test using \code{W}.}
#' \item{ \code{q_val}, adjusted p-values. Adjusted p-values are
#' obtained by applying \code{p_adj_method} to \code{p_val}.}
#' \item{ \code{diff_abn}, A logical vector. TRUE if the taxon has
#' \code{q_val} less than \code{alpha}.}
#' }
#' }
#'
#' @examples
#' #================Build a Phyloseq-Class Object from Scratch==================
#' library(phyloseq)
#'
#' otu_mat = matrix(sample(1:100, 100, replace = TRUE), nrow = 10, ncol = 10)
#' rownames(otu_mat) = paste0("taxon", 1:nrow(otu_mat))
#' colnames(otu_mat) = paste0("sample", 1:ncol(otu_mat))
#'
#'
#' meta = data.frame(group = sample(LETTERS[1:4], size = 10, replace = TRUE),
#' row.names = paste0("sample", 1:ncol(otu_mat)),
#' stringsAsFactors = FALSE)
#'
#' tax_mat = matrix(sample(letters, 70, replace = TRUE),
#' nrow = nrow(otu_mat), ncol = 7)
#' rownames(tax_mat) = rownames(otu_mat)
#' colnames(tax_mat) = c("Kingdom", "Phylum", "Class", "Order",
#' "Family", "Genus", "Species")
#'
#' OTU = otu_table(otu_mat, taxa_are_rows = TRUE)
#' META = sample_data(meta)
#' TAX = tax_table(tax_mat)
#' physeq = phyloseq(OTU, META, TAX)
#'
#' #========================Run ANCOMBC Using a Real Data=======================
#'
#' library(phyloseq)
#' library(tidyverse)
#' data(GlobalPatterns)
#'
#' # Aggregate to phylum level
#' phylum_data = tax_glom(GlobalPatterns, "Phylum")
#' # The taxonomy table
#' tax_mat = as(tax_table(phylum_data), "matrix")
#'
#' # Run ancombc function
#' out = ancombc(phyloseq = phylum_data, formula = "SampleType",
#' p_adj_method = "holm", zero_cut = 0.90, lib_cut = 1000,
#' group = "SampleType", struc_zero = TRUE, neg_lb = FALSE,
#' tol = 1e-5, max_iter = 100, conserve = TRUE,
#' alpha = 0.05, global = TRUE)
#'
#' res = out$res
#' res_global = out$res_global
#'
#' @author Huang Lin
#'
#'
#'
#' @import stats
#' @import phyloseq
#' @importFrom MASS ginv
#' @importFrom nloptr neldermead
#' @importFrom Rdpack reprompt
#'
#' @export
ancombc = function(phyloseq, formula, p_adj_method = "holm", zero_cut = 0.90,
                   lib_cut, group = NULL, struc_zero = FALSE, neg_lb = FALSE,
                   tol = 1e-05, max_iter = 100, conserve = FALSE, alpha = 0.05,
                   global = FALSE){
  # 1. Data pre-processing: filter rare taxa / small libraries, and resolve
  # whether the global test can actually run (needs >= 3 group levels).
  fiuo_prep = data_prep(phyloseq, group, zero_cut, lib_cut, global = global)
  feature_table = fiuo_prep$feature_table
  meta_data = fiuo_prep$meta_data
  global = fiuo_prep$global
  # samp_id = colnames(feature_table)
  # taxa_id = rownames(feature_table)
  # n_samp = ncol(feature_table)
  n_taxa = nrow(feature_table)
  # Add pseudocount (1) and take logarithm.
  y = log(feature_table + 1)
  x = get_x(formula, meta_data)
  covariates = colnames(x)
  n_covariates = length(covariates)
  # 2. Identify taxa with structural zeros (taxa entirely absent from at
  # least one level of the group variable).
  if (struc_zero) {
    if (is.null(group)) {
      stop("Please specify the group variable for detecting structural zeros.")
    }
    zero_ind = get_struc_zero(feature_table, meta_data, group, neg_lb)
  }else{ zero_ind = NULL }
  # 3. Estimation of parameters: per-taxon coefficients (beta), per-sample
  # sampling fractions (d), residuals (e), and coefficient (co)variances.
  fiuo_para = para_est(y, meta_data, formula, tol, max_iter)
  beta = fiuo_para$beta
  d = fiuo_para$d
  e = fiuo_para$e
  var_cov_hat = fiuo_para$var_cov_hat
  var_hat = fiuo_para$var_hat
  # 4. Estimation of the between-sample bias (E-M and WLS estimators)
  fiuo_bias = bias_est(beta, var_hat, tol, max_iter, n_taxa)
  delta_em = fiuo_bias$delta_em
  delta_wls = fiuo_bias$delta_wls
  var_delta = fiuo_bias$var_delta
  # 5. Coefficients, standard error, and sampling fractions (bias-corrected)
  fiuo_fit = fit_summary(y, x, beta, var_hat, delta_em, var_delta, conserve)
  beta_hat = fiuo_fit$beta_hat
  se_hat = fiuo_fit$se_hat
  d_hat = fiuo_fit$d_hat
  # 6. Primary results: two-sided z-tests on W = beta/se, multiplicity
  # correction via p_adj_method, and significance calls at level alpha.
  W = beta_hat/se_hat
  p = 2 * pnorm(abs(W), mean = 0, sd = 1, lower.tail = FALSE)
  q = apply(p, 2, function(x) p.adjust(x, method = p_adj_method))
  diff_abn = q < alpha & !is.na(q)
  res = list(beta = data.frame(beta_hat, check.names = FALSE),
             se = data.frame(se_hat, check.names = FALSE),
             W = data.frame(W, check.names = FALSE),
             p_val = data.frame(p, check.names = FALSE),
             q_val = data.frame(q, check.names = FALSE),
             diff_abn = data.frame(diff_abn, check.names = FALSE))
  # 7. Global test results (multi-group Wald test), if requested
  if (global) {
    res_global = global_test(y, x, group, beta_hat, var_cov_hat,
                             p_adj_method, alpha)
  } else { res_global = NULL }
  # 8. Combine the information of structural zeros: taxa flagged as
  # structural zeros are forced to be declared differentially abundant.
  fiuo_out = res_combine_zero(x, group, struc_zero, zero_ind, alpha,
                              global, res, res_global)
  res = fiuo_out$res
  res_global = fiuo_out$res_global
  # 9. Outputs
  out = list(feature_table = feature_table, zero_ind = zero_ind,
             samp_frac = d_hat, resid = e,
             delta_em = delta_em, delta_wls = delta_wls,
             res = res, res_global = res_global)
  return(out)
}
# E-M algorithm for estimating the between-sample bias term.
#
# For each non-intercept covariate, fits a three-component Gaussian mixture
# to the per-taxon coefficients (bulk of unbiased taxa plus left/right
# shifted tails) and derives both the E-M and WLS estimators of the bias.
#
# Args:
#   beta: matrix of per-taxon regression coefficients (taxa x covariates).
#   var_hat: matrix of estimated variances of beta (same shape).
#   tol, max_iter: convergence tolerance / iteration cap for the E-M loop.
#   n_taxa: number of taxa (unused here; kept for the caller's API).
# Returns a list: delta_em, delta_wls, var_delta (one entry per covariate,
# intercept excluded).
bias_est = function(beta, var_hat, tol, max_iter, n_taxa) {
  delta_em = rep(NA, ncol(beta) - 1)
  delta_wls = rep(NA, ncol(beta) - 1)
  var_delta = rep(NA, ncol(beta) - 1)
  for (i in seq_along(delta_em)) {
    # Ignore the intercept
    Delta = beta[, i + 1]
    Delta = Delta[!is.na(Delta)]
    nu0 = var_hat[, i + 1]
    nu0 = nu0[!is.na(nu0)]
    # Initials: most taxa assumed unbiased (pi0), small left/right tails
    pi0_0 = 0.75
    pi1_0 = 0.125
    pi2_0 = 0.125
    delta_0 = mean(Delta[Delta >= quantile(Delta, 0.25, na.rm = TRUE)&
                           Delta <= quantile(Delta, 0.75, na.rm = TRUE)],
                   na.rm = TRUE)
    l1_0 = mean(Delta[Delta < quantile(Delta, 0.125, na.rm = TRUE)],
                na.rm = TRUE)
    l2_0 = mean(Delta[Delta > quantile(Delta, 0.875, na.rm = TRUE)],
                na.rm = TRUE)
    kappa1_0 = var(Delta[Delta < quantile(Delta, 0.125, na.rm = TRUE)],
                   na.rm = TRUE)
    # Fall back to unit variance when a tail is empty or degenerate
    if (is.na(kappa1_0) || kappa1_0 == 0) kappa1_0 = 1
    kappa2_0 = var(Delta[Delta > quantile(Delta, 0.875, na.rm = TRUE)],
                   na.rm = TRUE)
    if (is.na(kappa2_0) || kappa2_0 == 0) kappa2_0 = 1
    # Apply E-M algorithm
    fiuo_em = em_iter(Delta, nu0, pi0_0, pi1_0, pi2_0, delta_0,
                      l1_0, l2_0, kappa1_0, kappa2_0, tol, max_iter)
    # The EM estimator of bias
    delta_em[i] = fiuo_em$delta
    # The WLS estimator of bias
    pi1 = fiuo_em$pi1
    pi2 = fiuo_em$pi2
    l1 = fiuo_em$l1
    l2 = fiuo_em$l2
    kappa1 = fiuo_em$kappa1
    kappa2 = fiuo_em$kappa2
    # Cluster 0: central (unbiased) taxa
    C0 = which(Delta >= quantile(Delta, pi1, na.rm = TRUE) &
                 Delta < quantile(Delta, 1 - pi2, na.rm = TRUE))
    # Cluster 1: left tail
    C1 = which(Delta < quantile(Delta, pi1, na.rm = TRUE))
    # Cluster 2: right tail
    C2 = which(Delta >= quantile(Delta, 1 - pi2, na.rm = TRUE))
    # Denominator of the WLS estimator (the original comments had
    # numerator/denominator swapped)
    nu = nu0
    nu[C1] = nu[C1] + kappa1
    nu[C2] = nu[C2] + kappa2
    wls_deno = sum(1 / nu)
    # Numerator of the WLS estimator: precision-weighted, tail-shifted Delta
    wls_nume = 1 / nu
    wls_nume[C0] = (wls_nume * Delta)[C0]
    wls_nume[C1] = (wls_nume * (Delta - l1))[C1]
    wls_nume[C2] = (wls_nume * (Delta - l2))[C2]
    wls_nume = sum(wls_nume)
    delta_wls[i] = wls_nume / wls_deno
    # Estimate the variance of bias
    var_delta[i] = 1 / wls_deno
    if (is.na(var_delta[i])) var_delta[i] = 0
  }
  fiuo_bias = list(delta_em = delta_em, delta_wls = delta_wls,
                   var_delta = var_delta)
  # Explicit return (the original relied on the value of the final
  # assignment, which is returned invisibly)
  return(fiuo_bias)
}
# Data pre-processing: extract the OTU table and sample metadata from a
# phyloseq object and apply the prevalence / library-size filters.
#
# Args:
#   phyloseq: a phyloseq object holding counts and sample metadata.
#   group: name of the grouping variable in the sample data (or NULL).
#   zero_cut: taxa whose proportion of zeros is >= zero_cut are dropped.
#   lib_cut: samples with library size < lib_cut are dropped.
#   global: whether the multi-group (global) test was requested; switched
#     off here when the group variable has fewer than 3 levels.
# Returns a list with the filtered feature_table, meta_data, and the
# (possibly downgraded) global flag.
#
# NOTE: the original signature used the self-referential default
# `global = global`, which errors ("promise already under evaluation")
# whenever the argument is omitted, so `global` is now simply required.
# All existing callers pass it explicitly, so this is compatible.
data_prep = function(phyloseq, group, zero_cut, lib_cut, global) {
  feature_table = as(otu_table(phyloseq), "matrix")
  feature_table = data.frame(feature_table, check.names = FALSE)
  meta_data = as(sample_data(phyloseq), "data.frame")
  # Drop unused levels
  meta_data[] = lapply(meta_data, function(x)
    if(is.factor(x)) factor(x) else x)
  # Check the group variable
  if (is.null(group)) {
    if (global) {
      stop("Please specify the group variable for the global test.")
    }
  } else {
    n_level = length(unique(meta_data[, group]))
    if (n_level < 2) {
      stop("The group variable should have >= 2 categories.")
    } else if (n_level < 3) {
      # Global (multi-group) test requires at least 3 levels
      global = FALSE
      warning("The multi-group comparison will be deactivated as the group variable has < 3 categories.")
    }
  }
  # Discard taxa with zeros >= zero_cut
  zero_prop = apply(feature_table, 1, function(x)
    sum(x == 0, na.rm = TRUE)/length(x[!is.na(x)]))
  tax_del = which(zero_prop >= zero_cut)
  if (length(tax_del) > 0) {
    feature_table = feature_table[- tax_del, ]
  }
  # Discard samples with library size < lib_cut
  lib_size = colSums(feature_table, na.rm = TRUE)
  if(any(lib_size < lib_cut)){
    subj_del = which(lib_size < lib_cut)
    feature_table = feature_table[, - subj_del]
    meta_data = meta_data[- subj_del, ]
  }
  fiuo_prep = list(feature_table = feature_table,
                   meta_data = meta_data,
                   global = global)
  return(fiuo_prep)
}
# One complete E-M fit of the three-component Gaussian mixture used to
# estimate the bias term delta. Component 0 holds unbiased taxa; components
# 1 and 2 capture taxa shifted left/right (means delta + l1 / delta + l2,
# extra variances kappa1 / kappa2). Iterates until the change in the full
# parameter vector drops below `tol` or `max_iter` iterations are reached.
# Returns the final mixture parameters as a list.
em_iter = function(Delta, nu0, pi0_0, pi1_0, pi2_0, delta_0,
                   l1_0, l2_0, kappa1_0, kappa2_0, tol, max_iter) {
  # Store all paras in vectors/matrices; each *_vec grows by one entry per
  # E-M iteration (the last entry is the current value)
  pi0_vec = pi0_0
  pi1_vec = pi1_0
  pi2_vec = pi2_0
  delta_vec = delta_0
  l1_vec = l1_0
  l2_vec = l2_0
  kappa1_vec = kappa1_0
  kappa2_vec = kappa2_0
  n_taxa = length(Delta)
  # E-M iteration
  iterNum = 0
  epsilon = 100
  while (epsilon > tol & iterNum < max_iter) {
    # Current value of paras
    pi0 = pi0_vec[length(pi0_vec)]
    pi1 = pi1_vec[length(pi1_vec)]
    pi2 = pi2_vec[length(pi2_vec)]
    delta = delta_vec[length(delta_vec)]
    l1 = l1_vec[length(l1_vec)]
    l2 = l2_vec[length(l2_vec)]
    kappa1 = kappa1_vec[length(kappa1_vec)]
    kappa2 = kappa2_vec[length(kappa2_vec)]
    # E-step: per-taxon densities under each mixture component
    pdf0 = vapply(seq(n_taxa), function(i)
      dnorm(Delta[i], delta, sqrt(nu0[i])), FUN.VALUE = double(1))
    pdf1 = vapply(seq(n_taxa), function(i)
      dnorm(Delta[i], delta + l1, sqrt(nu0[i] + kappa1)),
      FUN.VALUE = double(1))
    pdf2 = vapply(seq(n_taxa), function(i)
      dnorm(Delta[i], delta + l2, sqrt(nu0[i] + kappa2)),
      FUN.VALUE = double(1))
    # Posterior membership probabilities (responsibilities); NaN from 0/0
    # (all densities underflow) is treated as zero responsibility
    r0i = pi0*pdf0/(pi0*pdf0 + pi1*pdf1 + pi2*pdf2)
    r0i[is.na(r0i)] = 0
    r1i = pi1*pdf1/(pi0*pdf0 + pi1*pdf1 + pi2*pdf2)
    r1i[is.na(r1i)] = 0
    r2i = pi2*pdf2/(pi0*pdf0 + pi1*pdf1 + pi2*pdf2)
    r2i[is.na(r2i)] = 0
    # M-step: closed-form updates for mixing weights and location params
    pi0_new = mean(r0i, na.rm = TRUE)
    pi1_new = mean(r1i, na.rm = TRUE)
    pi2_new = mean(r2i, na.rm = TRUE)
    delta_new = sum(r0i*Delta/nu0 + r1i*(Delta-l1)/(nu0+kappa1) +
                      r2i*(Delta-l2)/(nu0+kappa2), na.rm = TRUE)/
      sum(r0i/nu0 + r1i/(nu0+kappa1) + r2i/(nu0+kappa2), na.rm = TRUE)
    # Shifts are constrained: l1 <= 0 (left tail), l2 >= 0 (right tail)
    l1_new = min(sum(r1i*(Delta-delta)/(nu0+kappa1), na.rm = TRUE)/
                   sum(r1i/(nu0+kappa1), na.rm = TRUE), 0)
    l2_new = max(sum(r2i*(Delta-delta)/(nu0+kappa2), na.rm = TRUE)/
                   sum(r2i/(nu0+kappa2), na.rm = TRUE), 0)
    # Nelder-Mead simplex algorithm for kappa1 and kappa2: no closed-form
    # update exists, so maximize each tail's weighted log-likelihood
    obj_kappa1 = function(x){
      log_pdf = log(vapply(seq(n_taxa), function(i)
        dnorm(Delta[i], delta+l1, sqrt(nu0[i]+x)),
        FUN.VALUE = double(1)))
      log_pdf[is.infinite(log_pdf)] = 0
      -sum(r1i*log_pdf, na.rm = TRUE)
    }
    kappa1_new = nloptr::neldermead(x0 = kappa1,
                                    fn = obj_kappa1, lower = 0)$par
    obj_kappa2 = function(x){
      log_pdf = log(vapply(seq(n_taxa), function(i)
        dnorm(Delta[i], delta+l2, sqrt(nu0[i]+x)),
        FUN.VALUE = double(1)))
      log_pdf[is.infinite(log_pdf)] = 0
      -sum(r2i*log_pdf, na.rm = TRUE)
    }
    kappa2_new = nloptr::neldermead(x0 = kappa2,
                                    fn = obj_kappa2, lower = 0)$par
    # Merge to the paras vectors/matrices
    pi0_vec = c(pi0_vec, pi0_new)
    pi1_vec = c(pi1_vec, pi1_new)
    pi2_vec = c(pi2_vec, pi2_new)
    delta_vec = c(delta_vec, delta_new)
    l1_vec = c(l1_vec, l1_new)
    l2_vec = c(l2_vec, l2_new)
    kappa1_vec = c(kappa1_vec, kappa1_new)
    kappa2_vec = c(kappa2_vec, kappa2_new)
    # Calculate the new epsilon: Euclidean distance between successive
    # parameter vectors
    epsilon = sqrt((pi0_new-pi0)^2 + (pi1_new-pi1)^2 + (pi2_new-pi2)^2 +
                     (delta_new-delta)^2 + (l1_new-l1)^2 + (l2_new-l2)^2 +
                     (kappa1_new-kappa1)^2 + (kappa2_new-kappa2)^2)
    iterNum = iterNum + 1
  }
  # NOTE(review): if max_iter were 0 the *_new variables below would be
  # undefined; callers always pass max_iter >= 1.
  fiuo_em = list(pi0 = pi0_new, pi1 = pi1_new, pi2 = pi2_new,
                 delta = delta_new, l1 = l1_new, l2 = l2_new,
                 kappa1 = kappa1_new, kappa2 = kappa2_new)
  return(fiuo_em)
}
# Bias-correct the regression coefficients and derive standard errors and
# per-sample sampling fractions.
#
# Args:
#   y: log-transformed abundance table (taxa x samples).
#   x: design matrix (samples x covariates).
#   beta: raw coefficient matrix (taxa x covariates).
#   var_hat: estimated variances of beta (same shape).
#   delta_em: E-M bias estimate, one per non-intercept covariate.
#   var_delta: variance of the bias estimate.
#   conserve: if TRUE, inflate the SEs to account for the variance of the
#     bias estimate (conservative variance estimator).
# Returns a list with beta_hat (bias-corrected coefficients), se_hat, and
# d_hat (estimated sampling fraction per sample).
fit_summary = function(y, x, beta, var_hat, delta_em, var_delta, conserve) {
  beta_hat = beta
  # Subtract the estimated bias from every non-intercept coefficient
  beta_hat[, -1] = t(t(beta_hat[, -1]) - delta_em)
  if (conserve) {
    # Account for the variance of delta_hat:
    # se^2 = var_hat + var_delta + 2 * sqrt(var_hat * var_delta)
    se_hat = sqrt(sweep(var_hat, 2, c(0, var_delta), "+") +
                    2 * sqrt(sweep(var_hat, 2, c(0, var_delta), "*")))
  }else{ se_hat = sqrt(var_hat) }
  # Sampling fractions: residuals y - X beta, averaged over taxa for each
  # sample. Computed in a single matrix operation instead of growing a data
  # frame row-by-row with rbind (the original O(n^2) loop); the result is
  # identical, including the column names carried over from y.
  d_hat = colMeans(as.matrix(y) - beta_hat %*% t(x), na.rm = TRUE)
  fiuo_fit = list(beta_hat = beta_hat, se_hat = se_hat, d_hat = d_hat)
  return(fiuo_fit)
}
# Identify structural zeros: taxa that are (statistically) absent from at
# least one level of the grouping variable.
get_struc_zero = function(feature_table, meta_data, group, neg_lb) {
  grp = factor(meta_data[, group])
  counts = as.matrix(feature_table)
  # Presence/absence indicator; missing counts are treated as absent
  presence = counts
  presence[is.na(presence)] = 0
  presence[presence != 0] = 1
  lv = levels(grp)
  # Per-taxon observed presence proportion within each group level
  p_hat = do.call(rbind, lapply(seq_len(nrow(counts)), function(i)
    tapply(presence[i, ], grp, function(v) mean(v, na.rm = TRUE))))
  dimnames(p_hat) = list(rownames(feature_table), lv)
  # Per-taxon number of non-missing observations within each group level
  samp_size = do.call(rbind, lapply(seq_len(nrow(counts)), function(i)
    tapply(counts[i, ], grp, function(v) length(v[!is.na(v)]))))
  dimnames(samp_size) = list(rownames(feature_table), lv)
  # Normal-approximation lower confidence bound of the presence proportion
  p_hat_lo = p_hat - 1.96 * sqrt(p_hat * (1 - p_hat)/samp_size)
  zero_ind = (p_hat == 0)
  # Do we classify a taxon as a structural zero by its negative lower bound?
  if (neg_lb) zero_ind[p_hat_lo <= 0] = TRUE
  colnames(zero_ind) = paste0("structural_zero (", group,
                              " = ", colnames(zero_ind), ")")
  return(zero_ind)
}
# Build the design matrix for the user-supplied RHS formula, keeping rows
# that contain missing values (na.pass) so sample alignment is preserved.
get_x = function(formula, meta_data) {
  old_opts = options(na.action = "na.pass")
  on.exit(options(old_opts))  # restore the caller's na.action
  design = model.matrix(as.formula(paste0("~", formula)), data = meta_data)
  return(design)
}
# Global test: for each taxon, a Wald-type chi-square test of whether all
# group-effect coefficients are simultaneously zero (ANCOM-BC multi-group
# comparison). Returns a data.frame with W, p_val, q_val, diff_abn.
global_test = function(y, x, group, beta_hat, var_cov_hat, p_adj_method, alpha){
  taxa_id = rownames(y)
  n_taxa = nrow(y)
  covariates = colnames(x)
  res_global = data.frame(matrix(NA, nrow = n_taxa, ncol = 4))
  rownames(res_global) = taxa_id
  colnames(res_global) = c("W", "p_val", "q_val", "diff_abn")
  # NOTE(review): grepl() matches `group` as a regex/substring, so a group
  # named e.g. "x" would also match a covariate such as "sex"; confirm that
  # group names cannot collide with other covariate names.
  group_ind = grepl(group, covariates)
  # Loop over the parameter of interest
  beta_hat_sub = beta_hat[, group_ind]
  var_cov_hat_sub = lapply(var_cov_hat, function(x)
    x = x[group_ind, group_ind])
  for (i in seq_len(n_taxa)) {
    # Loop over taxa
    beta_hat_sub_i = beta_hat_sub[i, ]
    var_cov_hat_sub_i = var_cov_hat_sub[[i]]
    # Wald statistic W = b' (A V A')^- b with A the identity contrast matrix
    A = diag(x = 1, nrow = length(beta_hat_sub_i))
    W = t(A %*% beta_hat_sub_i) %*%
      MASS::ginv(A %*% var_cov_hat_sub_i %*% t(A)) %*%
      (A %*% beta_hat_sub_i)
    # Two-sided tail probability of the chi-square statistic
    p = 2 * min(pchisq(W, df = length(beta_hat_sub_i), lower.tail = TRUE),
                pchisq(W, df = length(beta_hat_sub_i), lower.tail = FALSE))
    res_global[i, "W"] = W
    res_global[i, "p_val"] = p
  }
  # Model summary: multiplicity correction and significance calls
  q_global = p.adjust(res_global[, "p_val"], method = p_adj_method)
  # Missing adjusted p-values are treated as non-significant
  q_global[is.na(q_global)] = 1
  diff_global = q_global < alpha & !is.na(q_global)
  res_global$q_val = q_global
  res_global$diff_abn = diff_global
  return(res_global)
}
# Iteratively estimate per-taxon regression coefficients (beta) and
# per-sample sampling fractions (d) by alternating OLS fits on the
# offset-corrected responses until convergence in (beta, d).
# Returns beta, d, the residual matrix e, and the coefficient
# (co)variance estimates from var_cov_est().
para_est = function(y, meta_data, formula, tol, max_iter) {
  x = get_x(formula, meta_data)
  taxa_id = rownames(y)
  n_taxa = nrow(y)
  samp_id = colnames(y)
  n_samp = ncol(y)
  covariates = colnames(x)
  # Sampling fractions, initialized at zero
  d = rep(0, n_samp)
  tformula = formula(paste0("y ~ ", formula))
  # Initial per-taxon OLS fits (d = 0, so responses are unadjusted)
  fits = lapply(seq_len(n_taxa), function(i) {
    df = data.frame(y = unlist(y[i, ]) - d, meta_data)
    return(lm(tformula, data = df))
  })
  # Regression coefficients
  beta = lapply(fits, function(i) {
    beta_i = rep(NA, length(covariates)) # prevent errors of missing values
    coef_i = coef(i)
    beta_i[match(names(coef_i), covariates)] = coef_i
    return(beta_i)
  })
  beta = Reduce('rbind', beta)
  # Iterative least square
  iterNum = 0
  epsilon = 100
  while (epsilon > tol & iterNum < max_iter) {
    # Updating beta: refit each taxon with the current sampling fractions
    # subtracted from the response
    fits = lapply(seq_len(n_taxa), function(i) {
      df = data.frame(y = unlist(y[i, ]) - d, meta_data)
      return(lm(tformula, data = df))
    })
    beta_new = lapply(fits, function(i) {
      beta_i = rep(NA, length(covariates))
      coef_i = coef(i)
      beta_i[match(names(coef_i), covariates)] = coef_i
      return(beta_i)
    })
    beta_new = Reduce('rbind', beta_new)
    # Updating d: average residual per sample across all taxa
    y_hat = lapply(fits, function(i) {
      y_hat_i = rep(NA, n_samp)
      fit_i = fitted(i)
      y_hat_i[match(names(fit_i), samp_id)] = fit_i
      return(y_hat_i)
    })
    y_hat = Reduce('rbind', y_hat)
    d_new = colMeans(y - y_hat, na.rm = TRUE)
    # Iteration: stop when the joint change in (beta, d) is below tol
    epsilon = sqrt(sum((beta_new - beta)^2, na.rm = TRUE) +
                     sum((d_new - d)^2, na.rm = TRUE))
    iterNum = iterNum + 1
    beta = beta_new
    d = d_new
  }
  # Regression residuals from the final fits
  y_hat = lapply(fits, function(i) {
    y_hat_i = rep(NA, n_samp)
    fit_i = fitted(i)
    y_hat_i[match(names(fit_i), samp_id)] = fit_i
    return(y_hat_i)
  })
  y_hat = Reduce('rbind', y_hat)
  e = t(t(y - y_hat) - d)
  # Variance-covariance matrices of coefficients (sandwich estimator)
  fiuo_var_cov = var_cov_est(x, e, n_taxa)
  var_cov_hat = fiuo_var_cov$var_cov_hat
  var_hat = fiuo_var_cov$var_hat
  colnames(beta) = covariates
  rownames(beta) = taxa_id
  names(d) = samp_id
  names(var_cov_hat) = taxa_id
  colnames(var_hat) = covariates
  rownames(var_hat) = taxa_id
  fiuo_para = list(beta = beta, d = d, e = e,
                   var_cov_hat = var_cov_hat, var_hat = var_hat)
  return(fiuo_para)
}
# Merge structural-zero information into the primary and global results.
# Taxa flagged as structural zeros get their group-effect p/q-values forced
# to zero, so they are always declared differentially abundant.
res_combine_zero = function(x, group, struc_zero, zero_ind, alpha,
                            global, res, res_global) {
  if (struc_zero) {
    covariate_names = colnames(x)
    is_group_col = grepl(group, covariate_names)
    # 1 for taxa with no structural zero, 0 for taxa with at least one
    keep_mask = 1 - apply(zero_ind, 1, function(row) any(row == 1))
    res$p_val[, is_group_col] = res$p_val[, is_group_col] * keep_mask
    res$q_val[, is_group_col] = res$q_val[, is_group_col] * keep_mask
    # Re-derive significance calls from the masked q-values
    res$diff_abn = res$q_val < alpha & !is.na(res$q_val)
    # Global test results are masked the same way
    if (global) {
      res_global[, "p_val"] = res_global[, "p_val"] * keep_mask
      res_global[, "q_val"] = res_global[, "q_val"] * keep_mask
      res_global[, "diff_abn"] = res_global[, "q_val"] < alpha &
        !is.na(res_global[, "q_val"])
    }
  }
  return(list(res = res, res_global = res_global))
}
# Sandwich-type covariance estimator for the per-taxon OLS coefficients:
#   (X'X)^-  [ sum_j e_ij^2 x_j x_j' ]  (X'X)^-
# Returns the full covariance matrix per taxon and the diagonal variances.
var_cov_est = function(x, e, n_taxa) {
  covariates = colnames(x)
  p = length(covariates)
  n_samp = nrow(x)
  ok = complete.cases(x)
  XTX_inv = MASS::ginv(t(x[ok, ]) %*% x[ok, ])
  var_cov_hat = vector(mode = "list", length = n_taxa) # full covariance matrices
  var_hat = matrix(NA, nrow = n_taxa, ncol = p)        # diagonals only
  for (i in seq_len(n_taxa)) {
    # Accumulate the "meat" matrix; terms containing missing values
    # contribute zero, matching the original element-wise NA handling
    meat = Reduce(`+`, lapply(seq_len(n_samp), function(j) {
      term = e[i, j]^2 * tcrossprod(x[j, ])
      term[is.na(term)] = 0
      term
    }), init = matrix(0, p, p))
    vc = XTX_inv %*% meat %*% XTX_inv
    dimnames(vc) = list(covariates, covariates)
    var_cov_hat[[i]] = vc
    var_hat[i, ] = diag(vc)
  }
  fiuo_var_cov = list(var_cov_hat = var_cov_hat, var_hat = var_hat)
  return(fiuo_var_cov)
}
|
#!/usr/bin/env R
# NOTE(review): for a directly executable script the interpreter should
# likely be Rscript rather than R.
# This script calculates heights of trees given the distance of each tree
# from its base and the angle to its top, using the trigonometric formula
#
# height = distance * tan(radians)
#
# ARGUMENTS
# degrees: The angle of elevation of tree
# distance: The distance from base of tree (e.g. meters)
#
# OUTPUT
# The heights of the tree, same units as "distance"
# Loading data from command line input: args[1] is the path to the input CSV
args <- commandArgs(trailingOnly = TRUE)
string <- args[1]
# Remove relative path (keep only the file name)
string <- gsub(".*/","",string)
# Remove file extension, leaving the bare name used for the output file
string <- tools::file_path_sans_ext(string)
tree_data <- read.csv(args[1])
# Compute tree height from the angle of elevation (degrees) and the
# horizontal distance to the base: height = distance * tan(angle in radians).
TreeHeight <- function(degrees, distance) {
  angle_rad <- degrees * pi / 180  # convert degrees to radians
  return(distance * tan(angle_rad))
}
# Sanity-check example: 37 degrees at 40 m
print(paste("The height of a tree with an angle of 37 degrees at distance 40m is", as.character(TreeHeight(37,40))))
# Assigning the output of the function to a new Tree.Height.m column
tree_data$Tree.Height.m <- TreeHeight(tree_data$Angle.degrees, tree_data$Distance.m)
# Creating a csv output in ../Results/, named after the input file
write.csv(tree_data, paste("../Results/", string, "_treeheights.csv", sep = ""), row.names = FALSE)
# Message to state the script ran to completion
print("get_Treeheight.R complete") # change | /Elin/Code/get_TreeHeight.R | no_license | elinfalla/CMEEGroupWork | R | false | false | 1,311 | r | #!/usr/bin/env R
# This function calculates heights of trees given distance of each tree
# from its base and angle to its top, using the trigonometric formula
#
# height = distance * tan(radians)
#
# ARGUMENTS
# degrees: The angle of elevation of tree
# distance: The distance from base of tree (e.g. meters)
#
# OUTPUT
# The heights of the tree, same units as "distance"
# Loading data from command line input
args <- commandArgs(trailingOnly = TRUE)
string <- args[1]
# Remove relative path
string <- gsub(".*/","",string)
# Remove file extension
string <- tools::file_path_sans_ext(string)
tree_data <- read.csv(args[1])
# Function
TreeHeight <- function(degrees, distance) {
radians <- degrees * pi / 180
height <- distance * tan(radians)
return(height)
}
#change: added t to height - WHY IS THIS HERE?
print(paste("The height of a tree with an angle of 37 degrees at distance 40m is", as.character(TreeHeight(37,40))))
# Assigning the output of the function to a column
tree_data$Tree.Height.m <- TreeHeight(tree_data$Angle.degrees, tree_data$Distance.m)
# Creating a csv output
write.csv(tree_data, paste("../Results/", string, "_treeheights.csv", sep = ""), row.names = FALSE) # change: _treeheights
# Message to state complete
print("get_Treeheight.R complete") # change |
# Function Space Optimization case study with d-GR4J
# Moritz Feigl, 2019
#
# 0. Load everything needed
# NOTE(review): setwd() assumes the script is launched from the parent of
# FSO_paper/; all subsequent paths are relative to that directory.
setwd("FSO_paper")
source("Functions/FSO_functions.R")
FSO_setup()
# 1. Optimization, testing & diagnostic plots
# Full factorial design: every test case x optimizer x repetition
grid <- expand.grid("Test_number" = c(1.1, 1.2, 2.4, 2.5, 2.6, 4.4, 4.5, 4.6),
                    "Optimizer" = c("GA", "DDS", "PSO"),
                    "run" = c(1:5),
                    stringsAsFactors = FALSE)
# Run FSO once per grid row; training_basins/test_basins are presumably
# created by FSO_setup() -- verify against Functions/FSO_functions.R
mapply(FSO,
       Optimizer = grid$Optimizer,
       Test_number = grid$Test_number,
       run = grid$run,
       MoreArgs = list("iterations" = 5000,
                       "training_basins" = training_basins,
                       'test_basins' = test_basins))
# 2. Plot results
source("Paper code/FSO_plots")
| /Paper code/5_GR4J_FSO.R | permissive | MoritzFeigl/FSO_paper | R | false | false | 750 | r | # Function Space Optimization case study with d-GR4J
# Moritz Feigl, 2019
#
# 0. Load everything needed
setwd("FSO_paper")
source("Functions/FSO_functions.R")
FSO_setup()
# 1. Optimization, testing & diagnostic plots
grid <- expand.grid("Test_number" = c(1.1, 1.2, 2.4, 2.5, 2.6, 4.4, 4.5, 4.6),
"Optimizer" = c("GA", "DDS", "PSO"),
"run" = c(1:5),
stringsAsFactors = FALSE)
mapply(FSO,
Optimizer = grid$Optimizer,
Test_number = grid$Test_number,
run = grid$run,
MoreArgs = list("iterations" = 5000,
"training_basins" = training_basins,
'test_basins' = test_basins))
# 2. Plot results
source("Paper code/FSO_plots")
|
library(sgmcmc)
### Name: sghmc
### Title: Stochastic Gradient Hamiltonian Monte Carlo
### Aliases: sghmc
### ** Examples
## Not run:
##D # Simulate from a Normal Distribution with uninformative, improper prior
##D dataset = list("x" = rnorm(1000))
##D params = list("theta" = 0)
##D logLik = function(params, dataset) {
##D distn = tf$distributions$Normal(params$theta, 1)
##D return(tf$reduce_sum(distn$log_prob(dataset$x)))
##D }
##D stepsize = list("theta" = 1e-5)
##D output = sghmc(logLik, dataset, params, stepsize)
##D # For more examples see vignettes
## End(Not run)
| /data/genthat_extracted_code/sgmcmc/examples/sghmc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 594 | r | library(sgmcmc)
### Name: sghmc
### Title: Stochastic Gradient Hamiltonian Monte Carlo
### Aliases: sghmc
### ** Examples
## Not run:
##D # Simulate from a Normal Distribution with uninformative, improper prior
##D dataset = list("x" = rnorm(1000))
##D params = list("theta" = 0)
##D logLik = function(params, dataset) {
##D distn = tf$distributions$Normal(params$theta, 1)
##D return(tf$reduce_sum(distn$log_prob(dataset$x)))
##D }
##D stepsize = list("theta" = 1e-5)
##D output = sghmc(logLik, dataset, params, stepsize)
##D # For more examples see vignettes
## End(Not run)
|
library(MASS)   # Boston housing data set
library(tree)   # classification and regression trees
attach(Boston)  # NOTE(review): attach() is discouraged; prefer referencing columns via Boston$medv
# Split the data into 50/50 training and testing sets, with a fixed seed
# so the random split is reproducible
set.seed(1)
train = sample(1:nrow(Boston),nrow(Boston)/2)
test = -train
train_data = Boston[train,]
test_data = Boston[test,]
testing_medv = medv[test]
# fit a tree based on training data
tree_model = tree(medv~.,train_data)
tree_model
plot(tree_model)
text(tree_model,pretty = 0)
# check how the model is doing using the testing dataset (test MSE)
tree_predict = predict(tree_model,test_data)
mean((tree_predict-testing_medv)^2) #25%
# cross validation for pruning the tree
cv_tree = cv.tree(tree_model)
plot(cv_tree$size,cv_tree$dev,type = 'b',xlab = 'Tree Size',ylab = 'MSE')
# NOTE(review): the CV-optimal index is computed but not used below;
# consider best = cv_tree$size[which.min(cv_tree$dev)] instead of the
# hard-coded 4.
which.min(cv_tree$dev)
cv_tree$size[1]
# prune the tree to size 4
prune_model = prune.tree(tree_model,best = 4)
plot(prune_model)
text(prune_model)
# check the accuracy of the pruned model using testing data (test MSE)
tree_pred = predict(prune_model,test_data)
mean((tree_pred-testing_medv)^2)
| /An Introduction to Statistical Learning /R/Decision Trees Regression.R | no_license | lzxyzq/Data-Analysis | R | false | false | 939 | r |
library(MASS)
library(tree)
attach(Boston)
#Split data into testing and training using
set.seed(1)
train = sample(1:nrow(Boston),nrow(Boston)/2)
test = -train
train_data = Boston[train,]
test_data = Boston[test,]
testing_medv = medv[test]
# fit a tree based on training data
tree_model = tree(medv~.,train_data)
tree_model
plot(tree_model)
text(tree_model,pretty = 0)
# check the model is doing using the testing dataset
tree_predict = predict(tree_model,test_data)
mean((tree_predict-testing_medv)^2) #25%
#cross validation for pruning the tree
cv_tree = cv.tree(tree_model)
plot(cv_tree$size,cv_tree$dev,type = 'b',xlab = 'Tree Size',ylab = 'MSE')
which.min(cv_tree$dev)
cv_tree$size[1]
#prune the tree to size 4
prune_model = prune.tree(tree_model,best = 4)
plot(prune_model)
text(prune_model)
#check the accuracy of the model using testing data
tree_pred = predict(prune_model,test_data)
mean((tree_pred-testing_medv)^2)
|
#' Calculate the rich club of a graph
#'
#' This function calculates the rich club of a graph, both the coefficient
#' \eqn{\phi} and the nodes that make up this subgraph.
#'
#' @param g The graph of interest
#' @param k The minimum degree for including a vertex (default: 1)
#' @param weighted A logical indicating whether or not edge weights should be
#' used (default: FALSE)
#' @export
#'
#' @return A list with the following components:
#' \item{phi}{The rich club coefficient, \eqn{\phi}.}
#' \item{graph}{A subgraph containing only the rich club nodes.}
#' \item{Nk}{The number of vertices in the rich club graph.}
#' \item{Ek}{The number of edges in the rich club graph.}
#'
#' @seealso \code{\link{rich.club.norm}}
#'
#' @author Christopher G. Watson, \email{cgwatson@@bu.edu}
#' @references Zhou S., Mondragon R.J. (2004) \emph{The rich-club phenomenon
#' in the internet topology}. IEEE Comm Lett, 8:180-182.
#' @references Opsahl T., Colizza V., Panzarasa P., Ramasco J.J. (2008)
#' \emph{Prominence and control: the weighted rich-club effect}. Physical Review
#' Letters, 101.16:168702.
rich.club.coeff <- function(g, k=1, weighted=FALSE) {
  stopifnot(is_igraph(g))
  # Use a precomputed 'degree' vertex attribute when one is available
  if ('degree' %in% vertex_attr_names(g)) {
    degs <- V(g)$degree
  } else {
    degs <- degree(g)
  }
  Nv <- vcount(g)
  Nk <- sum(degs > k)
  if (Nk == 0) {
    # No vertex exceeds degree k: the rich club is empty
    return(list(phi=NaN, graph=make_empty_graph(), Nk=0, Ek=0))
  } else {
    # The Nk highest-degree vertices form the rich-club subgraph
    rich.club.nodes <- order(degs)[(Nv - Nk + 1):Nv]
    rich.club.graph <- induced.subgraph(g, rich.club.nodes)
    Ek <- ecount(rich.club.graph)
    if (isTRUE(weighted)) {
      # Weighted coefficient: club weight over the sum of the Ek strongest
      # edge weights in the whole graph (Opsahl et al., 2008).
      # TRUE spelled out instead of the reassignable shorthand T.
      Wr <- sum(E(rich.club.graph)$weight)
      weights <- sort(E(g)$weight, decreasing=TRUE)[1:Ek]
      phi <- Wr / sum(weights)
    } else {
      phi <- graph.density(rich.club.graph)
    }
    return(list(phi=phi, graph=rich.club.graph, Nk=Nk, Ek=Ek))
  }
}
| /R/rich_club_coeff.R | no_license | nagyistge/brainGraph | R | false | false | 1,909 | r | #' Calculate the rich club of a graph
#'
#' This function calculates the rich club of a graph, both the coefficient
#' \eqn{\phi} and the nodes that make up this subgraph.
#'
#' @param g The graph of interest
#' @param k The minimum degree for including a vertex (default: 1)
#' @param weighted A logical indicating whether or not edge weights should be
#' used (default: FALSE)
#' @export
#'
#' @return A list with the following components:
#' \item{phi}{The rich club coefficient, \eqn{\phi}.}
#' \item{graph}{A subgraph containing only the rich club nodes.}
#' \item{Nk}{The number of vertices in the rich club graph.}
#' \item{Ek}{The number of edges in the rich club graph.}
#'
#' @seealso \code{\link{rich.club.norm}}
#'
#' @author Christopher G. Watson, \email{cgwatson@@bu.edu}
#' @references Zhou S., Mondragon R.J. (2004) \emph{The rich-club phenomenon
#' in the internet topology}. IEEE Comm Lett, 8:180-182.
#' @references Opsahl T., Colizza V., Panzarasa P., Ramasco J.J. (2008)
#' \emph{Prominence and control: the weighted rich-club effect}. Physical Review
#' Letters, 101.16:168702.
rich.club.coeff <- function(g, k=1, weighted=FALSE) {
  stopifnot(is_igraph(g))
  # Use a precomputed 'degree' vertex attribute when one is available
  if ('degree' %in% vertex_attr_names(g)) {
    degs <- V(g)$degree
  } else {
    degs <- degree(g)
  }
  Nv <- vcount(g)
  Nk <- sum(degs > k)
  if (Nk == 0) {
    # No vertex exceeds degree k: the rich club is empty
    return(list(phi=NaN, graph=make_empty_graph(), Nk=0, Ek=0))
  } else {
    # The Nk highest-degree vertices form the rich-club subgraph
    rich.club.nodes <- order(degs)[(Nv - Nk + 1):Nv]
    rich.club.graph <- induced.subgraph(g, rich.club.nodes)
    Ek <- ecount(rich.club.graph)
    if (isTRUE(weighted)) {
      # Weighted coefficient: club weight over the sum of the Ek strongest
      # edge weights in the whole graph (Opsahl et al., 2008).
      # TRUE spelled out instead of the reassignable shorthand T.
      Wr <- sum(E(rich.club.graph)$weight)
      weights <- sort(E(g)$weight, decreasing=TRUE)[1:Ek]
      phi <- Wr / sum(weights)
    } else {
      phi <- graph.density(rich.club.graph)
    }
    return(list(phi=phi, graph=rich.club.graph, Nk=Nk, Ek=Ek))
  }
}
|
#' Add many differenced columns to the data
#'
#' A handy function for adding multiple lagged difference values to a data frame.
#' Works with `dplyr` groups too.
#'
#' @param .data A tibble.
#' @param .value A column to have a difference transformation applied
#' @param .lags One or more lags for the difference(s)
#' @param .differences The number of differences to apply.
#' @param .log If TRUE, applies log-differences.
#' @param .names A vector of names for the new columns. Must be of same length as the
#' number of output columns. Use "auto" to automatically rename the columns.
#'
#'
#' @return Returns a `tibble` object describing the timeseries.
#'
#' @details
#'
#' __Benefits__
#'
#' This is a scalable function that is:
#'
#' - Designed to work with grouped data using `dplyr::group_by()`
#' - Add multiple differences by adding a sequence of differences using
#' the `.lags` argument (e.g. `lags = 1:20`)
#'
#'
#' @seealso
#'
#' Augment Operations:
#'
#' - [tk_augment_timeseries_signature()] - Group-wise augmentation of timestamp features
#' - [tk_augment_holiday_signature()] - Group-wise augmentation of holiday features
#' - [tk_augment_slidify()] - Group-wise augmentation of rolling functions
#' - [tk_augment_lags()] - Group-wise augmentation of lagged data
#' - [tk_augment_differences()] - Group-wise augmentation of differenced data
#' - [tk_augment_fourier()] - Group-wise augmentation of fourier series
#'
#' Underlying Function:
#'
#' - [`diff_vec()`] - Underlying function that powers `tk_augment_differences()`
#'
#' @examples
#' library(tidyverse)
#' library(timetk)
#'
#' m4_monthly %>%
#' group_by(id) %>%
#' tk_augment_differences(value, .lags = 1:20)
#'
#' @name tk_augment_differences
NULL
#' @export
#' @rdname tk_augment_differences
tk_augment_differences <- function(.data,
                                   .value,
                                   .lags = 1,
                                   .differences = 1,
                                   .log = FALSE,
                                   .names = "auto") {

    # Checks: .value must be supplied, and any user-supplied names must
    # match the number of generated columns (one per lag x difference pair)
    column_expr <- enquo(.value)
    if (rlang::quo_is_missing(column_expr)) stop(call. = FALSE, "tk_augment_differences(.value) is missing.")
    if (rlang::is_missing(.lags)) stop(call. = FALSE, "tk_augment_differences(.lags) is missing.")
    if (rlang::is_missing(.differences)) stop(call. = FALSE, "tk_augment_differences(.differences) is missing.")
    if (!any(.names == "auto")) {
        if (length(.names) != length(.lags) * length(.differences)) {
            # The original passed the length as abort()'s second argument,
            # which rlang interprets as the condition class, not message
            # text; build the full message explicitly instead.
            rlang::abort(paste0(".names must be a vector of length ",
                                length(.lags) * length(.differences)))
        }
    }

    UseMethod("tk_augment_differences", .data)
}
#' @export
tk_augment_differences.data.frame <- function(.data,
                                              .value,
                                              .lags = 1,
                                              .differences = 1,
                                              .log = FALSE,
                                              .names = "auto") {

    # Capture the target column as a quosure for tidy evaluation
    column_expr <- enquo(.value)

    # Build an unevaluated timetk::diff_vec() call for one lag/difference pair
    make_call <- function(col, lag_val, diff_val) {
        rlang::call2(
            "diff_vec",
            x = rlang::sym(col),
            lag = lag_val,
            difference = diff_val,
            log = .log,
            silent = TRUE,
            .ns = "timetk"
        )
    }

    # One call per (lag, difference) combination, in expand.grid order
    grid <- expand.grid(
        col = rlang::quo_name(column_expr),
        lag_val = .lags,
        diff_val = .differences,
        stringsAsFactors = FALSE)

    calls <- purrr::pmap(.l = list(grid$col, grid$lag_val, grid$diff_val), make_call)

    # Name the new columns automatically or from the user-supplied names
    if (any(.names == "auto")) {
        newname <- paste0(grid$col, "_lag", grid$lag_val, "_diff", grid$diff_val)
    } else {
        newname <- as.list(.names)
    }
    calls <- purrr::set_names(calls, newname)

    # Evaluate all calls at once via mutate() and return as a tibble
    ret <- tibble::as_tibble(dplyr::mutate(.data, !!!calls))

    return(ret)
}
#' @export
tk_augment_differences.grouped_df <- function(.data,
                                              .value,
                                              .lags = 1,
                                              .differences = 1,
                                              .log = FALSE,
                                              .names = "auto") {

    # Tidy Eval Setup
    column_expr <- enquo(.value)
    group_names <- dplyr::group_vars(.data)

    # # Checks (performed by the generic before dispatch)
    # if (rlang::quo_is_missing(column_expr)) stop(call. = FALSE, "tk_augment_differences(.value) is missing.")
    # if (rlang::is_missing(.lags)) stop(call. = FALSE, "tk_augment_differences(.lags) is missing.")
    # if (rlang::is_missing(.differences)) stop(call. = FALSE, "tk_augment_differences(.differences) is missing.")

    # Apply the data.frame method within each group via nest/map/unnest,
    # then restore the original grouping on the combined result
    .data %>%
        tidyr::nest() %>%
        dplyr::mutate(nested.col = purrr::map(
            .x = data,
            .f = function(df) tk_augment_differences(
                .data = df,
                .value = !! enquo(.value),
                .lags = .lags,
                .differences = .differences,
                .log = .log,
                .names = .names
            )
        )) %>%
        dplyr::select(-data) %>%
        tidyr::unnest(cols = nested.col) %>%
        dplyr::group_by_at(.vars = group_names)
}
#' @export
tk_augment_differences.default <- function(.data,
                                           .value,
                                           .lags = 1,
                                           .differences = 1,
                                           .log = FALSE,
                                           .names = "auto") {
  # Fallback method: no implementation exists for this class of .data.
  # Bug fix: the original interrogated class(data) -- `data` is the base/utils
  # function, so the message always reported class "function" instead of the
  # class of the object the caller actually supplied.
  stop(paste0("`tk_augment_differences` has no method for class ", class(.data)[[1]]))
}
| /R/augment-tk_augment_differences.R | no_license | WmHenry/timetk | R | false | false | 5,789 | r | #' Add many differenced columns to the data
#'
#' A handy function for adding multiple lagged difference values to a data frame.
#' Works with `dplyr` groups too.
#'
#' @param .data A tibble.
#' @param .value A column to have a difference transformation applied
#' @param .lags One or more lags for the difference(s)
#' @param .differences The number of differences to apply.
#' @param .log If TRUE, applies log-differences.
#' @param .names A vector of names for the new columns. Must be of same length as the
#' number of output columns. Use "auto" to automatically rename the columns.
#'
#'
#' @return Returns a `tibble` object with the new difference column(s) added.
#'
#' @details
#'
#' __Benefits__
#'
#' This is a scalable function that is:
#'
#' - Designed to work with grouped data using `dplyr::group_by()`
#' - Add multiple differences by adding a sequence of differences using
#' the `.lags` argument (e.g. `lags = 1:20`)
#'
#'
#' @seealso
#'
#' Augment Operations:
#'
#' - [tk_augment_timeseries_signature()] - Group-wise augmentation of timestamp features
#' - [tk_augment_holiday_signature()] - Group-wise augmentation of holiday features
#' - [tk_augment_slidify()] - Group-wise augmentation of rolling functions
#' - [tk_augment_lags()] - Group-wise augmentation of lagged data
#' - [tk_augment_differences()] - Group-wise augmentation of differenced data
#' - [tk_augment_fourier()] - Group-wise augmentation of fourier series
#'
#' Underlying Function:
#'
#' - [`diff_vec()`] - Underlying function that powers `tk_augment_differences()`
#'
#' @examples
#' library(tidyverse)
#' library(timetk)
#'
#' m4_monthly %>%
#' group_by(id) %>%
#' tk_augment_differences(value, .lags = 1:20)
#'
#' @name tk_augment_differences
NULL
#' @export
#' @rdname tk_augment_differences
tk_augment_differences <- function(.data,
                                   .value,
                                   .lags = 1,
                                   .differences = 1,
                                   .log = FALSE,
                                   .names = "auto") {
  # Checks -- validate arguments once here, before S3 dispatch, so the
  # individual methods do not need to repeat them.
  column_expr <- enquo(.value)
  if (rlang::quo_is_missing(column_expr)) stop(call. = FALSE, "tk_augment_differences(.value) is missing.")
  if (rlang::is_missing(.lags)) stop(call. = FALSE, "tk_augment_differences(.lags) is missing.")
  if (rlang::is_missing(.differences)) stop(call. = FALSE, "tk_augment_differences(.differences) is missing.")
  if (!any(.names == "auto")) {
    if (length(.names) != length(.lags) * length(.differences)) {
      # Bug fix: rlang::abort() does not paste multiple message pieces the way
      # stop() does -- its second positional argument is `class`, so the
      # computed length was silently swallowed. Build the message explicitly.
      rlang::abort(paste0(".names must be a vector of length ", length(.lags) * length(.differences)))
    }
  }
  UseMethod("tk_augment_differences", .data)
}
#' @export
tk_augment_differences.data.frame <- function(.data,
                                              .value,
                                              .lags = 1,
                                              .differences = 1,
                                              .log = FALSE,
                                              .names = "auto") {
  # Data-frame method: appends one differenced column per lag/difference
  # combination, each computed by timetk::diff_vec().

  # Capture the column to be differenced for tidy evaluation
  column_expr <- enquo(.value)

  # Builds a single unevaluated call to timetk::diff_vec() for one
  # column / lag / difference combination.
  make_call <- function(col, lag_val, diff_val) {
    rlang::call2(
      "diff_vec",
      x = rlang::sym(col),
      lag = lag_val,
      difference = diff_val,
      log = .log,
      silent = TRUE,
      .ns = "timetk"
    )
  }

  # All lag/difference combinations (lags vary fastest, per expand.grid)
  grid <- expand.grid(
    col = rlang::quo_name(column_expr),
    lag_val = .lags,
    diff_val = .differences,
    stringsAsFactors = FALSE)

  calls <- purrr::pmap(.l = list(grid$col, grid$lag_val, grid$diff_val), make_call)

  # New column names: "auto" -> "<col>_lag<l>_diff<d>", otherwise user-supplied
  if (any(.names == "auto")) {
    newname <- paste0(grid$col, "_lag", grid$lag_val, "_diff", grid$diff_val)
  } else {
    newname <- as.list(.names)
  }
  calls <- purrr::set_names(calls, newname)

  # Evaluate all calls at once via mutate() and return a tibble
  ret <- tibble::as_tibble(dplyr::mutate(.data, !!!calls))

  return(ret)
}
#' @export
tk_augment_differences.grouped_df <- function(.data,
                                              .value,
                                              .lags = 1,
                                              .differences = 1,
                                              .log = FALSE,
                                              .names = "auto") {
  # Grouped method: applies the data.frame method independently within each
  # dplyr group, then restores the caller's original grouping.

  # Tidy Eval Setup
  column_expr <- enquo(.value)
  group_names <- dplyr::group_vars(.data)

  # Argument checks are intentionally not repeated here: the generic
  # tk_augment_differences() validates .value/.lags/.differences before
  # dispatching to this method.
  # if (rlang::quo_is_missing(column_expr)) stop(call. = FALSE, "tk_augment_differences(.value) is missing.")
  # if (rlang::is_missing(.lags)) stop(call. = FALSE, "tk_augment_differences(.lags) is missing.")
  # if (rlang::is_missing(.differences)) stop(call. = FALSE, "tk_augment_differences(.differences) is missing.")

  .data %>%
    # One nested data frame per group (in a column named "data")
    tidyr::nest() %>%
    # Apply the data.frame method to each group's data frame
    dplyr::mutate(nested.col = purrr::map(
      .x = data,
      .f = function(df) tk_augment_differences(
        .data        = df,
        .value       = !! enquo(.value),
        .lags        = .lags,
        .differences = .differences,
        .log         = .log,
        .names       = .names
      )
    )) %>%
    dplyr::select(-data) %>%
    # Flatten the augmented groups back to one row per observation
    tidyr::unnest(cols = nested.col) %>%
    # Restore the original grouping
    dplyr::group_by_at(.vars = group_names)
}
#' @export
tk_augment_differences.default <- function(.data,
                                           .value,
                                           .lags = 1,
                                           .differences = 1,
                                           .log = FALSE,
                                           .names = "auto") {
  # Fallback method: no implementation exists for this class of .data.
  # Bug fix: the original interrogated class(data) -- `data` is the base/utils
  # function, so the message always reported class "function" instead of the
  # class of the object the caller actually supplied.
  stop(paste0("`tk_augment_differences` has no method for class ", class(.data)[[1]]))
}
|
# Error handling ----
# The next few calls intentionally raise errors to demonstrate R's
# debugging tools (traceback, debug).
rm(x)
mean(x)      # error: object 'x' not found
traceback()  # inspect the call stack of the last error
lm(y ~ x)    # error: 'y' and 'x' do not exist
traceback()
debug(lm)    # flag lm() so the next call steps through its body
lm(y ~ x)

# Simulation ----
# str() gives a compact structural summary of any object, including functions.
str(str)
str(lm)
str(ls)
x <- rnorm(100)
str(x)
summary(x)
f <- gl(40, 10)  # create the factor: 40 levels, 10 replications each
str(f)
summary(f)
str(airquality)
m <- matrix(rnorm(100), 10, 10)
str(m)
# Bug fix: the original read `split(airquality, arquiality$mounth)` --
# the data set is `airquality` and its month column is `Month`.
s <- split(airquality, airquality$Month)
str(s)
x <- rnorm(10)
x
y <- rnorm(10, 50, 5)
y
summary(x)
summary(y)
set.seed(1)
set.seed(1)  # seed for the random number generator (reproducibility)
set.seed(2)
rnorm(5)
set.seed(1)
rnorm(5)     # same seed -> same draws as the first rnorm(5) after set.seed(1)
normal1 <- rnorm(10000)
normal2 <- rnorm(10000, 10, 5)
hist(normal1)
summary(normal1)
hist(normal2)
summary(normal2)
rpois(10, 1)
poisson1 <- rpois(10000, 1)
poisson2 <- rpois(10000, 10)
hist(poisson1)
hist(poisson2)
ppois(2, 2)  # cumulative probability P(X <= 2) for Poisson(2)
ppois(4, 2)
ppois(6, 2)
hist(rpois(10000, 2))
# Bug fix: the original printed dpois(1, 2) twelve times; the loop index `i`
# was clearly meant to vary the quantile.
for (i in 0:11) {
  print(dpois(i, 2))
}
hist(runif(10000, 10, 20))
# Suppose we simulate the following linear model: y = B0 + B1*x + e,
# where e ~ N(0, 2^2) and x ~ N(0, 1^2) is assumed.
set.seed(20)
x <- rnorm(100, 0, 1)
e <- rnorm(100, 0, 2)
y <- 0.5 + 2 * x + e
plot(x, y)
z <- 0.5 + 2 * x  # the noiseless linear predictor
plot(x, z)
plot(z, y)
set.seed(10)
x <- rbinom(100, 1, 0.5)  # number of successes in n trials
e <- rnorm(100, 0, 2)
y <- 0.5 + 2 * x + e
summary(y)
plot(x, y, main = "Modelo Lineal", col = "dark red")
set.seed(1)
x <- rnorm(100)
log.mu <- 0.5 + 0.3 * x  # Poisson regression: log of the mean is linear in x
y <- rpois(100, exp(log.mu))
summary(y)
plot(x, y, main = "Modelo Poisson", col = "forestgreen")
# Sampling ----
set.seed(1)
sample(1:10, 4)
sample(letters, 5)
sample(1:10)                  # permutation: a different ordering of 1..10
sample(1:10, replace = TRUE)  # numbers may repeat (sampling with replacement)
# Profiling (why is my code slow?) ----
system.time(readLines("http://www.fcfm.buap.mx"))
# Construct the n x n Hilbert matrix: entry (i, j) equals 1 / (i + j - 1).
hilbert <- function(n) {
  denominators <- outer(0:(n - 1), 1:n, "+")
  1 / denominators
}
# Time the construction of a large Hilbert matrix versus its SVD.
x <- hilbert(1000)
system.time(x <- hilbert(1000))  # construction via vectorized outer()
system.time(svd(x))              # singular value decomposition of the 1000x1000 matrix
| /Más apuntes.R | no_license | LauraCrystel/Programacion_Actuarial_III | R | false | false | 1,784 | r | #Manejo de errores
rm(x)
mean(x)
traceback()
lm(y~x)
traceback()
debug(lm)
lm(y~x)
#Simulaci??n
str(str)
str(lm)
str(ls)
x <- rnorm(100)
str(x)
summary(x)
f<- gl(40,10) #creaci??n del factor
str(f)
summary(f)
str(airquality)
m <- matrix(rnorm(100),10,10)
str(m)
s <- split(airquality,arquiality$mounth)
str(s)
x <- rnorm(10)
x
y <- rnorm(10,50,5)
y
summary(x)
summary(y)
set.seed(1)
set.seed(1) #semilla para n??meros aleatorios
set.seed(2)
rnorm(5)
set.seed(1)
rnorm(5)
normal1 <- rnorm(10000)
normal2 <- rnorm(10000,10,5)
hist(normal1)
summary(normal1)
hist(normal2)
summary(normal2)
rpois(10,1)
poisson1 <- rpois(10000,1)
poisson2 <- rpois(10000,10)
hist(poisson1)
hist(poisson2)
ppois(2,2)
ppois(4,2)
ppois(6,2)
hist(rpois(10000,2))
for (i in 0:11){print(dpois(1,2))}
hist(runif(10000,10,20))
#Supongamos que vamos a asimilar el siguente modelo lineal y=B0+B1x+e
#donde e~N(0,2^2) y se asume que x~N(0,1^2)
set.seed(20)
x <- rnorm(100,0,1)
e <- rnorm(100,0,2)
y <- 0.5+ 2*x+e
plot(x,y)
z <- 0.5 + 2 * x
plot(x,z)
plot(z,y)
set.seed(10)
x <- rbinom(100,1,0.5)#N??mero de ??xitos que puede tener en n intentos
e <- rnorm(100,0,2)
y <- 0.5 + 2*x + e
summary(y)
plot(x,y, main = "Modelo Lineal", col="dark red")
set.seed(1)
x <- rnorm(100)
log.mu <- 0.5 + 0.3*x
y <- rpois(100,exp(log.mu))
summary(y)
plot(x,y, main = "Modelo Poisson", col="forestgreen")
#Muestreo
set.seed(1)
sample(1:10,4)
sample(letters,5)
sample(1:10)#permutaci??n diferente orden que puede adoptar
sample(1:10,replace=TRUE)#se repiten los n??meros (extracci??n con remplazo)
#perfilaje (??por qu?? mi c??digo es lento?)
system.time(readLines("http://www.fcfm.buap.mx"))
hilbert <- function(n){
i <- 1:n
1/outer(i-1,i,"+")
}
x <- hilbert(1000)
system.time(x <- hilbert(1000))
system.time(svd(x))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nlaHumanImpact.r
\name{nlaHumanImpact}
\alias{nlaHumanImpact}
\title{Calculate NLA Human Influence Metrics}
\usage{
nlaHumanImpact(buildings = NULL, buildings_dd = NULL, commercial = NULL,
commercial_dd = NULL, crops = NULL, crops_dd = NULL, docks = NULL,
docks_dd = NULL, landfill = NULL, landfill_dd = NULL, lawn = NULL,
lawn_dd = NULL, orchard = NULL, orchard_dd = NULL, other = NULL,
other_dd = NULL, park = NULL, park_dd = NULL, pasture = NULL,
pasture_dd = NULL, powerlines = NULL, powerlines_dd = NULL,
roads = NULL, roads_dd = NULL, walls = NULL, walls_dd = NULL,
drawdown = NULL, horizontalDistance_dd = NULL, data2007 = FALSE,
fillinDrawdown = TRUE, proximityWeights = data.frame(proximity = c("0",
"P", "C"), calc = c(0, 0.5, 1), circa = c(0, 0, 1), present = c(0, 1, 1),
stringsAsFactors = FALSE))
}
\arguments{
\item{buildings}{A data frame containing buildings human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{buildings_dd}{A data frame containing buildings human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{commercial_dd}{A data frame containing commercial human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{crops}{A data frame containing row crops human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{crops_dd}{A data frame containing row crops human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{docks}{A data frame containing docks or boats human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{docks_dd}{A data frame containing docks or boats human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{landfill}{A data frame containing landfill/trash human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{landfill_dd}{A data frame containing landfill/trash human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{lawn}{A data frame containing lawn human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{lawn_dd}{A data frame containing lawn human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{orchard}{A data frame containing orchard human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{orchard_dd}{A data frame containing orchard human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{other}{A data frame containing other human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{other_dd}{A data frame containing other human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{park}{A data frame containing park facilities/man-made beach human
influence proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{park_dd}{A data frame containing park facilities/man-made beach
human influence proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{pasture}{A data frame containing pasture/range/hay field human
influence proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{pasture_dd}{A data frame containing pasture/range/hay field human
influence proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{powerlines}{A data frame containing power lines human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{powerlines_dd}{A data frame containing power lines human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{roads}{A data frame containing roads or railroad human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{roads_dd}{A data frame containing roads or railroad human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{walls}{A data frame containing walls, dikes, or revetments human
influence proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{walls_dd}{A data frame containing walls, dikes, or revetments
human influence proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{drawdown}{A data frame containing the presence of drawdown at stations.
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE an integer value, or character value that is castable to an
integer, indicating the presence of drawdown at a station.
}}
\item{horizontalDistance_dd}{A data frame containing the horizontal
distance from waterline to high water mark (m) when drawdown is present,
with the columns:
\itemize{
\item SITE an integer or character value identifying a single site visit.
\item STATION a character value identifying the station within the SITE
\item VALUE an numeric value, or character value that is castable to an
numeric.
}}
\item{data2007}{A logical value, which equals TRUE if 2007 data is being
processed. The default value is FALSE.}
\item{fillinDrawdown}{A logical value, which specifies whether to use the
DRAWDOWN parameter to fill in unrecorded cover and horizontalDistance_dd
values. The default value is TRUE.}
\item{proximityWeights}{A data frame relating categorical proximity values
to various numeric weights for different types of metrics. The default data frame
consists of the following values:
\itemize{
\item proximity c('0','P','C')
\item calc c(0.0, 0.5, 1.0)
\item circa c(0, 0, 1)
\item present c(0, 1, 1)
}}
\item{commercial}{A data frame containing commercial human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
}
\value{
Either a data frame when metric calculation is successful or a
character string containing an error message when metric calculation
is not successful. The data frame contains the following columns:
\itemize{
\item SITE - unique site visit identifier
\item METRIC - metric name
\item VALUE - metric value
}
The output metrics include:
HIFPANY_DD, HIFPANY_RIP, HIFPANY_SYN, HIFPANYCIRCA_DD,
HIFPANYCIRCA_RIP, HIFPANYCIRCA_SYN, HIIAG_DD, HIIAG_RIP,
HIIAG_SYN, HIIAGCIRCA_DD, HIIAGCIRCA_RIP, HIIAGCIRCA_SYN,
HIIALL_DD, HIIALL_RIP, HIIALL_SYN, HIIALLCIRCA_DD,
HIIALLCIRCA_RIP, HIIALLCIRCA_SYN, HIINONAG_DD, HIINONAG_RIP,
HIINONAG_SYN, HIINONAGCIRCA_DD, HIINONAGCIRCA_RIP, HIINONAGCIRCA_SYN,
HINAG_DD, HINAG_RIP, HINAG_SYN, HINALL_DD,
HINALL_RIP, HINALL_SYN, HINBUILDINGS_DD, HINBUILDINGS_RIP,
HINBUILDINGS_SYN, HINCOMMERCIAL_DD, HINCOMMERCIAL_RIP, HINCOMMERCIAL_SYN,
HINCROPS_DD, HINCROPS_RIP, HINCROPS_SYN, HINDOCKS_DD,
HINDOCKS_RIP, HINDOCKS_SYN, HINLANDFILL_DD, HINLANDFILL_RIP,
HINLANDFILL_SYN, HINLAWN_DD, HINLAWN_RIP, HINLAWN_SYN,
HINNONAG_DD, HINNONAG_RIP, HINNONAG_SYN, HINORCHARD_DD,
HINORCHARD_RIP, HINORCHARD_SYN, HINOTHER_DD, HINOTHER_RIP,
HINOTHER_SYN, HINPARK_DD, HINPARK_RIP, HINPARK_SYN,
HINPASTURE_DD, HINPASTURE_RIP, HINPASTURE_SYN, HINPOWERLINES_DD,
HINPOWERLINES_RIP, HINPOWERLINES_SYN, HINROADS_DD, HINROADS_RIP,
HINROADS_SYN, HINWALLS_DD, HINWALLS_RIP, HINWALLS_SYN,
HIPWAG_DD, HIPWAG_RIP, HIPWAG_SYN, HIPWALL_DD,
HIPWALL_RIP, HIPWALL_SYN, HIPWBUILDINGS_DD, HIPWBUILDINGS_RIP,
HIPWBUILDINGS_SYN, HIPWCOMMERCIAL_DD, HIPWCOMMERCIAL_RIP, HIPWCOMMERCIAL_SYN,
HIPWCROPS_DD, HIPWCROPS_RIP, HIPWCROPS_SYN, HIPWDOCKS_DD,
HIPWDOCKS_RIP, HIPWDOCKS_SYN, HIPWLANDFILL_DD, HIPWLANDFILL_RIP,
HIPWLANDFILL_SYN, HIPWLAWN_DD, HIPWLAWN_RIP, HIPWLAWN_SYN,
HIPWNONAG_DD, HIPWNONAG_RIP, HIPWNONAG_SYN, HIPWORCHARD_DD,
HIPWORCHARD_RIP, HIPWORCHARD_SYN, HIPWOTHER_DD, HIPWOTHER_RIP,
HIPWOTHER_SYN, HIPWPARK_DD, HIPWPARK_RIP, HIPWPARK_SYN,
HIPWPASTURE_DD, HIPWPASTURE_RIP, HIPWPASTURE_SYN, HIPWPOWERLINES_DD,
HIPWPOWERLINES_RIP, HIPWPOWERLINES_SYN, HIPWROADS_DD, HIPWROADS_RIP,
HIPWROADS_SYN, HIPWWALLS_DD, HIPWWALLS_RIP, HIPWWALLS_SYN
Descriptions for all metrics are included in
\emph{NLA_Physical_Habitat_Metric_Descriptions.pdf} in the package
documentation.
}
\description{
This function calculates the human influence
portion of the physical habitat metrics for National Lakes Assessment
(NLA) data.
}
\examples{
head(nlaPhabEx)
buildings <- subset(nlaPhabEx,PARAMETER=='HI_BUILDINGS',select=-PARAMETER)
buildings_dd <- subset(nlaPhabEx,PARAMETER=='HI_BUILDINGS_DD',select=-PARAMETER)
commercial <- subset(nlaPhabEx,PARAMETER=='HI_COMMERCIAL',select=-PARAMETER)
commercial_dd <- subset(nlaPhabEx,PARAMETER=='HI_COMMERCIAL_DD',select=-PARAMETER)
crops <- subset(nlaPhabEx,PARAMETER=='HI_CROPS',select=-PARAMETER)
crops_dd <- subset(nlaPhabEx,PARAMETER=='HI_CROPS_DD',select=-PARAMETER)
docks <- subset(nlaPhabEx,PARAMETER=='HI_DOCKS',select=-PARAMETER)
docks_dd <- subset(nlaPhabEx,PARAMETER=='HI_DOCKS_DD',select=-PARAMETER)
landfill <- subset(nlaPhabEx,PARAMETER=='HI_LANDFILL',select=-PARAMETER)
landfill_dd <- subset(nlaPhabEx,PARAMETER=='HI_LANDFILL_DD',select=-PARAMETER)
lawn <- subset(nlaPhabEx,PARAMETER=='HI_LAWN',select=-PARAMETER)
lawn_dd <- subset(nlaPhabEx,PARAMETER=='HI_LAWN_DD',select=-PARAMETER)
orchard <- subset(nlaPhabEx,PARAMETER=='HI_ORCHARD',select=-PARAMETER)
orchard_dd <- subset(nlaPhabEx,PARAMETER=='HI_ORCHARD_DD',select=-PARAMETER)
other <- subset(nlaPhabEx,PARAMETER=='HI_OTHER',select=-PARAMETER)
other_dd <- subset(nlaPhabEx,PARAMETER=='HI_OTHER_DD',select=-PARAMETER)
park <- subset(nlaPhabEx,PARAMETER=='HI_PARK',select=-PARAMETER)
park_dd <- subset(nlaPhabEx,PARAMETER=='HI_PARK_DD',select=-PARAMETER)
pasture <- subset(nlaPhabEx,PARAMETER=='HI_PASTURE',select=-PARAMETER)
pasture_dd <- subset(nlaPhabEx,PARAMETER=='HI_PASTURE_DD',select=-PARAMETER)
powerlines <- subset(nlaPhabEx,PARAMETER=='HI_POWERLINES',select=-PARAMETER)
powerlines_dd <- subset(nlaPhabEx,PARAMETER=='HI_POWERLINES_DD',select=-PARAMETER)
roads <- subset(nlaPhabEx,PARAMETER=='HI_ROADS',select=-PARAMETER)
roads_dd <- subset(nlaPhabEx,PARAMETER=='HI_ROADS_DD',select=-PARAMETER)
walls <- subset(nlaPhabEx,PARAMETER=='HI_WALLS',select=-PARAMETER)
walls_dd <- subset(nlaPhabEx,PARAMETER=='HI_WALLS_DD',select=-PARAMETER)
drawdown <- subset(nlaPhabEx,PARAMETER=='DRAWDOWN',select=-PARAMETER)
horizontalDistance_dd <- subset(nlaPhabEx,PARAMETER=='HORIZ_DIST_DD',select=-PARAMETER)
# Use defaults for data2007, fillinDrawdown, and proximityWeights
# arguments
exHumInfl <- nlaHumanImpact(buildings, buildings_dd, commercial, commercial_dd,
crops, crops_dd,docks, docks_dd, landfill, landfill_dd, lawn, lawn_dd, orchard,
orchard_dd, other, other_dd, park, park_dd, pasture, pasture_dd, powerlines,
powerlines_dd, roads, roads_dd, walls, walls_dd, drawdown, horizontalDistance_dd)
head(exHumInfl)
}
\author{
Curt Seeliger \email{Seeliger.Curt@epa.gov}\cr
Tom Kincaid \email{Kincaid.Tom@epa.gov}
}
\keyword{survey}
| /man/nlaHumanImpact.Rd | no_license | jasonelaw/aquamet | R | false | true | 17,288 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nlaHumanImpact.r
\name{nlaHumanImpact}
\alias{nlaHumanImpact}
\title{Calculate NLA Human Influence Metrics}
\usage{
nlaHumanImpact(buildings = NULL, buildings_dd = NULL, commercial = NULL,
commercial_dd = NULL, crops = NULL, crops_dd = NULL, docks = NULL,
docks_dd = NULL, landfill = NULL, landfill_dd = NULL, lawn = NULL,
lawn_dd = NULL, orchard = NULL, orchard_dd = NULL, other = NULL,
other_dd = NULL, park = NULL, park_dd = NULL, pasture = NULL,
pasture_dd = NULL, powerlines = NULL, powerlines_dd = NULL,
roads = NULL, roads_dd = NULL, walls = NULL, walls_dd = NULL,
drawdown = NULL, horizontalDistance_dd = NULL, data2007 = FALSE,
fillinDrawdown = TRUE, proximityWeights = data.frame(proximity = c("0",
"P", "C"), calc = c(0, 0.5, 1), circa = c(0, 0, 1), present = c(0, 1, 1),
stringsAsFactors = FALSE))
}
\arguments{
\item{buildings}{A data frame containing buildings human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{buildings_dd}{A data frame containing buildings human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{commercial_dd}{A data frame containing commercial human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{crops}{A data frame containing row crops human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{crops_dd}{A data frame containing row crops human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{docks}{A data frame containing docks or boats human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{docks_dd}{A data frame containing docks or boats human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{landfill}{A data frame containing landfill/trash human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{landfill_dd}{A data frame containing landfill/trash human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{lawn}{A data frame containing lawn human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{lawn_dd}{A data frame containing lawn human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{orchard}{A data frame containing orchard human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{orchard_dd}{A data frame containing orchard human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{other}{A data frame containing other human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{other_dd}{A data frame containing other human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{park}{A data frame containing park facilities/man-made beach human
influence proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{park_dd}{A data frame containing park facilities/man-made beach
human influence proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{pasture}{A data frame containing pasture/range/hay field human
influence proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{pasture_dd}{A data frame containing pasture/range/hay field human
influence proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{powerlines}{A data frame containing power lines human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{powerlines_dd}{A data frame containing power lines human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{roads}{A data frame containing roads or railroad human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{roads_dd}{A data frame containing roads or railroad human influence
proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{walls}{A data frame containing walls, dikes, or revetments human
influence proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{walls_dd}{A data frame containing walls, dikes, or revetments
human influence proximity class values from drawdown zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
\item{drawdown}{A data frame containing the presence of drawdown at stations.
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE an integer value, or character value that is castable to an
integer, indicating the presence of drawdown at a station.
}}
\item{horizontalDistance_dd}{A data frame containing the horizontal
distance from waterline to high water mark (m) when drawdown is present,
with the columns:
\itemize{
\item SITE an integer or character value identifying a single site visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a numeric value, or character value that is castable to a
numeric.
}}
\item{data2007}{A logical value, which equals TRUE if 2007 data is being
processed. The default value is FALSE.}
\item{fillinDrawdown}{A logical value, which specifies whether to use the
DRAWDOWN parameter to fill in unrecorded cover and horizontalDistance_dd
values. The default value is TRUE.}
\item{proximityWeights}{A data frame relating categorical proximity values
to various numeric weights for different types of metrics. The default data frame
consists of the following values:
\itemize{
\item proximity c('0','P','C')
\item calc c(0.0, 0.5, 1.0)
\item circa c(0, 0, 1)
\item present c(0, 1, 1)
}}
\item{commerical}{A data frame containing commercial human influence
proximity class values from riparian zone, with the columns:
\itemize{
\item SITE an integer or character value identifying a single site
visit.
\item STATION a character value identifying the station within the SITE
\item VALUE a character value of 0, P, or C representing the proximity
category.
}}
}
\value{
Either a data frame when metric calculation is successful or a
character string containing an error message when metric calculation
is not successful. The data frame contains the following columns:
\itemize{
\item SITE - unique site visit identifier
\item METRIC - metric name
\item VALUE - metric value
}
The output metrics include:
HIFPANY_DD, HIFPANY_RIP, HIFPANY_SYN, HIFPANYCIRCA_DD,
HIFPANYCIRCA_RIP, HIFPANYCIRCA_SYN, HIIAG_DD, HIIAG_RIP,
HIIAG_SYN, HIIAGCIRCA_DD, HIIAGCIRCA_RIP, HIIAGCIRCA_SYN,
HIIALL_DD, HIIALL_RIP, HIIALL_SYN, HIIALLCIRCA_DD,
HIIALLCIRCA_RIP, HIIALLCIRCA_SYN, HIINONAG_DD, HIINONAG_RIP,
HIINONAG_SYN, HIINONAGCIRCA_DD, HIINONAGCIRCA_RIP, HIINONAGCIRCA_SYN,
HINAG_DD, HINAG_RIP, HINAG_SYN, HINALL_DD,
HINALL_RIP, HINALL_SYN, HINBUILDINGS_DD, HINBUILDINGS_RIP,
HINBUILDINGS_SYN, HINCOMMERCIAL_DD, HINCOMMERCIAL_RIP, HINCOMMERCIAL_SYN,
HINCROPS_DD, HINCROPS_RIP, HINCROPS_SYN, HINDOCKS_DD,
HINDOCKS_RIP, HINDOCKS_SYN, HINLANDFILL_DD, HINLANDFILL_RIP,
HINLANDFILL_SYN, HINLAWN_DD, HINLAWN_RIP, HINLAWN_SYN,
HINNONAG_DD, HINNONAG_RIP, HINNONAG_SYN, HINORCHARD_DD,
HINORCHARD_RIP, HINORCHARD_SYN, HINOTHER_DD, HINOTHER_RIP,
HINOTHER_SYN, HINPARK_DD, HINPARK_RIP, HINPARK_SYN,
HINPASTURE_DD, HINPASTURE_RIP, HINPASTURE_SYN, HINPOWERLINES_DD,
HINPOWERLINES_RIP, HINPOWERLINES_SYN, HINROADS_DD, HINROADS_RIP,
HINROADS_SYN, HINWALLS_DD, HINWALLS_RIP, HINWALLS_SYN,
HIPWAG_DD, HIPWAG_RIP, HIPWAG_SYN, HIPWALL_DD,
HIPWALL_RIP, HIPWALL_SYN, HIPWBUILDINGS_DD, HIPWBUILDINGS_RIP,
HIPWBUILDINGS_SYN, HIPWCOMMERCIAL_DD, HIPWCOMMERCIAL_RIP, HIPWCOMMERCIAL_SYN,
HIPWCROPS_DD, HIPWCROPS_RIP, HIPWCROPS_SYN, HIPWDOCKS_DD,
HIPWDOCKS_RIP, HIPWDOCKS_SYN, HIPWLANDFILL_DD, HIPWLANDFILL_RIP,
HIPWLANDFILL_SYN, HIPWLAWN_DD, HIPWLAWN_RIP, HIPWLAWN_SYN,
HIPWNONAG_DD, HIPWNONAG_RIP, HIPWNONAG_SYN, HIPWORCHARD_DD,
HIPWORCHARD_RIP, HIPWORCHARD_SYN, HIPWOTHER_DD, HIPWOTHER_RIP,
HIPWOTHER_SYN, HIPWPARK_DD, HIPWPARK_RIP, HIPWPARK_SYN,
HIPWPASTURE_DD, HIPWPASTURE_RIP, HIPWPASTURE_SYN, HIPWPOWERLINES_DD,
HIPWPOWERLINES_RIP, HIPWPOWERLINES_SYN, HIPWROADS_DD, HIPWROADS_RIP,
HIPWROADS_SYN, HIPWWALLS_DD, HIPWWALLS_RIP, HIPWWALLS_SYN
Descriptions for all metrics are included in
\emph{NLA_Physical_Habitat_Metric_Descriptions.pdf} in the package
documentation.
}
\description{
This function calculates the human influence
portion of the physical habitat metrics for National Lakes Assessment
(NLA) data.
}
\examples{
head(nlaPhabEx)
buildings <- subset(nlaPhabEx,PARAMETER=='HI_BUILDINGS',select=-PARAMETER)
buildings_dd <- subset(nlaPhabEx,PARAMETER=='HI_BUILDINGS_DD',select=-PARAMETER)
commercial <- subset(nlaPhabEx,PARAMETER=='HI_COMMERCIAL',select=-PARAMETER)
commercial_dd <- subset(nlaPhabEx,PARAMETER=='HI_COMMERCIAL_DD',select=-PARAMETER)
crops <- subset(nlaPhabEx,PARAMETER=='HI_CROPS',select=-PARAMETER)
crops_dd <- subset(nlaPhabEx,PARAMETER=='HI_CROPS_DD',select=-PARAMETER)
docks <- subset(nlaPhabEx,PARAMETER=='HI_DOCKS',select=-PARAMETER)
docks_dd <- subset(nlaPhabEx,PARAMETER=='HI_DOCKS_DD',select=-PARAMETER)
landfill <- subset(nlaPhabEx,PARAMETER=='HI_LANDFILL',select=-PARAMETER)
landfill_dd <- subset(nlaPhabEx,PARAMETER=='HI_LANDFILL_DD',select=-PARAMETER)
lawn <- subset(nlaPhabEx,PARAMETER=='HI_LAWN',select=-PARAMETER)
lawn_dd <- subset(nlaPhabEx,PARAMETER=='HI_LAWN_DD',select=-PARAMETER)
orchard <- subset(nlaPhabEx,PARAMETER=='HI_ORCHARD',select=-PARAMETER)
orchard_dd <- subset(nlaPhabEx,PARAMETER=='HI_ORCHARD_DD',select=-PARAMETER)
other <- subset(nlaPhabEx,PARAMETER=='HI_OTHER',select=-PARAMETER)
other_dd <- subset(nlaPhabEx,PARAMETER=='HI_OTHER_DD',select=-PARAMETER)
park <- subset(nlaPhabEx,PARAMETER=='HI_PARK',select=-PARAMETER)
park_dd <- subset(nlaPhabEx,PARAMETER=='HI_PARK_DD',select=-PARAMETER)
pasture <- subset(nlaPhabEx,PARAMETER=='HI_PASTURE',select=-PARAMETER)
pasture_dd <- subset(nlaPhabEx,PARAMETER=='HI_PASTURE_DD',select=-PARAMETER)
powerlines <- subset(nlaPhabEx,PARAMETER=='HI_POWERLINES',select=-PARAMETER)
powerlines_dd <- subset(nlaPhabEx,PARAMETER=='HI_POWERLINES_DD',select=-PARAMETER)
roads <- subset(nlaPhabEx,PARAMETER=='HI_ROADS',select=-PARAMETER)
roads_dd <- subset(nlaPhabEx,PARAMETER=='HI_ROADS_DD',select=-PARAMETER)
walls <- subset(nlaPhabEx,PARAMETER=='HI_WALLS',select=-PARAMETER)
walls_dd <- subset(nlaPhabEx,PARAMETER=='HI_WALLS_DD',select=-PARAMETER)
drawdown <- subset(nlaPhabEx,PARAMETER=='DRAWDOWN',select=-PARAMETER)
horizontalDistance_dd <- subset(nlaPhabEx,PARAMETER=='HORIZ_DIST_DD',select=-PARAMETER)
# Use defaults for data2007, fillinDrawdown, and proximityWeights
# arguments
exHumInfl <- nlaHumanImpact(buildings, buildings_dd, commercial, commercial_dd,
crops, crops_dd,docks, docks_dd, landfill, landfill_dd, lawn, lawn_dd, orchard,
orchard_dd, other, other_dd, park, park_dd, pasture, pasture_dd, powerlines,
powerlines_dd, roads, roads_dd, walls, walls_dd, drawdown, horizontalDistance_dd)
head(exHumInfl)
}
\author{
Curt Seeliger \email{Seeliger.Curt@epa.gov}\cr
Tom Kincaid \email{Kincaid.Tom@epa.gov}
}
\keyword{survey}
|
###############
#### model I ###
###############
##### some definitions
# Per-simulation result accumulators, filled in the fitting loop below.
MAE=c()                       # mean absolute error of the d[] posterior means
bias=c()                      # mean of the d[] posterior means
SSresults=c()                 # number of D[] contrasts whose 95% CrI excludes 0
difference.best.worst=c()     # |posterior mean| of the first D[] contrast
difference.best.reference=c() # declared but not assigned in the loop below
sd.best.worst=c()             # posterior sd of the first D[] contrast
sd.best.ref=c()               # declared but not assigned in the loop below
coverage=c()                  # share of d[] 95% CrIs that contain 0
jags.m=list()                 # per-simulation jags.parallel fits (freed after use)
# JAGS model: fixed-effect network meta-analysis on contrast-level data.
# Written in BUGS syntax; it is compiled by JAGS, not evaluated as R.
model1=function() {
# Likelihood: each study's observed effect y[i] for the contrast t1 vs t2
# is normal around the difference of basic parameters, with known SE
# (JAGS dnorm takes mean and *precision*).
for(i in 1:NS){
dm[i]<-d[t2[i]]-d[t1[i]]
prec[i]<-1/(SE[i]*SE[i])
y[i]~dnorm(dm[i],prec[i])}
# Basic parameters: treatment 1 is the reference; informative N(0,1) prior
# on the remaining d[].
d[1]<-0
for(i in 2:NT){
d[i]~dnorm(0,1)}
# All pairwise contrasts D[j,i] = d[j] - d[i] (lower triangle incl. diagonal).
for (i in 1:NT){
for (j in i:NT){
D[j,i]<-d[j]-d[i]}}
#TreatmeNT hierarchy
order[1:NT]<- NT+1- rank(d[1:NT])
for(k in 1:NT) {
# this is when the outcome is positive - omit 'NT+1-' when the outcome is negative
most.effective[k]<-equals(order[k],1)
# effectiveness[k,j] = 1 when treatment k has rank j in this MCMC draw
for(j in 1:NT) {
effectiveness[k,j]<- equals(order[k],j)}}
# Cumulative ranking probabilities for each treatment.
for(k in 1:NT) {
for(j in 1:NT) {
cumeffectiveness[k,j]<- sum(effectiveness[k,1:j])}}
#SUCRAS#
# Surface Under the Cumulative RAnking curve per treatment.
for(k in 1:NT) {
SUCRA[k]<- sum(cumeffectiveness[k,1:(NT-1)]) /(NT-1)}}
# Build the vector of node names for JAGS to monitor.
params=c()
# All pairwise contrasts D[j,i] with j > i.
for (i in 1:(N.treat-1)){
for (j in (i+1):N.treat){
params=c(params, paste("D[",j,",",i,"]",sep=""))
}}
# Basic parameters d[2..N.treat] (d[1] is fixed at 0 as the reference).
for (i in 2:(N.treat)){
params=c(params, paste("d[",i,"]",sep=""))
}
# SUCRA for every treatment.
for (i in 1:(N.treat)){
params=c(params, paste("SUCRA[",i,"]",sep=""))
}
#number of D parameters
no.D=N.treat*(N.treat-1)/2
##
# Fit the model to every simulated dataset and collect performance measures.
# BUGSoutput$summary rows are ordered alphabetically by node name:
#   rows 1:no.D                               -> D[j,i] contrasts
#   rows (no.D+1):(no.D+N.treat)              -> SUCRA[k]
#   rows (no.D+N.treat+1):(no.D+2*N.treat-1)  -> d[2..N.treat]
# Columns: 1 = posterior mean, 2 = sd, 3 = 2.5% quantile, 7 = 97.5% quantile
# -- NOTE(review): confirm column layout against the R2jags version in use.
for (i in 1:N.sim){
initialval = NULL
data2 <- list(y = data1[[i]]$TE,SE=data1[[i]]$seTE, NS=length(data1[[i]]$studlab), t1=data1[[i]]$t1,t2=data1[[i]]$t2, NT=N.treat)
jags.m[[i]] <- jags.parallel(data=data2,initialval,parameters.to.save = params, n.chains = 2, n.iter = 15000, n.thin=1, n.burnin = 5000, DIC=F, model.file = model1)
print(i)
# Bias = mean of the d[] posterior means (assumes true effects are 0 in
# this scenario -- confirm against the data-generating script).
bias[i]=(mean(jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),1]))
# Coverage: share of d[] 95% CrIs that contain 0.
coverage[i]=(mean(jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),3]<0&jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),7]>0))
MAE[i]=mean(abs(jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),1]))
SSresults[i]=sum(jags.m[[i]]$BUGSoutput$summary[1:no.D,3]>0|jags.m[[i]]$BUGSoutput$summary[1:no.D,7]<0) ### 95% CrI does not include 0
difference.best.worst[i]=abs(jags.m[[i]]$BUGSoutput$summary[1,1])
sd.best.worst[i]=abs(jags.m[[i]]$BUGSoutput$summary[1,2])
# Free memory: drop the fitted object once its summaries are extracted.
jags.m[[i]]=NULL
}
| /simulation study 1/Model I - Fixed Effect - Informative prior (Scenario C).R | no_license | esm-ispm-unibe-ch-REPRODUCIBLE/the_dark_side_of_the_force | R | false | false | 2,297 | r | ###############
#### model I ###
###############
##### some definitions
MAE=c()
bias=c()
SSresults=c()
difference.best.worst=c()
difference.best.reference=c()
sd.best.worst=c()
sd.best.ref=c()
coverage=c()
jags.m=list()
#####################
model1=function() {
for(i in 1:NS){
dm[i]<-d[t2[i]]-d[t1[i]]
prec[i]<-1/(SE[i]*SE[i])
y[i]~dnorm(dm[i],prec[i])}
d[1]<-0
for(i in 2:NT){
d[i]~dnorm(0,1)}
for (i in 1:NT){
for (j in i:NT){
D[j,i]<-d[j]-d[i]}}
#TreatmeNT hierarchy
order[1:NT]<- NT+1- rank(d[1:NT])
for(k in 1:NT) {
# this is when the outcome is positive - omit 'NT+1-' when the outcome is negative
most.effective[k]<-equals(order[k],1)
for(j in 1:NT) {
effectiveness[k,j]<- equals(order[k],j)}}
for(k in 1:NT) {
for(j in 1:NT) {
cumeffectiveness[k,j]<- sum(effectiveness[k,1:j])}}
#SUCRAS#
for(k in 1:NT) {
SUCRA[k]<- sum(cumeffectiveness[k,1:(NT-1)]) /(NT-1)}}
params=c()
for (i in 1:(N.treat-1)){
for (j in (i+1):N.treat){
params=c(params, paste("D[",j,",",i,"]",sep=""))
}}
for (i in 2:(N.treat)){
params=c(params, paste("d[",i,"]",sep=""))
}
for (i in 1:(N.treat)){
params=c(params, paste("SUCRA[",i,"]",sep=""))
}
#number of D parameters
no.D=N.treat*(N.treat-1)/2
##
for (i in 1:N.sim){
initialval = NULL
data2 <- list(y = data1[[i]]$TE,SE=data1[[i]]$seTE, NS=length(data1[[i]]$studlab), t1=data1[[i]]$t1,t2=data1[[i]]$t2, NT=N.treat)
jags.m[[i]] <- jags.parallel(data=data2,initialval,parameters.to.save = params, n.chains = 2, n.iter = 15000, n.thin=1, n.burnin = 5000, DIC=F, model.file = model1)
print(i)
bias[i]=(mean(jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),1]))
coverage[i]=(mean(jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),3]<0&jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),7]>0))
MAE[i]=mean(abs(jags.m[[i]]$BUGSoutput$summary[(no.D+N.treat+1):(no.D+2*N.treat-1),1]))
SSresults[i]=sum(jags.m[[i]]$BUGSoutput$summary[1:no.D,3]>0|jags.m[[i]]$BUGSoutput$summary[1:no.D,7]<0) ### 95% CrI does not include 0
difference.best.worst[i]=abs(jags.m[[i]]$BUGSoutput$summary[1,1])
sd.best.worst[i]=abs(jags.m[[i]]$BUGSoutput$summary[1,2])
jags.m[[i]]=NULL
}
|
library(testthat)
library(pmxTools)
context("Miscellaneous functions")
### AUC
# Verify AUC computation on five simulated 1-compartment IV-bolus profiles.
test_that("AUC", {
  times <- seq(0, 24, by = 0.5)
  # Per-subject (CL, V) pairs, in subject order 1..5.
  pars <- list(c(6, 25), c(5.2, 26), c(3, 27), c(4.2, 30), c(9, 20))
  dat <- expand.grid(ID = 1:5, TIME = times)
  dat <- dat[order(dat$ID, dat$TIME), ]
  dat$DV <- unlist(lapply(pars, function(p) {
    calc_sd_1cmt_linear_bolus(t = times, CL = p[1], V = p[2], dose = 600)
  }))
  res <- get_auc(dat)
  res$AUC <- signif(res$AUC, 5)
  expect_equal(res,
               data.frame(ID = 1:5,
                          AUC = c(99.804, 114.530, 186.150, 137.950, 66.946)))
})
| /tests/testthat/test-get_auc.R | no_license | billdenney/pmxTools | R | false | false | 732 | r | library(testthat)
library(pmxTools)
context("Miscellaneous functions")
### AUC
test_that("AUC", {
d <- expand.grid(ID = 1:5, TIME=seq(0,24,by=0.5))
d <- d[order(d$ID, d$TIME),]
d$DV <- c(calc_sd_1cmt_linear_bolus(t=seq(0,24,by=0.5), CL=6, V=25, dose=600),
calc_sd_1cmt_linear_bolus(t=seq(0,24,by=0.5), CL=5.2, V=26, dose=600),
calc_sd_1cmt_linear_bolus(t=seq(0,24,by=0.5), CL=3, V=27, dose=600),
calc_sd_1cmt_linear_bolus(t=seq(0,24,by=0.5), CL=4.2, V=30, dose=600),
calc_sd_1cmt_linear_bolus(t=seq(0,24,by=0.5), CL=9, V=20, dose=600))
a <- get_auc(d)
a$AUC <- signif(a$AUC,5)
expect_equal(a, data.frame(ID=1:5, AUC=c(99.804, 114.530, 186.150, 137.950, 66.946)))
})
|
## The first function, `makeVector` creates a special "vector", which is
## really a list containing a function to
## 1. set the value of the vector
## 2. get the value of the vector
## 3. set the value of the mean
## 4. get the value of the mean
# Create a "cached-mean vector": a list of closures that share the data
# vector `x` and a memoised mean.
#   set(y)        replace the data and invalidate the cached mean
#   get()         return the current data
#   setmean(mean) store a computed mean in the cache
#   getmean()     return the cached mean, or NULL if none is stored
makeVector <- function(x = numeric()) {
  cached_mean <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_mean <<- NULL  # new data invalidates any cached mean
    },
    get = function() x,
    setmean = function(mean) cached_mean <<- mean,
    getmean = function() cached_mean
  )
}
#The following function calculates the mean of the special "vector"
#created with the above function. However, it first checks to see if the
#mean has already been calculated. If so, it `get`s the mean from the
#cache and skips the computation. Otherwise, it calculates the mean of
#the data and sets the value of the mean in the cache via the `setmean`
#function.
# Return the mean of a special "vector" `x` (as built by makeVector),
# computing it at most once: a previously cached value is reused,
# otherwise the mean is computed, stored via x$setmean(), and returned.
# Extra arguments in `...` are forwarded to mean().
cachemean <- function(x, ...) {
  cached <- x$getmean()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- mean(x$get(), ...)
  x$setmean(result)
  result
}
## really a list containing a function to
## 1. set the value of the vector
## 2. get the value of the vector
## 3. set the value of the mean
## 4. get the value of the mean
makeVector <- function(x = numeric()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmean <- function(mean) m <<- mean
getmean <- function() m
list(set = set, get = get,
setmean = setmean,
getmean = getmean)
}
#The following function calculates the mean of the special "vector"
#created with the above function. However, it first checks to see if the
#mean has already been calculated. If so, it `get`s the mean from the
#cache and skips the computation. Otherwise, it calculates the mean of
#the data and sets the value of the mean in the cache via the `setmean`
#function.
cachemean <- function(x, ...) {
m <- x$getmean()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- mean(data, ...)
x$setmean(m)
m
} |
setwd('~/Projects/yelp_phoenix_academic_dataset/scripts/')
df_checkin <- read.table('yelp_checkin.tsv', sep = '\t', stringsAsFactors=F, header = T)
df_business <- read.csv('yelp_business.tsv', sep = '\t')
business <- merge(df_business, df_checkin, by = 'id_business', all.x= T, all.y = F)
# if a business didn't have a checkin in the data, impute zeros
business[is.na(business)] <- 0
require(plyr)
require(reshape2)
require(stringr)
# Given a checkin column label of the form "<hour>.<weekday>",
# return the weekday part as a character string.
extractWeekDay <- function(x) {
  fields <- strsplit(x, ".", fixed = TRUE)[[1]]
  fields[2]
}
# Given a checkin column label of the form "<hour>.<weekday>",
# return the hour part as a character string.
extractHr <- function(x) {
  fields <- strsplit(x, ".", fixed = TRUE)[[1]]
  fields[1]
}
checkinData <- melt(df_checkin, id.vars='id_business')
checkinData$variable <- str_replace(checkinData$variable,'X','')
checkinData$weekDay <- sapply(checkinData$variable, extractWeekDay)
checkinData$hr <- sapply(checkinData$variable, extractHr)
checkinData_byWeekDay <- ddply(.data = checkinData, .variables=c('id_business','weekDay'),
summarise,
numCheckins = sum(value))
checkinData_byWeekDay <- recast(checkinData_byWeekDay,
id_business ~ weekDay,fun.aggregate=sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(checkinData_byWeekDay)[2:8] <- weekdays
weekDay.Hr <- ddply(.data = checkinData,
.variables=c('weekDay', 'hr'),
summarise,
numCheckins = sum(value))
# Assign each row its first matching category from `categorylist`.
#
# Categories are checked in the order given, so earlier entries take
# priority; rows matching none of the listed indicator columns are
# labelled "Other".
#
# @param df data frame with one 0/1 indicator column per category name.
# @param categorylist character vector of indicator column names, in
#   decreasing order of priority.
# @return character vector (length nrow(df)) of primary category labels.
getCategory <- function(df, categorylist) {
  df$primaryCategory <- NA
  # seq_along() handles an empty category list safely (1:length() does not)
  for (i in seq_along(categorylist)) {
    # %in% treats NA indicator values as "no match" instead of producing
    # NA logical subscripts, which would error in the assignment below
    hit <- df[, categorylist[i]] %in% 1 & is.na(df$primaryCategory)
    df$primaryCategory[hit] <- categorylist[i]
  }
  df$primaryCategory[is.na(df$primaryCategory)] <- "Other"
  df$primaryCategory
}
categories_primary <- c('Restaurants','Shopping','Beauty...Spas','Nightlife', 'Active.Life')
business_categories <- data.frame(business$id_business)
business_categories$primaryCategory <- getCategory(business, categories_primary)
names(business_categories) = c('id_business', 'primaryCategory')
weekDay.Hr.Cat <- ddply(.data = merge(checkinData,business_categories, by='id_business'),
.variables=c('weekDay', 'hr', 'primaryCategory'),
summarise,
numCheckins = sum(value))
require(ggplot2)
require(ggthemes)
### Variations in checkins over the course of the day, by weekday
ggplot(weekDay.Hr, aes(x = as.integer(hr), y = numCheckins)) +
geom_line(aes(group = weekDay, color = weekDay)) + geom_point(aes(color = weekDay)) +
xlab('Hours in the day') + ylab('Total Number of Checkins') +
ggtitle('Checkins by day and hour') + theme_minimal()
weekDay.Hr.Cat <- weekDay.Hr.Cat[order(weekDay.Hr.Cat$weekDay),]
ggplot(weekDay.Hr.Cat, aes(x = as.integer(hr), y = numCheckins)) +
geom_line(aes(group = as.factor( weekDay), color = weekDay)) + geom_point(aes(color = weekDay)) +
facet_grid(. ~ primaryCategory) +
xlab('Hours in the day') + ylab('Total Number of Checkins') +
ggtitle('Checkins by day and hour') + theme_minimal() + guides(fill = guide_legend(reverse = TRUE))
# use business_categories to merge categories to business
#business_melt <- melt(data = business, id.vars=names(business)[1:515])
#business_melt$variable <- str_replace(business_melt$variable,'X','')
#business_melt$weekDay <- sapply(business_melt$variable, extractWeekDay)
#business_melt$hr <- sapply(business_melt$variable, extractHr)
#################################
temp <- melt(
ddply(.data = merge(checkinData_byWeekDay, business_categories,by='id_business'),
.variables= c('primaryCategory'),
summarise,
sun = sum(sun),
mon= sum(mon),
tue= sum(tue),
wed= sum(wed),
thu= sum(thu),
fri= sum(fri),
sat= sum(sat)
)
)
ggplot(temp, aes(variable, value, fill = primaryCategory)) +
geom_bar(stat = 'identity') +
xlab('Day of the week') + ylab('Number of checkins') + ggtitle('Checkins by major category') +
theme_minimal()
#################################
temp <- melt(
ddply(.data = merge(checkinData_byWeekDay, business_categories,by='id_business'),
.variables= c('primaryCategory'),
summarise,
sun = sum(sun),
mon= sum(mon),
tue= sum(tue),
wed= sum(wed),
thu= sum(thu),
fri= sum(fri),
sat= sum(sat)
)
)
temp2 <- data.frame(colSums(checkinData_byWeekDay))
temp2$variable <- weekdays
names(temp2) <- c('tot','variable')
temp <- merge(temp,temp2,by='variable')
temp$Share <- with(temp, value/tot * 100)
ggplot(temp, aes(variable, Share, fill = primaryCategory)) + geom_bar(stat = 'identity') +
xlab('Day of the week') + ylab('Number of checkins') + ggtitle('Checkins by major category') +
theme_minimal()
########################################################
restaurant_list <- c('Buffets', 'Pizza', 'Fast.Food', 'Bars','Breakfast...Brunch','Steakhouses')
restaurant_categories <- business[business$Restaurants == 1,]
restaurant_categories$restaurantCategory <- getCategory(restaurant_categories, restaurant_list)
restaurant_categories <- restaurant_categories[restaurant_categories$restaurantCategory != 'Other',c('id_business','restaurantCategory')]
temp <- melt(
ddply(.data = merge(checkinData_byWeekDay, restaurant_categories, by = 'id_business', all.x = F, all.y = F),
.variables= c('restaurantCategory'),
summarise,
sun = sum(sun),
mon= sum(mon),
tue= sum(tue),
wed= sum(wed),
thu= sum(thu),
fri= sum(fri),
sat= sum(sat)
)
)
ggplot(temp, aes(variable, value, fill = restaurantCategory)) +
geom_bar(stat = 'identity') +
xlab('Day of the week') + ylab('Number of checkins') + ggtitle('Checkins by major category') +
theme_minimal()
###########################################
business_success <- business[, c('stars_business','review_count_business','id_business')]
temp <- merge(checkinData, business_success, by = 'id_business')
temp <- merge(temp, business_categories, by = 'id_business')
temp2 <- ddply(.data = temp,
.variables = c('id_business','weekDay', 'stars_business', 'review_count_business', 'primaryCategory'),
summarise,
dayCheckins = sum(value)
)
temp3 <- ddply(.data = temp,
.variables = c('weekDay','primaryCategory'),
summarise,
totCheckins = sum(value)
)
temp4 <- merge(temp2, temp3, by = c('weekDay','primaryCategory'))
temp4$contr_star <- with(temp4, dayCheckins / totCheckins * stars_business)
temp4$contr_rev <- with(temp4, dayCheckins / totCheckins * review_count_business)
temp5 <- ddply(.data = temp4, .variables = c('weekDay','primaryCategory'), summarise,
avgStars = sum(contr_star),
avgNumReviews = sum(contr_rev),
avgCheckins = mean(totCheckins)
)
ggplot(temp5, aes(weekDay, avgNumReviews)) + geom_point(aes(size = avgCheckins, color = avgStars))
ggplot(checkins_6, aes(as.integer(weekDay), avgStars, color = primaryCategory)) +
geom_point(aes(size = avgNumReviews)) + geom_line()
theme_minimal()
ggplot(checkins_6, aes(avgNumReviews)) + geom_histogram()
df3_melt <- melt(data=df3, id.vars= c('name_business', 'lon_business', 'lat_business', 'weekDay', 'review_count_business', 'stars_business'))
df3_reshaped <- cast(data=df3_melt,formula=name_business + lon_business + lat_business + review_count_business + stars_business~weekDay, sum)
# Identify the dominant checkin weekday for one business.
#
# Normalises a row of per-weekday checkin counts to proportions; if the
# largest share exceeds 0.33, returns that day's 0-based column index,
# otherwise returns -1 ("no single dominant day").
#
# @param row numeric vector of checkin counts, one element per weekday.
# @return 0-based index of the dominant day, or -1.
getRowMax <- function(row) {
  normalized <- row / sum(row)
  # which.max() works directly on the vector; the original wrapped it in
  # matrix() needlessly and returned via an invisible assignment
  if (max(normalized) > 0.33) {
    which.max(normalized) - 1
  } else {
    -1
  }
}
df3_reshaped$topWeekDay <- apply(df3_reshaped[,5:11], 1, getRowMax)
df3_reshaped$totalCheckins <- apply(df3_reshaped[,5:11], 1, sum)
ggplot(df3_reshaped[df3_reshaped$topWeekDay!=-1 ,], aes(lat_business, lon_business, colour = as.factor(topWeekDay))) +
geom_point(alpha = 0.3, aes(size = review_count_business)) +
theme_minimal()
ggplot(df3_reshaped, aes(review_count_business, totalCheckins)) +
geom_point(alpha = 0.3, aes(size = review_count_business)) +
theme_minimal()
ggplot(checkins_collapsed3[checkins_collapsed3$primaryCategory == 'Restaurants',], aes(x = as.integer(hr), y = numCheckins)) +
geom_line(aes(group = weekDay, color = weekDay)) +
theme_minimal()
# Chart: hours in day X weekdays ~ number of checkins
#- by category
#- by secondary category
# Chart: week day X avg star ~ size: average review count
#- by category
#- by secondary category
# Chart: Stacked bar of types of places over course of week
# word cloud of associated terms with each of hte week days in review text
# bubble chart: bubbles represent each categories. x = week day. y = number of checkins (avg). size = number of reviews (avg)
# bar chart: average number of words in review X week day
# deep dive: fast food, pizza, deli, brunch, steakhouses
# reception of reviews by weekday. average votes.
# cluster analysis using features [monday, tuesday, wednesday, thursday, fri, saturday, sunday, weekday, weekend, morning, night afternoon, evening] using review X business for lonogitude and latitude.
# do users with multiple reviews write on the same day? which day?
sun_df <- colMeans(read.csv(file = '../docStat_Sunday.csv'))
mon_df <- colMeans(read.csv(file = '../docStat_Monday.csv'))
tues_df <- colMeans(read.csv(file = '../docStat_Tuesday.csv'))
wed_df <- colMeans(read.csv(file = '../docStat_Wednesday.csv'))
thurs_df <- colMeans(read.csv(file = '../docStat_Thursday.csv'))
fri_df <- colMeans(read.csv(file = '../docStat_Friday.csv'))
sat_df <- colMeans(read.csv(file = '../docStat_Saturday.csv'))
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
docStats <- data.frame(rbind(sun_df, mon_df, tues_df, wed_df, thurs_df, fri_df, sat_df))
docStats$day <- factor(weekdays)
require(ggplot2)
ggplot(docStats, aes(x = day, y = Number.of.Words)) + geom_bar()
reviews <- read.csv('yelp_review.tsv', sep = '\t')
require(plyr)
unique.users <- ddply(.data = reviews, .variables = c('id_user','date_weekday'), summarise,
numReviews = NROW)
require(data.table)
key(reviews)   # inspect any existing key (NULL for a plain data.frame)
# Count reviews per user and weekday via a keyed data.table.
# Fixed: this data.table() call was missing its closing parenthesis,
# which made the script fail to parse.
dt <- data.table(reviews, key = c('id_user', 'date_weekday'))
unique_users <- dt[, list(ct = NROW(id_review)), by = c('id_user', 'date_weekday')]
require(reshape2)
users_melt <- melt(data=unique_users, id.vars=c('id_user','date_weekday'))
users_recast <- cast(users_melt, id_user~date_weekday, sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(users_recast)[2:8] <- weekdays
user_df <- read.csv('yelp_user.tsv', sep = '\t')
users_recast$total <- with(users_recast, sun + mon + tue + wed + thu + fri + sat)
users_merged <- merge(user_df, users_recast, by= 'id_user')
for(day in weekdays){
users_recast[,day] <- users_recast[,day]/users_recast$total
}
# Largest value in a numeric vector (row-wise helper for apply()).
rowMax <- function(row) {
  max(row)
}
# Name of the weekday with the largest share in `row`.
#
# Relies on the script-level `weekdays` character vector
# (c('sun', ..., 'sat')) being in scope; note that vector shadows the
# base::weekdays() function.
#
# @param row numeric vector of weekday shares, ordered like `weekdays`.
# @return single character weekday abbreviation.
colOfMax <- function(row) {
  # Removed the stray debug print(loc): it fired once per row under
  # apply(), flooding the console. matrix() wrapper was also unnecessary.
  loc <- which.max(row)
  weekdays[loc]
}
users_recast$conc <- apply(users_recast[,weekdays], MARGIN=1, rowMax)
users_recast$primaryDay <- apply(users_recast[,weekdays], MARGIN=1, colOfMax)
users_recast_merged <- merge(users_recast, user_df, by = 'id_user')
multUserusers <- users_recast_merged[users_recast_merged$total >7,]
ggplot(multUserusers, aes(x = conc, y = stars_user)) +
geom_point(aes(color = as.factor(primaryDay), size = review_count_user), alpha = 0.5) +
theme_minimal()
ggplot(users_recast_merged, aes(total)) + geom_histogram()
users_melt_merged <- merge( users_melt, user_df, by = 'id_user')
require(ggplot2)
ggplot(users_melt_merged, aes(y = review_count_user, x = stars_user)) +
geom_point(aes(color = as.factor(date_weekday), size = review_count_user), alpha = 0.4)+
theme_minimal()
ggplot(multUserusers, aes(x = log(votes_useful_user), y = review_count_user)) +
geom_point(aes(color = stars_user)) + facet_grid(.~primaryDay) +
scale_colour_gradient2(high="darkred", mid="white", low = 'white')
ggplot(multUserusers, aes(conc)) + geom_boxplot(aes(primaryDay,conc))
require(maptools)
require(gdal)
require(maps)
require(RColorBrewer)
phoenix <- readShapeSpatial('1970_Subdivision')
phoenix.sp <- readShapeLines('1970_Subdivision.shp')
points(businesses_filtered$lon_business, businesses_filtered$lat_business, cex = 2, pch = 16, col = 'black')
plot(phoenix.sp, xlim=c(-113.5, -110), ylim=c(34.14,34.151))
# symbol plot -- equal-interval class intervals
plot(phoenix, xlim=c(-113.5, -110), ylim=c(34.15,34.151))
points(businesses_filtered$lon_business, businesses_filtered$lat_business, pch=23, col='red', cex=.6,)
points(businesses_filtered$lon_business, businesses_filtered$lat_business, cex=2)
title("Oregon Climate Station Data -- Annual Temperature",
sub="Equal-Interval Class Intervals")
setwd(dir='~/Projects/yelp_phoenix_academic_dataset/CountySubdivision_1970/')
df_checkin <- read.table('../scripts/yelp_checkin.tsv', sep = '\t', stringsAsFactors=F, header = T)
require(plyr)
require(reshape2)
require(stringr)
# Parse the weekday index out of a "<hour>.<weekday>" checkin label,
# returning it as an integer.
extractWeekDay <- function(x) {
  pieces <- strsplit(x, ".", fixed = TRUE)[[1]]
  as.integer(pieces[2])
}
# Parse the hour out of a "<hour>.<weekday>" checkin label,
# returning it as an integer.
extractHr <- function(x) {
  pieces <- strsplit(x, ".", fixed = TRUE)[[1]]
  as.integer(pieces[1])
}
checkinData <- melt(df_checkin, id.vars='id_business')
checkinData$variable <- str_replace(checkinData$variable,'X','')
checkinData$weekDay <- sapply(checkinData$variable, extractWeekDay)
checkinData$hr <- sapply(checkinData$variable, extractHr)
checkinData <- checkinData[checkinData$hr >= 9,]
checkinData_byWeekDay <- ddply(.data = checkinData, .variables=c('id_business','weekDay'),
summarise,
numCheckins = sum(value))
require(reshape)
checkinData_byWeekDay_recast <- cast(checkinData_byWeekDay,
id_business ~ weekDay, fun.aggregate=sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(checkinData_byWeekDay_recast)[2:8] <- weekdays
checkinData_byWeekDay_recast$funDays <- with(checkinData_byWeekDay_recast, (thu + fri + sat)/(sun + mon + tue + wed + thu + fri + sat))
# Map the "funDays" share for well-reviewed businesses over a Google roadmap
# of Phoenix, binned into quartile buckets.
setwd('~/Projects/yelp_phoenix_academic_dataset/scripts/')
businesses <- read.csv('yelp_business.tsv', sep = '\t')
# Keep only businesses with more than 50 reviews.
businesses_filtered = businesses[businesses$review_count_business>50,c('lon_business','lat_business','id_business')]
businesses_filtered <- merge(businesses_filtered, checkinData_byWeekDay_recast, by= 'id_business')
# Discretize funDays into four equal-width bins for the color scale.
businesses_filtered$funBin <- cut(businesses_filtered$funDays,breaks=c(0,0.25, 0.5, 0.75,1))
setwd(dir='~/Projects/yelp_phoenix_academic_dataset/CountySubdivision_1970/')
require(ggmap)
# Roadmap centered on Phoenix (lat 33.45, lon -112.07).
mp <- get_map(location = c(lon = -112.0667, lat = 33.45), zoom = 10, maptype = 'roadmap')
map <- ggmap(mp)
map +
geom_point(data = businesses_filtered, aes(x = lon_business, y = lat_business, color = as.factor(funBin)), size = 7, alpha = 1) +
scale_colour_manual(values = c('lightyellow','pink','red', 'darkred')) +
theme_minimal()
# Same idea as the checkin map above, but using the share of REVIEWS written
# on Thu-Sat ("funRev") instead of checkins.
setwd('~/Projects/yelp_phoenix_academic_dataset/scripts/')
reviews <- read.csv('yelp_review.tsv', sep = '\t')
require(data.table)
dt <- data.table(reviews, key=c('id_business', 'date_weekday'))
# Count reviews per business per weekday.
unique_businesses <- dt[,list(ct=NROW(id_review)), by=c('id_business','date_weekday')]
bus_melt <- melt(data=unique_businesses, id.vars=c('id_business','date_weekday'))
bus_recast <- cast(bus_melt, id_business~date_weekday, sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(bus_recast)[2:8] <- weekdays
bus_recast$funRev <- with(bus_recast, (thu + fri + sat)/(sun + mon + tue + wed + thu + fri + sat))
businesses_filtered_2 <- merge(businesses_filtered, bus_recast, by = 'id_business')
map +
geom_point(data = businesses_filtered_2, aes(x = lon_business, y = lat_business, color = funRev), size = 7, alpha = 1) +
theme_minimal()
# NOTE(review): sun_df..sat_df are not defined until much later in this file;
# in execution order these two lines would fail here.
docStats <- data.frame(rbind(sun_df, mon_df, tues_df, wed_df, thurs_df, fri_df, sat_df))
docStats$day <- weekdays
| /scripts/checkinAnalysis.R | no_license | Libardo1/YelpAcademicDataAnalysis | R | false | false | 16,344 | r | setwd('~/Projects/yelp_phoenix_academic_dataset/scripts/')
df_checkin <- read.table('yelp_checkin.tsv', sep = '\t', stringsAsFactors=F, header = T)
df_business <- read.csv('yelp_business.tsv', sep = '\t')
business <- merge(df_business, df_checkin, by = 'id_business', all.x= T, all.y = F)
# if a business didn't have a checkin in the data, impute zeros
business[is.na(business)] <- 0
require(plyr)
require(reshape2)
require(stringr)
extractWeekDay <- function(x) {
  # Return the weekday token (as a character string) from an
  # "<hour>.<weekday>" checkin label.
  pieces <- strsplit(x, ".", fixed = TRUE)[[1]]
  pieces[2]
}
extractHr <- function(x) {
  # Return the hour token (as a character string) from an
  # "<hour>.<weekday>" checkin label.
  pieces <- strsplit(x, ".", fixed = TRUE)[[1]]
  pieces[1]
}
# Long-form checkin data with weekday/hour columns, then two aggregates:
# per-business-per-weekday and per-weekday-per-hour.
checkinData <- melt(df_checkin, id.vars='id_business')
checkinData$variable <- str_replace(checkinData$variable,'X','')
checkinData$weekDay <- sapply(checkinData$variable, extractWeekDay)
checkinData$hr <- sapply(checkinData$variable, extractHr)
checkinData_byWeekDay <- ddply(.data = checkinData, .variables=c('id_business','weekDay'),
summarise,
numCheckins = sum(value))
# recast() pivots to one column per weekday.
checkinData_byWeekDay <- recast(checkinData_byWeekDay,
id_business ~ weekDay,fun.aggregate=sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(checkinData_byWeekDay)[2:8] <- weekdays
# Total checkins for each (weekday, hour) cell across all businesses.
weekDay.Hr <- ddply(.data = checkinData,
.variables=c('weekDay', 'hr'),
summarise,
numCheckins = sum(value))
getCategory <- function(df, categorylist) {
  # Assign each row of `df` its primary category: the first name in
  # `categorylist` whose indicator column equals 1 for that row, or "Other"
  # when no listed category matches. Earlier entries in `categorylist` win.
  #
  # Fixed: the original looped over 1:length(categorylist), which iterates
  # c(1, 0) and errors when the list is empty; seq_along-style iteration
  # handles the empty case correctly.
  primary <- rep(NA_character_, nrow(df))
  for (cat_name in categorylist) {
    # Only fill rows not already claimed by an earlier (higher-priority) category.
    hit <- df[, cat_name] == 1 & is.na(primary)
    primary[hit] <- cat_name
  }
  primary[is.na(primary)] <- "Other"
  primary
}
# Tag each business with a primary category, then plot checkins by hour of
# day, faceted by category.
categories_primary <- c('Restaurants','Shopping','Beauty...Spas','Nightlife', 'Active.Life')
business_categories <- data.frame(business$id_business)
business_categories$primaryCategory <- getCategory(business, categories_primary)
names(business_categories) = c('id_business', 'primaryCategory')
weekDay.Hr.Cat <- ddply(.data = merge(checkinData,business_categories, by='id_business'),
.variables=c('weekDay', 'hr', 'primaryCategory'),
summarise,
numCheckins = sum(value))
require(ggplot2)
require(ggthemes)
### Variations in checkins over the course of the day, by weekday
ggplot(weekDay.Hr, aes(x = as.integer(hr), y = numCheckins)) +
geom_line(aes(group = weekDay, color = weekDay)) + geom_point(aes(color = weekDay)) +
xlab('Hours in the day') + ylab('Total Number of Checkins') +
ggtitle('Checkins by day and hour') + theme_minimal()
weekDay.Hr.Cat <- weekDay.Hr.Cat[order(weekDay.Hr.Cat$weekDay),]
ggplot(weekDay.Hr.Cat, aes(x = as.integer(hr), y = numCheckins)) +
geom_line(aes(group = as.factor( weekDay), color = weekDay)) + geom_point(aes(color = weekDay)) +
facet_grid(. ~ primaryCategory) +
xlab('Hours in the day') + ylab('Total Number of Checkins') +
ggtitle('Checkins by day and hour') + theme_minimal() + guides(fill = guide_legend(reverse = TRUE))
# use business_categories to merge categories to business
#business_melt <- melt(data = business, id.vars=names(business)[1:515])
#business_melt$variable <- str_replace(business_melt$variable,'X','')
#business_melt$weekDay <- sapply(business_melt$variable, extractWeekDay)
#business_melt$hr <- sapply(business_melt$variable, extractHr)
#################################
# Stacked bar of total checkins per weekday, split by primary category.
temp <- melt(
ddply(.data = merge(checkinData_byWeekDay, business_categories,by='id_business'),
.variables= c('primaryCategory'),
summarise,
sun = sum(sun),
mon= sum(mon),
tue= sum(tue),
wed= sum(wed),
thu= sum(thu),
fri= sum(fri),
sat= sum(sat)
)
)
ggplot(temp, aes(variable, value, fill = primaryCategory)) +
geom_bar(stat = 'identity') +
xlab('Day of the week') + ylab('Number of checkins') + ggtitle('Checkins by major category') +
theme_minimal()
#################################
# Same aggregation as above, but normalized: each category's share of the
# day's total checkins (in percent).
temp <- melt(
ddply(.data = merge(checkinData_byWeekDay, business_categories,by='id_business'),
.variables= c('primaryCategory'),
summarise,
sun = sum(sun),
mon= sum(mon),
tue= sum(tue),
wed= sum(wed),
thu= sum(thu),
fri= sum(fri),
sat= sum(sat)
)
)
# Per-day totals, used as the denominator for the share.
temp2 <- data.frame(colSums(checkinData_byWeekDay))
temp2$variable <- weekdays
names(temp2) <- c('tot','variable')
temp <- merge(temp,temp2,by='variable')
temp$Share <- with(temp, value/tot * 100)
ggplot(temp, aes(variable, Share, fill = primaryCategory)) + geom_bar(stat = 'identity') +
xlab('Day of the week') + ylab('Number of checkins') + ggtitle('Checkins by major category') +
theme_minimal()
########################################################
# Deep dive within Restaurants: repeat the stacked-bar analysis for a
# handful of restaurant sub-categories.
restaurant_list <- c('Buffets', 'Pizza', 'Fast.Food', 'Bars','Breakfast...Brunch','Steakhouses')
restaurant_categories <- business[business$Restaurants == 1,]
restaurant_categories$restaurantCategory <- getCategory(restaurant_categories, restaurant_list)
# Drop restaurants that match none of the listed sub-categories.
restaurant_categories <- restaurant_categories[restaurant_categories$restaurantCategory != 'Other',c('id_business','restaurantCategory')]
temp <- melt(
ddply(.data = merge(checkinData_byWeekDay, restaurant_categories, by = 'id_business', all.x = F, all.y = F),
.variables= c('restaurantCategory'),
summarise,
sun = sum(sun),
mon= sum(mon),
tue= sum(tue),
wed= sum(wed),
thu= sum(thu),
fri= sum(fri),
sat= sum(sat)
)
)
ggplot(temp, aes(variable, value, fill = restaurantCategory)) +
geom_bar(stat = 'identity') +
xlab('Day of the week') + ylab('Number of checkins') + ggtitle('Checkins by major category') +
theme_minimal()
###########################################
# Relate checkin patterns to business "success" (stars, review counts):
# compute checkin-weighted average stars/reviews per (weekday, category).
business_success <- business[, c('stars_business','review_count_business','id_business')]
temp <- merge(checkinData, business_success, by = 'id_business')
temp <- merge(temp, business_categories, by = 'id_business')
# Per-business daily checkin totals alongside its stars/review count.
temp2 <- ddply(.data = temp,
.variables = c('id_business','weekDay', 'stars_business', 'review_count_business', 'primaryCategory'),
summarise,
dayCheckins = sum(value)
)
# Overall daily checkin totals per category (the weighting denominator).
temp3 <- ddply(.data = temp,
.variables = c('weekDay','primaryCategory'),
summarise,
totCheckins = sum(value)
)
temp4 <- merge(temp2, temp3, by = c('weekDay','primaryCategory'))
# Each business contributes to the average proportionally to its checkins.
temp4$contr_star <- with(temp4, dayCheckins / totCheckins * stars_business)
temp4$contr_rev <- with(temp4, dayCheckins / totCheckins * review_count_business)
temp5 <- ddply(.data = temp4, .variables = c('weekDay','primaryCategory'), summarise,
avgStars = sum(contr_star),
avgNumReviews = sum(contr_rev),
avgCheckins = mean(totCheckins)
)
ggplot(temp5, aes(weekDay, avgNumReviews)) + geom_point(aes(size = avgCheckins, color = avgStars))
# NOTE(review): `checkins_6` is not defined anywhere in this file -- these two
# plots presumably refer to an earlier name for temp5; confirm before running.
ggplot(checkins_6, aes(as.integer(weekDay), avgStars, color = primaryCategory)) +
geom_point(aes(size = avgNumReviews)) + geom_line()
# NOTE(review): this theme_minimal() is not added to any plot (dangling call).
theme_minimal()
ggplot(checkins_6, aes(avgNumReviews)) + geom_histogram()
# NOTE(review): `df3` is also undefined in this file -- likely built in an
# interactive session; verify its origin.
df3_melt <- melt(data=df3, id.vars= c('name_business', 'lon_business', 'lat_business', 'weekDay', 'review_count_business', 'stars_business'))
df3_reshaped <- cast(data=df3_melt,formula=name_business + lon_business + lat_business + review_count_business + stars_business~weekDay, sum)
getRowMax <- function(row) {
  # Find the dominant day of a weekly-count row: normalize the counts to
  # shares and, when the largest share exceeds 0.33, return its 0-based
  # position; otherwise return -1 (no single dominant day).
  total <- sum(row)
  if (total == 0) {
    # Fixed: all-zero rows made `normalized` NaN and the comparison below
    # errored with `if (NA)`; treat them as having no dominant day.
    return(-1)
  }
  shares <- row / total
  top_share <- max(shares)
  top_day <- which.max(shares) - 1  # 0-based index, matching the weekday coding
  if (top_share > 0.33) top_day else -1
}
# Tag each business with its dominant checkin day (columns 5:11 are the
# seven weekday counts) and explore it spatially.
df3_reshaped$topWeekDay <- apply(df3_reshaped[,5:11], 1, getRowMax)
df3_reshaped$totalCheckins <- apply(df3_reshaped[,5:11], 1, sum)
# Scatter of businesses that do have a dominant day, colored by that day.
ggplot(df3_reshaped[df3_reshaped$topWeekDay!=-1 ,], aes(lat_business, lon_business, colour = as.factor(topWeekDay))) +
geom_point(alpha = 0.3, aes(size = review_count_business)) +
theme_minimal()
ggplot(df3_reshaped, aes(review_count_business, totalCheckins)) +
geom_point(alpha = 0.3, aes(size = review_count_business)) +
theme_minimal()
# NOTE(review): `checkins_collapsed3` is not defined in this file.
ggplot(checkins_collapsed3[checkins_collapsed3$primaryCategory == 'Restaurants',], aes(x = as.integer(hr), y = numCheckins)) +
geom_line(aes(group = weekDay, color = weekDay)) +
theme_minimal()
# Chart: hours in day X weekdays ~ number of checkins
#- by category
#- by secondary category
# Chart: week day X avg star ~ size: average review count
#- by category
#- by secondary category
# Chart: Stacked bar of types of places over course of week
# word cloud of associated terms with each of hte week days in review text
# bubble chart: bubbles represent each categories. x = week day. y = number of checkins (avg). size = number of reviews (avg)
# bar chart: average number of words in review X week day
# deep dive: fast food, pizza, deli, brunch, steakhouses
# reception of reviews by weekday. average votes.
# cluster analysis using features [monday, tuesday, wednesday, thursday, fri, saturday, sunday, weekday, weekend, morning, night afternoon, evening] using review X business for lonogitude and latitude.
# do users with multiple reviews write on the same day? which day?
# Review text statistics by weekday: each docStat_<Day>.csv holds per-review
# document stats; colMeans() collapses each file to one mean row.
sun_df <- colMeans(read.csv(file = '../docStat_Sunday.csv'))
mon_df <- colMeans(read.csv(file = '../docStat_Monday.csv'))
tues_df <- colMeans(read.csv(file = '../docStat_Tuesday.csv'))
wed_df <- colMeans(read.csv(file = '../docStat_Wednesday.csv'))
thurs_df <- colMeans(read.csv(file = '../docStat_Thursday.csv'))
fri_df <- colMeans(read.csv(file = '../docStat_Friday.csv'))
sat_df <- colMeans(read.csv(file = '../docStat_Saturday.csv'))
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
docStats <- data.frame(rbind(sun_df, mon_df, tues_df, wed_df, thurs_df, fri_df, sat_df))
docStats$day <- factor(weekdays)
require(ggplot2)
# Fixed: geom_bar() errors when a y aesthetic is mapped; stat='identity'
# plots the precomputed means directly.
ggplot(docStats, aes(x = day, y = Number.of.Words)) + geom_bar(stat = 'identity')
reviews <- read.csv('yelp_review.tsv', sep = '\t')
require(plyr)
# Fixed: `numReviews = NROW` handed the NROW function object to summarise
# (an error); count the rows in each group instead.
unique.users <- ddply(.data = reviews, .variables = c('id_user','date_weekday'), summarise,
numReviews = length(id_user))
require(data.table)
key(reviews)
# Fixed: this call was missing its closing parenthesis, which made the rest
# of the script unparseable.
dt <- data.table(reviews, key=c('id_user', 'date_weekday'))
unique_users <- dt[,list(ct=NROW(id_review)), by=c('id_user','date_weekday')]
require(reshape2)
users_melt <- melt(data=unique_users, id.vars=c('id_user','date_weekday'))
# NOTE(review): cast() is from the older 'reshape' package, not reshape2;
# confirm which package is intended to be loaded here.
users_recast <- cast(users_melt, id_user~date_weekday, sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(users_recast)[2:8] <- weekdays
user_df <- read.csv('yelp_user.tsv', sep = '\t')
users_recast$total <- with(users_recast, sun + mon + tue + wed + thu + fri + sat)
users_merged <- merge(user_df, users_recast, by= 'id_user')
# Convert per-day counts to shares of each user's total reviews.
for(day in weekdays){
users_recast[,day] <- users_recast[,day]/users_recast$total
}
rowMax <- function(row) {
  # Largest value in the row. Returned invisibly, matching the original's
  # assignment-as-last-expression form; apply() still captures the value.
  invisible(max(row))
}
colOfMax <- function(row) {
  # Name of the weekday holding the largest value in `row`.
  # NOTE(review): depends on the global `weekdays` character vector defined
  # earlier in this script; confirm it is in scope wherever this is applied.
  # Fixed: removed the leftover print(loc) debug call, which flooded the
  # console with one line per row during apply().
  loc <- which.max(row)
  weekdays[loc]
}
# Per-user day-concentration: how concentrated a user's reviews are on one
# weekday (`conc`), and which day that is (`primaryDay`).
users_recast$conc <- apply(users_recast[,weekdays], MARGIN=1, rowMax)
users_recast$primaryDay <- apply(users_recast[,weekdays], MARGIN=1, colOfMax)
users_recast_merged <- merge(users_recast, user_df, by = 'id_user')
# Restrict to users with more than 7 reviews so shares are meaningful.
multUserusers <- users_recast_merged[users_recast_merged$total >7,]
ggplot(multUserusers, aes(x = conc, y = stars_user)) +
geom_point(aes(color = as.factor(primaryDay), size = review_count_user), alpha = 0.5) +
theme_minimal()
ggplot(users_recast_merged, aes(total)) + geom_histogram()
users_melt_merged <- merge( users_melt, user_df, by = 'id_user')
require(ggplot2)
ggplot(users_melt_merged, aes(y = review_count_user, x = stars_user)) +
geom_point(aes(color = as.factor(date_weekday), size = review_count_user), alpha = 0.4)+
theme_minimal()
ggplot(multUserusers, aes(x = log(votes_useful_user), y = review_count_user)) +
geom_point(aes(color = stars_user)) + facet_grid(.~primaryDay) +
scale_colour_gradient2(high="darkred", mid="white", low = 'white')
ggplot(multUserusers, aes(conc)) + geom_boxplot(aes(primaryDay,conc))
# Spatial packages for the Phoenix shapefile maps.
require(maptools)
# NOTE(review): the CRAN package is 'rgdal', not 'gdal' -- this require()
# likely fails silently (require returns FALSE instead of erroring).
require(gdal)
require(maps)
require(RColorBrewer)
phoenix <- readShapeSpatial('1970_Subdivision')
phoenix.sp <- readShapeLines('1970_Subdivision.shp')
# Duplicate of the base-graphics map earlier in this file; same fixes applied.
# NOTE(review): `phoenix`, `phoenix.sp`, and `businesses_filtered` must
# already exist in the session for these calls to work.
points(businesses_filtered$lon_business, businesses_filtered$lat_business, cex = 2, pch = 16, col = 'black')
plot(phoenix.sp, xlim=c(-113.5, -110), ylim=c(34.14,34.151))
# symbol plot -- equal-interval class intervals
plot(phoenix, xlim=c(-113.5, -110), ylim=c(34.15,34.151))
# Fixed: removed the trailing comma, which passed an empty argument to
# points() and errored at plot time.
points(businesses_filtered$lon_business, businesses_filtered$lat_business, pch=23, col='red', cex=.6)
points(businesses_filtered$lon_business, businesses_filtered$lat_business, cex=2)
# Fixed: title was copy-pasted from an Oregon climate example; relabel to
# describe the data actually plotted.
title("Phoenix Yelp Businesses -- Checkin Analysis",
sub="Businesses with more than 50 reviews")
# Duplicate of the earlier setup section: load the checkin table and
# data-manipulation packages (hard-coded user paths; non-portable).
setwd(dir='~/Projects/yelp_phoenix_academic_dataset/CountySubdivision_1970/')
df_checkin <- read.table('../scripts/yelp_checkin.tsv', sep = '\t', stringsAsFactors=F, header = T)
require(plyr)
require(reshape2)
require(stringr)
extractWeekDay <- function(x) {
  # Weekday code (integer) from an "<hour>.<weekday>" checkin label.
  tokens <- strsplit(x, ".", fixed = TRUE)[[1]]
  as.integer(tokens[2])
}
extractHr <- function(x) {
  # Hour (integer) from an "<hour>.<weekday>" checkin label.
  tokens <- strsplit(x, ".", fixed = TRUE)[[1]]
  as.integer(tokens[1])
}
# Duplicate of the earlier "funDays"/"funRev" analysis: checkin and review
# Thu-Sat shares per business, mapped over Phoenix with ggmap.
checkinData <- melt(df_checkin, id.vars='id_business')
checkinData$variable <- str_replace(checkinData$variable,'X','')
checkinData$weekDay <- sapply(checkinData$variable, extractWeekDay)
checkinData$hr <- sapply(checkinData$variable, extractHr)
# Keep checkins from 9am onward only.
checkinData <- checkinData[checkinData$hr >= 9,]
checkinData_byWeekDay <- ddply(.data = checkinData, .variables=c('id_business','weekDay'),
summarise,
numCheckins = sum(value))
# NOTE(review): cast() is from the older 'reshape' package, not reshape2.
require(reshape)
checkinData_byWeekDay_recast <- cast(checkinData_byWeekDay,
id_business ~ weekDay, fun.aggregate=sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(checkinData_byWeekDay_recast)[2:8] <- weekdays
# funDays = share of the week's checkins falling on Thu/Fri/Sat.
checkinData_byWeekDay_recast$funDays <- with(checkinData_byWeekDay_recast, (thu + fri + sat)/(sun + mon + tue + wed + thu + fri + sat))
setwd('~/Projects/yelp_phoenix_academic_dataset/scripts/')
businesses <- read.csv('yelp_business.tsv', sep = '\t')
businesses_filtered = businesses[businesses$review_count_business>50,c('lon_business','lat_business','id_business')]
businesses_filtered <- merge(businesses_filtered, checkinData_byWeekDay_recast, by= 'id_business')
businesses_filtered$funBin <- cut(businesses_filtered$funDays,breaks=c(0,0.25, 0.5, 0.75,1))
setwd(dir='~/Projects/yelp_phoenix_academic_dataset/CountySubdivision_1970/')
require(ggmap)
mp <- get_map(location = c(lon = -112.0667, lat = 33.45), zoom = 10, maptype = 'roadmap')
map <- ggmap(mp)
map +
geom_point(data = businesses_filtered, aes(x = lon_business, y = lat_business, color = as.factor(funBin)), size = 7, alpha = 1) +
scale_colour_manual(values = c('lightyellow','pink','red', 'darkred')) +
theme_minimal()
setwd('~/Projects/yelp_phoenix_academic_dataset/scripts/')
reviews <- read.csv('yelp_review.tsv', sep = '\t')
require(data.table)
dt <- data.table(reviews, key=c('id_business', 'date_weekday'))
# Reviews per business per weekday.
unique_businesses <- dt[,list(ct=NROW(id_review)), by=c('id_business','date_weekday')]
bus_melt <- melt(data=unique_businesses, id.vars=c('id_business','date_weekday'))
bus_recast <- cast(bus_melt, id_business~date_weekday, sum)
weekdays <- c('sun','mon','tue','wed','thu','fri','sat')
names(bus_recast)[2:8] <- weekdays
bus_recast$funRev <- with(bus_recast, (thu + fri + sat)/(sun + mon + tue + wed + thu + fri + sat))
businesses_filtered_2 <- merge(businesses_filtered, bus_recast, by = 'id_business')
map +
geom_point(data = businesses_filtered_2, aes(x = lon_business, y = lat_business, color = funRev), size = 7, alpha = 1) +
theme_minimal()
# NOTE(review): sun_df..sat_df are defined in an earlier section of this file;
# these lines assume that section has already been run.
docStats <- data.frame(rbind(sun_df, mon_df, tues_df, wed_df, thurs_df, fri_df, sat_df))
docStats$day <- weekdays
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markedIPPP_mix.R
\name{rMIPPP_cond_loc}
\alias{rMIPPP_cond_loc}
\title{Generate a Marked Poisson point process (conditional on location)}
\usage{
rMIPPP_cond_loc(surf, locPP, gammas, r, hyper = 0.01, truncate = FALSE,
win = owin(c(-3, 3), c(-3, 3)), bigwin, discrete_mark = TRUE,
open_new_window = FALSE, grayscale = FALSE, show_plots = TRUE,
LL = 128, L = 50000, mark_distr_choice = 0, GRFmu = 0, df = 10,
nu = 0.5, theta = 1, sig = 1)
}
\arguments{
\item{surf}{An object of type \code{intensity_surface} representing the
IPPP surface for the ground process. Omit this argument to create a surface randomly.}
\item{locPP}{The ground IPPP (locations of the events). If missing then these
are generated using a call to \code{\link{rsppmix}}. Note that if
\code{surf} is not supplied, then it will be generated which may lead to
completely inappropriate locations of the events, if the supplied \code{locPP}
was created with a completely different surface. It is safer to supply both the surface
and ground locations at the same time or none of the two, so that both will be generated.}
\item{gammas}{For discrete marks (\code{discrete_mark=TRUE}), this is
a vector of length equal to the number of marks.
These parameters should typically be non-negative and
they represent weights affecting the probability
fields of each mark. For values close to 0, we
get higher probabilities of observing this mark.
Large positive values lead to small probabilities of observing
the corresponding mark. Negative values are allowed, but they can lead
to a mark not being present in the generated pattern.
If the vector \code{gammas} is not supplied,
then we randomly generate the number of marks from \code{1:10}
and the values of the vector \code{gammas}
from a gamma distribution.}
\item{r}{Radius used to define the
neighborhood system. Any two locations
within this distance are considered
neighbors. If missing, we randomly select
the radius using the generated (ground) point
pattern over the window parameter \code{win}.}
\item{hyper}{Hyperparameter for the distribution of gamma.}
\item{truncate}{Logical variable indicating whether or not we
normalize the densities of the mixture components
to have all their mass within the window defined
in the window \code{win}. This affects the mixture model for the
intensity surface of the ground process.}
\item{win}{Object of type \code{\link[spatstat]{owin}} defining the window of observation.}
\item{bigwin}{Object of type \code{\link[spatstat]{owin}}. If supplied, this will be the
window of observation, even if the pattern is generated over \code{win}. Useful if we
do not truncate (\code{truncate=FALSE}) and we want better presentation of the generated MIPPP.}
\item{discrete_mark}{Logical flag indicating whether the mark is discrete or not.
Default is TRUE. For continuous marks set this to FALSE.}
\item{open_new_window}{Open a new window for a plot.}
\item{grayscale}{Logical to request plots in grayscale.}
\item{show_plots}{Logical variable requesting to produce exploratory plots of the
Marked IPPP intensity surface and generated point pattern.}
\item{LL}{Length of the side of the square grid.
The larger this value is, the better the picture resolution.}
\item{L}{Number of iterations. Required when sampling from
the mark model conditional on locations.}
\item{mark_distr_choice}{A number indicating which
mark distribution to use. Currently we have
only one choice in the discrete mark case, which is essentially a Markov random field (MRF)
over the window. See details for more on the mark model currently used. For continuous marks,
we have two choices, Gaussian random field (GRF) for
\code{mark_distr_choice=0} or Chi-Square random field for
\code{mark_distr_choice=1}.}
\item{GRFmu}{This is the mean of the
Gaussian random field. Only stationarity
is currently supported (i.e., \code{GRFmu} does
not depend on location). Used only if
\code{discrete_mark=FALSE}.}
\item{df}{Degrees of freedom (an integer) for the
chi-square random field when \code{mark_distr_choice=1}. Default is \code{df=10}. Used only if
\code{discrete_mark=FALSE}.}
\item{nu, theta, sig}{Additional arguments passed to the
\code{\link{MaternCov}} function in order to
create the spatial covariance field.
Default values are \code{nu=.5},
\code{theta=1}, and \code{sig=1}.
See \code{\link{MaternCov}} for details. Used only if
\code{discrete_mark=FALSE}.}
}
\value{
A list containing the following components:
\item{surf}{The generated or supplied intensity surface object \code{surf} for the ground process.}
\item{gammas}{The generated or supplied parameters \code{gammas}. Returned only if \code{discrete_mark=TRUE}.}
\item{genMPP}{The generated point pattern as an object of class \code{\link[spatstat]{ppp}} and \code{sppmix}. The member \code{$marks} contains the marks at each of the generated locations. If the ground PP \code{locPP} was supplied, this is also the ground process for the MIPPP and only the marks are generated (at those locations).}
\item{r}{The generated or supplied parameter \code{r}. Returned only if \code{discrete_mark=TRUE}.}
\item{prob_fields}{In the continuous mark case this is the realization of the random field (as an image \code{\link[spatstat]{im}} object). For discrete marks, this is a list of size equal to the number of marks containing the probability fields for each mark value.}
\item{prob_field_params}{A list of the parameters used to create the continuous valued mark fields. Returned only if \code{discrete_mark=FALSE}.}
}
\description{
This function generates realizations (point patterns) from
a given Marked IPPP or a generated one. See details for the choice of models
for the mark distribution. The location (ground) process is
a standard IPPP (unmarked) with mixture intensity surface, and is responsible
for the number of events in the point pattern.
For examples see
\url{http://faculty.missouri.edu/~micheasa/sppmix/sppmix_all_examples.html
#rMIPPP_cond_loc}
}
\details{
We assume that the joint distribution of a
marked point pattern \code{N=[s,m(s)]} with \code{n}
events is of the form:
\code{p(N)=lambda^n*exp(-lambda)/(n!)*f(all s|theta1)*g(all m|theta2(s),all s)}
where \code{s} denotes a location and \code{m=m(s)}
a mark value at that location, lambda is a parameter
interpreted as the average number of points
over the window of observation, and \code{f}, \code{g} are proper densities.
In order to simulate from this Marked IPPP
we first simulate the number of events
and their locations from an IPPP with
mixture intensity surface \code{lambda*f(s|theta1)} (e.g.,
using \code{\link{rsppmix}}), and then generate
the mark at that location \code{s}.
In the discrete mark case, the mark is modeled using
a mixture distribution of Dirac measures on
the marks with the probability \code{q(m,s)} of observing a
specific mark value \code{m} depending on the current location
\code{s} and the marks of its neighbors. Since
we have a window of observation, any point in there
can potentially be marked, which leads to \code{q(m,s)} being
a field. In particular, the probability \code{q(m,s)} is analogous to
\code{exp(-gammas_(j)*(sum over all neighbors of s of their marks minus m squared))}
and when we fit the MIPPP model, our goal
is to estimate the parameters \code{gammas}.
Note that if all \code{gammas} are zero then
we fall back to a discrete uniform mark distribution.
The neighborhood system is controlled by
\code{r} and is crucial in this case. Small values
tend to produce probability fields with concentrated
masses about observed events of the process,
whereas, large neighborhoods allow us to borrow
strength across locations and result in much smoother
probability fields.
In the continuous case the mark is generated from
a (typically stationary) Gaussian process or chi-squared random process,
e.g., using function \code{\link{rGRF}}.
See Micheas (2014) for more details on
Marked IPPP models via conditioning arguments.
}
\examples{
\donttest{
# Create a marked point pattern; use randomization and discrete marks (default values)
newMPP=rMIPPP_cond_loc()
plot(newMPP$surf,main="True IPPP intensity surface for the locations")
newMPP$gammas
newMPP$genMPP
newMPP$r
print(table(newMPP$genMPP$marks))
#we can reproduce the random field plots anytime using the following call
plot_MPP_fields(newMPP$genMPP,newMPP$gammas,newMPP$r)
#Now generate continuous marks according to a Gaussian process
newMPP=rMIPPP_cond_loc(discrete_mark = FALSE)
plot(newMPP$surf,main="True IPPP intensity surface for the locations")
#now the marks are taken from a chi-square field
newMPP=rMIPPP_cond_loc(mark_distr_choice=1, discrete_mark = FALSE)
plot(newMPP$surf,main="True IPPP intensity surface for the locations")}
}
\references{
Hierarchical Bayesian Modeling of Marked Non-Homogeneous Poisson Processes with finite mixtures and inclusion of covariate information. Micheas, A.C. (2014). Journal of Applied Statistics, 41, 12, 2596-2615, DOI: 10.1080/02664763.2014.922167.
}
\seealso{
\code{\link{plot_MPP_fields}}
}
\author{
Sakis Micheas
}
| /sppmix/man/rMIPPP_cond_loc.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 9,362 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markedIPPP_mix.R
\name{rMIPPP_cond_loc}
\alias{rMIPPP_cond_loc}
\title{Generate a Marked Poisson point process (conditional on location)}
\usage{
rMIPPP_cond_loc(surf, locPP, gammas, r, hyper = 0.01, truncate = FALSE,
win = owin(c(-3, 3), c(-3, 3)), bigwin, discrete_mark = TRUE,
open_new_window = FALSE, grayscale = FALSE, show_plots = TRUE,
LL = 128, L = 50000, mark_distr_choice = 0, GRFmu = 0, df = 10,
nu = 0.5, theta = 1, sig = 1)
}
\arguments{
\item{surf}{An object of type \code{intensity_surface} representing the
IPPP surface for the ground process. Omit this argument to create a surface randomly.}
\item{locPP}{The ground IPPP (locations of the events). If missing then these
are generated using a call to \code{\link{rsppmix}}. Note that if
\code{surf} is not supplied, then it will be generated which may lead to
completely inappropriate locations of the events, if the supplied \code{locPP}
was created with a completely different surface. It is safer to supply both the surface
and ground locations at the same time or none of the two, so that both will be generated.}
\item{gammas}{For discrete marks (\code{discrete_mark=TRUE}), this is
a vector of length equal to the number of marks.
These parameters should typically be non-negative and
they represent weights affecting the probability
fields of each mark. For values close to 0, we
get higher probabilities of observing this mark.
Large positive values lead to small probabilities of observing
the corresponding mark. Negative values are allowed, but they can lead
to a mark not being present in the generated pattern.
If the vector \code{gammas} is not supplied,
then we randomly generate the number of marks from \code{1:10}
and the values of the vector \code{gammas}
from a gamma distribution.}
\item{r}{Radius used to define the
neighborhood system. Any two locations
within this distance are considered
neighbors. If missing, we randomly select
the radius using the generated (ground) point
pattern over the window parameter \code{win}.}
\item{hyper}{Hyperparameter for the distribution of gamma.}
\item{truncate}{Logical variable indicating whether or not we
normalize the densities of the mixture components
to have all their mass within the window defined
in the window \code{win}. This affects the mixture model for the
intensity surface of the ground process.}
\item{win}{Object of type \code{\link[spatstat]{owin}} defining the window of observation.}
\item{bigwin}{Object of type \code{\link[spatstat]{owin}}. If supplied, this will be the
window of observation, even if the pattern is generated over \code{win}. Useful if we
do not truncate (\code{truncate=FALSE}) and we want better presentation of the generated MIPPP.}
\item{discrete_mark}{Logical flag indicating whether the mark is discrete or not.
Default is TRUE. For continuous marks set this to FALSE.}
\item{open_new_window}{Open a new window for a plot.}
\item{grayscale}{Logical to request plots in grayscale.}
\item{show_plots}{Logical variable requesting to produce exploratory plots of the
Marked IPPP intensity surface and generated point pattern.}
\item{LL}{Length of the side of the square grid.
The larger this value is, the better the picture resolution.}
\item{L}{Number of iterations. Required when sampling from
the mark model conditional on locations.}
\item{mark_distr_choice}{A number indicating which
mark distribution to use. Currently we have
only one choice in the discrete mark case, which is essentialy a Markov random field (MRF)
over the window. See details for more on the mark model currently used. For continuous marks,
we have two choices, Gaussian random field (GRF) for
\code{mark_distr_choice=0} or Chi-Square random field for
\code{mark_distr_choice=1}.}
\item{GRFmu}{This is the mean of the
Gaussian random field. Only stationarity
is currently supported (i.e., \code{GRFmu} does
not depend on location). Used only if
\code{discrete_mark=FALSE}.}
\item{df}{Degrees of freedom (an integer) for the
chi-square random field when \code{mark_distr_choice=1}. Default is \code{df=10}. Used only if
\code{discrete_mark=FALSE}.}
\item{nu, theta, sig}{Additional arguments passed to the
\code{\link{MaternCov}} function in order to
create the spatial covariance field.
Default values are \code{nu=.5},
\code{theta=1}, and \code{sig=1}.
See \code{\link{MaternCov}} for details. Used only if
\code{discrete_mark=FALSE}.}
}
\value{
A list containing the following components:
\item{surf}{The generated or supplied intensity surface object \code{surf} for the ground process.}
\item{gammas}{The generated or supplied parameters \code{gammas}. Returned only if \code{discrete_mark=TRUE}.}
\item{genMPP}{The generated point pattern as an object of class \code{\link[spatstat]{ppp}} and \code{sppmix}. The member \code{$marks} contains the marks at each of the generated locations. If the ground PP \code{locPP} was supplied, this is also the ground process for the MIPPP and only the marks are generated (at those locations).}
\item{r}{The generated or supplied parameter \code{r}. Returned only if \code{discrete_mark=TRUE}.}
\item{prob_fields}{In the continuous mark case this is the realization of the random field (as an image \code{\link[spatstat]{im}} object). For discrete marks, this is a list of size equal to the number of marks containing the probability fields for each mark value.}
\item{prob_field_params}{A list of the parameters used to create the continuous valued mark fields. Returned only if \code{discrete_mark=FALSE}.}
}
\description{
This function generates realizations (point patterns) from
a given Marked IPPP or a generated one. See details for the choice of models
for the mark distribution. The location (ground) process is
a standard IPPP (unmarked) with mixture intensity surface, and is responsible
for the number of events in the point pattern.
For examples see
\url{http://faculty.missouri.edu/~micheasa/sppmix/sppmix_all_examples.html
#rMIPPP_cond_loc}
}
\details{
We assume that the joint distribution of a
marked point pattern \code{N=[s,m(s)]} with \code{n}
events is of the form:
\code{p(N)=lambda^n*exp(-lambda)/(n!)*f(all s|theta1)*g(all m|theta2(s),all s)}
where \code{s} denotes a location and \code{m=m(s)}
a mark value at that location, \code{lambda} is a parameter
interpreted as the average number of points
over the window of observation, and \code{f}, \code{g} are proper densities.
In order to simulate from this Marked IPPP
we first simulate the number of events
and their locations from an IPPP with
mixture intensity surface \code{lambda*f(s|theta1)} (e.g.,
using \code{\link{rsppmix}}), and then generate
the mark at that location \code{s}.
In the discrete mark case, the mark is modeled using
a mixture distribution of Dirac measures on
the marks with the probability \code{q(m,s)} of observing a
specific mark value \code{m} depending on the current location
\code{s} and the marks of its neighbors. Since
we have a window of observation, any point in there
can potentially be marked, which leads to \code{q(m,s)} being
a field. In particular, the probability \code{q(m,s)} is analogous to
\code{exp(-gammas_(j)*(sum over all neighbors of s of their marks minus m squared))}
and when we fit the MIPPP model, our goal
is to estimate the parameters \code{gammas}.
Note that if all \code{gammas} are zero then
we fall back to a discrete uniform mark distribution.
The neighborhood system is controlled by
\code{r} and is crucial in this case. Small values
tend to produce probability fields with concentrated
masses about observed events of the process,
whereas, large neighborhoods allow us to borrow
strength across locations and result in much smoother
probability fields.
In the continuous case the mark is generated from
a (typically stationary) Gaussian process or chi-squared random process,
e.g., using function \code{\link{rGRF}}.
See Micheas (2014) for more details on
Marked IPPP models via conditioning arguments.
}
\examples{
\donttest{
# Create a marked point pattern; use randomization and discrete marks (default values)
newMPP=rMIPPP_cond_loc()
plot(newMPP$surf,main="True IPPP intensity surface for the locations")
newMPP$gammas
newMPP$genMPP
newMPP$r
print(table(newMPP$genMPP$marks))
#we can reproduce the random field plots anytime using the following call
plot_MPP_fields(newMPP$genMPP,newMPP$gammas,newMPP$r)
#Now generate continuous marks according to a Gaussian process
newMPP=rMIPPP_cond_loc(discrete_mark = FALSE)
plot(newMPP$surf,main="True IPPP intensity surface for the locations")
#now the marks are taken from a chi-square field
newMPP=rMIPPP_cond_loc(mark_distr_choice=1, discrete_mark = FALSE)
plot(newMPP$surf,main="True IPPP intensity surface for the locations")}
}
\references{
Hierarchical Bayesian Modeling of Marked Non-Homogeneous Poisson Processes with finite mixtures and inclusion of covariate information. Micheas, A.C. (2014). Journal of Applied Statistics, 41, 12, 2596-2615, DOI: 10.1080/02664763.2014.922167.
}
\seealso{
\code{\link{plot_MPP_fields}}
}
\author{
Sakis Micheas
}
|
# GO-term reduction with rrvgo (Revigo-like) for a biological-process (BP)
# enrichment result.  Expects a matrix/data.frame `bpsig` in the calling
# environment with GO IDs as rownames and a log-odds-ratio column "lor"
# (TODO confirm -- `bpsig` is created elsewhere).
#
# One-off setup: install rrvgo from Bioconductor 3.12.
# NOTE(review): these install calls re-run on every execution of the
# script; consider moving them to a separate setup step.
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install(version = "3.12")
BiocManager::install("rrvgo")
library(rrvgo)
# go_analysis <- read.delim(system.file("extdata/example.txt", package="rrvgo"))
# Named numeric vector: GO ID -> log odds ratio.
go_analysis <- bpsig[,"lor"]
names(go_analysis) <- rownames(bpsig)
# Semantic similarity matrix between the GO terms (human annotation,
# BP ontology, "Rel" similarity method).
simMatrix <- calculateSimMatrix(names(go_analysis),
orgdb="org.Hs.eg.db",
ont="BP",
method="Rel")
# NOTE(review): `go_analysis` is an atomic vector at this point, so
# `go_analysis$qvalue` / `go_analysis$ID` would error; this line looks
# like a leftover from the data.frame-based rrvgo vignette example --
# verify before use.  `scores` is also never used below.
scores <- setNames(-log10(go_analysis$qvalue), go_analysis$ID)
# Collapse redundant GO terms, scoring each term by its log odds ratio.
reducedTerms <- reduceSimMatrix(simMatrix,
go_analysis,
threshold=0.7,
orgdb="org.Hs.eg.db")
scatterPlot(simMatrix, reducedTerms)
# Repeat with absolute effect sizes so up- and down-signals rank equally.
go_abs <- abs(go_analysis)
reducedTerms <- reduceSimMatrix(simMatrix,
go_abs,
threshold=0.7,
orgdb="org.Hs.eg.db")
scatterPlot(simMatrix, reducedTerms)
# Interactive exploration of the reduced terms.
rrvgo::shiny_rrvgo()
install.packages("heatmaply")
| /SCRIPTS/07_Revigoshiny.R | no_license | edurlaf/TFG_meta-analysis_RA | R | false | false | 1,106 | r | if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install(version = "3.12")
BiocManager::install("rrvgo")
library(rrvgo)
# go_analysis <- read.delim(system.file("extdata/example.txt", package="rrvgo"))
go_analysis <- bpsig[,"lor"]
names(go_analysis) <- rownames(bpsig)
simMatrix <- calculateSimMatrix(names(go_analysis),
orgdb="org.Hs.eg.db",
ont="BP",
method="Rel")
scores <- setNames(-log10(go_analysis$qvalue), go_analysis$ID)
reducedTerms <- reduceSimMatrix(simMatrix,
go_analysis,
threshold=0.7,
orgdb="org.Hs.eg.db")
scatterPlot(simMatrix, reducedTerms)
go_abs <- abs(go_analysis)
reducedTerms <- reduceSimMatrix(simMatrix,
go_abs,
threshold=0.7,
orgdb="org.Hs.eg.db")
scatterPlot(simMatrix, reducedTerms)
rrvgo::shiny_rrvgo()
install.packages("heatmaply")
|
### R code from vignette source 'tp3.Rnw'
### Encoding: UTF-8
### Clustering practical: Ward hierarchical clustering, k-means and PCA
### on body-measurement data.

###################################################
### code chunk number 1: tp3.Rnw:25-26
###################################################
data.body = read.table("body.csv",header=T,sep=";",dec=",",row.names=1)

###################################################
### code chunk number 2: tp3.Rnw:31-32
###################################################
# Euclidean distance matrix between individuals.
distances <- dist(data.body)

###################################################
### code chunk number 3: tp3.Rnw:39-40
###################################################
# Ward hierarchical clustering.  Since R 3.1.0 the historical "ward"
# method is named "ward.D"; "ward.D" reproduces the old behaviour.
hc.indiv <- hclust(distances,method="ward.D")

###################################################
### code chunk number 4: tp3.Rnw:45-46
###################################################
plot(hc.indiv)

###################################################
### code chunk number 5: tp3.Rnw:51-52
###################################################
# Cut the dendrogram into 4 groups.
indiv.4groupes <- cutree(hc.indiv,k=4)

###################################################
### code chunk number 6: tp3.Rnw:55-57
###################################################
plot(hc.indiv)
rect.hclust(hc.indiv,k=4,border=1:4)

###################################################
### code chunk number 7: tp3.Rnw:62-63
###################################################
# PCA of the raw measurements.
res.acp.body = prcomp(data.body)

###################################################
### code chunk number 8: tp3.Rnw:68-73
###################################################
par(pty="s") # square plotting region
plot(res.acp.body$x,type="n")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body))
par(pty="m") # restore the default configuration

###################################################
### code chunk number 9: tp3.Rnw:78-83
###################################################
# Same PCA plot, coloured by the 4 hierarchical-clustering groups.
par(pty="s")
plot(res.acp.body$x,type="n")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body),col=indiv.4groupes,font=2)
par(pty="m")

###################################################
### code chunk number 10: tp3.Rnw:94-96
###################################################
# Two k-means runs with random starts: partitions may differ.
res1.kmeans <- kmeans(data.body,centers=4)
res2.kmeans <- kmeans(data.body,centers=4)

###################################################
### code chunk number 11: tp3.Rnw:101-102
###################################################
table(res1.kmeans$cluster,res2.kmeans$cluster,dnn=c("kmeans1","kmeans2"))

###################################################
### code chunk number 12: tp3.Rnw:107-108
###################################################
table(res1.kmeans$cluster,indiv.4groupes,dnn=c("kmeans1","CAH"))

###################################################
### code chunk number 13: tp3.Rnw:119-122
###################################################
# Initialise k-means with the centroids of the CAH groups for stability.
mat.centres.init <- matrix(nrow=4,ncol=5)
for (i in 1:4) mat.centres.init[i,]=apply(data.body[indiv.4groupes==i,],2,mean)
km.body.init <- kmeans(data.body,centers=mat.centres.init)

###################################################
### code chunk number 14: tp3.Rnw:126-127
###################################################
table(km.body.init$cluster,indiv.4groupes,dnn=c("kmeans","cah"))

###################################################
### code chunk number 15: tp3.Rnw:130-137
###################################################
# Side-by-side PCA maps coloured by CAH and by k-means partitions.
par(mfrow=c(1,2))
plot(res.acp.body$x,type="n",main="ACP - CAH")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body),col=indiv.4groupes,font=2)
plot(res.acp.body$x,type="n",main="ACP - k-means")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body),col=km.body.init$cluster,font=2)

###################################################
### code chunk number 16: tp3.Rnw:150-151
###################################################
DataBody <- read.table("Data/data-body.csv",sep=";",dec=",",header=TRUE)

###################################################
### code chunk number 17: tp3.Rnw:157-159
###################################################
# Columns 1:24 are the numeric measurements (last column is gender).
hc.DataBody <- hclust(dist(DataBody[,1:24]),method="ward.D")
plot(hc.DataBody)

###################################################
### code chunk number 18: tp3.Rnw:164-165
###################################################
# Scree-type plot of the last 12 merge heights, highest first, to pick k.
# rev(tail(...)) generalises the original hard-coded index 506:495.
plot(rev(tail(hc.DataBody$height, 12)),type="b")

###################################################
### code chunk number 19: tp3.Rnw:170-173
###################################################
# Cross-tabulate candidate partitions against gender.
table(cutree(hc.DataBody,k=2),DataBody$gender)
table(cutree(hc.DataBody,k=3),DataBody$gender)
table(cutree(hc.DataBody,k=6),DataBody$gender)

###################################################
### code chunk number 20: tp3.Rnw:178-194
###################################################
# Characterise the 3-group solution: per-group mean profiles and PCA maps.
indiv.3groupes <- cutree(hc.DataBody,k=3)
mean.groupe1 <- apply(DataBody[indiv.3groupes==1,1:24],2,mean)
mean.groupe2 <- apply(DataBody[indiv.3groupes==2,1:24],2,mean)
mean.groupe3 <- apply(DataBody[indiv.3groupes==3,1:24],2,mean)
mean.3groupes <- rbind(mean.groupe1,mean.groupe2,mean.groupe3)
barplot(mean.3groupes,beside=TRUE,las=3,col=1:3)
barplot(mean.3groupes[c(3,1,2),],beside=TRUE,las=3,col=1:3)
legend("topleft",paste("Groupe",1:3),text.col=c(2,3,1))
pca.body <- prcomp(DataBody[,1:24])
biplot(pca.body)
par(pty="s")
plot(pca.body$rotation,type="n",xlim=c(-0.7,0.7),ylim=c(-0.7,0.7))
text(pca.body$rotation,rownames(pca.body$rotation),font=3)
abline(h=0,v=0,lty=2)
plot(pca.body$x,pch=16,col=indiv.3groupes)
abline(h=0,v=0,lty=2)
### Encoding: UTF-8
###################################################
### code chunk number 1: tp3.Rnw:25-26
###################################################
data.body = read.table("body.csv",header=T,sep=";",dec=",",row.names=1)
###################################################
### code chunk number 2: tp3.Rnw:31-32
###################################################
distances <- dist(data.body)
###################################################
### code chunk number 3: tp3.Rnw:39-40
###################################################
hc.indiv <- hclust(distances,method="ward")
###################################################
### code chunk number 4: tp3.Rnw:45-46
###################################################
plot(hc.indiv)
###################################################
### code chunk number 5: tp3.Rnw:51-52
###################################################
indiv.4groupes <- cutree(hc.indiv,k=4)
###################################################
### code chunk number 6: tp3.Rnw:55-57
###################################################
plot(hc.indiv)
rect.hclust(hc.indiv,k=4,border=1:4)
###################################################
### code chunk number 7: tp3.Rnw:62-63
###################################################
res.acp.body = prcomp(data.body)
###################################################
### code chunk number 8: tp3.Rnw:68-73
###################################################
par(pty="s") # pour avoir une zone graphique carrée
plot(res.acp.body$x,type="n")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body))
par(pty="m") # pour revenir à la configuration par défaut
###################################################
### code chunk number 9: tp3.Rnw:78-83
###################################################
par(pty="s")
plot(res.acp.body$x,type="n")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body),col=indiv.4groupes,font=2)
par(pty="m")
###################################################
### code chunk number 10: tp3.Rnw:94-96
###################################################
res1.kmeans <- kmeans(data.body,centers=4)
res2.kmeans <- kmeans(data.body,centers=4)
###################################################
### code chunk number 11: tp3.Rnw:101-102
###################################################
table(res1.kmeans$cluster,res2.kmeans$cluster,dnn=c("kmeans1","kmeans2"))
###################################################
### code chunk number 12: tp3.Rnw:107-108
###################################################
table(res1.kmeans$cluster,indiv.4groupes,dnn=c("kmeans1","CAH"))
###################################################
### code chunk number 13: tp3.Rnw:119-122
###################################################
mat.centres.init <- matrix(nrow=4,ncol=5)
for (i in 1:4) mat.centres.init[i,]=apply(data.body[indiv.4groupes==i,],2,mean)
km.body.init <- kmeans(data.body,centers=mat.centres.init)
###################################################
### code chunk number 14: tp3.Rnw:126-127
###################################################
table(km.body.init$cluster,indiv.4groupes,dnn=c("kmeans","cah"))
###################################################
### code chunk number 15: tp3.Rnw:130-137
###################################################
par(mfrow=c(1,2))
plot(res.acp.body$x,type="n",main="ACP - CAH")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body),col=indiv.4groupes,font=2)
plot(res.acp.body$x,type="n",main="ACP - k-means")
abline(h=0,v=0,lty=2)
text(res.acp.body$x,rownames(data.body),col=km.body.init$cluster,font=2)
###################################################
### code chunk number 16: tp3.Rnw:150-151
###################################################
DataBody <- read.table("Data/data-body.csv",sep=";",dec=",",header=TRUE)
###################################################
### code chunk number 17: tp3.Rnw:157-159
###################################################
hc.DataBody <- hclust(dist(DataBody[,1:24]),method="ward")
plot(hc.DataBody)
###################################################
### code chunk number 18: tp3.Rnw:164-165
###################################################
plot(hc.DataBody$height[506:495],type="b")
###################################################
### code chunk number 19: tp3.Rnw:170-173
###################################################
table(cutree(hc.DataBody,k=2),DataBody$gender)
table(cutree(hc.DataBody,k=3),DataBody$gender)
table(cutree(hc.DataBody,k=6),DataBody$gender)
###################################################
### code chunk number 20: tp3.Rnw:178-194
###################################################
indiv.3groupes <- cutree(hc.DataBody,k=3)
mean.groupe1 <- apply(DataBody[indiv.3groupes==1,1:24],2,mean)
mean.groupe2 <- apply(DataBody[indiv.3groupes==2,1:24],2,mean)
mean.groupe3 <- apply(DataBody[indiv.3groupes==3,1:24],2,mean)
mean.3groupes <- rbind(mean.groupe1,mean.groupe2,mean.groupe3)
barplot(mean.3groupes,beside=TRUE,las=3,col=1:3)
barplot(mean.3groupes[c(3,1,2),],beside=TRUE,las=3,col=1:3)
legend("topleft",paste("Groupe",1:3),text.col=c(2,3,1))
pca.body <- prcomp(DataBody[,1:24])
biplot(pca.body)
par(pty="s")
plot(pca.body$rotation,type="n",xlim=c(-0.7,0.7),ylim=c(-0.7,0.7))
text(pca.body$rotation,rownames(pca.body$rotation),font=3)
abline(h=0,v=0,lty=2)
plot(pca.body$x,pch=16,col=indiv.3groupes)
abline(h=0,v=0,lty=2)
|
# Aggregated risk report for the 'AllSystemsQ' curve group: loads position
# curves for the run date, computes risk by sector, plots absolute and
# relative (share-of-total) risk to a PDF, and emails the report.
#
# Author: dhorowitz
###############################################################################
#####################################
library(QFPortfolio)
curveGroup <- 'AllSystemsQ'
# Run date is supplied on the command line (dateTimeFromArguments/squish
# presumably come from QFPortfolio or a companion package -- TODO confirm).
runDate <- dateTimeFromArguments(commandArgs())
outputDir <- squish(dataDirectory(),'/STProcess/RightEdge/Portfolio/',format(runDate,'%Y%m%d'))
curvesDir <- squish(outputDir, '/curves')
outputDir <- squish(outputDir, '/reports')
fileName <- squish(outputDir, '/AggregatedRiskReport.pdf')
pa <- PositionAggregator(curveDirectory = curvesDir, groupName = curveGroup)
# Risk by sector; last row is treated below as the most recent date.
output <- pa$riskBySector()
# Each sector's share of that row's total risk.
relOutput <- output / rowSums(output, na.rm = TRUE)
# Append a TOTAL column of absolute risk.
output <- cbind(output, rowSums(output, na.rm = TRUE))
colnames(output)[[NCOL(output)]] <- 'TOTAL'
lastOutput <- output[NROW(output),]
lastRelOutput <- relOutput[NROW(relOutput),]
# These two barplot() calls only capture bar x-positions for labelling;
# the plots themselves are redrawn inside the PDF device below.
absBarPlotX <- barplot(as.matrix(lastOutput))
relBarPlotX <- barplot(as.matrix(lastRelOutput))
pdf(fileName,paper="special",width=10,height=10)
plot(relOutput, main = 'Percentage of Total Risk')
barplot(as.matrix(lastRelOutput), main = 'Percentage of Total Risk', col = 'blue', ylim = c(0, max(lastRelOutput) + 0.05))
# Label each relative-risk bar with its rounded value.
for(i in 1:length(relBarPlotX)) text(relBarPlotX[[i]], lastRelOutput[[i]] + 0.01, round(lastRelOutput[[i]],2))
plot(output, main = 'Absolute Total Risk')
barplot(as.matrix(lastOutput), main = 'Absolute Total Risk', col = 'blue', ylim = c(0, max(lastOutput) + 0.1 * max(lastOutput)))
# NOTE(review): 1:length(x) misbehaves on empty vectors; seq_along() is safer.
for(i in 1:length(lastOutput)) text(absBarPlotX[[i]], lastOutput[[i]] + 0.05 * max(lastOutput), round(lastOutput[[i]], 0))
dev.off()
print('Emailing reports...')
email <- Mail$notification( subject=squish('Aggregated Risk Reports - ',format(runDate,'%Y-%m-%d')),
content=squish('Report - available in ', fileName))
email$attachFile(fileName)
email$sendTo('team')
| /R/scripts/Jobs/runAggregatedRiskReport.R | no_license | rsheftel/ratel | R | false | false | 1,870 | r | # TODO: Add comment
#
# Author: dhorowitz
###############################################################################
#####################################
library(QFPortfolio)
curveGroup <- 'AllSystemsQ'
runDate <- dateTimeFromArguments(commandArgs())
outputDir <- squish(dataDirectory(),'/STProcess/RightEdge/Portfolio/',format(runDate,'%Y%m%d'))
curvesDir <- squish(outputDir, '/curves')
outputDir <- squish(outputDir, '/reports')
fileName <- squish(outputDir, '/AggregatedRiskReport.pdf')
pa <- PositionAggregator(curveDirectory = curvesDir, groupName = curveGroup)
output <- pa$riskBySector()
relOutput <- output / rowSums(output, na.rm = TRUE)
output <- cbind(output, rowSums(output, na.rm = TRUE))
colnames(output)[[NCOL(output)]] <- 'TOTAL'
lastOutput <- output[NROW(output),]
lastRelOutput <- relOutput[NROW(relOutput),]
absBarPlotX <- barplot(as.matrix(lastOutput))
relBarPlotX <- barplot(as.matrix(lastRelOutput))
pdf(fileName,paper="special",width=10,height=10)
plot(relOutput, main = 'Percentage of Total Risk')
barplot(as.matrix(lastRelOutput), main = 'Percentage of Total Risk', col = 'blue', ylim = c(0, max(lastRelOutput) + 0.05))
for(i in 1:length(relBarPlotX)) text(relBarPlotX[[i]], lastRelOutput[[i]] + 0.01, round(lastRelOutput[[i]],2))
plot(output, main = 'Absolute Total Risk')
barplot(as.matrix(lastOutput), main = 'Absolute Total Risk', col = 'blue', ylim = c(0, max(lastOutput) + 0.1 * max(lastOutput)))
for(i in 1:length(lastOutput)) text(absBarPlotX[[i]], lastOutput[[i]] + 0.05 * max(lastOutput), round(lastOutput[[i]], 0))
dev.off()
print('Emailing reports...')
email <- Mail$notification( subject=squish('Aggregated Risk Reports - ',format(runDate,'%Y-%m-%d')),
content=squish('Report - available in ', fileName))
email$attachFile(fileName)
email$sendTo('team')
|
# Small DEA example with the FEAR package: one input, one output, five firms.
library(FEAR)
# Data: 1x5 matrices (one row, five firms).
y <- cbind(1,2,3,4,5)
x <- cbind(2,4,3,5,6)
# DEA, Shephard input distance function,
# RTS=3 and ORIENTATION=1 follow FEAR's integer coding -- TODO confirm
# which returns-to-scale assumption RTS=3 selects.
d <- FEAR::dea(x,y, RTS=3, ORIENTATION=1)
# NOTE(review): dea.plot()/dea.plot.frontier() belong to the
# 'Benchmarking' package, which is never loaded here -- confirm it is
# attached elsewhere, or add library(Benchmarking).
dea.plot(t(x),t(y),RTS="vrs",ORIENTATION="in-out",txt=1:length(x), GRID=TRUE)
dea.plot(t(x),t(y),RTS="crs",ORIENTATION="in-out",add=TRUE,lty="dashed", GRID=TRUE)
# Efficiencies (reciprocal of the Shephard input distance)
print(1/d,digits=3)
print(mean(1/d),digits=3)
# Bootstrap (Simar & Wilson 1998)
b <- boot.sw98(x,y, RTS=3, ORIENTATION=1, NREP=2000)
# (translated) NREP is what? The sample size or the number of bootstrap
# samples generated?  If it is one of them, which is the other?
print(b,digits=3)
print(sqrt(b$var),digits=3)
#DEA Frontier
dea.plot.frontier(t(x),t(y),RTS="crs",txt=1:dim(t(x))[1], GRID=TRUE)
#Bias-corrected frontier
dea.plot.frontier(t(x)/b$dhat.bc,t(y),RTS="crs",lty="dashed",add=T, GRID=TRUE)
#Upper 95% confidence frontier
dea.plot.frontier(t(x)/b$conf.int[,2],t(y),RTS="crs",lty="dotted",add=T, GRID=TRUE)
# Mark each firm's efficiency with a filled circle
plot(1/b$dhat, ylim = c(.45,1),main = "Resultado da análise",xlab = "Firms",ylab = "Efficiency")
# Mark each firm's bias-corrected efficiency with a diamond
points(1/b$dhat.bc,pch=5)
# Draw each firm's confidence interval (number of firms hard-coded at 5)
for (i in 1:5 ) lines(rep(i,2),1/b$conf.int[i,],type = "o", pch=3)
# Data
y <- cbind(1,2,3,4,5)
x <- cbind(2,4,3,5,6)
# DEA, Shephard input distance function,
d <- FEAR::dea(x,y, RTS=3, ORIENTATION=1)
dea.plot(t(x),t(y),RTS="vrs",ORIENTATION="in-out",txt=1:length(x), GRID=TRUE)
dea.plot(t(x),t(y),RTS="crs",ORIENTATION="in-out",add=TRUE,lty="dashed", GRID=TRUE)
# Efficiencies
print(1/d,digits=3)
print(mean(1/d),digits=3)
# Bootstrap
b <- boot.sw98(x,y, RTS=3, ORIENTATION=1, NREP=2000)
#NREP é o que? O tamanho da amostra ou a quantidade de amostras que foram geradas?
#Se for um, qual é o outro?
print(b,digits=3)
print(sqrt(b$var),digits=3)
#DEA Frontier
dea.plot.frontier(t(x),t(y),RTS="crs",txt=1:dim(t(x))[1], GRID=TRUE)
#Bias-corrected frontier
dea.plot.frontier(t(x)/b$dhat.bc,t(y),RTS="crs",lty="dashed",add=T, GRID=TRUE)
#Upper 95% confidence frontier
dea.plot.frontier(t(x)/b$conf.int[,2],t(y),RTS="crs",lty="dotted",add=T, GRID=TRUE)
#Marca, usando uma bola, a eficiência de cada firma
plot(1/b$dhat, ylim = c(.45,1),main = "Resultado da análise",xlab = "Firms",ylab = "Efficiency")
#Marca, usando uma losango, a "bias corrected" eficiência de cada firma
points(1/b$dhat.bc,pch=5)
#Traça o intervalo de confiança
for (i in 1:5 ) lines(rep(i,2),1/b$conf.int[i,],type = "o", pch=3) |
# Daily scraper for mask-dedication statistics from taiwancanhelp.com.tw:
# downloads each day's dedication records and appends a daily summary.
# NOTE(review): rm(list = ls()) and an absolute setwd() path make this
# script non-portable; prefer project-relative paths and a fresh session.
rm(list = ls()); gc()
setwd("E:/NCHC/project2020/mask_dedicate/")
library(dplyr)
library(data.table)
library(jsonlite)
library(lubridate)
# ---
webcrawler <- function(crawl_date){
  # Download all dedication records for one day (crawl_date, "YYYY/MM/DD")
  # from the taiwancanhelp mask API, following pagination via `next_api`.
  # Returns list(detail = list of per-page data.tables without "_created",
  #              dailysummary = one-row data.table of that day's totals).
  page_tables <- list()
  page_no <- 1
  api_url <- paste0("https://taiwancanhelp.com.tw/api/mask",
                    "?date=", crawl_date, "&show=1000")
  repeat {
    response <- fromJSON(api_url)
    # Keep this page's records, dropping the "_created" timestamp column.
    page_tables[[page_no]] <- setDT(response$data$list)[, !"_created"]
    page_no <- page_no + 1
    # ---
    is_last_page <- is.null(response$data$next_api)
    if (is_last_page) {
      dailysummary <- data.table(date = response$data$date,
                                 total_masks = response$data$total_masks,
                                 total_dedicators = response$data$total_dedicators)
    } else {
      # Splice the next page's path onto the site root.
      api_url <- gsub("(.*)/api.*", paste0("\\1", response$data$next_api), api_url)
    }
    Sys.sleep(0.5)  # polite pacing, applied after every page as before
    if (is_last_page) break
  }
  list(detail = page_tables, dailysummary = dailysummary)
}
# ---
# Build the list of dates to crawl: from the service's start date up to
# yesterday, skipping dates already present in the summary file.
date_start <- "2020/04/27" %>% ymd
date_end <- Sys.time() %>% substring(., 1, 10) %>% ymd() %>% `-`(1)
date_all <- seq(date_start, date_end, by = "days")
if (file.exists("./data/dailysummary.csv")){
tmp <- fread("./data/dailysummary.csv")
} else {
tmp <- NULL
}
date_vec <- date_all[!date_all %in% ymd(tmp$date)]
# NOTE(review): if date_vec is empty, 1:length(date_vec) yields c(1, 0)
# and the loop still runs twice; seq_along(date_vec) would be safer.
for (date_i in 1:length(date_vec)) {
crawl_date <- date_vec[date_i] %>% gsub("-", "/", .)
res <- webcrawler(crawl_date = crawl_date)
detail_all <- res$detail %>% rbindlist()
nn <- paste0("detail_", date_vec[date_i], ".csv")
fwrite(x = detail_all, file = paste0("./data/", nn), row.names = F)
# Append the one-row daily summary to the cumulative file.
fwrite(x = res$dailysummary, file = "./data/dailysummary.csv", row.names = F, append = T)
}
# ---
# (translated) If dailysummary is to be rewritten repeatedly instead of
# appended, remember to refresh `tmp` as well:
# dailysummary_add <- list(tmp, res$dailysummary) %>% rbindlist()
# tmp <- dailysummary_add
# fwrite(x = dailysummary_add, file = "./data/dailysummary.csv", row.names = F)
| /code/00_web_crawler.R | no_license | littlefish0331/mask_dedicate | R | false | false | 1,942 | r | rm(list = ls()); gc()
setwd("E:/NCHC/project2020/mask_dedicate/")
library(dplyr)
library(data.table)
library(jsonlite)
library(lubridate)
# ---
webcrawler <- function(crawl_date){
detail <- list()
i <- 1
keep_go <- 1
url <- paste0("https://taiwancanhelp.com.tw/api/mask", "?date=", crawl_date, "&show=1000")
while(keep_go==1){
response = fromJSON(url)
detail[[i]] <- response$data$list %>% setDT %>% .[, !"_created"]
i <- i+1
# ---
if (is.null(response$data$next_api)) {
keep_go <- 0
dailysummary <- data.table(date = response$data$date,
total_masks = response$data$total_masks,
total_dedicators = response$data$total_dedicators)
}
else url <- url %>% gsub("(.*)/api.*", paste0("\\1", response$data$next_api), .)
Sys.sleep(0.5)
}
res <- list(detail = detail, dailysummary = dailysummary)
return(res)
}
# ---
# 要爬取的日期列表
date_start <- "2020/04/27" %>% ymd
date_end <- Sys.time() %>% substring(., 1, 10) %>% ymd() %>% `-`(1)
date_all <- seq(date_start, date_end, by = "days")
if (file.exists("./data/dailysummary.csv")){
tmp <- fread("./data/dailysummary.csv")
} else {
tmp <- NULL
}
date_vec <- date_all[!date_all %in% ymd(tmp$date)]
for (date_i in 1:length(date_vec)) {
crawl_date <- date_vec[date_i] %>% gsub("-", "/", .)
res <- webcrawler(crawl_date = crawl_date)
detail_all <- res$detail %>% rbindlist()
nn <- paste0("detail_", date_vec[date_i], ".csv")
fwrite(x = detail_all, file = paste0("./data/", nn), row.names = F)
# ---
fwrite(x = res$dailysummary, file = "./data/dailysummary.csv", row.names = F, append = T)
}
# ---
# 如果 dailysummary 要不斷覆寫,記得tmp要更新
# dailysummary_add <- list(tmp, res$dailysummary) %>% rbindlist()
# tmp <- dailysummary_add
# fwrite(x = dailysummary_add, file = "./data/dailysummary.csv", row.names = F)
|
#' Principal coordinates ordination
#'
#' @description Performs a principal coordinates analysis of a distance matrix
#'
#' @param V a square distance matrix
#'
#' @return
#' A list containing the following named components:
#' \item{k}{the number of groups (value)}
#' \item{vectors}{the eigenvectors of the centered inner product matrix (matrix)}
#' \item{values}{the eigenvalues of the centered inner product matrix (vector)}
#' \item{PCoords}{the principal coordinates = scaled eigenvectors (matrix)}
#' \item{Variance}{a dataframe containing the following named variables:
#' \describe{
#' \item{eigenvalues}{eigenvalues of the centered inner product matrix}
#' \item{variance}{variance of each principal coordinate}
#' \item{exVar}{proportion of the total variation accounted by each principal coordinate}
#' \item{cumVar}{cumulative proportion of the total variation accounted for by the principal coordinates}
#' }
#' }
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Squared distance matrix of the covariance matrices of all populations
#' eigen.phen.pop <- mat.sq.dist(S.phen.pop, dist. = "Riemannian") # Riemannian distances
#'
#' # Ordination of the squared distance matrix
#' prcoa.pop <- pr.coord(eigen.phen.pop)
#'
#' # Visualization
#' plot(prcoa.pop$PCoords[, 1], prcoa.pop$PCoords[, 2])
#' abline(h = 0) ; abline(v = 0)
#' text(prcoa.pop$PCoords[, 1], prcoa.pop$PCoords[, 2], labels = rownames(prcoa.pop$PCoords))
#'
#' @export
pr.coord <-
  function (V) {
    if (is.data.frame(V))
      V <- as.matrix(V)
    else if (!is.matrix(V))
      stop("'V' must be a matrix or a data frame")
    if (!all(is.finite(V)))
      stop("'V' must contain finite values only")
    if (dim(V)[1] != dim(V)[2])
      stop("'V' must be a square matrix")
    # Centered inner product matrix (Gower double-centering of -V/2)
    k <- dim(V)[1]
    H <- diag(k) - matrix((1 / k), nrow = k, ncol = k) # centering matrix
    D <- - 0.5 * H %*% V %*% H
    # Number of principal coordinates
    max_pc <- k - 1
    # Eigenanalysis; drop = FALSE keeps a one-column matrix when k == 2
    E <- eigen(D)
    vectors <- E$vectors[, 1:max_pc, drop = FALSE]
    rownames(vectors) <- rownames(V)
    colnames(vectors) <- paste("PCo", 1:max_pc, sep = "")
    L <- E$values[1:max_pc]
    # Zero out eigenvalues within machine tolerance of zero.  Markedly
    # negative eigenvalues (non-Euclidean input) are kept, as before, and
    # produce NaN coordinates through sqrt() below.
    tol <- .Machine$double.eps * max(dim(D)) * max(L) # machine tolerance
    values <- ifelse(abs(L) > tol, L, 0)
    # Principal coordinates = eigenvectors scaled by sqrt(eigenvalue).
    # nrow = max_pc stops diag() from building an identity matrix when
    # `values` has length 1 (k == 2).
    PCoords <- vectors %*% diag(sqrt(values), nrow = max_pc)
    colnames(PCoords) <- paste("PCo", 1:max_pc, sep = "")
    variance <- values / max_pc
    exVar <- values / sum(values)
    cumVar <- cumsum(exVar)
    Variance <- data.frame("eigenvalues" = values, "variance" = variance,
                           "exVar" = exVar, "cumVar" = cumVar)
    list("k" = k, "vectors" = vectors, "values" = values,
         "PCoords" = PCoords, "Variance" = Variance)
  }
| /R/pr.coord.R | no_license | cran/vcvComp | R | false | false | 3,517 | r | #' Principal coordinates ordination
#'
#' @description Performs a principal coordinates analysis of a distance matrix
#'
#' @param V a square distance matrix
#'
#' @return
#' A list containing the following named components:
#' \item{k}{the number of groups (value)}
#' \item{vectors}{the eigenvectors of the centered inner product matrix (matrix)}
#' \item{values}{the eigenvalues of the centered inner product matrix (vector)}
#' \item{PCoords}{the principal coordinates = scaled eigenvectors (matrix)}
#' \item{Variance}{a dataframe containing the following named variables:
#' \describe{
#' \item{eigenvalues}{eigenvalues of the centered inner product matrix}
#' \item{variance}{variance of each principal coordinate}
#' \item{exVar}{proportion of the total variation accounted by each principal coordinate}
#' \item{cumVar}{cumulative proportion of the total variation accounted by principal coordinate}
#' }
#' }
#'
#' @examples
#'
#' # Data matrix of 2D landmark coordinates
#' data("Tropheus.IK.coord")
#' coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
#' proc.coord <- as.matrix(Tropheus.IK.coord[coords])
#'
#' # Data reduction
#' phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
#' pc.scores <- phen.pca$x
#'
#' # Covariance matrix of each population
#' S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
#'
#' # Squared distance matrix of the covariance matrices of all populations
#' eigen.phen.pop <- mat.sq.dist(S.phen.pop, dist. = "Riemannian") # Riemannian distances
#'
#' # Ordination of the squared distance matrix
#' prcoa.pop <- pr.coord(eigen.phen.pop)
#'
#' # Visualization
#' plot(prcoa.pop$PCoords[, 1], prcoa.pop$PCoords[, 2])
#' abline(h = 0) ; abline(v = 0)
#' text(prcoa.pop$PCoords[, 1], prcoa.pop$PCoords[, 1], labels = rownames(prcoa.pop$PCoords))
#'
#' @export
pr.coord <-
function (V) {
if (is.data.frame(V))
V <- as.matrix(V)
else if (!is.matrix(V))
stop("'V' must be a matrix or a data frame")
if (!all(is.finite(V)))
stop("'V' must contain finite values only")
if (dim(V)[1] != dim(V)[2])
stop("'V' must be a square matrix")
# Centered inner product matrix
k <- dim(V)[1]
H <- diag(k) - matrix((1 / k), nrow = k, ncol = k) # centering matrix
D <- - 0.5 * H %*% V %*% H
# Number of principal coordinates
max_pc <- k - 1
# Eigenanalysis
E <- eigen(D)
vectors <- E$vectors[, 1:max_pc]
rownames(vectors) <- rownames(V)
colnames(vectors) <- paste("PCo", 1:max_pc, sep = "")
L <- E$values[1:max_pc]
L0 <- rep(0, length(L))
# Keeps only the nonzero eigenvalues (above tol or below -tol)
tol <- .Machine$double.eps * max(dim(D)) * max(L) # Machine tolerance value
for (i in 1:length(L)) {
if (abs(L[i]) > tol) {
L0[i] <- L[i]
}
}
values <- L0
PCoords <- vectors %*% diag(sqrt(values))
colnames(PCoords) <- paste("PCo", 1:max_pc, sep = "")
variance <- values / max_pc
exVar <- values / sum(values)
cumVar <- exVar
for (i in 2:max_pc) {
cumVar[i] <- cumVar[i - 1] + exVar[i]
}
Variance <- data.frame("eigenvalues" = values, "variance" = variance, "exVar" = exVar, "cumVar" = cumVar)
prCoord <- list("k" = k, "vectors" = vectors, "values" = values, "PCoords" = PCoords, "Variance" = Variance)
return(prCoord)
}
|
## cladophora
## 1,230 e–0.55 * Depth
#max_depth = 1,230 e–0.55 * Depth
## Libraries ----
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr) # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
library(mgcv)
# BUG FIX: ggqqplot() below comes from ggpubr, which was never loaded
library(ggpubr)
## Data: maximum Cladophora biomass by depth (Higgins et al. 2005)
depth <- read.csv("input_data/Depth_2_Higgins_etal_2005.csv")
depth <- na.omit(depth)
depth
## convert depth to cm and biomass to % and presence/absence for glm
depth <- depth %>%
  mutate(depth_cm = depth_m * 100) %>%
  mutate(max_biomass_percent = (maximum_biomass_g_DW_m..2 / 1230) * 100) %>%
  mutate(presence_absence = ifelse(max_biomass_percent == 0, 0, 1)) %>%
  mutate(max_biomass_percent_log = log(max_biomass_percent + 1))
depth
mean(depth$depth_m)
## test for normality
shapiro.test(depth$maximum_biomass_g_DW_m..2)
shapiro.test(depth$max_biomass_percent)
ggqqplot(depth$max_biomass_percent)
### model exploration: linear, GAM, quadratic, GLM, and log-response variants
log(depth$max_biomass_percent + 1)
depth_lm <- lm(max_biomass_percent ~ depth_cm, data = depth)
summary(depth_lm) # p-value: 0.01694
depth_gam <- gam(max_biomass_percent ~ depth_cm, data = depth)
summary(depth_gam)
## Deviance explained = 71.2%
depth_lmq <- lm(max_biomass_percent ~ depth_cm + I(depth_cm^2), data = depth)
summary(depth_lmq) ## p-value: 0.02099
depth_glm <- glm(max_biomass_percent ~ depth_cm, data = depth, family = "gaussian")
summary(depth_glm)
depth_lm_log <- lm(max_biomass_percent_log ~ depth_cm, data = depth)
summary(depth_lm_log) ## p-value: 1.903e-05
depth_lmq <- lm(max_biomass_percent_log ~ depth_cm + I(depth_cm^2), data = depth)
summary(depth_lmq) ## p-value: 0.0003203
## plot
## best model: quadratic fit on the percent scale
depth_lmq <- lm(max_biomass_percent ~ depth_cm + I(depth_cm^2), data = depth)
summary(depth_lmq) ## 0.010316, Adjusted R-squared: 0.9305
plot(depth_lmq)
## qqplot awful
## qqplot not good but passed normality above
## plot: save the quadratic model figure
png("figures/Final_curves/Depth/C1_Cladophora_depth_model.png", width = 500, height = 600)
# BUG FIX: wrapped in print() -- a bare ggplot object is not auto-printed
# when this script is run via source(), which left the PNG empty
print(
  ggplot(data = depth, mapping = aes(x = depth_cm, y = max_biomass_percent)) +
    geom_point(size = 2) +
    stat_smooth(method = "lm", formula = y ~ x + I(x^2)) +
    # scale_y_continuous(trans=log1p_trans()) +
    # scale_y_log10()+
    labs(x = "Depth (cm)", y = "Biomass (%)") +
    theme_classic() +
    # scale_y_continuous(limits=c(,100)) +
    theme(axis.text = element_text(size = 20), axis.title = element_text(size = 20))
)
dev.off()
| /scripts/old/C1_F57C_Cladophora_depth.R | no_license | ksirving/flow_eco_mech | R | false | false | 2,448 | r | ## cladophora
## 1,230 e–0.55 * Depth
#max_depth = 1,230 e–0.55 * Depth
## Libraries ----
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr) # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
library(mgcv)
# BUG FIX: ggqqplot() below comes from ggpubr, which was never loaded
library(ggpubr)
## Data: maximum Cladophora biomass by depth (Higgins et al. 2005)
depth <- read.csv("input_data/Depth_2_Higgins_etal_2005.csv")
depth <- na.omit(depth)
depth
## convert depth to cm and biomass to % and presence/absence for glm
depth <- depth %>%
  mutate(depth_cm = depth_m * 100) %>%
  mutate(max_biomass_percent = (maximum_biomass_g_DW_m..2 / 1230) * 100) %>%
  mutate(presence_absence = ifelse(max_biomass_percent == 0, 0, 1)) %>%
  mutate(max_biomass_percent_log = log(max_biomass_percent + 1))
depth
mean(depth$depth_m)
## test for normality
shapiro.test(depth$maximum_biomass_g_DW_m..2)
shapiro.test(depth$max_biomass_percent)
ggqqplot(depth$max_biomass_percent)
### model exploration: linear, GAM, quadratic, GLM, and log-response variants
log(depth$max_biomass_percent + 1)
depth_lm <- lm(max_biomass_percent ~ depth_cm, data = depth)
summary(depth_lm) # p-value: 0.01694
depth_gam <- gam(max_biomass_percent ~ depth_cm, data = depth)
summary(depth_gam)
## Deviance explained = 71.2%
depth_lmq <- lm(max_biomass_percent ~ depth_cm + I(depth_cm^2), data = depth)
summary(depth_lmq) ## p-value: 0.02099
depth_glm <- glm(max_biomass_percent ~ depth_cm, data = depth, family = "gaussian")
summary(depth_glm)
depth_lm_log <- lm(max_biomass_percent_log ~ depth_cm, data = depth)
summary(depth_lm_log) ## p-value: 1.903e-05
depth_lmq <- lm(max_biomass_percent_log ~ depth_cm + I(depth_cm^2), data = depth)
summary(depth_lmq) ## p-value: 0.0003203
## plot
## best model: quadratic fit on the percent scale
depth_lmq <- lm(max_biomass_percent ~ depth_cm + I(depth_cm^2), data = depth)
summary(depth_lmq) ## 0.010316, Adjusted R-squared: 0.9305
plot(depth_lmq)
## qqplot awful
## qqplot not good but passed normality above
## plot: save the quadratic model figure
png("figures/Final_curves/Depth/C1_Cladophora_depth_model.png", width = 500, height = 600)
# BUG FIX: wrapped in print() -- a bare ggplot object is not auto-printed
# when this script is run via source(), which left the PNG empty
print(
  ggplot(data = depth, mapping = aes(x = depth_cm, y = max_biomass_percent)) +
    geom_point(size = 2) +
    stat_smooth(method = "lm", formula = y ~ x + I(x^2)) +
    # scale_y_continuous(trans=log1p_trans()) +
    # scale_y_log10()+
    labs(x = "Depth (cm)", y = "Biomass (%)") +
    theme_classic() +
    # scale_y_continuous(limits=c(,100)) +
    theme(axis.text = element_text(size = 20), axis.title = element_text(size = 20))
)
dev.off()
|
## Set working directory where data are saved
## NOTE(review): setwd() in a script is fragile -- prefer project-relative paths.
setwd("C:/Users/petere/Desktop/Experimente-Antrag/Simulation CPS post/")
library(foreign)
# Recompute column 7 (= col3 * col4) for each of the 1000 Monte-Carlo files.
# BUG FIX: the original read the literal file "MCTransfer_r.dat" and wrote the
# literal file "sdint_r" on every iteration -- the loop index r was never
# interpolated into the file names, so the same file was processed 1000 times.
# (Local renamed from `sd` to `dat` to avoid masking stats::sd.)
for (r in 1:1000) {
  dat <- read.table(paste0("MCTransfer_", r, ".dat"))
  dat[, 7] <- (dat[, 3] * dat[, 4])
  write.table(dat, paste0("sdint_", r), sep = "\t")
}
# --- Monte-Carlo Rasch item-fit simulation fragment -------------------------
# NOTE(review): this chunk looks like an incomplete copy/paste. `n_`, `r`,
# `i`, `m` and `ifmat_rasch` are never defined in this file, and
# sim.rasch(), RM(), person.parameter() and itemfit() presumably come from
# the eRm package, which is not loaded here -- confirm before running.
ss <- read.table("MCTransfer_1.dat")
ss[,7] <- 1
# NOTE(review): `n_` and `r` are undefined here; this also discards the data
# just read above -- `ss` apparently becomes a sample size afterwards.
ss <- n_[r]
# Outer loop: item counts `is` drawn from the (undefined) vector `i`;
# inner loop: `m` simulation replicates per condition.
for (t in 1:length(i)){
is <- i[t]
for (s in 1:m){
sm <- sim.rasch(ss, is)
rm <- RM(sm) # Estimate Rasch parameters
pp <- person.parameter(rm) #Read person parameters
fs <- itemfit(pp) #Compute fit statistics
# Append this replicate's item infit mean-squares as a new row, then count
# how many of the `is` items fall outside three acceptance bands
# (0.7-1.3, 0.8-1.2, 0.85-1.15) and record the simulation settings.
ifmat_rasch <- rbind(ifmat_rasch, fs$i.infitMSQ)
ifmat_rasch$Infit_0713[nrow(ifmat_rasch)] <- (sum(ifmat_rasch[nrow(ifmat_rasch),1:is]<0.7)+sum(ifmat_rasch[nrow(ifmat_rasch),1:is]>1.3))
ifmat_rasch$Infit_0812[nrow(ifmat_rasch)] <- (sum(ifmat_rasch[nrow(ifmat_rasch),1:is]<0.8)+sum(ifmat_rasch[nrow(ifmat_rasch),1:is]>1.2))
ifmat_rasch$Infit_085115[nrow(ifmat_rasch)] <- (sum(ifmat_rasch[nrow(ifmat_rasch),1:is]<0.85)+sum(ifmat_rasch[nrow(ifmat_rasch),1:is]>1.15))
ifmat_rasch$ss[nrow(ifmat_rasch)] <- ss
ifmat_rasch$is[nrow(ifmat_rasch)] <- is
ifmat_rasch$ni[nrow(ifmat_rasch)] <- ncol(ifmat_rasch[nrow(ifmat_rasch),])
}
}
# NOTE(review): this closing brace has no matching opening brace in this file.
}
##Activate required packages
library(foreign)
library(reshape)
library(plyr)
library(gdata)
library(BayesFactor)
library(ggthemes)
library(grid)
library(XLConnect)
library(gridExtra)
#Save settings for "Pety"-theme to be rather APA
# APA-like ggplot2 theme: white panel with black border, no grid lines, black
# axis text.
# FIX: base_size/base_family were previously accepted but ignored (text size
# was hard-coded to 30). They are now honoured; the default of 30 keeps the
# zero-argument calls used below looking exactly as before.
theme_pety <- function(base_size = 30, base_family = "") {
  theme(text = element_text(size = base_size, family = base_family), # larger text font size
        panel.background = element_rect(fill = 'white', colour = 'black'), # figure background colour
        panel.grid.major = element_blank(), # remove major grid
        panel.grid.minor = element_blank(), # remove minor grid
        axis.text.x = element_text(colour = "black"), # x-axis text colour
        axis.text.y = element_text(colour = "black")) # y-axis text colour
}
# Named colour palettes / line types used by the figures below.
petyPalette <- c("blue4","dodgerblue4","dodgerblue2", "cornflowerblue")
EstherPalette <- c("blue", "red", "green", "purple")
EstherLinetype <- c("solid", "dashed", "solid", "dashed")
# Colour-blind-friendly palette (appears to be the Okabe-Ito set).
cbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# Load plotting data exported from SPSS (foreign::read.spss).
plotdata <- read.spss("plotdata.sav", use.value.labels=TRUE,
max.value.labels=Inf, to.data.frame=TRUE) #Read data set from SPSS-file
str (plotdata) #Check Data set
#Bar graph for intervention effects on immediate learning measures
# One panel per immediate-learning measure; rows with missing il_measure are
# dropped before plotting.
bar_gra <- ggplot(subset(plotdata,
!is.na(il_measure)),
aes(x=il_intervention,
y=il_mean)) +
facet_wrap(~il_measure) +
geom_bar(position=position_dodge(),
stat="identity",
) +
theme_pety() +
# coord_fixed(ratio=0.15) +
scale_x_discrete(name="Intervention") +
scale_y_continuous(limits=c(0, 100),
breaks=seq(0, 100, 10),
name="Solution rate (in %)") +
theme(legend.title=element_blank()) +
# il_CI*(1/1.96) presumably rescales a 95% CI half-width back to a standard
# error for the error bars -- TODO confirm against how il_CI was computed.
geom_errorbar(aes(ymin=il_mean-(il_CI*(1/1.96)),
ymax=il_mean+(il_CI*(1/1.96)),
group=il_intervention),
width=0.1,
position=position_dodge(0.9),
alpha=0.85,
colour="black",
fill="black")
bar_gra
#Bar graph for intervention effects on immediate learning measures: Try to cut ordinate
bar_gra_a <- ggplot(subset(plotdata,
!is.na(il_measure)),
aes(x=il_intervention,
y=il_mean)) +
coord_cartesian(ylim=c(75, 100)) +
facet_wrap(~il_measure) +
geom_bar(position=position_dodge(),
stat="identity",
) +
#theme_pety() +
# coord_fixed(ratio=0.15) +
scale_x_discrete(name="Intervention") +
scale_y_continuous(breaks=seq(75, 100, 5),
name="Solution rate (in %)") +
theme(legend.title=element_blank()) +
geom_errorbar(aes(ymin=il_mean-(il_CI*(1/1.96)),
ymax=il_mean+(il_CI*(1/1.96)),
group=il_intervention),
width=0.1,
position=position_dodge(0.9),
alpha=0.85,
colour="black",
fill="black") +
theme(text = element_text(size=40), #Larger text font size
panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
bar_gra_a
tiff("bar_gra_a.tiff", width = 2200, height = 1100)
plot(bar_gra_a)
dev.off()
##Line Graphs##
#Data preparation line graphs
fup_1 <- plotdata[1:12,] #Data for transformation knowledge
fup_2 <- plotdata[13:24,] #Data for explicit content knowledge
fup_3 <- plotdata[25:36,] #Data for misconceptions
#Line graph for intervention effects on I) transformation knowledge
fupline_1 <- ggplot(fup_1,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 2,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 7,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(30, 58),
breaks=seq(30, 58, 5)) +
#scale_linetype_manual(values = EstherLinetype) +
theme_pety() +
labs(x = "Transformation knowledge",
y = "Score (max = 58)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=30)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=4)),
shape=guide_legend(override.aes=list(size=7)))
fupline_1
tiff("fupline_1.tiff", width = 3000, height = 2000)
plot(fupline_1)
dev.off()
#Line graph for intervention effects on I) transformation knowledge
fupline_1_a <- ggplot(fup_1,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 4,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 20,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(30, 58),
breaks=seq(30, 58, 5)) +
#scale_linetype_manual(values = EstherLinetype) +
#theme_pety() +
labs(x = "Transformation knowledge",
y = "Score (max = 58)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=70)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=17)),
shape=guide_legend(override.aes=list(size=20))) +
theme(text = element_text(size=70), #Larger text font size
#panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
fupline_1_a
tiff("fupline_1_a.tiff", width = 2400, height = 1800)
plot(fupline_1_a)
dev.off()
#Line graph for intervention effects on II) explicit concept knowledge
fupline_2 <- ggplot(fup_2,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 2, position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 7,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits=c(2, 12),
breaks=seq(2, 12, 2)) +
theme_pety() +
labs(x = "Explicit concept knowledge",
y = "Score (max = 19)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=30)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=7)),
shape=guide_legend(override.aes=list(size=7)))
fupline_2
#Line graph for intervention effects on II) explicit concept knowledge
fupline_2_a <- ggplot(fup_2,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 4, position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 20,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits=c(2, 12),
breaks=seq(2, 12, 2)) +
theme_pety() +
labs(x = "Explicit concept knowledge",
y = "Score (max = 19)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=70)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=17)),
shape=guide_legend(override.aes=list(size=20))) +
theme(text = element_text(size=70), #Larger text font size
#panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
fupline_2_a
tiff("fupline_2_a.tiff", width = 2400, height = 1800)
plot(fupline_2_a)
dev.off()
#Line graph for intervention effects on III) misconceptions
fupline_3 <- ggplot(fup_3,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 2,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 7,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(0, 2),
breaks=seq(0, 2, 0.5)) +
theme_pety() +
labs(x = "Misconceptions",
y = "Errors",
shape = "Intervention") +
theme(legend.position=c(0.2,0.4),
legend.text=element_text(size=30)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=7)),
shape=guide_legend(override.aes=list(size=7)))
fupline_3
# BUG FIX(review): this block follows the fupline_3 plot but re-saved
# fupline_2_a, overwriting the file already written above and leaving
# fupline_3 unsaved. Save fupline_3 instead.
tiff("fupline_3.tiff", width = 2400, height = 1800)
plot(fupline_3)
dev.off()
#Line graph for intervention effects on III) misconceptions
fupline_3_a <- ggplot(fup_3,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 4,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 20,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(0, 2),
breaks=seq(0, 2, 0.5)) +
#theme_pety() +
labs(x = "Misconceptions",
y = "Errors",
shape = "Intervention") +
theme(legend.position=c(0.2,0.4),
legend.text=element_text(size=70)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=17)),
shape=guide_legend(override.aes=list(size=20))) +
theme(text = element_text(size=70), #Larger text font size
#panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
fupline_3_a
tiff("fupline_3_a.tiff", width = 2400, height = 1800)
plot(fupline_3_a)
dev.off()
# Arrange the three follow-up line graphs side by side (displayed only;
# not written to a file here).
grid.arrange(fupline_1, fupline_2, fupline_3, nrow=1)
##MISC -- leftover snippets kept for reference (not executed)
#colour="black"
#scale_fill_manual(values = EstherPalette)
#scale_fill_hue(l=50)
#scale_fill_brewer(palette="Set1") +
#scale_shape_manual(values = c(1, 5, 10, 15)) +
#facet_grid( ~ Measure) +
#coord_cartesian(ylim=c(30, 58)) + #different option to regulate axis range
#pd_2 <- subset(plotdata, Measure=1)
#pd_2 <-plotdata[plotdata$Measure==1,]
#at line aes aes(linetype = Intervention),
# at main aes , fill=Intervention
#black scale for shape fillings #scale_fill_manual(values=c("black", "black", "black", "black")) +
#guides(colour = guide_legend("Region",
#                             override.aes = list(size = 6)))
#scale_shape_discrete(guide=guide_legend(override.aes=aes(size=3)))
#scale_shape_discrete(guide=guide_legend(override.aes=aes(size=3,
#                                                         linetype=0))
# + guides(colour = guide_legend(override.aes = list(size=10)))
#
# | /Mplus data/auslese.R | no_license | peter1328/Monte-Carlo-CPS-Transfer | R | false | false | 16,933 | r | ##Set working directory where data are saved
## NOTE(review): setwd() in a script is fragile -- prefer project-relative paths.
setwd("C:/Users/petere/Desktop/Experimente-Antrag/Simulation CPS post/")
library(foreign)
# Recompute column 7 (= col3 * col4) for each of the 1000 Monte-Carlo files.
# BUG FIX: the original read the literal file "MCTransfer_r.dat" and wrote the
# literal file "sdint_r" on every iteration -- the loop index r was never
# interpolated into the file names, so the same file was processed 1000 times.
# (Local renamed from `sd` to `dat` to avoid masking stats::sd.)
for (r in 1:1000) {
  dat <- read.table(paste0("MCTransfer_", r, ".dat"))
  dat[, 7] <- (dat[, 3] * dat[, 4])
  write.table(dat, paste0("sdint_", r), sep = "\t")
}
ss <- read.table("MCTransfer_1.dat")
ss[,7] <- 1
ss <- n_[r]
for (t in 1:length(i)){
is <- i[t]
for (s in 1:m){
sm <- sim.rasch(ss, is)
rm <- RM(sm) # Estimate Rasch parameters
pp <- person.parameter(rm) #Read person parameters
fs <- itemfit(pp) #Compute fit statistics
ifmat_rasch <- rbind(ifmat_rasch, fs$i.infitMSQ)
ifmat_rasch$Infit_0713[nrow(ifmat_rasch)] <- (sum(ifmat_rasch[nrow(ifmat_rasch),1:is]<0.7)+sum(ifmat_rasch[nrow(ifmat_rasch),1:is]>1.3))
ifmat_rasch$Infit_0812[nrow(ifmat_rasch)] <- (sum(ifmat_rasch[nrow(ifmat_rasch),1:is]<0.8)+sum(ifmat_rasch[nrow(ifmat_rasch),1:is]>1.2))
ifmat_rasch$Infit_085115[nrow(ifmat_rasch)] <- (sum(ifmat_rasch[nrow(ifmat_rasch),1:is]<0.85)+sum(ifmat_rasch[nrow(ifmat_rasch),1:is]>1.15))
ifmat_rasch$ss[nrow(ifmat_rasch)] <- ss
ifmat_rasch$is[nrow(ifmat_rasch)] <- is
ifmat_rasch$ni[nrow(ifmat_rasch)] <- ncol(ifmat_rasch[nrow(ifmat_rasch),])
}
}
}
##Activate required packages
library(foreign)
library(reshape)
library(plyr)
library(gdata)
library(BayesFactor)
library(ggthemes)
library(grid)
library(XLConnect)
library(gridExtra)
#Save settings for "Pety"-theme to be rather APA
# APA-like ggplot2 theme: white panel with black border, no grid lines, black
# axis text.
# FIX: base_size/base_family were previously accepted but ignored (text size
# was hard-coded to 30). They are now honoured; the default of 30 keeps the
# zero-argument calls used below looking exactly as before.
theme_pety <- function(base_size = 30, base_family = "") {
  theme(text = element_text(size = base_size, family = base_family), # larger text font size
        panel.background = element_rect(fill = 'white', colour = 'black'), # figure background colour
        panel.grid.major = element_blank(), # remove major grid
        panel.grid.minor = element_blank(), # remove minor grid
        axis.text.x = element_text(colour = "black"), # x-axis text colour
        axis.text.y = element_text(colour = "black")) # y-axis text colour
}
petyPalette <- c("blue4","dodgerblue4","dodgerblue2", "cornflowerblue")
EstherPalette <- c("blue", "red", "green", "purple")
EstherLinetype <- c("solid", "dashed", "solid", "dashed")
cbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
plotdata <- read.spss("plotdata.sav", use.value.labels=TRUE,
max.value.labels=Inf, to.data.frame=TRUE) #Read data set from SPSS-file
str (plotdata) #Check Data set
#Bar graph for intervention effects on immediate learning measures
bar_gra <- ggplot(subset(plotdata,
!is.na(il_measure)),
aes(x=il_intervention,
y=il_mean)) +
facet_wrap(~il_measure) +
geom_bar(position=position_dodge(),
stat="identity",
) +
theme_pety() +
# coord_fixed(ratio=0.15) +
scale_x_discrete(name="Intervention") +
scale_y_continuous(limits=c(0, 100),
breaks=seq(0, 100, 10),
name="Solution rate (in %)") +
theme(legend.title=element_blank()) +
geom_errorbar(aes(ymin=il_mean-(il_CI*(1/1.96)),
ymax=il_mean+(il_CI*(1/1.96)),
group=il_intervention),
width=0.1,
position=position_dodge(0.9),
alpha=0.85,
colour="black",
fill="black")
bar_gra
#Bar graph for intervention effects on immediate learning measures: Try to cut ordinate
bar_gra_a <- ggplot(subset(plotdata,
!is.na(il_measure)),
aes(x=il_intervention,
y=il_mean)) +
coord_cartesian(ylim=c(75, 100)) +
facet_wrap(~il_measure) +
geom_bar(position=position_dodge(),
stat="identity",
) +
#theme_pety() +
# coord_fixed(ratio=0.15) +
scale_x_discrete(name="Intervention") +
scale_y_continuous(breaks=seq(75, 100, 5),
name="Solution rate (in %)") +
theme(legend.title=element_blank()) +
geom_errorbar(aes(ymin=il_mean-(il_CI*(1/1.96)),
ymax=il_mean+(il_CI*(1/1.96)),
group=il_intervention),
width=0.1,
position=position_dodge(0.9),
alpha=0.85,
colour="black",
fill="black") +
theme(text = element_text(size=40), #Larger text font size
panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
bar_gra_a
tiff("bar_gra_a.tiff", width = 2200, height = 1100)
plot(bar_gra_a)
dev.off()
##Line Graphs##
#Data preparation line graphs
fup_1 <- plotdata[1:12,] #Data for transformation knowledge
fup_2 <- plotdata[13:24,] #Data for explicit content knowledge
fup_3 <- plotdata[25:36,] #Data for misconceptions
#Line graph for intervention effects on I) transformation knowledge
fupline_1 <- ggplot(fup_1,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 2,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 7,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(30, 58),
breaks=seq(30, 58, 5)) +
#scale_linetype_manual(values = EstherLinetype) +
theme_pety() +
labs(x = "Transformation knowledge",
y = "Score (max = 58)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=30)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=4)),
shape=guide_legend(override.aes=list(size=7)))
fupline_1
tiff("fupline_1.tiff", width = 3000, height = 2000)
plot(fupline_1)
dev.off()
#Line graph for intervention effects on I) transformation knowledge
fupline_1_a <- ggplot(fup_1,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 4,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 20,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(30, 58),
breaks=seq(30, 58, 5)) +
#scale_linetype_manual(values = EstherLinetype) +
#theme_pety() +
labs(x = "Transformation knowledge",
y = "Score (max = 58)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=70)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=17)),
shape=guide_legend(override.aes=list(size=20))) +
theme(text = element_text(size=70), #Larger text font size
#panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
fupline_1_a
tiff("fupline_1_a.tiff", width = 2400, height = 1800)
plot(fupline_1_a)
dev.off()
#Line graph for intervention effects on II) explicit concept knowledge
fupline_2 <- ggplot(fup_2,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 2, position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 7,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits=c(2, 12),
breaks=seq(2, 12, 2)) +
theme_pety() +
labs(x = "Explicit concept knowledge",
y = "Score (max = 19)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=30)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=7)),
shape=guide_legend(override.aes=list(size=7)))
fupline_2
#Line graph for intervention effects on II) explicit concept knowledge
fupline_2_a <- ggplot(fup_2,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 4, position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 20,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits=c(2, 12),
breaks=seq(2, 12, 2)) +
theme_pety() +
labs(x = "Explicit concept knowledge",
y = "Score (max = 19)",
shape = "Intervention") +
theme(legend.position=c(0.2,0.2),
legend.text=element_text(size=70)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=17)),
shape=guide_legend(override.aes=list(size=20))) +
theme(text = element_text(size=70), #Larger text font size
#panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
fupline_2_a
tiff("fupline_2_a.tiff", width = 2400, height = 1800)
plot(fupline_2_a)
dev.off()
#Line graph for intervention effects on III) misconceptions
fupline_3 <- ggplot(fup_3,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 2,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 7,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(0, 2),
breaks=seq(0, 2, 0.5)) +
theme_pety() +
labs(x = "Misconceptions",
y = "Errors",
shape = "Intervention") +
theme(legend.position=c(0.2,0.4),
legend.text=element_text(size=30)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=7)),
shape=guide_legend(override.aes=list(size=7)))
fupline_3
# BUG FIX(review): this block follows the fupline_3 plot but re-saved
# fupline_2_a, overwriting the file already written above and leaving
# fupline_3 unsaved. Save fupline_3 instead.
tiff("fupline_3.tiff", width = 2400, height = 1800)
plot(fupline_3)
dev.off()
#Line graph for intervention effects on III) misconceptions
fupline_3_a <- ggplot(fup_3,
aes(x = Time,
y = mean,
group = Intervention,
shape=Intervention,
fill = Intervention)) +
geom_line(size = 4,
position = position_dodge(width = 0.1)) +
geom_errorbar(aes(ymin=mean-(CI*(1/1.96)), ymax=mean+(CI*(1/1.96))),
linetype = 1,
position = position_dodge(width = 0.1),
width=0.15,
size = 0.75) +
geom_point(size = 20,
position = position_dodge(width = 0.1)) +
scale_shape_manual(values=c(22,22,24,24)) +
scale_fill_manual(values=c("black", "white", "black", "white")) +
scale_x_continuous(breaks=seq(1,3,1),
labels =c("T1", "T2", "T3")) +
scale_y_continuous(limits = c(0, 2),
breaks=seq(0, 2, 0.5)) +
#theme_pety() +
labs(x = "Misconceptions",
y = "Errors",
shape = "Intervention") +
theme(legend.position=c(0.2,0.4),
legend.text=element_text(size=70)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
guides(linetype=guide_legend(override.aes=list(size=17)),
shape=guide_legend(override.aes=list(size=20))) +
theme(text = element_text(size=70), #Larger text font size
#panel.background = element_rect(fill = 'white', colour = 'black'), #Figure background colour
panel.grid.major=element_blank(), #Remove major grid
panel.grid.minor=element_blank(), #Remove minor grid
axis.text.x = element_text(colour="black"), #x-Axis text colour
axis.text.y = element_text(colour="black")) #y-Axis text colour
fupline_3_a
tiff("fupline_3_a.tiff", width = 2400, height = 1800)
plot(fupline_3_a)
dev.off()
grid.arrange(fupline_1, fupline_2, fupline_3, nrow=1)
##MISC
#colour="black"
#scale_fill_manual(values = EstherPalette)
#scale_fill_hue(l=50)
#scale_fill_brewer(palette="Set1") +
#scale_shape_manual(values = c(1, 5, 10, 15)) +
#facet_grid( ~ Measure) +
#coord_cartesian(ylim=c(30, 58)) + #different option to regulate axis range
#pd_2 <- subset(plotdata, Measure=1)
#pd_2 <-plotdata[plotdata$Measure==1,]
#at line aes aes(linetype = Intervention),
# at main aes , fill=Intervention
#black scale for shape fillings #scale_fill_manual(values=c("black", "black", "black", "black")) +
#guides(colour = guide_legend("Region",
# override.aes = list(size = 6)))
#scale_shape_discrete(guide=guide_legend(override.aes=aes(size=3)))
#scale_shape_discrete(guide=guide_legend(override.aes=aes(size=3,
# linetype=0))
# + guides(colour = guide_legend(override.aes = list(size=10)))
# |
as.tfidf <- function(sentences, ngram_bounduary = c(1L, 1L), token_min = 3L, skip_gram_window = 5L){
  # Convert a two-column table of sentence pairs into per-pair TF-IDF
  # weight strings.
  #
  # Args:
  #   sentences: data frame / matrix whose first two columns hold the
  #     sentence pair; each row is stemmed (external Stemming()) and
  #     vectorized independently.
  #   ngram_bounduary, token_min, skip_gram_window: currently unused;
  #     kept (typo and all) for backward compatibility with callers.
  #
  # Returns: a data frame with columns `t` and `h`, each a ";"-separated
  #   string of the TF-IDF weights of the corresponding sentence.
  require(text2vec)
  local_tfidf <- TfIdf$new()
  get_sentence_tfidf <- function(sentence_pair){
    if(is.character(sentence_pair) == FALSE){
      sentence_pair <- as.character(sentence_pair)
    }
    local_sentence_token <- itoken(sentence_pair, tokenizer = space_tokenizer, progressbar = FALSE)
    local_sentence_vocab <- create_vocabulary(local_sentence_token)
    local_sentence_vectorizer <- vocab_vectorizer(local_sentence_vocab)
    local_dtm <- create_dtm(local_sentence_token, local_sentence_vectorizer)
    local_tfidf_matrix <- as.matrix(fit_transform(local_dtm, local_tfidf))
    # Row 1 of the DTM is the text, row 2 the hypothesis.
    data.frame(
      t = paste(as.character(local_tfidf_matrix[1, ]), collapse = ";"),
      h = paste(as.character(local_tfidf_matrix[2, ]), collapse = ";")
    )
  }
  # Build one small data frame per row, then bind once at the end --
  # avoids the O(n^2) copying of growing the result with rbind() inside
  # the loop, while returning the same stacked data frame.
  rows <- lapply(seq_len(nrow(sentences)), function(index){
    row <- sentences[index, ]
    row[1] <- Stemming(row[1])
    row[2] <- Stemming(row[2])
    get_sentence_tfidf(row)
  })
  do.call(rbind, rows)
}
as.embeddings.vector <- function(sentence, word_vectors, sumFunction = rowSums){
  # Collapse the per-word embedding matrix of `sentence` into a single
  # column vector by applying `sumFunction` (default: row-wise sum)
  # across the word columns.
  as.matrix(sumFunction(as.embeddings.matrix(sentence, word_vectors)))
}
as.embeddings.matrix <- function(sentence, word_vectors){
  # Look up the embedding of every token of `sentence` (lower-cased,
  # whitespace-split) in `word_vectors` (a matrix with one word per row)
  # and return them as a matrix with one column per word.
  # Out-of-vocabulary words are first mapped to zero columns and then
  # replaced by the element-wise mean over all columns, so unknown words
  # receive the "average" embedding of the sentence.
  tokens <- tolower(unlist(strsplit(as.character(trimws(sentence, which = "both")), " ")))
  lookup <- function(word){
    if(word %in% rownames(word_vectors)){
      as.numeric(word_vectors[word, , drop = FALSE])
    } else {
      # Placeholder zero vector with one entry per embedding dimension.
      as.numeric(matrix(0, ncol(word_vectors)))
    }
  }
  embedding_cols <- sapply(tokens, lookup)
  zero_cols <- apply(embedding_cols, 2, function(col) mean(col) == 0)
  col_average <- apply(embedding_cols, 1, mean)
  embedding_cols[, which(zero_cols == TRUE)] <- col_average
  embedding_cols
}
sim.embeddings.vector <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", regularization = "none"){
  # Per-row similarity between the summed word embeddings of the two
  # sentences (column 1 = text, column 2 = hypothesis). Returns one
  # similarity value per row of `sentences`.
  emb_t <- sapply(sentences[, 1], function(s) as.embeddings.vector(s, word_vectors, sumFunction = rowSums))
  emb_h <- sapply(sentences[, 2], function(s) as.embeddings.vector(s, word_vectors, sumFunction = rowSums))
  pair_similarity <- function(i){
    sim2(t(as.matrix(emb_t[, i])), t(as.matrix(emb_h[, i])), method = sim.method, norm = regularization)
  }
  sapply(seq_len(nrow(sentences)), pair_similarity)
}
sim.pca.embeddings.matrix <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
  # For each sentence pair, project both embedding matrices onto their
  # principal components (princomp, covariance-based) and compare only
  # the first component. With `checkDistance = TRUE` a distance (dist2)
  # is returned instead of a similarity (sim2).
  require(stats)
  pair_score <- function(i){
    pcs_t <- as.matrix(princomp(as.embeddings.matrix(sentences[i, 1], word_vectors), cor = FALSE)$scores)
    pcs_h <- as.matrix(princomp(as.embeddings.matrix(sentences[i, 2], word_vectors), cor = FALSE)$scores)
    if(checkDistance){
      dist2(t(pcs_t[, 1]), t(pcs_h[, 1]), method = sim.method, norm = regularization)
    } else {
      sim2(t(pcs_t[, 1]), t(pcs_h[, 1]), method = sim.method, norm = regularization)
    }
  }
  unlist(lapply(seq_len(nrow(sentences)), pair_score))
}
sim.svdpca.embeddings.matrix <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
  # Like sim.pca.embeddings.matrix() but uses randomized PCA (rsvd::rpca)
  # and guards against all-zero embedding matrices (every word OOV),
  # which would make the decomposition degenerate.
  require(stats)
  require(rsvd)
  pair_score <- function(index){
    # BUG FIX: the original indexed the global `sentences_train` instead
    # of the `sentences` argument (flagged by the old "# Error line 2474"
    # comment), so the function silently ignored its input.
    x <- as.embeddings.matrix(sentences[index, 1], word_vectors)
    y <- as.embeddings.matrix(sentences[index, 2], word_vectors)
    if(all(x == 0)){
      x <- diag(nrow = nrow(x), ncol = ncol(x))
    }
    if(all(y == 0)){
      y <- diag(nrow = nrow(y), ncol = ncol(y))
    }
    # Keep only the rotated data (scores); PC 1 is compared below.
    x <- as.matrix(rpca(x, retx = TRUE)$x)
    y <- as.matrix(rpca(y, retx = TRUE)$x)
    if(checkDistance){
      dist2(t(x[, 1]), t(y[, 1]), method = sim.method, norm = regularization)
    } else {
      sim2(t(x[, 1]), t(y[, 1]), method = sim.method, norm = regularization)
    }
  }
  unlist(lapply(seq_len(nrow(sentences)), pair_score))
}
# Per-row similarity/distance between sentence embedding matrices.
# NOTE(review): despite the name, `rwmd_model` is forwarded as the
# `method` argument of dist2()/pdist2() -- presumably a method string
# rather than an RWMD model object; confirm against callers.
sim.rwmd.embeddings.matrix <- function(sentences, word_vectors = WordVectors, rwmd_model, checkDistance = FALSE, regularization = "none"){
require(stats)
# One index per sentence pair; lapply() below maps over these indices.
local_rwmd_cos_sim_we_sentences <- 1:nrow(sentences)
calculate.similarity_ <- function(index){
x <- as.embeddings.matrix(sentences[index,1], word_vectors)
y <- as.embeddings.matrix(sentences[index,2], word_vectors)
if(checkDistance){
# Distance computed on the first embedding column of each sentence only.
return(dist2(t(x[,1]), t(y[,1]), method = rwmd_model, norm = regularization))
}else{
# Parallel (row-by-row) distance over the full embedding matrices.
return(pdist2 (x, y, method = rwmd_model, norm = regularization))
}
}
local_rwmd_cos_sim_we_sentences <- unlist(lapply(local_rwmd_cos_sim_we_sentences, function(x) calculate.similarity_(x)))
return(local_rwmd_cos_sim_we_sentences)
}
sim.tsne.embeddings.matrix <- function(sentences, word_vectors = WordVectors,
                                       sim.method = "cosine", regularization = "none",
                                       mode.perplexity = 50, max.iterations = 1000){
  # Project each sentence's embedding matrix down to one t-SNE dimension
  # and compare the two projections of every row of `sentences`.
  #
  # Args:
  #   mode.perplexity, max.iterations: forwarded to Rtsne().
  #
  # Returns: the sim2() results stacked row-wise, one row per pair.
  require(Rtsne)
  pair_score <- function(index){
    x <- as.embeddings.matrix(sentences[index, 1], word_vectors)
    y <- as.embeddings.matrix(sentences[index, 2], word_vectors)
    # $Y holds the low-dimensional embedding produced by t-SNE.
    x <- Rtsne(x, dims = 1, perplexity = mode.perplexity, verbose = FALSE, max_iter = max.iterations, check_duplicates = FALSE)$Y
    y <- Rtsne(y, dims = 1, perplexity = mode.perplexity, verbose = FALSE, max_iter = max.iterations, check_duplicates = FALSE)$Y
    sim2(t(x), t(y), method = sim.method, norm = regularization)
  }
  # Bind all per-pair results once at the end instead of growing the
  # accumulator with rbind() inside the loop (O(n^2) copying).
  do.call(rbind, lapply(seq_len(nrow(sentences)), pair_score))
}
sim.pca.embeddings.vector <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", regularization = "none"){
  # PCA-based similarity on the *summed* embedding vector of each
  # sentence: both vectors are run through princomp() and only the first
  # score column is compared with sim2().
  require(stats)
  pair_score <- function(index){
    x <- as.embeddings.vector(sentences[index, 1], word_vectors)
    y <- as.embeddings.vector(sentences[index, 2], word_vectors)
    x <- as.matrix(princomp(x, cor = FALSE)$scores)  # scores only; PC 1 used
    y <- as.matrix(princomp(y, cor = FALSE)$scores)
    sim2(t(x[, 1]), t(y[, 1]), method = sim.method, norm = regularization)
  }
  # Stack the 1x1 results once instead of growing with rbind() in a loop.
  do.call(rbind, lapply(seq_len(nrow(sentences)), pair_score))
}
sim.tfidf <- function(sentences, sim.method = "cosine", regularization = "none") {
  # TF-IDF similarity per sentence pair: converts the pairs with
  # as.tfidf(), parses each ";"-separated weight string back into a
  # numeric vector, and compares the two vectors of every row.
  sentences <- as.tfidf(sentences)
  parse_weights <- function(encoded){
    # ";"-separated string -> numeric column vector.
    as.matrix(as.numeric(unlist(strsplit(as.character(encoded), ";"))))
  }
  pair_score <- function(index){
    t_vec <- parse_weights(sentences[index, 1])
    h_vec <- parse_weights(sentences[index, 2])
    sim2(t(t_vec), t(h_vec), method = sim.method, norm = regularization)
  }
  # Bind all 1x1 similarity results once instead of growing the result
  # with rbind() inside the loop.
  do.call(rbind, lapply(seq_len(nrow(sentences)), pair_score))
}
sim.diff.penalization <- function(x, y, avereagedSimilarity){
  # Length-difference penalty for a sentence pair: the absolute
  # difference in character counts, scaled by `avereagedSimilarity` and
  # normalized by the longer string's length. (Parameter name kept, typo
  # and all, for backward compatibility.)
  #
  # Returns a value in [0, avereagedSimilarity]; NaN when both strings
  # are empty (0/0), matching the original behavior.
  len_x <- length(unlist(strsplit(x, "")))
  len_y <- length(unlist(strsplit(y, "")))
  # BUG FIX: the original function ended on an assignment inside if/else,
  # so its value was only returned invisibly; return it explicitly.
  # Dividing by max(len_x, len_y) is equivalent to the original branch.
  penalization <- (abs(len_x - len_y) * avereagedSimilarity) / max(len_x, len_y)
  return(penalization)
}
sim.hyponym <- function(s1, s2, hyponym_relations, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
  # Similarity of two sentences based on how many "has_hyponym" relation
  # rows link each token of one sentence (as `target`) to the tokens of
  # the other (as `source`). The per-token counts form two vectors that
  # are compared with sim2() (or dist2() when checkDistance is TRUE).
  require(text2vec)
  count_relations <- function(target_word, tokens){
    unlist(lapply(tokens, function(source_word){
      nrow(hyponym_relations[
        hyponym_relations$target == target_word &
        hyponym_relations$description == "has_hyponym" &
        hyponym_relations$source == source_word, ])
    }))
  }
  tokens1 <- unlist(strsplit(s1, " "))
  tokens2 <- unlist(strsplit(s2, " "))
  counts1 <- as.numeric(unlist(lapply(tokens1, function(w) count_relations(w, tokens2))))
  counts2 <- as.numeric(unlist(lapply(tokens2, function(w) count_relations(w, tokens1))))
  if(checkDistance){
    dist2(t(counts1), t(counts2), method = sim.method, norm = regularization)
  } else {
    sim2(t(counts1), t(counts2), method = sim.method, norm = regularization)
  }
}
# Similarity of two sentences based on counts of "has_hyperonym"
# relation rows linking each token of one sentence (as target) to the
# tokens of the other (as source). Structure mirrors sim.hyponym().
sim.hyperonym <- function(s1, s2, hyponym_relations, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
require(text2vec)
# For token x, count the hyperonym relations to every token in `tokens`.
search.in <- function(x, tokens){
return(unlist(
lapply(
tokens,
function(y) nrow(hyponym_relations[
hyponym_relations$target == x &
hyponym_relations$description == "has_hyperonym" &
hyponym_relations$source == y
,])
)
)
)
}
# Whitespace-tokenize both sentences.
s1 <- unlist(strsplit(s1, " "))
s2 <- unlist(strsplit(s2, " "))
# Per-token relation counts against the other sentence's tokens.
s1_onehot <- unlist(lapply(s1, function(x) search.in(x, s2)))
s2_onehot <- unlist(lapply(s2, function(x) search.in(x, s1)))
s1_onehot <- as.numeric(s1_onehot)
s2_onehot <- as.numeric(s2_onehot)
# Compare the two count vectors; dist2() when a distance is requested.
if(checkDistance){
return(dist2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}else{
return(sim2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}
}
# Similarity of two sentences based on counts of *any* relation rows
# (no description filter, so hyponym and hyperonym rows both count)
# linking each token of one sentence to the tokens of the other.
sim.hyperonym_hyponym <- function(s1, s2, hyponym_relations, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
require(text2vec)
# For token x, count relation rows to every token in `tokens`.
search.in <- function(x, tokens){
return(unlist(
lapply(
tokens,
function(y) nrow(hyponym_relations[
hyponym_relations$target == x &
hyponym_relations$source == y
,])
)
)
)
}
# Whitespace-tokenize both sentences.
s1 <- unlist(strsplit(s1, " "))
s2 <- unlist(strsplit(s2, " "))
# Per-token relation counts against the other sentence's tokens.
s1_onehot <- unlist(lapply(s1, function(x) search.in(x, s2)))
s2_onehot <- unlist(lapply(s2, function(x) search.in(x, s1)))
s1_onehot <- as.numeric(s1_onehot)
s2_onehot <- as.numeric(s2_onehot)
# Compare the two count vectors; dist2() when a distance is requested.
if(checkDistance){
return(dist2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}else{
return(sim2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}
}
| /R language/Commons/similarity.R | no_license | albarsil/unists | R | false | false | 11,649 | r |
as.tfidf <- function(sentences, ngram_bounduary = c(1L, 1L), token_min = 3L, skip_gram_window = 5L){
require(text2vec)
local_tfidf <- TfIdf$new()
get_sentence_tfidf <- function(sentence_pair){
if(is.character(sentence_pair) == FALSE){
sentence_pair <- as.character(sentence_pair)
}
local_sentence_token <- itoken(sentence_pair, tokenizer = space_tokenizer, progressbar = FALSE)
local_sentence_vocab = create_vocabulary(local_sentence_token)
local_sentence_vectorizer = vocab_vectorizer(local_sentence_vocab)
local_dtm <- create_dtm(local_sentence_token, local_sentence_vectorizer)
local_tfidf_matrix <- fit_transform(local_dtm, local_tfidf)
local_tfidf_matrix <- as.matrix(local_tfidf_matrix)
local_tfidf_matrix <- data.frame(
t = paste(as.character(local_tfidf_matrix[1,]), collapse=";"),
h = paste(as.character(local_tfidf_matrix[2,]), collapse=";")
)
return(local_tfidf_matrix)
}
local_list_tfidf <- c()
for(index in 1:nrow(sentences)){
row <- sentences[index, ]
row[1] <- Stemming(row[1])
row[2] <- Stemming(row[2])
row <- get_sentence_tfidf(row)
local_list_tfidf <- rbind(local_list_tfidf, row)
}
return(local_list_tfidf)
}
as.embeddings.vector <- function(sentence, word_vectors, sumFunction = rowSums){
embeddings_phrase <- as.embeddings.matrix(sentence, word_vectors)
embeddings_phrase <- as.matrix(sumFunction(embeddings_phrase))
return(embeddings_phrase)
}
as.embeddings.matrix <- function(sentence, word_vectors){
splited_phrase <- unlist(strsplit(as.character(trimws(sentence, which = "both")), " "))
splited_phrase <- unlist(lapply(splited_phrase, tolower))
get_embeddings <- function(el){
if(el %in% rownames(word_vectors)){
el <- word_vectors[el, , drop = FALSE]
}
else{
el <- matrix(0, ncol(word_vectors))
}
return(as.numeric(el))
}
splited_phrase <- sapply(splited_phrase, get_embeddings)
columns_only_zero <- apply(splited_phrase, 2, function(x) mean(x) == 0)
mean_phrase <- apply(splited_phrase, 1, mean)
splited_phrase[,which(columns_only_zero == TRUE)] <- mean_phrase
return(splited_phrase)
}
sim.embeddings.vector <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", regularization = "none"){
local_we_sentences_t <- sapply(sentences[, 1], function(x) as.embeddings.vector(x, word_vectors, sumFunction = rowSums)) # It is a list of lists of word embeddings
local_we_sentences_h <- sapply(sentences[, 2], function(x) as.embeddings.vector(x, word_vectors, sumFunction = rowSums))
local_cos_sim_we_sentences <-1:nrow(sentences)
calculate.similarity_ <- function(index){
return(sim2(t(as.matrix(local_we_sentences_t[,index])), t(as.matrix(local_we_sentences_h[,index])), method = sim.method, norm = regularization))
}
local_cos_sim_we_sentences <- sapply(local_cos_sim_we_sentences, function(x) calculate.similarity_(x))
return(local_cos_sim_we_sentences)
}
sim.pca.embeddings.matrix <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
require(stats)
local_pca_cos_sim_we_sentences <- 1:nrow(sentences)
calculate.similarity_ <- function(index){
x <- as.embeddings.matrix(sentences[index,1], word_vectors)
y <- as.embeddings.matrix(sentences[index,2], word_vectors)
x <- princomp(x, cor = FALSE)
y <- princomp(y, cor = FALSE)
x <- as.matrix(x$scores) #Utilizar apenas o Score, a PCA 1
y <- as.matrix(y$scores) #Utilizar apenas o Score, a PCA 1
if(checkDistance){
return(dist2(t(x[,1]), t(y[,1]), method = sim.method, norm = regularization))
}else{
return(sim2(t(x[,1]), t(y[,1]), method = sim.method, norm = regularization))
}
}
local_pca_cos_sim_we_sentences <- unlist(lapply(local_pca_cos_sim_we_sentences, function(x) calculate.similarity_(x)))
return(local_pca_cos_sim_we_sentences)
}
sim.svdpca.embeddings.matrix <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
  # Like sim.pca.embeddings.matrix() but uses randomized PCA (rsvd::rpca)
  # and guards against all-zero embedding matrices (every word OOV),
  # which would make the decomposition degenerate.
  require(stats)
  require(rsvd)
  pair_score <- function(index){
    # BUG FIX: the original indexed the global `sentences_train` instead
    # of the `sentences` argument (flagged by the old "# Error line 2474"
    # comment), so the function silently ignored its input.
    x <- as.embeddings.matrix(sentences[index, 1], word_vectors)
    y <- as.embeddings.matrix(sentences[index, 2], word_vectors)
    if(all(x == 0)){
      x <- diag(nrow = nrow(x), ncol = ncol(x))
    }
    if(all(y == 0)){
      y <- diag(nrow = nrow(y), ncol = ncol(y))
    }
    # Keep only the rotated data (scores); PC 1 is compared below.
    x <- as.matrix(rpca(x, retx = TRUE)$x)
    y <- as.matrix(rpca(y, retx = TRUE)$x)
    if(checkDistance){
      dist2(t(x[, 1]), t(y[, 1]), method = sim.method, norm = regularization)
    } else {
      sim2(t(x[, 1]), t(y[, 1]), method = sim.method, norm = regularization)
    }
  }
  unlist(lapply(seq_len(nrow(sentences)), pair_score))
}
sim.rwmd.embeddings.matrix <- function(sentences, word_vectors = WordVectors, rwmd_model, checkDistance = FALSE, regularization = "none"){
require(stats)
local_rwmd_cos_sim_we_sentences <- 1:nrow(sentences)
calculate.similarity_ <- function(index){
x <- as.embeddings.matrix(sentences[index,1], word_vectors)
y <- as.embeddings.matrix(sentences[index,2], word_vectors)
if(checkDistance){
return(dist2(t(x[,1]), t(y[,1]), method = rwmd_model, norm = regularization))
}else{
return(pdist2 (x, y, method = rwmd_model, norm = regularization))
}
}
local_rwmd_cos_sim_we_sentences <- unlist(lapply(local_rwmd_cos_sim_we_sentences, function(x) calculate.similarity_(x)))
return(local_rwmd_cos_sim_we_sentences)
}
sim.tsne.embeddings.matrix <- function(sentences, word_vectors = WordVectors,
sim.method = "cosine", regularization = "none",
mode.perplexity = 50, max.iterations = 1000){
require(Rtsne)
local_tsne_cos_sim_we_sentences <-c()
for(index in 1:nrow(sentences)){
x <- as.embeddings.matrix(sentences[index,1], word_vectors)
y <- as.embeddings.matrix(sentences[index,2], word_vectors)
x <- Rtsne(x, dims = 1, perplexity=mode.perplexity, verbose=FALSE, max_iter = max.iterations, check_duplicates = FALSE)
y <- Rtsne(y, dims = 1, perplexity=mode.perplexity, verbose=FALSE, max_iter = max.iterations, check_duplicates = FALSE)
x <- x$Y #Utilizar apenas o Score, a PCA 1
y <- y$Y #Utilizar apenas o Score, a PCA 1
local_tsne_cos_sim_we_sentences <- rbind(local_tsne_cos_sim_we_sentences, sim2(t(x), t(y), method = sim.method, norm = regularization))
}
return(local_tsne_cos_sim_we_sentences)
}
sim.pca.embeddings.vector <- function(sentences, word_vectors = WordVectors, sim.method = "cosine", regularization = "none"){
require(stats)
local_pca_cos_sim_we_sentences <-c()
for(index in 1:nrow(sentences)){
x <- as.embeddings.vector(sentences[index,1], word_vectors)
y <- as.embeddings.vector(sentences[index,2], word_vectors)
x <- princomp(x, cor = FALSE)
y <- princomp(y, cor = FALSE)
x <- as.matrix(x$scores) #Utilizar apenas o Score, a PCA 1
y <- as.matrix(y$scores) #Utilizar apenas o Score, a PCA 1
local_pca_cos_sim_we_sentences <- rbind(local_pca_cos_sim_we_sentences, sim2(t(x[,1]), t(y[,1]), method = sim.method, norm = regularization))
}
return(local_pca_cos_sim_we_sentences)
}
sim.tfidf <- function(sentences, sim.method = "cosine", regularization = "none") {
sentences <- as.tfidf(sentences)
local_cos_sim_tfidf_sentences <- c()
for(index in 1:nrow(sentences)){
t_row <- sentences[index, 1]
h_row <- sentences[index, 2]
t_row <- strsplit(as.character(t_row), ";")
t_row <- as.numeric(unlist(t_row))
t_row <- as.matrix(t_row)
h_row <- strsplit(as.character(h_row), ";")
h_row <- as.numeric(unlist(h_row))
h_row <- as.matrix(h_row)
local_cos_sim_tfidf_sentences <- rbind(local_cos_sim_tfidf_sentences, sim2(t(t_row), t(h_row), method = sim.method, norm = regularization))
}
return(local_cos_sim_tfidf_sentences)
}
sim.diff.penalization <- function(x, y, avereagedSimilarity){
  # Length-difference penalty for a sentence pair: the absolute
  # difference in character counts, scaled by `avereagedSimilarity` and
  # normalized by the longer string's length. (Parameter name kept, typo
  # and all, for backward compatibility.)
  #
  # Returns a value in [0, avereagedSimilarity]; NaN when both strings
  # are empty (0/0), matching the original behavior.
  len_x <- length(unlist(strsplit(x, "")))
  len_y <- length(unlist(strsplit(y, "")))
  # BUG FIX: the original function ended on an assignment inside if/else,
  # so its value was only returned invisibly; return it explicitly.
  # Dividing by max(len_x, len_y) is equivalent to the original branch.
  penalization <- (abs(len_x - len_y) * avereagedSimilarity) / max(len_x, len_y)
  return(penalization)
}
sim.hyponym <- function(s1, s2, hyponym_relations, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
require(text2vec)
search.in <- function(x, tokens){
return(unlist(
lapply(
tokens,
function(y) nrow(hyponym_relations[
hyponym_relations$target == x &
hyponym_relations$description == "has_hyponym" &
hyponym_relations$source == y
,])
)
)
)
}
s1 <- unlist(strsplit(s1, " "))
s2 <- unlist(strsplit(s2, " "))
s1_onehot <- unlist(lapply(s1, function(x) search.in(x, s2)))
s2_onehot <- unlist(lapply(s2, function(x) search.in(x, s1)))
s1_onehot <- as.numeric(s1_onehot)
s2_onehot <- as.numeric(s2_onehot)
if(checkDistance){
return(dist2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}else{
return(sim2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}
}
sim.hyperonym <- function(s1, s2, hyponym_relations, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
require(text2vec)
search.in <- function(x, tokens){
return(unlist(
lapply(
tokens,
function(y) nrow(hyponym_relations[
hyponym_relations$target == x &
hyponym_relations$description == "has_hyperonym" &
hyponym_relations$source == y
,])
)
)
)
}
s1 <- unlist(strsplit(s1, " "))
s2 <- unlist(strsplit(s2, " "))
s1_onehot <- unlist(lapply(s1, function(x) search.in(x, s2)))
s2_onehot <- unlist(lapply(s2, function(x) search.in(x, s1)))
s1_onehot <- as.numeric(s1_onehot)
s2_onehot <- as.numeric(s2_onehot)
if(checkDistance){
return(dist2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}else{
return(sim2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}
}
sim.hyperonym_hyponym <- function(s1, s2, hyponym_relations, sim.method = "cosine", checkDistance = FALSE, regularization = "none"){
require(text2vec)
search.in <- function(x, tokens){
return(unlist(
lapply(
tokens,
function(y) nrow(hyponym_relations[
hyponym_relations$target == x &
hyponym_relations$source == y
,])
)
)
)
}
s1 <- unlist(strsplit(s1, " "))
s2 <- unlist(strsplit(s2, " "))
s1_onehot <- unlist(lapply(s1, function(x) search.in(x, s2)))
s2_onehot <- unlist(lapply(s2, function(x) search.in(x, s1)))
s1_onehot <- as.numeric(s1_onehot)
s2_onehot <- as.numeric(s2_onehot)
if(checkDistance){
return(dist2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}else{
return(sim2(t(s1_onehot), t(s2_onehot), method = sim.method, norm = regularization))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_process.R
\name{tar_process}
\alias{tar_process}
\title{Get main process info.}
\usage{
tar_process(names = NULL)
}
\arguments{
\item{names}{Optional, names of the data points to return.
If supplied, \code{tar_process()}
returns only the rows of the names you select.
You can supply symbols, a character vector,
or \code{tidyselect} helpers like \code{\link[=starts_with]{starts_with()}}.
If \code{NULL}, all names are selected.}
}
\value{
A data frame with metadata on the most recent main R process
to orchestrate the targets of the current project.
The output includes the \code{pid} of the main process.
}
\description{
Get info on the most recent main R process
to orchestrate the targets of the current project.
}
\details{
The main process is the R process invoked
by \code{\link[=tar_make]{tar_make()}} or similar. If \code{callr_function} is not \code{NULL},
this is an external process, and the \code{pid} in the return value
will not agree with \code{Sys.getpid()} in your current interactive session.
The process may or may not be alive. You may want to
check the status with \code{tar_pid() \%in\% ps::ps_pids()}
before running another call to \code{\link[=tar_make]{tar_make()}}
for the same project.
}
\examples{
if (identical(Sys.getenv("TAR_LONG_EXAMPLES"), "true")) {
tar_dir({ # tar_dir() runs code from a temporary directory.
tar_script({
list(
tar_target(x, seq_len(2)),
tar_target(y, 2 * x, pattern = map(x))
)
}, ask = FALSE)
tar_make()
tar_process()
tar_process(pid)
})
}
}
| /man/tar_process.Rd | permissive | ian-flores/targets | R | false | true | 1,592 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_process.R
\name{tar_process}
\alias{tar_process}
\title{Get main process info.}
\usage{
tar_process(names = NULL)
}
\arguments{
\item{names}{Optional, names of the data points to return.
If supplied, \code{tar_process()}
returns only the rows of the names you select.
You can supply symbols, a character vector,
or \code{tidyselect} helpers like \code{\link[=starts_with]{starts_with()}}.
If \code{NULL}, all names are selected.}
}
\value{
A data frame with metadata on the most recent main R process
to orchestrate the targets of the current project.
The output includes the \code{pid} of the main process.
}
\description{
Get info on the most recent main R process
to orchestrate the targets of the current project.
}
\details{
The main process is the R process invoked
by \code{\link[=tar_make]{tar_make()}} or similar. If \code{callr_function} is not \code{NULL},
this is an external process, and the \code{pid} in the return value
will not agree with \code{Sys.getpid()} in your current interactive session.
The process may or may not be alive. You may want to
check the status with \code{tar_pid() \%in\% ps::ps_pids()}
before running another call to \code{\link[=tar_make]{tar_make()}}
for the same project.
}
\examples{
if (identical(Sys.getenv("TAR_LONG_EXAMPLES"), "true")) {
tar_dir({ # tar_dir() runs code from a temporary directory.
tar_script({
list(
tar_target(x, seq_len(2)),
tar_target(y, 2 * x, pattern = map(x))
)
}, ask = FALSE)
tar_make()
tar_process()
tar_process(pid)
})
}
}
|
# Auto-generated fuzz-test case (AFL-style): feed a fixed argument list
# to the internal multivariance:::match_rows() and print the structure
# of whatever it returns.
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477817892182e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
\docType{data}
\name{qog.demo}
\alias{qog.cs.demo}
\alias{qog.demo}
\alias{qog.ts.demo}
\title{Quality of Government demo data}
\format{two data frames, cross-sectional (\code{cs}) and time series (\code{ts})}
\description{
Selected variables from the Quality of Government
Standard dataset:
}
\details{
\itemize{ \item \code{year}: year of measurement
(\code{ts} only) \item \code{ccode}: country code,
numeric (ISO-3N) \item \code{ccodealp}: country code,
alphabetical (ISO-3C) \item \code{cname}: country name
\item \code{wdi_pop}: population (millions) \item
\code{wdi_gdpc}: GDP per capita (constant dollars) \item
\code{wdi_fr}: fertility rate (average births per woman)
\item \code{chga_hinst}: regime type \item
\code{bl_asy25mf}: average schooling years, both sexes
aged 25+ \item \code{bl_asy15f}: average schooling years,
females aged 15+ \item \code{bl_asy15m}: average
schooling years, males aged 15+ }
}
\references{
Teorell, Jan, Nicholas Charron, Stefan Dahlberg, Soren
Holmberg, Bo Rothstein, Petrus Sundin & Richard Svensson.
2013. \emph{The Quality of Government Dataset}, version
15May13. University of Gothenburg: The Quality of
Government Institute.
}
\seealso{
\code{\link{qogfind}} to search the index of a QOG
dataset
}
\keyword{datasets}
\keyword{qog}
| /man/qog.demo.Rd | no_license | qianmingax/qogdata | R | false | false | 1,320 | rd | \docType{data}
\name{qog.demo}
\alias{qog.cs.demo}
\alias{qog.demo}
\alias{qog.ts.demo}
\title{Quality of Government demo data}
\format{two data frames, cross-sectional (\code{cs}) and time series (\code{ts})}
\description{
Selected variables from the Quality of Government
Standard dataset:
}
\details{
\itemize{ \item \code{year}: year of measurement
(\code{ts} only) \item \code{ccode}: country code,
numeric (ISO-3N) \item \code{ccodealp}: country code,
alphabetical (ISO-3C) \item \code{cname}: country name
\item \code{wdi_pop}: population (millions) \item
\code{wdi_gdpc}: GDP per capita (contant dollars) \item
\code{wdi_fr}: fertility rate (average births per woman)
\item \code{chga_hinst}: regime type \item
\code{bl_asy25mf}: average schooling years, both sexes
aged 25+ \item \code{bl_asy15f}: average schooling years,
females aged 15+ \item \code{bl_asy15m}: average
schooling years, males aged 15+ }
}
\references{
Teorell, Jan, Nicholas Charron, Stefan Dahlberg, Soren
Holmberg, Bo Rothstein, Petrus Sundin & Richard Svensson.
2013. \emph{The Quality of Government Dataset}, version
15May13. University of Gothenburg: The Quality of
Government Institute.
}
\seealso{
\code{\link{qogfind}} to search the index of a QOG
dataset
}
\keyword{datasets}
\keyword{qog}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examples.R
\name{franke}
\alias{franke}
\title{Franke function - http://www.sfu.ca/~ssurjano/franke2d.html}
\usage{
franke(x, y)
}
\arguments{
\item{x}{First dimension}
\item{y}{Second dimension}
}
\value{
The franke function evaluated at x,y
}
\description{
Franke function - http://www.sfu.ca/~ssurjano/franke2d.html
}
\examples{
franke(0,1)
}
| /man/franke.Rd | permissive | sigopt/SigOptR | R | false | true | 426 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examples.R
\name{franke}
\alias{franke}
\title{Franke function - http://www.sfu.ca/~ssurjano/franke2d.html}
\usage{
franke(x, y)
}
\arguments{
\item{x}{First dimension}
\item{y}{Second dimension}
}
\value{
The franke function evaluated at x,y
}
\description{
Franke function - http://www.sfu.ca/~ssurjano/franke2d.html
}
\examples{
franke(0,1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PyObjectTuple.R
\name{pyTuple}
\alias{pyTuple}
\title{Creates a virtual Python tuple}
\usage{
pyTuple(key, value, regFinalizer = FALSE)
}
\arguments{
\item{key}{a character string giving the name of the Python object.}
\item{value}{an optional value, allowed values are vectors, lists and NULL.}
\item{regFinalizer}{a logical indicating if a finalizer should
be registered; the default value is FALSE.}
}
\description{
The function pyTuple creates a virtual Python object
of type PythonInR_Tuple.
}
\details{
If no value is provided a virtual Python tuple for an existing
Python object is created. If the value is NULL an empty
virtual Python object for an empty tuple is created.
If the value is a vector or a list, a new Python tuple
object based on the vector or list is created.
}
\examples{
\dontshow{PythonInR:::pyCranConnect()}
if ( pyIsConnected() ){
pyExec('myPyTuple = (1, 2, 5, "Hello R!")')
# create a virtual Python tuple for an existing tuple
myTuple <- pyTuple("myPyTuple")
myTuple[0]
tryCatch({myTuple[1] <- "should give an error since tuple are not mutable"},
error = function(e) print(e))
myTuple
# create a new Python tuple and virtual tuple
newTuple <- pyTuple('myNewTuple', list(1:3, 'Hello Python'))
newTuple[1]
}
}
| /man/pyTuple.Rd | no_license | zimingd/PythonEmbedInR | R | false | true | 1,373 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PyObjectTuple.R
\name{pyTuple}
\alias{pyTuple}
\title{Creates a virtual Python tuple}
\usage{
pyTuple(key, value, regFinalizer = FALSE)
}
\arguments{
\item{key}{a character string giving the name of the Python object.}
\item{value}{an optional value, allowed values are vectors, lists and NULL.}
\item{regFinalizer}{a logical indicating if a finalizer should be
be registered, the default value is TRUE.}
}
\description{
The function pyTuple creates a virtual Python object
of type PythonInR_Tuple.
}
\details{
If no value is provided a virtual Python tuple for an existing
Python object is created. If the value is NULL an empty
virtual Python object for an empty tuple is created.
If the value is a vector or tuple a new Python
object based on the vector or list is created.
}
\examples{
\dontshow{PythonInR:::pyCranConnect()}
if ( pyIsConnected() ){
pyExec('myPyTuple = (1, 2, 5, "Hello R!")')
# create a virtual Python tuple for an existing tuple
myTuple <- pyTuple("myPyTuple")
myTuple[0]
tryCatch({myTuple[1] <- "should give an error since tuple are not mutable"},
error = function(e) print(e))
myTuple
# create a new Python tuple and virtual tuple
newTuple <- pyTuple('myNewTuple', list(1:3, 'Hello Python'))
newTuple[1]
}
}
|
# Example driver for rjobkernel: queue three R job scripts, then launch
# each one as a background "Rscript" child via processx::process.
library(processx)
library(rjobkernel)

# Queue state read by rjobkernel's enqueue()/bottom()/dequeue().
# NOTE(review): at top level, plain <- already binds in the global
# environment; the original <<- added nothing.
Lsize <- 0
rear <- new.env()
Lqueue <- new.env()

# Enqueue the jobs to run; the payload 'val' is the script path.
enqueue(list(val = '/home/armaninspace/jobs/job1.R'))
enqueue(list(val = '/home/armaninspace/jobs/job2.R'))
enqueue(list(val = '/home/armaninspace/jobs/job3.R'))

count <- 1
while (!isEmptyQueue()) {
  ll <- bottom()                      # peek at the oldest queued job
  procname <- paste0('p', count)      # human-readable label p1, p2, ...
  cat('Process Name:', procname, "\n")
  filepath <- ll$val
  Sys.sleep(10)                       # throttle between launches
  # Launch the job script as a background child process. Kept in its
  # own variable so the label above is not clobbered (the original
  # reused 'procname' for the process handle).
  proc <- process$new("Rscript", filepath)
  print(proc)
  dequeue()
  count <- count + 1
}
| /example.R | no_license | armaninspace/rjobkernel | R | false | false | 510 | r | library(processx)
library(rjobkernel)
# Queue state consumed by rjobkernel's enqueue()/bottom()/dequeue().
# NOTE(review): <<- at top level behaves the same as <- here.
Lsize <<- 0
rear <<- new.env()
Lqueue <<- new.env()
# Enqueue three job scripts; the payload 'val' is the script path.
enqueue(list(val='/home/armaninspace/jobs/job1.R'))
enqueue(list(val='/home/armaninspace/jobs/job2.R'))
enqueue(list(val='/home/armaninspace/jobs/job3.R'))
count <- 1
# Drain the queue: launch each queued script as a background child.
while(!isEmptyQueue()) {
# Peek at the oldest queued job without removing it yet.
ll <- bottom()
procname <- paste0('p',count)
cat('Process Name:',procname,"\n")
filepath <- ll$val
# Pause before each launch -- presumably a throttle; TODO confirm.
Sys.sleep(10)
# NOTE(review): reassigning 'procname' clobbers the label above with
# the processx process handle.
procname <- process$new("Rscript", filepath)
print(procname)
dequeue();
count <- count + 1
}
|
## Chapter 1
# View the structure of loan_data
str(loan_data)
# Load the gmodels package
library(gmodels)
# Call CrossTable() on loan_status
CrossTable(loan_data$loan_status)
# Call CrossTable() on grade and loan_status
CrossTable(loan_data$grade, loan_data$loan_status, prop.r = TRUE, prop.c = FALSE, prop.t = FALSE, prop.chisq = FALSE)
# Create histogram of loan_amnt: hist_1
hist_1 <- hist(loan_data$loan_amnt)
# Print locations of the breaks in hist_1
hist_1$breaks
# Change number of breaks and add labels: hist_2
hist_2 <- hist(loan_data$loan_amnt, breaks = 200, xlab = "Loan amount",
main = "Histogram of the loan amount")
# Plot the age variable
plot(loan_data$age, ylab = "Age")
# Save the outlier's index to index_highage
index_highage <- which(loan_data$age > 122)
# Create data set new_data with outlier deleted
new_data <- loan_data[-index_highage, ]
# Make bivariate scatterplot of age and annual income
plot(loan_data$age, loan_data$annual_inc, xlab = "Age", ylab = "Annual Income")
# Look at summary of loan_data
summary(loan_data$int_rate)
# Get indices of missing interest rates: na_index
na_index <- which(is.na(loan_data$int_rate))
# Remove observations with missing interest rates: loan_data_delrow_na
loan_data_delrow_na <- loan_data[-na_index, ]
# Make copy of loan_data
loan_data_delcol_na <- loan_data
# Delete interest rate column from loan_data_delcol_na
loan_data_delcol_na$int_rate <- NULL
# Compute the median of int_rate
median_ir <- median(loan_data$int_rate, na.rm = TRUE)
# Make copy of loan_data
loan_data_replace <- loan_data
# Replace missing interest rates with median
loan_data_replace$int_rate[na_index] <- median_ir
# Check if the NAs are gone
summary(loan_data_replace$int_rate)
# Make the necessary replacements in the coarse classification example below
loan_data$ir_cat <- rep(NA, length(loan_data$int_rate))
loan_data$ir_cat[which(loan_data$int_rate <= 8)] <- "0-8"
loan_data$ir_cat[which(loan_data$int_rate > 8 & loan_data$int_rate <= 11)] <- "8-11"
loan_data$ir_cat[which(loan_data$int_rate > 11 & loan_data$int_rate <= 13.5)] <- "11-13.5"
loan_data$ir_cat[which(loan_data$int_rate > 13.5)] <- "13.5+"
loan_data$ir_cat[which(is.na(loan_data$int_rate))] <- "Missing"
loan_data$ir_cat <- as.factor(loan_data$ir_cat)
# Look at your new variable using plot()
plot(loan_data$ir_cat)
# Set seed of 567
set.seed(567)
# Store row numbers for training set: index_train
index_train <- sample(1:nrow(loan_data), 2/3 * nrow(loan_data))
# Create training set: training_set
training_set <- loan_data[index_train, ]
# Create test set: test_set
test_set <- loan_data[-index_train, ]
# Create confusion matrix
conf_matrix <- table(test_set$loan_status, model_pred)
# Compute classification accuracy
(conf_matrix[2, 2] + conf_matrix [1, 1]) / (conf_matrix[1, 1] + conf_matrix[1, 2] + conf_matrix[2, 1] + conf_matrix[2, 2])
# Compute sensitivity
conf_matrix[2, 2] / (conf_matrix[2, 2] + conf_matrix[2, 1])
## Chapter 2
# Build a glm model with variable ir_cat as a predictor
log_model_cat <- glm(loan_status ~ ir_cat, family = "binomial", data = training_set)
# Print the parameter estimates
log_model_cat
# Look at the different categories in ir_cat using table()
table(loan_data$ir_cat)
# Build the logistic regression model
log_model_multi <- glm(loan_status ~ age + ir_cat + grade + loan_amnt + annual_inc, family = "binomial", data = training_set)
# Obtain significance levels using summary()
summary(log_model_multi)
# Build the logistic regression model
predictions_all_small <- predict(log_model_small, newdata = test_set, type = "response")
# Look at the range of the object "predictions_all_small"
range(predictions_all_small)
# Change the code below to construct a logistic regression model using all available predictors in the data set
log_model_small <- glm(loan_status ~ age + ir_cat, family = "binomial", data = training_set)
log_model_full <- glm(loan_status ~ ., family = "binomial", data = training_set)
# Make PD-predictions for all test set elements using the the full logistic regression model
predictions_all_full <- predict(log_model_full, newdata = test_set, type = "response")
# Look at the predictions range
range(predictions_all_full)
# The code for the logistic regression model and the predictions is given below
log_model_full <- glm(loan_status ~ ., family = "binomial", data = training_set)
predictions_all_full <- predict(log_model_full, newdata = test_set, type = "response")
# Make a binary predictions-vector using a cut-off of 15%
pred_cutoff_15 <- ifelse(predictions_all_full > 0.15, 1, 0)
# Construct a confusion matrix
table(test_set$loan_status, pred_cutoff_15)
# Fit the logit, probit and cloglog-link logistic regression models
log_model_logit <- glm(loan_status ~ age + emp_cat + ir_cat + loan_amnt,
family = binomial(link = logit), data = training_set)
log_model_probit <- glm(loan_status ~ age + emp_cat + ir_cat + loan_amnt, family = binomial(link = probit), data = training_set)
log_model_cloglog <- glm(loan_status ~ age + emp_cat + ir_cat + loan_amnt,
family = binomial(link = cloglog), data = training_set)
# Make predictions for all models using the test set
predictions_logit <- predict(log_model_logit, newdata = test_set, type = "response")
predictions_probit <- predict(log_model_probit, newdata = test_set, type = "response")
predictions_cloglog <- predict(log_model_cloglog, newdata = test_set, type = "response")
# Use a cut-off of 14% to make binary predictions-vectors
cutoff <- 0.14
class_pred_logit <- ifelse(predictions_logit > cutoff, 1, 0)
class_pred_probit <- ifelse(predictions_probit > cutoff, 1, 0)
class_pred_cloglog <- ifelse(predictions_cloglog > cutoff, 1, 0)
# Make a confusion matrix for the three models
tab_class_logit <- table(true_val,class_pred_logit)
tab_class_probit <- table(true_val, class_pred_probit)
tab_class_cloglog <- table(true_val, class_pred_cloglog)
# Compute the classification accuracy for all three models
acc_logit <- sum(diag(tab_class_logit)) / nrow(test_set)
acc_probit <- sum(diag(tab_class_probit)) / nrow(test_set)
acc_cloglog <- sum(diag(tab_class_cloglog)) / nrow(test_set)
## Chapter 3
# The Gini-measure of the root node is given below
gini_root <- 2 * 89 / 500 * 411 / 500
# Compute the Gini measure for the left leaf node
gini_ll <- 2 * (401/446) * (45/446)
# Compute the Gini measure for the right leaf node
gini_rl <- 2 * (10/54) * (44/54)
# Compute the gain
gain <- gini_root - 446 / 500 * gini_ll - 54 / 500 * gini_rl
# compare the gain-column in small_tree$splits with our computed gain, multiplied by 500, and assure they are the same
small_tree$splits
improve <- gain * 500
# Load package rpart in your workspace.
library(rpart)
# Change the code provided in the video such that a decision tree is constructed using the undersampled training set. Include rpart.control to relax the complexity parameter to 0.001.
tree_undersample <- rpart(loan_status ~ ., method = "class",
data = undersampled_training_set, control = rpart.control(cp = 0.001))
# Plot the decision tree
plot(tree_undersample, uniform = TRUE)
# Add labels to the decision tree
text(tree_undersample)
# Change the code below such that a tree is constructed with adjusted prior probabilities.
tree_prior <- rpart(loan_status ~ ., method = "class",
data = training_set, parms = list(prior = c(0.7, 0.3)), control = rpart.control(cp = 0.001))
# Plot the decision tree
plot(tree_prior, uniform = TRUE)
# Add labels to the decision tree
text(tree_prior)
# Change the code below such that a decision tree is constructed using a loss matrix penalizing 10 times more heavily for misclassified defaults.
tree_loss_matrix <- rpart(loan_status ~ ., method = "class",
data = training_set, parms = list(loss = matrix(c(0, 10, 1, 0), ncol = 2)), control = rpart.control(cp = 0.001))
# Plot the decision tree
plot(tree_loss_matrix, uniform = TRUE)
# Add labels to the decision tree
text(tree_loss_matrix)
# tree_prior is loaded in your workspace
# Plot the cross-validated error rate as a function of the complexity parameter
plotcp(tree_prior)
# Use printcp() to identify for which complexity parameter the cross-validated error rate is minimized.
printcp(tree_prior)
# Create an index for of the row with the minimum xerror
index <- which.min(tree_prior$cptable[ , "xerror"])
# Create tree_min
tree_min <- tree_prior$cptable[index, "CP"]
# Prune the tree using tree_min
ptree_prior <- prune(tree_prior, cp = tree_min)
# Use prp() to plot the pruned tree
prp(ptree_prior)
# set a seed and run the code to construct the tree with the loss matrix again
set.seed(345)
tree_loss_matrix <- rpart(loan_status ~ ., method = "class", data = training_set,
parms = list(loss=matrix(c(0, 10, 1, 0), ncol = 2)),
control = rpart.control(cp = 0.001))
# Plot the cross-validated error rate as a function of the complexity parameter
plotcp(tree_loss_matrix)
# Prune the tree using cp = 0.0012788
ptree_loss_matrix <- prune(tree_loss_matrix, cp = 0.0012788)
# Use prp() and argument extra = 1 to plot the pruned tree
prp(ptree_loss_matrix, extra = 1)
# Fit a weighted decision tree with relaxed split controls, then prune it
# at the complexity parameter minimizing the cross-validated error.
set.seed(345)
tree_weights <- rpart(loan_status ~ ., method = "class",
                      data = training_set, weights = case_weights,
                      control = rpart.control(minsplit = 5, minbucket = 2, cp = 0.001))
# Plot the cross-validated error rate for a changing cp
plotcp(tree_weights)
# Row of the CP table with the minimum cross-validated error.
# (Was tree_weights$cp, which only resolved via $ partial matching of
# the 'cptable' component -- spelled out here.)
index <- which.min(tree_weights$cptable[ , "xerror"])
# Complexity parameter at that row
tree_min <- tree_weights$cptable[index, "CP"]
# Prune the tree at that complexity parameter
ptree_weights <- prune(tree_weights, cp = tree_min)
# Plot the pruned tree using the rpart.plot()-package
prp(ptree_weights, extra = 1)
# Make predictions for each of the pruned trees using the test set.
pred_undersample <- predict(ptree_undersample, newdata = test_set, type = "class")
pred_prior <- predict(ptree_prior, newdata = test_set, type = "class")
pred_loss_matrix <- predict(ptree_loss_matrix, newdata = test_set, type = "class")
pred_weights <- predict(ptree_weights, newdata = test_set, type = "class")
# Construct confusion matrices using the predictions.
confmat_undersample <- table(test_set$loan_status, pred_undersample)
confmat_prior <- table(test_set$loan_status, pred_prior)
confmat_loss_matrix <- table(test_set$loan_status, pred_loss_matrix)
confmat_weights <- table(test_set$loan_status, pred_weights)
# Compute the accuracies
acc_undersample <- sum(diag(confmat_undersample)) / nrow(test_set)
acc_prior <- sum(diag(confmat_prior)) / nrow(test_set)
acc_loss_matrix <- sum(diag(confmat_loss_matrix)) / nrow(test_set)
acc_weights <- sum(diag(confmat_weights)) / nrow(test_set)
## Chapter 4
# Make predictions for the probability of default using the pruned tree and the test set.
prob_default_prior <- predict(ptree_prior, newdata = test_set)[ ,2]
# Obtain the cutoff for acceptance rate 80%
cutoff_prior <- quantile(prob_default_prior, 0.8)
# Obtain the binary predictions.
bin_pred_prior_80 <- ifelse(prob_default_prior > cutoff_prior, 1, 0)
# Obtain the actual default status for the accepted loans
accepted_status_prior_80 <- test_set$loan_status[bin_pred_prior_80 == 0]
# Obtain the bad rate for the accepted loans
sum(accepted_status_prior_80)/length(accepted_status_prior_80)
# Have a look at the function strategy_bank
strategy_bank
# Apply the function strategy_bank to both predictions_cloglog and predictions_loss_matrix
strategy_cloglog <- strategy_bank(predictions_cloglog)
strategy_loss_matrix <- strategy_bank(predictions_loss_matrix)
# Obtain the strategy tables for both prediction-vectors
strategy_cloglog$table
strategy_loss_matrix$table
# Draw the strategy functions
par(mfrow = c(1,2))
plot(strategy_cloglog$accept_rate, strategy_cloglog$bad_rate,
type = "l", xlab = "Acceptance rate", ylab = "Bad rate",
lwd = 2, main = "logistic regression")
plot(strategy_loss_matrix$accept_rate, strategy_loss_matrix$bad_rate,
type = "l", xlab = "Acceptance rate",
ylab = "Bad rate", lwd = 2, main = "tree")
# Load the pROC-package
library(pROC)
# Construct the objects containing ROC-information
ROC_logit <- roc(test_set$loan_status, predictions_logit)
ROC_probit <- roc(test_set$loan_status, predictions_probit)
ROC_cloglog <- roc(test_set$loan_status, predictions_cloglog)
ROC_all_full <- roc(test_set$loan_status, predictions_all_full)
# Draw all ROCs on one plot
plot(ROC_logit)
lines(ROC_probit, col= "blue")
lines(ROC_cloglog, col= "red")
lines(ROC_all_full, col= "green")
# Compute the AUCs
auc(ROC_logit)
auc(ROC_probit)
auc(ROC_cloglog)
auc(ROC_all_full)
# Construct the objects containing ROC-information
ROC_undersample <- roc(test_set$loan_status, predictions_undersample)
ROC_prior <- roc(test_set$loan_status, predictions_prior)
ROC_loss_matrix <- roc(test_set$loan_status, predictions_loss_matrix)
ROC_weights <- roc(test_set$loan_status, predictions_weights)
# Draw the ROC-curves in one plot
plot(ROC_undersample)
lines(ROC_prior, col="blue")
lines(ROC_loss_matrix, col="red")
lines(ROC_weights, col="green")
# Compute the AUCs
auc(ROC_undersample)
auc(ROC_prior)
auc(ROC_loss_matrix)
auc(ROC_weights)
# Build four models each time deleting one variable in log_3_remove_ir
log_4_remove_amnt <- glm(loan_status ~ grade + annual_inc + emp_cat,
family = binomial, data = training_set)
log_4_remove_grade <- glm(loan_status ~ annual_inc + emp_cat,
family = binomial, data = training_set)
log_4_remove_inc <- glm(loan_status ~ grade + emp_cat,
family = binomial, data = training_set)
log_4_remove_emp <- glm(loan_status ~ grade + annual_inc,
family = binomial, data = training_set)
# Make PD-predictions for each of the models
pred_4_remove_amnt <- predict(log_4_remove_amnt, newdata = test_set, type = "response")
pred_4_remove_grade <- predict(log_4_remove_grade, newdata = test_set, type = "response")
pred_4_remove_inc <- predict(log_4_remove_inc, newdata = test_set, type = "response")
pred_4_remove_emp <- predict(log_4_remove_emp, newdata = test_set, type = "response")
# Compute the AUCs
auc(test_set$loan_status, pred_4_remove_amnt)
auc(test_set$loan_status, pred_4_remove_grade)
auc(test_set$loan_status, pred_4_remove_inc)
auc(test_set$loan_status, pred_4_remove_emp)
# Build three models each time deleting one variable in log_4_remove_amnt
log_5_remove_grade <- glm(loan_status ~ annual_inc + emp_cat, family = binomial, data = training_set)
log_5_remove_inc <- glm(loan_status ~ grade + emp_cat , family = binomial, data = training_set)
log_5_remove_emp <- glm(loan_status ~ grade + annual_inc, family = binomial, data = training_set)
# Make PD-predictions for each of the models
pred_5_remove_grade <- predict(log_5_remove_grade, newdata = test_set, type = "response")
pred_5_remove_inc <- predict(log_5_remove_inc, newdata = test_set, type = "response")
pred_5_remove_emp <- predict(log_5_remove_emp, newdata = test_set, type = "response")
# Compute the AUCs
auc(test_set$loan_status, pred_5_remove_grade)
auc(test_set$loan_status, pred_5_remove_inc)
auc(test_set$loan_status, pred_5_remove_emp)
# Plot the ROC-curve for the best model here
plot(roc(test_set$loan_status, pred_4_remove_amnt)) | /Quantitative Analyst with R/Credit Risk Modeling in R/Credit Risk Modeling in R.R | no_license | Lockyar/LearningR | R | false | false | 15,612 | r | ## Chapter 1
# View the structure of loan_data
str(loan_data)
# Load the gmodels package
library(gmodels)
# Call CrossTable() on loan_status
CrossTable(loan_data$loan_status)
# Call CrossTable() on grade and loan_status
CrossTable(loan_data$grade, loan_data$loan_status, prop.r = TRUE, prop.c = FALSE, prop.t = FALSE, prop.chisq = FALSE)
# Create histogram of loan_amnt: hist_1
hist_1 <- hist(loan_data$loan_amnt)
# Print locations of the breaks in hist_1
hist_1$breaks
# Change number of breaks and add labels: hist_2
hist_2 <- hist(loan_data$loan_amnt, breaks = 200, xlab = "Loan amount",
main = "Histogram of the loan amount")
# Plot the age variable
plot(loan_data$age, ylab = "Age")
# Save the outlier's index to index_highage
index_highage <- which(loan_data$age > 122)
# Create data set new_data with outlier deleted
new_data <- loan_data[-index_highage, ]
# Make bivariate scatterplot of age and annual income
plot(loan_data$age, loan_data$annual_inc, xlab = "Age", ylab = "Annual Income")
# Look at summary of loan_data
summary(loan_data$int_rate)
# Get indices of missing interest rates: na_index
na_index <- which(is.na(loan_data$int_rate))
# Remove observations with missing interest rates: loan_data_delrow_na
loan_data_delrow_na <- loan_data[-na_index, ]
# Make copy of loan_data
loan_data_delcol_na <- loan_data
# Delete interest rate column from loan_data_delcol_na
loan_data_delcol_na$int_rate <- NULL
# Compute the median of int_rate
median_ir <- median(loan_data$int_rate, na.rm = TRUE)
# Make copy of loan_data
loan_data_replace <- loan_data
# Replace missing interest rates with median
loan_data_replace$int_rate[na_index] <- median_ir
# Check if the NAs are gone
summary(loan_data_replace$int_rate)
# Make the necessary replacements in the coarse classification example below
loan_data$ir_cat <- rep(NA, length(loan_data$int_rate))
loan_data$ir_cat[which(loan_data$int_rate <= 8)] <- "0-8"
loan_data$ir_cat[which(loan_data$int_rate > 8 & loan_data$int_rate <= 11)] <- "8-11"
loan_data$ir_cat[which(loan_data$int_rate > 11 & loan_data$int_rate <= 13.5)] <- "11-13.5"
loan_data$ir_cat[which(loan_data$int_rate > 13.5)] <- "13.5+"
loan_data$ir_cat[which(is.na(loan_data$int_rate))] <- "Missing"
loan_data$ir_cat <- as.factor(loan_data$ir_cat)
# Look at your new variable using plot()
plot(loan_data$ir_cat)
# Set seed of 567
set.seed(567)
# Store row numbers for training set: index_train
index_train <- sample(1:nrow(loan_data), 2/3 * nrow(loan_data))
# Create training set: training_set
training_set <- loan_data[index_train, ]
# Create test set: test_set
test_set <- loan_data[-index_train, ]
# Create confusion matrix
conf_matrix <- table(test_set$loan_status, model_pred)
# Compute classification accuracy
(conf_matrix[2, 2] + conf_matrix [1, 1]) / (conf_matrix[1, 1] + conf_matrix[1, 2] + conf_matrix[2, 1] + conf_matrix[2, 2])
# Compute sensitivity
conf_matrix[2, 2] / (conf_matrix[2, 2] + conf_matrix[2, 1])
## Chapter 2
# Build a glm model with variable ir_cat as a predictor
log_model_cat <- glm(loan_status ~ ir_cat, family = "binomial", data = training_set)
# Print the parameter estimates
log_model_cat
# Look at the different categories in ir_cat using table()
table(loan_data$ir_cat)
# Build the logistic regression model
log_model_multi <- glm(loan_status ~ age + ir_cat + grade + loan_amnt + annual_inc, family = "binomial", data = training_set)
# Obtain significance levels using summary()
summary(log_model_multi)
# Build the logistic regression model
predictions_all_small <- predict(log_model_small, newdata = test_set, type = "response")
# Look at the range of the object "predictions_all_small"
range(predictions_all_small)
# Change the code below to construct a logistic regression model using all available predictors in the data set
log_model_small <- glm(loan_status ~ age + ir_cat, family = "binomial", data = training_set)
log_model_full <- glm(loan_status ~ ., family = "binomial", data = training_set)
# Make PD-predictions for all test set elements using the the full logistic regression model
predictions_all_full <- predict(log_model_full, newdata = test_set, type = "response")
# Look at the predictions range
range(predictions_all_full)
# The code for the logistic regression model and the predictions is given below
log_model_full <- glm(loan_status ~ ., family = "binomial", data = training_set)
predictions_all_full <- predict(log_model_full, newdata = test_set, type = "response")
# Make a binary predictions-vector using a cut-off of 15%
pred_cutoff_15 <- ifelse(predictions_all_full > 0.15, 1, 0)
# Construct a confusion matrix
table(test_set$loan_status, pred_cutoff_15)
# Fit the logit, probit and cloglog-link logistic regression models
log_model_logit <- glm(loan_status ~ age + emp_cat + ir_cat + loan_amnt,
family = binomial(link = logit), data = training_set)
log_model_probit <- glm(loan_status ~ age + emp_cat + ir_cat + loan_amnt, family = binomial(link = probit), data = training_set)
log_model_cloglog <- glm(loan_status ~ age + emp_cat + ir_cat + loan_amnt,
family = binomial(link = cloglog), data = training_set)
# Make predictions for all models using the test set
predictions_logit <- predict(log_model_logit, newdata = test_set, type = "response")
predictions_probit <- predict(log_model_probit, newdata = test_set, type = "response")
predictions_cloglog <- predict(log_model_cloglog, newdata = test_set, type = "response")
# Use a cut-off of 14% to make binary predictions-vectors
cutoff <- 0.14
class_pred_logit <- ifelse(predictions_logit > cutoff, 1, 0)
class_pred_probit <- ifelse(predictions_probit > cutoff, 1, 0)
class_pred_cloglog <- ifelse(predictions_cloglog > cutoff, 1, 0)
# Make a confusion matrix for the three models
tab_class_logit <- table(true_val,class_pred_logit)
tab_class_probit <- table(true_val, class_pred_probit)
tab_class_cloglog <- table(true_val, class_pred_cloglog)
# Compute the classification accuracy for all three models
acc_logit <- sum(diag(tab_class_logit)) / nrow(test_set)
acc_probit <- sum(diag(tab_class_probit)) / nrow(test_set)
acc_cloglog <- sum(diag(tab_class_cloglog)) / nrow(test_set)
## Chapter 3
# The Gini-measure of the root node is given below
gini_root <- 2 * 89 / 500 * 411 / 500
# Compute the Gini measure for the left leaf node
gini_ll <- 2 * (401/446) * (45/446)
# Compute the Gini measure for the right leaf node
gini_rl <- 2 * (10/54) * (44/54)
# Compute the gain
gain <- gini_root - 446 / 500 * gini_ll - 54 / 500 * gini_rl
# compare the gain-column in small_tree$splits with our computed gain, multiplied by 500, and assure they are the same
small_tree$splits
improve <- gain * 500
# Load package rpart in your workspace.
library(rpart)
# Change the code provided in the video such that a decision tree is constructed using the undersampled training set. Include rpart.control to relax the complexity parameter to 0.001.
tree_undersample <- rpart(loan_status ~ ., method = "class",
data = undersampled_training_set, control = rpart.control(cp = 0.001))
# Plot the decision tree
plot(tree_undersample, uniform = TRUE)
# Add labels to the decision tree
text(tree_undersample)
# Change the code below such that a tree is constructed with adjusted prior probabilities.
tree_prior <- rpart(loan_status ~ ., method = "class",
data = training_set, parms = list(prior = c(0.7, 0.3)), control = rpart.control(cp = 0.001))
# Plot the decision tree
plot(tree_prior, uniform = TRUE)
# Add labels to the decision tree
text(tree_prior)
# Change the code below such that a decision tree is constructed using a loss matrix penalizing 10 times more heavily for misclassified defaults.
tree_loss_matrix <- rpart(loan_status ~ ., method = "class",
data = training_set, parms = list(loss = matrix(c(0, 10, 1, 0), ncol = 2)), control = rpart.control(cp = 0.001))
# Plot the decision tree
plot(tree_loss_matrix, uniform = TRUE)
# Add labels to the decision tree
text(tree_loss_matrix)
# tree_prior is loaded in your workspace
# Plot the cross-validated error rate as a function of the complexity parameter
plotcp(tree_prior)
# Use printcp() to identify for which complexity parameter the cross-validated error rate is minimized.
printcp(tree_prior)
# Create an index for of the row with the minimum xerror
index <- which.min(tree_prior$cptable[ , "xerror"])
# Create tree_min
tree_min <- tree_prior$cptable[index, "CP"]
# Prune the tree using tree_min
ptree_prior <- prune(tree_prior, cp = tree_min)
# Use prp() to plot the pruned tree
prp(ptree_prior)
# set a seed and run the code to construct the tree with the loss matrix again
set.seed(345)
tree_loss_matrix <- rpart(loan_status ~ ., method = "class", data = training_set,
parms = list(loss=matrix(c(0, 10, 1, 0), ncol = 2)),
control = rpart.control(cp = 0.001))
# Plot the cross-validated error rate as a function of the complexity parameter
plotcp(tree_loss_matrix)
# Prune the tree using cp = 0.0012788
ptree_loss_matrix <- prune(tree_loss_matrix, cp = 0.0012788)
# Use prp() and argument extra = 1 to plot the pruned tree
prp(ptree_loss_matrix, extra = 1)
# Fit a weighted decision tree with relaxed split controls, then prune it
# at the complexity parameter minimizing the cross-validated error.
set.seed(345)
tree_weights <- rpart(loan_status ~ ., method = "class",
                      data = training_set, weights = case_weights,
                      control = rpart.control(minsplit = 5, minbucket = 2, cp = 0.001))
# Plot the cross-validated error rate for a changing cp
plotcp(tree_weights)
# Row of the CP table with the minimum cross-validated error.
# (Was tree_weights$cp, which only resolved via $ partial matching of
# the 'cptable' component -- spelled out here.)
index <- which.min(tree_weights$cptable[ , "xerror"])
# Complexity parameter at that row
tree_min <- tree_weights$cptable[index, "CP"]
# Prune the tree at that complexity parameter
ptree_weights <- prune(tree_weights, cp = tree_min)
# Plot the pruned tree using the rpart.plot()-package
prp(ptree_weights, extra = 1)
# Make predictions for each of the pruned trees using the test set.
pred_undersample <- predict(ptree_undersample, newdata = test_set, type = "class")
pred_prior <- predict(ptree_prior, newdata = test_set, type = "class")
pred_loss_matrix <- predict(ptree_loss_matrix, newdata = test_set, type = "class")
pred_weights <- predict(ptree_weights, newdata = test_set, type = "class")
# Construct confusion matrices using the predictions.
confmat_undersample <- table(test_set$loan_status, pred_undersample)
confmat_prior <- table(test_set$loan_status, pred_prior)
confmat_loss_matrix <- table(test_set$loan_status, pred_loss_matrix)
confmat_weights <- table(test_set$loan_status, pred_weights)
# Compute the accuracies
acc_undersample <- sum(diag(confmat_undersample)) / nrow(test_set)
acc_prior <- sum(diag(confmat_prior)) / nrow(test_set)
acc_loss_matrix <- sum(diag(confmat_loss_matrix)) / nrow(test_set)
acc_weights <- sum(diag(confmat_weights)) / nrow(test_set)
## Chapter 4
# Make predictions for the probability of default using the pruned tree and the test set.
prob_default_prior <- predict(ptree_prior, newdata = test_set)[ ,2]
# Obtain the cutoff for acceptance rate 80%
cutoff_prior <- quantile(prob_default_prior, 0.8)
# Obtain the binary predictions.
bin_pred_prior_80 <- ifelse(prob_default_prior > cutoff_prior, 1, 0)
# Obtain the actual default status for the accepted loans
accepted_status_prior_80 <- test_set$loan_status[bin_pred_prior_80 == 0]
# Obtain the bad rate for the accepted loans
sum(accepted_status_prior_80)/length(accepted_status_prior_80)
# Have a look at the function strategy_bank
strategy_bank
# Apply the function strategy_bank to both predictions_cloglog and predictions_loss_matrix
strategy_cloglog <- strategy_bank(predictions_cloglog)
strategy_loss_matrix <- strategy_bank(predictions_loss_matrix)
# Obtain the strategy tables for both prediction-vectors
strategy_cloglog$table
strategy_loss_matrix$table
# Draw the strategy functions
par(mfrow = c(1,2))
plot(strategy_cloglog$accept_rate, strategy_cloglog$bad_rate,
type = "l", xlab = "Acceptance rate", ylab = "Bad rate",
lwd = 2, main = "logistic regression")
plot(strategy_loss_matrix$accept_rate, strategy_loss_matrix$bad_rate,
type = "l", xlab = "Acceptance rate",
ylab = "Bad rate", lwd = 2, main = "tree")
# Load the pROC-package
library(pROC)
# Construct the objects containing ROC-information
ROC_logit <- roc(test_set$loan_status, predictions_logit)
ROC_probit <- roc(test_set$loan_status, predictions_probit)
ROC_cloglog <- roc(test_set$loan_status, predictions_cloglog)
ROC_all_full <- roc(test_set$loan_status, predictions_all_full)
# Draw all ROCs on one plot
plot(ROC_logit)
lines(ROC_probit, col= "blue")
lines(ROC_cloglog, col= "red")
lines(ROC_all_full, col= "green")
# Compute the AUCs
auc(ROC_logit)
auc(ROC_probit)
auc(ROC_cloglog)
auc(ROC_all_full)
# Construct the objects containing ROC-information
ROC_undersample <- roc(test_set$loan_status, predictions_undersample)
ROC_prior <- roc(test_set$loan_status, predictions_prior)
ROC_loss_matrix <- roc(test_set$loan_status, predictions_loss_matrix)
ROC_weights <- roc(test_set$loan_status, predictions_weights)
# Draw the ROC-curves in one plot
plot(ROC_undersample)
lines(ROC_prior, col="blue")
lines(ROC_loss_matrix, col="red")
lines(ROC_weights, col="green")
# Compute the AUCs
auc(ROC_undersample)
auc(ROC_prior)
auc(ROC_loss_matrix)
auc(ROC_weights)
# Build four models each time deleting one variable in log_3_remove_ir
log_4_remove_amnt <- glm(loan_status ~ grade + annual_inc + emp_cat,
family = binomial, data = training_set)
log_4_remove_grade <- glm(loan_status ~ annual_inc + emp_cat,
family = binomial, data = training_set)
log_4_remove_inc <- glm(loan_status ~ grade + emp_cat,
family = binomial, data = training_set)
log_4_remove_emp <- glm(loan_status ~ grade + annual_inc,
family = binomial, data = training_set)
# Make PD-predictions for each of the models
pred_4_remove_amnt <- predict(log_4_remove_amnt, newdata = test_set, type = "response")
pred_4_remove_grade <- predict(log_4_remove_grade, newdata = test_set, type = "response")
pred_4_remove_inc <- predict(log_4_remove_inc, newdata = test_set, type = "response")
pred_4_remove_emp <- predict(log_4_remove_emp, newdata = test_set, type = "response")
# Compute the AUCs
auc(test_set$loan_status, pred_4_remove_amnt)
auc(test_set$loan_status, pred_4_remove_grade)
auc(test_set$loan_status, pred_4_remove_inc)
auc(test_set$loan_status, pred_4_remove_emp)
# Build three models each time deleting one variable in log_4_remove_amnt
log_5_remove_grade <- glm(loan_status ~ annual_inc + emp_cat, family = binomial, data = training_set)
log_5_remove_inc <- glm(loan_status ~ grade + emp_cat , family = binomial, data = training_set)
log_5_remove_emp <- glm(loan_status ~ grade + annual_inc, family = binomial, data = training_set)
# Make PD-predictions for each of the models
pred_5_remove_grade <- predict(log_5_remove_grade, newdata = test_set, type = "response")
pred_5_remove_inc <- predict(log_5_remove_inc, newdata = test_set, type = "response")
pred_5_remove_emp <- predict(log_5_remove_emp, newdata = test_set, type = "response")
# Compute the AUCs
auc(test_set$loan_status, pred_5_remove_grade)
auc(test_set$loan_status, pred_5_remove_inc)
auc(test_set$loan_status, pred_5_remove_emp)
# Plot the ROC-curve for the best model here
plot(roc(test_set$loan_status, pred_4_remove_amnt)) |
# Per-ASV summary statistics for a feature table.
#
# For each ASV (row) computes: detection rate across samples, mean,
# median, standard deviation, coefficient of variation, and the mean
# relative abundance (%) after per-sample total-sum scaling.
#
# @param otutab numeric feature table, features (ASVs) x samples.
# @return numeric matrix with one row per ASV and columns
#   rate, mean, median, sd, cv, RA.
asv_stats <- function(otutab) {
  # Total-sum scaling: each sample (column) expressed as percentages.
  # NOTE(review): a sample whose counts sum to 0 yields NaN here.
  norm <- t(t(otutab) / colSums(otutab)) * 100
  per_asv <- t(apply(otutab, 1, function(row) {
    m <- mean(row)
    s <- sd(row)          # computed once and reused for cv
    c(rate = mean(row != 0),  # fraction of samples where the ASV is detected
      mean = m,
      median = median(row),
      sd = s,
      cv = s / m)         # NaN for an all-zero row (m == 0)
  }))
  cbind(per_asv, RA = rowMeans(norm))
}
mystats=asv_stats(otutab)
| /asv_filter.R | no_license | wangkai7/16s-R-code | R | false | false | 522 | r | #计算asv在样本中的检出率、均值、中位数、标准差、变异系数、相对丰度
# Input feature table: features (ASVs) as rows, samples as columns.
# For every feature, report its detection rate, mean, median, standard
# deviation, coefficient of variation, and mean relative abundance (%).
asv_stats = function(otutab) {
  # Relative abundance (%) via per-sample total-sum scaling.
  rel_abund <- sweep(otutab, 2, colSums(otutab), "/") * 100
  # Row-wise summaries; apply() yields one named stats vector per feature.
  summaries <- t(apply(otutab, 1, function(counts) {
    avg <- mean(counts)
    dev <- sd(counts)
    c(rate = 1 - sum(counts == 0, na.rm = TRUE) / length(counts),
      mean = avg,
      median = median(counts),
      sd = dev,
      cv = dev / avg)
  }))
  cbind(summaries, RA = rowMeans(rel_abund))
}
# Summarise the (externally loaded) ASV feature table with asv_stats() above.
mystats=asv_stats(otutab)
|
# ---- One-time setup: install monocle3 and its Bioconductor dependencies ----
# (comment these install lines out after the first successful run)
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install()
BiocManager::install(c('BiocGenerics', 'DelayedArray', 'DelayedMatrixStats',
                       'limma', 'S4Vectors', 'SingleCellExperiment',
                       'SummarizedExperiment', 'batchelor', 'Matrix.utils'))
install.packages("devtools")
devtools::install_github('cole-trapnell-lab/leidenbase')
devtools::install_github('cole-trapnell-lab/monocle3')
library(monocle3)
library(readxl)
# ---- Load expression matrix (log2 TPM, genes x cells) and metadata ----
log2TPM_matrix <- read.delim("~/Spring20/Genomics/project/GSM4307111_GEO_processed_BC159-T_3_log2TPM_matrix_final.txt", row.names=1)
cell_metadata <- read_excel("Spring20/Genomics/project/GSE145137_BC159-T_3_All_SC_final_QC_Celltype_Information.xlsx")
gene_metadata <- data.frame(rownames(log2TPM_matrix))
# making things work for cds: coerce to numeric matrix and align rownames of
# the annotation tables with the expression matrix
log2TPM_matrix <- data.matrix(log2TPM_matrix)
# NOTE(review): read_excel() returns a tibble; setting rownames on a tibble is
# deprecated — confirm new_cell_data_set() still matches cells as intended.
rownames(cell_metadata) <- cell_metadata$Samples
rownames(gene_metadata) <- rownames(log2TPM_matrix)
# load data into cds (monocle3 cell_data_set): sparse expression matrix plus
# aligned cell- and gene-level annotation tables
cds <- new_cell_data_set(as(log2TPM_matrix, "sparseMatrix"),
                         cell_metadata = cell_metadata,
                         gene_metadata = gene_metadata)
# Dimensionality-reduction preprocessing with 100 components, then inspect
# the variance explained to choose a sensible number of dimensions
cds <- preprocess_cds(cds, num_dim = 100)
plot_pc_variance_explained(cds)
# 2-D embedding, coloured by the annotated cell type
cds_2d <- reduce_dimension(cds)
plot_cells(cds_2d, color_cells_by="cell_type")
# Persist intermediate objects for downstream analysis
saveRDS(cds, file = "cds.rds")
saveRDS(cell_metadata, file = "cell_met.rds")
saveRDS(gene_metadata, file = "gene_met.rds")
| /preprocess_exp_data.R | no_license | ayushiso/dbn-genereg | R | false | false | 1,506 | r | if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install()
BiocManager::install(c('BiocGenerics', 'DelayedArray', 'DelayedMatrixStats',
'limma', 'S4Vectors', 'SingleCellExperiment',
'SummarizedExperiment', 'batchelor', 'Matrix.utils'))
install.packages("devtools")
devtools::install_github('cole-trapnell-lab/leidenbase')
devtools::install_github('cole-trapnell-lab/monocle3')
library(monocle3)
library(readxl)
log2TPM_matrix <- read.delim("~/Spring20/Genomics/project/GSM4307111_GEO_processed_BC159-T_3_log2TPM_matrix_final.txt", row.names=1)
cell_metadata <- read_excel("Spring20/Genomics/project/GSE145137_BC159-T_3_All_SC_final_QC_Celltype_Information.xlsx")
gene_metadata <- data.frame(rownames(log2TPM_matrix))
# making things work for cds
log2TPM_matrix <- data.matrix(log2TPM_matrix)
rownames(cell_metadata) <- cell_metadata$Samples
rownames(gene_metadata) <- rownames(log2TPM_matrix)
# load data into cds
cds <- new_cell_data_set(as(log2TPM_matrix, "sparseMatrix"),
cell_metadata = cell_metadata,
gene_metadata = gene_metadata)
cds <- preprocess_cds(cds, num_dim = 100)
plot_pc_variance_explained(cds)
cds_2d <- reduce_dimension(cds)
plot_cells(cds_2d, color_cells_by="cell_type")
saveRDS(cds, file = "cds.rds")
saveRDS(cell_metadata, file = "cell_met.rds")
saveRDS(gene_metadata, file = "gene_met.rds")
|
##
# "Data" tab for the ensemble model: a file-upload control for prediction
# data plus two server-rendered preview tables ("med_prob" and "med_pred").
##
tp_model_ensemble_data <- tabPanel(
  "Data",
  # SETUP row: CSV / plain-text upload (multiple files allowed)
  fluidRow(
    column(
      tags$h5("SETUP"),
      width = 12,
      column(
        width = 11,
        fileInput(
          "med_upload",
          "Select prediction data",
          multiple = TRUE,
          accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv")
        )
      ),
      # narrow spacer column to pad the row out to 12 units
      column(
        width = 1
      )
    )
  ),
  # PREVIEW row: probability and prediction tables side by side
  fluidRow(
    column(
      tags$h5("PREVIEW"),
      width = 12,
      column(
        width = 6,
        DT::dataTableOutput("med_prob")
      ),
      column(
        width = 6,
        DT::dataTableOutput("med_pred")
      )
    )
  )
) | /iUI/model_ensemble_data.R | no_license | DrRoad/ShinyMLHome | R | false | false | 686 | r | ##
# Data setup tab for the ensemble model: upload control plus two preview
# tables rendered server-side into "med_prob" / "med_pred".
##
tp_model_ensemble_data <- tabPanel(
  "Data",
  # SETUP row: file upload (CSV / plain text, multiple files allowed)
  fluidRow(
    column(
      tags$h5("SETUP"),
      width = 12,
      column(
        width = 11,
        fileInput(
          "med_upload",
          "Select prediction data",
          multiple = TRUE,
          accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv")
        )
      ),
      # spacer column
      column(
        width = 1
      )
    )
  ),
  # PREVIEW row: two side-by-side data tables
  fluidRow(
    column(
      tags$h5("PREVIEW"),
      width = 12,
      column(
        width = 6,
        DT::dataTableOutput("med_prob")
      ),
      column(
        width = 6,
        DT::dataTableOutput("med_pred")
      )
    )
  )
) |
# no finite sample correction
# Run the BAGofT goodness-of-fit test for a binary-response GLM (logit link,
# y ~ x1 + x2 + x3) on each of the simulated datasets in genDat3_100, using a
# random forest (parRF) as the adaptive partitioning function.
source(file = "BAGofT.R")
source(file = "testGlmBi.R")
source(file = "parRF.R")
load(file = "genDat3_100.rda")
# Model formula under test
fm <- y ~ x1 + x2 + x3
result <- list()
# tryCatch keeps the loop alive when a single replication errors; failed
# replications leave a NULL slot in `result`.
# NOTE(review): seq_along(genDat3_100) would be safer than 1:length(...) if
# the list could ever be empty.
system.time( for(k in c(1:length(genDat3_100)) ){
message(paste("replication: ",k))
tryCatch({ result[[k]] <- BAGofT(testModel = function(Train.data, Test.data){testGlmBi(formula = fm, link = "logit", Train.data, Test.data)},
                            parFun = parRF,
                            data = genDat3_100[[k]],
                            nsplits = 40,
                            nsim = 100)}, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
} )
# Persist all replication results
save( result,
      file = "sim3_100_1_fl.rda")
| /Rcodes/Subsec5.1_(appendix_Sec2,7,8)/BAGofT_results/sim3_100_1_fl.R | no_license | JZHANG4362/BAGofT | R | false | false | 754 | r | # no finite sample correction
source(file = "BAGofT.R")
source(file = "testGlmBi.R")
source(file = "parRF.R")
load(file = "genDat3_100.rda")
fm <- y ~ x1 + x2 + x3
result <- list()
system.time( for(k in c(1:length(genDat3_100)) ){
message(paste("replication: ",k))
tryCatch({ result[[k]] <- BAGofT(testModel = function(Train.data, Test.data){testGlmBi(formula = fm, link = "logit", Train.data, Test.data)},
parFun = parRF,
data = genDat3_100[[k]],
nsplits = 40,
nsim = 100)}, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
} )
save( result,
file = "sim3_100_1_fl.rda")
|
# This script shows how to read the PISCO_HyM_GR2M_v1.1.nc file
# By Harold Llauca
# install.packages("ncdf4")
# install.packages("lubridate")
# NOTE(review): rm(list=ls()) and setwd() in a shared script are discouraged;
# consider relative paths / an RStudio project instead.
rm(list=ls())
cat('\f')
# Load required packages
require(ncdf4)
require(lubridate)
# Set your work directory
my_location <- 'D:/Taller_Nexus/'
setwd(my_location)
# Read netCDF data and load variables and dimensions
#==========================================================
mync <- nc_open('Grillados/netcdf_GR2M/PISCO_HyM_GR2M_v1.1.nc')
print(mync)
# Load data: catchment ids (COMID) and the monthly time axis (months elapsed
# since 1960-01-01, snapped to the first day of each month)
comid <- ncvar_get(mync,'comid')
time <- floor_date(as.Date('1960-01-01')+
                     months(floor(ncvar_get(mync,'time'))),"month")
pr <- as.data.frame(t(ncvar_get(mync,'pr'))) # Precipitation
ae <- as.data.frame(t(ncvar_get(mync,'ae'))) # Actual evapotranspiration
sm <- as.data.frame(t(ncvar_get(mync,'sm'))) # Soil Moisture
ru <- as.data.frame(t(ncvar_get(mync,'ru'))) # Runoff
qr <- as.data.frame(t(ncvar_get(mync,'qr'))) # Discharge
# Save as text files: label columns by variable + COMID and rows by date
colnames(pr) <- paste0('PR_',comid)
colnames(ae) <- paste0('AE_',comid)
colnames(sm) <- paste0('SM_',comid)
colnames(ru) <- paste0('RU_',comid)
colnames(qr) <- paste0('QR_',comid)
rownames(pr) <- time
rownames(ae) <- time
rownames(sm) <- time
rownames(ru) <- time
rownames(qr) <- time
# write.table(pr, 'pr.txt',sep='\t')
# write.table(ae, 'ae.txt',sep='\t')
# write.table(sm, 'sm.txt',sep='\t')
# write.table(ru, 'ru.txt',sep='\t')
# write.table(qr, 'qr.txt',sep='\t')
# Plot data for an specific COMID
#==========================================================
myCOMID <- 20 # Look up the COMID in the shapefile attribute table; that value
# selects the catchment whose series are plotted. E.g. if the Santa Eulalia
# basin has COMID 25 in the shapefile, set 25 here to get that basin's
# discharge. This is the product developed by SENAMHI.
par(mfrow=c(3,2))
par(mar=c(1,3,3,1), oma=c(0.5,0.5,0.5,0.5))
par(cex=0.6)
par(tck=-0.02)
par(mgp=c(1.5,0.5,0))
# Column index of the selected catchment in each variable table
ind <- which(comid==myCOMID)
plot(x=time, y=pr[,ind], type='l', col='blue',
     main=paste0('PR_',myCOMID), ylab='[mm/month]')
plot(x=time, y=ae[,ind], type='l', col='green',
     main=paste0('AE_',myCOMID), ylab='[mm/month]')
plot(x=time, y=sm[,ind], type='l', col='red',
     main=paste0('SM_',myCOMID), ylab='[mm/month]')
plot(x=time, y=ru[,ind], type='l', col='black',
     main=paste0('RU_',myCOMID), ylab='[mm/month]')
plot(x=time, y=qr[,ind], type='l', col='magenta',
     main=paste0('QR_',myCOMID), ylab='[m3/s]')
# Additionally --------------------------------------------------------------
# Added a step to save the discharge series as a CSV file instead of text
write.csv(qr[,ind],'Salida/Caudales_m3_comid.csv', quote = F, row.names = F)
# Data are available up to March 2020.
# If the desired basin is not inside the shapefile, this must be done
# manually.
| /Scripts/04_Producto_GR2M_Senamhi.R | no_license | yesin25/Manejo_datos_hidrometeorologicos_Nexus | R | false | false | 2,927 | r | # This script show how to read the PISCO_HyM_GR2M_v1.1.nc file
# By Harold Llauca
# install.packages("ncdf4")
# install.packages("lubridate")
rm(list=ls())
cat('\f')
# Load required packages
require(ncdf4)
require(lubridate)
# Set your work directory
my_location <- 'D:/Taller_Nexus/'
setwd(my_location)
# Read netCDF data and load variables and dimensions
#==========================================================
mync <- nc_open('Grillados/netcdf_GR2M/PISCO_HyM_GR2M_v1.1.nc')
print(mync)
# Load data
comid <- ncvar_get(mync,'comid')
time <- floor_date(as.Date('1960-01-01')+
months(floor(ncvar_get(mync,'time'))),"month")
pr <- as.data.frame(t(ncvar_get(mync,'pr'))) # Precipitation
ae <- as.data.frame(t(ncvar_get(mync,'ae'))) # Actual evapotranspiration
sm <- as.data.frame(t(ncvar_get(mync,'sm'))) # Soil Moisture
ru <- as.data.frame(t(ncvar_get(mync,'ru'))) # Runoff
qr <- as.data.frame(t(ncvar_get(mync,'qr'))) # Discharge
# Save as texfiles
colnames(pr) <- paste0('PR_',comid)
colnames(ae) <- paste0('AE_',comid)
colnames(sm) <- paste0('SM_',comid)
colnames(ru) <- paste0('RU_',comid)
colnames(qr) <- paste0('QR_',comid)
rownames(pr) <- time
rownames(ae) <- time
rownames(sm) <- time
rownames(ru) <- time
rownames(qr) <- time
# write.table(pr, 'pr.txt',sep='\t')
# write.table(ae, 'ae.txt',sep='\t')
# write.table(sm, 'sm.txt',sep='\t')
# write.table(ru, 'ru.txt',sep='\t')
# write.table(qr, 'qr.txt',sep='\t')
# Plot data for an specific COMID
#==========================================================
myCOMID <- 20 # VER EL COMID EN LA TABLA DE ATRIBUTOS DEL SHAPEFILE, SEGUN ESO
# ES EL CAUDAL DE LA CUENCA , POR EJEMPLO SI EL COMID DE LA CUENCA SANTA EULALIA
#EN EL SHAPEFILE ES 25 , PONER 25 AQUI, Y NOS ARROJARA EL CAUDAL DE ESA CUENCA.
#ESTE ES EL PRODUCTO DESARROLLADO POR EL SENAMHI
par(mfrow=c(3,2))
par(mar=c(1,3,3,1), oma=c(0.5,0.5,0.5,0.5))
par(cex=0.6)
par(tck=-0.02)
par(mgp=c(1.5,0.5,0))
ind <- which(comid==myCOMID)
plot(x=time, y=pr[,ind], type='l', col='blue',
main=paste0('PR_',myCOMID), ylab='[mm/month]')
plot(x=time, y=ae[,ind], type='l', col='green',
main=paste0('AE_',myCOMID), ylab='[mm/month]')
plot(x=time, y=sm[,ind], type='l', col='red',
main=paste0('SM_',myCOMID), ylab='[mm/month]')
plot(x=time, y=ru[,ind], type='l', col='black',
main=paste0('RU_',myCOMID), ylab='[mm/month]')
plot(x=time, y=qr[,ind], type='l', col='magenta',
main=paste0('QR_',myCOMID), ylab='[m3/s]')
# Adicionalmente ----------------------------------------------------------
#Agregue una funcion para guardarlo como archivo csv en ves de text
write.csv(qr[,ind],'Salida/Caudales_m3_comid.csv', quote = F, row.names = F)
#Los datos estan hasta marzo del 2020
#Si la cuenca que desea no esta dentro del shapefile, debe aplicarse de manera
#manual.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise.fst.R
\name{pairwise.neifst}
\alias{pairwise.neifst}
\title{Estimate pairwise FSTs according to Nei (1987)}
\usage{
pairwise.neifst(dat,diploid=TRUE)
}
\arguments{
\item{dat}{A data frame containing population of origin as the first column and multi-locus genotypes in following columns}
\item{diploid}{whether the data is from a diploid (default) or haploid organism}
}
\value{
A matrix of pairwise FSTs
}
\description{
Estimate pairwise FSTs according to Nei (1987)
}
\details{
FSTs are calculated using Nei's (1987) equations for FST', as described in the note section of \link{basic.stats}
}
\examples{
data(gtrunchier)
pairwise.neifst(gtrunchier[,-2],diploid=TRUE)
}
\author{
Jerome Goudet \email{jerome.goudet@unil.ch}
}
\references{
Nei, M. (1987) Molecular Evolutionary Genetics. Columbia University Press
}
\seealso{
\link{pairwise.WCfst} \link{genet.dist}
}
| /man/pairwise.neifst.Rd | no_license | nmoran/hierfstat | R | false | true | 955 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise.fst.R
\name{pairwise.neifst}
\alias{pairwise.neifst}
\title{Estimate pairwise FSTs according to Nei (1987)}
\usage{
pairwise.neifst(dat,diploid=TRUE)
}
\arguments{
\item{dat}{A data frame containing population of origin as the first column and multi-locus genotypes in following columns}
\item{diploid}{whether the data is from a diploid (default) or haploid organism}
}
\value{
A matrix of pairwise FSTs
}
\description{
Estimate pairwise FSTs according to Nei (1987)
}
\details{
FSTs are calculated using Nei's (1987) equations for FST', as described in the note section of \link{basic.stats}
}
\examples{
data(gtrunchier)
pairwise.neifst(gtrunchier[,-2],diploid=TRUE)
}
\author{
Jerome Goudet \email{jerome.goudet@unil.ch}
}
\references{
Nei, M. (1987) Molecular Evolutionary Genetics. Columbia University Press
}
\seealso{
\link{pairwise.WCfst} \link{genet.dist}
}
|
.frbemodel <- structure(list(model = structure(list(arima = structure(list(
rules = list(c("MlBi.weight", "QrBi.seasonStrength", "MlMe.frequency"
), c("Bi.weight", "QrBi.seasonStrength", "MlMe.frequency"
), c("QrBi.weight", "QrBi.trendStrength", "QrSm.varcoef"),
c("RoBi.weight", "QrBi.seasonStrength", "QrSm.skewness"
), c("RoBi.weight", "QrBi.trendStrength", "QrSm.skewness"
), c("QrBi.weight", "QrMe.seasonStrength", "QrSm.varcoef"
), c("MlBi.weight", "QrBi.seasonStrength", "QrSm.skewness"
), c("RoBi.weight", "RoSm.length", "QrMe.stationarity"
), c("QrBi.weight", "QrSm.skewness"), c("RoSm.weight",
"QrMe.length", "QrSm.seasonStrength", "MlMe.kurtosis"
), c("RoBi.weight", "QrMe.length", "SiBi.seasonStrength",
"QrMe.skewness"), c("QrBi.weight", "QrMe.trendStrength",
"MlMe.frequency"), c("QrBi.weight", "ExSm.seasonStrength",
"QrMe.stationarity"), c("RoBi.weight", "QrMe.trendStrength",
"QrMe.kurtosis", "MlMe.frequency"), c("MlBi.weight",
"QrBi.trendStrength", "QrSm.skewness", "QrMe.varcoef"
), c("QrBi.weight", "SiBi.seasonStrength"), c("QrBi.weight",
"QrBi.trendStrength", "QrMe.kurtosis", "RoMe.stationarity"
), c("RoBi.weight", "QrBi.trendStrength", "QrMe.kurtosis",
"QrSm.varcoef"), c("QrBi.weight", "QrSm.length", "QrMe.kurtosis"
), c("MlBi.weight", "QrSm.skewness", "QrMe.kurtosis",
"QrBi.stationarity"), c("Bi.weight", "RoBi.seasonStrength",
"QrSm.skewness"), c("MlBi.weight", "ExBi.trendStrength",
"ExBi.seasonStrength", "RoMe.varcoef"), c("QrSm.weight",
"QrSm.seasonStrength", "Me.skewness", "Me.kurtosis",
"Me.varcoef"), c("QrBi.weight", "QrBi.seasonStrength",
"ExBi.stationarity"), c("RoBi.weight", "VeBi.length",
"QrMe.varcoef", "MlBi.stationarity"), c("QrSm.weight",
"Me.length", "RoSm.seasonStrength"), c("RoBi.weight",
"RoSm.length", "RoMe.kurtosis"), c("RoBi.weight", "ExBi.seasonStrength",
"ExBi.stationarity"), c("RoSm.weight", "QrMe.length",
"QrSm.seasonStrength", "QrMe.skewness", "RoMe.kurtosis"
), c("RoBi.weight", "VeBi.trendStrength", "ExBi.seasonStrength"
), c("QrBi.weight", "RoBi.trendStrength", "QrMe.seasonStrength",
"MlMe.kurtosis"), c("QrSm.weight", "MlSm.seasonStrength",
"RoMe.skewness", "Me.kurtosis"), c("QrSm.weight", "QrMe.length",
"QrSm.seasonStrength", "QrMe.kurtosis"), c("Bi.weight",
"VeBi.length", "MlBi.seasonStrength", "RoBi.stationarity"
), c("MlBi.weight", "QrBi.trendStrength", "MlSm.skewness"
), c("RoBi.weight", "MlMe.trendStrength", "ExBi.seasonStrength"
), c("RoBi.weight", "MlBi.trendStrength", "QrMe.seasonStrength",
"VeSm.frequency"), c("MlBi.weight", "VeBi.length", "RoBi.seasonStrength",
"QrBi.stationarity"), c("MlBi.weight", "Me.trendStrength",
"VeBi.seasonStrength", "MlBi.stationarity")), statistics = structure(c(0.0553324297070503,
0.0535291358828545, 0.0683433637022972, 0.0674656629562378,
0.0881131663918495, 0.0504934377968311, 0.0660794749855995,
0.0531193651258945, 0.155274227261543, 0.0525155030190945,
0.0500311888754368, 0.0540792420506477, 0.0596559159457684,
0.0511576645076275, 0.0744946822524071, 0.165995970368385,
0.0799539834260941, 0.0547490678727627, 0.110782355070114,
0.0539162494242191, 0.0623716525733471, 0.0505596026778221,
0.0586908757686615, 0.0979527309536934, 0.0525922030210495,
0.0501280128955841, 0.0832463875412941, 0.0794434621930122,
0.0504496246576309, 0.0643889084458351, 0.053155891597271,
0.0810941010713577, 0.0647504180669785, 0.0523040555417538,
0.0612805485725403, 0.0707165226340294, 0.0563865378499031,
0.0556458756327629, 0.0505959875881672, 0.101096130907536,
0.101096130907536, 0.129691913723946, 0.12867546081543, 0.170518070459366,
0.0982556343078613, 0.12867546081543, 0.103511586785316,
0.303272306919098, 0.103271067142487, 0.0984836667776108,
0.106564275920391, 0.118037797510624, 0.101412616670132,
0.147680655121803, 0.329247862100601, 0.158755585551262,
0.108839794993401, 0.220417127013206, 0.107299707829952,
0.124156303703785, 0.100754670798779, 0.117019392549992,
0.195308491587639, 0.104902468621731, 0.0999998971819878,
0.166111022233963, 0.158527165651321, 0.100701823830605,
0.12855638563633, 0.106153674423695, 0.161976352334023, 0.129332035779953,
0.104504562914371, 0.122440300881863, 0.14130774140358, 0.112691588699818,
0.111219681799412, 0.101161912083626, 0.420343458652496,
0.408192425966263, 0.449764728546143, 0.43272602558136, 0.43272602558136,
0.449764728546143, 0.420343458652496, 0.43272602558136, 0.449764728546143,
0.350143998861313, 0.43272602558136, 0.449764728546143, 0.449764728546143,
0.43272602558136, 0.420343458652496, 0.449764728546143, 0.449764728546143,
0.43272602558136, 0.449764728546143, 0.420343458652496, 0.408192425966263,
0.420343458652496, 0.376548111438751, 0.449764728546143,
0.43272602558136, 0.376548111438751, 0.43272602558136, 0.43272602558136,
0.350143998861313, 0.43272602558136, 0.449764728546143, 0.376548111438751,
0.376548111438751, 0.408192425966263, 0.420343458652496,
0.43272602558136, 0.43272602558136, 0.420343458652496, 0.420343458652496,
0.547324899680468, 0.529487482877196, 0.526967038575503,
0.524308695136593, 0.516737998233722, 0.513898649706149,
0.513535949798408, 0.513173131391217, 0.511996063336455,
0.508520967897398, 0.508015090344005, 0.507480031028857,
0.505396722099962, 0.504450690529264, 0.504430876142618,
0.504167192793086, 0.503629419704903, 0.50302435681804, 0.502603207705708,
0.502482723528616, 0.502363961496106, 0.501809020633861,
0.501548286055136, 0.501528275383459, 0.501343807367322,
0.501280644362635, 0.501149089456832, 0.501134691121313,
0.500980247810553, 0.500861222312079, 0.500744716429769,
0.500653952893865, 0.500652585235305, 0.500495424153015,
0.500493286370369, 0.500443372257013, 0.500361548723062,
0.500323996009286, 0.500148588990112, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(39L,
7L), .Dimnames = list(NULL, c("support", "lhsSupport", "rhsSupport",
"confidence", "lift", "loLift", "hiLift")))), .Names = c("rules",
"statistics"), class = c("farules", "list")), expSmooth = structure(list(
rules = list(c("QrSm.weight", "QrMe.length", "QrSm.seasonStrength"
), c("MlBi.weight", "QrSm.skewness", "QrBi.stationarity"),
c("RoSm.weight", "QrMe.length", "QrSm.seasonStrength"
), c("QrBi.weight", "QrBi.seasonStrength"), c("RoBi.weight",
"RoSm.skewness", "QrMe.kurtosis", "QrMe.varcoef"), c("QrBi.weight",
"QrMe.length", "QrSm.frequency"), c("MlBi.weight", "QrSm.skewness",
"QrMe.varcoef", "SiSm.frequency"), c("Sm.weight", "QrMe.length",
"QrBi.trendStrength", "QrSm.seasonStrength"), c("RoBi.weight",
"SiBi.stationarity"), c("MlBi.weight", "ExBi.stationarity"
), c("QrBi.weight", "QrSm.skewness", "QrMe.kurtosis"),
c("MlSm.weight", "QrMe.length", "QrSm.seasonStrength"
), c("MlBi.weight", "QrBi.seasonStrength", "MlMe.frequency"
), c("Sm.weight", "QrMe.length", "QrSm.seasonStrength",
"Me.varcoef"), c("Bi.weight", "QrSm.skewness", "QrBi.stationarity"
), c("Sm.weight", "RoMe.length", "RoSm.seasonStrength",
"QrMe.varcoef"), c("QrBi.weight", "MlMe.length", "MlMe.kurtosis",
"QrMe.varcoef", "MlMe.frequency"), c("Bi.weight", "QrBi.seasonStrength",
"RoSm.skewness"), c("Bi.weight", "Bi.seasonStrength",
"SiBi.stationarity"), c("RoBi.weight", "QrSm.skewness",
"MlSm.frequency"), c("VeBi.weight", "QrBi.length", "ExBi.seasonStrength",
"Me.kurtosis", "SiBi.stationarity"), c("Bi.weight", "QrBi.length",
"RoMe.kurtosis", "SiBi.stationarity"), c("VeBi.weight",
"QrBi.length", "VeBi.seasonStrength", "Me.kurtosis",
"ExBi.stationarity"), c("MlBi.weight", "QrBi.trendStrength",
"QrSm.skewness", "QrMe.kurtosis"), c("VeBi.weight", "QrBi.length",
"SiBi.seasonStrength", "RoMe.kurtosis", "ExBi.stationarity"
), c("RoBi.weight", "QrBi.seasonStrength", "QrMe.kurtosis"
), c("Bi.weight", "QrBi.trendStrength", "VeBi.seasonStrength",
"MlMe.kurtosis"), c("Bi.weight", "QrBi.trendStrength",
"QrSm.skewness", "QrMe.kurtosis", "QrMe.varcoef"), c("QrBi.weight",
"QrMe.kurtosis", "RoBi.stationarity"), c("QrBi.weight",
"RoSm.length", "QrMe.stationarity"), c("QrSm.weight",
"Bi.trendStrength", "RoSm.seasonStrength", "MlMe.skewness"
), c("Bi.weight", "QrBi.length", "Bi.seasonStrength",
"Me.kurtosis", "QrBi.stationarity"), c("MlBi.weight",
"VeBi.trendStrength", "RoBi.seasonStrength", "MlMe.kurtosis"
), c("MlBi.weight", "VeBi.seasonStrength", "RoMe.kurtosis"
), c("RoBi.weight", "SiBi.trendStrength", "RoSm.skewness",
"QrMe.varcoef"), c("QrBi.weight", "RoBi.trendStrength",
"QrSm.skewness", "QrMe.varcoef"), c("Bi.weight", "QrBi.length",
"ExBi.seasonStrength", "Me.kurtosis", "RoMe.varcoef")),
statistics = structure(c(0.0815981701016426, 0.0614597760140896,
0.0782240703701973, 0.224344417452812, 0.0862632393836975,
0.0729342624545097, 0.0639304295182228, 0.0502423271536827,
0.128447845578194, 0.116287410259247, 0.123051941394806,
0.0758908912539482, 0.0510317049920559, 0.0501841567456722,
0.0593299306929111, 0.0548232048749924, 0.0550771504640579,
0.0551150441169739, 0.0995828360319138, 0.0728134214878082,
0.0517548844218254, 0.0688572898507118, 0.0529531016945839,
0.0597639642655849, 0.0543591566383839, 0.212206467986107,
0.0661505907773972, 0.0515229105949402, 0.16906464099884,
0.0518100708723068, 0.0507209636271, 0.0725702121853828,
0.0746634677052498, 0.167581930756569, 0.060338344424963,
0.0736311003565788, 0.0620163045823574, 0.149828627705574,
0.117683216929436, 0.149828627705574, 0.432726860046387,
0.168784901499748, 0.142777293920517, 0.125204190611839,
0.0985179245471954, 0.252366900444031, 0.228992059826851,
0.242879539728165, 0.149828627705574, 0.101096130907536,
0.099481888115406, 0.117683216929436, 0.108862869441509,
0.10961326956749, 0.10976929962635, 0.198334977030754, 0.145109206438065,
0.10314866155386, 0.137240052223206, 0.105583660304546, 0.119175434112549,
0.108447961509228, 0.423450648784637, 0.132061049342155,
0.102864116430283, 0.337683796882629, 0.103511586785316,
0.101351194083691, 0.145039677619934, 0.149245545268059,
0.335027664899826, 0.120635487139225, 0.147236660122871,
0.124030612409115, 0.365115106105804, 0.409470558166504,
0.339983552694321, 0.439135015010834, 0.422608703374863,
0.439135015010834, 0.409470558166504, 0.303977817296982,
0.422608703374863, 0.409470558166504, 0.439135015010834,
0.322471439838409, 0.409470558166504, 0.303977817296982,
0.394504189491272, 0.303977817296982, 0.439135015010834,
0.394504189491272, 0.394504189491272, 0.422608703374863,
0.366584867238998, 0.394504189491272, 0.366584867238998,
0.409470558166504, 0.366584867238998, 0.422608703374863,
0.394504189491272, 0.394504189491272, 0.439135015010834,
0.439135015010834, 0.365115106105804, 0.394504189491272,
0.409470558166504, 0.409470558166504, 0.422608703374863,
0.439135015010834, 0.394504189491272, 0.544610007788297,
0.522247586509652, 0.522090281197224, 0.518443475934827,
0.511083862461633, 0.510825359213711, 0.5106093430724, 0.509981583398196,
0.508972632116946, 0.507822892842555, 0.506637741213311,
0.506517962662518, 0.504783956952126, 0.504455209851416,
0.504149463627308, 0.503598749107456, 0.502467909965468,
0.502098895634598, 0.502094171803455, 0.501783610255538,
0.501750421597097, 0.501728822856488, 0.501527428977604,
0.501478888754406, 0.501246458503127, 0.501136244790672,
0.500909171227379, 0.500883227144234, 0.500659618730842,
0.500524361391166, 0.500447617669085, 0.500347307552267,
0.500272671932333, 0.500203261741612, 0.500170769446363,
0.500086733121578, 0.50000804944667, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(37L, 7L), .Dimnames = list(
NULL, c("support", "lhsSupport", "rhsSupport", "confidence",
"lift", "loLift", "hiLift")))), .Names = c("rules", "statistics"
), class = c("farules", "list")), randomWalk = structure(list(
rules = list(c("QrBi.weight", "QrMe.length", "QrSm.seasonStrength"
), c("RoBi.weight", "QrMe.length", "QrSm.seasonStrength"),
c("RoSm.weight", "QrBi.seasonStrength"), c("QrSm.weight",
"QrBi.trendStrength"), c("MlSm.weight", "QrBi.seasonStrength"
), c("RoSm.weight", "QrBi.trendStrength"), c("MlSm.weight",
"QrBi.trendStrength"), c("Sm.weight", "QrBi.seasonStrength"
), c("Sm.weight", "QrBi.trendStrength"), c("MlBi.weight",
"QrMe.length", "QrSm.seasonStrength"), c("VeSm.weight",
"QrBi.trendStrength"), c("SiSm.weight", "QrBi.trendStrength"
), c("ExSm.weight", "QrBi.trendStrength"), c("VeSm.weight",
"QrBi.seasonStrength"), c("SiSm.weight", "QrBi.seasonStrength"
), c("QrSm.weight", "RoMe.kurtosis", "MlMe.frequency"
), c("RoSm.weight", "RoMe.kurtosis", "RoMe.frequency"
), c("ExSm.weight", "QrBi.seasonStrength"), c("QrSm.weight",
"QrSm.length"), c("RoSm.weight", "QrSm.length"), c("VeSm.weight",
"QrMe.kurtosis", "QrSm.varcoef"), c("MlSm.weight", "RoMe.kurtosis",
"Me.frequency"), c("RoBi.weight", "QrMe.trendStrength",
"QrSm.seasonStrength"), c("MlSm.weight", "QrSm.length"
), c("SiSm.weight", "QrMe.kurtosis", "QrSm.varcoef"),
c("Sm.weight", "QrSm.length"), c("ExSm.weight", "QrMe.kurtosis",
"QrSm.varcoef"), c("MlSm.weight", "QrMe.seasonStrength",
"QrMe.stationarity", "Sm.frequency"), c("QrBi.weight",
"QrSm.length", "RoMe.trendStrength", "MlMe.varcoef"),
c("Sm.weight", "RoMe.kurtosis", "Me.frequency"), c("VeSm.weight",
"QrSm.length"), c("QrBi.weight", "QrSm.seasonStrength",
"QrMe.skewness"), c("SiSm.weight", "QrSm.length"), c("ExSm.weight",
"QrSm.length"), c("QrBi.weight", "Me.length", "QrMe.skewness",
"RoMe.varcoef", "Me.frequency"), c("VeSm.weight", "RoMe.kurtosis",
"Me.frequency"), c("QrBi.weight", "MlMe.length", "QrMe.varcoef",
"QrMe.stationarity"), c("Sm.weight", "QrMe.seasonStrength",
"QrMe.stationarity", "Sm.frequency"), c("SiSm.weight",
"RoMe.kurtosis", "Me.frequency"), c("RoBi.weight", "MlMe.length",
"Me.skewness", "QrMe.varcoef", "QrMe.stationarity"),
c("RoSm.weight", "QrMe.varcoef", "Sm.frequency"), c("MlSm.weight",
"QrSm.varcoef"), c("QrBi.weight", "Me.skewness", "Me.varcoef",
"MlMe.stationarity"), c("QrBi.weight", "RoMe.length",
"QrBi.trendStrength", "RoMe.skewness", "QrMe.varcoef"
), c("QrBi.weight", "RoSm.length", "MlMe.trendStrength",
"MlMe.skewness"), c("QrBi.weight", "RoMe.length", "RoMe.stationarity",
"MlMe.frequency"), c("RoBi.weight", "RoSm.seasonStrength",
"Me.kurtosis"), c("MlBi.weight", "QrSm.seasonStrength",
"Me.kurtosis", "Me.varcoef"), c("ExSm.weight", "RoMe.kurtosis",
"Me.frequency"), c("RoBi.weight", "QrSm.seasonStrength",
"QrMe.skewness", "MlMe.varcoef"), c("QrBi.weight", "QrMe.trendStrength",
"QrMe.kurtosis", "Me.stationarity"), c("Sm.weight", "QrSm.varcoef"
), c("Bi.weight", "QrMe.length", "QrSm.seasonStrength"
), c("QrBi.weight", "QrSm.seasonStrength", "QrMe.kurtosis",
"MlMe.varcoef"), c("RoBi.weight", "QrSm.seasonStrength",
"MlMe.skewness", "QrMe.varcoef"), c("MlBi.weight", "QrMe.trendStrength",
"QrSm.seasonStrength", "QrMe.skewness"), c("RoBi.weight",
"Me.length", "QrBi.trendStrength", "RoMe.skewness", "QrMe.varcoef"
), c("MlBi.weight", "MlSm.seasonStrength", "QrMe.skewness",
"Me.kurtosis"), c("QrSm.weight", "SiSm.frequency")),
statistics = structure(c(0.0867330580949783, 0.083218052983284,
0.233140662312508, 0.263708263635635, 0.231206223368645,
0.26219317317009, 0.261023730039597, 0.228912457823753, 0.25976425409317,
0.0790695771574974, 0.257887303829193, 0.257227927446365,
0.256534934043884, 0.225063741207123, 0.22338142991066, 0.104536153376102,
0.104169331490993, 0.221582442522049, 0.126719355583191,
0.126641839742661, 0.117890037596226, 0.103806011378765,
0.0937432050704956, 0.126310363411903, 0.117658190429211,
0.126075237989426, 0.117327965795994, 0.0507219471037388,
0.0600524060428143, 0.103344343602657, 0.125694215297699,
0.148976862430573, 0.125574052333832, 0.12543061375618, 0.0513141751289368,
0.102677442133427, 0.0767121538519859, 0.0502900891005993,
0.10237767547369, 0.0507664233446121, 0.21602638065815, 0.128013372421265,
0.0650656968355179, 0.0679261460900307, 0.0544815137982368,
0.0520723909139633, 0.104499533772469, 0.0732661560177803,
0.101968869566917, 0.105553023517132, 0.0548814833164215,
0.127542287111282, 0.0750589668750763, 0.114984482526779,
0.112453736364841, 0.0754802450537682, 0.0533889010548592,
0.083994522690773, 0.252460807561874, 0.149828627705574,
0.149828627705574, 0.432726860046387, 0.491184085607529,
0.432726860046387, 0.491184085607529, 0.491184085607529,
0.432726860046387, 0.491184085607529, 0.149828627705574,
0.491184085607529, 0.491184085607529, 0.491184085607529,
0.432726860046387, 0.432726860046387, 0.20327553153038, 0.20327553153038,
0.432726860046387, 0.24751977622509, 0.24751977622509, 0.230668947100639,
0.20327553153038, 0.183692455291748, 0.24751977622509, 0.230668947100639,
0.24751977622509, 0.230668947100639, 0.0997635945677757,
0.118116416037083, 0.20327553153038, 0.24751977622509, 0.293645918369293,
0.24751977622509, 0.24751977622509, 0.101335622370243, 0.20327553153038,
0.152035176753998, 0.0997635945677757, 0.20327553153038,
0.100921131670475, 0.429517805576324, 0.254553526639938,
0.129407525062561, 0.135301038622856, 0.108540564775467,
0.103757627308369, 0.208276674151421, 0.146042764186859,
0.20327553153038, 0.210630789399147, 0.109528966248035, 0.254553526639938,
0.149828627705574, 0.229638740420341, 0.224624276161194,
0.150807693600655, 0.106687039136887, 0.167926445603371,
0.504772007465363, 0.385609328746796, 0.361263692378998,
0.48637193441391, 0.489094972610474, 0.483034938573837, 0.48637193441391,
0.483034938573837, 0.479497849941254, 0.479497849941254,
0.342880696058273, 0.474217027425766, 0.47221514582634, 0.469980031251907,
0.474217027425766, 0.47221514582634, 0.489094972610474, 0.48637193441391,
0.469980031251907, 0.489094972610474, 0.48637193441391, 0.474217027425766,
0.483034938573837, 0.361263692378998, 0.483034938573837,
0.47221514582634, 0.479497849941254, 0.469980031251907, 0.483034938573837,
0.385609328746796, 0.479497849941254, 0.474217027425766,
0.385609328746796, 0.47221514582634, 0.469980031251907, 0.385609328746796,
0.474217027425766, 0.385609328746796, 0.479497849941254,
0.47221514582634, 0.361263692378998, 0.48637193441391, 0.483034938573837,
0.385609328746796, 0.385609328746796, 0.385609328746796,
0.385609328746796, 0.361263692378998, 0.342880696058273,
0.469980031251907, 0.361263692378998, 0.385609328746796,
0.479497849941254, 0.323303759098053, 0.385609328746796,
0.361263692378998, 0.342880696058273, 0.361263692378998,
0.342880696058273, 0.489094972610474, 0.578881749256999,
0.555421579024367, 0.538770951929159, 0.5368827520327, 0.534300605568742,
0.533798184535625, 0.531417319265842, 0.5289998818174, 0.528853156494019,
0.527733440320069, 0.525031879870907, 0.523689457748227,
0.522278594850208, 0.520105780313699, 0.516218082433603,
0.514258418556781, 0.51245386351591, 0.512060754671656, 0.511956488955269,
0.511643318663535, 0.511078925351799, 0.51066653520594, 0.510326920730678,
0.510304127364106, 0.510073817512494, 0.50935420155994, 0.508642221984064,
0.5084214068618, 0.508417102868756, 0.508395392325968, 0.50781483893794,
0.50733503553493, 0.50732937080404, 0.506749867299959, 0.50637844746691,
0.505114616404686, 0.504568452445126, 0.504092593279947,
0.503639934934271, 0.503030658736302, 0.502950932076696,
0.502893729703998, 0.502796856705685, 0.502037137197231,
0.501946105688139, 0.501865667756681, 0.501734215788829,
0.501676042806459, 0.501628842385675, 0.501128177025952,
0.501068212331511, 0.501043096101704, 0.500965456498564,
0.500719008980393, 0.50063037836633, 0.500506593872082, 0.500425370192884,
0.500186390469799, 0.500148193299325, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(59L,
7L), .Dimnames = list(NULL, c("support", "lhsSupport", "rhsSupport",
"confidence", "lift", "loLift", "hiLift")))), .Names = c("rules",
"statistics"), class = c("farules", "list")), theta = structure(list(
rules = list(c("RoMe.weight", "QrBi.trendStrength", "QrSm.skewness"
), c("QrMe.weight", "QrSm.length"), c("QrBi.weight", "QrMe.length",
"ExSm.frequency"), c("MlMe.weight", "QrBi.trendStrength",
"QrSm.seasonStrength", "QrMe.stationarity"), c("RoMe.weight",
"QrSm.skewness", "QrMe.stationarity"), c("MlMe.weight", "QrSm.seasonStrength",
"QrSm.skewness"), c("QrMe.weight", "QrBi.seasonStrength",
"RoMe.frequency"), c("MlMe.weight", "QrBi.trendStrength",
"QrSm.skewness"), c("QrBi.weight", "QrMe.length", "QrBi.stationarity"
), c("MlMe.weight", "QrBi.trendStrength", "Sm.seasonStrength",
"QrMe.varcoef"), c("QrMe.weight", "QrSm.skewness"), c("MlBi.weight",
"QrMe.length", "RoMe.kurtosis", "ExSm.frequency"), c("QrSm.weight",
"QrBi.length", "ExBi.seasonStrength"), c("MlMe.weight", "MlSm.length",
"QrMe.stationarity"), c("MlBi.weight", "Me.length", "QrMe.kurtosis",
"QrMe.varcoef", "QrBi.stationarity"), c("QrBi.weight", "MlMe.length",
"QrMe.trendStrength", "MlMe.varcoef"), c("RoBi.weight", "QrMe.length",
"VeSm.frequency"), c("RoMe.weight", "QrBi.trendStrength",
"QrMe.stationarity"), c("RoBi.weight", "QrMe.length", "MlMe.kurtosis",
"QrMe.varcoef", "QrBi.stationarity"), c("MlSm.weight", "QrBi.length",
"ExBi.seasonStrength", "SiBi.stationarity"), c("RoBi.weight",
"Me.length", "QrBi.stationarity"), c("QrBi.weight", "Me.length",
"QrBi.seasonStrength", "MlMe.kurtosis"), c("Bi.weight", "Me.length",
"MlSm.frequency"), c("QrBi.weight", "MlMe.length", "QrMe.trendStrength",
"Me.kurtosis"), c("QrBi.weight", "QrMe.trendStrength", "MlMe.kurtosis",
"Me.varcoef", "ExSm.frequency"), c("MlBi.weight", "QrMe.length",
"QrMe.varcoef", "ExSm.frequency"), c("QrMe.weight", "RoSm.seasonStrength"
), c("RoMe.weight", "QrBi.trendStrength", "QrSm.seasonStrength",
"RoMe.varcoef"), c("MlBi.weight", "QrMe.length", "Me.kurtosis",
"MlMe.varcoef", "QrBi.stationarity"), c("RoSm.weight", "QrBi.length",
"MlBi.seasonStrength", "QrMe.kurtosis", "ExBi.stationarity"
), c("RoMe.weight", "QrSm.length", "RoMe.varcoef"), c("QrSm.weight",
"VeBi.length", "QrBi.seasonStrength", "QrMe.varcoef", "QrBi.stationarity"
), c("MlMe.weight", "QrSm.skewness", "MlMe.stationarity"),
c("Bi.weight", "QrMe.length", "MlMe.kurtosis", "ExSm.frequency"
), c("QrSm.weight", "ExBi.seasonStrength", "ExBi.stationarity"
), c("QrBi.weight", "Me.length", "QrMe.skewness", "Me.kurtosis",
"Me.varcoef"), c("RoMe.weight", "RoSm.length"), c("RoSm.weight",
"QrBi.length", "ExBi.seasonStrength", "QrMe.kurtosis",
"Bi.stationarity"), c("MlSm.weight", "QrBi.length", "VeBi.seasonStrength",
"QrMe.kurtosis", "QrMe.varcoef", "ExBi.stationarity"),
c("QrBi.weight", "Bi.trendStrength", "MlMe.skewness",
"RoSm.stationarity"), c("QrBi.weight", "QrMe.length",
"QrMe.trendStrength", "QrMe.skewness", "MlMe.kurtosis",
"Me.varcoef"), c("MlMe.weight", "VeSm.length", "QrSm.seasonStrength",
"QrMe.varcoef"), c("RoMe.weight", "VeSm.seasonStrength",
"RoMe.varcoef"), c("Bi.weight", "Me.length", "MlMe.kurtosis",
"RoMe.varcoef", "QrBi.stationarity"), c("VeBi.weight",
"Me.length", "Me.kurtosis", "QrMe.varcoef", "Sm.frequency"
), c("RoBi.weight", "Me.length", "QrMe.trendStrength",
"Me.kurtosis", "Me.varcoef"), c("QrMe.weight", "RoBi.trendStrength"
), c("QrSm.weight", "QrBi.length", "RoMe.kurtosis", "SiBi.stationarity"
)), statistics = structure(c(0.0942000597715378, 0.135585427284241,
0.0778455510735512, 0.0524262972176075, 0.0792518183588982,
0.0585502535104752, 0.052469901740551, 0.0869991779327393,
0.0763199254870415, 0.055534839630127, 0.154104575514793,
0.0669538974761963, 0.0863662585616112, 0.0500744171440601,
0.0576989948749542, 0.068632148206234, 0.0722000375390053,
0.115505583584309, 0.0603586621582508, 0.0568646527826786,
0.0645047500729561, 0.0693769678473473, 0.0647298321127892,
0.074457511305809, 0.0719771608710289, 0.0672856122255325,
0.171213790774345, 0.0720370262861252, 0.0510264299809933,
0.0600338950753212, 0.0961058735847473, 0.0505116283893585,
0.0539988093078136, 0.0634234696626663, 0.0794535130262375,
0.0761765688657761, 0.10408902913332, 0.0623421967029572,
0.0515866652131081, 0.0515606738626957, 0.0580988563597202,
0.0685170367360115, 0.102358505129814, 0.0506234243512154,
0.0546018518507481, 0.0518317110836506, 0.244368627667427,
0.0686221346259117, 0.170518070459366, 0.24751977622509,
0.142777293920517, 0.0993655472993851, 0.150413259863853,
0.112556263804436, 0.101096130907536, 0.170518070459366,
0.149817898869514, 0.109234720468521, 0.303272306919098,
0.13191457092762, 0.170460537075996, 0.0989503040909767,
0.114026054739952, 0.135683462023735, 0.142777293920517,
0.22859413921833, 0.119725234806538, 0.112795770168304, 0.127957731485367,
0.137737333774567, 0.128616139292717, 0.147991925477982,
0.143072187900543, 0.133776694536209, 0.340493142604828,
0.143300533294678, 0.101516559720039, 0.119683817028999,
0.191600009799004, 0.100732237100601, 0.10769110918045, 0.126518189907074,
0.158527165651321, 0.152000457048416, 0.207743138074875,
0.124439068138599, 0.102983459830284, 0.102952525019646,
0.116017036139965, 0.136824086308479, 0.204541638493538,
0.101180009543896, 0.109139986336231, 0.1036177277565, 0.488686084747314,
0.137240052223206, 0.379464775323868, 0.4378362596035, 0.380824595689774,
0.33187872171402, 0.379464775323868, 0.33187872171402, 0.4378362596035,
0.33187872171402, 0.380824595689774, 0.33187872171402, 0.4378362596035,
0.331502139568329, 0.286199659109116, 0.33187872171402, 0.331502139568329,
0.380824595689774, 0.35188165307045, 0.379464775323868, 0.35188165307045,
0.231100931763649, 0.35188165307045, 0.380824595689774, 0.313433319330215,
0.380824595689774, 0.380824595689774, 0.331502139568329,
0.4378362596035, 0.379464775323868, 0.331502139568329, 0.251651585102081,
0.379464775323868, 0.286199659109116, 0.33187872171402, 0.313433319330215,
0.286199659109116, 0.380824595689774, 0.379464775323868,
0.251651585102081, 0.231100931763649, 0.380824595689774,
0.380824595689774, 0.33187872171402, 0.379464775323868, 0.313433319330215,
0.279977828264236, 0.35188165307045, 0.4378362596035, 0.286199659109116,
0.552434469366022, 0.547776138747563, 0.545223606191102,
0.52761041067533, 0.526893828580226, 0.520186540770446, 0.519009988508274,
0.510205033978912, 0.509417940465933, 0.508399155432734,
0.508139292638754, 0.507554980510327, 0.506664240551504,
0.506056222909844, 0.506015883883228, 0.505825449782732,
0.505682910471734, 0.505286723357283, 0.504143193001739,
0.504138166686835, 0.504109828489206, 0.503690364450468,
0.503279234384969, 0.503118741548416, 0.503082827817409,
0.502969612598108, 0.502840643028908, 0.502698940680081,
0.502641442161881, 0.501604114621236, 0.501596391803771,
0.501444521071369, 0.501423095358147, 0.501299217995848,
0.50119809245183, 0.501160130337712, 0.501046773905014, 0.500985724463324,
0.500921849956512, 0.500819905610443, 0.500778664002662,
0.500767361833757, 0.500428694537166, 0.500330298241897,
0.500291906602721, 0.500220495140119, 0.500052355273801,
0.500015363695035, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(48L,
7L), .Dimnames = list(NULL, c("support", "lhsSupport", "rhsSupport",
"confidence", "lift", "loLift", "hiLift")))), .Names = c("rules",
"statistics"), class = c("farules", "list"))), .Names = c("arima",
"expSmooth", "randomWalk", "theta")), featuresContext = structure(list(
length = c(14, 51, 126), trendStrength = c(0.00206004820626882,
0.999999999610942, 1), seasonStrength = c(0, 0.669003044548185,
1), skewness = c(0.000438185907652042, 0.407888740092061,
5.42759768702902), kurtosis = c(1.14929353310186, 2.3965898416211,
40.8947798483529), varcoef = c(0.00592853945985588, 0.196592940983816,
1.68291590311524), stationarity = c(0.01, 0.585769170137984,
0.99), frequency = c(0.0833333333333333, 0.25, 1)), .Names = c("length",
"trendStrength", "seasonStrength", "skewness", "kurtosis", "varcoef",
"stationarity", "frequency")), weightContext = structure(list(
arima = c(0, 0.786124000918699, 1), expSmooth = c(0, 0.772842240528426,
1), randomWalk = c(0, 0.071978067472705, 1), theta = c(0,
0.597010247137179, 1)), .Names = c("arima", "expSmooth",
"randomWalk", "theta"))), .Names = c("model", "featuresContext",
"weightContext"))
| /lfl/R/frbemodel.R | no_license | ingted/R-Examples | R | false | false | 31,963 | r | .frbemodel <- structure(list(model = structure(list(arima = structure(list(
rules = list(c("MlBi.weight", "QrBi.seasonStrength", "MlMe.frequency"
), c("Bi.weight", "QrBi.seasonStrength", "MlMe.frequency"
), c("QrBi.weight", "QrBi.trendStrength", "QrSm.varcoef"),
c("RoBi.weight", "QrBi.seasonStrength", "QrSm.skewness"
), c("RoBi.weight", "QrBi.trendStrength", "QrSm.skewness"
), c("QrBi.weight", "QrMe.seasonStrength", "QrSm.varcoef"
), c("MlBi.weight", "QrBi.seasonStrength", "QrSm.skewness"
), c("RoBi.weight", "RoSm.length", "QrMe.stationarity"
), c("QrBi.weight", "QrSm.skewness"), c("RoSm.weight",
"QrMe.length", "QrSm.seasonStrength", "MlMe.kurtosis"
), c("RoBi.weight", "QrMe.length", "SiBi.seasonStrength",
"QrMe.skewness"), c("QrBi.weight", "QrMe.trendStrength",
"MlMe.frequency"), c("QrBi.weight", "ExSm.seasonStrength",
"QrMe.stationarity"), c("RoBi.weight", "QrMe.trendStrength",
"QrMe.kurtosis", "MlMe.frequency"), c("MlBi.weight",
"QrBi.trendStrength", "QrSm.skewness", "QrMe.varcoef"
), c("QrBi.weight", "SiBi.seasonStrength"), c("QrBi.weight",
"QrBi.trendStrength", "QrMe.kurtosis", "RoMe.stationarity"
), c("RoBi.weight", "QrBi.trendStrength", "QrMe.kurtosis",
"QrSm.varcoef"), c("QrBi.weight", "QrSm.length", "QrMe.kurtosis"
), c("MlBi.weight", "QrSm.skewness", "QrMe.kurtosis",
"QrBi.stationarity"), c("Bi.weight", "RoBi.seasonStrength",
"QrSm.skewness"), c("MlBi.weight", "ExBi.trendStrength",
"ExBi.seasonStrength", "RoMe.varcoef"), c("QrSm.weight",
"QrSm.seasonStrength", "Me.skewness", "Me.kurtosis",
"Me.varcoef"), c("QrBi.weight", "QrBi.seasonStrength",
"ExBi.stationarity"), c("RoBi.weight", "VeBi.length",
"QrMe.varcoef", "MlBi.stationarity"), c("QrSm.weight",
"Me.length", "RoSm.seasonStrength"), c("RoBi.weight",
"RoSm.length", "RoMe.kurtosis"), c("RoBi.weight", "ExBi.seasonStrength",
"ExBi.stationarity"), c("RoSm.weight", "QrMe.length",
"QrSm.seasonStrength", "QrMe.skewness", "RoMe.kurtosis"
), c("RoBi.weight", "VeBi.trendStrength", "ExBi.seasonStrength"
), c("QrBi.weight", "RoBi.trendStrength", "QrMe.seasonStrength",
"MlMe.kurtosis"), c("QrSm.weight", "MlSm.seasonStrength",
"RoMe.skewness", "Me.kurtosis"), c("QrSm.weight", "QrMe.length",
"QrSm.seasonStrength", "QrMe.kurtosis"), c("Bi.weight",
"VeBi.length", "MlBi.seasonStrength", "RoBi.stationarity"
), c("MlBi.weight", "QrBi.trendStrength", "MlSm.skewness"
), c("RoBi.weight", "MlMe.trendStrength", "ExBi.seasonStrength"
), c("RoBi.weight", "MlBi.trendStrength", "QrMe.seasonStrength",
"VeSm.frequency"), c("MlBi.weight", "VeBi.length", "RoBi.seasonStrength",
"QrBi.stationarity"), c("MlBi.weight", "Me.trendStrength",
"VeBi.seasonStrength", "MlBi.stationarity")), statistics = structure(c(0.0553324297070503,
0.0535291358828545, 0.0683433637022972, 0.0674656629562378,
0.0881131663918495, 0.0504934377968311, 0.0660794749855995,
0.0531193651258945, 0.155274227261543, 0.0525155030190945,
0.0500311888754368, 0.0540792420506477, 0.0596559159457684,
0.0511576645076275, 0.0744946822524071, 0.165995970368385,
0.0799539834260941, 0.0547490678727627, 0.110782355070114,
0.0539162494242191, 0.0623716525733471, 0.0505596026778221,
0.0586908757686615, 0.0979527309536934, 0.0525922030210495,
0.0501280128955841, 0.0832463875412941, 0.0794434621930122,
0.0504496246576309, 0.0643889084458351, 0.053155891597271,
0.0810941010713577, 0.0647504180669785, 0.0523040555417538,
0.0612805485725403, 0.0707165226340294, 0.0563865378499031,
0.0556458756327629, 0.0505959875881672, 0.101096130907536,
0.101096130907536, 0.129691913723946, 0.12867546081543, 0.170518070459366,
0.0982556343078613, 0.12867546081543, 0.103511586785316,
0.303272306919098, 0.103271067142487, 0.0984836667776108,
0.106564275920391, 0.118037797510624, 0.101412616670132,
0.147680655121803, 0.329247862100601, 0.158755585551262,
0.108839794993401, 0.220417127013206, 0.107299707829952,
0.124156303703785, 0.100754670798779, 0.117019392549992,
0.195308491587639, 0.104902468621731, 0.0999998971819878,
0.166111022233963, 0.158527165651321, 0.100701823830605,
0.12855638563633, 0.106153674423695, 0.161976352334023, 0.129332035779953,
0.104504562914371, 0.122440300881863, 0.14130774140358, 0.112691588699818,
0.111219681799412, 0.101161912083626, 0.420343458652496,
0.408192425966263, 0.449764728546143, 0.43272602558136, 0.43272602558136,
0.449764728546143, 0.420343458652496, 0.43272602558136, 0.449764728546143,
0.350143998861313, 0.43272602558136, 0.449764728546143, 0.449764728546143,
0.43272602558136, 0.420343458652496, 0.449764728546143, 0.449764728546143,
0.43272602558136, 0.449764728546143, 0.420343458652496, 0.408192425966263,
0.420343458652496, 0.376548111438751, 0.449764728546143,
0.43272602558136, 0.376548111438751, 0.43272602558136, 0.43272602558136,
0.350143998861313, 0.43272602558136, 0.449764728546143, 0.376548111438751,
0.376548111438751, 0.408192425966263, 0.420343458652496,
0.43272602558136, 0.43272602558136, 0.420343458652496, 0.420343458652496,
0.547324899680468, 0.529487482877196, 0.526967038575503,
0.524308695136593, 0.516737998233722, 0.513898649706149,
0.513535949798408, 0.513173131391217, 0.511996063336455,
0.508520967897398, 0.508015090344005, 0.507480031028857,
0.505396722099962, 0.504450690529264, 0.504430876142618,
0.504167192793086, 0.503629419704903, 0.50302435681804, 0.502603207705708,
0.502482723528616, 0.502363961496106, 0.501809020633861,
0.501548286055136, 0.501528275383459, 0.501343807367322,
0.501280644362635, 0.501149089456832, 0.501134691121313,
0.500980247810553, 0.500861222312079, 0.500744716429769,
0.500653952893865, 0.500652585235305, 0.500495424153015,
0.500493286370369, 0.500443372257013, 0.500361548723062,
0.500323996009286, 0.500148588990112, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(39L,
7L), .Dimnames = list(NULL, c("support", "lhsSupport", "rhsSupport",
"confidence", "lift", "loLift", "hiLift")))), .Names = c("rules",
"statistics"), class = c("farules", "list")), expSmooth = structure(list(
rules = list(c("QrSm.weight", "QrMe.length", "QrSm.seasonStrength"
), c("MlBi.weight", "QrSm.skewness", "QrBi.stationarity"),
c("RoSm.weight", "QrMe.length", "QrSm.seasonStrength"
), c("QrBi.weight", "QrBi.seasonStrength"), c("RoBi.weight",
"RoSm.skewness", "QrMe.kurtosis", "QrMe.varcoef"), c("QrBi.weight",
"QrMe.length", "QrSm.frequency"), c("MlBi.weight", "QrSm.skewness",
"QrMe.varcoef", "SiSm.frequency"), c("Sm.weight", "QrMe.length",
"QrBi.trendStrength", "QrSm.seasonStrength"), c("RoBi.weight",
"SiBi.stationarity"), c("MlBi.weight", "ExBi.stationarity"
), c("QrBi.weight", "QrSm.skewness", "QrMe.kurtosis"),
c("MlSm.weight", "QrMe.length", "QrSm.seasonStrength"
), c("MlBi.weight", "QrBi.seasonStrength", "MlMe.frequency"
), c("Sm.weight", "QrMe.length", "QrSm.seasonStrength",
"Me.varcoef"), c("Bi.weight", "QrSm.skewness", "QrBi.stationarity"
), c("Sm.weight", "RoMe.length", "RoSm.seasonStrength",
"QrMe.varcoef"), c("QrBi.weight", "MlMe.length", "MlMe.kurtosis",
"QrMe.varcoef", "MlMe.frequency"), c("Bi.weight", "QrBi.seasonStrength",
"RoSm.skewness"), c("Bi.weight", "Bi.seasonStrength",
"SiBi.stationarity"), c("RoBi.weight", "QrSm.skewness",
"MlSm.frequency"), c("VeBi.weight", "QrBi.length", "ExBi.seasonStrength",
"Me.kurtosis", "SiBi.stationarity"), c("Bi.weight", "QrBi.length",
"RoMe.kurtosis", "SiBi.stationarity"), c("VeBi.weight",
"QrBi.length", "VeBi.seasonStrength", "Me.kurtosis",
"ExBi.stationarity"), c("MlBi.weight", "QrBi.trendStrength",
"QrSm.skewness", "QrMe.kurtosis"), c("VeBi.weight", "QrBi.length",
"SiBi.seasonStrength", "RoMe.kurtosis", "ExBi.stationarity"
), c("RoBi.weight", "QrBi.seasonStrength", "QrMe.kurtosis"
), c("Bi.weight", "QrBi.trendStrength", "VeBi.seasonStrength",
"MlMe.kurtosis"), c("Bi.weight", "QrBi.trendStrength",
"QrSm.skewness", "QrMe.kurtosis", "QrMe.varcoef"), c("QrBi.weight",
"QrMe.kurtosis", "RoBi.stationarity"), c("QrBi.weight",
"RoSm.length", "QrMe.stationarity"), c("QrSm.weight",
"Bi.trendStrength", "RoSm.seasonStrength", "MlMe.skewness"
), c("Bi.weight", "QrBi.length", "Bi.seasonStrength",
"Me.kurtosis", "QrBi.stationarity"), c("MlBi.weight",
"VeBi.trendStrength", "RoBi.seasonStrength", "MlMe.kurtosis"
), c("MlBi.weight", "VeBi.seasonStrength", "RoMe.kurtosis"
), c("RoBi.weight", "SiBi.trendStrength", "RoSm.skewness",
"QrMe.varcoef"), c("QrBi.weight", "RoBi.trendStrength",
"QrSm.skewness", "QrMe.varcoef"), c("Bi.weight", "QrBi.length",
"ExBi.seasonStrength", "Me.kurtosis", "RoMe.varcoef")),
statistics = structure(c(0.0815981701016426, 0.0614597760140896,
0.0782240703701973, 0.224344417452812, 0.0862632393836975,
0.0729342624545097, 0.0639304295182228, 0.0502423271536827,
0.128447845578194, 0.116287410259247, 0.123051941394806,
0.0758908912539482, 0.0510317049920559, 0.0501841567456722,
0.0593299306929111, 0.0548232048749924, 0.0550771504640579,
0.0551150441169739, 0.0995828360319138, 0.0728134214878082,
0.0517548844218254, 0.0688572898507118, 0.0529531016945839,
0.0597639642655849, 0.0543591566383839, 0.212206467986107,
0.0661505907773972, 0.0515229105949402, 0.16906464099884,
0.0518100708723068, 0.0507209636271, 0.0725702121853828,
0.0746634677052498, 0.167581930756569, 0.060338344424963,
0.0736311003565788, 0.0620163045823574, 0.149828627705574,
0.117683216929436, 0.149828627705574, 0.432726860046387,
0.168784901499748, 0.142777293920517, 0.125204190611839,
0.0985179245471954, 0.252366900444031, 0.228992059826851,
0.242879539728165, 0.149828627705574, 0.101096130907536,
0.099481888115406, 0.117683216929436, 0.108862869441509,
0.10961326956749, 0.10976929962635, 0.198334977030754, 0.145109206438065,
0.10314866155386, 0.137240052223206, 0.105583660304546, 0.119175434112549,
0.108447961509228, 0.423450648784637, 0.132061049342155,
0.102864116430283, 0.337683796882629, 0.103511586785316,
0.101351194083691, 0.145039677619934, 0.149245545268059,
0.335027664899826, 0.120635487139225, 0.147236660122871,
0.124030612409115, 0.365115106105804, 0.409470558166504,
0.339983552694321, 0.439135015010834, 0.422608703374863,
0.439135015010834, 0.409470558166504, 0.303977817296982,
0.422608703374863, 0.409470558166504, 0.439135015010834,
0.322471439838409, 0.409470558166504, 0.303977817296982,
0.394504189491272, 0.303977817296982, 0.439135015010834,
0.394504189491272, 0.394504189491272, 0.422608703374863,
0.366584867238998, 0.394504189491272, 0.366584867238998,
0.409470558166504, 0.366584867238998, 0.422608703374863,
0.394504189491272, 0.394504189491272, 0.439135015010834,
0.439135015010834, 0.365115106105804, 0.394504189491272,
0.409470558166504, 0.409470558166504, 0.422608703374863,
0.439135015010834, 0.394504189491272, 0.544610007788297,
0.522247586509652, 0.522090281197224, 0.518443475934827,
0.511083862461633, 0.510825359213711, 0.5106093430724, 0.509981583398196,
0.508972632116946, 0.507822892842555, 0.506637741213311,
0.506517962662518, 0.504783956952126, 0.504455209851416,
0.504149463627308, 0.503598749107456, 0.502467909965468,
0.502098895634598, 0.502094171803455, 0.501783610255538,
0.501750421597097, 0.501728822856488, 0.501527428977604,
0.501478888754406, 0.501246458503127, 0.501136244790672,
0.500909171227379, 0.500883227144234, 0.500659618730842,
0.500524361391166, 0.500447617669085, 0.500347307552267,
0.500272671932333, 0.500203261741612, 0.500170769446363,
0.500086733121578, 0.50000804944667, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(37L, 7L), .Dimnames = list(
NULL, c("support", "lhsSupport", "rhsSupport", "confidence",
"lift", "loLift", "hiLift")))), .Names = c("rules", "statistics"
), class = c("farules", "list")), randomWalk = structure(list(
rules = list(c("QrBi.weight", "QrMe.length", "QrSm.seasonStrength"
), c("RoBi.weight", "QrMe.length", "QrSm.seasonStrength"),
c("RoSm.weight", "QrBi.seasonStrength"), c("QrSm.weight",
"QrBi.trendStrength"), c("MlSm.weight", "QrBi.seasonStrength"
), c("RoSm.weight", "QrBi.trendStrength"), c("MlSm.weight",
"QrBi.trendStrength"), c("Sm.weight", "QrBi.seasonStrength"
), c("Sm.weight", "QrBi.trendStrength"), c("MlBi.weight",
"QrMe.length", "QrSm.seasonStrength"), c("VeSm.weight",
"QrBi.trendStrength"), c("SiSm.weight", "QrBi.trendStrength"
), c("ExSm.weight", "QrBi.trendStrength"), c("VeSm.weight",
"QrBi.seasonStrength"), c("SiSm.weight", "QrBi.seasonStrength"
), c("QrSm.weight", "RoMe.kurtosis", "MlMe.frequency"
), c("RoSm.weight", "RoMe.kurtosis", "RoMe.frequency"
), c("ExSm.weight", "QrBi.seasonStrength"), c("QrSm.weight",
"QrSm.length"), c("RoSm.weight", "QrSm.length"), c("VeSm.weight",
"QrMe.kurtosis", "QrSm.varcoef"), c("MlSm.weight", "RoMe.kurtosis",
"Me.frequency"), c("RoBi.weight", "QrMe.trendStrength",
"QrSm.seasonStrength"), c("MlSm.weight", "QrSm.length"
), c("SiSm.weight", "QrMe.kurtosis", "QrSm.varcoef"),
c("Sm.weight", "QrSm.length"), c("ExSm.weight", "QrMe.kurtosis",
"QrSm.varcoef"), c("MlSm.weight", "QrMe.seasonStrength",
"QrMe.stationarity", "Sm.frequency"), c("QrBi.weight",
"QrSm.length", "RoMe.trendStrength", "MlMe.varcoef"),
c("Sm.weight", "RoMe.kurtosis", "Me.frequency"), c("VeSm.weight",
"QrSm.length"), c("QrBi.weight", "QrSm.seasonStrength",
"QrMe.skewness"), c("SiSm.weight", "QrSm.length"), c("ExSm.weight",
"QrSm.length"), c("QrBi.weight", "Me.length", "QrMe.skewness",
"RoMe.varcoef", "Me.frequency"), c("VeSm.weight", "RoMe.kurtosis",
"Me.frequency"), c("QrBi.weight", "MlMe.length", "QrMe.varcoef",
"QrMe.stationarity"), c("Sm.weight", "QrMe.seasonStrength",
"QrMe.stationarity", "Sm.frequency"), c("SiSm.weight",
"RoMe.kurtosis", "Me.frequency"), c("RoBi.weight", "MlMe.length",
"Me.skewness", "QrMe.varcoef", "QrMe.stationarity"),
c("RoSm.weight", "QrMe.varcoef", "Sm.frequency"), c("MlSm.weight",
"QrSm.varcoef"), c("QrBi.weight", "Me.skewness", "Me.varcoef",
"MlMe.stationarity"), c("QrBi.weight", "RoMe.length",
"QrBi.trendStrength", "RoMe.skewness", "QrMe.varcoef"
), c("QrBi.weight", "RoSm.length", "MlMe.trendStrength",
"MlMe.skewness"), c("QrBi.weight", "RoMe.length", "RoMe.stationarity",
"MlMe.frequency"), c("RoBi.weight", "RoSm.seasonStrength",
"Me.kurtosis"), c("MlBi.weight", "QrSm.seasonStrength",
"Me.kurtosis", "Me.varcoef"), c("ExSm.weight", "RoMe.kurtosis",
"Me.frequency"), c("RoBi.weight", "QrSm.seasonStrength",
"QrMe.skewness", "MlMe.varcoef"), c("QrBi.weight", "QrMe.trendStrength",
"QrMe.kurtosis", "Me.stationarity"), c("Sm.weight", "QrSm.varcoef"
), c("Bi.weight", "QrMe.length", "QrSm.seasonStrength"
), c("QrBi.weight", "QrSm.seasonStrength", "QrMe.kurtosis",
"MlMe.varcoef"), c("RoBi.weight", "QrSm.seasonStrength",
"MlMe.skewness", "QrMe.varcoef"), c("MlBi.weight", "QrMe.trendStrength",
"QrSm.seasonStrength", "QrMe.skewness"), c("RoBi.weight",
"Me.length", "QrBi.trendStrength", "RoMe.skewness", "QrMe.varcoef"
), c("MlBi.weight", "MlSm.seasonStrength", "QrMe.skewness",
"Me.kurtosis"), c("QrSm.weight", "SiSm.frequency")),
statistics = structure(c(0.0867330580949783, 0.083218052983284,
0.233140662312508, 0.263708263635635, 0.231206223368645,
0.26219317317009, 0.261023730039597, 0.228912457823753, 0.25976425409317,
0.0790695771574974, 0.257887303829193, 0.257227927446365,
0.256534934043884, 0.225063741207123, 0.22338142991066, 0.104536153376102,
0.104169331490993, 0.221582442522049, 0.126719355583191,
0.126641839742661, 0.117890037596226, 0.103806011378765,
0.0937432050704956, 0.126310363411903, 0.117658190429211,
0.126075237989426, 0.117327965795994, 0.0507219471037388,
0.0600524060428143, 0.103344343602657, 0.125694215297699,
0.148976862430573, 0.125574052333832, 0.12543061375618, 0.0513141751289368,
0.102677442133427, 0.0767121538519859, 0.0502900891005993,
0.10237767547369, 0.0507664233446121, 0.21602638065815, 0.128013372421265,
0.0650656968355179, 0.0679261460900307, 0.0544815137982368,
0.0520723909139633, 0.104499533772469, 0.0732661560177803,
0.101968869566917, 0.105553023517132, 0.0548814833164215,
0.127542287111282, 0.0750589668750763, 0.114984482526779,
0.112453736364841, 0.0754802450537682, 0.0533889010548592,
0.083994522690773, 0.252460807561874, 0.149828627705574,
0.149828627705574, 0.432726860046387, 0.491184085607529,
0.432726860046387, 0.491184085607529, 0.491184085607529,
0.432726860046387, 0.491184085607529, 0.149828627705574,
0.491184085607529, 0.491184085607529, 0.491184085607529,
0.432726860046387, 0.432726860046387, 0.20327553153038, 0.20327553153038,
0.432726860046387, 0.24751977622509, 0.24751977622509, 0.230668947100639,
0.20327553153038, 0.183692455291748, 0.24751977622509, 0.230668947100639,
0.24751977622509, 0.230668947100639, 0.0997635945677757,
0.118116416037083, 0.20327553153038, 0.24751977622509, 0.293645918369293,
0.24751977622509, 0.24751977622509, 0.101335622370243, 0.20327553153038,
0.152035176753998, 0.0997635945677757, 0.20327553153038,
0.100921131670475, 0.429517805576324, 0.254553526639938,
0.129407525062561, 0.135301038622856, 0.108540564775467,
0.103757627308369, 0.208276674151421, 0.146042764186859,
0.20327553153038, 0.210630789399147, 0.109528966248035, 0.254553526639938,
0.149828627705574, 0.229638740420341, 0.224624276161194,
0.150807693600655, 0.106687039136887, 0.167926445603371,
0.504772007465363, 0.385609328746796, 0.361263692378998,
0.48637193441391, 0.489094972610474, 0.483034938573837, 0.48637193441391,
0.483034938573837, 0.479497849941254, 0.479497849941254,
0.342880696058273, 0.474217027425766, 0.47221514582634, 0.469980031251907,
0.474217027425766, 0.47221514582634, 0.489094972610474, 0.48637193441391,
0.469980031251907, 0.489094972610474, 0.48637193441391, 0.474217027425766,
0.483034938573837, 0.361263692378998, 0.483034938573837,
0.47221514582634, 0.479497849941254, 0.469980031251907, 0.483034938573837,
0.385609328746796, 0.479497849941254, 0.474217027425766,
0.385609328746796, 0.47221514582634, 0.469980031251907, 0.385609328746796,
0.474217027425766, 0.385609328746796, 0.479497849941254,
0.47221514582634, 0.361263692378998, 0.48637193441391, 0.483034938573837,
0.385609328746796, 0.385609328746796, 0.385609328746796,
0.385609328746796, 0.361263692378998, 0.342880696058273,
0.469980031251907, 0.361263692378998, 0.385609328746796,
0.479497849941254, 0.323303759098053, 0.385609328746796,
0.361263692378998, 0.342880696058273, 0.361263692378998,
0.342880696058273, 0.489094972610474, 0.578881749256999,
0.555421579024367, 0.538770951929159, 0.5368827520327, 0.534300605568742,
0.533798184535625, 0.531417319265842, 0.5289998818174, 0.528853156494019,
0.527733440320069, 0.525031879870907, 0.523689457748227,
0.522278594850208, 0.520105780313699, 0.516218082433603,
0.514258418556781, 0.51245386351591, 0.512060754671656, 0.511956488955269,
0.511643318663535, 0.511078925351799, 0.51066653520594, 0.510326920730678,
0.510304127364106, 0.510073817512494, 0.50935420155994, 0.508642221984064,
0.5084214068618, 0.508417102868756, 0.508395392325968, 0.50781483893794,
0.50733503553493, 0.50732937080404, 0.506749867299959, 0.50637844746691,
0.505114616404686, 0.504568452445126, 0.504092593279947,
0.503639934934271, 0.503030658736302, 0.502950932076696,
0.502893729703998, 0.502796856705685, 0.502037137197231,
0.501946105688139, 0.501865667756681, 0.501734215788829,
0.501676042806459, 0.501628842385675, 0.501128177025952,
0.501068212331511, 0.501043096101704, 0.500965456498564,
0.500719008980393, 0.50063037836633, 0.500506593872082, 0.500425370192884,
0.500186390469799, 0.500148193299325, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(59L,
7L), .Dimnames = list(NULL, c("support", "lhsSupport", "rhsSupport",
"confidence", "lift", "loLift", "hiLift")))), .Names = c("rules",
"statistics"), class = c("farules", "list")), theta = structure(list(
rules = list(c("RoMe.weight", "QrBi.trendStrength", "QrSm.skewness"
), c("QrMe.weight", "QrSm.length"), c("QrBi.weight", "QrMe.length",
"ExSm.frequency"), c("MlMe.weight", "QrBi.trendStrength",
"QrSm.seasonStrength", "QrMe.stationarity"), c("RoMe.weight",
"QrSm.skewness", "QrMe.stationarity"), c("MlMe.weight", "QrSm.seasonStrength",
"QrSm.skewness"), c("QrMe.weight", "QrBi.seasonStrength",
"RoMe.frequency"), c("MlMe.weight", "QrBi.trendStrength",
"QrSm.skewness"), c("QrBi.weight", "QrMe.length", "QrBi.stationarity"
), c("MlMe.weight", "QrBi.trendStrength", "Sm.seasonStrength",
"QrMe.varcoef"), c("QrMe.weight", "QrSm.skewness"), c("MlBi.weight",
"QrMe.length", "RoMe.kurtosis", "ExSm.frequency"), c("QrSm.weight",
"QrBi.length", "ExBi.seasonStrength"), c("MlMe.weight", "MlSm.length",
"QrMe.stationarity"), c("MlBi.weight", "Me.length", "QrMe.kurtosis",
"QrMe.varcoef", "QrBi.stationarity"), c("QrBi.weight", "MlMe.length",
"QrMe.trendStrength", "MlMe.varcoef"), c("RoBi.weight", "QrMe.length",
"VeSm.frequency"), c("RoMe.weight", "QrBi.trendStrength",
"QrMe.stationarity"), c("RoBi.weight", "QrMe.length", "MlMe.kurtosis",
"QrMe.varcoef", "QrBi.stationarity"), c("MlSm.weight", "QrBi.length",
"ExBi.seasonStrength", "SiBi.stationarity"), c("RoBi.weight",
"Me.length", "QrBi.stationarity"), c("QrBi.weight", "Me.length",
"QrBi.seasonStrength", "MlMe.kurtosis"), c("Bi.weight", "Me.length",
"MlSm.frequency"), c("QrBi.weight", "MlMe.length", "QrMe.trendStrength",
"Me.kurtosis"), c("QrBi.weight", "QrMe.trendStrength", "MlMe.kurtosis",
"Me.varcoef", "ExSm.frequency"), c("MlBi.weight", "QrMe.length",
"QrMe.varcoef", "ExSm.frequency"), c("QrMe.weight", "RoSm.seasonStrength"
), c("RoMe.weight", "QrBi.trendStrength", "QrSm.seasonStrength",
"RoMe.varcoef"), c("MlBi.weight", "QrMe.length", "Me.kurtosis",
"MlMe.varcoef", "QrBi.stationarity"), c("RoSm.weight", "QrBi.length",
"MlBi.seasonStrength", "QrMe.kurtosis", "ExBi.stationarity"
), c("RoMe.weight", "QrSm.length", "RoMe.varcoef"), c("QrSm.weight",
"VeBi.length", "QrBi.seasonStrength", "QrMe.varcoef", "QrBi.stationarity"
), c("MlMe.weight", "QrSm.skewness", "MlMe.stationarity"),
c("Bi.weight", "QrMe.length", "MlMe.kurtosis", "ExSm.frequency"
), c("QrSm.weight", "ExBi.seasonStrength", "ExBi.stationarity"
), c("QrBi.weight", "Me.length", "QrMe.skewness", "Me.kurtosis",
"Me.varcoef"), c("RoMe.weight", "RoSm.length"), c("RoSm.weight",
"QrBi.length", "ExBi.seasonStrength", "QrMe.kurtosis",
"Bi.stationarity"), c("MlSm.weight", "QrBi.length", "VeBi.seasonStrength",
"QrMe.kurtosis", "QrMe.varcoef", "ExBi.stationarity"),
c("QrBi.weight", "Bi.trendStrength", "MlMe.skewness",
"RoSm.stationarity"), c("QrBi.weight", "QrMe.length",
"QrMe.trendStrength", "QrMe.skewness", "MlMe.kurtosis",
"Me.varcoef"), c("MlMe.weight", "VeSm.length", "QrSm.seasonStrength",
"QrMe.varcoef"), c("RoMe.weight", "VeSm.seasonStrength",
"RoMe.varcoef"), c("Bi.weight", "Me.length", "MlMe.kurtosis",
"RoMe.varcoef", "QrBi.stationarity"), c("VeBi.weight",
"Me.length", "Me.kurtosis", "QrMe.varcoef", "Sm.frequency"
), c("RoBi.weight", "Me.length", "QrMe.trendStrength",
"Me.kurtosis", "Me.varcoef"), c("QrMe.weight", "RoBi.trendStrength"
), c("QrSm.weight", "QrBi.length", "RoMe.kurtosis", "SiBi.stationarity"
)), statistics = structure(c(0.0942000597715378, 0.135585427284241,
0.0778455510735512, 0.0524262972176075, 0.0792518183588982,
0.0585502535104752, 0.052469901740551, 0.0869991779327393,
0.0763199254870415, 0.055534839630127, 0.154104575514793,
0.0669538974761963, 0.0863662585616112, 0.0500744171440601,
0.0576989948749542, 0.068632148206234, 0.0722000375390053,
0.115505583584309, 0.0603586621582508, 0.0568646527826786,
0.0645047500729561, 0.0693769678473473, 0.0647298321127892,
0.074457511305809, 0.0719771608710289, 0.0672856122255325,
0.171213790774345, 0.0720370262861252, 0.0510264299809933,
0.0600338950753212, 0.0961058735847473, 0.0505116283893585,
0.0539988093078136, 0.0634234696626663, 0.0794535130262375,
0.0761765688657761, 0.10408902913332, 0.0623421967029572,
0.0515866652131081, 0.0515606738626957, 0.0580988563597202,
0.0685170367360115, 0.102358505129814, 0.0506234243512154,
0.0546018518507481, 0.0518317110836506, 0.244368627667427,
0.0686221346259117, 0.170518070459366, 0.24751977622509,
0.142777293920517, 0.0993655472993851, 0.150413259863853,
0.112556263804436, 0.101096130907536, 0.170518070459366,
0.149817898869514, 0.109234720468521, 0.303272306919098,
0.13191457092762, 0.170460537075996, 0.0989503040909767,
0.114026054739952, 0.135683462023735, 0.142777293920517,
0.22859413921833, 0.119725234806538, 0.112795770168304, 0.127957731485367,
0.137737333774567, 0.128616139292717, 0.147991925477982,
0.143072187900543, 0.133776694536209, 0.340493142604828,
0.143300533294678, 0.101516559720039, 0.119683817028999,
0.191600009799004, 0.100732237100601, 0.10769110918045, 0.126518189907074,
0.158527165651321, 0.152000457048416, 0.207743138074875,
0.124439068138599, 0.102983459830284, 0.102952525019646,
0.116017036139965, 0.136824086308479, 0.204541638493538,
0.101180009543896, 0.109139986336231, 0.1036177277565, 0.488686084747314,
0.137240052223206, 0.379464775323868, 0.4378362596035, 0.380824595689774,
0.33187872171402, 0.379464775323868, 0.33187872171402, 0.4378362596035,
0.33187872171402, 0.380824595689774, 0.33187872171402, 0.4378362596035,
0.331502139568329, 0.286199659109116, 0.33187872171402, 0.331502139568329,
0.380824595689774, 0.35188165307045, 0.379464775323868, 0.35188165307045,
0.231100931763649, 0.35188165307045, 0.380824595689774, 0.313433319330215,
0.380824595689774, 0.380824595689774, 0.331502139568329,
0.4378362596035, 0.379464775323868, 0.331502139568329, 0.251651585102081,
0.379464775323868, 0.286199659109116, 0.33187872171402, 0.313433319330215,
0.286199659109116, 0.380824595689774, 0.379464775323868,
0.251651585102081, 0.231100931763649, 0.380824595689774,
0.380824595689774, 0.33187872171402, 0.379464775323868, 0.313433319330215,
0.279977828264236, 0.35188165307045, 0.4378362596035, 0.286199659109116,
0.552434469366022, 0.547776138747563, 0.545223606191102,
0.52761041067533, 0.526893828580226, 0.520186540770446, 0.519009988508274,
0.510205033978912, 0.509417940465933, 0.508399155432734,
0.508139292638754, 0.507554980510327, 0.506664240551504,
0.506056222909844, 0.506015883883228, 0.505825449782732,
0.505682910471734, 0.505286723357283, 0.504143193001739,
0.504138166686835, 0.504109828489206, 0.503690364450468,
0.503279234384969, 0.503118741548416, 0.503082827817409,
0.502969612598108, 0.502840643028908, 0.502698940680081,
0.502641442161881, 0.501604114621236, 0.501596391803771,
0.501444521071369, 0.501423095358147, 0.501299217995848,
0.50119809245183, 0.501160130337712, 0.501046773905014, 0.500985724463324,
0.500921849956512, 0.500819905610443, 0.500778664002662,
0.500767361833757, 0.500428694537166, 0.500330298241897,
0.500291906602721, 0.500220495140119, 0.500052355273801,
0.500015363695035, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), .Dim = c(48L,
7L), .Dimnames = list(NULL, c("support", "lhsSupport", "rhsSupport",
"confidence", "lift", "loLift", "hiLift")))), .Names = c("rules",
"statistics"), class = c("farules", "list"))), .Names = c("arima",
"expSmooth", "randomWalk", "theta")), featuresContext = structure(list(
length = c(14, 51, 126), trendStrength = c(0.00206004820626882,
0.999999999610942, 1), seasonStrength = c(0, 0.669003044548185,
1), skewness = c(0.000438185907652042, 0.407888740092061,
5.42759768702902), kurtosis = c(1.14929353310186, 2.3965898416211,
40.8947798483529), varcoef = c(0.00592853945985588, 0.196592940983816,
1.68291590311524), stationarity = c(0.01, 0.585769170137984,
0.99), frequency = c(0.0833333333333333, 0.25, 1)), .Names = c("length",
"trendStrength", "seasonStrength", "skewness", "kurtosis", "varcoef",
"stationarity", "frequency")), weightContext = structure(list(
arima = c(0, 0.786124000918699, 1), expSmooth = c(0, 0.772842240528426,
1), randomWalk = c(0, 0.071978067472705, 1), theta = c(0,
0.597010247137179, 1)), .Names = c("arima", "expSmooth",
"randomWalk", "theta"))), .Names = c("model", "featuresContext",
"weightContext"))
|
cbands.region <- function(surv.object, tL, tU) {
  # Identify the (aL, aU) pair needed to look up critical values for
  # simultaneous Kaplan-Meier confidence bands over the time window [tL, tU].
  #
  # surv.object: a survfit-style object (list) with components $time and
  #   $std.err -- assumed to be on the sigma scale used by Klein &
  #   Moeschberger's tables; TODO confirm against the fitting call.
  # tL, tU: lower / upper time limits of the desired band.
  #
  # Returns a list with aL and aU rounded to 2 decimals, for use with the
  # critical-value tables in Klein & Moeschberger 2nd ed. (Appendix C.3a-C.4c).

  # --- Input validation ---
  if (tL < min(surv.object$time))
    stop("Lower limit is smaller than smallest observed survival time, choose larger value.")
  if (tU < tL)
    stop("Upper limit must not be smaller than lower limit.")

  n <- length(surv.object$time)
  # Snap each requested limit to the largest observed time not exceeding it.
  lower.lim <- max(surv.object$time[surv.object$time <= tL])
  upper.lim <- max(surv.object$time[surv.object$time <= tU])
  # a = n*sigma^2 / (1 + n*sigma^2), evaluated at each snapped limit.
  aL <- (n * surv.object$std.err[surv.object$time == lower.lim]^2) /
    (1 + n * surv.object$std.err[surv.object$time == lower.lim]^2)
  aU <- (n * surv.object$std.err[surv.object$time == upper.lim]^2) /
    (1 + n * surv.object$std.err[surv.object$time == upper.lim]^2)
  writeLines("Find critical regions in Klein and Moeschberger 2nd ed. (Appendix C.3a - C.4c)")
  list(aL = round(aL, 2), aU = round(aU, 2))
}
cbands.interval <- function(surv.object, tL, tU, crit.value, alpha = 0.05, type = "linear", method = "ep") {
  # Compute the pointwise limits of a simultaneous confidence band for the
  # Kaplan-Meier survival curve over [tL, tU].
  #
  # surv.object: survfit-style list with $time, $surv, $std.err, $n.event.
  # tL, tU:      time window; each edge snaps to the largest observed
  #              time not exceeding it.
  # crit.value:  band critical value (looked up via cbands.region and the
  #              Klein & Moeschberger appendix tables).
  # alpha:       kept for backward compatibility; not used in the computation.
  # type:        transformation -- "linear", "log", or "asin" (arcsine-sqrt).
  # method:      "ep" = equal-precision bands, "hw" = Hall-Wellner bands.
  #
  # Returns a data.frame (t, surv, se, LL, UL) restricted to event times,
  # rounded to 3 decimals.
  if (type != "linear" && type != "log" && type != "asin")
    stop("type must be one of the three options: linear, log, asin")
  if (method != "ep" && method != "hw")
    stop("method must be either 'ep' or 'hw'")

  # Restrict to observed times spanning [tL, tU] (computed once instead of
  # once per extracted component).
  in.band <- (surv.object$time >= max(surv.object$time[surv.object$time <= tL])) &
    (surv.object$time <= max(surv.object$time[surv.object$time <= tU]))
  new.time  <- surv.object$time[in.band]
  new.surv  <- surv.object$surv[in.band]
  new.se    <- surv.object$std.err[in.band]
  new.event <- surv.object$n.event[in.band]

  results <- data.frame(t = new.time, surv = new.surv, se = new.se)

  # The EP and HW bands differ only in this half-width multiplier; the
  # transformation formulas below are shared between the two methods.
  if (method == "ep") {
    w <- crit.value * new.se
  } else { # Hall-Wellner
    n <- length(surv.object$time)
    w <- crit.value * (1 + n * new.se^2) / sqrt(n)
  }

  if (type == "linear") {
    results$LL <- new.surv - w * new.surv
    results$UL <- new.surv + w * new.surv
  } else if (type == "log") {
    theta <- exp(w / log(new.surv))
    results$LL <- new.surv^(1 / theta)
    results$UL <- new.surv^theta
  } else { # asin (arcsine-square-root transform)
    base <- asin(sqrt(new.surv))
    half <- 0.5 * w * sqrt(new.surv / (1 - new.surv))
    # Bug fix: clamp the *whole* transformed endpoint to [0, pi/2], not the
    # asin() term alone. asin(sqrt(S)) is always in [0, pi/2], so the old
    # clamp was a no-op and sin(.)^2 of a negative argument produced a
    # spuriously high lower limit (band could fall outside [0, 1]).
    results$LL <- sin(pmax(0, base - half))^2
    results$UL <- sin(pmin(pi / 2, base + half))^2
  }

  writeLines(paste0("Returning ", type, "-type confidence bands using ", method, " method."))
  round(results[which(new.event != 0), ], 3)
}
#cbands.interval(surv.object, tL = 100, tU = 600, crit.value = 1.3211, type = "linear", method = "hw")
| /functions/conf_bands.R | no_license | Larryzza/biostatistics-215 | R | false | false | 3,961 | r | cbands.region <- function(surv.object, tL, tU) {
# Identify the (aL, aU) pair used to look up critical values for simultaneous
# Kaplan-Meier confidence bands over the time window [tL, tU].
# surv.object: survfit-style list with $time and $std.err; returns a list
# with aL and aU rounded to 2 decimals for the tables referenced below.
#Error checking: tL must not precede the smallest observed time.
if(tL < min(surv.object$time)) stop("Lower limit is smaller than smallest observed survival time, choose larger value.")
n <- length(surv.object$time)
# Snap each requested limit to the largest observed time not exceeding it.
# NOTE(review): tU < min(time) would make max() of an empty vector return
# -Inf with a warning -- confirm callers always pass tU within range.
lower.lim <- max(surv.object$time[surv.object$time <= tL])
upper.lim <- max(surv.object$time[surv.object$time <= tU])
# a = n*sigma^2 / (1 + n*sigma^2), evaluated at each snapped limit
# (endpoints for the Klein & Moeschberger critical-value tables).
aL <- (n * surv.object$std.err[surv.object$time == lower.lim]^2) / (1 + n * surv.object$std.err[surv.object$time == lower.lim]^2)
aU <- (n * surv.object$std.err[surv.object$time == upper.lim]^2) / (1 + n * surv.object$std.err[surv.object$time == upper.lim]^2)
writeLines("Find critical regions in Klein and Moeschberger 2nd ed. (Appendix C.3a - C.4c)")
results <- list()
results$aL <- round(aL, 2)
results$aU <- round(aU, 2)
return(results)
}
cbands.interval <- function(surv.object, tL, tU, crit.value, alpha = 0.05, type = "linear", method = "ep") {
# Compute pointwise limits of a simultaneous confidence band for the
# Kaplan-Meier curve over [tL, tU].
# surv.object: survfit-style list with $time, $surv, $std.err, $n.event.
# crit.value: band critical value (see cbands.region); alpha is accepted
#   but never used in the computation below.
# type: "linear", "log", or "asin" transform; method: "ep" (equal
#   precision) or "hw" (Hall-Wellner).
# Returns a data.frame (t, surv, se, LL, UL) at event times, rounded to 3 dp.
if(type != "linear" & type != "log" & type != "asin") stop("type must be one of the three options: linear, log, asin")
if(method != "ep" & method != "hw") stop("method must be either 'ep' or 'hw'")
# Restrict each component to observed times spanning [tL, tU]; each window
# edge snaps to the largest observed time not exceeding it.
new.time <- surv.object$time[(surv.object$time >= max(surv.object$time[surv.object$time <= tL]))
& (surv.object$time <= max(surv.object$time[surv.object$time <= tU]))]
new.surv <- surv.object$surv[(surv.object$time >= max(surv.object$time[surv.object$time <= tL]))
& (surv.object$time <= max(surv.object$time[surv.object$time <= tU]))]
new.se <- surv.object$std.err[(surv.object$time >= max(surv.object$time[surv.object$time <= tL]))
& (surv.object$time <= max(surv.object$time[surv.object$time <= tU]))]
new.event <- surv.object$n.event[(surv.object$time >= max(surv.object$time[surv.object$time <= tL]))
& (surv.object$time <= max(surv.object$time[surv.object$time <= tU]))]
results <- data.frame(t = new.time, surv = new.surv, se = new.se)
#- Equal Precision Bands
if(method == "ep") {
if(type == "linear") {
results$LL <- new.surv - crit.value * new.se * new.surv
results$UL <- new.surv + crit.value * new.se * new.surv
} else if (type == "log") {
# Log-transformed band: S^(1/theta) .. S^theta.
theta <- exp(crit.value * new.se / log(new.surv))
results$LL <- new.surv^(1 / theta)
results$UL <- new.surv^theta
} else if (type == "asin") {
# NOTE(review): max(0, .) / min(pi/2, .) are applied to asin(sqrt(S))
# alone, which is always within [0, pi/2], so the clamp never takes
# effect; Klein & Moeschberger clamp the full expression
# (asin term minus/plus the half-width) -- confirm intended.
results$LL <- sin(sapply(asin(sqrt(new.surv)), function(x) max(0, x)) - 0.5 * crit.value * new.se *
sqrt(new.surv / (1 - new.surv)))^2
results$UL <- sin(sapply(asin(sqrt(new.surv)), function(x) min(pi / 2, x)) + 0.5 * crit.value * new.se *
sqrt(new.surv / (1 - new.surv)))^2
}
} else if (method == "hw") { #Hall-Wellner Bands
# Same transformations as above, with half-width multiplier
# crit.value * (1 + n*se^2) / sqrt(n) instead of crit.value * se.
n <- length(surv.object$time)
if(type == "linear") {
results$LL <- new.surv - crit.value * (1 + n * new.se^2) / sqrt(n) * new.surv
results$UL <- new.surv + crit.value * (1 + n * new.se^2) / sqrt(n) * new.surv
} else if (type == "log") {
theta <- exp(crit.value * (1 + n * new.se^2) / (sqrt(n) * log(new.surv)))
results$LL <- new.surv^(1 / theta)
results$UL <- new.surv^theta
} else if (type == "asin") {
# NOTE(review): same no-op clamping concern as the "ep" branch above.
results$LL <- sin(sapply(asin(sqrt(new.surv)), function(x) max(0, x)) - 0.5 * crit.value * (1 + n * new.se^2) / sqrt(n) *
sqrt(new.surv / (1 - new.surv)))^2
results$UL <- sin(sapply(asin(sqrt(new.surv)), function(x) min(pi / 2, x)) + 0.5 * crit.value * (1 + n * new.se^2) / sqrt(n) *
sqrt(new.surv / (1 - new.surv)))^2
}
}
#----- Returning results here: keep event times only, rounded to 3 decimals.
writeLines(paste0("Returning ", type, "-type confidence bands using ", method, " method."))
return(round(results[which(new.event != 0), ], 3))
}
#cbands.interval(surv.object, tL = 100, tU = 600, crit.value = 1.3211, type = "linear", method = "hw")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.